/* ivpu_accel.h */
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#ifndef __UAPI_IVPU_DRM_H__
#define __UAPI_IVPU_DRM_H__

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif
  11. #define DRM_IVPU_DRIVER_MAJOR 1
  12. #define DRM_IVPU_DRIVER_MINOR 0
  13. #define DRM_IVPU_GET_PARAM 0x00
  14. #define DRM_IVPU_SET_PARAM 0x01
  15. #define DRM_IVPU_BO_CREATE 0x02
  16. #define DRM_IVPU_BO_INFO 0x03
  17. #define DRM_IVPU_SUBMIT 0x05
  18. #define DRM_IVPU_BO_WAIT 0x06
  19. #define DRM_IOCTL_IVPU_GET_PARAM \
  20. DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_GET_PARAM, struct drm_ivpu_param)
  21. #define DRM_IOCTL_IVPU_SET_PARAM \
  22. DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_SET_PARAM, struct drm_ivpu_param)
  23. #define DRM_IOCTL_IVPU_BO_CREATE \
  24. DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_CREATE, struct drm_ivpu_bo_create)
  25. #define DRM_IOCTL_IVPU_BO_INFO \
  26. DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_INFO, struct drm_ivpu_bo_info)
  27. #define DRM_IOCTL_IVPU_SUBMIT \
  28. DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_SUBMIT, struct drm_ivpu_submit)
  29. #define DRM_IOCTL_IVPU_BO_WAIT \
  30. DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_WAIT, struct drm_ivpu_bo_wait)
  31. /**
  32. * DOC: contexts
  33. *
  34. * VPU contexts have private virtual address space, job queues and priority.
  35. * Each context is identified by an unique ID. Context is created on open().
  36. */
  37. #define DRM_IVPU_PARAM_DEVICE_ID 0
  38. #define DRM_IVPU_PARAM_DEVICE_REVISION 1
  39. #define DRM_IVPU_PARAM_PLATFORM_TYPE 2
  40. #define DRM_IVPU_PARAM_CORE_CLOCK_RATE 3
  41. #define DRM_IVPU_PARAM_NUM_CONTEXTS 4
  42. #define DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS 5
  43. #define DRM_IVPU_PARAM_CONTEXT_PRIORITY 6
  44. #define DRM_IVPU_PARAM_CONTEXT_ID 7
  45. #define DRM_IVPU_PARAM_FW_API_VERSION 8
  46. #define DRM_IVPU_PARAM_ENGINE_HEARTBEAT 9
  47. #define DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID 10
  48. #define DRM_IVPU_PARAM_TILE_CONFIG 11
  49. #define DRM_IVPU_PARAM_SKU 12
  50. #define DRM_IVPU_PARAM_CAPABILITIES 13
  51. #define DRM_IVPU_PLATFORM_TYPE_SILICON 0
  52. #define DRM_IVPU_CONTEXT_PRIORITY_IDLE 0
  53. #define DRM_IVPU_CONTEXT_PRIORITY_NORMAL 1
  54. #define DRM_IVPU_CONTEXT_PRIORITY_FOCUS 2
  55. #define DRM_IVPU_CONTEXT_PRIORITY_REALTIME 3
  56. #define DRM_IVPU_CAP_METRIC_STREAMER 1
  57. #define DRM_IVPU_CAP_DMA_MEMORY_RANGE 2
  58. /**
  59. * struct drm_ivpu_param - Get/Set VPU parameters
  60. */
  61. struct drm_ivpu_param {
  62. /**
  63. * @param:
  64. *
  65. * Supported params:
  66. *
  67. * %DRM_IVPU_PARAM_DEVICE_ID:
  68. * PCI Device ID of the VPU device (read-only)
  69. *
  70. * %DRM_IVPU_PARAM_DEVICE_REVISION:
  71. * VPU device revision (read-only)
  72. *
  73. * %DRM_IVPU_PARAM_PLATFORM_TYPE:
  74. * Returns %DRM_IVPU_PLATFORM_TYPE_SILICON on real hardware or device specific
  75. * platform type when executing on a simulator or emulator (read-only)
  76. *
  77. * %DRM_IVPU_PARAM_CORE_CLOCK_RATE:
  78. * Current PLL frequency (read-only)
  79. *
  80. * %DRM_IVPU_PARAM_NUM_CONTEXTS:
  81. * Maximum number of simultaneously existing contexts (read-only)
  82. *
  83. * %DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
  84. * Lowest VPU virtual address available in the current context (read-only)
  85. *
  86. * %DRM_IVPU_PARAM_CONTEXT_PRIORITY:
  87. * Value of current context scheduling priority (read-write).
  88. * See DRM_IVPU_CONTEXT_PRIORITY_* for possible values.
  89. *
  90. * %DRM_IVPU_PARAM_CONTEXT_ID:
  91. * Current context ID, always greater than 0 (read-only)
  92. *
  93. * %DRM_IVPU_PARAM_FW_API_VERSION:
  94. * Firmware API version array (read-only)
  95. *
  96. * %DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
  97. * Heartbeat value from an engine (read-only).
  98. * Engine ID (i.e. DRM_IVPU_ENGINE_COMPUTE) is given via index.
  99. *
  100. * %DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
  101. * Device-unique inference ID (read-only)
  102. *
  103. * %DRM_IVPU_PARAM_TILE_CONFIG:
  104. * VPU tile configuration (read-only)
  105. *
  106. * %DRM_IVPU_PARAM_SKU:
  107. * VPU SKU ID (read-only)
  108. *
  109. */
  110. __u32 param;
  111. /** @index: Index for params that have multiple instances */
  112. __u32 index;
  113. /** @value: Param value */
  114. __u64 value;
  115. };
  116. #define DRM_IVPU_BO_SHAVE_MEM 0x00000001
  117. #define DRM_IVPU_BO_HIGH_MEM DRM_IVPU_BO_SHAVE_MEM
  118. #define DRM_IVPU_BO_MAPPABLE 0x00000002
  119. #define DRM_IVPU_BO_DMA_MEM 0x00000004
  120. #define DRM_IVPU_BO_CACHED 0x00000000
  121. #define DRM_IVPU_BO_UNCACHED 0x00010000
  122. #define DRM_IVPU_BO_WC 0x00020000
  123. #define DRM_IVPU_BO_CACHE_MASK 0x00030000
  124. #define DRM_IVPU_BO_FLAGS \
  125. (DRM_IVPU_BO_HIGH_MEM | \
  126. DRM_IVPU_BO_MAPPABLE | \
  127. DRM_IVPU_BO_DMA_MEM | \
  128. DRM_IVPU_BO_CACHE_MASK)
  129. /**
  130. * struct drm_ivpu_bo_create - Create BO backed by SHMEM
  131. *
  132. * Create GEM buffer object allocated in SHMEM memory.
  133. */
  134. struct drm_ivpu_bo_create {
  135. /** @size: The size in bytes of the allocated memory */
  136. __u64 size;
  137. /**
  138. * @flags:
  139. *
  140. * Supported flags:
  141. *
  142. * %DRM_IVPU_BO_HIGH_MEM:
  143. *
  144. * Allocate VPU address from >4GB range.
  145. * Buffer object with vpu address >4GB can be always accessed by the
  146. * VPU DMA engine, but some HW generation may not be able to access
  147. * this memory from then firmware running on the VPU management processor.
  148. * Suitable for input, output and some scratch buffers.
  149. *
  150. * %DRM_IVPU_BO_MAPPABLE:
  151. *
  152. * Buffer object can be mapped using mmap().
  153. *
  154. * %DRM_IVPU_BO_CACHED:
  155. *
  156. * Allocated BO will be cached on host side (WB) and snooped on the VPU side.
  157. * This is the default caching mode.
  158. *
  159. * %DRM_IVPU_BO_UNCACHED:
  160. *
  161. * Allocated BO will not be cached on host side nor snooped on the VPU side.
  162. *
  163. * %DRM_IVPU_BO_WC:
  164. *
  165. * Allocated BO will use write combining buffer for writes but reads will be
  166. * uncached.
  167. */
  168. __u32 flags;
  169. /** @handle: Returned GEM object handle */
  170. __u32 handle;
  171. /** @vpu_addr: Returned VPU virtual address */
  172. __u64 vpu_addr;
  173. };
  174. /**
  175. * struct drm_ivpu_bo_info - Query buffer object info
  176. */
  177. struct drm_ivpu_bo_info {
  178. /** @handle: Handle of the queried BO */
  179. __u32 handle;
  180. /** @flags: Returned flags used to create the BO */
  181. __u32 flags;
  182. /** @vpu_addr: Returned VPU virtual address */
  183. __u64 vpu_addr;
  184. /**
  185. * @mmap_offset:
  186. *
  187. * Returned offset to be used in mmap(). 0 in case the BO is not mappable.
  188. */
  189. __u64 mmap_offset;
  190. /** @size: Returned GEM object size, aligned to PAGE_SIZE */
  191. __u64 size;
  192. };
  193. /* drm_ivpu_submit engines */
  194. #define DRM_IVPU_ENGINE_COMPUTE 0
  195. #define DRM_IVPU_ENGINE_COPY 1
  196. /**
  197. * struct drm_ivpu_submit - Submit commands to the VPU
  198. *
  199. * Execute a single command buffer on a given VPU engine.
  200. * Handles to all referenced buffer objects have to be provided in @buffers_ptr.
  201. *
  202. * User space may wait on job completion using %DRM_IVPU_BO_WAIT ioctl.
  203. */
  204. struct drm_ivpu_submit {
  205. /**
  206. * @buffers_ptr:
  207. *
  208. * A pointer to an u32 array of GEM handles of the BOs required for this job.
  209. * The number of elements in the array must be equal to the value given by @buffer_count.
  210. *
  211. * The first BO is the command buffer. The rest of array has to contain all
  212. * BOs referenced from the command buffer.
  213. */
  214. __u64 buffers_ptr;
  215. /** @buffer_count: Number of elements in the @buffers_ptr */
  216. __u32 buffer_count;
  217. /**
  218. * @engine: Select the engine this job should be executed on
  219. *
  220. * %DRM_IVPU_ENGINE_COMPUTE:
  221. *
  222. * Performs Deep Learning Neural Compute Inference Operations
  223. *
  224. * %DRM_IVPU_ENGINE_COPY:
  225. *
  226. * Performs memory copy operations to/from system memory allocated for VPU
  227. */
  228. __u32 engine;
  229. /** @flags: Reserved for future use - must be zero */
  230. __u32 flags;
  231. /**
  232. * @commands_offset:
  233. *
  234. * Offset inside the first buffer in @buffers_ptr containing commands
  235. * to be executed. The offset has to be 8-byte aligned.
  236. */
  237. __u32 commands_offset;
  238. };
  239. /* drm_ivpu_bo_wait job status codes */
  240. #define DRM_IVPU_JOB_STATUS_SUCCESS 0
  241. /**
  242. * struct drm_ivpu_bo_wait - Wait for BO to become inactive
  243. *
  244. * Blocks until a given buffer object becomes inactive.
  245. * With @timeout_ms set to 0 returns immediately.
  246. */
  247. struct drm_ivpu_bo_wait {
  248. /** @handle: Handle to the buffer object to be waited on */
  249. __u32 handle;
  250. /** @flags: Reserved for future use - must be zero */
  251. __u32 flags;
  252. /** @timeout_ns: Absolute timeout in nanoseconds (may be zero) */
  253. __s64 timeout_ns;
  254. /**
  255. * @job_status:
  256. *
  257. * Job status code which is updated after the job is completed.
  258. * &DRM_IVPU_JOB_STATUS_SUCCESS or device specific error otherwise.
  259. * Valid only if @handle points to a command buffer.
  260. */
  261. __u32 job_status;
  262. /** @pad: Padding - must be zero */
  263. __u32 pad;
  264. };
#if defined(__cplusplus)
}
#endif

#endif /* __UAPI_IVPU_DRM_H__ */