  1. /* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
  2. /*
  3. * Copyright (C) 2020-2023 Intel Corporation
  4. */
  5. #ifndef __UAPI_IVPU_DRM_H__
  6. #define __UAPI_IVPU_DRM_H__
  7. #include "drm.h"
  8. #if defined(__cplusplus)
  9. extern "C" {
  10. #endif
/* uAPI version of this header; bump MINOR for backward-compatible additions. */
#define DRM_IVPU_DRIVER_MAJOR 1
#define DRM_IVPU_DRIVER_MINOR 0

/*
 * ioctl numbers, relative to DRM_COMMAND_BASE.
 * NOTE(review): 0x04 is intentionally skipped — presumably a retired ioctl
 * whose number must stay reserved for ABI stability; confirm against driver
 * git history before reusing it.
 */
#define DRM_IVPU_GET_PARAM 0x00
#define DRM_IVPU_SET_PARAM 0x01
#define DRM_IVPU_BO_CREATE 0x02
#define DRM_IVPU_BO_INFO 0x03
#define DRM_IVPU_SUBMIT 0x05
#define DRM_IVPU_BO_WAIT 0x06

/* Full ioctl request codes: direction + size are encoded from the payload struct. */
#define DRM_IOCTL_IVPU_GET_PARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_GET_PARAM, struct drm_ivpu_param)

#define DRM_IOCTL_IVPU_SET_PARAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_SET_PARAM, struct drm_ivpu_param)

#define DRM_IOCTL_IVPU_BO_CREATE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_CREATE, struct drm_ivpu_bo_create)

#define DRM_IOCTL_IVPU_BO_INFO \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_INFO, struct drm_ivpu_bo_info)

#define DRM_IOCTL_IVPU_SUBMIT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_SUBMIT, struct drm_ivpu_submit)

#define DRM_IOCTL_IVPU_BO_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_WAIT, struct drm_ivpu_bo_wait)
/**
 * DOC: contexts
 *
 * VPU contexts have private virtual address space, job queues and priority.
 * Each context is identified by an unique ID. Context is created on open().
 */

/* Parameter IDs for DRM_IOCTL_IVPU_GET_PARAM / DRM_IOCTL_IVPU_SET_PARAM;
 * full read/write semantics are documented on &struct drm_ivpu_param.
 */
#define DRM_IVPU_PARAM_DEVICE_ID	    0
#define DRM_IVPU_PARAM_DEVICE_REVISION	    1
#define DRM_IVPU_PARAM_PLATFORM_TYPE	    2
#define DRM_IVPU_PARAM_CORE_CLOCK_RATE	    3
#define DRM_IVPU_PARAM_NUM_CONTEXTS	    4
#define DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS 5
#define DRM_IVPU_PARAM_CONTEXT_PRIORITY	    6
#define DRM_IVPU_PARAM_CONTEXT_ID	    7
#define DRM_IVPU_PARAM_FW_API_VERSION	    8
#define DRM_IVPU_PARAM_ENGINE_HEARTBEAT	    9
#define DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID  10
#define DRM_IVPU_PARAM_TILE_CONFIG	    11
#define DRM_IVPU_PARAM_SKU		    12

/* Value returned by DRM_IVPU_PARAM_PLATFORM_TYPE on real hardware;
 * simulators/emulators report device-specific non-zero values.
 */
#define DRM_IVPU_PLATFORM_TYPE_SILICON	    0

/* Scheduling priorities accepted by DRM_IVPU_PARAM_CONTEXT_PRIORITY. */
#define DRM_IVPU_CONTEXT_PRIORITY_IDLE	    0
#define DRM_IVPU_CONTEXT_PRIORITY_NORMAL    1
#define DRM_IVPU_CONTEXT_PRIORITY_FOCUS     2
#define DRM_IVPU_CONTEXT_PRIORITY_REALTIME  3
/**
 * struct drm_ivpu_param - Get/Set VPU parameters
 *
 * Payload for DRM_IOCTL_IVPU_GET_PARAM (reads @value) and
 * DRM_IOCTL_IVPU_SET_PARAM (writes @value); @param selects which
 * parameter is accessed.
 */
struct drm_ivpu_param {
	/**
	 * @param:
	 *
	 * Supported params:
	 *
	 * %DRM_IVPU_PARAM_DEVICE_ID:
	 * PCI Device ID of the VPU device (read-only)
	 *
	 * %DRM_IVPU_PARAM_DEVICE_REVISION:
	 * VPU device revision (read-only)
	 *
	 * %DRM_IVPU_PARAM_PLATFORM_TYPE:
	 * Returns %DRM_IVPU_PLATFORM_TYPE_SILICON on real hardware or device specific
	 * platform type when executing on a simulator or emulator (read-only)
	 *
	 * %DRM_IVPU_PARAM_CORE_CLOCK_RATE:
	 * Current PLL frequency (read-only)
	 *
	 * %DRM_IVPU_PARAM_NUM_CONTEXTS:
	 * Maximum number of simultaneously existing contexts (read-only)
	 *
	 * %DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
	 * Lowest VPU virtual address available in the current context (read-only)
	 *
	 * %DRM_IVPU_PARAM_CONTEXT_PRIORITY:
	 * Value of current context scheduling priority (read-write).
	 * See DRM_IVPU_CONTEXT_PRIORITY_* for possible values.
	 *
	 * %DRM_IVPU_PARAM_CONTEXT_ID:
	 * Current context ID, always greater than 0 (read-only)
	 *
	 * %DRM_IVPU_PARAM_FW_API_VERSION:
	 * Firmware API version array (read-only)
	 *
	 * %DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
	 * Heartbeat value from an engine (read-only).
	 * Engine ID (i.e. DRM_IVPU_ENGINE_COMPUTE) is given via index.
	 *
	 * %DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
	 * Device-unique inference ID (read-only)
	 *
	 * %DRM_IVPU_PARAM_TILE_CONFIG:
	 * VPU tile configuration (read-only)
	 *
	 * %DRM_IVPU_PARAM_SKU:
	 * VPU SKU ID (read-only)
	 *
	 */
	__u32 param;

	/** @index: Index for params that have multiple instances */
	__u32 index;

	/** @value: Param value */
	__u64 value;
};
/* Flags for drm_ivpu_bo_create.flags; per-flag semantics are documented
 * on &struct drm_ivpu_bo_create.
 */
#define DRM_IVPU_BO_HIGH_MEM   0x00000001
#define DRM_IVPU_BO_MAPPABLE   0x00000002

/* Bits 16-17 select the caching mode; exactly one mode may be set
 * (CACHED is the all-zero default).
 */
#define DRM_IVPU_BO_CACHED     0x00000000
#define DRM_IVPU_BO_UNCACHED   0x00010000
#define DRM_IVPU_BO_WC	       0x00020000
#define DRM_IVPU_BO_CACHE_MASK 0x00030000

/* Union of all valid creation flags; anything outside this mask is rejected. */
#define DRM_IVPU_BO_FLAGS \
	(DRM_IVPU_BO_HIGH_MEM | \
	 DRM_IVPU_BO_MAPPABLE | \
	 DRM_IVPU_BO_CACHE_MASK)
/**
 * struct drm_ivpu_bo_create - Create BO backed by SHMEM
 *
 * Create GEM buffer object allocated in SHMEM memory.
 * Payload for DRM_IOCTL_IVPU_BO_CREATE; @handle and @vpu_addr are
 * filled in by the kernel on success.
 */
struct drm_ivpu_bo_create {
	/** @size: The size in bytes of the allocated memory */
	__u64 size;

	/**
	 * @flags:
	 *
	 * Supported flags:
	 *
	 * %DRM_IVPU_BO_HIGH_MEM:
	 *
	 * Allocate VPU address from >4GB range.
	 * Buffer object with vpu address >4GB can be always accessed by the
	 * VPU DMA engine, but some HW generation may not be able to access
	 * this memory from the firmware running on the VPU management processor.
	 * Suitable for input, output and some scratch buffers.
	 *
	 * %DRM_IVPU_BO_MAPPABLE:
	 *
	 * Buffer object can be mapped using mmap().
	 *
	 * %DRM_IVPU_BO_CACHED:
	 *
	 * Allocated BO will be cached on host side (WB) and snooped on the VPU side.
	 * This is the default caching mode.
	 *
	 * %DRM_IVPU_BO_UNCACHED:
	 *
	 * Allocated BO will not be cached on host side nor snooped on the VPU side.
	 *
	 * %DRM_IVPU_BO_WC:
	 *
	 * Allocated BO will use write combining buffer for writes but reads will be
	 * uncached.
	 */
	__u32 flags;

	/** @handle: Returned GEM object handle */
	__u32 handle;

	/** @vpu_addr: Returned VPU virtual address */
	__u64 vpu_addr;
};
/**
 * struct drm_ivpu_bo_info - Query buffer object info
 *
 * Payload for DRM_IOCTL_IVPU_BO_INFO: userspace supplies @handle and
 * the kernel fills in the remaining fields.
 */
struct drm_ivpu_bo_info {
	/** @handle: Handle of the queried BO */
	__u32 handle;

	/** @flags: Returned flags used to create the BO */
	__u32 flags;

	/** @vpu_addr: Returned VPU virtual address */
	__u64 vpu_addr;

	/**
	 * @mmap_offset:
	 *
	 * Returned offset to be used in mmap(). 0 in case the BO is not mappable.
	 */
	__u64 mmap_offset;

	/** @size: Returned GEM object size, aligned to PAGE_SIZE */
	__u64 size;
};
/* drm_ivpu_submit engines */
#define DRM_IVPU_ENGINE_COMPUTE 0
#define DRM_IVPU_ENGINE_COPY	1

/**
 * struct drm_ivpu_submit - Submit commands to the VPU
 *
 * Execute a single command buffer on a given VPU engine.
 * Handles to all referenced buffer objects have to be provided in @buffers_ptr.
 *
 * User space may wait on job completion using %DRM_IVPU_BO_WAIT ioctl.
 */
struct drm_ivpu_submit {
	/**
	 * @buffers_ptr:
	 *
	 * A pointer to an u32 array of GEM handles of the BOs required for this job.
	 * The number of elements in the array must be equal to the value given by @buffer_count.
	 *
	 * The first BO is the command buffer. The rest of array has to contain all
	 * BOs referenced from the command buffer.
	 */
	__u64 buffers_ptr;

	/** @buffer_count: Number of elements in the @buffers_ptr */
	__u32 buffer_count;

	/**
	 * @engine: Select the engine this job should be executed on
	 *
	 * %DRM_IVPU_ENGINE_COMPUTE:
	 *
	 * Performs Deep Learning Neural Compute Inference Operations
	 *
	 * %DRM_IVPU_ENGINE_COPY:
	 *
	 * Performs memory copy operations to/from system memory allocated for VPU
	 */
	__u32 engine;

	/** @flags: Reserved for future use - must be zero */
	__u32 flags;

	/**
	 * @commands_offset:
	 *
	 * Offset inside the first buffer in @buffers_ptr containing commands
	 * to be executed. The offset has to be 8-byte aligned.
	 */
	__u32 commands_offset;
};
/* drm_ivpu_bo_wait job status codes */
#define DRM_IVPU_JOB_STATUS_SUCCESS 0

/**
 * struct drm_ivpu_bo_wait - Wait for BO to become inactive
 *
 * Blocks until a given buffer object becomes inactive.
 * With @timeout_ns set to 0 returns immediately.
 */
struct drm_ivpu_bo_wait {
	/** @handle: Handle to the buffer object to be waited on */
	__u32 handle;

	/** @flags: Reserved for future use - must be zero */
	__u32 flags;

	/** @timeout_ns: Absolute timeout in nanoseconds (may be zero) */
	__s64 timeout_ns;

	/**
	 * @job_status:
	 *
	 * Job status code which is updated after the job is completed.
	 * %DRM_IVPU_JOB_STATUS_SUCCESS or device specific error otherwise.
	 * Valid only if @handle points to a command buffer.
	 */
	__u32 job_status;

	/** @pad: Padding - must be zero */
	__u32 pad;
};
  259. #if defined(__cplusplus)
  260. }
  261. #endif
  262. #endif /* __UAPI_IVPU_DRM_H__ */