/*
 * Copyright 2005 Stephane Marchesin.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
  24. #ifndef __NOUVEAU_DRM_H__
  25. #define __NOUVEAU_DRM_H__
  26. #define DRM_NOUVEAU_EVENT_NVIF 0x80000000
  27. #include "drm.h"
  28. #if defined(__cplusplus)
  29. extern "C" {
  30. #endif
  31. #define NOUVEAU_GETPARAM_PCI_VENDOR 3
  32. #define NOUVEAU_GETPARAM_PCI_DEVICE 4
  33. #define NOUVEAU_GETPARAM_BUS_TYPE 5
  34. #define NOUVEAU_GETPARAM_FB_SIZE 8
  35. #define NOUVEAU_GETPARAM_AGP_SIZE 9
  36. #define NOUVEAU_GETPARAM_CHIPSET_ID 11
  37. #define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
  38. #define NOUVEAU_GETPARAM_GRAPH_UNITS 13
  39. #define NOUVEAU_GETPARAM_PTIMER_TIME 14
  40. #define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
  41. #define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
  42. /*
  43. * NOUVEAU_GETPARAM_EXEC_PUSH_MAX - query max pushes through getparam
  44. *
  45. * Query the maximum amount of IBs that can be pushed through a single
  46. * &drm_nouveau_exec structure and hence a single &DRM_IOCTL_NOUVEAU_EXEC
  47. * ioctl().
  48. */
  49. #define NOUVEAU_GETPARAM_EXEC_PUSH_MAX 17
  50. struct drm_nouveau_getparam {
  51. __u64 param;
  52. __u64 value;
  53. };
  54. struct drm_nouveau_channel_alloc {
  55. __u32 fb_ctxdma_handle;
  56. __u32 tt_ctxdma_handle;
  57. __s32 channel;
  58. __u32 pushbuf_domains;
  59. /* Notifier memory */
  60. __u32 notifier_handle;
  61. /* DRM-enforced subchannel assignments */
  62. struct {
  63. __u32 handle;
  64. __u32 grclass;
  65. } subchan[8];
  66. __u32 nr_subchan;
  67. };
  68. struct drm_nouveau_channel_free {
  69. __s32 channel;
  70. };
  71. #define NOUVEAU_GEM_DOMAIN_CPU (1 << 0)
  72. #define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
  73. #define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
  74. #define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
  75. #define NOUVEAU_GEM_DOMAIN_COHERENT (1 << 4)
  76. /* The BO will never be shared via import or export. */
  77. #define NOUVEAU_GEM_DOMAIN_NO_SHARE (1 << 5)
  78. #define NOUVEAU_GEM_TILE_COMP 0x00030000 /* nv50-only */
  79. #define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
  80. #define NOUVEAU_GEM_TILE_16BPP 0x00000001
  81. #define NOUVEAU_GEM_TILE_32BPP 0x00000002
  82. #define NOUVEAU_GEM_TILE_ZETA 0x00000004
  83. #define NOUVEAU_GEM_TILE_NONCONTIG 0x00000008
  84. struct drm_nouveau_gem_info {
  85. __u32 handle;
  86. __u32 domain;
  87. __u64 size;
  88. __u64 offset;
  89. __u64 map_handle;
  90. __u32 tile_mode;
  91. __u32 tile_flags;
  92. };
  93. struct drm_nouveau_gem_new {
  94. struct drm_nouveau_gem_info info;
  95. __u32 channel_hint;
  96. __u32 align;
  97. };
  98. #define NOUVEAU_GEM_MAX_BUFFERS 1024
  99. struct drm_nouveau_gem_pushbuf_bo_presumed {
  100. __u32 valid;
  101. __u32 domain;
  102. __u64 offset;
  103. };
  104. struct drm_nouveau_gem_pushbuf_bo {
  105. __u64 user_priv;
  106. __u32 handle;
  107. __u32 read_domains;
  108. __u32 write_domains;
  109. __u32 valid_domains;
  110. struct drm_nouveau_gem_pushbuf_bo_presumed presumed;
  111. };
  112. #define NOUVEAU_GEM_RELOC_LOW (1 << 0)
  113. #define NOUVEAU_GEM_RELOC_HIGH (1 << 1)
  114. #define NOUVEAU_GEM_RELOC_OR (1 << 2)
  115. #define NOUVEAU_GEM_MAX_RELOCS 1024
  116. struct drm_nouveau_gem_pushbuf_reloc {
  117. __u32 reloc_bo_index;
  118. __u32 reloc_bo_offset;
  119. __u32 bo_index;
  120. __u32 flags;
  121. __u32 data;
  122. __u32 vor;
  123. __u32 tor;
  124. };
  125. #define NOUVEAU_GEM_MAX_PUSH 512
  126. struct drm_nouveau_gem_pushbuf_push {
  127. __u32 bo_index;
  128. __u32 pad;
  129. __u64 offset;
  130. __u64 length;
  131. #define NOUVEAU_GEM_PUSHBUF_NO_PREFETCH (1 << 23)
  132. };
  133. struct drm_nouveau_gem_pushbuf {
  134. __u32 channel;
  135. __u32 nr_buffers;
  136. __u64 buffers;
  137. __u32 nr_relocs;
  138. __u32 nr_push;
  139. __u64 relocs;
  140. __u64 push;
  141. __u32 suffix0;
  142. __u32 suffix1;
  143. #define NOUVEAU_GEM_PUSHBUF_SYNC (1ULL << 0)
  144. __u64 vram_available;
  145. __u64 gart_available;
  146. };
  147. #define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001
  148. #define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004
  149. struct drm_nouveau_gem_cpu_prep {
  150. __u32 handle;
  151. __u32 flags;
  152. };
  153. struct drm_nouveau_gem_cpu_fini {
  154. __u32 handle;
  155. };
  156. /**
  157. * struct drm_nouveau_sync - sync object
  158. *
  159. * This structure serves as synchronization mechanism for (potentially)
  160. * asynchronous operations such as EXEC or VM_BIND.
  161. */
  162. struct drm_nouveau_sync {
  163. /**
  164. * @flags: the flags for a sync object
  165. *
  166. * The first 8 bits are used to determine the type of the sync object.
  167. */
  168. __u32 flags;
  169. #define DRM_NOUVEAU_SYNC_SYNCOBJ 0x0
  170. #define DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ 0x1
  171. #define DRM_NOUVEAU_SYNC_TYPE_MASK 0xf
  172. /**
  173. * @handle: the handle of the sync object
  174. */
  175. __u32 handle;
  176. /**
  177. * @timeline_value:
  178. *
  179. * The timeline point of the sync object in case the syncobj is of
  180. * type DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ.
  181. */
  182. __u64 timeline_value;
  183. };
  184. /**
  185. * struct drm_nouveau_vm_init - GPU VA space init structure
  186. *
  187. * Used to initialize the GPU's VA space for a user client, telling the kernel
  188. * which portion of the VA space is managed by the UMD and kernel respectively.
  189. *
  190. * For the UMD to use the VM_BIND uAPI, this must be called before any BOs or
  191. * channels are created; if called afterwards DRM_IOCTL_NOUVEAU_VM_INIT fails
  192. * with -ENOSYS.
  193. */
  194. struct drm_nouveau_vm_init {
  195. /**
  196. * @kernel_managed_addr: start address of the kernel managed VA space
  197. * region
  198. */
  199. __u64 kernel_managed_addr;
  200. /**
  201. * @kernel_managed_size: size of the kernel managed VA space region in
  202. * bytes
  203. */
  204. __u64 kernel_managed_size;
  205. };
  206. /**
  207. * struct drm_nouveau_vm_bind_op - VM_BIND operation
  208. *
  209. * This structure represents a single VM_BIND operation. UMDs should pass
  210. * an array of this structure via struct drm_nouveau_vm_bind's &op_ptr field.
  211. */
  212. struct drm_nouveau_vm_bind_op {
  213. /**
  214. * @op: the operation type
  215. */
  216. __u32 op;
  217. /**
  218. * @DRM_NOUVEAU_VM_BIND_OP_MAP:
  219. *
  220. * Map a GEM object to the GPU's VA space. Optionally, the
  221. * &DRM_NOUVEAU_VM_BIND_SPARSE flag can be passed to instruct the kernel to
  222. * create sparse mappings for the given range.
  223. */
  224. #define DRM_NOUVEAU_VM_BIND_OP_MAP 0x0
  225. /**
  226. * @DRM_NOUVEAU_VM_BIND_OP_UNMAP:
  227. *
  228. * Unmap an existing mapping in the GPU's VA space. If the region the mapping
  229. * is located in is a sparse region, new sparse mappings are created where the
  230. * unmapped (memory backed) mapping was mapped previously. To remove a sparse
  231. * region the &DRM_NOUVEAU_VM_BIND_SPARSE must be set.
  232. */
  233. #define DRM_NOUVEAU_VM_BIND_OP_UNMAP 0x1
  234. /**
  235. * @flags: the flags for a &drm_nouveau_vm_bind_op
  236. */
  237. __u32 flags;
  238. /**
  239. * @DRM_NOUVEAU_VM_BIND_SPARSE:
  240. *
  241. * Indicates that an allocated VA space region should be sparse.
  242. */
  243. #define DRM_NOUVEAU_VM_BIND_SPARSE (1 << 8)
  244. /**
  245. * @handle: the handle of the DRM GEM object to map
  246. */
  247. __u32 handle;
  248. /**
  249. * @pad: 32 bit padding, should be 0
  250. */
  251. __u32 pad;
  252. /**
  253. * @addr:
  254. *
  255. * the address the VA space region or (memory backed) mapping should be mapped to
  256. */
  257. __u64 addr;
  258. /**
  259. * @bo_offset: the offset within the BO backing the mapping
  260. */
  261. __u64 bo_offset;
  262. /**
  263. * @range: the size of the requested mapping in bytes
  264. */
  265. __u64 range;
  266. };
  267. /**
  268. * struct drm_nouveau_vm_bind - structure for DRM_IOCTL_NOUVEAU_VM_BIND
  269. */
  270. struct drm_nouveau_vm_bind {
  271. /**
  272. * @op_count: the number of &drm_nouveau_vm_bind_op
  273. */
  274. __u32 op_count;
  275. /**
  276. * @flags: the flags for a &drm_nouveau_vm_bind ioctl
  277. */
  278. __u32 flags;
  279. /**
  280. * @DRM_NOUVEAU_VM_BIND_RUN_ASYNC:
  281. *
  282. * Indicates that the given VM_BIND operation should be executed asynchronously
  283. * by the kernel.
  284. *
  285. * If this flag is not supplied the kernel executes the associated operations
  286. * synchronously and doesn't accept any &drm_nouveau_sync objects.
  287. */
  288. #define DRM_NOUVEAU_VM_BIND_RUN_ASYNC 0x1
  289. /**
  290. * @wait_count: the number of wait &drm_nouveau_syncs
  291. */
  292. __u32 wait_count;
  293. /**
  294. * @sig_count: the number of &drm_nouveau_syncs to signal when finished
  295. */
  296. __u32 sig_count;
  297. /**
  298. * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
  299. */
  300. __u64 wait_ptr;
  301. /**
  302. * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
  303. */
  304. __u64 sig_ptr;
  305. /**
  306. * @op_ptr: pointer to the &drm_nouveau_vm_bind_ops to execute
  307. */
  308. __u64 op_ptr;
  309. };
  310. /**
  311. * struct drm_nouveau_exec_push - EXEC push operation
  312. *
  313. * This structure represents a single EXEC push operation. UMDs should pass an
  314. * array of this structure via struct drm_nouveau_exec's &push_ptr field.
  315. */
  316. struct drm_nouveau_exec_push {
  317. /**
  318. * @va: the virtual address of the push buffer mapping
  319. */
  320. __u64 va;
  321. /**
  322. * @va_len: the length of the push buffer mapping
  323. */
  324. __u32 va_len;
  325. /**
  326. * @flags: the flags for this push buffer mapping
  327. */
  328. __u32 flags;
  329. #define DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH 0x1
  330. };
  331. /**
  332. * struct drm_nouveau_exec - structure for DRM_IOCTL_NOUVEAU_EXEC
  333. */
  334. struct drm_nouveau_exec {
  335. /**
  336. * @channel: the channel to execute the push buffer in
  337. */
  338. __u32 channel;
  339. /**
  340. * @push_count: the number of &drm_nouveau_exec_push ops
  341. */
  342. __u32 push_count;
  343. /**
  344. * @wait_count: the number of wait &drm_nouveau_syncs
  345. */
  346. __u32 wait_count;
  347. /**
  348. * @sig_count: the number of &drm_nouveau_syncs to signal when finished
  349. */
  350. __u32 sig_count;
  351. /**
  352. * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
  353. */
  354. __u64 wait_ptr;
  355. /**
  356. * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
  357. */
  358. __u64 sig_ptr;
  359. /**
  360. * @push_ptr: pointer to &drm_nouveau_exec_push ops
  361. */
  362. __u64 push_ptr;
  363. };
  364. #define DRM_NOUVEAU_GETPARAM 0x00
  365. #define DRM_NOUVEAU_SETPARAM 0x01 /* deprecated */
  366. #define DRM_NOUVEAU_CHANNEL_ALLOC 0x02
  367. #define DRM_NOUVEAU_CHANNEL_FREE 0x03
  368. #define DRM_NOUVEAU_GROBJ_ALLOC 0x04 /* deprecated */
  369. #define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05 /* deprecated */
  370. #define DRM_NOUVEAU_GPUOBJ_FREE 0x06 /* deprecated */
  371. #define DRM_NOUVEAU_NVIF 0x07
  372. #define DRM_NOUVEAU_SVM_INIT 0x08
  373. #define DRM_NOUVEAU_SVM_BIND 0x09
  374. #define DRM_NOUVEAU_VM_INIT 0x10
  375. #define DRM_NOUVEAU_VM_BIND 0x11
  376. #define DRM_NOUVEAU_EXEC 0x12
  377. #define DRM_NOUVEAU_GEM_NEW 0x40
  378. #define DRM_NOUVEAU_GEM_PUSHBUF 0x41
  379. #define DRM_NOUVEAU_GEM_CPU_PREP 0x42
  380. #define DRM_NOUVEAU_GEM_CPU_FINI 0x43
  381. #define DRM_NOUVEAU_GEM_INFO 0x44
  382. struct drm_nouveau_svm_init {
  383. __u64 unmanaged_addr;
  384. __u64 unmanaged_size;
  385. };
  386. struct drm_nouveau_svm_bind {
  387. __u64 header;
  388. __u64 va_start;
  389. __u64 va_end;
  390. __u64 npages;
  391. __u64 stride;
  392. __u64 result;
  393. __u64 reserved0;
  394. __u64 reserved1;
  395. };
  396. #define NOUVEAU_SVM_BIND_COMMAND_SHIFT 0
  397. #define NOUVEAU_SVM_BIND_COMMAND_BITS 8
  398. #define NOUVEAU_SVM_BIND_COMMAND_MASK ((1 << 8) - 1)
  399. #define NOUVEAU_SVM_BIND_PRIORITY_SHIFT 8
  400. #define NOUVEAU_SVM_BIND_PRIORITY_BITS 8
  401. #define NOUVEAU_SVM_BIND_PRIORITY_MASK ((1 << 8) - 1)
  402. #define NOUVEAU_SVM_BIND_TARGET_SHIFT 16
  403. #define NOUVEAU_SVM_BIND_TARGET_BITS 32
  404. #define NOUVEAU_SVM_BIND_TARGET_MASK 0xffffffff
  405. /*
  406. * Below is use to validate ioctl argument, userspace can also use it to make
  407. * sure that no bit are set beyond known fields for a given kernel version.
  408. */
  409. #define NOUVEAU_SVM_BIND_VALID_BITS 48
  410. #define NOUVEAU_SVM_BIND_VALID_MASK ((1ULL << NOUVEAU_SVM_BIND_VALID_BITS) - 1)
  411. /*
  412. * NOUVEAU_BIND_COMMAND__MIGRATE: synchronous migrate to target memory.
  413. * result: number of page successfuly migrate to the target memory.
  414. */
  415. #define NOUVEAU_SVM_BIND_COMMAND__MIGRATE 0
  416. /*
  417. * NOUVEAU_SVM_BIND_HEADER_TARGET__GPU_VRAM: target the GPU VRAM memory.
  418. */
  419. #define NOUVEAU_SVM_BIND_TARGET__GPU_VRAM (1UL << 31)
  420. #define DRM_IOCTL_NOUVEAU_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
  421. #define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
  422. #define DRM_IOCTL_NOUVEAU_CHANNEL_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)
  423. #define DRM_IOCTL_NOUVEAU_SVM_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_INIT, struct drm_nouveau_svm_init)
  424. #define DRM_IOCTL_NOUVEAU_SVM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_BIND, struct drm_nouveau_svm_bind)
  425. #define DRM_IOCTL_NOUVEAU_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_NEW, struct drm_nouveau_gem_new)
  426. #define DRM_IOCTL_NOUVEAU_GEM_PUSHBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_PUSHBUF, struct drm_nouveau_gem_pushbuf)
  427. #define DRM_IOCTL_NOUVEAU_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_PREP, struct drm_nouveau_gem_cpu_prep)
  428. #define DRM_IOCTL_NOUVEAU_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_FINI, struct drm_nouveau_gem_cpu_fini)
  429. #define DRM_IOCTL_NOUVEAU_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_INFO, struct drm_nouveau_gem_info)
  430. #define DRM_IOCTL_NOUVEAU_VM_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_INIT, struct drm_nouveau_vm_init)
  431. #define DRM_IOCTL_NOUVEAU_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_BIND, struct drm_nouveau_vm_bind)
  432. #define DRM_IOCTL_NOUVEAU_EXEC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_EXEC, struct drm_nouveau_exec)
  433. #if defined(__cplusplus)
  434. }
  435. #endif
  436. #endif /* __NOUVEAU_DRM_H__ */