  1. /*
  2. * Copyright 2013 Red Hat
  3. * All Rights Reserved.
  4. *
  5. * Permission is hereby granted, free of charge, to any person obtaining a
  6. * copy of this software and associated documentation files (the "Software"),
  7. * to deal in the Software without restriction, including without limitation
  8. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  9. * and/or sell copies of the Software, and to permit persons to whom the
  10. * Software is furnished to do so, subject to the following conditions:
  11. *
  12. * The above copyright notice and this permission notice (including the next
  13. * paragraph) shall be included in all copies or substantial portions of the
  14. * Software.
  15. *
  16. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19. * THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22. * OTHER DEALINGS IN THE SOFTWARE.
  23. */
  24. #ifndef VIRTGPU_DRM_H
  25. #define VIRTGPU_DRM_H
  26. #include "drm.h"
  27. #if defined(__cplusplus)
  28. extern "C" {
  29. #endif
/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 *
 * Do not use pointers; use __u64 instead for 32 bit / 64 bit user/kernel
 * compatibility. Keep fields aligned to their size.
 */
/* Per-driver ioctl numbers; the full ioctl codes (offset from
 * DRM_COMMAND_BASE) are defined at the bottom of this file. */
#define DRM_VIRTGPU_MAP 0x01
#define DRM_VIRTGPU_EXECBUFFER 0x02
#define DRM_VIRTGPU_GETPARAM 0x03
#define DRM_VIRTGPU_RESOURCE_CREATE 0x04
#define DRM_VIRTGPU_RESOURCE_INFO 0x05
#define DRM_VIRTGPU_TRANSFER_FROM_HOST 0x06
#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
#define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
#define DRM_VIRTGPU_CONTEXT_INIT 0x0b

/* Flag bits accepted in drm_virtgpu_execbuffer.flags. */
#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
#define VIRTGPU_EXECBUF_RING_IDX 0x04
/* Mask of all currently defined execbuffer flags. */
#define VIRTGPU_EXECBUF_FLAGS (\
		VIRTGPU_EXECBUF_FENCE_FD_IN |\
		VIRTGPU_EXECBUF_FENCE_FD_OUT |\
		VIRTGPU_EXECBUF_RING_IDX |\
		0)
/* DRM_IOCTL_VIRTGPU_MAP: obtain the mmap offset for a buffer object. */
struct drm_virtgpu_map {
	__u64 offset; /* use for mmap system call */
	__u32 handle; /* in: GEM handle of the buffer object */
	__u32 pad;    /* unused; keeps the struct 8-byte aligned */
};
/*
 * DRM_IOCTL_VIRTGPU_EXECBUFFER: submit a command buffer to the host.
 *
 * fence_fd is modified on success if VIRTGPU_EXECBUF_FENCE_FD_OUT flag is set.
 */
struct drm_virtgpu_execbuffer {
	__u32 flags;          /* bitmask of VIRTGPU_EXECBUF_* flags above */
	__u32 size;           /* size of the command buffer, in bytes */
	__u64 command;        /* void* — userspace pointer to the command buffer */
	__u64 bo_handles;     /* userspace pointer to an array of GEM handles */
	__u32 num_bo_handles; /* number of entries in the bo_handles array */
	__s32 fence_fd;       /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
	__u32 ring_idx;       /* command ring index (see VIRTGPU_EXECBUF_RING_IDX) */
	__u32 pad;            /* unused; keeps the struct 8-byte aligned */
};
/* Parameter ids for DRM_IOCTL_VIRTGPU_GETPARAM. */
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
#define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
#define VIRTGPU_PARAM_HOST_VISIBLE 4 /* Host blob resources are mappable */
#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */
#define VIRTGPU_PARAM_CONTEXT_INIT 6 /* DRM_VIRTGPU_CONTEXT_INIT */
#define VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs 7 /* Bitmask of supported capability set ids */

/* DRM_IOCTL_VIRTGPU_GETPARAM: query a single device parameter. */
struct drm_virtgpu_getparam {
	__u64 param; /* in: one of the VIRTGPU_PARAM_* ids above */
	__u64 value; /* out: the queried value */
};
/* NO_BO flags? NO resource flag? */
/* resource flag for y_0_top */
/*
 * DRM_IOCTL_VIRTGPU_RESOURCE_CREATE: create a (non-blob) host resource
 * and an associated GEM buffer object. Most fields mirror a 3D texture
 * description handed through to the host.
 */
struct drm_virtgpu_resource_create {
	__u32 target;     /* texture target (presumably pipe/Gallium enum — confirm in driver) */
	__u32 format;     /* resource format */
	__u32 bind;       /* bind flags */
	__u32 width;
	__u32 height;
	__u32 depth;
	__u32 array_size;
	__u32 last_level; /* last mipmap level */
	__u32 nr_samples; /* MSAA sample count */
	__u32 flags;
	__u32 bo_handle;  /* if this is set - recreate a new resource attached to this bo ? */
	__u32 res_handle; /* returned by kernel */
	__u32 size;       /* validate transfer in the host */
	__u32 stride;     /* validate transfer in the host */
};
/* DRM_IOCTL_VIRTGPU_RESOURCE_INFO: query resource info for a GEM handle. */
struct drm_virtgpu_resource_info {
	__u32 bo_handle;  /* in: GEM handle to query */
	__u32 res_handle; /* host resource handle (NOTE(review): presumably out — confirm in driver) */
	__u32 size;       /* resource size in bytes */
	__u32 blob_mem;   /* VIRTGPU_BLOB_MEM_* type, for blob resources */
};
/* A 3D region: origin (x, y, z) plus extent (w, h, d). Used by the
 * transfer ioctls below to describe the region being copied. */
struct drm_virtgpu_3d_box {
	__u32 x;
	__u32 y;
	__u32 z;
	__u32 w;
	__u32 h;
	__u32 d;
};
/* DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST: copy a region of a guest buffer
 * object to its backing host resource. */
struct drm_virtgpu_3d_transfer_to_host {
	__u32 bo_handle;               /* GEM handle of the source buffer object */
	struct drm_virtgpu_3d_box box; /* region to transfer */
	__u32 level;                   /* mipmap level */
	__u32 offset;                  /* byte offset into the buffer object */
	__u32 stride;                  /* row stride in bytes */
	__u32 layer_stride;            /* stride between array layers / depth slices */
};
/* DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST: copy a region of a host resource
 * back into the guest buffer object. Mirror image of the TO_HOST ioctl. */
struct drm_virtgpu_3d_transfer_from_host {
	__u32 bo_handle;               /* GEM handle of the destination buffer object */
	struct drm_virtgpu_3d_box box; /* region to transfer */
	__u32 level;                   /* mipmap level */
	__u32 offset;                  /* byte offset into the buffer object */
	__u32 stride;                  /* row stride in bytes */
	__u32 layer_stride;            /* stride between array layers / depth slices */
};
#define VIRTGPU_WAIT_NOWAIT 1 /* poll only, do not block waiting for the bo */
/* DRM_IOCTL_VIRTGPU_WAIT: wait for a buffer object to become idle. */
struct drm_virtgpu_3d_wait {
	__u32 handle; /* 0 is an invalid handle */
	__u32 flags;  /* 0 or VIRTGPU_WAIT_NOWAIT */
};
/* DRM_IOCTL_VIRTGPU_GET_CAPS: copy a host capability set into a
 * userspace-provided buffer. */
struct drm_virtgpu_get_caps {
	__u32 cap_set_id;  /* which capability set to fetch */
	__u32 cap_set_ver; /* capability set version */
	__u64 addr;        /* userspace pointer to the destination buffer */
	__u32 size;        /* size of the destination buffer in bytes */
	__u32 pad;         /* unused; keeps the struct 8-byte aligned */
};
/* DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB: create a blob resource
 * (see VIRTGPU_PARAM_RESOURCE_BLOB). */
struct drm_virtgpu_resource_create_blob {
/* Where the blob's memory lives. */
#define VIRTGPU_BLOB_MEM_GUEST             0x0001
#define VIRTGPU_BLOB_MEM_HOST3D            0x0002
#define VIRTGPU_BLOB_MEM_HOST3D_GUEST      0x0003

/* How the blob may be used. */
#define VIRTGPU_BLOB_FLAG_USE_MAPPABLE     0x0001
#define VIRTGPU_BLOB_FLAG_USE_SHAREABLE    0x0002
#define VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
	/* zero is invalid blob_mem */
	__u32 blob_mem;   /* one of VIRTGPU_BLOB_MEM_* */
	__u32 blob_flags; /* bitmask of VIRTGPU_BLOB_FLAG_USE_* */
	__u32 bo_handle;  /* GEM handle, returned by kernel */
	__u32 res_handle; /* host resource handle, returned by kernel */
	__u64 size;       /* size of the blob in bytes */

	/*
	 * cmd/cmd_size: optional command to submit along with creation;
	 * only valid for 3D contexts with VIRTGPU_BLOB_MEM_HOST3D_GUEST
	 * and VIRTGPU_BLOB_MEM_HOST3D. Otherwise, must be zero.
	 */
	__u32 pad;        /* unused; keeps the struct 8-byte aligned */
	__u32 cmd_size;   /* size of the command buffer in bytes */
	__u64 cmd;        /* userspace pointer to the command buffer */
	__u64 blob_id;    /* host-side blob identifier */
};
/* Parameter ids for drm_virtgpu_context_set_param. */
#define VIRTGPU_CONTEXT_PARAM_CAPSET_ID       0x0001
#define VIRTGPU_CONTEXT_PARAM_NUM_RINGS       0x0002
#define VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK 0x0003
/* One context-init parameter; passed in an array via
 * drm_virtgpu_context_init.ctx_set_params. */
struct drm_virtgpu_context_set_param {
	__u64 param; /* one of the VIRTGPU_CONTEXT_PARAM_* ids above */
	__u64 value; /* value for that parameter */
};
/* DRM_IOCTL_VIRTGPU_CONTEXT_INIT: initialize a per-fd context with the
 * given parameters (see VIRTGPU_PARAM_CONTEXT_INIT). */
struct drm_virtgpu_context_init {
	__u32 num_params; /* number of entries in ctx_set_params */
	__u32 pad;        /* unused; keeps the struct 8-byte aligned */

	/* pointer to drm_virtgpu_context_set_param array */
	__u64 ctx_set_params;
};
/*
 * Event code that's given when VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK is in
 * effect.  The event size is sizeof(drm_event), since there is no additional
 * payload.
 */
#define VIRTGPU_EVENT_FENCE_SIGNALED 0x90000000

/* Full ioctl codes, built from the DRM_VIRTGPU_* numbers at the top of
 * this file. All are read/write (DRM_IOWR). */

#define DRM_IOCTL_VIRTGPU_MAP \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)

#define DRM_IOCTL_VIRTGPU_EXECBUFFER \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\
		struct drm_virtgpu_execbuffer)

#define DRM_IOCTL_VIRTGPU_GETPARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GETPARAM,\
		struct drm_virtgpu_getparam)

#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE, \
		struct drm_virtgpu_resource_create)

#define DRM_IOCTL_VIRTGPU_RESOURCE_INFO \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_INFO, \
		struct drm_virtgpu_resource_info)

#define DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_FROM_HOST, \
		struct drm_virtgpu_3d_transfer_from_host)

#define DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_TO_HOST, \
		struct drm_virtgpu_3d_transfer_to_host)

#define DRM_IOCTL_VIRTGPU_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_WAIT, \
		struct drm_virtgpu_3d_wait)

#define DRM_IOCTL_VIRTGPU_GET_CAPS \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
		struct drm_virtgpu_get_caps)

#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_BLOB, \
		struct drm_virtgpu_resource_create_blob)

#define DRM_IOCTL_VIRTGPU_CONTEXT_INIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_CONTEXT_INIT, \
		struct drm_virtgpu_context_init)
  215. #if defined(__cplusplus)
  216. }
  217. #endif
  218. #endif