irdma-abi.h

/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB */
/*
 * Copyright (c) 2006 - 2021 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 */
#ifndef IRDMA_ABI_H
#define IRDMA_ABI_H

#include <linux/types.h>

/* irdma must support legacy GEN_1 i40iw kernel
 * and user-space whose last ABI ver is 5
 */
#define IRDMA_ABI_VER 5

enum irdma_memreg_type {
	IRDMA_MEMREG_TYPE_MEM = 0,
	IRDMA_MEMREG_TYPE_QP  = 1,
	IRDMA_MEMREG_TYPE_CQ  = 2,
};

enum {
	IRDMA_ALLOC_UCTX_USE_RAW_ATTR = 1 << 0,
	IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE = 1 << 1,
};

struct irdma_alloc_ucontext_req {
	__u32 rsvd32;
	__u8 userspace_ver;
	__u8 rsvd8[3];
	__aligned_u64 comp_mask;
};

struct irdma_alloc_ucontext_resp {
	__u32 max_pds;
	__u32 max_qps;
	__u32 wq_size; /* size of the WQs (SQ+RQ) in the mmaped area */
	__u8 kernel_ver;
	__u8 rsvd[3];
	__aligned_u64 feature_flags;
	__aligned_u64 db_mmap_key;
	__u32 max_hw_wq_frags;
	__u32 max_hw_read_sges;
	__u32 max_hw_inline;
	__u32 max_hw_rq_quanta;
	__u32 max_hw_wq_quanta;
	__u32 min_hw_cq_size;
	__u32 max_hw_cq_size;
	__u16 max_hw_sq_chunk;
	__u8 hw_rev;
	__u8 rsvd2;
	__aligned_u64 comp_mask;
	__u16 min_hw_wq_size;
	__u8 rsvd3[6];
};

struct irdma_alloc_pd_resp {
	__u32 pd_id;
	__u8 rsvd[4];
};

struct irdma_resize_cq_req {
	__aligned_u64 user_cq_buffer;
};

struct irdma_create_cq_req {
	__aligned_u64 user_cq_buf;
	__aligned_u64 user_shadow_area;
};

struct irdma_create_qp_req {
	__aligned_u64 user_wqe_bufs;
	__aligned_u64 user_compl_ctx;
};

struct irdma_mem_reg_req {
	__u16 reg_type; /* enum irdma_memreg_type */
	__u16 cq_pages;
	__u16 rq_pages;
	__u16 sq_pages;
};

struct irdma_modify_qp_req {
	__u8 sq_flush;
	__u8 rq_flush;
	__u8 rsvd[6];
};

struct irdma_create_cq_resp {
	__u32 cq_id;
	__u32 cq_size;
};

struct irdma_create_qp_resp {
	__u32 qp_id;
	__u32 actual_sq_size;
	__u32 actual_rq_size;
	__u32 irdma_drv_opt;
	__u16 push_idx;
	__u8 lsmm;
	__u8 rsvd;
	__u32 qp_caps;
};

struct irdma_modify_qp_resp {
	__aligned_u64 push_wqe_mmap_key;
	__aligned_u64 push_db_mmap_key;
	__u16 push_offset;
	__u8 push_valid;
	__u8 rsvd[5];
};

struct irdma_create_ah_resp {
	__u32 ah_id;
	__u8 rsvd[4];
};

#endif /* IRDMA_ABI_H */
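
The structs above are the request/response payloads a userspace provider exchanges with the irdma kernel driver, and comp_mask is how the two sides negotiate optional fields. The following is a minimal, illustrative sketch (not part of the header) of how a provider might fill the ucontext request and test the response bits; the <rdma/irdma-abi.h> install path is assumed, and the request/response are shown without the uverbs ALLOC_CONTEXT command that would actually carry them.

#include <stdio.h>
#include <string.h>
#include <rdma/irdma-abi.h>	/* assumed installed path of this UAPI header */

int main(void)
{
	struct irdma_alloc_ucontext_req req;
	struct irdma_alloc_ucontext_resp resp;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));	/* would be filled by the kernel */

	/* Advertise the ABI version defined by this header. */
	req.userspace_ver = IRDMA_ABI_VER;
	/* Set an optional-feature bit defined above; unknown bits stay zero. */
	req.comp_mask = IRDMA_ALLOC_UCTX_USE_RAW_ATTR;

	/* After the exchange, only read fields whose comp_mask bit is set. */
	if (resp.comp_mask & IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE)
		printf("min HW WQ size: %hu\n", resp.min_hw_wq_size);

	return 0;
}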