/*
 * Copyright (c) 2017 Mellanox Technologies, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _MLX5DV_H_
#define _MLX5DV_H_

#include <stdio.h>
#include <stdbool.h>
#include <linux/types.h> /* For the __be64 type */
#include <sys/types.h>
#include <endian.h>
#if defined(__SSE3__)
#include <limits.h>
#include <emmintrin.h>
#include <tmmintrin.h>
#endif /* defined(__SSE3__) */

#include <infiniband/verbs.h>
#include <infiniband/tm_types.h>
#include <infiniband/mlx5_api.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Always inline the functions */
#ifdef __GNUC__
#define MLX5DV_ALWAYS_INLINE inline __attribute__((always_inline))
#else
#define MLX5DV_ALWAYS_INLINE inline
#endif

#define MLX5DV_RES_TYPE_QP ((uint64_t)RDMA_DRIVER_MLX5 << 32 | 1)
#define MLX5DV_RES_TYPE_RWQ ((uint64_t)RDMA_DRIVER_MLX5 << 32 | 2)
#define MLX5DV_RES_TYPE_DBR ((uint64_t)RDMA_DRIVER_MLX5 << 32 | 3)
#define MLX5DV_RES_TYPE_SRQ ((uint64_t)RDMA_DRIVER_MLX5 << 32 | 4)
#define MLX5DV_RES_TYPE_CQ ((uint64_t)RDMA_DRIVER_MLX5 << 32 | 5)

enum {
	MLX5_RCV_DBR = 0,
	MLX5_SND_DBR = 1,
};

enum mlx5dv_context_comp_mask {
	MLX5DV_CONTEXT_MASK_CQE_COMPRESION = 1 << 0,
	MLX5DV_CONTEXT_MASK_SWP = 1 << 1,
	MLX5DV_CONTEXT_MASK_STRIDING_RQ = 1 << 2,
	MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS = 1 << 3,
	MLX5DV_CONTEXT_MASK_DYN_BFREGS = 1 << 4,
	MLX5DV_CONTEXT_MASK_CLOCK_INFO_UPDATE = 1 << 5,
	MLX5DV_CONTEXT_MASK_FLOW_ACTION_FLAGS = 1 << 6,
	MLX5DV_CONTEXT_MASK_DC_ODP_CAPS = 1 << 7,
	MLX5DV_CONTEXT_MASK_HCA_CORE_CLOCK = 1 << 8,
	MLX5DV_CONTEXT_MASK_NUM_LAG_PORTS = 1 << 9,
};

struct mlx5dv_cqe_comp_caps {
	uint32_t max_num;
	uint32_t supported_format; /* enum mlx5dv_cqe_comp_res_format */
};

struct mlx5dv_sw_parsing_caps {
	uint32_t sw_parsing_offloads; /* Use enum mlx5dv_sw_parsing_offloads */
	uint32_t supported_qpts;
};

struct mlx5dv_striding_rq_caps {
	uint32_t min_single_stride_log_num_of_bytes;
	uint32_t max_single_stride_log_num_of_bytes;
	uint32_t min_single_wqe_log_num_of_strides;
	uint32_t max_single_wqe_log_num_of_strides;
	uint32_t supported_qpts;
};

enum mlx5dv_tunnel_offloads {
	MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN = 1 << 0,
	MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE = 1 << 1,
	MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE = 1 << 2,
	MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE = 1 << 3,
	MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP = 1 << 4,
};

enum mlx5dv_flow_action_cap_flags {
	MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM = 1 << 0,
	MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA = 1 << 1,
	MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING = 1 << 2,
	MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD = 1 << 3,
	MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN = 1 << 4,
};
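/*
 * Note: struct mlx5dv_devx_port below uses struct mlx5dv_devx_reg_32, whose
 * definition is missing from this copy of the header. The definition below
 * is restored based on the upstream rdma-core mlx5dv.h and is assumed to
 * match the version this file was taken from.
 */
struct mlx5dv_devx_reg_32 {
	uint32_t value;
	uint32_t mask;
};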
struct mlx5dv_devx_port {
	uint64_t comp_mask;
	uint16_t vport_num;
	uint16_t vport_vhca_id;
	uint16_t esw_owner_vhca_id;
	uint64_t icm_addr_rx;
	uint64_t icm_addr_tx;
	struct mlx5dv_devx_reg_32 reg_c_0;
};

/*
 * Direct verbs device-specific attributes
 */
struct mlx5dv_context {
	uint8_t version;
	uint64_t flags;
	uint64_t comp_mask;
	struct mlx5dv_cqe_comp_caps cqe_comp_caps;
	struct mlx5dv_sw_parsing_caps sw_parsing_caps;
	struct mlx5dv_striding_rq_caps striding_rq_caps;
	uint32_t tunnel_offloads_caps;
	uint32_t max_dynamic_bfregs;
	uint64_t max_clock_info_update_nsec;
	uint32_t flow_action_flags; /* use enum mlx5dv_flow_action_cap_flags */
	uint32_t dc_odp_caps; /* use enum ibv_odp_transport_cap_bits */
	void *hca_core_clock;
	uint8_t num_lag_ports;
};

enum mlx5dv_context_flags {
	/*
	 * This flag indicates if CQE version 0 or 1 is needed.
	 */
	MLX5DV_CONTEXT_FLAGS_CQE_V1 = (1 << 0),
	MLX5DV_CONTEXT_FLAGS_OBSOLETE = (1 << 1), /* Obsoleted, don't use */
	MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED = (1 << 2),
	MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW = (1 << 3),
	MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP = (1 << 4), /* Support CQE 128B compression */
	MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD = (1 << 5), /* Support CQE 128B padding */
	MLX5DV_CONTEXT_FLAGS_PACKET_BASED_CREDIT_MODE = (1 << 6),
};

enum mlx5dv_cq_init_attr_mask {
	MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE = 1 << 0,
	MLX5DV_CQ_INIT_ATTR_MASK_FLAGS = 1 << 1,
	MLX5DV_CQ_INIT_ATTR_MASK_CQE_SIZE = 1 << 2,
};

enum mlx5dv_cq_init_attr_flags {
	MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD = 1 << 0,
	MLX5DV_CQ_INIT_ATTR_FLAGS_RESERVED = 1 << 1,
};

struct mlx5dv_cq_init_attr {
	uint64_t comp_mask; /* Use enum mlx5dv_cq_init_attr_mask */
	uint8_t cqe_comp_res_format; /* Use enum mlx5dv_cqe_comp_res_format */
	uint32_t flags; /* Use enum mlx5dv_cq_init_attr_flags */
	uint16_t cqe_size; /* when MLX5DV_CQ_INIT_ATTR_MASK_CQE_SIZE set */
};

struct ibv_cq_ex *mlx5dv_create_cq(struct ibv_context *context,
				   struct ibv_cq_init_attr_ex *cq_attr,
				   struct mlx5dv_cq_init_attr *mlx5_cq_attr);
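/*
 * Usage sketch (illustrative only, not part of the API): creating a CQ that
 * delivers compressed CQEs in hash format. Error handling is omitted and the
 * attribute values are arbitrary placeholders.
 *
 *	struct ibv_cq_init_attr_ex cq_attr = {
 *		.cqe = 1024,
 *		.comp_vector = 0,
 *	};
 *	struct mlx5dv_cq_init_attr dv_cq_attr = {
 *		.comp_mask = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE,
 *		.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH,
 *	};
 *	struct ibv_cq_ex *cq = mlx5dv_create_cq(context, &cq_attr, &dv_cq_attr);
 */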
enum mlx5dv_qp_create_flags {
	MLX5DV_QP_CREATE_TUNNEL_OFFLOADS = 1 << 0,
	MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC = 1 << 1,
	MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_MC = 1 << 2,
	MLX5DV_QP_CREATE_DISABLE_SCATTER_TO_CQE = 1 << 3,
	MLX5DV_QP_CREATE_ALLOW_SCATTER_TO_CQE = 1 << 4,
	MLX5DV_QP_CREATE_PACKET_BASED_CREDIT_MODE = 1 << 5,
};

enum mlx5dv_mkey_init_attr_flags {
	MLX5DV_MKEY_INIT_ATTR_FLAGS_INDIRECT = 1 << 0,
};

struct mlx5dv_mkey_init_attr {
	struct ibv_pd *pd;
	uint32_t create_flags; /* Use enum mlx5dv_mkey_init_attr_flags */
	uint16_t max_entries; /* Requested max number of pointed entries by this indirect mkey */
};

struct mlx5dv_mkey {
	uint32_t lkey;
	uint32_t rkey;
};

struct mlx5dv_mkey *mlx5dv_create_mkey(struct mlx5dv_mkey_init_attr *mkey_init_attr);
int mlx5dv_destroy_mkey(struct mlx5dv_mkey *mkey);

enum mlx5dv_qp_init_attr_mask {
	MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS = 1 << 0,
	MLX5DV_QP_INIT_ATTR_MASK_DC = 1 << 1,
	MLX5DV_QP_INIT_ATTR_MASK_SEND_OPS_FLAGS = 1 << 2,
};

enum mlx5dv_dc_type {
	MLX5DV_DCTYPE_DCT = 1,
	MLX5DV_DCTYPE_DCI,
};

struct mlx5dv_dc_init_attr {
	enum mlx5dv_dc_type dc_type;
	uint64_t dct_access_key;
};

enum mlx5dv_qp_create_send_ops_flags {
	MLX5DV_QP_EX_WITH_MR_INTERLEAVED = 1 << 0,
	MLX5DV_QP_EX_WITH_MR_LIST = 1 << 1,
};

struct mlx5dv_qp_init_attr {
	uint64_t comp_mask; /* Use enum mlx5dv_qp_init_attr_mask */
	uint32_t create_flags; /* Use enum mlx5dv_qp_create_flags */
	struct mlx5dv_dc_init_attr dc_init_attr;
	uint64_t send_ops_flags; /* Use enum mlx5dv_qp_create_send_ops_flags */
};

struct ibv_qp *mlx5dv_create_qp(struct ibv_context *context,
				struct ibv_qp_init_attr_ex *qp_attr,
				struct mlx5dv_qp_init_attr *mlx5_qp_attr);
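/*
 * Usage sketch (illustrative only): creating a DC initiator (DCI) QP through
 * the extended QP API so that mlx5dv_wr_set_dc_addr() can be used later.
 * The pd/cq variables and the capacity values are placeholders.
 *
 *	struct ibv_qp_init_attr_ex attr_ex = {
 *		.comp_mask = IBV_QP_INIT_ATTR_PD | IBV_QP_INIT_ATTR_SEND_OPS_FLAGS,
 *		.pd = pd,
 *		.send_cq = cq,
 *		.recv_cq = cq,
 *		.cap = { .max_send_wr = 64, .max_send_sge = 1 },
 *		.qp_type = IBV_QPT_DRIVER,
 *		.send_ops_flags = IBV_QP_EX_WITH_SEND,
 *	};
 *	struct mlx5dv_qp_init_attr attr_dv = {
 *		.comp_mask = MLX5DV_QP_INIT_ATTR_MASK_DC,
 *		.dc_init_attr = { .dc_type = MLX5DV_DCTYPE_DCI },
 *	};
 *	struct ibv_qp *qp = mlx5dv_create_qp(context, &attr_ex, &attr_dv);
 */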
struct mlx5dv_mr_interleaved {
	uint64_t addr;
	uint32_t bytes_count;
	uint32_t bytes_skip;
	uint32_t lkey;
};

enum mlx5dv_wc_opcode {
	MLX5DV_WC_UMR = IBV_WC_DRIVER1,
};

struct mlx5dv_qp_ex {
	uint64_t comp_mask;
	/*
	 * Available just for the MLX5 DC QP type with send opcodes of type:
	 * rdma, atomic and send.
	 */
	void (*wr_set_dc_addr)(struct mlx5dv_qp_ex *mqp, struct ibv_ah *ah,
			       uint32_t remote_dctn, uint64_t remote_dc_key);
	void (*wr_mr_interleaved)(struct mlx5dv_qp_ex *mqp,
				  struct mlx5dv_mkey *mkey,
				  uint32_t access_flags, /* use enum ibv_access_flags */
				  uint32_t repeat_count,
				  uint16_t num_interleaved,
				  struct mlx5dv_mr_interleaved *data);
	void (*wr_mr_list)(struct mlx5dv_qp_ex *mqp,
			   struct mlx5dv_mkey *mkey,
			   uint32_t access_flags, /* use enum ibv_access_flags */
			   uint16_t num_sges,
			   struct ibv_sge *sge);
};

struct mlx5dv_qp_ex *mlx5dv_qp_ex_from_ibv_qp_ex(struct ibv_qp_ex *qp);

int mlx5dv_query_devx_port(struct ibv_context *ctx,
			   uint32_t port_num,
			   struct mlx5dv_devx_port *mlx5_devx_port);

static inline void mlx5dv_wr_set_dc_addr(struct mlx5dv_qp_ex *mqp,
					 struct ibv_ah *ah,
					 uint32_t remote_dctn,
					 uint64_t remote_dc_key)
{
	mqp->wr_set_dc_addr(mqp, ah, remote_dctn, remote_dc_key);
}

static inline void mlx5dv_wr_mr_interleaved(struct mlx5dv_qp_ex *mqp,
					    struct mlx5dv_mkey *mkey,
					    uint32_t access_flags,
					    uint32_t repeat_count,
					    uint16_t num_interleaved,
					    struct mlx5dv_mr_interleaved *data)
{
	mqp->wr_mr_interleaved(mqp, mkey, access_flags, repeat_count,
			       num_interleaved, data);
}

static inline void mlx5dv_wr_mr_list(struct mlx5dv_qp_ex *mqp,
				     struct mlx5dv_mkey *mkey,
				     uint32_t access_flags,
				     uint16_t num_sges,
				     struct ibv_sge *sge)
{
	mqp->wr_mr_list(mqp, mkey, access_flags, num_sges, sge);
}
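/*
 * Usage sketch (illustrative only): posting a SEND on a DCI QP created via
 * the extended QP API. "qp", "ah", "remote_dctn", "remote_dc_key" and "sge"
 * are placeholders supplied by the application.
 *
 *	struct ibv_qp_ex *qpx = ibv_qp_to_qp_ex(qp);
 *	struct mlx5dv_qp_ex *mqpx = mlx5dv_qp_ex_from_ibv_qp_ex(qpx);
 *
 *	ibv_wr_start(qpx);
 *	qpx->wr_id = 1;
 *	qpx->wr_flags = IBV_SEND_SIGNALED;
 *	ibv_wr_send(qpx);
 *	mlx5dv_wr_set_dc_addr(mqpx, ah, remote_dctn, remote_dc_key);
 *	ibv_wr_set_sge(qpx, sge.lkey, sge.addr, sge.length);
 *	ibv_wr_complete(qpx);
 */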
enum mlx5dv_flow_action_esp_mask {
	MLX5DV_FLOW_ACTION_ESP_MASK_FLAGS = 1 << 0,
};

struct mlx5dv_flow_action_esp {
	uint64_t comp_mask; /* Use enum mlx5dv_flow_action_esp_mask */
	uint32_t action_flags; /* Use enum mlx5dv_flow_action_flags */
};

struct mlx5dv_flow_match_parameters {
	size_t match_sz;
	uint64_t match_buf[]; /* Device spec format */
};

enum mlx5dv_flow_matcher_attr_mask {
	MLX5DV_FLOW_MATCHER_MASK_FT_TYPE = 1 << 0,
};

struct mlx5dv_flow_matcher_attr {
	enum ibv_flow_attr_type type;
	uint32_t flags; /* From enum ibv_flow_flags */
	uint16_t priority;
	uint8_t match_criteria_enable; /* Device spec format */
	struct mlx5dv_flow_match_parameters *match_mask;
	uint64_t comp_mask; /* use mlx5dv_flow_matcher_attr_mask */
	enum mlx5dv_flow_table_type ft_type;
};

struct mlx5dv_flow_matcher;

struct mlx5dv_flow_matcher *
mlx5dv_create_flow_matcher(struct ibv_context *context,
			   struct mlx5dv_flow_matcher_attr *matcher_attr);

int mlx5dv_destroy_flow_matcher(struct mlx5dv_flow_matcher *matcher);

enum mlx5dv_flow_action_type {
	MLX5DV_FLOW_ACTION_DEST_IBV_QP,
	MLX5DV_FLOW_ACTION_DROP,
	MLX5DV_FLOW_ACTION_IBV_COUNTER,
	MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION,
	MLX5DV_FLOW_ACTION_TAG,
	MLX5DV_FLOW_ACTION_DEST_DEVX,
	MLX5DV_FLOW_ACTION_COUNTERS_DEVX,
	MLX5DV_FLOW_ACTION_DEFAULT_MISS,
};

struct mlx5dv_flow_action_attr {
	enum mlx5dv_flow_action_type type;
	union {
		struct ibv_qp *qp;
		struct ibv_counters *counter;
		struct ibv_flow_action *action;
		uint32_t tag_value;
		struct mlx5dv_devx_obj *obj;
	};
};

struct ibv_flow *
mlx5dv_create_flow(struct mlx5dv_flow_matcher *matcher,
		   struct mlx5dv_flow_match_parameters *match_value,
		   size_t num_actions,
		   struct mlx5dv_flow_action_attr actions_attr[]);

struct ibv_flow_action *mlx5dv_create_flow_action_esp(struct ibv_context *ctx,
						      struct ibv_flow_action_esp_attr *esp,
						      struct mlx5dv_flow_action_esp *mlx5_attr);

/*
 * mlx5dv_create_flow_action_modify_header - Create a flow action which mutates
 * a packet. The flow action can be attached to steering rules via
 * ibv_create_flow().
 *
 * @ctx: RDMA device context to create the action on.
 * @actions_sz: The size of *actions* buffer in bytes.
 * @actions: A buffer which contains modify actions provided in device spec
 *	     format.
 * @ft_type: Defines the flow table type to which the modify
 *	     header action will be attached.
 *
 * Return a valid ibv_flow_action if successful, NULL otherwise.
 */
struct ibv_flow_action *
mlx5dv_create_flow_action_modify_header(struct ibv_context *ctx,
					size_t actions_sz,
					uint64_t actions[],
					enum mlx5dv_flow_table_type ft_type);

/*
 * mlx5dv_create_flow_action_packet_reformat - Create flow action which can
 * encap/decap packets.
 */
struct ibv_flow_action *
mlx5dv_create_flow_action_packet_reformat(struct ibv_context *ctx,
					  size_t data_sz,
					  void *data,
					  enum mlx5dv_flow_action_packet_reformat_type reformat_type,
					  enum mlx5dv_flow_table_type ft_type);
/*
 * Most device capabilities are exported by ibv_query_device(...), but some
 * HW device-specific information that matters for the data path is not
 * provided there; this call returns those mlx5-specific attributes.
 *
 * Return 0 on success.
 */
int mlx5dv_query_device(struct ibv_context *ctx_in,
			struct mlx5dv_context *attrs_out);
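/*
 * Usage sketch (illustrative only): requesting the optional capability
 * sections in addition to the always-present fields. Only sections whose bit
 * is set in comp_mask on input, and still set on output, are valid.
 *
 *	struct mlx5dv_context dv_attr = {};
 *
 *	dv_attr.comp_mask = MLX5DV_CONTEXT_MASK_CQE_COMPRESION |
 *			    MLX5DV_CONTEXT_MASK_STRIDING_RQ;
 *	if (!mlx5dv_query_device(context, &dv_attr) &&
 *	    (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ)) {
 *		use the values in dv_attr.striding_rq_caps here
 *	}
 */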
enum mlx5dv_qp_comp_mask {
	MLX5DV_QP_MASK_UAR_MMAP_OFFSET = 1 << 0,
	MLX5DV_QP_MASK_RAW_QP_HANDLES = 1 << 1,
	MLX5DV_QP_MASK_RAW_QP_TIR_ADDR = 1 << 2,
};

struct mlx5dv_qp {
	__be32 *dbrec;
	struct {
		void *buf;
		uint32_t wqe_cnt;
		uint32_t stride;
	} sq;
	struct {
		void *buf;
		uint32_t wqe_cnt;
		uint32_t stride;
	} rq;
	struct {
		void *reg;
		uint32_t size;
	} bf;
	uint64_t comp_mask;
	off_t uar_mmap_offset;
	uint32_t tirn;
	uint32_t tisn;
	uint32_t rqn;
	uint32_t sqn;
	uint64_t tir_icm_addr;
};

struct mlx5dv_cq {
	void *buf;
	__be32 *dbrec;
	uint32_t cqe_cnt;
	uint32_t cqe_size;
	void *cq_uar;
	uint32_t cqn;
	uint64_t comp_mask;
};

enum mlx5dv_srq_comp_mask {
	MLX5DV_SRQ_MASK_SRQN = 1 << 0,
};

struct mlx5dv_srq {
	void *buf;
	__be32 *dbrec;
	uint32_t stride;
	uint32_t head;
	uint32_t tail;
	uint64_t comp_mask;
	uint32_t srqn;
};

struct mlx5dv_rwq {
	void *buf;
	__be32 *dbrec;
	uint32_t wqe_cnt;
	uint32_t stride;
	uint64_t comp_mask;
};

struct mlx5dv_alloc_dm_attr {
	enum mlx5dv_alloc_dm_type type;
	uint64_t comp_mask;
};

enum mlx5dv_dm_comp_mask {
	MLX5DV_DM_MASK_REMOTE_VA = 1 << 0,
};

struct mlx5dv_dm {
	void *buf;
	uint64_t length;
	uint64_t comp_mask;
	uint64_t remote_va;
};

struct ibv_dm *mlx5dv_alloc_dm(struct ibv_context *context,
			       struct ibv_alloc_dm_attr *dm_attr,
			       struct mlx5dv_alloc_dm_attr *mlx5_dm_attr);

struct mlx5_wqe_av;

struct mlx5dv_ah {
	struct mlx5_wqe_av *av;
	uint64_t comp_mask;
};

struct mlx5dv_pd {
	uint32_t pdn;
	uint64_t comp_mask;
};

struct mlx5dv_obj {
	struct {
		struct ibv_qp *in;
		struct mlx5dv_qp *out;
	} qp;
	struct {
		struct ibv_cq *in;
		struct mlx5dv_cq *out;
	} cq;
	struct {
		struct ibv_srq *in;
		struct mlx5dv_srq *out;
	} srq;
	struct {
		struct ibv_wq *in;
		struct mlx5dv_rwq *out;
	} rwq;
	struct {
		struct ibv_dm *in;
		struct mlx5dv_dm *out;
	} dm;
	struct {
		struct ibv_ah *in;
		struct mlx5dv_ah *out;
	} ah;
	struct {
		struct ibv_pd *in;
		struct mlx5dv_pd *out;
	} pd;
};

enum mlx5dv_obj_type {
	MLX5DV_OBJ_QP = 1 << 0,
	MLX5DV_OBJ_CQ = 1 << 1,
	MLX5DV_OBJ_SRQ = 1 << 2,
	MLX5DV_OBJ_RWQ = 1 << 3,
	MLX5DV_OBJ_DM = 1 << 4,
	MLX5DV_OBJ_AH = 1 << 5,
	MLX5DV_OBJ_PD = 1 << 6,
};

enum mlx5dv_wq_init_attr_mask {
	MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ = 1 << 0,
};

struct mlx5dv_striding_rq_init_attr {
	uint32_t single_stride_log_num_of_bytes;
	uint32_t single_wqe_log_num_of_strides;
	uint8_t two_byte_shift_en;
};

struct mlx5dv_wq_init_attr {
	uint64_t comp_mask; /* Use enum mlx5dv_wq_init_attr_mask */
	struct mlx5dv_striding_rq_init_attr striding_rq_attrs;
};

/*
 * This function creates a work queue object with extra properties
 * defined by mlx5dv_wq_init_attr struct.
 *
 * For each bit in the comp_mask, a field in mlx5dv_wq_init_attr
 * should follow.
 *
 * MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ: Create a work queue with
 * striding RQ capabilities.
 * - single_stride_log_num_of_bytes represents the size of each stride in the
 *   WQE and its value should be between min_single_stride_log_num_of_bytes
 *   and max_single_stride_log_num_of_bytes that are reported in
 *   mlx5dv_query_device.
 * - single_wqe_log_num_of_strides represents the number of strides in each WQE.
 *   Its value should be between min_single_wqe_log_num_of_strides and
 *   max_single_wqe_log_num_of_strides that are reported in mlx5dv_query_device.
 * - two_byte_shift_en: When enabled, hardware pads 2 bytes of zeroes
 *   before writing the message to memory (e.g. for IP alignment)
 */
struct ibv_wq *mlx5dv_create_wq(struct ibv_context *context,
				struct ibv_wq_init_attr *wq_init_attr,
				struct mlx5dv_wq_init_attr *mlx5_wq_attr);
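/*
 * Usage sketch (illustrative only): creating a striding RQ. The log sizes
 * below are placeholders (64-byte strides, 512 strides per WQE) and must
 * fall inside the ranges reported by mlx5dv_query_device() in
 * striding_rq_caps.
 *
 *	struct ibv_wq_init_attr wq_attr = {
 *		.wq_type = IBV_WQT_RQ,
 *		.max_wr = 64,
 *		.max_sge = 1,
 *		.pd = pd,
 *		.cq = cq,
 *	};
 *	struct mlx5dv_wq_init_attr dv_wq_attr = {
 *		.comp_mask = MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ,
 *		.striding_rq_attrs = {
 *			.single_stride_log_num_of_bytes = 6,
 *			.single_wqe_log_num_of_strides = 9,
 *			.two_byte_shift_en = 0,
 *		},
 *	};
 *	struct ibv_wq *wq = mlx5dv_create_wq(context, &wq_attr, &dv_wq_attr);
 */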
/*
 * This function will initialize mlx5dv_xxx structs based on the supplied type.
 * The information for initialization is taken from the ibv_xx structs supplied
 * as part of the input.
 *
 * Requesting the information of a CQ marks it as owned by DV for all
 * consumer-index related actions.
 *
 * The initialization type can be a combination of several types together.
 *
 * Return: 0 in case of success.
 */
int mlx5dv_init_obj(struct mlx5dv_obj *obj, uint64_t obj_type);
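/*
 * Usage sketch (illustrative only): extracting the mlx5-specific layout of an
 * existing verbs QP and CQ in one call; "qp" and "cq" are placeholders for
 * objects the application already created.
 *
 *	struct mlx5dv_qp dv_qp;
 *	struct mlx5dv_cq dv_cq;
 *	struct mlx5dv_obj obj = {
 *		.qp = { .in = qp, .out = &dv_qp },
 *		.cq = { .in = ibv_cq_ex_to_cq(cq), .out = &dv_cq },
 *	};
 *
 *	if (!mlx5dv_init_obj(&obj, MLX5DV_OBJ_QP | MLX5DV_OBJ_CQ)) {
 *		dv_qp.sq.buf, dv_qp.dbrec, dv_cq.cqe_size, etc. are now valid
 *	}
 */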
enum {
	MLX5_OPCODE_NOP = 0x00,
	MLX5_OPCODE_SEND_INVAL = 0x01,
	MLX5_OPCODE_RDMA_WRITE = 0x08,
	MLX5_OPCODE_RDMA_WRITE_IMM = 0x09,
	MLX5_OPCODE_SEND = 0x0a,
	MLX5_OPCODE_SEND_IMM = 0x0b,
	MLX5_OPCODE_TSO = 0x0e,
	MLX5_OPCODE_RDMA_READ = 0x10,
	MLX5_OPCODE_ATOMIC_CS = 0x11,
	MLX5_OPCODE_ATOMIC_FA = 0x12,
	MLX5_OPCODE_ATOMIC_MASKED_CS = 0x14,
	MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15,
	MLX5_OPCODE_FMR = 0x19,
	MLX5_OPCODE_LOCAL_INVAL = 0x1b,
	MLX5_OPCODE_CONFIG_CMD = 0x1f,
	MLX5_OPCODE_UMR = 0x25,
	MLX5_OPCODE_TAG_MATCHING = 0x28,
	MLX5_OPCODE_FLOW_TBL_ACCESS = 0x2c,
};

/*
 * CQE related part
 */
enum {
	MLX5_INLINE_SCATTER_32 = 0x4,
	MLX5_INLINE_SCATTER_64 = 0x8,
};

enum {
	MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR = 0x01,
	MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR = 0x02,
	MLX5_CQE_SYNDROME_LOCAL_PROT_ERR = 0x04,
	MLX5_CQE_SYNDROME_WR_FLUSH_ERR = 0x05,
	MLX5_CQE_SYNDROME_MW_BIND_ERR = 0x06,
	MLX5_CQE_SYNDROME_BAD_RESP_ERR = 0x10,
	MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR = 0x11,
	MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR = 0x12,
	MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR = 0x13,
	MLX5_CQE_SYNDROME_REMOTE_OP_ERR = 0x14,
	MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR = 0x15,
	MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR = 0x16,
	MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR = 0x22,
};

enum {
	MLX5_CQE_VENDOR_SYNDROME_ODP_PFAULT = 0x93,
};

enum {
	MLX5_CQE_L2_OK = 1 << 0,
	MLX5_CQE_L3_OK = 1 << 1,
	MLX5_CQE_L4_OK = 1 << 2,
};

enum {
	MLX5_CQE_L3_HDR_TYPE_NONE = 0x0,
	MLX5_CQE_L3_HDR_TYPE_IPV6 = 0x1,
	MLX5_CQE_L3_HDR_TYPE_IPV4 = 0x2,
};

enum {
	MLX5_CQE_OWNER_MASK = 1,
	MLX5_CQE_REQ = 0,
	MLX5_CQE_RESP_WR_IMM = 1,
	MLX5_CQE_RESP_SEND = 2,
	MLX5_CQE_RESP_SEND_IMM = 3,
	MLX5_CQE_RESP_SEND_INV = 4,
	MLX5_CQE_RESIZE_CQ = 5,
	MLX5_CQE_NO_PACKET = 6,
	MLX5_CQE_REQ_ERR = 13,
	MLX5_CQE_RESP_ERR = 14,
	MLX5_CQE_INVALID = 15,
};

enum {
	MLX5_CQ_DOORBELL = 0x20
};

enum {
	MLX5_CQ_DB_REQ_NOT_SOL = 1 << 24,
	MLX5_CQ_DB_REQ_NOT = 0 << 24,
};

struct mlx5_err_cqe {
	uint8_t rsvd0[32];
	uint32_t srqn;
	uint8_t rsvd1[18];
	uint8_t vendor_err_synd;
	uint8_t syndrome;
	uint32_t s_wqe_opcode_qpn;
	uint16_t wqe_counter;
	uint8_t signature;
	uint8_t op_own;
};

struct mlx5_tm_cqe {
	__be32 success;
	__be16 hw_phase_cnt;
	uint8_t rsvd0[12];
};

struct mlx5_cqe64 {
	union {
		struct {
			uint8_t rsvd0[2];
			__be16 wqe_id;
			uint8_t rsvd4[13];
			uint8_t ml_path;
			uint8_t rsvd20[4];
			__be16 slid;
			__be32 flags_rqpn;
			uint8_t hds_ip_ext;
			uint8_t l4_hdr_type_etc;
			__be16 vlan_info;
		};
		struct mlx5_tm_cqe tm_cqe;
		/* TMH is scattered to CQE upon match */
		struct ibv_tmh tmh;
	};
	__be32 srqn_uidx;
	__be32 imm_inval_pkey;
	uint8_t app;
	uint8_t app_op;
	__be16 app_info;
	__be32 byte_cnt;
	__be64 timestamp;
	__be32 sop_drop_qpn;
	__be16 wqe_counter;
	uint8_t signature;
	uint8_t op_own;
};

enum {
	MLX5_TMC_SUCCESS = 0x80000000U,
};

enum mlx5dv_cqe_comp_res_format {
	MLX5DV_CQE_RES_FORMAT_HASH = 1 << 0,
	MLX5DV_CQE_RES_FORMAT_CSUM = 1 << 1,
	MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX = 1 << 2,
};

enum mlx5dv_sw_parsing_offloads {
	MLX5DV_SW_PARSING = 1 << 0,
	MLX5DV_SW_PARSING_CSUM = 1 << 1,
	MLX5DV_SW_PARSING_LSO = 1 << 2,
};

static MLX5DV_ALWAYS_INLINE
uint8_t mlx5dv_get_cqe_owner(struct mlx5_cqe64 *cqe)
{
	return cqe->op_own & 0x1;
}

static MLX5DV_ALWAYS_INLINE
void mlx5dv_set_cqe_owner(struct mlx5_cqe64 *cqe, uint8_t val)
{
	cqe->op_own = (val & 0x1) | (cqe->op_own & ~0x1);
}

/* Solicited event */
static MLX5DV_ALWAYS_INLINE
uint8_t mlx5dv_get_cqe_se(struct mlx5_cqe64 *cqe)
{
	return (cqe->op_own >> 1) & 0x1;
}

static MLX5DV_ALWAYS_INLINE
uint8_t mlx5dv_get_cqe_format(struct mlx5_cqe64 *cqe)
{
	return (cqe->op_own >> 2) & 0x3;
}

static MLX5DV_ALWAYS_INLINE
uint8_t mlx5dv_get_cqe_opcode(struct mlx5_cqe64 *cqe)
{
	return cqe->op_own >> 4;
}

/*
 * WQE related part
 */
enum {
	MLX5_INVALID_LKEY = 0x100,
};

enum {
	MLX5_EXTENDED_UD_AV = 0x80000000,
};

enum {
	MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2,
	MLX5_WQE_CTRL_SOLICITED = 1 << 1,
	MLX5_WQE_CTRL_FENCE = 4 << 5,
	MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE = 1 << 5,
};

enum {
	MLX5_SEND_WQE_BB = 64,
	MLX5_SEND_WQE_SHIFT = 6,
};

enum {
	MLX5_INLINE_SEG = 0x80000000,
};

enum {
	MLX5_ETH_WQE_L3_CSUM = (1 << 6),
	MLX5_ETH_WQE_L4_CSUM = (1 << 7),
};

struct mlx5_wqe_srq_next_seg {
	uint8_t rsvd0[2];
	__be16 next_wqe_index;
	uint8_t signature;
	uint8_t rsvd1[11];
};

struct mlx5_wqe_data_seg {
	__be32 byte_count;
	__be32 lkey;
	__be64 addr;
};

struct mlx5_wqe_ctrl_seg {
	__be32 opmod_idx_opcode;
	__be32 qpn_ds;
	uint8_t signature;
	uint8_t rsvd[2];
	uint8_t fm_ce_se;
	__be32 imm;
};

struct mlx5_wqe_flow_update_ctrl_seg {
	__be32 flow_idx_update;
	__be32 dest_handle;
	uint8_t reserved0[40];
};

struct mlx5_wqe_header_modify_argument_update_seg {
	uint8_t argument_list[64];
};

struct mlx5_mprq_wqe {
	struct mlx5_wqe_srq_next_seg nseg;
	struct mlx5_wqe_data_seg dseg;
};

struct mlx5_wqe_av {
	union {
		struct {
			__be32 qkey;
			__be32 reserved;
		} qkey;
		__be64 dc_key;
	} key;
	__be32 dqp_dct;
	uint8_t stat_rate_sl;
	uint8_t fl_mlid;
	__be16 rlid;
	uint8_t reserved0[4];
	uint8_t rmac[6];
	uint8_t tclass;
	uint8_t hop_limit;
	__be32 grh_gid_fl;
	uint8_t rgid[16];
};

struct mlx5_wqe_datagram_seg {
	struct mlx5_wqe_av av;
};

struct mlx5_wqe_raddr_seg {
	__be64 raddr;
	__be32 rkey;
	__be32 reserved;
};

struct mlx5_wqe_atomic_seg {
	__be64 swap_add;
	__be64 compare;
};

struct mlx5_wqe_inl_data_seg {
	uint32_t byte_count;
};

struct mlx5_wqe_eth_seg {
	__be32 rsvd0;
	uint8_t cs_flags;
	uint8_t rsvd1;
	__be16 mss;
	__be32 rsvd2;
	__be16 inline_hdr_sz;
	uint8_t inline_hdr_start[2];
	uint8_t inline_hdr[16];
};

struct mlx5_wqe_tm_seg {
	uint8_t opcode;
	uint8_t flags;
	__be16 index;
	uint8_t rsvd0[2];
	__be16 sw_cnt;
	uint8_t rsvd1[8];
	__be64 append_tag;
	__be64 append_mask;
};

enum {
	MLX5_WQE_UMR_CTRL_FLAG_INLINE = 1 << 7,
	MLX5_WQE_UMR_CTRL_FLAG_CHECK_FREE = 1 << 5,
	MLX5_WQE_UMR_CTRL_FLAG_TRNSLATION_OFFSET = 1 << 4,
	MLX5_WQE_UMR_CTRL_FLAG_CHECK_QPN = 1 << 3,
};

enum {
	MLX5_WQE_UMR_CTRL_MKEY_MASK_LEN = 1 << 0,
	MLX5_WQE_UMR_CTRL_MKEY_MASK_START_ADDR = 1 << 6,
	MLX5_WQE_UMR_CTRL_MKEY_MASK_MKEY = 1 << 13,
	MLX5_WQE_UMR_CTRL_MKEY_MASK_QPN = 1 << 14,
	MLX5_WQE_UMR_CTRL_MKEY_MASK_ACCESS_LOCAL_WRITE = 1 << 18,
	MLX5_WQE_UMR_CTRL_MKEY_MASK_ACCESS_REMOTE_READ = 1 << 19,
	MLX5_WQE_UMR_CTRL_MKEY_MASK_ACCESS_REMOTE_WRITE = 1 << 20,
	MLX5_WQE_UMR_CTRL_MKEY_MASK_ACCESS_ATOMIC = 1 << 21,
	MLX5_WQE_UMR_CTRL_MKEY_MASK_FREE = 1 << 29,
};

struct mlx5_wqe_umr_ctrl_seg {
	uint8_t flags;
	uint8_t rsvd0[3];
	__be16 klm_octowords;
	__be16 translation_offset;
	__be64 mkey_mask;
	uint8_t rsvd1[32];
};

struct mlx5_wqe_umr_klm_seg {
	/* up to 2GB */
	__be32 byte_count;
	__be32 mkey;
	__be64 address;
};

union mlx5_wqe_umr_inline_seg {
	struct mlx5_wqe_umr_klm_seg klm;
};

struct mlx5_wqe_umr_repeat_ent_seg {
	__be16 stride;
	__be16 byte_count;
	__be32 memkey;
	__be64 va;
};

struct mlx5_wqe_umr_repeat_block_seg {
	__be32 byte_count;
	__be32 op;
	__be32 repeat_count;
	__be16 reserved;
	__be16 num_ent;
	struct mlx5_wqe_umr_repeat_ent_seg entries[0];
};

enum {
	MLX5_WQE_MKEY_CONTEXT_FREE = 1 << 6
};

enum {
	MLX5_WQE_MKEY_CONTEXT_ACCESS_FLAGS_ATOMIC = 1 << 6,
	MLX5_WQE_MKEY_CONTEXT_ACCESS_FLAGS_REMOTE_WRITE = 1 << 5,
	MLX5_WQE_MKEY_CONTEXT_ACCESS_FLAGS_REMOTE_READ = 1 << 4,
	MLX5_WQE_MKEY_CONTEXT_ACCESS_FLAGS_LOCAL_WRITE = 1 << 3,
	MLX5_WQE_MKEY_CONTEXT_ACCESS_FLAGS_LOCAL_READ = 1 << 2
};

struct mlx5_wqe_mkey_context_seg {
	uint8_t free;
	uint8_t reserved1;
	uint8_t access_flags;
	uint8_t sf;
	__be32 qpn_mkey;
	__be32 reserved2;
	__be32 flags_pd;
	__be64 start_addr;
	__be64 len;
	__be32 bsf_octword_size;
	__be32 reserved3[4];
	__be32 translations_octword_size;
	uint8_t reserved4[3];
	uint8_t log_page_size;
	__be32 reserved;
	union mlx5_wqe_umr_inline_seg inseg[0];
};

/*
 * Control segment - contains some control information for the current WQE.
 *
 * Output:
 *	seg	  - control segment to be filled
 * Input:
 *	pi	  - WQEBB number of the first block of this WQE.
 *		    This number should wrap at 0xffff, regardless of
 *		    size of the WQ.
 *	opcode	  - Opcode of this WQE. Encodes the type of operation
 *		    to be executed on the QP.
 *	opmod	  - Opcode modifier.
 *	qp_num	  - QP/SQ number this WQE is posted to.
 *	fm_ce_se  - FM (fence mode), CE (completion and event mode)
 *		    and SE (solicited event).
 *	ds	  - WQE size in octowords (16-byte units). DS accounts for all
 *		    the segments in the WQE as summarized in WQE construction.
 *	signature - WQE signature.
 *	imm	  - Immediate data/Invalidation key/UMR mkey.
 */
static MLX5DV_ALWAYS_INLINE
void mlx5dv_set_ctrl_seg(struct mlx5_wqe_ctrl_seg *seg, uint16_t pi,
			 uint8_t opcode, uint8_t opmod, uint32_t qp_num,
			 uint8_t fm_ce_se, uint8_t ds,
			 uint8_t signature, uint32_t imm)
{
	seg->opmod_idx_opcode = htobe32(((uint32_t)opmod << 24) | ((uint32_t)pi << 8) | opcode);
	seg->qpn_ds = htobe32((qp_num << 8) | ds);
	seg->fm_ce_se = fm_ce_se;
	seg->signature = signature;
	/*
	 * The caller should prepare "imm" in advance based on WR opcode.
	 * For IBV_WR_SEND_WITH_IMM and IBV_WR_RDMA_WRITE_WITH_IMM,
	 * the "imm" should be assigned as is.
	 * For the IBV_WR_SEND_WITH_INV, it should be htobe32(imm).
	 */
	seg->imm = imm;
}
/* x86 optimized version of mlx5dv_set_ctrl_seg()
 *
 * This is useful when doing calculations on large data sets in parallel.
 *
 * It is not suited to serialized algorithms.
 */
#if defined(__SSE3__)
static MLX5DV_ALWAYS_INLINE
void mlx5dv_x86_set_ctrl_seg(struct mlx5_wqe_ctrl_seg *seg, uint16_t pi,
			     uint8_t opcode, uint8_t opmod, uint32_t qp_num,
			     uint8_t fm_ce_se, uint8_t ds,
			     uint8_t signature, uint32_t imm)
{
	__m128i val = _mm_set_epi32(imm, qp_num, (ds << 16) | pi,
				    (signature << 24) | (opcode << 16) | (opmod << 8) | fm_ce_se);
	__m128i mask = _mm_set_epi8(15, 14, 13, 12,	/* immediate */
				    0,			/* signal/fence_mode */
#if CHAR_MIN
				    -128, -128,		/* reserved */
#else
				    0x80, 0x80,		/* reserved */
#endif
				    3,			/* signature */
				    6,			/* data size */
				    8, 9, 10,		/* QP num */
				    2,			/* opcode */
				    4, 5,		/* sw_pi in BE */
				    1			/* opmod */
				    );
	*(__m128i *) seg = _mm_shuffle_epi8(val, mask);
}
#endif /* defined(__SSE3__) */

/*
 * Datagram Segment - contains address information required in order
 * to form a datagram message.
 *
 * Output:
 *	seg		- datagram segment to be filled.
 * Input:
 *	key		- Q_key/access key.
 *	dqp_dct		- Destination QP number for UD and DCT for DC.
 *	ext		- Address vector extension.
 *	stat_rate_sl	- Maximum static rate control, SL/ethernet priority.
 *	fl_mlid		- Force loopback and source LID for IB.
 *	rlid		- Remote LID
 *	rmac		- Remote MAC
 *	tclass		- GRH tclass/IPv6 tclass/IPv4 ToS
 *	hop_limit	- GRH hop limit/IPv6 hop limit/IPv4 TTL
 *	grh_gid_fi	- GRH, source GID address and IPv6 flow label.
 *	rgid		- Remote GID/IP address.
 */
static MLX5DV_ALWAYS_INLINE
void mlx5dv_set_dgram_seg(struct mlx5_wqe_datagram_seg *seg,
			  uint64_t key, uint32_t dqp_dct,
			  uint8_t ext, uint8_t stat_rate_sl,
			  uint8_t fl_mlid, uint16_t rlid,
			  uint8_t *rmac, uint8_t tclass,
			  uint8_t hop_limit, uint32_t grh_gid_fi,
			  uint8_t *rgid)
{
	/* Always put 64 bits, in q_key, the reserved part will be 0 */
	seg->av.key.dc_key = htobe64(key);
	seg->av.dqp_dct = htobe32(((uint32_t)ext << 31) | dqp_dct);
	seg->av.stat_rate_sl = stat_rate_sl;
	seg->av.fl_mlid = fl_mlid;
	seg->av.rlid = htobe16(rlid);
	memcpy(seg->av.rmac, rmac, 6);
	seg->av.tclass = tclass;
	seg->av.hop_limit = hop_limit;
	seg->av.grh_gid_fl = htobe32(grh_gid_fi);
	memcpy(seg->av.rgid, rgid, 16);
}

/*
 * Data Segments - contain pointers and a byte count for the scatter/gather list.
 * They can optionally contain data, which will save a memory read access for
 * gather Work Requests.
 */
static MLX5DV_ALWAYS_INLINE
void mlx5dv_set_data_seg(struct mlx5_wqe_data_seg *seg,
			 uint32_t length, uint32_t lkey,
			 uintptr_t address)
{
	seg->byte_count = htobe32(length);
	seg->lkey = htobe32(lkey);
	seg->addr = htobe64(address);
}
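/*
 * Usage sketch (illustrative only): filling the first WQE of an SQ exposed
 * by mlx5dv_init_obj() with a signaled SEND carrying one data segment.
 * Doorbell record and BlueFlame/UAR handling as well as producer index
 * management are omitted; "dv_qp", "qp", "sq_pi", "mr", "buf" and "len" are
 * placeholders. The ds value is 2 because the control segment and one data
 * segment each occupy one 16-byte octoword.
 *
 *	struct mlx5_wqe_ctrl_seg *ctrl = dv_qp.sq.buf;
 *	struct mlx5_wqe_data_seg *dseg = (struct mlx5_wqe_data_seg *)(ctrl + 1);
 *
 *	mlx5dv_set_ctrl_seg(ctrl, sq_pi, MLX5_OPCODE_SEND, 0, qp->qp_num,
 *			    MLX5_WQE_CTRL_CQ_UPDATE, 2, 0, 0);
 *	mlx5dv_set_data_seg(dseg, len, mr->lkey, (uintptr_t)buf);
 */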
/*
 * x86 optimized version of mlx5dv_set_data_seg()
 *
 * This is useful when doing calculations on large data sets in parallel.
 *
 * It is not suited to serialized algorithms.
 */
#if defined(__SSE3__)
static MLX5DV_ALWAYS_INLINE
void mlx5dv_x86_set_data_seg(struct mlx5_wqe_data_seg *seg,
			     uint32_t length, uint32_t lkey,
			     uintptr_t address)
{
	uint64_t address64 = address;
	__m128i val = _mm_set_epi32((uint32_t)address64, (uint32_t)(address64 >> 32), lkey, length);
	__m128i mask = _mm_set_epi8(12, 13, 14, 15,	/* local address low */
				    8, 9, 10, 11,	/* local address high */
				    4, 5, 6, 7,		/* l_key */
				    0, 1, 2, 3		/* byte count */
				    );
	*(__m128i *) seg = _mm_shuffle_epi8(val, mask);
}
#endif /* defined(__SSE3__) */

/*
 * Eth Segment - contains packet headers and information for stateless L2, L3, L4 offloading.
 *
 * Output:
 *	seg		 - Eth segment to be filled.
 * Input:
 *	cs_flags	 - l3cs/l3cs_inner/l4cs/l4cs_inner.
 *	mss		 - Maximum segment size. For TSO WQEs, the number of bytes
 *			   in the TCP payload to be transmitted in each packet. Must
 *			   be 0 on non TSO WQEs.
 *	inline_hdr_sz	 - Length of the inlined packet headers.
 *	inline_hdr_start - Inlined packet header.
 */
static MLX5DV_ALWAYS_INLINE
void mlx5dv_set_eth_seg(struct mlx5_wqe_eth_seg *seg, uint8_t cs_flags,
			uint16_t mss, uint16_t inline_hdr_sz,
			uint8_t *inline_hdr_start)
{
	seg->cs_flags = cs_flags;
	seg->mss = htobe16(mss);
	seg->inline_hdr_sz = htobe16(inline_hdr_sz);
	memcpy(seg->inline_hdr_start, inline_hdr_start, inline_hdr_sz);
}

enum mlx5dv_set_ctx_attr_type {
	MLX5DV_CTX_ATTR_BUF_ALLOCATORS = 1,
};

enum {
	MLX5_MMAP_GET_REGULAR_PAGES_CMD = 0,
	MLX5_MMAP_GET_NC_PAGES_CMD = 3,
};

struct mlx5dv_ctx_allocators {
	void *(*alloc)(size_t size, void *priv_data);
	void (*free)(void *ptr, void *priv_data);
	void *data;
};

/*
 * Generic context attributes set API
 *
 * Returns 0 on success, or the value of errno on failure
 * (which indicates the failure reason).
 */
int mlx5dv_set_context_attr(struct ibv_context *context,
			    enum mlx5dv_set_ctx_attr_type type, void *attr);
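/*
 * Usage sketch (illustrative only): overriding the buffer allocators used by
 * the provider. my_alloc() and my_free() are hypothetical application
 * callbacks; any allocation scheme matching the callback signatures can be
 * plugged in.
 *
 *	static void *my_alloc(size_t size, void *priv_data)
 *	{
 *		void *buf = NULL;
 *		posix_memalign(&buf, sysconf(_SC_PAGESIZE), size);
 *		return buf;
 *	}
 *
 *	static void my_free(void *ptr, void *priv_data)
 *	{
 *		free(ptr);
 *	}
 *
 *	struct mlx5dv_ctx_allocators allocators = {
 *		.alloc = my_alloc,
 *		.free = my_free,
 *		.data = NULL,
 *	};
 *	mlx5dv_set_context_attr(context, MLX5DV_CTX_ATTR_BUF_ALLOCATORS, &allocators);
 */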
struct mlx5dv_clock_info {
	uint64_t nsec;
	uint64_t last_cycles;
	uint64_t frac;
	uint32_t mult;
	uint32_t shift;
	uint64_t mask;
};

/*
 * Get mlx5 core clock info
 *
 * Output:
 *	clock_info - clock info to be filled
 * Input:
 *	context - device context
 *
 * Return: 0 on success, or the value of errno on failure
 */
int mlx5dv_get_clock_info(struct ibv_context *context,
			  struct mlx5dv_clock_info *clock_info);
/*
 * Translate device timestamp to nano-sec
 *
 * Input:
 *	clock_info - clock info previously obtained via mlx5dv_get_clock_info()
 *	device_timestamp - timestamp to translate
 *
 * Return: nano-sec
 */
static inline uint64_t mlx5dv_ts_to_ns(struct mlx5dv_clock_info *clock_info,
				       uint64_t device_timestamp)
{
	uint64_t delta, nsec;

	/*
	 * device_timestamp & cycles are the free running 'mask' bit counters
	 * from the hardware hca_core_clock clock.
	 */
	delta = (device_timestamp - clock_info->last_cycles) & clock_info->mask;
	nsec = clock_info->nsec;

	/*
	 * Guess if the device_timestamp is more recent than
	 * clock_info->last_cycles, if not (too far in the future) treat
	 * it as old time stamp. This will break every max_clock_info_update_nsec.
	 */
	if (delta > clock_info->mask / 2) {
		delta = (clock_info->last_cycles - device_timestamp) &
			clock_info->mask;
		nsec -= ((delta * clock_info->mult) - clock_info->frac) >>
			clock_info->shift;
	} else {
		nsec += ((delta * clock_info->mult) + clock_info->frac) >>
			clock_info->shift;
	}

	return nsec;
}
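/*
 * Usage sketch (illustrative only): converting a raw completion timestamp
 * (e.g. the value read via ibv_wc_read_completion_ts()) to nanoseconds. For
 * the translation to stay correct, the clock info has to be refreshed at
 * least every max_clock_info_update_nsec (see struct mlx5dv_context).
 *
 *	struct mlx5dv_clock_info clock_info;
 *	uint64_t ns;
 *
 *	if (!mlx5dv_get_clock_info(context, &clock_info))
 *		ns = mlx5dv_ts_to_ns(&clock_info, device_timestamp);
 */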
enum mlx5dv_context_attr_flags {
	MLX5DV_CONTEXT_FLAGS_DEVX = 1 << 0,
};

struct mlx5dv_context_attr {
	uint32_t flags; /* Use enum mlx5dv_context_attr_flags */
	uint64_t comp_mask;
};

bool mlx5dv_is_supported(struct ibv_device *device);

struct ibv_context *
mlx5dv_open_device(struct ibv_device *device, struct mlx5dv_context_attr *attr);
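/*
 * Usage sketch (illustrative only): opening an mlx5 device with DEVX enabled
 * so that the mlx5dv_devx_* calls below can be used. Requires a device for
 * which mlx5dv_is_supported() returns true and sufficient privileges.
 *
 *	struct mlx5dv_context_attr ctx_attr = {
 *		.flags = MLX5DV_CONTEXT_FLAGS_DEVX,
 *	};
 *	struct ibv_context *context = NULL;
 *
 *	if (mlx5dv_is_supported(device))
 *		context = mlx5dv_open_device(device, &ctx_attr);
 */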
struct mlx5dv_devx_obj;

struct mlx5dv_devx_obj *
mlx5dv_devx_obj_create(struct ibv_context *context, const void *in, size_t inlen,
		       void *out, size_t outlen);

int mlx5dv_devx_obj_query(struct mlx5dv_devx_obj *obj, const void *in, size_t inlen,
			  void *out, size_t outlen);
int mlx5dv_devx_obj_modify(struct mlx5dv_devx_obj *obj, const void *in, size_t inlen,
			   void *out, size_t outlen);
int mlx5dv_devx_obj_destroy(struct mlx5dv_devx_obj *obj);
int mlx5dv_devx_general_cmd(struct ibv_context *context, const void *in, size_t inlen,
			    void *out, size_t outlen);

struct mlx5dv_devx_umem {
	uint32_t umem_id;
};

struct mlx5dv_devx_umem *
mlx5dv_devx_umem_reg(struct ibv_context *ctx, void *addr, size_t size, uint32_t access);
int mlx5dv_devx_umem_dereg(struct mlx5dv_devx_umem *umem);

struct mlx5dv_devx_uar {
	void *reg_addr;
	void *base_addr;
	uint32_t page_id;
	off_t mmap_off;
	uint64_t comp_mask;
};

struct mlx5dv_devx_uar *mlx5dv_devx_alloc_uar(struct ibv_context *context,
					      uint32_t flags);
void mlx5dv_devx_free_uar(struct mlx5dv_devx_uar *devx_uar);

struct mlx5dv_var {
	uint32_t page_id;
	uint32_t length;
	off_t mmap_off;
	uint64_t comp_mask;
};

struct mlx5dv_var *
mlx5dv_alloc_var(struct ibv_context *context, uint32_t flags);
void mlx5dv_free_var(struct mlx5dv_var *dv_var);

int mlx5dv_devx_query_eqn(struct ibv_context *context, uint32_t vector,
			  uint32_t *eqn);
int mlx5dv_devx_cq_query(struct ibv_cq *cq, const void *in, size_t inlen,
			 void *out, size_t outlen);
int mlx5dv_devx_cq_modify(struct ibv_cq *cq, const void *in, size_t inlen,
			  void *out, size_t outlen);
int mlx5dv_devx_qp_query(struct ibv_qp *qp, const void *in, size_t inlen,
			 void *out, size_t outlen);
int mlx5dv_devx_qp_modify(struct ibv_qp *qp, const void *in, size_t inlen,
			  void *out, size_t outlen);
int mlx5dv_devx_srq_query(struct ibv_srq *srq, const void *in, size_t inlen,
			  void *out, size_t outlen);
int mlx5dv_devx_srq_modify(struct ibv_srq *srq, const void *in, size_t inlen,
			   void *out, size_t outlen);
int mlx5dv_devx_wq_query(struct ibv_wq *wq, const void *in, size_t inlen,
			 void *out, size_t outlen);
int mlx5dv_devx_wq_modify(struct ibv_wq *wq, const void *in, size_t inlen,
			  void *out, size_t outlen);
int mlx5dv_devx_ind_tbl_query(struct ibv_rwq_ind_table *ind_tbl,
			      const void *in, size_t inlen,
			      void *out, size_t outlen);
int mlx5dv_devx_ind_tbl_modify(struct ibv_rwq_ind_table *ind_tbl,
			       const void *in, size_t inlen,
			       void *out, size_t outlen);

struct mlx5dv_devx_cmd_comp {
	int fd;
};

struct mlx5dv_devx_cmd_comp *
mlx5dv_devx_create_cmd_comp(struct ibv_context *context);
void mlx5dv_devx_destroy_cmd_comp(struct mlx5dv_devx_cmd_comp *cmd_comp);
int mlx5dv_devx_obj_query_async(struct mlx5dv_devx_obj *obj, const void *in,
				size_t inlen, size_t outlen,
				uint64_t wr_id,
				struct mlx5dv_devx_cmd_comp *cmd_comp);
int mlx5dv_devx_get_async_cmd_comp(struct mlx5dv_devx_cmd_comp *cmd_comp,
				   struct mlx5dv_devx_async_cmd_hdr *cmd_resp,
				   size_t cmd_resp_len);

struct mlx5dv_devx_event_channel {
	int fd;
};

struct mlx5dv_devx_event_channel *
mlx5dv_devx_create_event_channel(struct ibv_context *context,
				 enum mlx5dv_devx_create_event_channel_flags flags);
void mlx5dv_devx_destroy_event_channel(struct mlx5dv_devx_event_channel *event_channel);

int mlx5dv_devx_subscribe_devx_event(struct mlx5dv_devx_event_channel *event_channel,
				     struct mlx5dv_devx_obj *obj, /* can be NULL for unaffiliated events */
				     uint16_t events_sz,
				     uint16_t events_num[],
				     uint64_t cookie);
int mlx5dv_devx_subscribe_devx_event_fd(struct mlx5dv_devx_event_channel *event_channel,
					int fd,
					struct mlx5dv_devx_obj *obj, /* can be NULL for unaffiliated events */
					uint16_t event_num);
/* Return: number of bytes read upon success, otherwise -1 and errno is set */
ssize_t mlx5dv_devx_get_event(struct mlx5dv_devx_event_channel *event_channel,
			      struct mlx5dv_devx_async_event_hdr *event_data,
			      size_t event_resp_len);
#define __devx_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)NULL)
#define __devx_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)
#define __devx_bit_sz(typ, fld) sizeof(__devx_nullp(typ)->fld)
#define __devx_bit_off(typ, fld) offsetof(struct mlx5_ifc_##typ##_bits, fld)
#define __devx_dw_off(bit_off) ((bit_off) / 32)
#define __devx_64_off(bit_off) ((bit_off) / 64)
#define __devx_dw_bit_off(bit_sz, bit_off) (32 - (bit_sz) - ((bit_off) & 0x1f))
#define __devx_mask(bit_sz) ((uint32_t)((1ull << (bit_sz)) - 1))
#define __devx_dw_mask(bit_sz, bit_off) \
	(__devx_mask(bit_sz) << __devx_dw_bit_off(bit_sz, bit_off))

#define DEVX_FLD_SZ_BYTES(typ, fld) (__devx_bit_sz(typ, fld) / 8)
#define DEVX_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
#define DEVX_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
#define DEVX_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
#define DEVX_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
#define DEVX_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define DEVX_BYTE_OFF(typ, fld) (__devx_bit_off(typ, fld) / 8)
#define DEVX_ADDR_OF(typ, p, fld) \
	((unsigned char *)(p) + DEVX_BYTE_OFF(typ, fld))

static inline void _devx_set(void *p, uint32_t value, size_t bit_off,
			     size_t bit_sz)
{
	__be32 *fld = (__be32 *)(p) + __devx_dw_off(bit_off);
	uint32_t dw_mask = __devx_dw_mask(bit_sz, bit_off);
	uint32_t mask = __devx_mask(bit_sz);

	*fld = htobe32((be32toh(*fld) & (~dw_mask)) |
		       ((value & mask) << __devx_dw_bit_off(bit_sz, bit_off)));
}

#define DEVX_SET(typ, p, fld, v) \
	_devx_set(p, v, __devx_bit_off(typ, fld), __devx_bit_sz(typ, fld))

static inline uint32_t _devx_get(const void *p, size_t bit_off, size_t bit_sz)
{
	return ((be32toh(*((const __be32 *)(p) + __devx_dw_off(bit_off))) >>
		 __devx_dw_bit_off(bit_sz, bit_off)) &
		__devx_mask(bit_sz));
}

#define DEVX_GET(typ, p, fld) \
	_devx_get(p, __devx_bit_off(typ, fld), __devx_bit_sz(typ, fld))

static inline void _devx_set64(void *p, uint64_t v, size_t bit_off)
{
	*((__be64 *)(p) + __devx_64_off(bit_off)) = htobe64(v);
}

#define DEVX_SET64(typ, p, fld, v) _devx_set64(p, v, __devx_bit_off(typ, fld))

static inline uint64_t _devx_get64(const void *p, size_t bit_off)
{
	return be64toh(*((const __be64 *)(p) + __devx_64_off(bit_off)));
}

#define DEVX_GET64(typ, p, fld) _devx_get64(p, __devx_bit_off(typ, fld))
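/*
 * Usage sketch (illustrative only): the DEVX_* accessors operate on PRM
 * layouts described by "struct mlx5_ifc_<name>_bits" pseudo-structs in which
 * every one-byte element stands for a single bit, so offsetof() yields a bit
 * offset and sizeof() a bit width. The structure below is a made-up example,
 * not a real PRM object; real layouts come from the device specification.
 *
 *	struct mlx5_ifc_example_cmd_in_bits {
 *		uint8_t opcode[0x10];
 *		uint8_t uid[0x10];
 *		uint8_t reserved_at_20[0x20];
 *		uint8_t object_id[0x20];
 *	};
 *
 *	uint32_t in[DEVX_ST_SZ_DW(example_cmd_in)] = {};
 *
 *	DEVX_SET(example_cmd_in, in, opcode, 0x1234);
 *	DEVX_SET(example_cmd_in, in, object_id, obj_id);
 *	uint16_t opcode = DEVX_GET(example_cmd_in, in, opcode);
 */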
struct mlx5dv_dr_domain;
struct mlx5dv_dr_table;
struct mlx5dv_dr_matcher;
struct mlx5dv_dr_rule;
struct mlx5dv_dr_action;

enum mlx5dv_dr_domain_type {
	MLX5DV_DR_DOMAIN_TYPE_NIC_RX,
	MLX5DV_DR_DOMAIN_TYPE_NIC_TX,
	MLX5DV_DR_DOMAIN_TYPE_FDB,
};

enum mlx5dv_dr_domain_sync_flags {
	MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW = 1 << 0,
	MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW = 1 << 1,
	MLX5DV_DR_DOMAIN_SYNC_FLAGS_MEM = 1 << 2,
};

struct mlx5dv_dr_flow_meter_attr {
	struct mlx5dv_dr_table *next_table;
	uint8_t active;
	uint8_t reg_c_index;
	size_t flow_meter_parameter_sz;
	void *flow_meter_parameter;
};

struct mlx5dv_dr_flow_sampler_attr {
	uint32_t sample_ratio;
	struct mlx5dv_dr_table *default_next_table;
	uint32_t num_sample_actions;
	struct mlx5dv_dr_action **sample_actions;
	__be64 action;
};

struct mlx5dv_dr_domain *
mlx5dv_dr_domain_create(struct ibv_context *ctx,
			enum mlx5dv_dr_domain_type type);
int mlx5dv_dr_domain_destroy(struct mlx5dv_dr_domain *domain);
int mlx5dv_dr_domain_sync(struct mlx5dv_dr_domain *domain, uint32_t flags);
void mlx5dv_dr_domain_set_reclaim_device_memory(struct mlx5dv_dr_domain *dmn,
						bool enable);

struct mlx5dv_dr_table *
mlx5dv_dr_table_create(struct mlx5dv_dr_domain *domain, uint32_t level);
int mlx5dv_dr_table_destroy(struct mlx5dv_dr_table *table);

struct mlx5dv_dr_matcher *
mlx5dv_dr_matcher_create(struct mlx5dv_dr_table *table,
			 uint16_t priority,
			 uint8_t match_criteria_enable,
			 struct mlx5dv_flow_match_parameters *mask);
int mlx5dv_dr_matcher_destroy(struct mlx5dv_dr_matcher *matcher);

struct mlx5dv_dr_rule *
mlx5dv_dr_rule_create(struct mlx5dv_dr_matcher *matcher,
		      struct mlx5dv_flow_match_parameters *value,
		      size_t num_actions,
		      struct mlx5dv_dr_action *actions[]);
int mlx5dv_dr_rule_destroy(struct mlx5dv_dr_rule *rule);
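/*
 * Usage sketch (illustrative only): the typical mlx5dv_dr object hierarchy is
 * domain -> table -> matcher -> rule, with actions created separately and
 * attached when the rule is inserted. The zeroed mask/value and the zero
 * priority and match_criteria_enable below are purely for illustration; real
 * users fill match_buf in device-specification format and set
 * match_criteria_enable accordingly. Error handling is omitted.
 *
 *	size_t match_sz = sizeof(uint64_t) * 8;
 *	struct mlx5dv_flow_match_parameters *mask =
 *		calloc(1, sizeof(*mask) + match_sz);
 *	mask->match_sz = match_sz;
 *
 *	struct mlx5dv_dr_domain *dmn =
 *		mlx5dv_dr_domain_create(context, MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
 *	struct mlx5dv_dr_table *tbl = mlx5dv_dr_table_create(dmn, 0);
 *	struct mlx5dv_dr_matcher *matcher =
 *		mlx5dv_dr_matcher_create(tbl, 0, 0, mask);
 *
 *	struct mlx5dv_dr_action *drop = mlx5dv_dr_action_create_drop();
 *	struct mlx5dv_dr_action *actions[] = { drop };
 *	struct mlx5dv_dr_rule *rule =
 *		mlx5dv_dr_rule_create(matcher, mask, 1, actions);
 */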
enum mlx5dv_dr_action_flags {
	MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL = 1 << 0,
};

struct mlx5dv_dr_action *
mlx5dv_dr_action_create_dest_ibv_qp(struct ibv_qp *ibqp);

struct mlx5dv_dr_action *
mlx5dv_dr_action_create_dest_table(struct mlx5dv_dr_table *table);

struct mlx5dv_dr_action *
mlx5dv_dr_action_create_dest_vport(struct mlx5dv_dr_domain *domain,
				   uint32_t vport);

struct mlx5dv_dr_action *
mlx5dv_dr_action_create_dest_devx_tir(struct mlx5dv_devx_obj *devx_obj);

struct mlx5dv_dr_action *
mlx5dv_dr_action_create_dest_ib_port(struct mlx5dv_dr_domain *domain,
				     uint32_t ib_port);

enum mlx5dv_dr_action_dest_type {
	MLX5DV_DR_ACTION_DEST,
	MLX5DV_DR_ACTION_DEST_REFORMAT,
};

struct mlx5dv_dr_action_dest_reformat {
	struct mlx5dv_dr_action *reformat;
	struct mlx5dv_dr_action *dest;
};

struct mlx5dv_dr_action_dest_attr {
	enum mlx5dv_dr_action_dest_type type;
	union {
		struct mlx5dv_dr_action *dest;
		struct mlx5dv_dr_action_dest_reformat *dest_reformat;
	};
};

struct mlx5dv_dr_action *
mlx5dv_dr_action_create_dest_array(struct mlx5dv_dr_domain *domain,
				   size_t num_dest,
				   struct mlx5dv_dr_action_dest_attr *dests[]);

struct mlx5dv_dr_action *mlx5dv_dr_action_create_drop(void);

struct mlx5dv_dr_action *mlx5dv_dr_action_create_default_miss(void);

struct mlx5dv_dr_action *mlx5dv_dr_action_create_tag(uint32_t tag_value);

struct mlx5dv_dr_action *
mlx5dv_dr_action_create_flow_counter(struct mlx5dv_devx_obj *devx_obj,
				     uint32_t offset);

enum mlx5dv_dr_action_aso_first_hit_flags {
	MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET = 1 << 0,
};

enum mlx5dv_dr_action_aso_flow_meter_flags {
	MLX5DV_DR_ACTION_FLAGS_ASO_FLOW_METER_RED = 1 << 0,
	MLX5DV_DR_ACTION_FLAGS_ASO_FLOW_METER_YELLOW = 1 << 1,
	MLX5DV_DR_ACTION_FLAGS_ASO_FLOW_METER_GREEN = 1 << 2,
	MLX5DV_DR_ACTION_FLAGS_ASO_FLOW_METER_UNDEFINED = 1 << 3,
};

enum mlx5dv_dr_action_aso_ct_flags {
	MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR = 1 << 0,
	MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER = 1 << 1,
};

struct mlx5dv_dr_action *
mlx5dv_dr_action_create_aso(struct mlx5dv_dr_domain *domain,
			    struct mlx5dv_devx_obj *devx_obj,
			    uint32_t offset,
			    uint32_t flags,
			    uint8_t return_reg_c);
int mlx5dv_dr_action_modify_aso(struct mlx5dv_dr_action *action,
				uint32_t offset,
				uint32_t flags,
				uint8_t return_reg_c);

struct mlx5dv_dr_action *
mlx5dv_dr_action_create_packet_reformat(struct mlx5dv_dr_domain *domain,
					uint32_t flags,
					enum mlx5dv_flow_action_packet_reformat_type reformat_type,
					size_t data_sz, void *data);

struct mlx5dv_dr_action *
mlx5dv_dr_action_create_modify_header(struct mlx5dv_dr_domain *domain,
				      uint32_t flags,
				      size_t actions_sz,
				      __be64 actions[]);

struct mlx5dv_dr_action *
mlx5dv_dr_action_create_flow_meter(struct mlx5dv_dr_flow_meter_attr *attr);
int mlx5dv_dr_action_modify_flow_meter(struct mlx5dv_dr_action *action,
				       struct mlx5dv_dr_flow_meter_attr *attr,
				       __be64 modify_field_select);

struct mlx5dv_dr_action *
mlx5dv_dr_action_create_flow_sampler(struct mlx5dv_dr_flow_sampler_attr *attr);

struct mlx5dv_dr_action *mlx5dv_dr_action_create_pop_vlan(void);

struct mlx5dv_dr_action *
mlx5dv_dr_action_create_push_vlan(struct mlx5dv_dr_domain *domain,
				  __be32 vlan_hdr);

int mlx5dv_dr_action_destroy(struct mlx5dv_dr_action *action);

int mlx5dv_dump_dr_domain(FILE *fout, struct mlx5dv_dr_domain *domain);
int mlx5dv_dump_dr_table(FILE *fout, struct mlx5dv_dr_table *table);
int mlx5dv_dump_dr_matcher(FILE *fout, struct mlx5dv_dr_matcher *matcher);
int mlx5dv_dump_dr_rule(FILE *fout, struct mlx5dv_dr_rule *rule);

struct mlx5dv_pp {
	uint16_t index;
};

struct mlx5dv_pp *mlx5dv_pp_alloc(struct ibv_context *context,
				  size_t pp_context_sz,
				  const void *pp_context,
				  uint32_t flags);
void mlx5dv_pp_free(struct mlx5dv_pp *pp);

int mlx5dv_query_qp_lag_port(struct ibv_qp *qp,
			     uint8_t *port_num,
			     uint8_t *active_port_num);
int mlx5dv_modify_qp_lag_port(struct ibv_qp *qp, uint8_t port_num);

int mlx5dv_modify_qp_udp_sport(struct ibv_qp *qp, uint16_t udp_sport);

enum mlx5dv_sched_elem_attr_flags {
	MLX5DV_SCHED_ELEM_ATTR_FLAGS_BW_SHARE = 1 << 0,
	MLX5DV_SCHED_ELEM_ATTR_FLAGS_MAX_AVG_BW = 1 << 1,
};

struct mlx5dv_sched_attr {
	struct mlx5dv_sched_node *parent;
	uint32_t flags; /* Use mlx5dv_sched_elem_attr_flags */
	uint32_t bw_share;
	uint32_t max_avg_bw;
	uint64_t comp_mask;
};

struct mlx5dv_sched_node;
struct mlx5dv_sched_leaf;

struct mlx5dv_sched_node *
mlx5dv_sched_node_create(struct ibv_context *context,
			 const struct mlx5dv_sched_attr *sched_attr);
struct mlx5dv_sched_leaf *
mlx5dv_sched_leaf_create(struct ibv_context *context,
			 const struct mlx5dv_sched_attr *sched_attr);

int mlx5dv_sched_node_modify(struct mlx5dv_sched_node *node,
			     const struct mlx5dv_sched_attr *sched_attr);
int mlx5dv_sched_leaf_modify(struct mlx5dv_sched_leaf *leaf,
			     const struct mlx5dv_sched_attr *sched_attr);

int mlx5dv_sched_node_destroy(struct mlx5dv_sched_node *node);
int mlx5dv_sched_leaf_destroy(struct mlx5dv_sched_leaf *leaf);

int mlx5dv_modify_qp_sched_elem(struct ibv_qp *qp,
				const struct mlx5dv_sched_leaf *requestor,
				const struct mlx5dv_sched_leaf *responder);

int mlx5dv_reserved_qpn_alloc(struct ibv_context *ctx, uint32_t *qpn);
int mlx5dv_reserved_qpn_dealloc(struct ibv_context *ctx, uint32_t qpn);

#ifdef __cplusplus
}
#endif

#endif /* _MLX5DV_H_ */