IntrinsicsRISCV.td 61 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
6611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555
  1. //===- IntrinsicsRISCV.td - Defines RISCV intrinsics -------*- tablegen -*-===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file defines all of the RISCV-specific intrinsics.
  10. //
  11. //===----------------------------------------------------------------------===//
  12. //===----------------------------------------------------------------------===//
  13. // Atomics
  14. // Atomic Intrinsics have multiple versions for different access widths, which
  15. // all follow one of the following signatures (depending on how many arguments
  16. // they require). We carefully instantiate only specific versions of these for
  17. // specific integer widths, rather than using `llvm_anyint_ty`.
  18. //
  19. // In fact, as these intrinsics take `llvm_anyptr_ty`, the given names are the
  20. // canonical names, and the intrinsics used in the code will have a name
  21. // suffixed with the pointer type they are specialised for (denoted `<p>` in the
  22. // names below), in order to avoid type conflicts.
  23. let TargetPrefix = "riscv" in {
  24. // T @llvm.<name>.T.<p>(any*, T, T, T imm);
  25. class MaskedAtomicRMWFourArg<LLVMType itype>
  26. : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype],
  27. [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>]>;
  28. // T @llvm.<name>.T.<p>(any*, T, T, T, T imm);
  29. class MaskedAtomicRMWFiveArg<LLVMType itype>
  30. : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype, itype],
  31. [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<4>>]>;
  32. // We define 32-bit and 64-bit variants of the above, where T stands for i32
  33. // or i64 respectively:
  34. multiclass MaskedAtomicRMWFourArgIntrinsics {
  35. // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32 imm);
  36. def _i32 : MaskedAtomicRMWFourArg<llvm_i32_ty>;
  37. // i64 @llvm.<name>.i32.<p>(any*, i64, i64, i64 imm);
  38. def _i64 : MaskedAtomicRMWFourArg<llvm_i64_ty>;
  39. }
  40. multiclass MaskedAtomicRMWFiveArgIntrinsics {
  41. // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32, i32 imm);
  42. def _i32 : MaskedAtomicRMWFiveArg<llvm_i32_ty>;
  43. // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64, i64 imm);
  44. def _i64 : MaskedAtomicRMWFiveArg<llvm_i64_ty>;
  45. }
  46. // @llvm.riscv.masked.atomicrmw.*.{i32,i64}.<p>(...)
  47. defm int_riscv_masked_atomicrmw_xchg : MaskedAtomicRMWFourArgIntrinsics;
  48. defm int_riscv_masked_atomicrmw_add : MaskedAtomicRMWFourArgIntrinsics;
  49. defm int_riscv_masked_atomicrmw_sub : MaskedAtomicRMWFourArgIntrinsics;
  50. defm int_riscv_masked_atomicrmw_nand : MaskedAtomicRMWFourArgIntrinsics;
  51. // Signed min and max need an extra operand to do sign extension with.
  52. defm int_riscv_masked_atomicrmw_max : MaskedAtomicRMWFiveArgIntrinsics;
  53. defm int_riscv_masked_atomicrmw_min : MaskedAtomicRMWFiveArgIntrinsics;
  54. // Unsigned min and max don't need the extra operand.
  55. defm int_riscv_masked_atomicrmw_umax : MaskedAtomicRMWFourArgIntrinsics;
  56. defm int_riscv_masked_atomicrmw_umin : MaskedAtomicRMWFourArgIntrinsics;
  57. // @llvm.riscv.masked.cmpxchg.{i32,i64}.<p>(...)
  58. defm int_riscv_masked_cmpxchg : MaskedAtomicRMWFiveArgIntrinsics;
  59. } // TargetPrefix = "riscv"
  60. //===----------------------------------------------------------------------===//
  61. // Bitmanip (Bit Manipulation) Extension
  62. let TargetPrefix = "riscv" in {
  63. class BitManipGPRIntrinsics
  64. : Intrinsic<[llvm_any_ty],
  65. [LLVMMatchType<0>],
  66. [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
  67. class BitManipGPRGPRIntrinsics
  68. : Intrinsic<[llvm_any_ty],
  69. [LLVMMatchType<0>, LLVMMatchType<0>],
  70. [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
  71. class BitManipGPRGPRGRIntrinsics
  72. : Intrinsic<[llvm_any_ty],
  73. [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
  74. [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
  75. // Zbb
  76. def int_riscv_orc_b : BitManipGPRIntrinsics;
  77. // Zbc or Zbkc
  78. def int_riscv_clmul : BitManipGPRGPRIntrinsics;
  79. def int_riscv_clmulh : BitManipGPRGPRIntrinsics;
  80. // Zbc
  81. def int_riscv_clmulr : BitManipGPRGPRIntrinsics;
  82. // Zbe
  83. def int_riscv_bcompress : BitManipGPRGPRIntrinsics;
  84. def int_riscv_bdecompress : BitManipGPRGPRIntrinsics;
  85. // Zbf
  86. def int_riscv_bfp : BitManipGPRGPRIntrinsics;
  87. // Zbp
  88. def int_riscv_grev : BitManipGPRGPRIntrinsics;
  89. def int_riscv_gorc : BitManipGPRGPRIntrinsics;
  90. def int_riscv_shfl : BitManipGPRGPRIntrinsics;
  91. def int_riscv_unshfl : BitManipGPRGPRIntrinsics;
  92. def int_riscv_xperm_n : BitManipGPRGPRIntrinsics;
  93. def int_riscv_xperm_b : BitManipGPRGPRIntrinsics;
  94. def int_riscv_xperm_h : BitManipGPRGPRIntrinsics;
  95. def int_riscv_xperm_w : BitManipGPRGPRIntrinsics;
  96. // Zbr
  97. def int_riscv_crc32_b : BitManipGPRIntrinsics;
  98. def int_riscv_crc32_h : BitManipGPRIntrinsics;
  99. def int_riscv_crc32_w : BitManipGPRIntrinsics;
  100. def int_riscv_crc32_d : BitManipGPRIntrinsics;
  101. def int_riscv_crc32c_b : BitManipGPRIntrinsics;
  102. def int_riscv_crc32c_h : BitManipGPRIntrinsics;
  103. def int_riscv_crc32c_w : BitManipGPRIntrinsics;
  104. def int_riscv_crc32c_d : BitManipGPRIntrinsics;
  105. // Zbt
  106. def int_riscv_fsl : BitManipGPRGPRGRIntrinsics;
  107. def int_riscv_fsr : BitManipGPRGPRGRIntrinsics;
  108. // Zbkb
  109. def int_riscv_brev8 : BitManipGPRIntrinsics;
  110. def int_riscv_zip : BitManipGPRIntrinsics;
  111. def int_riscv_unzip : BitManipGPRIntrinsics;
  112. // Zbkx
  113. def int_riscv_xperm4 : BitManipGPRGPRIntrinsics;
  114. def int_riscv_xperm8 : BitManipGPRGPRIntrinsics;
  115. } // TargetPrefix = "riscv"
  116. //===----------------------------------------------------------------------===//
  117. // Vectors
  118. // The intrinsic does not have any operand that must be extended.
  119. defvar NoSplatOperand = 0xF;
  120. // The intrinsic does not have a VL operand.
  121. // (e.g., riscv_vmv_x_s and riscv_vfmv_f_s)
  122. defvar NoVLOperand = 0x1F;
  123. class RISCVVIntrinsic {
  124. // These intrinsics may accept illegal integer values in their llvm_any_ty
  125. // operand, so they have to be extended.
  126. Intrinsic IntrinsicID = !cast<Intrinsic>(NAME);
  127. bits<4> SplatOperand = NoSplatOperand;
  128. bits<5> VLOperand = NoVLOperand;
  129. }
  130. let TargetPrefix = "riscv" in {
  131. // We use anyint here but we only support XLen.
  132. def int_riscv_vsetvli : Intrinsic<[llvm_anyint_ty],
  133. /* AVL */ [LLVMMatchType<0>,
  134. /* VSEW */ LLVMMatchType<0>,
  135. /* VLMUL */ LLVMMatchType<0>],
  136. [IntrNoMem, IntrHasSideEffects,
  137. ImmArg<ArgIndex<1>>,
  138. ImmArg<ArgIndex<2>>]>;
  139. def int_riscv_vsetvlimax : Intrinsic<[llvm_anyint_ty],
  140. /* VSEW */ [LLVMMatchType<0>,
  141. /* VLMUL */ LLVMMatchType<0>],
  142. [IntrNoMem, IntrHasSideEffects,
  143. ImmArg<ArgIndex<0>>,
  144. ImmArg<ArgIndex<1>>]>;
  145. // Versions without side effects: better optimizable and usable if only the
  146. // returned vector length is important.
  147. def int_riscv_vsetvli_opt : Intrinsic<[llvm_anyint_ty],
  148. /* AVL */ [LLVMMatchType<0>,
  149. /* VSEW */ LLVMMatchType<0>,
  150. /* VLMUL */ LLVMMatchType<0>],
  151. [IntrNoMem,
  152. ImmArg<ArgIndex<1>>,
  153. ImmArg<ArgIndex<2>>]>;
  154. def int_riscv_vsetvlimax_opt : Intrinsic<[llvm_anyint_ty],
  155. /* VSEW */ [LLVMMatchType<0>,
  156. /* VLMUL */ LLVMMatchType<0>],
  157. [IntrNoMem,
  158. ImmArg<ArgIndex<0>>,
  159. ImmArg<ArgIndex<1>>]>;
  160. // For unit stride mask load
  161. // Input: (pointer, vl)
  162. class RISCVUSMLoad
  163. : Intrinsic<[llvm_anyvector_ty],
  164. [LLVMPointerType<LLVMMatchType<0>>,
  165. llvm_anyint_ty],
  166. [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
  167. let VLOperand = 1;
  168. }
  169. // For unit stride load
  170. // Input: (passthru, pointer, vl)
  171. class RISCVUSLoad
  172. : Intrinsic<[llvm_anyvector_ty],
  173. [LLVMMatchType<0>,
  174. LLVMPointerType<LLVMMatchType<0>>,
  175. llvm_anyint_ty],
  176. [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
  177. let VLOperand = 2;
  178. }
  179. // For unit stride fault-only-first load
  180. // Input: (passthru, pointer, vl)
  181. // Output: (data, vl)
  182. // NOTE: We model this with default memory properties since we model writing
  183. // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  184. class RISCVUSLoadFF
  185. : Intrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
  186. [LLVMMatchType<0>,
  187. LLVMPointerType<LLVMMatchType<0>>, LLVMMatchType<1>],
  188. [NoCapture<ArgIndex<1>>]>,
  189. RISCVVIntrinsic {
  190. let VLOperand = 2;
  191. }
  192. // For unit stride load with mask
  193. // Input: (maskedoff, pointer, mask, vl, ta)
  194. class RISCVUSLoadMask
  195. : Intrinsic<[llvm_anyvector_ty ],
  196. [LLVMMatchType<0>,
  197. LLVMPointerType<LLVMMatchType<0>>,
  198. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
  199. llvm_anyint_ty, LLVMMatchType<1>],
  200. [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>, IntrReadMem]>,
  201. RISCVVIntrinsic {
  202. let VLOperand = 3;
  203. }
  204. // For unit stride fault-only-first load with mask
  205. // Input: (maskedoff, pointer, mask, vl, ta)
  206. // Output: (data, vl)
  207. // NOTE: We model this with default memory properties since we model writing
  208. // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  209. class RISCVUSLoadFFMask
  210. : Intrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
  211. [LLVMMatchType<0>,
  212. LLVMPointerType<LLVMMatchType<0>>,
  213. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
  214. LLVMMatchType<1>, LLVMMatchType<1>],
  215. [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic {
  216. let VLOperand = 3;
  217. }
  218. // For strided load with passthru operand
  219. // Input: (passthru, pointer, stride, vl)
  220. class RISCVSLoad
  221. : Intrinsic<[llvm_anyvector_ty],
  222. [LLVMMatchType<0>,
  223. LLVMPointerType<LLVMMatchType<0>>,
  224. llvm_anyint_ty, LLVMMatchType<1>],
  225. [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
  226. let VLOperand = 3;
  227. }
  228. // For strided load with mask
  229. // Input: (maskedoff, pointer, stride, mask, vl, ta)
  230. class RISCVSLoadMask
  231. : Intrinsic<[llvm_anyvector_ty ],
  232. [LLVMMatchType<0>,
  233. LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
  234. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
  235. LLVMMatchType<1>],
  236. [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
  237. RISCVVIntrinsic {
  238. let VLOperand = 4;
  239. }
  240. // For indexed load with passthru operand
  241. // Input: (passthru, pointer, index, vl)
  242. class RISCVILoad
  243. : Intrinsic<[llvm_anyvector_ty],
  244. [LLVMMatchType<0>,
  245. LLVMPointerType<LLVMMatchType<0>>,
  246. llvm_anyvector_ty, llvm_anyint_ty],
  247. [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
  248. let VLOperand = 3;
  249. }
  250. // For indexed load with mask
  251. // Input: (maskedoff, pointer, index, mask, vl, ta)
  252. class RISCVILoadMask
  253. : Intrinsic<[llvm_anyvector_ty ],
  254. [LLVMMatchType<0>,
  255. LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
  256. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
  257. LLVMMatchType<2>],
  258. [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
  259. RISCVVIntrinsic {
  260. let VLOperand = 4;
  261. }
  262. // For unit stride store
  263. // Input: (vector_in, pointer, vl)
  264. class RISCVUSStore
  265. : Intrinsic<[],
  266. [llvm_anyvector_ty,
  267. LLVMPointerType<LLVMMatchType<0>>,
  268. llvm_anyint_ty],
  269. [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
  270. let VLOperand = 2;
  271. }
  272. // For unit stride store with mask
  273. // Input: (vector_in, pointer, mask, vl)
  274. class RISCVUSStoreMask
  275. : Intrinsic<[],
  276. [llvm_anyvector_ty,
  277. LLVMPointerType<LLVMMatchType<0>>,
  278. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
  279. llvm_anyint_ty],
  280. [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
  281. let VLOperand = 3;
  282. }
  283. // For strided store
  284. // Input: (vector_in, pointer, stride, vl)
  285. class RISCVSStore
  286. : Intrinsic<[],
  287. [llvm_anyvector_ty,
  288. LLVMPointerType<LLVMMatchType<0>>,
  289. llvm_anyint_ty, LLVMMatchType<1>],
  290. [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
  291. let VLOperand = 3;
  292. }
  293. // For stride store with mask
  294. // Input: (vector_in, pointer, stirde, mask, vl)
  295. class RISCVSStoreMask
  296. : Intrinsic<[],
  297. [llvm_anyvector_ty,
  298. LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
  299. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
  300. [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
  301. let VLOperand = 4;
  302. }
  303. // For indexed store
  304. // Input: (vector_in, pointer, index, vl)
  305. class RISCVIStore
  306. : Intrinsic<[],
  307. [llvm_anyvector_ty,
  308. LLVMPointerType<LLVMMatchType<0>>,
  309. llvm_anyint_ty, llvm_anyint_ty],
  310. [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
  311. let VLOperand = 3;
  312. }
  313. // For indexed store with mask
  314. // Input: (vector_in, pointer, index, mask, vl)
  315. class RISCVIStoreMask
  316. : Intrinsic<[],
  317. [llvm_anyvector_ty,
  318. LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
  319. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
  320. [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
  321. let VLOperand = 4;
  322. }
  323. // For destination vector type is the same as source vector.
  324. // Input: (vector_in, vl)
  325. class RISCVUnaryAANoMask
  326. : Intrinsic<[llvm_anyvector_ty],
  327. [LLVMMatchType<0>, llvm_anyint_ty],
  328. [IntrNoMem]>, RISCVVIntrinsic {
  329. let VLOperand = 1;
  330. }
  331. // For destination vector type is the same as first source vector (with mask).
  332. // Input: (vector_in, mask, vl, ta)
  333. class RISCVUnaryAAMask
  334. : Intrinsic<[llvm_anyvector_ty],
  335. [LLVMMatchType<0>, LLVMMatchType<0>,
  336. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
  337. LLVMMatchType<1>],
  338. [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
  339. let VLOperand = 3;
  340. }
  341. class RISCVUnaryAAMaskNoTA
  342. : Intrinsic<[llvm_anyvector_ty],
  343. [LLVMMatchType<0>, LLVMMatchType<0>,
  344. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
  345. [IntrNoMem]>, RISCVVIntrinsic {
  346. let VLOperand = 3;
  347. }
  348. // For destination vector type is the same as first and second source vector.
  349. // Input: (vector_in, vector_in, vl)
  350. class RISCVBinaryAAANoMask
  351. : Intrinsic<[llvm_anyvector_ty],
  352. [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
  353. [IntrNoMem]>, RISCVVIntrinsic {
  354. let VLOperand = 2;
  355. }
  356. // For destination vector type is the same as first and second source vector.
  357. // Input: (vector_in, int_vector_in, vl)
  358. class RISCVRGatherVVNoMask
  359. : Intrinsic<[llvm_anyvector_ty],
  360. [LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty],
  361. [IntrNoMem]>, RISCVVIntrinsic {
  362. let VLOperand = 2;
  363. }
  364. // For destination vector type is the same as first and second source vector.
  365. // Input: (vector_in, vector_in, int_vector_in, vl, ta)
  366. class RISCVRGatherVVMask
  367. : Intrinsic<[llvm_anyvector_ty],
  368. [LLVMMatchType<0>, LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>,
  369. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
  370. LLVMMatchType<1>],
  371. [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
  372. let VLOperand = 4;
  373. }
  374. // Input: (vector_in, int16_vector_in, vl)
  375. class RISCVRGatherEI16VVNoMask
  376. : Intrinsic<[llvm_anyvector_ty],
  377. [LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
  378. llvm_anyint_ty],
  379. [IntrNoMem]>, RISCVVIntrinsic {
  380. let VLOperand = 2;
  381. }
  382. // For destination vector type is the same as first and second source vector.
  383. // Input: (vector_in, vector_in, int16_vector_in, vl, ta)
  384. class RISCVRGatherEI16VVMask
  385. : Intrinsic<[llvm_anyvector_ty],
  386. [LLVMMatchType<0>, LLVMMatchType<0>,
  387. LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
  388. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
  389. LLVMMatchType<1>],
  390. [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
  391. let VLOperand = 4;
  392. }
  393. // For destination vector type is the same as first source vector, and the
  394. // second operand is XLen.
  395. // Input: (vector_in, xlen_in, vl)
  396. class RISCVGatherVXNoMask
  397. : Intrinsic<[llvm_anyvector_ty],
  398. [LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>],
  399. [IntrNoMem]>, RISCVVIntrinsic {
  400. let VLOperand = 2;
  401. }
  402. // For destination vector type is the same as first source vector (with mask).
  403. // Second operand is XLen.
  404. // Input: (maskedoff, vector_in, xlen_in, mask, vl, ta)
  405. class RISCVGatherVXMask
  406. : Intrinsic<[llvm_anyvector_ty],
  407. [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
  408. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
  409. LLVMMatchType<1>],
  410. [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
  411. let VLOperand = 4;
  412. }
  413. // For destination vector type is the same as first source vector.
  414. // Input: (vector_in, vector_in/scalar_in, vl)
  415. class RISCVBinaryAAXNoMask
  416. : Intrinsic<[llvm_anyvector_ty],
  417. [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
  418. [IntrNoMem]>, RISCVVIntrinsic {
  419. let SplatOperand = 1;
  420. let VLOperand = 2;
  421. }
  422. // For destination vector type is the same as first source vector (with mask).
  423. // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
  424. class RISCVBinaryAAXMask
  425. : Intrinsic<[llvm_anyvector_ty],
  426. [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
  427. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
  428. LLVMMatchType<2>],
  429. [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
  430. let SplatOperand = 2;
  431. let VLOperand = 4;
  432. }
  433. // For destination vector type is the same as first source vector. The
  434. // second source operand must match the destination type or be an XLen scalar.
  435. // Input: (vector_in, vector_in/scalar_in, vl)
  436. class RISCVBinaryAAShiftNoMask
  437. : Intrinsic<[llvm_anyvector_ty],
  438. [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
  439. [IntrNoMem]>, RISCVVIntrinsic {
  440. let VLOperand = 2;
  441. }
  442. // For destination vector type is the same as first source vector (with mask).
  443. // The second source operand must match the destination type or be an XLen scalar.
  444. // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
  445. class RISCVBinaryAAShiftMask
  446. : Intrinsic<[llvm_anyvector_ty],
  447. [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
  448. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
  449. LLVMMatchType<2>],
  450. [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
  451. let VLOperand = 4;
  452. }
  453. // For destination vector type is NOT the same as first source vector.
  454. // Input: (vector_in, vector_in/scalar_in, vl)
  455. class RISCVBinaryABXNoMask
  456. : Intrinsic<[llvm_anyvector_ty],
  457. [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
  458. [IntrNoMem]>, RISCVVIntrinsic {
  459. let SplatOperand = 1;
  460. let VLOperand = 2;
  461. }
  462. // For destination vector type is NOT the same as first source vector (with mask).
  463. // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
  464. class RISCVBinaryABXMask
  465. : Intrinsic<[llvm_anyvector_ty],
  466. [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
  467. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
  468. LLVMMatchType<3>],
  469. [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
  470. let SplatOperand = 2;
  471. let VLOperand = 4;
  472. }
  473. // For destination vector type is NOT the same as first source vector. The
  474. // second source operand must match the destination type or be an XLen scalar.
  475. // Input: (vector_in, vector_in/scalar_in, vl)
  476. class RISCVBinaryABShiftNoMask
  477. : Intrinsic<[llvm_anyvector_ty],
  478. [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
  479. [IntrNoMem]>, RISCVVIntrinsic {
  480. let VLOperand = 2;
  481. }
  482. // For destination vector type is NOT the same as first source vector (with mask).
  483. // The second source operand must match the destination type or be an XLen scalar.
  484. // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
  485. class RISCVBinaryABShiftMask
  486. : Intrinsic<[llvm_anyvector_ty],
  487. [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
  488. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
  489. LLVMMatchType<3>],
  490. [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
  491. let VLOperand = 4;
  492. }
  493. // For binary operations with V0 as input.
  494. // Input: (vector_in, vector_in/scalar_in, V0, vl)
  495. class RISCVBinaryWithV0
  496. : Intrinsic<[llvm_anyvector_ty],
  497. [LLVMMatchType<0>, llvm_any_ty,
  498. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
  499. llvm_anyint_ty],
  500. [IntrNoMem]>, RISCVVIntrinsic {
  501. let SplatOperand = 1;
  502. let VLOperand = 3;
  503. }
  504. // For binary operations with mask type output and V0 as input.
  505. // Output: (mask type output)
  506. // Input: (vector_in, vector_in/scalar_in, V0, vl)
  507. class RISCVBinaryMOutWithV0
  508. :Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
  509. [llvm_anyvector_ty, llvm_any_ty,
  510. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
  511. llvm_anyint_ty],
  512. [IntrNoMem]>, RISCVVIntrinsic {
  513. let SplatOperand = 1;
  514. let VLOperand = 3;
  515. }
  516. // For binary operations with mask type output.
  517. // Output: (mask type output)
  518. // Input: (vector_in, vector_in/scalar_in, vl)
  519. class RISCVBinaryMOut
  520. : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
  521. [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
  522. [IntrNoMem]>, RISCVVIntrinsic {
  523. let SplatOperand = 1;
  524. let VLOperand = 2;
  525. }
  526. // For binary operations with mask type output without mask.
  527. // Output: (mask type output)
  528. // Input: (vector_in, vector_in/scalar_in, vl)
  529. class RISCVCompareNoMask
  530. : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
  531. [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
  532. [IntrNoMem]>, RISCVVIntrinsic {
  533. let SplatOperand = 1;
  534. let VLOperand = 2;
  535. }
  536. // For binary operations with mask type output with mask.
  537. // Output: (mask type output)
  538. // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  539. class RISCVCompareMask
  540. : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
  541. [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
  542. llvm_anyvector_ty, llvm_any_ty,
  543. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
  544. [IntrNoMem]>, RISCVVIntrinsic {
  545. let SplatOperand = 2;
  546. let VLOperand = 4;
  547. }
  548. // For FP classify operations.
  549. // Output: (bit mask type output)
  550. // Input: (vector_in, vl)
  551. class RISCVClassifyNoMask
  552. : Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
  553. [llvm_anyvector_ty, llvm_anyint_ty],
  554. [IntrNoMem]>, RISCVVIntrinsic {
  555. let VLOperand = 1;
  556. }
  557. // For FP classify operations with mask.
  558. // Output: (bit mask type output)
  559. // Input: (maskedoff, vector_in, mask, vl)
  560. class RISCVClassifyMask
  561. : Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
  562. [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
  563. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
  564. [IntrNoMem]>, RISCVVIntrinsic {
  565. let VLOperand = 3;
  566. }
  567. // For Saturating binary operations.
  568. // The destination vector type is the same as first source vector.
  569. // Input: (vector_in, vector_in/scalar_in, vl)
  570. class RISCVSaturatingBinaryAAXNoMask
  571. : Intrinsic<[llvm_anyvector_ty],
  572. [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
  573. [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
  574. let SplatOperand = 1;
  575. let VLOperand = 2;
  576. }
  577. // For Saturating binary operations with mask.
  578. // The destination vector type is the same as first source vector.
  579. // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
  580. class RISCVSaturatingBinaryAAXMask
  581. : Intrinsic<[llvm_anyvector_ty],
  582. [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
  583. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
  584. LLVMMatchType<2>],
  585. [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
  586. let SplatOperand = 2;
  587. let VLOperand = 4;
  588. }
  589. // For Saturating binary operations.
  590. // The destination vector type is the same as first source vector.
  591. // The second source operand matches the destination type or is an XLen scalar.
  592. // Input: (vector_in, vector_in/scalar_in, vl)
  593. class RISCVSaturatingBinaryAAShiftNoMask
  594. : Intrinsic<[llvm_anyvector_ty],
  595. [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
  596. [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
  597. let VLOperand = 2;
  598. }
  599. // For Saturating binary operations with mask.
  600. // The destination vector type is the same as first source vector.
  601. // The second source operand matches the destination type or is an XLen scalar.
  602. // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
  603. class RISCVSaturatingBinaryAAShiftMask
  604. : Intrinsic<[llvm_anyvector_ty],
  605. [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
  606. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
  607. LLVMMatchType<2>],
  608. [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
  609. let VLOperand = 4;
  610. }
  611. // For Saturating binary operations.
  612. // The destination vector type is NOT the same as first source vector.
  613. // The second source operand matches the destination type or is an XLen scalar.
  614. // Input: (vector_in, vector_in/scalar_in, vl)
  615. class RISCVSaturatingBinaryABShiftNoMask
  616. : Intrinsic<[llvm_anyvector_ty],
  617. [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
  618. [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
  619. let VLOperand = 2;
  620. }
  621. // For Saturating binary operations with mask.
  622. // The destination vector type is NOT the same as first source vector (with mask).
  623. // The second source operand matches the destination type or is an XLen scalar.
  624. // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
  625. class RISCVSaturatingBinaryABShiftMask
  626. : Intrinsic<[llvm_anyvector_ty],
  627. [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
  628. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
  629. LLVMMatchType<3>],
  630. [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
  631. let VLOperand = 4;
  632. }
  633. class RISCVTernaryAAAXNoMask
  634. : Intrinsic<[llvm_anyvector_ty],
  635. [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
  636. LLVMMatchType<1>],
  637. [IntrNoMem]>, RISCVVIntrinsic {
  638. let VLOperand = 3;
  639. }
  640. class RISCVTernaryAAAXMask
  641. : Intrinsic<[llvm_anyvector_ty],
  642. [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
  643. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
  644. [IntrNoMem]>, RISCVVIntrinsic {
  645. let VLOperand = 4;
  646. }
  647. class RISCVTernaryAAXANoMask
  648. : Intrinsic<[llvm_anyvector_ty],
  649. [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
  650. llvm_anyint_ty],
  651. [IntrNoMem]>, RISCVVIntrinsic {
  652. let SplatOperand = 1;
  653. let VLOperand = 3;
  654. }
  655. class RISCVTernaryAAXAMask
  656. : Intrinsic<[llvm_anyvector_ty],
  657. [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
  658. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
  659. [IntrNoMem]>, RISCVVIntrinsic {
  660. let SplatOperand = 1;
  661. let VLOperand = 4;
  662. }
  663. class RISCVTernaryWideNoMask
  664. : Intrinsic< [llvm_anyvector_ty],
  665. [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
  666. llvm_anyint_ty],
  667. [IntrNoMem] >, RISCVVIntrinsic {
  668. let SplatOperand = 1;
  669. let VLOperand = 3;
  670. }
  671. class RISCVTernaryWideMask
  672. : Intrinsic< [llvm_anyvector_ty],
  673. [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
  674. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
  675. [IntrNoMem]>, RISCVVIntrinsic {
  676. let SplatOperand = 1;
  677. let VLOperand = 4;
  678. }
  679. // For Reduction ternary operations.
  680. // For destination vector type is the same as first and third source vector.
  681. // Input: (vector_in, vector_in, vector_in, vl)
  682. class RISCVReductionNoMask
  683. : Intrinsic<[llvm_anyvector_ty],
  684. [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
  685. llvm_anyint_ty],
  686. [IntrNoMem]>, RISCVVIntrinsic {
  687. let VLOperand = 3;
  688. }
  689. // For Reduction ternary operations with mask.
  690. // For destination vector type is the same as first and third source vector.
  691. // The mask type come from second source vector.
  692. // Input: (maskedoff, vector_in, vector_in, vector_in, mask, vl)
  693. class RISCVReductionMask
  694. : Intrinsic<[llvm_anyvector_ty],
  695. [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
  696. LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty],
  697. [IntrNoMem]>, RISCVVIntrinsic {
  698. let VLOperand = 4;
  699. }
  700. // For unary operations with scalar type output without mask
  701. // Output: (scalar type)
  702. // Input: (vector_in, vl)
  703. class RISCVMaskUnarySOutNoMask
  704. : Intrinsic<[LLVMMatchType<1>],
  705. [llvm_anyvector_ty, llvm_anyint_ty],
  706. [IntrNoMem]>, RISCVVIntrinsic {
  707. let VLOperand = 1;
  708. }
  709. // For unary operations with scalar type output with mask
  710. // Output: (scalar type)
  711. // Input: (vector_in, mask, vl)
  712. class RISCVMaskUnarySOutMask
  713. : Intrinsic<[LLVMMatchType<1>],
  714. [llvm_anyvector_ty, LLVMMatchType<0>, llvm_anyint_ty],
  715. [IntrNoMem]>, RISCVVIntrinsic {
  716. let VLOperand = 2;
  717. }
  718. // For destination vector type is NOT the same as source vector.
  719. // Input: (vector_in, vl)
  720. class RISCVUnaryABNoMask
  721. : Intrinsic<[llvm_anyvector_ty],
  722. [llvm_anyvector_ty, llvm_anyint_ty],
  723. [IntrNoMem]>, RISCVVIntrinsic {
  724. let VLOperand = 1;
  725. }
  726. // For destination vector type is NOT the same as source vector (with mask).
  727. // Input: (maskedoff, vector_in, mask, vl, ta)
  728. class RISCVUnaryABMask
  729. : Intrinsic<[llvm_anyvector_ty],
  730. [LLVMMatchType<0>, llvm_anyvector_ty,
  731. LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
  732. llvm_anyint_ty, LLVMMatchType<2>],
  733. [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
  734. let VLOperand = 3;
  735. }
  736. // For unary operations with the same vector type in/out without mask
  737. // Output: (vector)
  738. // Input: (vector_in, vl)
  739. class RISCVUnaryNoMask
  740. : Intrinsic<[llvm_anyvector_ty],
  741. [LLVMMatchType<0>, llvm_anyint_ty],
  742. [IntrNoMem]>, RISCVVIntrinsic {
  743. let VLOperand = 1;
  744. }
  745. // For mask unary operations with mask type in/out with mask
  746. // Output: (mask type output)
  747. // Input: (mask type maskedoff, mask type vector_in, mask, vl)
  748. class RISCVMaskUnaryMOutMask
  749. : Intrinsic<[llvm_anyint_ty],
  750. [LLVMMatchType<0>, LLVMMatchType<0>,
  751. LLVMMatchType<0>, llvm_anyint_ty],
  752. [IntrNoMem]>, RISCVVIntrinsic {
  753. let VLOperand = 3;
  754. }
  755. // Output: (vector)
  756. // Input: (vl)
  757. class RISCVNullaryIntrinsic
  758. : Intrinsic<[llvm_anyvector_ty],
  759. [llvm_anyint_ty],
  760. [IntrNoMem]>, RISCVVIntrinsic {
  761. let VLOperand = 0;
  762. }
  763. // For Conversion unary operations.
  764. // Input: (vector_in, vl)
  765. class RISCVConversionNoMask
  766. : Intrinsic<[llvm_anyvector_ty],
  767. [llvm_anyvector_ty, llvm_anyint_ty],
  768. [IntrNoMem]>, RISCVVIntrinsic {
  769. let VLOperand = 1;
  770. }
  771. // For Conversion unary operations with mask.
  772. // Input: (maskedoff, vector_in, mask, vl, ta)
  773. class RISCVConversionMask
  774. : Intrinsic<[llvm_anyvector_ty],
  775. [LLVMMatchType<0>, llvm_anyvector_ty,
  776. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
  777. LLVMMatchType<2>],
  778. [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
  779. let VLOperand = 3;
  780. }
  781. // For unit stride segment load
  782. // Input: (pointer, vl)
  783. class RISCVUSSegLoad<int nf>
  784. : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
  785. !add(nf, -1))),
  786. [LLVMPointerToElt<0>, llvm_anyint_ty],
  787. [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
  788. let VLOperand = 1;
  789. }
  790. // For unit stride segment load with mask
  791. // Input: (maskedoff, pointer, mask, vl, ta)
  792. class RISCVUSSegLoadMask<int nf>
  793. : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
  794. !add(nf, -1))),
  795. !listconcat(!listsplat(LLVMMatchType<0>, nf),
  796. [LLVMPointerToElt<0>,
  797. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
  798. llvm_anyint_ty, LLVMMatchType<1>]),
  799. [ImmArg<ArgIndex<!add(nf, 3)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
  800. RISCVVIntrinsic {
  801. let VLOperand = !add(nf, 2);
  802. }
  803. // For unit stride fault-only-first segment load
  804. // Input: (pointer, vl)
  805. // Output: (data, vl)
  806. // NOTE: We model this with default memory properties since we model writing
  807. // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  808. class RISCVUSSegLoadFF<int nf>
  809. : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
  810. !add(nf, -1)), [llvm_anyint_ty]),
  811. [LLVMPointerToElt<0>, LLVMMatchType<1>],
  812. [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic {
  813. let VLOperand = 1;
  814. }
  815. // For unit stride fault-only-first segment load with mask
  816. // Input: (maskedoff, pointer, mask, vl, ta)
  817. // Output: (data, vl)
  818. // NOTE: We model this with default memory properties since we model writing
  819. // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  820. class RISCVUSSegLoadFFMask<int nf>
  821. : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
  822. !add(nf, -1)), [llvm_anyint_ty]),
  823. !listconcat(!listsplat(LLVMMatchType<0>, nf),
  824. [LLVMPointerToElt<0>,
  825. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
  826. LLVMMatchType<1>, LLVMMatchType<1>]),
  827. [ImmArg<ArgIndex<!add(nf, 3)>>, NoCapture<ArgIndex<nf>>]>,
  828. RISCVVIntrinsic {
  829. let VLOperand = !add(nf, 2);
  830. }
  831. // For stride segment load
  832. // Input: (pointer, offset, vl)
  833. class RISCVSSegLoad<int nf>
  834. : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
  835. !add(nf, -1))),
  836. [LLVMPointerToElt<0>, llvm_anyint_ty, LLVMMatchType<1>],
  837. [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
  838. let VLOperand = 2;
  839. }
  840. // For stride segment load with mask
  841. // Input: (maskedoff, pointer, offset, mask, vl, ta)
  842. class RISCVSSegLoadMask<int nf>
  843. : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
  844. !add(nf, -1))),
  845. !listconcat(!listsplat(LLVMMatchType<0>, nf),
  846. [LLVMPointerToElt<0>,
  847. llvm_anyint_ty,
  848. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
  849. LLVMMatchType<1>, LLVMMatchType<1>]),
  850. [ImmArg<ArgIndex<!add(nf, 4)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
  851. RISCVVIntrinsic {
  852. let VLOperand = !add(nf, 3);
  853. }
  854. // For indexed segment load
  855. // Input: (pointer, index, vl)
  856. class RISCVISegLoad<int nf>
  857. : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
  858. !add(nf, -1))),
  859. [LLVMPointerToElt<0>, llvm_anyvector_ty, llvm_anyint_ty],
  860. [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
  861. let VLOperand = 2;
  862. }
  863. // For indexed segment load with mask
  864. // Input: (maskedoff, pointer, index, mask, vl, ta)
  865. class RISCVISegLoadMask<int nf>
  866. : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
  867. !add(nf, -1))),
  868. !listconcat(!listsplat(LLVMMatchType<0>, nf),
  869. [LLVMPointerToElt<0>,
  870. llvm_anyvector_ty,
  871. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
  872. llvm_anyint_ty, LLVMMatchType<2>]),
  873. [ImmArg<ArgIndex<!add(nf, 4)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
  874. RISCVVIntrinsic {
  875. let VLOperand = !add(nf, 3);
  876. }
  877. // For unit stride segment store
  878. // Input: (value, pointer, vl)
  879. class RISCVUSSegStore<int nf>
  880. : Intrinsic<[],
  881. !listconcat([llvm_anyvector_ty],
  882. !listsplat(LLVMMatchType<0>, !add(nf, -1)),
  883. [LLVMPointerToElt<0>, llvm_anyint_ty]),
  884. [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
  885. let VLOperand = !add(nf, 1);
  886. }
  887. // For unit stride segment store with mask
  888. // Input: (value, pointer, mask, vl)
  889. class RISCVUSSegStoreMask<int nf>
  890. : Intrinsic<[],
  891. !listconcat([llvm_anyvector_ty],
  892. !listsplat(LLVMMatchType<0>, !add(nf, -1)),
  893. [LLVMPointerToElt<0>,
  894. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
  895. llvm_anyint_ty]),
  896. [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
  897. let VLOperand = !add(nf, 2);
  898. }
  899. // For stride segment store
  900. // Input: (value, pointer, offset, vl)
  901. class RISCVSSegStore<int nf>
  902. : Intrinsic<[],
  903. !listconcat([llvm_anyvector_ty],
  904. !listsplat(LLVMMatchType<0>, !add(nf, -1)),
  905. [LLVMPointerToElt<0>, llvm_anyint_ty,
  906. LLVMMatchType<1>]),
  907. [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
  908. let VLOperand = !add(nf, 2);
  909. }
  910. // For stride segment store with mask
  911. // Input: (value, pointer, offset, mask, vl)
  912. class RISCVSSegStoreMask<int nf>
  913. : Intrinsic<[],
  914. !listconcat([llvm_anyvector_ty],
  915. !listsplat(LLVMMatchType<0>, !add(nf, -1)),
  916. [LLVMPointerToElt<0>, llvm_anyint_ty,
  917. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
  918. LLVMMatchType<1>]),
  919. [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
  920. let VLOperand = !add(nf, 3);
  921. }
  922. // For indexed segment store
  923. // Input: (value, pointer, offset, vl)
  924. class RISCVISegStore<int nf>
  925. : Intrinsic<[],
  926. !listconcat([llvm_anyvector_ty],
  927. !listsplat(LLVMMatchType<0>, !add(nf, -1)),
  928. [LLVMPointerToElt<0>, llvm_anyvector_ty,
  929. llvm_anyint_ty]),
  930. [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
  931. let VLOperand = !add(nf, 2);
  932. }
  933. // For indexed segment store with mask
  934. // Input: (value, pointer, offset, mask, vl)
  935. class RISCVISegStoreMask<int nf>
  936. : Intrinsic<[],
  937. !listconcat([llvm_anyvector_ty],
  938. !listsplat(LLVMMatchType<0>, !add(nf, -1)),
  939. [LLVMPointerToElt<0>, llvm_anyvector_ty,
  940. LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
  941. llvm_anyint_ty]),
  942. [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
  943. let VLOperand = !add(nf, 3);
  944. }
  945. multiclass RISCVUSLoad {
  946. def "int_riscv_" # NAME : RISCVUSLoad;
  947. def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
  948. }
  949. multiclass RISCVUSLoadFF {
  950. def "int_riscv_" # NAME : RISCVUSLoadFF;
  951. def "int_riscv_" # NAME # "_mask" : RISCVUSLoadFFMask;
  952. }
  953. multiclass RISCVSLoad {
  954. def "int_riscv_" # NAME : RISCVSLoad;
  955. def "int_riscv_" # NAME # "_mask" : RISCVSLoadMask;
  956. }
  957. multiclass RISCVILoad {
  958. def "int_riscv_" # NAME : RISCVILoad;
  959. def "int_riscv_" # NAME # "_mask" : RISCVILoadMask;
  960. }
  961. multiclass RISCVUSStore {
  962. def "int_riscv_" # NAME : RISCVUSStore;
  963. def "int_riscv_" # NAME # "_mask" : RISCVUSStoreMask;
  964. }
  965. multiclass RISCVSStore {
  966. def "int_riscv_" # NAME : RISCVSStore;
  967. def "int_riscv_" # NAME # "_mask" : RISCVSStoreMask;
  968. }
  969. multiclass RISCVIStore {
  970. def "int_riscv_" # NAME : RISCVIStore;
  971. def "int_riscv_" # NAME # "_mask" : RISCVIStoreMask;
  972. }
  973. multiclass RISCVUnaryAA {
  974. def "int_riscv_" # NAME : RISCVUnaryAANoMask;
  975. def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMask;
  976. }
  977. multiclass RISCVUnaryAB {
  978. def "int_riscv_" # NAME : RISCVUnaryABNoMask;
  979. def "int_riscv_" # NAME # "_mask" : RISCVUnaryABMask;
  980. }
  981. // AAX means the destination type(A) is the same as the first source
  982. // type(A). X means any type for the second source operand.
  983. multiclass RISCVBinaryAAX {
  984. def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
  985. def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMask;
  986. }
  987. // Like RISCVBinaryAAX, but the second operand is used a shift amount so it
  988. // must be a vector or an XLen scalar.
  989. multiclass RISCVBinaryAAShift {
  990. def "int_riscv_" # NAME : RISCVBinaryAAShiftNoMask;
  991. def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAShiftMask;
  992. }
  993. multiclass RISCVRGatherVV {
  994. def "int_riscv_" # NAME : RISCVRGatherVVNoMask;
  995. def "int_riscv_" # NAME # "_mask" : RISCVRGatherVVMask;
  996. }
  997. multiclass RISCVRGatherVX {
  998. def "int_riscv_" # NAME : RISCVGatherVXNoMask;
  999. def "int_riscv_" # NAME # "_mask" : RISCVGatherVXMask;
  1000. }
  1001. multiclass RISCVRGatherEI16VV {
  1002. def "int_riscv_" # NAME : RISCVRGatherEI16VVNoMask;
  1003. def "int_riscv_" # NAME # "_mask" : RISCVRGatherEI16VVMask;
  1004. }
  1005. // ABX means the destination type(A) is different from the first source
  1006. // type(B). X means any type for the second source operand.
  1007. multiclass RISCVBinaryABX {
  1008. def "int_riscv_" # NAME : RISCVBinaryABXNoMask;
  1009. def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMask;
  1010. }
  1011. // Like RISCVBinaryABX, but the second operand is used a shift amount so it
  1012. // must be a vector or an XLen scalar.
  1013. multiclass RISCVBinaryABShift {
  1014. def "int_riscv_" # NAME : RISCVBinaryABShiftNoMask;
  1015. def "int_riscv_" # NAME # "_mask" : RISCVBinaryABShiftMask;
  1016. }
  1017. multiclass RISCVBinaryWithV0 {
  1018. def "int_riscv_" # NAME : RISCVBinaryWithV0;
  1019. }
  1020. multiclass RISCVBinaryMaskOutWithV0 {
  1021. def "int_riscv_" # NAME : RISCVBinaryMOutWithV0;
  1022. }
  1023. multiclass RISCVBinaryMaskOut {
  1024. def "int_riscv_" # NAME : RISCVBinaryMOut;
  1025. }
  1026. multiclass RISCVSaturatingBinaryAAX {
  1027. def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXNoMask;
  1028. def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMask;
  1029. }
  1030. multiclass RISCVSaturatingBinaryAAShift {
  1031. def "int_riscv_" # NAME : RISCVSaturatingBinaryAAShiftNoMask;
  1032. def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAShiftMask;
  1033. }
  1034. multiclass RISCVSaturatingBinaryABShift {
  1035. def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftNoMask;
  1036. def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMask;
  1037. }
  1038. multiclass RISCVTernaryAAAX {
  1039. def "int_riscv_" # NAME : RISCVTernaryAAAXNoMask;
  1040. def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAAXMask;
  1041. }
  1042. multiclass RISCVTernaryAAXA {
  1043. def "int_riscv_" # NAME : RISCVTernaryAAXANoMask;
  1044. def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMask;
  1045. }
  1046. multiclass RISCVCompare {
  1047. def "int_riscv_" # NAME : RISCVCompareNoMask;
  1048. def "int_riscv_" # NAME # "_mask" : RISCVCompareMask;
  1049. }
  1050. multiclass RISCVClassify {
  1051. def "int_riscv_" # NAME : RISCVClassifyNoMask;
  1052. def "int_riscv_" # NAME # "_mask" : RISCVClassifyMask;
  1053. }
  1054. multiclass RISCVTernaryWide {
  1055. def "int_riscv_" # NAME : RISCVTernaryWideNoMask;
  1056. def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMask;
  1057. }
  1058. multiclass RISCVReduction {
  1059. def "int_riscv_" # NAME : RISCVReductionNoMask;
  1060. def "int_riscv_" # NAME # "_mask" : RISCVReductionMask;
  1061. }
  1062. multiclass RISCVMaskUnarySOut {
  1063. def "int_riscv_" # NAME : RISCVMaskUnarySOutNoMask;
  1064. def "int_riscv_" # NAME # "_mask" : RISCVMaskUnarySOutMask;
  1065. }
  1066. multiclass RISCVMaskUnaryMOut {
  1067. def "int_riscv_" # NAME : RISCVUnaryNoMask;
  1068. def "int_riscv_" # NAME # "_mask" : RISCVMaskUnaryMOutMask;
  1069. }
  1070. multiclass RISCVConversion {
  1071. def "int_riscv_" #NAME :RISCVConversionNoMask;
  1072. def "int_riscv_" # NAME # "_mask" : RISCVConversionMask;
  1073. }
  1074. multiclass RISCVUSSegLoad<int nf> {
  1075. def "int_riscv_" # NAME : RISCVUSSegLoad<nf>;
  1076. def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMask<nf>;
  1077. }
  1078. multiclass RISCVUSSegLoadFF<int nf> {
  1079. def "int_riscv_" # NAME : RISCVUSSegLoadFF<nf>;
  1080. def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadFFMask<nf>;
  1081. }
  1082. multiclass RISCVSSegLoad<int nf> {
  1083. def "int_riscv_" # NAME : RISCVSSegLoad<nf>;
  1084. def "int_riscv_" # NAME # "_mask" : RISCVSSegLoadMask<nf>;
  1085. }
  1086. multiclass RISCVISegLoad<int nf> {
  1087. def "int_riscv_" # NAME : RISCVISegLoad<nf>;
  1088. def "int_riscv_" # NAME # "_mask" : RISCVISegLoadMask<nf>;
  1089. }
  1090. multiclass RISCVUSSegStore<int nf> {
  1091. def "int_riscv_" # NAME : RISCVUSSegStore<nf>;
  1092. def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMask<nf>;
  1093. }
  1094. multiclass RISCVSSegStore<int nf> {
  1095. def "int_riscv_" # NAME : RISCVSSegStore<nf>;
  1096. def "int_riscv_" # NAME # "_mask" : RISCVSSegStoreMask<nf>;
  1097. }
  1098. multiclass RISCVISegStore<int nf> {
  1099. def "int_riscv_" # NAME : RISCVISegStore<nf>;
  1100. def "int_riscv_" # NAME # "_mask" : RISCVISegStoreMask<nf>;
  1101. }
  1102. defm vle : RISCVUSLoad;
  1103. defm vleff : RISCVUSLoadFF;
  1104. defm vse : RISCVUSStore;
  1105. defm vlse: RISCVSLoad;
  1106. defm vsse: RISCVSStore;
  1107. defm vluxei : RISCVILoad;
  1108. defm vloxei : RISCVILoad;
  1109. defm vsoxei : RISCVIStore;
  1110. defm vsuxei : RISCVIStore;
  1111. def int_riscv_vlm : RISCVUSMLoad;
  1112. def int_riscv_vsm : RISCVUSStore;
  1113. defm vadd : RISCVBinaryAAX;
  1114. defm vsub : RISCVBinaryAAX;
  1115. defm vrsub : RISCVBinaryAAX;
  1116. defm vwaddu : RISCVBinaryABX;
  1117. defm vwadd : RISCVBinaryABX;
  1118. defm vwaddu_w : RISCVBinaryAAX;
  1119. defm vwadd_w : RISCVBinaryAAX;
  1120. defm vwsubu : RISCVBinaryABX;
  1121. defm vwsub : RISCVBinaryABX;
  1122. defm vwsubu_w : RISCVBinaryAAX;
  1123. defm vwsub_w : RISCVBinaryAAX;
  1124. defm vzext : RISCVUnaryAB;
  1125. defm vsext : RISCVUnaryAB;
  1126. defm vadc : RISCVBinaryWithV0;
  1127. defm vmadc_carry_in : RISCVBinaryMaskOutWithV0;
  1128. defm vmadc : RISCVBinaryMaskOut;
  1129. defm vsbc : RISCVBinaryWithV0;
  1130. defm vmsbc_borrow_in : RISCVBinaryMaskOutWithV0;
  1131. defm vmsbc : RISCVBinaryMaskOut;
  1132. defm vand : RISCVBinaryAAX;
  1133. defm vor : RISCVBinaryAAX;
  1134. defm vxor : RISCVBinaryAAX;
  1135. defm vsll : RISCVBinaryAAShift;
  1136. defm vsrl : RISCVBinaryAAShift;
  1137. defm vsra : RISCVBinaryAAShift;
  1138. defm vnsrl : RISCVBinaryABShift;
  1139. defm vnsra : RISCVBinaryABShift;
  1140. defm vmseq : RISCVCompare;
  1141. defm vmsne : RISCVCompare;
  1142. defm vmsltu : RISCVCompare;
  1143. defm vmslt : RISCVCompare;
  1144. defm vmsleu : RISCVCompare;
  1145. defm vmsle : RISCVCompare;
  1146. defm vmsgtu : RISCVCompare;
  1147. defm vmsgt : RISCVCompare;
  1148. defm vmsgeu : RISCVCompare;
  1149. defm vmsge : RISCVCompare;
  1150. defm vminu : RISCVBinaryAAX;
  1151. defm vmin : RISCVBinaryAAX;
  1152. defm vmaxu : RISCVBinaryAAX;
  1153. defm vmax : RISCVBinaryAAX;
  1154. defm vmul : RISCVBinaryAAX;
  1155. defm vmulh : RISCVBinaryAAX;
  1156. defm vmulhu : RISCVBinaryAAX;
  1157. defm vmulhsu : RISCVBinaryAAX;
  1158. defm vdivu : RISCVBinaryAAX;
  1159. defm vdiv : RISCVBinaryAAX;
  1160. defm vremu : RISCVBinaryAAX;
  1161. defm vrem : RISCVBinaryAAX;
  1162. defm vwmul : RISCVBinaryABX;
  1163. defm vwmulu : RISCVBinaryABX;
  1164. defm vwmulsu : RISCVBinaryABX;
  1165. defm vmacc : RISCVTernaryAAXA;
  1166. defm vnmsac : RISCVTernaryAAXA;
  1167. defm vmadd : RISCVTernaryAAXA;
  1168. defm vnmsub : RISCVTernaryAAXA;
  1169. defm vwmaccu : RISCVTernaryWide;
  1170. defm vwmacc : RISCVTernaryWide;
  1171. defm vwmaccus : RISCVTernaryWide;
  1172. defm vwmaccsu : RISCVTernaryWide;
  1173. defm vfadd : RISCVBinaryAAX;
  1174. defm vfsub : RISCVBinaryAAX;
  1175. defm vfrsub : RISCVBinaryAAX;
  1176. defm vfwadd : RISCVBinaryABX;
  1177. defm vfwsub : RISCVBinaryABX;
  1178. defm vfwadd_w : RISCVBinaryAAX;
  1179. defm vfwsub_w : RISCVBinaryAAX;
  1180. defm vsaddu : RISCVSaturatingBinaryAAX;
  1181. defm vsadd : RISCVSaturatingBinaryAAX;
  1182. defm vssubu : RISCVSaturatingBinaryAAX;
  1183. defm vssub : RISCVSaturatingBinaryAAX;
  1184. defm vmerge : RISCVBinaryWithV0;
  1185. def int_riscv_vmv_v_v : Intrinsic<[llvm_anyvector_ty],
  1186. [LLVMMatchType<0>, llvm_anyint_ty],
  1187. [IntrNoMem]>, RISCVVIntrinsic {
  1188. let VLOperand = 1;
  1189. }
  1190. def int_riscv_vmv_v_x : Intrinsic<[llvm_anyint_ty],
  1191. [LLVMVectorElementType<0>, llvm_anyint_ty],
  1192. [IntrNoMem]>, RISCVVIntrinsic {
  1193. let VLOperand = 1;
  1194. }
  1195. def int_riscv_vfmv_v_f : Intrinsic<[llvm_anyfloat_ty],
  1196. [LLVMVectorElementType<0>, llvm_anyint_ty],
  1197. [IntrNoMem]>, RISCVVIntrinsic {
  1198. let VLOperand = 1;
  1199. }
  1200. def int_riscv_vmv_x_s : Intrinsic<[LLVMVectorElementType<0>],
  1201. [llvm_anyint_ty],
  1202. [IntrNoMem]>, RISCVVIntrinsic;
  1203. def int_riscv_vmv_s_x : Intrinsic<[llvm_anyint_ty],
  1204. [LLVMMatchType<0>, LLVMVectorElementType<0>,
  1205. llvm_anyint_ty],
  1206. [IntrNoMem]>, RISCVVIntrinsic {
  1207. let VLOperand = 2;
  1208. }
  1209. def int_riscv_vfmv_f_s : Intrinsic<[LLVMVectorElementType<0>],
  1210. [llvm_anyfloat_ty],
  1211. [IntrNoMem]>, RISCVVIntrinsic;
  1212. def int_riscv_vfmv_s_f : Intrinsic<[llvm_anyfloat_ty],
  1213. [LLVMMatchType<0>, LLVMVectorElementType<0>,
  1214. llvm_anyint_ty],
  1215. [IntrNoMem]>, RISCVVIntrinsic {
  1216. let VLOperand = 2;
  1217. }
  defm vfmul : RISCVBinaryAAX;
  defm vfdiv : RISCVBinaryAAX;
  defm vfrdiv : RISCVBinaryAAX;

  defm vfwmul : RISCVBinaryABX;

  defm vfmacc : RISCVTernaryAAXA;
  defm vfnmacc : RISCVTernaryAAXA;
  defm vfmsac : RISCVTernaryAAXA;
  defm vfnmsac : RISCVTernaryAAXA;
  defm vfmadd : RISCVTernaryAAXA;
  defm vfnmadd : RISCVTernaryAAXA;
  defm vfmsub : RISCVTernaryAAXA;
  defm vfnmsub : RISCVTernaryAAXA;

  defm vfwmacc : RISCVTernaryWide;
  defm vfwnmacc : RISCVTernaryWide;
  defm vfwmsac : RISCVTernaryWide;
  defm vfwnmsac : RISCVTernaryWide;

  defm vfsqrt : RISCVUnaryAA;
  defm vfrsqrt7 : RISCVUnaryAA;
  defm vfrec7 : RISCVUnaryAA;

  defm vfmin : RISCVBinaryAAX;
  defm vfmax : RISCVBinaryAAX;

  defm vfsgnj : RISCVBinaryAAX;
  defm vfsgnjn : RISCVBinaryAAX;
  defm vfsgnjx : RISCVBinaryAAX;

  defm vfclass : RISCVClassify;

  defm vfmerge : RISCVBinaryWithV0;

  defm vslideup : RISCVTernaryAAAX;
  defm vslidedown : RISCVTernaryAAAX;

  defm vslide1up : RISCVBinaryAAX;
  defm vslide1down : RISCVBinaryAAX;
  defm vfslide1up : RISCVBinaryAAX;
  defm vfslide1down : RISCVBinaryAAX;

  defm vrgather_vv : RISCVRGatherVV;
  defm vrgather_vx : RISCVRGatherVX;
  defm vrgatherei16_vv : RISCVRGatherEI16VV;

  def "int_riscv_vcompress" : RISCVUnaryAAMaskNoTA;

  defm vaaddu : RISCVSaturatingBinaryAAX;
  defm vaadd : RISCVSaturatingBinaryAAX;
  defm vasubu : RISCVSaturatingBinaryAAX;
  defm vasub : RISCVSaturatingBinaryAAX;

  defm vsmul : RISCVSaturatingBinaryAAX;

  defm vssrl : RISCVSaturatingBinaryAAShift;
  defm vssra : RISCVSaturatingBinaryAAShift;

  defm vnclipu : RISCVSaturatingBinaryABShift;
  defm vnclip : RISCVSaturatingBinaryABShift;

  defm vmfeq : RISCVCompare;
  defm vmfne : RISCVCompare;
  defm vmflt : RISCVCompare;
  defm vmfle : RISCVCompare;
  defm vmfgt : RISCVCompare;
  defm vmfge : RISCVCompare;

  defm vredsum : RISCVReduction;
  defm vredand : RISCVReduction;
  defm vredor : RISCVReduction;
  defm vredxor : RISCVReduction;
  defm vredminu : RISCVReduction;
  defm vredmin : RISCVReduction;
  defm vredmaxu : RISCVReduction;
  defm vredmax : RISCVReduction;

  defm vwredsumu : RISCVReduction;
  defm vwredsum : RISCVReduction;

  defm vfredosum : RISCVReduction;
  defm vfredusum : RISCVReduction;
  defm vfredmin : RISCVReduction;
  defm vfredmax : RISCVReduction;

  defm vfwredusum : RISCVReduction;
  defm vfwredosum : RISCVReduction;

  def int_riscv_vmand: RISCVBinaryAAANoMask;
  def int_riscv_vmnand: RISCVBinaryAAANoMask;
  def int_riscv_vmandn: RISCVBinaryAAANoMask;
  def int_riscv_vmxor: RISCVBinaryAAANoMask;
  def int_riscv_vmor: RISCVBinaryAAANoMask;
  def int_riscv_vmnor: RISCVBinaryAAANoMask;
  def int_riscv_vmorn: RISCVBinaryAAANoMask;
  def int_riscv_vmxnor: RISCVBinaryAAANoMask;
  def int_riscv_vmclr : RISCVNullaryIntrinsic;
  def int_riscv_vmset : RISCVNullaryIntrinsic;

  defm vcpop : RISCVMaskUnarySOut;
  defm vfirst : RISCVMaskUnarySOut;
  defm vmsbf : RISCVMaskUnaryMOut;
  defm vmsof : RISCVMaskUnaryMOut;
  defm vmsif : RISCVMaskUnaryMOut;

  defm vfcvt_xu_f_v : RISCVConversion;
  defm vfcvt_x_f_v : RISCVConversion;
  defm vfcvt_rtz_xu_f_v : RISCVConversion;
  defm vfcvt_rtz_x_f_v : RISCVConversion;
  defm vfcvt_f_xu_v : RISCVConversion;
  defm vfcvt_f_x_v : RISCVConversion;

  defm vfwcvt_f_xu_v : RISCVConversion;
  defm vfwcvt_f_x_v : RISCVConversion;
  defm vfwcvt_xu_f_v : RISCVConversion;
  defm vfwcvt_x_f_v : RISCVConversion;
  defm vfwcvt_rtz_xu_f_v : RISCVConversion;
  defm vfwcvt_rtz_x_f_v : RISCVConversion;
  defm vfwcvt_f_f_v : RISCVConversion;

  defm vfncvt_f_xu_w : RISCVConversion;
  defm vfncvt_f_x_w : RISCVConversion;
  defm vfncvt_xu_f_w : RISCVConversion;
  defm vfncvt_x_f_w : RISCVConversion;
  defm vfncvt_rtz_xu_f_w : RISCVConversion;
  defm vfncvt_rtz_x_f_w : RISCVConversion;
  defm vfncvt_f_f_w : RISCVConversion;
  defm vfncvt_rod_f_f_w : RISCVConversion;

  // Output: (vector)
  // Input: (mask type input, vl)
  def int_riscv_viota : Intrinsic<[llvm_anyvector_ty],
                                  [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                   llvm_anyint_ty],
                                  [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
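  // For example (illustrative types only; the mangled suffixes follow the
  // overloaded result and VL types chosen by the caller):
  //   %r = call <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32.i64(
  //            <vscale x 4 x i1> %mask, i64 %vl)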

  // Output: (vector)
  // Input: (maskedoff, mask type vector_in, mask, vl)
  def int_riscv_viota_mask : Intrinsic<[llvm_anyvector_ty],
                                       [LLVMMatchType<0>,
                                        LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                        LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                        llvm_anyint_ty],
                                       [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Output: (vector)
  // Input: (vl)
  def int_riscv_vid : RISCVNullaryIntrinsic;
  // Output: (vector)
  // Input: (maskedoff, mask, vl)
  def int_riscv_vid_mask : Intrinsic<[llvm_anyvector_ty],
                                     [LLVMMatchType<0>,
                                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                      llvm_anyint_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
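  // For example, a masked vid might appear in IR as (illustrative types only):
  //   %r = call <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32.i64(
  //            <vscale x 4 x i32> %maskedoff, <vscale x 4 x i1> %mask, i64 %vl)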

  foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
    defm vlseg # nf : RISCVUSSegLoad<nf>;
    defm vlseg # nf # ff : RISCVUSSegLoadFF<nf>;
    defm vlsseg # nf : RISCVSSegLoad<nf>;
    defm vloxseg # nf : RISCVISegLoad<nf>;
    defm vluxseg # nf : RISCVISegLoad<nf>;
    defm vsseg # nf : RISCVUSSegStore<nf>;
    defm vssseg # nf : RISCVSSegStore<nf>;
    defm vsoxseg # nf : RISCVISegStore<nf>;
    defm vsuxseg # nf : RISCVISegStore<nf>;
  }
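  // The loop above instantiates the segment load/store intrinsics for every
  // supported field count, e.g. vlseg2..vlseg8, vlseg2ff..vlseg8ff,
  // vlsseg2..vlsseg8, and the matching indexed forms and stores; <nf> is the
  // number of fields (registers) read or written per segment.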

  // Strided loads/stores for fixed vectors.
  def int_riscv_masked_strided_load
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyptr_ty,
                     llvm_anyint_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>;
  def int_riscv_masked_strided_store
        : Intrinsic<[],
                    [llvm_anyvector_ty, llvm_anyptr_ty,
                     llvm_anyint_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>;
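  // Illustrative IR use of the strided load (hypothetical fixed-vector and
  // pointer types; the mangled suffixes follow the overloaded result, pointer,
  // and stride types):
  //   %v = call <4 x i32> @llvm.riscv.masked.strided.load.v4i32.p0i32.i64(
  //            <4 x i32> %passthru, i32* %base, i64 %stride, <4 x i1> %mask)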
} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Scalar Cryptography
//
// These intrinsics will lower directly into the corresponding instructions
// added by the scalar cryptography extension, if the extension is present.
let TargetPrefix = "riscv" in {

class ScalarCryptoGprIntrinsicAny
    : Intrinsic<[llvm_anyint_ty],
                [LLVMMatchType<0>],
                [IntrNoMem, IntrSpeculatable]>;

class ScalarCryptoByteSelect32
    : Intrinsic<[llvm_i32_ty],
                [llvm_i32_ty, llvm_i32_ty, llvm_i8_ty],
                [IntrNoMem, IntrWillReturn, IntrSpeculatable,
                 ImmArg<ArgIndex<2>>]>;

class ScalarCryptoGprGprIntrinsic32
    : Intrinsic<[llvm_i32_ty],
                [llvm_i32_ty, llvm_i32_ty],
                [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;

class ScalarCryptoGprGprIntrinsic64
    : Intrinsic<[llvm_i64_ty],
                [llvm_i64_ty, llvm_i64_ty],
                [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;

class ScalarCryptoGprIntrinsic64
    : Intrinsic<[llvm_i64_ty],
                [llvm_i64_ty],
                [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;

class ScalarCryptoByteSelectAny
    : Intrinsic<[llvm_anyint_ty],
                [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i8_ty],
                [IntrNoMem, IntrSpeculatable, IntrWillReturn,
                 ImmArg<ArgIndex<2>>, Returned<ArgIndex<0>>]>;
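
// Example (illustrative only): the byte-select classes above yield intrinsics
// such as llvm.riscv.aes32esi below, whose third operand must be a constant
// byte-select index because of ImmArg<ArgIndex<2>>, e.g. in IR:
//   %r = call i32 @llvm.riscv.aes32esi(i32 %rs1, i32 %rs2, i8 3)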

// Zknd
def int_riscv_aes32dsi  : ScalarCryptoByteSelect32;
def int_riscv_aes32dsmi : ScalarCryptoByteSelect32;

def int_riscv_aes64ds   : ScalarCryptoGprGprIntrinsic64;
def int_riscv_aes64dsm  : ScalarCryptoGprGprIntrinsic64;

def int_riscv_aes64im   : ScalarCryptoGprIntrinsic64;

// Zkne
def int_riscv_aes32esi  : ScalarCryptoByteSelect32;
def int_riscv_aes32esmi : ScalarCryptoByteSelect32;

def int_riscv_aes64es   : ScalarCryptoGprGprIntrinsic64;
def int_riscv_aes64esm  : ScalarCryptoGprGprIntrinsic64;

// Zknd & Zkne
def int_riscv_aes64ks2  : ScalarCryptoGprGprIntrinsic64;
def int_riscv_aes64ks1i : Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty],
                                    [IntrNoMem, IntrSpeculatable,
                                     IntrWillReturn, ImmArg<ArgIndex<1>>]>;

// Zknh
def int_riscv_sha256sig0 : ScalarCryptoGprIntrinsicAny;
def int_riscv_sha256sig1 : ScalarCryptoGprIntrinsicAny;
def int_riscv_sha256sum0 : ScalarCryptoGprIntrinsicAny;
def int_riscv_sha256sum1 : ScalarCryptoGprIntrinsicAny;

def int_riscv_sha512sig0l : ScalarCryptoGprGprIntrinsic32;
def int_riscv_sha512sig0h : ScalarCryptoGprGprIntrinsic32;
def int_riscv_sha512sig1l : ScalarCryptoGprGprIntrinsic32;
def int_riscv_sha512sig1h : ScalarCryptoGprGprIntrinsic32;
def int_riscv_sha512sum0r : ScalarCryptoGprGprIntrinsic32;
def int_riscv_sha512sum1r : ScalarCryptoGprGprIntrinsic32;

def int_riscv_sha512sig0 : ScalarCryptoGprIntrinsic64;
def int_riscv_sha512sig1 : ScalarCryptoGprIntrinsic64;
def int_riscv_sha512sum0 : ScalarCryptoGprIntrinsic64;
def int_riscv_sha512sum1 : ScalarCryptoGprIntrinsic64;

// Zksed
def int_riscv_sm4ks : ScalarCryptoByteSelectAny;
def int_riscv_sm4ed : ScalarCryptoByteSelectAny;

// Zksh
def int_riscv_sm3p0 : ScalarCryptoGprIntrinsicAny;
def int_riscv_sm3p1 : ScalarCryptoGprIntrinsicAny;

} // TargetPrefix = "riscv"