//===- IntrinsicsRISCV.td - Defines RISCV intrinsics -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the RISCV-specific intrinsics.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Atomics

// Atomic Intrinsics have multiple versions for different access widths, which
// all follow one of the following signatures (depending on how many arguments
// they require). We carefully instantiate only specific versions of these for
// specific integer widths, rather than using `llvm_anyint_ty`.
//
// In fact, as these intrinsics take `llvm_anyptr_ty`, the given names are the
// canonical names, and the intrinsics used in the code will have a name
// suffixed with the pointer type they are specialised for (denoted `<p>` in
// the names below), in order to avoid type conflicts.

let TargetPrefix = "riscv" in {

// T @llvm.<name>.T.<p>(any*, T, T, T imm);
class MaskedAtomicRMWFourArg<LLVMType itype>
    : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype],
                [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>]>;
// T @llvm.<name>.T.<p>(any*, T, T, T, T imm);
class MaskedAtomicRMWFiveArg<LLVMType itype>
    : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype, itype],
                [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<4>>]>;

// We define 32-bit and 64-bit variants of the above, where T stands for i32
// or i64 respectively:
multiclass MaskedAtomicRMWFourArgIntrinsics {
  // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32 imm);
  def _i32 : MaskedAtomicRMWFourArg<llvm_i32_ty>;
  // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64 imm);
  def _i64 : MaskedAtomicRMWFourArg<llvm_i64_ty>;
}

multiclass MaskedAtomicRMWFiveArgIntrinsics {
  // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32, i32 imm);
  def _i32 : MaskedAtomicRMWFiveArg<llvm_i32_ty>;
  // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64, i64 imm);
  def _i64 : MaskedAtomicRMWFiveArg<llvm_i64_ty>;
}

// These intrinsics are intended only for internal compiler use (i.e. as
// part of the AtomicExpand pass via the emitMaskedAtomic*Intrinsic hooks).
// Their names and semantics could change in the future.

// @llvm.riscv.masked.atomicrmw.*.{i32,i64}.<p>(
//   ptr addr, ixlen oparg, ixlen mask, ixlenimm ordering)
defm int_riscv_masked_atomicrmw_xchg : MaskedAtomicRMWFourArgIntrinsics;
defm int_riscv_masked_atomicrmw_add : MaskedAtomicRMWFourArgIntrinsics;
defm int_riscv_masked_atomicrmw_sub : MaskedAtomicRMWFourArgIntrinsics;
defm int_riscv_masked_atomicrmw_nand : MaskedAtomicRMWFourArgIntrinsics;
defm int_riscv_masked_atomicrmw_umax : MaskedAtomicRMWFourArgIntrinsics;
defm int_riscv_masked_atomicrmw_umin : MaskedAtomicRMWFourArgIntrinsics;
// Signed min and max need an extra operand to do sign extension with.
// @llvm.riscv.masked.atomicrmw.{max,min}.{i32,i64}.<p>(
//   ptr addr, ixlen oparg, ixlen mask, ixlen shamt, ixlenimm ordering)
defm int_riscv_masked_atomicrmw_max : MaskedAtomicRMWFiveArgIntrinsics;
defm int_riscv_masked_atomicrmw_min : MaskedAtomicRMWFiveArgIntrinsics;
// @llvm.riscv.masked.cmpxchg.{i32,i64}.<p>(
//   ptr addr, ixlen cmpval, ixlen newval, ixlen mask, ixlenimm ordering)
defm int_riscv_masked_cmpxchg : MaskedAtomicRMWFiveArgIntrinsics;

} // TargetPrefix = "riscv"
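
// Illustrative sketch (not part of this file): after AtomicExpand narrows a
// part-word RMW on RV32, a call to one of the intrinsics above might look
// like the following, with hypothetical value names and `<p>` standing for
// the pointer-type suffix described above. The trailing immediate encodes
// the atomic ordering.
//
//   %old = call i32 @llvm.riscv.masked.atomicrmw.add.i32.<p>(
//              i32* %aligned.addr, i32 %shifted.val, i32 %mask, i32 5)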

//===----------------------------------------------------------------------===//
// Bitmanip (Bit Manipulation) Extension

let TargetPrefix = "riscv" in {

class BitManipGPRIntrinsics
    : DefaultAttrsIntrinsic<[llvm_any_ty],
                            [LLVMMatchType<0>],
                            [IntrNoMem, IntrSpeculatable]>;
class BitManipGPRGPRIntrinsics
    : DefaultAttrsIntrinsic<[llvm_any_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>],
                            [IntrNoMem, IntrSpeculatable]>;

// Zbb
def int_riscv_orc_b : BitManipGPRIntrinsics;

// Zbc or Zbkc
def int_riscv_clmul  : BitManipGPRGPRIntrinsics;
def int_riscv_clmulh : BitManipGPRGPRIntrinsics;

// Zbc
def int_riscv_clmulr : BitManipGPRGPRIntrinsics;

// Zbkb
def int_riscv_brev8 : BitManipGPRIntrinsics;
def int_riscv_zip   : BitManipGPRIntrinsics;
def int_riscv_unzip : BitManipGPRIntrinsics;

// Zbkx
def int_riscv_xperm4 : BitManipGPRGPRIntrinsics;
def int_riscv_xperm8 : BitManipGPRGPRIntrinsics;

} // TargetPrefix = "riscv"
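
// Illustrative sketch (not part of this file): these classes overload on the
// GPR width via llvm_any_ty, so on RV64 a carry-less multiply pair might be
// emitted as (value names here are hypothetical):
//
//   %lo = call i64 @llvm.riscv.clmul.i64(i64 %a, i64 %b)
//   %hi = call i64 @llvm.riscv.clmulh.i64(i64 %a, i64 %b)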

//===----------------------------------------------------------------------===//
// Vectors

// The intrinsic does not have any operand that must be extended.
defvar NoScalarOperand = 0xF;

// The intrinsic does not have a VL operand.
// (e.g., riscv_vmv_x_s and riscv_vfmv_f_s)
defvar NoVLOperand = 0x1F;

class RISCVVIntrinsic {
  // These intrinsics may accept illegal integer values in their llvm_any_ty
  // operand, so they have to be extended.
  Intrinsic IntrinsicID = !cast<Intrinsic>(NAME);
  bits<4> ScalarOperand = NoScalarOperand;
  bits<5> VLOperand = NoVLOperand;
}

let TargetPrefix = "riscv" in {

// We use anyint here, but we only support XLen.
def int_riscv_vsetvli : Intrinsic<[llvm_anyint_ty],
                        /* AVL */  [LLVMMatchType<0>,
                        /* VSEW */  LLVMMatchType<0>,
                        /* VLMUL */ LLVMMatchType<0>],
                                   [IntrNoMem, IntrHasSideEffects,
                                    ImmArg<ArgIndex<1>>,
                                    ImmArg<ArgIndex<2>>]>;
def int_riscv_vsetvlimax : Intrinsic<[llvm_anyint_ty],
                           /* VSEW */ [LLVMMatchType<0>,
                           /* VLMUL */ LLVMMatchType<0>],
                                      [IntrNoMem, IntrHasSideEffects,
                                       ImmArg<ArgIndex<0>>,
                                       ImmArg<ArgIndex<1>>]>;

// Versions without side effects: better optimizable, and usable when only
// the returned vector length matters.
def int_riscv_vsetvli_opt : Intrinsic<[llvm_anyint_ty],
                            /* AVL */  [LLVMMatchType<0>,
                            /* VSEW */  LLVMMatchType<0>,
                            /* VLMUL */ LLVMMatchType<0>],
                                       [IntrNoMem,
                                        ImmArg<ArgIndex<1>>,
                                        ImmArg<ArgIndex<2>>]>;
def int_riscv_vsetvlimax_opt : Intrinsic<[llvm_anyint_ty],
                               /* VSEW */ [LLVMMatchType<0>,
                               /* VLMUL */ LLVMMatchType<0>],
                                          [IntrNoMem,
                                           ImmArg<ArgIndex<0>>,
                                           ImmArg<ArgIndex<1>>]>;
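
// Illustrative sketch (not part of this file; the encodings are assumed to
// follow the RVV spec's vtype layout): VSEW is passed as log2(SEW/8)
// (e32 -> 2) and VLMUL as its vtype encoding (m1 -> 0, m2 -> 1, ...), so
// requesting "e32, m1" for %avl elements on RV64 might look like:
//
//   %vl = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 2, i64 0)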

// For unit stride mask load
// Input: (pointer, vl)
class RISCVUSMLoad
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMPointerType<LLVMMatchType<0>>,
                             llvm_anyint_ty],
                            [NoCapture<ArgIndex<0>>, IntrReadMem]>,
      RISCVVIntrinsic {
  let VLOperand = 1;
}

// For unit stride load
// Input: (passthru, pointer, vl)
class RISCVUSLoad
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMPointerType<LLVMMatchType<0>>,
                             llvm_anyint_ty],
                            [NoCapture<ArgIndex<1>>, IntrReadMem]>,
      RISCVVIntrinsic {
  let VLOperand = 2;
}
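
// Illustrative sketch (not part of this file): instantiated for a concrete
// vector type, the unit-stride load above might be called as follows (value
// names are hypothetical; passing undef as the passthru leaves tail elements
// unconstrained):
//
//   %v = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(
//            <vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, i64 %vl)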

// For unit stride fault-only-first load
// Input: (passthru, pointer, vl)
// Output: (data, vl)
// NOTE: We model this with default memory properties since we model writing
// VL as a side effect. Using IntrReadMem together with IntrHasSideEffects
// does not work.
class RISCVUSLoadFF
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
                            [LLVMMatchType<0>,
                             LLVMPointerType<LLVMMatchType<0>>,
                             LLVMMatchType<1>],
                            [NoCapture<ArgIndex<1>>]>,
      RISCVVIntrinsic {
  let VLOperand = 2;
}

// For unit stride load with mask
// Input: (maskedoff, pointer, mask, vl, policy)
class RISCVUSLoadMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMPointerType<LLVMMatchType<0>>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<1>],
                            [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>,
                             IntrReadMem]>,
      RISCVVIntrinsic {
  let VLOperand = 3;
}

// For unit stride fault-only-first load with mask
// Input: (maskedoff, pointer, mask, vl, policy)
// Output: (data, vl)
// NOTE: We model this with default memory properties since we model writing
// VL as a side effect. Using IntrReadMem together with IntrHasSideEffects
// does not work.
class RISCVUSLoadFFMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
                            [LLVMMatchType<0>,
                             LLVMPointerType<LLVMMatchType<0>>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             LLVMMatchType<1>, LLVMMatchType<1>],
                            [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>,
      RISCVVIntrinsic {
  let VLOperand = 3;
}
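
// Illustrative sketch (not part of this file): fault-only-first loads return
// both the data and the updated VL as an aggregate, so a use might look like
// (hypothetical names):
//
//   %agg  = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32.i64(
//               <vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, i64 %avl)
//   %data = extractvalue { <vscale x 2 x i32>, i64 } %agg, 0
//   %vl   = extractvalue { <vscale x 2 x i32>, i64 } %agg, 1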

// For strided load with passthru operand
// Input: (passthru, pointer, stride, vl)
class RISCVSLoad
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMPointerType<LLVMMatchType<0>>,
                             llvm_anyint_ty, LLVMMatchType<1>],
                            [NoCapture<ArgIndex<1>>, IntrReadMem]>,
      RISCVVIntrinsic {
  let VLOperand = 3;
}

// For strided load with mask
// Input: (maskedoff, pointer, stride, mask, vl, policy)
class RISCVSLoadMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             LLVMMatchType<1>, LLVMMatchType<1>],
                            [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>,
                             IntrReadMem]>,
      RISCVVIntrinsic {
  let VLOperand = 4;
}

// For indexed load with passthru operand
// Input: (passthru, pointer, index, vl)
class RISCVILoad
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMPointerType<LLVMMatchType<0>>,
                             llvm_anyvector_ty, llvm_anyint_ty],
                            [NoCapture<ArgIndex<1>>, IntrReadMem]>,
      RISCVVIntrinsic {
  let VLOperand = 3;
}

// For indexed load with mask
// Input: (maskedoff, pointer, index, mask, vl, policy)
class RISCVILoadMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMPointerType<LLVMMatchType<0>>,
                             llvm_anyvector_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>,
                             IntrReadMem]>,
      RISCVVIntrinsic {
  let VLOperand = 4;
}

// For unit stride store
// Input: (vector_in, pointer, vl)
class RISCVUSStore
    : DefaultAttrsIntrinsic<[],
                            [llvm_anyvector_ty,
                             LLVMPointerType<LLVMMatchType<0>>,
                             llvm_anyint_ty],
                            [NoCapture<ArgIndex<1>>, IntrWriteMem]>,
      RISCVVIntrinsic {
  let VLOperand = 2;
}

// For unit stride store with mask
// Input: (vector_in, pointer, mask, vl)
class RISCVUSStoreMasked
    : DefaultAttrsIntrinsic<[],
                            [llvm_anyvector_ty,
                             LLVMPointerType<LLVMMatchType<0>>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty],
                            [NoCapture<ArgIndex<1>>, IntrWriteMem]>,
      RISCVVIntrinsic {
  let VLOperand = 3;
}

// For strided store
// Input: (vector_in, pointer, stride, vl)
class RISCVSStore
    : DefaultAttrsIntrinsic<[],
                            [llvm_anyvector_ty,
                             LLVMPointerType<LLVMMatchType<0>>,
                             llvm_anyint_ty, LLVMMatchType<1>],
                            [NoCapture<ArgIndex<1>>, IntrWriteMem]>,
      RISCVVIntrinsic {
  let VLOperand = 3;
}

// For strided store with mask
// Input: (vector_in, pointer, stride, mask, vl)
class RISCVSStoreMasked
    : DefaultAttrsIntrinsic<[],
                            [llvm_anyvector_ty,
                             LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             LLVMMatchType<1>],
                            [NoCapture<ArgIndex<1>>, IntrWriteMem]>,
      RISCVVIntrinsic {
  let VLOperand = 4;
}

// For indexed store
// Input: (vector_in, pointer, index, vl)
class RISCVIStore
    : DefaultAttrsIntrinsic<[],
                            [llvm_anyvector_ty,
                             LLVMPointerType<LLVMMatchType<0>>,
                             llvm_anyint_ty, llvm_anyint_ty],
                            [NoCapture<ArgIndex<1>>, IntrWriteMem]>,
      RISCVVIntrinsic {
  let VLOperand = 3;
}

// For indexed store with mask
// Input: (vector_in, pointer, index, mask, vl)
class RISCVIStoreMasked
    : DefaultAttrsIntrinsic<[],
                            [llvm_anyvector_ty,
                             LLVMPointerType<LLVMMatchType<0>>,
                             llvm_anyvector_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty],
                            [NoCapture<ArgIndex<1>>, IntrWriteMem]>,
      RISCVVIntrinsic {
  let VLOperand = 4;
}
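
// Illustrative sketch (not part of this file): a unit-stride store built from
// RISCVUSStore has no results and might be called as (hypothetical names):
//
//   call void @llvm.riscv.vse.nxv2i32.i64(
//       <vscale x 2 x i32> %v, <vscale x 2 x i32>* %p, i64 %vl)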

// The destination vector type is the same as the source vector type.
// Input: (passthru, vector_in, vl)
class RISCVUnaryAAUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 2;
}

// The destination vector type is the same as the first source vector type
// (with mask).
// Input: (vector_in, vector_in, mask, vl, policy)
class RISCVUnaryAAMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<1>],
                            [ImmArg<ArgIndex<4>>, IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 3;
}

// Input: (passthru, vector_in, vector_in, mask, vl)
class RISCVCompress
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 3;
}

// The destination vector type is the same as the first and second source
// vector types.
// Input: (vector_in, vector_in, vl)
class RISCVBinaryAAAUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 2;
}

// The destination vector type is the same as the first and second source
// vector types.
// Input: (passthru, vector_in, int_vector_in, vl)
class RISCVRGatherVVUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>,
                             LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 3;
}

// The destination vector type is the same as the first and second source
// vector types.
// Input: (vector_in, vector_in, int_vector_in, vl, policy)
class RISCVRGatherVVMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>,
                             LLVMVectorOfBitcastsToInt<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<1>],
                            [ImmArg<ArgIndex<5>>, IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 4;
}

// Input: (passthru, vector_in, int16_vector_in, vl)
class RISCVRGatherEI16VVUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 3;
}

// The destination vector type is the same as the first and second source
// vector types.
// Input: (vector_in, vector_in, int16_vector_in, vl, policy)
class RISCVRGatherEI16VVMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<1>],
                            [ImmArg<ArgIndex<5>>, IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 4;
}

// The destination vector type is the same as the first source vector type,
// and the second operand is XLen.
// Input: (passthru, vector_in, xlen_in, vl)
class RISCVGatherVXUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>,
                             llvm_anyint_ty, LLVMMatchType<1>],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 3;
}

// The destination vector type is the same as the first source vector type
// (with mask). The second operand is XLen.
// Input: (maskedoff, vector_in, xlen_in, mask, vl, policy)
class RISCVGatherVXMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>,
                             llvm_anyint_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             LLVMMatchType<1>, LLVMMatchType<1>],
                            [ImmArg<ArgIndex<5>>, IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 4;
}

// The destination vector type is the same as the first source vector type.
// Input: (passthru, vector_in, vector_in/scalar_in, vl)
class RISCVBinaryAAXUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
  let ScalarOperand = 2;
  let VLOperand = 3;
}

// The destination vector type is the same as the first source vector type
// (with mask).
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVBinaryAAXMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<5>>, IntrNoMem]>,
      RISCVVIntrinsic {
  let ScalarOperand = 2;
  let VLOperand = 4;
}
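
// Illustrative sketch (not part of this file): a masked binary op built from
// RISCVBinaryAAXMasked takes maskedoff, both sources, the mask, VL, and a
// policy immediate. A hypothetical use, where the final immediate selects
// the tail/mask policy:
//
//   %r = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64(
//            <vscale x 2 x i32> %maskedoff, <vscale x 2 x i32> %a,
//            <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i64 %vl, i64 1)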

// The destination vector type is the same as the first source vector type.
// The second source operand must match the destination type or be an XLen
// scalar.
// Input: (passthru, vector_in, vector_in/scalar_in, vl)
class RISCVBinaryAAShiftUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 3;
}

// The destination vector type is the same as the first source vector type
// (with mask). The second source operand must match the destination type or
// be an XLen scalar.
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVBinaryAAShiftMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<5>>, IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 4;
}

// The destination vector type is NOT the same as the first source vector
// type.
// Input: (passthru, vector_in, vector_in/scalar_in, vl)
class RISCVBinaryABXUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
  let ScalarOperand = 2;
  let VLOperand = 3;
}

// The destination vector type is NOT the same as the first source vector
// type (with mask).
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVBinaryABXMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<3>],
                            [ImmArg<ArgIndex<5>>, IntrNoMem]>,
      RISCVVIntrinsic {
  let ScalarOperand = 2;
  let VLOperand = 4;
}

// The destination vector type is NOT the same as the first source vector
// type. The second source operand must match the destination type or be an
// XLen scalar.
// Input: (passthru, vector_in, vector_in/scalar_in, vl)
class RISCVBinaryABShiftUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 3;
}

// The destination vector type is NOT the same as the first source vector
// type (with mask). The second source operand must match the destination
// type or be an XLen scalar.
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVBinaryABShiftMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<3>],
                            [ImmArg<ArgIndex<5>>, IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 4;
}

// For binary operations with V0 as input.
// Input: (passthru, vector_in, vector_in/scalar_in, V0, vl)
class RISCVBinaryWithV0
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
  let ScalarOperand = 2;
  let VLOperand = 4;
}

// For binary operations with mask type output and V0 as input.
// Output: (mask type output)
// Input: (vector_in, vector_in/scalar_in, V0, vl)
class RISCVBinaryMOutWithV0
    : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                            [llvm_anyvector_ty, llvm_any_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
  let ScalarOperand = 1;
  let VLOperand = 3;
}

// For binary operations with mask type output.
// Output: (mask type output)
// Input: (vector_in, vector_in/scalar_in, vl)
class RISCVBinaryMOut
    : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                            [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
  let ScalarOperand = 1;
  let VLOperand = 2;
}

// For compare operations with mask type output, without mask.
// Output: (mask type output)
// Input: (vector_in, vector_in/scalar_in, vl)
class RISCVCompareUnMasked
    : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                            [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
  let ScalarOperand = 1;
  let VLOperand = 2;
}

// For compare operations with mask type output, with mask.
// Output: (mask type output)
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
class RISCVCompareMasked
    : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                            [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyvector_ty, llvm_any_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
  let ScalarOperand = 2;
  let VLOperand = 4;
}
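
// Illustrative sketch (not part of this file): compares produce an i1 mask
// vector with the same element count as the source, e.g. (hypothetical
// names):
//
//   %m = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.nxv2i32.i64(
//            <vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i64 %vl)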

// For FP classify operations.
// Output: (bit mask type output)
// Input: (passthru, vector_in, vl)
class RISCVClassifyUnMasked
    : DefaultAttrsIntrinsic<[LLVMVectorOfBitcastsToInt<0>],
                            [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 2;
}

// For FP classify operations with mask.
// Output: (bit mask type output)
// Input: (maskedoff, vector_in, mask, vl, policy)
class RISCVClassifyMasked
    : DefaultAttrsIntrinsic<[LLVMVectorOfBitcastsToInt<0>],
                            [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<1>],
                            [IntrNoMem, ImmArg<ArgIndex<4>>]>,
      RISCVVIntrinsic {
  let VLOperand = 3;
}

// For saturating binary operations.
// The destination vector type is the same as the first source vector type.
// Input: (passthru, vector_in, vector_in/scalar_in, vl)
class RISCVSaturatingBinaryAAXUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                             llvm_anyint_ty],
                            [IntrNoMem, IntrHasSideEffects]>,
      RISCVVIntrinsic {
  let ScalarOperand = 2;
  let VLOperand = 3;
}

// For saturating binary operations with mask.
// The destination vector type is the same as the first source vector type.
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVSaturatingBinaryAAXMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<5>>, IntrNoMem,
                             IntrHasSideEffects]>,
      RISCVVIntrinsic {
  let ScalarOperand = 2;
  let VLOperand = 4;
}

// For saturating binary operations.
// The destination vector type is the same as the first source vector type.
// The second source operand matches the destination type or is an XLen
// scalar.
// Input: (passthru, vector_in, vector_in/scalar_in, vl)
class RISCVSaturatingBinaryAAShiftUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                             llvm_anyint_ty],
                            [IntrNoMem, IntrHasSideEffects]>,
      RISCVVIntrinsic {
  let VLOperand = 3;
}

// For saturating binary operations with mask.
// The destination vector type is the same as the first source vector type.
// The second source operand matches the destination type or is an XLen
// scalar.
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVSaturatingBinaryAAShiftMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<5>>, IntrNoMem,
                             IntrHasSideEffects]>,
      RISCVVIntrinsic {
  let VLOperand = 4;
}

// For saturating binary operations.
// The destination vector type is NOT the same as the first source vector
// type. The second source operand matches the destination type or is an
// XLen scalar.
// Input: (passthru, vector_in, vector_in/scalar_in, vl)
class RISCVSaturatingBinaryABShiftUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                             llvm_anyint_ty],
                            [IntrNoMem, IntrHasSideEffects]>,
      RISCVVIntrinsic {
  let VLOperand = 3;
}

// For saturating binary operations with mask.
// The destination vector type is NOT the same as the first source vector
// type. The second source operand matches the destination type or is an
// XLen scalar.
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVSaturatingBinaryABShiftMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<3>],
                            [ImmArg<ArgIndex<5>>, IntrNoMem,
                             IntrHasSideEffects]>,
      RISCVVIntrinsic {
  let VLOperand = 4;
}

// Input: (vector_in, vector_in, scalar_in, vl, policy)
class RVVSlideUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>,
                             llvm_anyint_ty,
                             LLVMMatchType<1>, LLVMMatchType<1>],
                            [ImmArg<ArgIndex<4>>, IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 3;
}

// Input: (vector_in, vector_in, scalar_in, mask, vl, policy)
class RVVSlideMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>,
                             llvm_anyint_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             LLVMMatchType<1>, LLVMMatchType<1>],
                            [ImmArg<ArgIndex<5>>, IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 4;
}

// Unmasked vector multiply-add operations; the first operand cannot be
// undef.
// Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
class RISCVTernaryAAXAUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<4>>, IntrNoMem]>,
      RISCVVIntrinsic {
  let ScalarOperand = 1;
  let VLOperand = 3;
}

// Masked vector multiply-add operations; the first operand cannot be undef.
// Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
class RISCVTernaryAAXAMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<5>>, IntrNoMem]>,
      RISCVVIntrinsic {
  let ScalarOperand = 1;
  let VLOperand = 4;
}

// Unmasked widening vector multiply-add operations; the first operand cannot
// be undef.
// Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
class RISCVTernaryWideUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                             llvm_anyint_ty, LLVMMatchType<3>],
                            [ImmArg<ArgIndex<4>>, IntrNoMem]>,
      RISCVVIntrinsic {
  let ScalarOperand = 1;
  let VLOperand = 3;
}

// Masked widening vector multiply-add operations; the first operand cannot
// be undef.
// Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
class RISCVTernaryWideMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<3>],
                            [ImmArg<ArgIndex<5>>, IntrNoMem]>,
      RISCVVIntrinsic {
  let ScalarOperand = 1;
  let VLOperand = 4;
}
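
// Illustrative sketch (not part of this file): for the multiply-add classes
// above, the first operand doubles as the accumulator (vd = vs1 * vs2 + vd),
// which is why it cannot be undef. A hypothetical unmasked use, with the
// trailing immediate as the policy:
//
//   %acc.next = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64(
//                   <vscale x 2 x i32> %acc, <vscale x 2 x i32> %a,
//                   <vscale x 2 x i32> %b, i64 %vl, i64 0)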

// For reduction ternary operations.
// The destination vector type is the same as the first and third source
// vector types.
// Input: (vector_in, vector_in, vector_in, vl)
class RISCVReductionUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty,
                             LLVMMatchType<0>, llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 3;
}

// For reduction ternary operations with mask.
// The destination vector type is the same as the first and third source
// vector types. The mask type comes from the second source vector.
// Input: (maskedoff, vector_in, vector_in, mask, vl)
class RISCVReductionMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty,
                             LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 4;
}

// For unary operations with scalar type output, without mask
// Output: (scalar type)
// Input: (vector_in, vl)
class RISCVMaskedUnarySOutUnMasked
    : DefaultAttrsIntrinsic<[LLVMMatchType<1>],
                            [llvm_anyvector_ty, llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 1;
}

// For unary operations with scalar type output, with mask
// Output: (scalar type)
// Input: (vector_in, mask, vl)
class RISCVMaskedUnarySOutMasked
    : DefaultAttrsIntrinsic<[LLVMMatchType<1>],
                            [llvm_anyvector_ty, LLVMMatchType<0>,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 2;
}

// The destination vector type is NOT the same as the source vector type.
// Input: (passthru, vector_in, vl)
class RISCVUnaryABUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 2;
}

// The destination vector type is NOT the same as the source vector type
// (with mask).
// Input: (maskedoff, vector_in, mask, vl, policy)
class RISCVUnaryABMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty,
                             LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<4>>, IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 3;
}

// For unary operations with the same vector type in/out, without mask
// Output: (vector)
// Input: (vector_in, vl)
class RISCVUnaryUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 1;
}

// For mask unary operations with mask type in/out, with mask
// Output: (mask type output)
// Input: (mask type maskedoff, mask type vector_in, mask, vl)
class RISCVMaskedUnaryMOutMasked
    : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>,
                             LLVMMatchType<0>, llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 3;
}

// Output: (vector)
// Input: (vl)
class RISCVNullaryIntrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [llvm_anyint_ty], [IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 0;
}

// Output: (vector)
// Input: (passthru, vl)
class RISCVID
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 1;
}

// For conversion unary operations.
// Input: (passthru, vector_in, vl)
class RISCVConversionUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty,
                             llvm_anyint_ty],
                            [IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 2;
}

// For conversion unary operations with mask.
// Input: (maskedoff, vector_in, mask, vl, policy)
class RISCVConversionMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<4>>, IntrNoMem]>,
      RISCVVIntrinsic {
  let VLOperand = 3;
}

// For unit stride segment load
// Input: (passthru, pointer, vl)
class RISCVUSSegLoad<int nf>
    : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty],
                                        !listsplat(LLVMMatchType<0>,
                                                   !add(nf, -1))),
                            !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                        [LLVMPointerToElt<0>, llvm_anyint_ty]),
                            [NoCapture<ArgIndex<nf>>, IntrReadMem]>,
      RISCVVIntrinsic {
  let VLOperand = !add(nf, 1);
}

// For unit stride segment load with mask
// Input: (maskedoff, pointer, mask, vl, policy)
class RISCVUSSegLoadMasked<int nf>
    : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty],
                                        !listsplat(LLVMMatchType<0>,
                                                   !add(nf, -1))),
                            !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                        [LLVMPointerToElt<0>,
                                         LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                         llvm_anyint_ty, LLVMMatchType<1>]),
                            [ImmArg<ArgIndex<!add(nf, 3)>>,
                             NoCapture<ArgIndex<nf>>, IntrReadMem]>,
      RISCVVIntrinsic {
  let VLOperand = !add(nf, 2);
}
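
// Illustrative sketch (not part of this file): with nf = 2, RISCVUSSegLoad
// yields an intrinsic returning two vectors as an aggregate, e.g.
// (hypothetical names):
//
//   %agg = call { <vscale x 2 x i32>, <vscale x 2 x i32> }
//              @llvm.riscv.vlseg2.nxv2i32.i64(
//                  <vscale x 2 x i32> undef, <vscale x 2 x i32> undef,
//                  i32* %base, i64 %vl)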

// For unit stride fault-only-first segment load
// Input: (passthru, pointer, vl)
// Output: (data, vl)
// NOTE: We model this with default memory properties since we model writing
// VL as a side effect. Using IntrReadMem together with IntrHasSideEffects
// does not work.
class RISCVUSSegLoadFF<int nf>
    : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty],
                                        !listsplat(LLVMMatchType<0>,
                                                   !add(nf, -1)),
                                        [llvm_anyint_ty]),
                            !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                        [LLVMPointerToElt<0>,
                                         LLVMMatchType<1>]),
                            [NoCapture<ArgIndex<nf>>]>,
      RISCVVIntrinsic {
  let VLOperand = !add(nf, 1);
}

// For unit stride fault-only-first segment load with mask
// Input: (maskedoff, pointer, mask, vl, policy)
// Output: (data, vl)
// NOTE: We model this with default memory properties since we model writing
// VL as a side effect. Using IntrReadMem together with IntrHasSideEffects
// does not work.
class RISCVUSSegLoadFFMasked<int nf>
    : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty],
                                        !listsplat(LLVMMatchType<0>,
                                                   !add(nf, -1)),
                                        [llvm_anyint_ty]),
                            !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                        [LLVMPointerToElt<0>,
                                         LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                         LLVMMatchType<1>, LLVMMatchType<1>]),
                            [ImmArg<ArgIndex<!add(nf, 3)>>,
                             NoCapture<ArgIndex<nf>>]>,
      RISCVVIntrinsic {
  let VLOperand = !add(nf, 2);
}

// For strided segment load
// Input: (passthru, pointer, offset, vl)
class RISCVSSegLoad<int nf>
    : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty],
                                        !listsplat(LLVMMatchType<0>,
                                                   !add(nf, -1))),
                            !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                        [LLVMPointerToElt<0>, llvm_anyint_ty,
                                         LLVMMatchType<1>]),
                            [NoCapture<ArgIndex<nf>>, IntrReadMem]>,
      RISCVVIntrinsic {
  let VLOperand = !add(nf, 2);
}

// For strided segment load with mask
// Input: (maskedoff, pointer, offset, mask, vl, policy)
class RISCVSSegLoadMasked<int nf>
    : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty],
                                        !listsplat(LLVMMatchType<0>,
                                                   !add(nf, -1))),
                            !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                        [LLVMPointerToElt<0>,
                                         llvm_anyint_ty,
                                         LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                         LLVMMatchType<1>, LLVMMatchType<1>]),
                            [ImmArg<ArgIndex<!add(nf, 4)>>,
                             NoCapture<ArgIndex<nf>>, IntrReadMem]>,
      RISCVVIntrinsic {
  let VLOperand = !add(nf, 3);
}

// For indexed segment load
// Input: (passthru, pointer, index, vl)
class RISCVISegLoad<int nf>
    : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty],
                                        !listsplat(LLVMMatchType<0>,
                                                   !add(nf, -1))),
                            !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                        [LLVMPointerToElt<0>,
                                         llvm_anyvector_ty, llvm_anyint_ty]),
                            [NoCapture<ArgIndex<nf>>, IntrReadMem]>,
      RISCVVIntrinsic {
  let VLOperand = !add(nf, 2);
}

// For indexed segment load with mask
// Input: (maskedoff, pointer, index, mask, vl, policy)
class RISCVISegLoadMasked<int nf>
    : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty],
                                        !listsplat(LLVMMatchType<0>,
                                                   !add(nf, -1))),
                            !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                        [LLVMPointerToElt<0>,
                                         llvm_anyvector_ty,
                                         LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                         llvm_anyint_ty, LLVMMatchType<2>]),
                            [ImmArg<ArgIndex<!add(nf, 4)>>,
                             NoCapture<ArgIndex<nf>>, IntrReadMem]>,
      RISCVVIntrinsic {
  let VLOperand = !add(nf, 3);
}

// For unit stride segment store
// Input: (value, pointer, vl)
class RISCVUSSegStore<int nf>
    : DefaultAttrsIntrinsic<[],
                            !listconcat([llvm_anyvector_ty],
                                        !listsplat(LLVMMatchType<0>,
                                                   !add(nf, -1)),
                                        [LLVMPointerToElt<0>, llvm_anyint_ty]),
                            [NoCapture<ArgIndex<nf>>, IntrWriteMem]>,
      RISCVVIntrinsic {
  let VLOperand = !add(nf, 1);
}

// For unit stride segment store with mask
// Input: (value, pointer, mask, vl)
class RISCVUSSegStoreMasked<int nf>
    : DefaultAttrsIntrinsic<[],
                            !listconcat([llvm_anyvector_ty],
                                        !listsplat(LLVMMatchType<0>,
                                                   !add(nf, -1)),
                                        [LLVMPointerToElt<0>,
                                         LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                         llvm_anyint_ty]),
                            [NoCapture<ArgIndex<nf>>, IntrWriteMem]>,
      RISCVVIntrinsic {
  let VLOperand = !add(nf, 2);
}

// For strided segment store
// Input: (value, pointer, offset, vl)
class RISCVSSegStore<int nf>
    : DefaultAttrsIntrinsic<[],
                            !listconcat([llvm_anyvector_ty],
                                        !listsplat(LLVMMatchType<0>,
                                                   !add(nf, -1)),
                                        [LLVMPointerToElt<0>, llvm_anyint_ty,
                                         LLVMMatchType<1>]),
                            [NoCapture<ArgIndex<nf>>, IntrWriteMem]>,
      RISCVVIntrinsic {
  let VLOperand = !add(nf, 2);
}

// For strided segment store with mask
// Input: (value, pointer, offset, mask, vl)
class RISCVSSegStoreMasked<int nf>
    : DefaultAttrsIntrinsic<[],
                            !listconcat([llvm_anyvector_ty],
                                        !listsplat(LLVMMatchType<0>,
                                                   !add(nf, -1)),
                                        [LLVMPointerToElt<0>, llvm_anyint_ty,
                                         LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                         LLVMMatchType<1>]),
                            [NoCapture<ArgIndex<nf>>, IntrWriteMem]>,
      RISCVVIntrinsic {
  let VLOperand = !add(nf, 3);
}

// For indexed segment store
// Input: (value, pointer, index, vl)
class RISCVISegStore<int nf>
    : DefaultAttrsIntrinsic<[],
                            !listconcat([llvm_anyvector_ty],
                                        !listsplat(LLVMMatchType<0>,
                                                   !add(nf, -1)),
                                        [LLVMPointerToElt<0>,
                                         llvm_anyvector_ty, llvm_anyint_ty]),
                            [NoCapture<ArgIndex<nf>>, IntrWriteMem]>,
      RISCVVIntrinsic {
  let VLOperand = !add(nf, 2);
}

// For indexed segment store with mask
// Input: (value, pointer, index, mask, vl)
class RISCVISegStoreMasked<int nf>
    : DefaultAttrsIntrinsic<[],
                            !listconcat([llvm_anyvector_ty],
                                        !listsplat(LLVMMatchType<0>,
                                                   !add(nf, -1)),
                                        [LLVMPointerToElt<0>,
                                         llvm_anyvector_ty,
                                         LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                         llvm_anyint_ty]),
                            [NoCapture<ArgIndex<nf>>, IntrWriteMem]>,
      RISCVVIntrinsic {
  let VLOperand = !add(nf, 3);
}

multiclass RISCVUSLoad {
  def "int_riscv_" # NAME : RISCVUSLoad;
  def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMasked;
}
multiclass RISCVUSLoadFF {
  def "int_riscv_" # NAME : RISCVUSLoadFF;
  def "int_riscv_" # NAME # "_mask" : RISCVUSLoadFFMasked;
}
multiclass RISCVSLoad {
  def "int_riscv_" # NAME : RISCVSLoad;
  def "int_riscv_" # NAME # "_mask" : RISCVSLoadMasked;
}
multiclass RISCVILoad {
  def "int_riscv_" # NAME : RISCVILoad;
  def "int_riscv_" # NAME # "_mask" : RISCVILoadMasked;
}
multiclass RISCVUSStore {
  def "int_riscv_" # NAME : RISCVUSStore;
  def "int_riscv_" # NAME # "_mask" : RISCVUSStoreMasked;
}
multiclass RISCVSStore {
  def "int_riscv_" # NAME : RISCVSStore;
  def "int_riscv_" # NAME # "_mask" : RISCVSStoreMasked;
}
multiclass RISCVIStore {
  def "int_riscv_" # NAME : RISCVIStore;
  def "int_riscv_" # NAME # "_mask" : RISCVIStoreMasked;
}
multiclass RISCVUnaryAA {
  def "int_riscv_" # NAME : RISCVUnaryAAUnMasked;
  def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMasked;
}
multiclass RISCVUnaryAB {
  def "int_riscv_" # NAME : RISCVUnaryABUnMasked;
  def "int_riscv_" # NAME # "_mask" : RISCVUnaryABMasked;
}
// AAX means the destination type (A) is the same as the first source
// type (A). X means any type for the second source operand.
multiclass RISCVBinaryAAX {
  def "int_riscv_" # NAME : RISCVBinaryAAXUnMasked;
  def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMasked;
}
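// Illustrative note (not part of this file): instantiating one of these
// multiclasses, e.g. `defm vadd : RISCVBinaryAAX;` below, defines both
// int_riscv_vadd and int_riscv_vadd_mask, i.e. the IR-level
// @llvm.riscv.vadd.* and @llvm.riscv.vadd.mask.* intrinsic families.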
  1000. // Like RISCVBinaryAAX, but the second operand is used a shift amount so it
  1001. // must be a vector or an XLen scalar.
  1002. multiclass RISCVBinaryAAShift {
  1003. def "int_riscv_" # NAME : RISCVBinaryAAShiftUnMasked;
  1004. def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAShiftMasked;
  1005. }
  1006. multiclass RISCVRGatherVV {
  1007. def "int_riscv_" # NAME : RISCVRGatherVVUnMasked;
  1008. def "int_riscv_" # NAME # "_mask" : RISCVRGatherVVMasked;
  1009. }
  1010. multiclass RISCVRGatherVX {
  1011. def "int_riscv_" # NAME : RISCVGatherVXUnMasked;
  1012. def "int_riscv_" # NAME # "_mask" : RISCVGatherVXMasked;
  1013. }
  1014. multiclass RISCVRGatherEI16VV {
  1015. def "int_riscv_" # NAME : RISCVRGatherEI16VVUnMasked;
  1016. def "int_riscv_" # NAME # "_mask" : RISCVRGatherEI16VVMasked;
  1017. }
  1018. // ABX means the destination type(A) is different from the first source
  1019. // type(B). X means any type for the second source operand.
  1020. multiclass RISCVBinaryABX {
  1021. def "int_riscv_" # NAME : RISCVBinaryABXUnMasked;
  1022. def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMasked;
  1023. }
  1024. // Like RISCVBinaryABX, but the second operand is used a shift amount so it
  1025. // must be a vector or an XLen scalar.
  1026. multiclass RISCVBinaryABShift {
  1027. def "int_riscv_" # NAME : RISCVBinaryABShiftUnMasked;
  1028. def "int_riscv_" # NAME # "_mask" : RISCVBinaryABShiftMasked;
  1029. }
  1030. multiclass RISCVBinaryWithV0 {
  1031. def "int_riscv_" # NAME : RISCVBinaryWithV0;
  1032. }
  1033. multiclass RISCVBinaryMaskOutWithV0 {
  1034. def "int_riscv_" # NAME : RISCVBinaryMOutWithV0;
  1035. }
  1036. multiclass RISCVBinaryMaskOut {
  1037. def "int_riscv_" # NAME : RISCVBinaryMOut;
  1038. }
  1039. multiclass RISCVSaturatingBinaryAAX {
  1040. def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXUnMasked;
  1041. def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMasked;
  1042. }
  1043. multiclass RISCVSaturatingBinaryAAShift {
  1044. def "int_riscv_" # NAME : RISCVSaturatingBinaryAAShiftUnMasked;
  1045. def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAShiftMasked;
  1046. }
  1047. multiclass RISCVSaturatingBinaryABShift {
  1048. def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftUnMasked;
  1049. def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMasked;
  1050. }
  1051. multiclass RVVSlide {
  1052. def "int_riscv_" # NAME : RVVSlideUnMasked;
  1053. def "int_riscv_" # NAME # "_mask" : RVVSlideMasked;
  1054. }
  1055. multiclass RISCVTernaryAAXA {
  1056. def "int_riscv_" # NAME : RISCVTernaryAAXAUnMasked;
  1057. def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMasked;
  1058. }
  1059. multiclass RISCVCompare {
  1060. def "int_riscv_" # NAME : RISCVCompareUnMasked;
  1061. def "int_riscv_" # NAME # "_mask" : RISCVCompareMasked;
  1062. }
  1063. multiclass RISCVClassify {
  1064. def "int_riscv_" # NAME : RISCVClassifyUnMasked;
  1065. def "int_riscv_" # NAME # "_mask" : RISCVClassifyMasked;
  1066. }
  1067. multiclass RISCVTernaryWide {
  1068. def "int_riscv_" # NAME : RISCVTernaryWideUnMasked;
  1069. def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMasked;
  1070. }
  1071. multiclass RISCVReduction {
  1072. def "int_riscv_" # NAME : RISCVReductionUnMasked;
  1073. def "int_riscv_" # NAME # "_mask" : RISCVReductionMasked;
  1074. }
  1075. multiclass RISCVMaskedUnarySOut {
  1076. def "int_riscv_" # NAME : RISCVMaskedUnarySOutUnMasked;
  1077. def "int_riscv_" # NAME # "_mask" : RISCVMaskedUnarySOutMasked;
  1078. }
  1079. multiclass RISCVMaskedUnaryMOut {
  1080. def "int_riscv_" # NAME : RISCVUnaryUnMasked;
  1081. def "int_riscv_" # NAME # "_mask" : RISCVMaskedUnaryMOutMasked;
  1082. }
  1083. multiclass RISCVConversion {
  1084. def "int_riscv_" #NAME :RISCVConversionUnMasked;
  1085. def "int_riscv_" # NAME # "_mask" : RISCVConversionMasked;
  1086. }
  1087. multiclass RISCVUSSegLoad<int nf> {
  1088. def "int_riscv_" # NAME : RISCVUSSegLoad<nf>;
  1089. def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMasked<nf>;
  1090. }
  1091. multiclass RISCVUSSegLoadFF<int nf> {
  1092. def "int_riscv_" # NAME : RISCVUSSegLoadFF<nf>;
  1093. def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadFFMasked<nf>;
  1094. }
  1095. multiclass RISCVSSegLoad<int nf> {
  1096. def "int_riscv_" # NAME : RISCVSSegLoad<nf>;
  1097. def "int_riscv_" # NAME # "_mask" : RISCVSSegLoadMasked<nf>;
  1098. }
  1099. multiclass RISCVISegLoad<int nf> {
  1100. def "int_riscv_" # NAME : RISCVISegLoad<nf>;
  1101. def "int_riscv_" # NAME # "_mask" : RISCVISegLoadMasked<nf>;
  1102. }
  1103. multiclass RISCVUSSegStore<int nf> {
  1104. def "int_riscv_" # NAME : RISCVUSSegStore<nf>;
  1105. def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMasked<nf>;
  1106. }
  1107. multiclass RISCVSSegStore<int nf> {
  1108. def "int_riscv_" # NAME : RISCVSSegStore<nf>;
  1109. def "int_riscv_" # NAME # "_mask" : RISCVSSegStoreMasked<nf>;
  1110. }
  1111. multiclass RISCVISegStore<int nf> {
  1112. def "int_riscv_" # NAME : RISCVISegStore<nf>;
  1113. def "int_riscv_" # NAME # "_mask" : RISCVISegStoreMasked<nf>;
  1114. }

defm vle : RISCVUSLoad;
defm vleff : RISCVUSLoadFF;
defm vse : RISCVUSStore;
defm vlse : RISCVSLoad;
defm vsse : RISCVSStore;
defm vluxei : RISCVILoad;
defm vloxei : RISCVILoad;
defm vsoxei : RISCVIStore;
defm vsuxei : RISCVIStore;

def int_riscv_vlm : RISCVUSMLoad;
def int_riscv_vsm : RISCVUSStore;

defm vadd : RISCVBinaryAAX;
defm vsub : RISCVBinaryAAX;
defm vrsub : RISCVBinaryAAX;

defm vwaddu : RISCVBinaryABX;
defm vwadd : RISCVBinaryABX;
defm vwaddu_w : RISCVBinaryAAX;
defm vwadd_w : RISCVBinaryAAX;
defm vwsubu : RISCVBinaryABX;
defm vwsub : RISCVBinaryABX;
defm vwsubu_w : RISCVBinaryAAX;
defm vwsub_w : RISCVBinaryAAX;

defm vzext : RISCVUnaryAB;
defm vsext : RISCVUnaryAB;

defm vadc : RISCVBinaryWithV0;
defm vmadc_carry_in : RISCVBinaryMaskOutWithV0;
defm vmadc : RISCVBinaryMaskOut;

defm vsbc : RISCVBinaryWithV0;
defm vmsbc_borrow_in : RISCVBinaryMaskOutWithV0;
defm vmsbc : RISCVBinaryMaskOut;

defm vand : RISCVBinaryAAX;
defm vor : RISCVBinaryAAX;
defm vxor : RISCVBinaryAAX;

defm vsll : RISCVBinaryAAShift;
defm vsrl : RISCVBinaryAAShift;
defm vsra : RISCVBinaryAAShift;

defm vnsrl : RISCVBinaryABShift;
defm vnsra : RISCVBinaryABShift;

defm vmseq : RISCVCompare;
defm vmsne : RISCVCompare;
defm vmsltu : RISCVCompare;
defm vmslt : RISCVCompare;
defm vmsleu : RISCVCompare;
defm vmsle : RISCVCompare;
defm vmsgtu : RISCVCompare;
defm vmsgt : RISCVCompare;
defm vmsgeu : RISCVCompare;
defm vmsge : RISCVCompare;

defm vminu : RISCVBinaryAAX;
defm vmin : RISCVBinaryAAX;
defm vmaxu : RISCVBinaryAAX;
defm vmax : RISCVBinaryAAX;

defm vmul : RISCVBinaryAAX;
defm vmulh : RISCVBinaryAAX;
defm vmulhu : RISCVBinaryAAX;
defm vmulhsu : RISCVBinaryAAX;

defm vdivu : RISCVBinaryAAX;
defm vdiv : RISCVBinaryAAX;
defm vremu : RISCVBinaryAAX;
defm vrem : RISCVBinaryAAX;

defm vwmul : RISCVBinaryABX;
defm vwmulu : RISCVBinaryABX;
defm vwmulsu : RISCVBinaryABX;

defm vmacc : RISCVTernaryAAXA;
defm vnmsac : RISCVTernaryAAXA;
defm vmadd : RISCVTernaryAAXA;
defm vnmsub : RISCVTernaryAAXA;

defm vwmaccu : RISCVTernaryWide;
defm vwmacc : RISCVTernaryWide;
defm vwmaccus : RISCVTernaryWide;
defm vwmaccsu : RISCVTernaryWide;

defm vfadd : RISCVBinaryAAX;
defm vfsub : RISCVBinaryAAX;
defm vfrsub : RISCVBinaryAAX;

defm vfwadd : RISCVBinaryABX;
defm vfwsub : RISCVBinaryABX;
defm vfwadd_w : RISCVBinaryAAX;
defm vfwsub_w : RISCVBinaryAAX;

defm vsaddu : RISCVSaturatingBinaryAAX;
defm vsadd : RISCVSaturatingBinaryAAX;
defm vssubu : RISCVSaturatingBinaryAAX;
defm vssub : RISCVSaturatingBinaryAAX;

defm vmerge : RISCVBinaryWithV0;

// Output: (vector)
// Input: (passthru, vector_in, vl)
def int_riscv_vmv_v_v : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                              [LLVMMatchType<0>,
                                               LLVMMatchType<0>,
                                               llvm_anyint_ty],
                                              [IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 2;
}
// Output: (vector)
// Input: (passthru, scalar, vl)
def int_riscv_vmv_v_x : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                                              [LLVMMatchType<0>,
                                               LLVMVectorElementType<0>,
                                               llvm_anyint_ty],
                                              [IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 2;
}
// Output: (vector)
// Input: (passthru, scalar, vl)
def int_riscv_vfmv_v_f : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
                                               [LLVMMatchType<0>,
                                                LLVMVectorElementType<0>,
                                                llvm_anyint_ty],
                                               [IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 2;
}

def int_riscv_vmv_x_s : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                              [llvm_anyint_ty],
                                              [IntrNoMem]>, RISCVVIntrinsic;
def int_riscv_vmv_s_x : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                                              [LLVMMatchType<0>,
                                               LLVMVectorElementType<0>,
                                               llvm_anyint_ty],
                                              [IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 2;
}

def int_riscv_vfmv_f_s : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                               [llvm_anyfloat_ty],
                                               [IntrNoMem]>, RISCVVIntrinsic;
def int_riscv_vfmv_s_f : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
                                               [LLVMMatchType<0>,
                                                LLVMVectorElementType<0>,
                                                llvm_anyint_ty],
                                               [IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 2;
}
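
// For orientation: a record named int_riscv_vmv_v_x is exposed in LLVM IR as
// llvm.riscv.vmv.v.x, with overload-type suffixes appended by the intrinsic
// name mangler, and VLOperand names the argument index that carries the vl
// (AVL) operand, index 2 here, matching the (passthru, scalar, vl) operand
// lists documented above. An illustrative call (element type and XLen chosen
// for the example; the exact suffixes come from the mangler) looks roughly
// like:
//
//   %v = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32.i64(
//            <vscale x 2 x i32> %passthru, i32 %x, i64 %vl)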

defm vfmul : RISCVBinaryAAX;
defm vfdiv : RISCVBinaryAAX;
defm vfrdiv : RISCVBinaryAAX;

defm vfwmul : RISCVBinaryABX;

defm vfmacc : RISCVTernaryAAXA;
defm vfnmacc : RISCVTernaryAAXA;
defm vfmsac : RISCVTernaryAAXA;
defm vfnmsac : RISCVTernaryAAXA;
defm vfmadd : RISCVTernaryAAXA;
defm vfnmadd : RISCVTernaryAAXA;
defm vfmsub : RISCVTernaryAAXA;
defm vfnmsub : RISCVTernaryAAXA;

defm vfwmacc : RISCVTernaryWide;
defm vfwnmacc : RISCVTernaryWide;
defm vfwmsac : RISCVTernaryWide;
defm vfwnmsac : RISCVTernaryWide;

defm vfsqrt : RISCVUnaryAA;
defm vfrsqrt7 : RISCVUnaryAA;
defm vfrec7 : RISCVUnaryAA;

defm vfmin : RISCVBinaryAAX;
defm vfmax : RISCVBinaryAAX;

defm vfsgnj : RISCVBinaryAAX;
defm vfsgnjn : RISCVBinaryAAX;
defm vfsgnjx : RISCVBinaryAAX;

defm vfclass : RISCVClassify;

defm vfmerge : RISCVBinaryWithV0;

defm vslideup : RVVSlide;
defm vslidedown : RVVSlide;

defm vslide1up : RISCVBinaryAAX;
defm vslide1down : RISCVBinaryAAX;
defm vfslide1up : RISCVBinaryAAX;
defm vfslide1down : RISCVBinaryAAX;

defm vrgather_vv : RISCVRGatherVV;
defm vrgather_vx : RISCVRGatherVX;
defm vrgatherei16_vv : RISCVRGatherEI16VV;

def "int_riscv_vcompress" : RISCVCompress;

defm vaaddu : RISCVSaturatingBinaryAAX;
defm vaadd : RISCVSaturatingBinaryAAX;
defm vasubu : RISCVSaturatingBinaryAAX;
defm vasub : RISCVSaturatingBinaryAAX;

defm vsmul : RISCVSaturatingBinaryAAX;

defm vssrl : RISCVSaturatingBinaryAAShift;
defm vssra : RISCVSaturatingBinaryAAShift;

defm vnclipu : RISCVSaturatingBinaryABShift;
defm vnclip : RISCVSaturatingBinaryABShift;

defm vmfeq : RISCVCompare;
defm vmfne : RISCVCompare;
defm vmflt : RISCVCompare;
defm vmfle : RISCVCompare;
defm vmfgt : RISCVCompare;
defm vmfge : RISCVCompare;

defm vredsum : RISCVReduction;
defm vredand : RISCVReduction;
defm vredor : RISCVReduction;
defm vredxor : RISCVReduction;
defm vredminu : RISCVReduction;
defm vredmin : RISCVReduction;
defm vredmaxu : RISCVReduction;
defm vredmax : RISCVReduction;

defm vwredsumu : RISCVReduction;
defm vwredsum : RISCVReduction;

defm vfredosum : RISCVReduction;
defm vfredusum : RISCVReduction;
defm vfredmin : RISCVReduction;
defm vfredmax : RISCVReduction;

defm vfwredusum : RISCVReduction;
defm vfwredosum : RISCVReduction;

def int_riscv_vmand : RISCVBinaryAAAUnMasked;
def int_riscv_vmnand : RISCVBinaryAAAUnMasked;
def int_riscv_vmandn : RISCVBinaryAAAUnMasked;
def int_riscv_vmxor : RISCVBinaryAAAUnMasked;
def int_riscv_vmor : RISCVBinaryAAAUnMasked;
def int_riscv_vmnor : RISCVBinaryAAAUnMasked;
def int_riscv_vmorn : RISCVBinaryAAAUnMasked;
def int_riscv_vmxnor : RISCVBinaryAAAUnMasked;
def int_riscv_vmclr : RISCVNullaryIntrinsic;
def int_riscv_vmset : RISCVNullaryIntrinsic;

defm vcpop : RISCVMaskedUnarySOut;
defm vfirst : RISCVMaskedUnarySOut;
defm vmsbf : RISCVMaskedUnaryMOut;
defm vmsof : RISCVMaskedUnaryMOut;
defm vmsif : RISCVMaskedUnaryMOut;

defm vfcvt_xu_f_v : RISCVConversion;
defm vfcvt_x_f_v : RISCVConversion;
defm vfcvt_rtz_xu_f_v : RISCVConversion;
defm vfcvt_rtz_x_f_v : RISCVConversion;
defm vfcvt_f_xu_v : RISCVConversion;
defm vfcvt_f_x_v : RISCVConversion;

defm vfwcvt_f_xu_v : RISCVConversion;
defm vfwcvt_f_x_v : RISCVConversion;
defm vfwcvt_xu_f_v : RISCVConversion;
defm vfwcvt_x_f_v : RISCVConversion;
defm vfwcvt_rtz_xu_f_v : RISCVConversion;
defm vfwcvt_rtz_x_f_v : RISCVConversion;
defm vfwcvt_f_f_v : RISCVConversion;

defm vfncvt_f_xu_w : RISCVConversion;
defm vfncvt_f_x_w : RISCVConversion;
defm vfncvt_xu_f_w : RISCVConversion;
defm vfncvt_x_f_w : RISCVConversion;
defm vfncvt_rtz_xu_f_w : RISCVConversion;
defm vfncvt_rtz_x_f_w : RISCVConversion;
defm vfncvt_f_f_w : RISCVConversion;
defm vfncvt_rod_f_f_w : RISCVConversion;

// Output: (vector)
// Input: (passthru, mask type input, vl)
def int_riscv_viota
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty],
                            [IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 2;
}
// Output: (vector)
// Input: (maskedoff, mask type vector_in, mask, vl, policy)
def int_riscv_viota_mask
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<1>],
                            [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 3;
}
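// In the masked form above, ImmArg<ArgIndex<4>> requires the trailing policy
// operand to be a compile-time immediate, and VLOperand = 3 points at the vl
// argument, consistent with the (maskedoff, mask type vector_in, mask, vl,
// policy) operand order documented in the comment.
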
// Output: (vector)
// Input: (passthru, vl)
def int_riscv_vid : RISCVID;
// Output: (vector)
// Input: (maskedoff, mask, vl, policy)
def int_riscv_vid_mask
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<1>],
                            [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 2;
}

foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
  defm vlseg # nf : RISCVUSSegLoad<nf>;
  defm vlseg # nf # ff : RISCVUSSegLoadFF<nf>;
  defm vlsseg # nf : RISCVSSegLoad<nf>;
  defm vloxseg # nf : RISCVISegLoad<nf>;
  defm vluxseg # nf : RISCVISegLoad<nf>;
  defm vsseg # nf : RISCVUSSegStore<nf>;
  defm vssseg # nf : RISCVSSegStore<nf>;
  defm vsoxseg # nf : RISCVISegStore<nf>;
  defm vsuxseg # nf : RISCVISegStore<nf>;
}
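
// To make the name pasting concrete: for nf = 2, the first defm in the loop
// behaves like
//
//   defm vlseg2 : RISCVUSSegLoad<2>;
//
// which, through the RISCVUSSegLoad multiclass defined earlier, yields
//
//   def int_riscv_vlseg2      : RISCVUSSegLoad<2>;
//   def int_riscv_vlseg2_mask : RISCVUSSegLoadMasked<2>;
//
// and likewise for nf up to 8 and for the other segment load/store families.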

// Strided loads/stores for fixed vectors.
def int_riscv_masked_strided_load
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyptr_ty,
                             llvm_anyint_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                            [NoCapture<ArgIndex<1>>, IntrReadMem]>;
def int_riscv_masked_strided_store
    : DefaultAttrsIntrinsic<[],
                            [llvm_anyvector_ty, llvm_anyptr_ty,
                             llvm_anyint_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                            [NoCapture<ArgIndex<1>>, IntrWriteMem]>;
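// Reading the signatures above: the operands are (passthru, pointer, stride,
// mask) for the load and (value, pointer, stride, mask) for the store.
// NoCapture<ArgIndex<1>> marks the pointer operand as non-captured, and
// IntrReadMem/IntrWriteMem let the optimizer treat these as pure loads and
// stores, respectively.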

// Segment loads for fixed vectors.
foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
  def int_riscv_seg # nf # _load
      : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty],
                                          !listsplat(LLVMMatchType<0>,
                                                     !add(nf, -1))),
                              [llvm_anyptr_ty, llvm_anyint_ty],
                              [NoCapture<ArgIndex<0>>, IntrReadMem]>;
}
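
// The !listconcat/!listsplat pair builds an nf-element return list: one
// overloaded vector type plus nf - 1 copies of the same type. Expanding the
// bang operators by hand for nf = 2, the def above is equivalent to:
//
//   def int_riscv_seg2_load
//       : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
//                               [llvm_anyptr_ty, llvm_anyint_ty],
//                               [NoCapture<ArgIndex<0>>, IntrReadMem]>;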

} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Scalar Cryptography
//
// These intrinsics will lower directly into the corresponding instructions
// added by the scalar cryptography extension, if the extension is present.

let TargetPrefix = "riscv" in {

class ScalarCryptoGprIntrinsicAny
    : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                            [LLVMMatchType<0>],
                            [IntrNoMem, IntrSpeculatable]>;
class ScalarCryptoByteSelect32
    : DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty, llvm_i32_ty, llvm_i8_ty],
                            [IntrNoMem, IntrSpeculatable,
                             ImmArg<ArgIndex<2>>]>;
class ScalarCryptoGprGprIntrinsic32
    : DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, IntrSpeculatable]>;
class ScalarCryptoGprGprIntrinsic64
    : DefaultAttrsIntrinsic<[llvm_i64_ty],
                            [llvm_i64_ty, llvm_i64_ty],
                            [IntrNoMem, IntrSpeculatable]>;
class ScalarCryptoGprIntrinsic64
    : DefaultAttrsIntrinsic<[llvm_i64_ty],
                            [llvm_i64_ty],
                            [IntrNoMem, IntrSpeculatable]>;
class ScalarCryptoByteSelectAny
    : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i8_ty],
                            [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<2>>]>;
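// In both ByteSelect classes, ImmArg<ArgIndex<2>> constrains the third
// operand (the byte-select field of the underlying instructions) to a
// compile-time constant, while IntrNoMem and IntrSpeculatable mark these as
// pure, freely speculatable computations.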

// Zknd
def int_riscv_aes32dsi : ScalarCryptoByteSelect32;
def int_riscv_aes32dsmi : ScalarCryptoByteSelect32;

def int_riscv_aes64ds : ScalarCryptoGprGprIntrinsic64;
def int_riscv_aes64dsm : ScalarCryptoGprGprIntrinsic64;

def int_riscv_aes64im : ScalarCryptoGprIntrinsic64;

// Zkne
def int_riscv_aes32esi : ScalarCryptoByteSelect32;
def int_riscv_aes32esmi : ScalarCryptoByteSelect32;

def int_riscv_aes64es : ScalarCryptoGprGprIntrinsic64;
def int_riscv_aes64esm : ScalarCryptoGprGprIntrinsic64;

// Zknd & Zkne
def int_riscv_aes64ks2 : ScalarCryptoGprGprIntrinsic64;
def int_riscv_aes64ks1i : DefaultAttrsIntrinsic<[llvm_i64_ty],
                                                [llvm_i64_ty, llvm_i32_ty],
                                                [IntrNoMem, IntrSpeculatable,
                                                 ImmArg<ArgIndex<1>>]>;
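// The aes64ks1i instruction encodes its round number as an immediate field,
// so ImmArg<ArgIndex<1>> correspondingly requires the intrinsic's second
// operand to be a compile-time constant.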

// Zknh
def int_riscv_sha256sig0 : ScalarCryptoGprIntrinsicAny;
def int_riscv_sha256sig1 : ScalarCryptoGprIntrinsicAny;
def int_riscv_sha256sum0 : ScalarCryptoGprIntrinsicAny;
def int_riscv_sha256sum1 : ScalarCryptoGprIntrinsicAny;

def int_riscv_sha512sig0l : ScalarCryptoGprGprIntrinsic32;
def int_riscv_sha512sig0h : ScalarCryptoGprGprIntrinsic32;
def int_riscv_sha512sig1l : ScalarCryptoGprGprIntrinsic32;
def int_riscv_sha512sig1h : ScalarCryptoGprGprIntrinsic32;
def int_riscv_sha512sum0r : ScalarCryptoGprGprIntrinsic32;
def int_riscv_sha512sum1r : ScalarCryptoGprGprIntrinsic32;

def int_riscv_sha512sig0 : ScalarCryptoGprIntrinsic64;
def int_riscv_sha512sig1 : ScalarCryptoGprIntrinsic64;
def int_riscv_sha512sum0 : ScalarCryptoGprIntrinsic64;
def int_riscv_sha512sum1 : ScalarCryptoGprIntrinsic64;

// Zksed
def int_riscv_sm4ks : ScalarCryptoByteSelectAny;
def int_riscv_sm4ed : ScalarCryptoByteSelectAny;

// Zksh
def int_riscv_sm3p0 : ScalarCryptoGprIntrinsicAny;
def int_riscv_sm3p1 : ScalarCryptoGprIntrinsicAny;
} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Vendor extensions
//===----------------------------------------------------------------------===//
include "llvm/IR/IntrinsicsRISCVXTHead.td"