//===- IntrinsicsRISCV.td - Defines RISCV intrinsics -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the RISCV-specific intrinsics.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Atomics

// Atomic Intrinsics have multiple versions for different access widths, which
// all follow one of the following signatures (depending on how many arguments
// they require). We carefully instantiate only specific versions of these for
// specific integer widths, rather than using `llvm_anyint_ty`.
//
// In fact, as these intrinsics take `llvm_anyptr_ty`, the given names are the
// canonical names, and the intrinsics used in the code will have a name
// suffixed with the pointer type they are specialised for (denoted `<p>` in the
// names below), in order to avoid type conflicts.

let TargetPrefix = "riscv" in {

  // T @llvm.<name>.T.<p>(any*, T, T, T imm);
  class MaskedAtomicRMWFourArg<LLVMType itype>
      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype],
                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>]>;
  // T @llvm.<name>.T.<p>(any*, T, T, T, T imm);
  class MaskedAtomicRMWFiveArg<LLVMType itype>
      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype, itype],
                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<4>>]>;

  // We define 32-bit and 64-bit variants of the above, where T stands for i32
  // or i64 respectively:
  multiclass MaskedAtomicRMWFourArgIntrinsics {
    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32 imm);
    def _i32 : MaskedAtomicRMWFourArg<llvm_i32_ty>;
    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64 imm);
    def _i64 : MaskedAtomicRMWFourArg<llvm_i64_ty>;
  }
  multiclass MaskedAtomicRMWFiveArgIntrinsics {
    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32, i32 imm);
    def _i32 : MaskedAtomicRMWFiveArg<llvm_i32_ty>;
    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64, i64 imm);
    def _i64 : MaskedAtomicRMWFiveArg<llvm_i64_ty>;
  }

  // @llvm.riscv.masked.atomicrmw.*.{i32,i64}.<p>(...)
  defm int_riscv_masked_atomicrmw_xchg : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_add : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_sub : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_nand : MaskedAtomicRMWFourArgIntrinsics;
  // Signed min and max need an extra operand to do sign extension with.
  defm int_riscv_masked_atomicrmw_max : MaskedAtomicRMWFiveArgIntrinsics;
  defm int_riscv_masked_atomicrmw_min : MaskedAtomicRMWFiveArgIntrinsics;
  // Unsigned min and max don't need the extra operand.
  defm int_riscv_masked_atomicrmw_umax : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_umin : MaskedAtomicRMWFourArgIntrinsics;

  // @llvm.riscv.masked.cmpxchg.{i32,i64}.<p>(...)
  defm int_riscv_masked_cmpxchg : MaskedAtomicRMWFiveArgIntrinsics;
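
  // Illustrative only: after name mangling, the i32 add variant specialised
  // for a plain i32 pointer would be declared roughly as
  //   declare i32 @llvm.riscv.masked.atomicrmw.add.i32.p0i32(
  //       i32*, i32, i32, i32 immarg)
  // The exact suffix depends on the pointer's address space and element type.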
} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Vectors

class RISCVVIntrinsic {
  // These intrinsics may accept illegal integer values in their llvm_any_ty
  // operand, so they have to be extended. If ExtendOperand is zero, the
  // intrinsic does not have any operand that must be extended.
  Intrinsic IntrinsicID = !cast<Intrinsic>(NAME);
  bits<4> ExtendOperand = 0;
}
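
// For example, RISCVBinaryAAXNoMask below sets ExtendOperand = 2, meaning the
// second operand (counting from 1) is the llvm_any_ty value that may need
// extending. A sketch of the intent, assuming RV64 with SEW=8: the i8 scalar
// operand of a .vx intrinsic is extended to XLen (i64) before selection.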
let TargetPrefix = "riscv" in {
  // We use anyint here, but only XLen is supported.
  def int_riscv_vsetvli : Intrinsic<[llvm_anyint_ty],
                                    /* AVL */   [LLVMMatchType<0>,
                                    /* VSEW */   LLVMMatchType<0>,
                                    /* VLMUL */  LLVMMatchType<0>],
                                    [IntrNoMem, IntrHasSideEffects,
                                     ImmArg<ArgIndex<1>>,
                                     ImmArg<ArgIndex<2>>]>;
  def int_riscv_vsetvlimax : Intrinsic<[llvm_anyint_ty],
                                       /* VSEW */  [LLVMMatchType<0>,
                                       /* VLMUL */  LLVMMatchType<0>],
                                       [IntrNoMem, IntrHasSideEffects,
                                        ImmArg<ArgIndex<0>>,
                                        ImmArg<ArgIndex<1>>]>;
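
  // Illustrative IR, assuming XLen = 64 so the overload is i64; the VSEW and
  // VLMUL immediates shown are placeholders encoded as in the vtype field:
  //   %vl = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 2, i64 1)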
  // For unit stride load
  // Input: (pointer, vl)
  class RISCVUSLoad
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMPointerType<LLVMMatchType<0>>,
                   llvm_anyint_ty],
                  [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
  // For unit stride fault-only-first load
  // Input: (pointer, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  class RISCVUSLoadFF
      : Intrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
                  [LLVMPointerType<LLVMMatchType<0>>, LLVMMatchType<1>],
                  [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;
  // For unit stride load with mask
  // Input: (maskedoff, pointer, mask, vl)
  class RISCVUSLoadMask
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMMatchType<0>,
                   LLVMPointerType<LLVMMatchType<0>>,
                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                   llvm_anyint_ty],
                  [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
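  // Illustrative mangled form, assuming <vscale x 2 x i32> data and i64 VL
  // (the unmasked/masked pair is instantiated by the RISCVUSLoad multiclass
  // further down as int_riscv_vle / int_riscv_vle_mask):
  //   declare <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32.i64(
  //       <vscale x 2 x i32>, <vscale x 2 x i32>*, <vscale x 2 x i1>, i64)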
  // For unit stride fault-only-first load with mask
  // Input: (maskedoff, pointer, mask, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  class RISCVUSLoadFFMask
      : Intrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
                  [LLVMMatchType<0>,
                   LLVMPointerType<LLVMMatchType<0>>,
                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                   LLVMMatchType<1>],
                  [NoCapture<ArgIndex<1>>]>, RISCVVIntrinsic;
  // For strided load
  // Input: (pointer, stride, vl)
  class RISCVSLoad
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMPointerType<LLVMMatchType<0>>,
                   llvm_anyint_ty, LLVMMatchType<1>],
                  [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
  // For strided load with mask
  // Input: (maskedoff, pointer, stride, mask, vl)
  class RISCVSLoadMask
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMMatchType<0>,
                   LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                  [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
  // For indexed load
  // Input: (pointer, index, vl)
  class RISCVILoad
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMPointerType<LLVMMatchType<0>>,
                   llvm_anyvector_ty, llvm_anyint_ty],
                  [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
  // For indexed load with mask
  // Input: (maskedoff, pointer, index, mask, vl)
  class RISCVILoadMask
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMMatchType<0>,
                   LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                  [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
  // For unit stride store
  // Input: (vector_in, pointer, vl)
  class RISCVUSStore
      : Intrinsic<[],
                  [llvm_anyvector_ty,
                   LLVMPointerType<LLVMMatchType<0>>,
                   llvm_anyint_ty],
                  [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For unit stride store with mask
  // Input: (vector_in, pointer, mask, vl)
  class RISCVUSStoreMask
      : Intrinsic<[],
                  [llvm_anyvector_ty,
                   LLVMPointerType<LLVMMatchType<0>>,
                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                   llvm_anyint_ty],
                  [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For strided store
  // Input: (vector_in, pointer, stride, vl)
  class RISCVSStore
      : Intrinsic<[],
                  [llvm_anyvector_ty,
                   LLVMPointerType<LLVMMatchType<0>>,
                   llvm_anyint_ty, LLVMMatchType<1>],
                  [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For strided store with mask
  // Input: (vector_in, pointer, stride, mask, vl)
  class RISCVSStoreMask
      : Intrinsic<[],
                  [llvm_anyvector_ty,
                   LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                  [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For indexed store
  // Input: (vector_in, pointer, index, vl)
  class RISCVIStore
      : Intrinsic<[],
                  [llvm_anyvector_ty,
                   LLVMPointerType<LLVMMatchType<0>>,
                   llvm_anyint_ty, llvm_anyint_ty],
                  [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For indexed store with mask
  // Input: (vector_in, pointer, index, mask, vl)
  class RISCVIStoreMask
      : Intrinsic<[],
                  [llvm_anyvector_ty,
                   LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                  [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For operations where the destination vector type is the same as the
  // source vector type.
  // Input: (vector_in, vl)
  class RISCVUnaryAANoMask
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMMatchType<0>, llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic;
  // For operations where the destination vector type is the same as the
  // first source vector type (with mask).
  // Input: (maskedoff, vector_in, mask, vl)
  class RISCVUnaryAAMask
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMMatchType<0>, LLVMMatchType<0>,
                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic;
  // For operations where the destination vector type is the same as the
  // first and second source vector types.
  // Input: (vector_in, vector_in, vl)
  class RISCVBinaryAAANoMask
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic;
  // Same as above, but with an additional mask operand.
  // Input: (vector_in, vector_in, mask, vl)
  class RISCVBinaryAAAMask
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMMatchType<0>, LLVMMatchType<0>,
                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic;
  // For operations where the destination vector type is the same as the
  // first source vector type.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryAAXNoMask
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 2;
  }
  // For operations where the destination vector type is the same as the
  // first source vector type (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVBinaryAAXMask
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 3;
  }
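  // Illustrative: via the RISCVBinaryAAX multiclass below, vadd's masked form
  // would mangle roughly as follows (assumptions: <vscale x 2 x i32> data, an
  // i32 scalar second source, i64 VL):
  //   declare <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32.i64(
  //       <vscale x 2 x i32>, <vscale x 2 x i32>, i32, <vscale x 2 x i1>, i64)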
  // For operations where the destination vector type is NOT the same as the
  // first source vector type.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryABXNoMask
      : Intrinsic<[llvm_anyvector_ty],
                  [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 2;
  }
  // For operations where the destination vector type is NOT the same as the
  // first source vector type (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVBinaryABXMask
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 3;
  }
  // For binary operations with V0 as input.
  // Input: (vector_in, vector_in/scalar_in, V0, vl)
  class RISCVBinaryWithV0
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMMatchType<0>, llvm_any_ty,
                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                   llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 2;
  }
  // For binary operations with mask type output and V0 as input.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, V0, vl)
  class RISCVBinaryMOutWithV0
      : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                  [llvm_anyvector_ty, llvm_any_ty,
                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                   llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 2;
  }
  // For binary operations with mask type output.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryMOut
      : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                  [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 2;
  }
  // For comparison operations with mask type output, without mask.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVCompareNoMask
      : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                  [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 2;
  }
  // For comparison operations with mask type output, with mask.
  // Output: (mask type output)
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVCompareMask
      : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                  [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                   llvm_anyvector_ty, llvm_any_ty,
                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 3;
  }
  // For FP classify operations.
  // Output: (bit mask type output)
  // Input: (vector_in, vl)
  class RISCVClassifyNoMask
      : Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
                  [llvm_anyvector_ty, llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic;
  // For FP classify operations with mask.
  // Output: (bit mask type output)
  // Input: (maskedoff, vector_in, mask, vl)
  class RISCVClassifyMask
      : Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
                  [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic;
  // For saturating binary operations.
  // The destination vector type is the same as the first source vector type.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVSaturatingBinaryAAXNoMask
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
                  [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let ExtendOperand = 2;
  }
  // For saturating binary operations with mask.
  // The destination vector type is the same as the first source vector type.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVSaturatingBinaryAAXMask
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                  [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let ExtendOperand = 3;
  }
  // For saturating binary operations.
  // The destination vector type is NOT the same as the first source vector type.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVSaturatingBinaryABXNoMask
      : Intrinsic<[llvm_anyvector_ty],
                  [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                  [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let ExtendOperand = 2;
  }
  // For saturating binary operations with mask.
  // The destination vector type is NOT the same as the first source vector type.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVSaturatingBinaryABXMask
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                  [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let ExtendOperand = 3;
  }
  // For ternary operations where the destination type matches both vector
  // sources, with a scalar (XLen) third operand (e.g. vslideup/vslidedown).
  // Input: (vector_in, vector_in, offset, vl)
  class RISCVTernaryAAAXNoMask
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                   LLVMMatchType<1>],
                  [IntrNoMem]>, RISCVVIntrinsic;
  // Same as above, with mask.
  // Input: (vector_in, vector_in, offset, mask, vl)
  class RISCVTernaryAAAXMask
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                  [IntrNoMem]>, RISCVVIntrinsic;
  // For ternary multiply-add/sub operations where the destination type matches
  // the first and third source types (e.g. vmacc/vnmsac).
  // Input: (vector_in, vector_in/scalar_in, vector_in, vl)
  class RISCVTernaryAAXANoMask
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                   llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 2;
  }
  // Same as above, with mask.
  // Input: (vector_in, vector_in/scalar_in, vector_in, mask, vl)
  class RISCVTernaryAAXAMask
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 2;
  }
  // For widening ternary multiply-add operations, where the destination type
  // is wider than the source types (e.g. vwmacc).
  // Input: (vector_in, vector_in/scalar_in, vector_in, vl)
  class RISCVTernaryWideNoMask
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                   llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 2;
  }
  // Same as above, with mask.
  // Input: (vector_in, vector_in/scalar_in, vector_in, mask, vl)
  class RISCVTernaryWideMask
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 2;
  }
  // For reduction ternary operations, where the destination vector type is
  // the same as the first and third source vector types.
  // Input: (vector_in, vector_in, vector_in, vl)
  class RISCVReductionNoMask
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                   llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic;
  // For reduction ternary operations with mask.
  // The destination vector type is the same as the first and third source
  // vector types; the mask type comes from the second source vector type.
  // Input: (vector_in, vector_in, vector_in, mask, vl)
  class RISCVReductionMask
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                   LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic;
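  // Illustrative mangled form for a reduction (via RISCVReduction below), with
  // assumed types <vscale x 2 x i32> result/seed, <vscale x 4 x i32> data,
  // i64 VL:
  //   declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64(
  //       <vscale x 2 x i32>, <vscale x 4 x i32>, <vscale x 2 x i32>, i64)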
  // For unary operations with scalar type output without mask
  // Output: (scalar type)
  // Input: (vector_in, vl)
  class RISCVMaskUnarySOutNoMask
      : Intrinsic<[llvm_anyint_ty],
                  [llvm_anyvector_ty, LLVMMatchType<0>],
                  [IntrNoMem]>, RISCVVIntrinsic;
  // For unary operations with scalar type output with mask
  // Output: (scalar type)
  // Input: (vector_in, mask, vl)
  class RISCVMaskUnarySOutMask
      : Intrinsic<[llvm_anyint_ty],
                  [llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<0>],
                  [IntrNoMem]>, RISCVVIntrinsic;
  // For operations where the destination vector type is NOT the same as the
  // source vector type.
  // Input: (vector_in, vl)
  class RISCVUnaryABNoMask
      : Intrinsic<[llvm_anyvector_ty],
                  [llvm_anyvector_ty, llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic;
  // For operations where the destination vector type is NOT the same as the
  // source vector type (with mask).
  // Input: (maskedoff, vector_in, mask, vl)
  class RISCVUnaryABMask
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMMatchType<0>, llvm_anyvector_ty,
                   LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
                   llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic;
  // For unary operations with the same vector type in/out without mask
  // Output: (vector)
  // Input: (vector_in, vl)
  class RISCVUnaryNoMask
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMMatchType<0>, llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic;
  // For mask unary operations with mask type in/out with mask
  // Output: (mask type output)
  // Input: (mask type maskedoff, mask type vector_in, mask, vl)
  class RISCVMaskUnaryMOutMask
      : Intrinsic<[llvm_anyint_ty],
                  [LLVMMatchType<0>, LLVMMatchType<0>,
                   LLVMMatchType<0>, llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic;
  // Output: (vector)
  // Input: (vl)
  class RISCVNullaryIntrinsic
      : Intrinsic<[llvm_anyvector_ty],
                  [llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic;
  // For Conversion unary operations.
  // Input: (vector_in, vl)
  class RISCVConversionNoMask
      : Intrinsic<[llvm_anyvector_ty],
                  [llvm_anyvector_ty, llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic;
  // For Conversion unary operations with mask.
  // Input: (maskedoff, vector_in, mask, vl)
  class RISCVConversionMask
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMMatchType<0>, llvm_anyvector_ty,
                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                  [IntrNoMem]>, RISCVVIntrinsic;
  // For atomic operations without mask
  // Input: (base, index, value, vl)
  class RISCVAMONoMask
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
                   LLVMMatchType<0>, llvm_anyint_ty],
                  [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;
  // For atomic operations with mask
  // Input: (base, index, value, mask, vl)
  class RISCVAMOMask
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
                   LLVMMatchType<0>,
                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                  [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;
  // For unit stride segment load
  // Input: (pointer, vl)
  class RISCVUSSegLoad<int nf>
      : Intrinsic<!listconcat([llvm_anyvector_ty],
                              !listsplat(LLVMMatchType<0>, !add(nf, -1))),
                  [LLVMPointerToElt<0>, llvm_anyint_ty],
                  [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
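  // Illustrative: for nf = 2 the !listconcat/!listsplat above expands the
  // result list to [llvm_anyvector_ty, LLVMMatchType<0>], i.e. two vectors of
  // one type, returned in IR as an aggregate (assumed element type i32):
  //   declare { <vscale x 1 x i32>, <vscale x 1 x i32> }
  //       @llvm.riscv.vlseg2.nxv1i32.i64(i32*, i64)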
  // For unit stride segment load with mask
  // Input: (maskedoff, pointer, mask, vl)
  class RISCVUSSegLoadMask<int nf>
      : Intrinsic<!listconcat([llvm_anyvector_ty],
                              !listsplat(LLVMMatchType<0>, !add(nf, -1))),
                  !listconcat(!listsplat(LLVMMatchType<0>, nf),
                              [LLVMPointerToElt<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_anyint_ty]),
                  [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic;
  // For unit stride fault-only-first segment load
  // Input: (pointer, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  class RISCVUSSegLoadFF<int nf>
      : Intrinsic<!listconcat([llvm_anyvector_ty],
                              !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                              [llvm_anyint_ty]),
                  [LLVMPointerToElt<0>, LLVMMatchType<1>],
                  [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;
  // For unit stride fault-only-first segment load with mask
  // Input: (maskedoff, pointer, mask, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  class RISCVUSSegLoadFFMask<int nf>
      : Intrinsic<!listconcat([llvm_anyvector_ty],
                              !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                              [llvm_anyint_ty]),
                  !listconcat(!listsplat(LLVMMatchType<0>, nf),
                              [LLVMPointerToElt<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               LLVMMatchType<1>]),
                  [NoCapture<ArgIndex<nf>>]>, RISCVVIntrinsic;
  // For stride segment load
  // Input: (pointer, offset, vl)
  class RISCVSSegLoad<int nf>
      : Intrinsic<!listconcat([llvm_anyvector_ty],
                              !listsplat(LLVMMatchType<0>, !add(nf, -1))),
                  [LLVMPointerToElt<0>, llvm_anyint_ty, LLVMMatchType<1>],
                  [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
  // For stride segment load with mask
  // Input: (maskedoff, pointer, offset, mask, vl)
  class RISCVSSegLoadMask<int nf>
      : Intrinsic<!listconcat([llvm_anyvector_ty],
                              !listsplat(LLVMMatchType<0>, !add(nf, -1))),
                  !listconcat(!listsplat(LLVMMatchType<0>, nf),
                              [LLVMPointerToElt<0>,
                               llvm_anyint_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               LLVMMatchType<1>]),
                  [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic;
  // For indexed segment load
  // Input: (pointer, index, vl)
  class RISCVISegLoad<int nf>
      : Intrinsic<!listconcat([llvm_anyvector_ty],
                              !listsplat(LLVMMatchType<0>, !add(nf, -1))),
                  [LLVMPointerToElt<0>, llvm_anyvector_ty, llvm_anyint_ty],
                  [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
  // For indexed segment load with mask
  // Input: (maskedoff, pointer, index, mask, vl)
  class RISCVISegLoadMask<int nf>
      : Intrinsic<!listconcat([llvm_anyvector_ty],
                              !listsplat(LLVMMatchType<0>, !add(nf, -1))),
                  !listconcat(!listsplat(LLVMMatchType<0>, nf),
                              [LLVMPointerToElt<0>,
                               llvm_anyvector_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_anyint_ty]),
                  [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic;
  // For unit stride segment store
  // Input: (value, pointer, vl)
  class RISCVUSSegStore<int nf>
      : Intrinsic<[],
                  !listconcat([llvm_anyvector_ty],
                              !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                              [LLVMPointerToElt<0>, llvm_anyint_ty]),
                  [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For unit stride segment store with mask
  // Input: (value, pointer, mask, vl)
  class RISCVUSSegStoreMask<int nf>
      : Intrinsic<[],
                  !listconcat([llvm_anyvector_ty],
                              !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                              [LLVMPointerToElt<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_anyint_ty]),
                  [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For stride segment store
  // Input: (value, pointer, offset, vl)
  class RISCVSSegStore<int nf>
      : Intrinsic<[],
                  !listconcat([llvm_anyvector_ty],
                              !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                              [LLVMPointerToElt<0>, llvm_anyint_ty,
                               LLVMMatchType<1>]),
                  [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For stride segment store with mask
  // Input: (value, pointer, offset, mask, vl)
  class RISCVSSegStoreMask<int nf>
      : Intrinsic<[],
                  !listconcat([llvm_anyvector_ty],
                              !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                              [LLVMPointerToElt<0>, llvm_anyint_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               LLVMMatchType<1>]),
                  [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For indexed segment store
  // Input: (value, pointer, index, vl)
  class RISCVISegStore<int nf>
      : Intrinsic<[],
                  !listconcat([llvm_anyvector_ty],
                              !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                              [LLVMPointerToElt<0>, llvm_anyvector_ty,
                               llvm_anyint_ty]),
                  [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For indexed segment store with mask
  // Input: (value, pointer, index, mask, vl)
  class RISCVISegStoreMask<int nf>
      : Intrinsic<[],
                  !listconcat([llvm_anyvector_ty],
                              !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                              [LLVMPointerToElt<0>, llvm_anyvector_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_anyint_ty]),
                  [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
  multiclass RISCVUSLoad {
    def "int_riscv_" # NAME : RISCVUSLoad;
    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
  }
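  // Illustrative: TableGen substitutes the defm name for NAME, so
  // 'defm vle : RISCVUSLoad;' below produces records int_riscv_vle and
  // int_riscv_vle_mask, i.e. the intrinsics llvm.riscv.vle and
  // llvm.riscv.vle.mask.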
  multiclass RISCVUSLoadFF {
    def "int_riscv_" # NAME : RISCVUSLoadFF;
    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadFFMask;
  }
  multiclass RISCVSLoad {
    def "int_riscv_" # NAME : RISCVSLoad;
    def "int_riscv_" # NAME # "_mask" : RISCVSLoadMask;
  }
  multiclass RISCVILoad {
    def "int_riscv_" # NAME : RISCVILoad;
    def "int_riscv_" # NAME # "_mask" : RISCVILoadMask;
  }
  multiclass RISCVUSStore {
    def "int_riscv_" # NAME : RISCVUSStore;
    def "int_riscv_" # NAME # "_mask" : RISCVUSStoreMask;
  }
  multiclass RISCVSStore {
    def "int_riscv_" # NAME : RISCVSStore;
    def "int_riscv_" # NAME # "_mask" : RISCVSStoreMask;
  }
  multiclass RISCVIStore {
    def "int_riscv_" # NAME : RISCVIStore;
    def "int_riscv_" # NAME # "_mask" : RISCVIStoreMask;
  }
  multiclass RISCVUnaryAA {
    def "int_riscv_" # NAME : RISCVUnaryAANoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMask;
  }
  multiclass RISCVUnaryAB {
    def "int_riscv_" # NAME : RISCVUnaryABNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryABMask;
  }
  // AAX means the destination type(A) is the same as the first source
  // type(A). X means any type for the second source operand.
  multiclass RISCVBinaryAAX {
    def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMask;
  }
  // ABX means the destination type(A) is different from the first source
  // type(B). X means any type for the second source operand.
  multiclass RISCVBinaryABX {
    def "int_riscv_" # NAME : RISCVBinaryABXNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMask;
  }
  multiclass RISCVBinaryWithV0 {
    def "int_riscv_" # NAME : RISCVBinaryWithV0;
  }
  multiclass RISCVBinaryMaskOutWithV0 {
    def "int_riscv_" # NAME : RISCVBinaryMOutWithV0;
  }
  multiclass RISCVBinaryMaskOut {
    def "int_riscv_" # NAME : RISCVBinaryMOut;
  }
  multiclass RISCVSaturatingBinaryAAX {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMask;
  }
  multiclass RISCVSaturatingBinaryABX {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryABXNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABXMask;
  }
  multiclass RISCVTernaryAAAX {
    def "int_riscv_" # NAME : RISCVTernaryAAAXNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAAXMask;
  }
  multiclass RISCVTernaryAAXA {
    def "int_riscv_" # NAME : RISCVTernaryAAXANoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMask;
  }
  multiclass RISCVCompare {
    def "int_riscv_" # NAME : RISCVCompareNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVCompareMask;
  }
  multiclass RISCVClassify {
    def "int_riscv_" # NAME : RISCVClassifyNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVClassifyMask;
  }
  multiclass RISCVTernaryWide {
    def "int_riscv_" # NAME : RISCVTernaryWideNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMask;
  }
  multiclass RISCVReduction {
    def "int_riscv_" # NAME : RISCVReductionNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVReductionMask;
  }
  multiclass RISCVMaskUnarySOut {
    def "int_riscv_" # NAME : RISCVMaskUnarySOutNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVMaskUnarySOutMask;
  }
  multiclass RISCVMaskUnaryMOut {
    def "int_riscv_" # NAME : RISCVUnaryNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVMaskUnaryMOutMask;
  }
  multiclass RISCVConversion {
    def "int_riscv_" # NAME : RISCVConversionNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVConversionMask;
  }
  multiclass RISCVAMO {
    def "int_riscv_" # NAME : RISCVAMONoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVAMOMask;
  }
  multiclass RISCVUSSegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMask<nf>;
  }
  multiclass RISCVUSSegLoadFF<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegLoadFF<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadFFMask<nf>;
  }
  multiclass RISCVSSegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVSSegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVSSegLoadMask<nf>;
  }
  multiclass RISCVISegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVISegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVISegLoadMask<nf>;
  }
  multiclass RISCVUSSegStore<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMask<nf>;
  }
  multiclass RISCVSSegStore<int nf> {
    def "int_riscv_" # NAME : RISCVSSegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVSSegStoreMask<nf>;
  }
  multiclass RISCVISegStore<int nf> {
    def "int_riscv_" # NAME : RISCVISegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVISegStoreMask<nf>;
  }
  defm vle : RISCVUSLoad;
  defm vleff : RISCVUSLoadFF;
  defm vse : RISCVUSStore;
  defm vlse : RISCVSLoad;
  defm vsse : RISCVSStore;
  defm vluxei : RISCVILoad;
  defm vloxei : RISCVILoad;
  defm vsoxei : RISCVIStore;
  defm vsuxei : RISCVIStore;

  def int_riscv_vle1 : RISCVUSLoad;
  def int_riscv_vse1 : RISCVUSStore;
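
  // Illustrative use of the fault-only-first load, whose extra result is the
  // updated VL (assumed types: <vscale x 2 x i32> data, i64 VL):
  //   %r = call { <vscale x 2 x i32>, i64 }
  //       @llvm.riscv.vleff.nxv2i32.i64(<vscale x 2 x i32>* %p, i64 %avl)
  //   %data  = extractvalue { <vscale x 2 x i32>, i64 } %r, 0
  //   %newvl = extractvalue { <vscale x 2 x i32>, i64 } %r, 1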
  defm vamoswap : RISCVAMO;
  defm vamoadd : RISCVAMO;
  defm vamoxor : RISCVAMO;
  defm vamoand : RISCVAMO;
  defm vamoor : RISCVAMO;
  defm vamomin : RISCVAMO;
  defm vamomax : RISCVAMO;
  defm vamominu : RISCVAMO;
  defm vamomaxu : RISCVAMO;

  defm vadd : RISCVBinaryAAX;
  defm vsub : RISCVBinaryAAX;
  defm vrsub : RISCVBinaryAAX;
  defm vwaddu : RISCVBinaryABX;
  defm vwadd : RISCVBinaryABX;
  defm vwaddu_w : RISCVBinaryAAX;
  defm vwadd_w : RISCVBinaryAAX;
  defm vwsubu : RISCVBinaryABX;
  defm vwsub : RISCVBinaryABX;
  defm vwsubu_w : RISCVBinaryAAX;
  defm vwsub_w : RISCVBinaryAAX;
  defm vzext : RISCVUnaryAB;
  defm vsext : RISCVUnaryAB;
  defm vadc : RISCVBinaryWithV0;
  defm vmadc_carry_in : RISCVBinaryMaskOutWithV0;
  defm vmadc : RISCVBinaryMaskOut;
  defm vsbc : RISCVBinaryWithV0;
  defm vmsbc_borrow_in : RISCVBinaryMaskOutWithV0;
  defm vmsbc : RISCVBinaryMaskOut;
  defm vand : RISCVBinaryAAX;
  defm vor : RISCVBinaryAAX;
  defm vxor : RISCVBinaryAAX;
  defm vsll : RISCVBinaryAAX;
  defm vsrl : RISCVBinaryAAX;
  defm vsra : RISCVBinaryAAX;
  defm vnsrl : RISCVBinaryABX;
  defm vnsra : RISCVBinaryABX;
  defm vmseq : RISCVCompare;
  defm vmsne : RISCVCompare;
  defm vmsltu : RISCVCompare;
  defm vmslt : RISCVCompare;
  defm vmsleu : RISCVCompare;
  defm vmsle : RISCVCompare;
  defm vmsgtu : RISCVCompare;
  defm vmsgt : RISCVCompare;
  defm vminu : RISCVBinaryAAX;
  defm vmin : RISCVBinaryAAX;
  defm vmaxu : RISCVBinaryAAX;
  defm vmax : RISCVBinaryAAX;
  defm vmul : RISCVBinaryAAX;
  defm vmulh : RISCVBinaryAAX;
  defm vmulhu : RISCVBinaryAAX;
  defm vmulhsu : RISCVBinaryAAX;
  defm vdivu : RISCVBinaryAAX;
  defm vdiv : RISCVBinaryAAX;
  defm vremu : RISCVBinaryAAX;
  defm vrem : RISCVBinaryAAX;
  defm vwmul : RISCVBinaryABX;
  defm vwmulu : RISCVBinaryABX;
  defm vwmulsu : RISCVBinaryABX;
  defm vmacc : RISCVTernaryAAXA;
  defm vnmsac : RISCVTernaryAAXA;
  defm vmadd : RISCVTernaryAAXA;
  defm vnmsub : RISCVTernaryAAXA;
  defm vwmaccu : RISCVTernaryWide;
  defm vwmacc : RISCVTernaryWide;
  defm vwmaccus : RISCVTernaryWide;
  defm vwmaccsu : RISCVTernaryWide;
  defm vfadd : RISCVBinaryAAX;
  defm vfsub : RISCVBinaryAAX;
  defm vfrsub : RISCVBinaryAAX;
  defm vfwadd : RISCVBinaryABX;
  defm vfwsub : RISCVBinaryABX;
  defm vfwadd_w : RISCVBinaryAAX;
  defm vfwsub_w : RISCVBinaryAAX;
  defm vsaddu : RISCVSaturatingBinaryAAX;
  defm vsadd : RISCVSaturatingBinaryAAX;
  defm vssubu : RISCVSaturatingBinaryAAX;
  defm vssub : RISCVSaturatingBinaryAAX;

  def int_riscv_vmerge : RISCVBinaryWithV0;

  def int_riscv_vmv_v_v : Intrinsic<[llvm_anyvector_ty],
                                    [LLVMMatchType<0>, llvm_anyint_ty],
                                    [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vmv_v_x : Intrinsic<[llvm_anyint_ty],
                                    [LLVMVectorElementType<0>, llvm_anyint_ty],
                                    [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 1;
  }
  def int_riscv_vfmv_v_f : Intrinsic<[llvm_anyfloat_ty],
                                     [LLVMVectorElementType<0>, llvm_anyint_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vmv_x_s : Intrinsic<[LLVMVectorElementType<0>],
                                    [llvm_anyint_ty],
                                    [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vmv_s_x : Intrinsic<[llvm_anyint_ty],
                                    [LLVMMatchType<0>, LLVMVectorElementType<0>,
                                     llvm_anyint_ty],
                                    [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 2;
  }
  def int_riscv_vfmv_f_s : Intrinsic<[LLVMVectorElementType<0>],
                                     [llvm_anyfloat_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vfmv_s_f : Intrinsic<[llvm_anyfloat_ty],
                                     [LLVMMatchType<0>, LLVMVectorElementType<0>,
                                      llvm_anyint_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic;
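
  // Illustrative IR for the scalar moves (assumed vector type
  // <vscale x 2 x i32>, XLen = 64):
  //   %s = call i32 @llvm.riscv.vmv.x.s.nxv2i32(<vscale x 2 x i32> %v)
  //   %w = call <vscale x 2 x i32>
  //       @llvm.riscv.vmv.s.x.nxv2i32.i64(<vscale x 2 x i32> %v, i32 %s, i64 %vl)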
  defm vfmul : RISCVBinaryAAX;
  defm vfdiv : RISCVBinaryAAX;
  defm vfrdiv : RISCVBinaryAAX;
  defm vfwmul : RISCVBinaryABX;
  defm vfmacc : RISCVTernaryAAXA;
  defm vfnmacc : RISCVTernaryAAXA;
  defm vfmsac : RISCVTernaryAAXA;
  defm vfnmsac : RISCVTernaryAAXA;
  defm vfmadd : RISCVTernaryAAXA;
  defm vfnmadd : RISCVTernaryAAXA;
  defm vfmsub : RISCVTernaryAAXA;
  defm vfnmsub : RISCVTernaryAAXA;
  defm vfwmacc : RISCVTernaryWide;
  defm vfwnmacc : RISCVTernaryWide;
  defm vfwmsac : RISCVTernaryWide;
  defm vfwnmsac : RISCVTernaryWide;
  defm vfsqrt : RISCVUnaryAA;
  defm vfrsqrt7 : RISCVUnaryAA;
  defm vfrec7 : RISCVUnaryAA;
  defm vfmin : RISCVBinaryAAX;
  defm vfmax : RISCVBinaryAAX;
  defm vfsgnj : RISCVBinaryAAX;
  defm vfsgnjn : RISCVBinaryAAX;
  defm vfsgnjx : RISCVBinaryAAX;
  defm vfclass : RISCVClassify;
  defm vfmerge : RISCVBinaryWithV0;
  defm vslideup : RISCVTernaryAAAX;
  defm vslidedown : RISCVTernaryAAAX;
  defm vslide1up : RISCVBinaryAAX;
  defm vslide1down : RISCVBinaryAAX;
  defm vfslide1up : RISCVBinaryAAX;
  defm vfslide1down : RISCVBinaryAAX;
  defm vrgather : RISCVBinaryAAX;
  defm vrgatherei16 : RISCVBinaryAAX;

  def int_riscv_vcompress : RISCVBinaryAAAMask;

  defm vaaddu : RISCVSaturatingBinaryAAX;
  defm vaadd : RISCVSaturatingBinaryAAX;
  defm vasubu : RISCVSaturatingBinaryAAX;
  defm vasub : RISCVSaturatingBinaryAAX;
  defm vsmul : RISCVSaturatingBinaryAAX;
  defm vssrl : RISCVSaturatingBinaryAAX;
  defm vssra : RISCVSaturatingBinaryAAX;
  defm vnclipu : RISCVSaturatingBinaryABX;
  defm vnclip : RISCVSaturatingBinaryABX;
  defm vmfeq : RISCVCompare;
  defm vmfne : RISCVCompare;
  defm vmflt : RISCVCompare;
  defm vmfle : RISCVCompare;
  defm vmfgt : RISCVCompare;
  defm vmfge : RISCVCompare;
  defm vredsum : RISCVReduction;
  defm vredand : RISCVReduction;
  defm vredor : RISCVReduction;
  defm vredxor : RISCVReduction;
  defm vredminu : RISCVReduction;
  defm vredmin : RISCVReduction;
  defm vredmaxu : RISCVReduction;
  defm vredmax : RISCVReduction;
  defm vwredsumu : RISCVReduction;
  defm vwredsum : RISCVReduction;
  defm vfredosum : RISCVReduction;
  defm vfredsum : RISCVReduction;
  defm vfredmin : RISCVReduction;
  defm vfredmax : RISCVReduction;
  defm vfwredsum : RISCVReduction;
  defm vfwredosum : RISCVReduction;

  def int_riscv_vmand : RISCVBinaryAAANoMask;
  def int_riscv_vmnand : RISCVBinaryAAANoMask;
  def int_riscv_vmandnot : RISCVBinaryAAANoMask;
  def int_riscv_vmxor : RISCVBinaryAAANoMask;
  def int_riscv_vmor : RISCVBinaryAAANoMask;
  def int_riscv_vmnor : RISCVBinaryAAANoMask;
  def int_riscv_vmornot : RISCVBinaryAAANoMask;
  def int_riscv_vmxnor : RISCVBinaryAAANoMask;
  def int_riscv_vmclr : RISCVNullaryIntrinsic;
  def int_riscv_vmset : RISCVNullaryIntrinsic;

  defm vpopc : RISCVMaskUnarySOut;
  defm vfirst : RISCVMaskUnarySOut;
  defm vmsbf : RISCVMaskUnaryMOut;
  defm vmsof : RISCVMaskUnaryMOut;
  defm vmsif : RISCVMaskUnaryMOut;

  defm vfcvt_xu_f_v : RISCVConversion;
  defm vfcvt_x_f_v : RISCVConversion;
  defm vfcvt_rtz_xu_f_v : RISCVConversion;
  defm vfcvt_rtz_x_f_v : RISCVConversion;
  defm vfcvt_f_xu_v : RISCVConversion;
  defm vfcvt_f_x_v : RISCVConversion;
  defm vfwcvt_f_xu_v : RISCVConversion;
  defm vfwcvt_f_x_v : RISCVConversion;
  defm vfwcvt_xu_f_v : RISCVConversion;
  defm vfwcvt_x_f_v : RISCVConversion;
  defm vfwcvt_rtz_xu_f_v : RISCVConversion;
  defm vfwcvt_rtz_x_f_v : RISCVConversion;
  defm vfwcvt_f_f_v : RISCVConversion;
  defm vfncvt_f_xu_w : RISCVConversion;
  defm vfncvt_f_x_w : RISCVConversion;
  defm vfncvt_xu_f_w : RISCVConversion;
  defm vfncvt_x_f_w : RISCVConversion;
  defm vfncvt_rtz_xu_f_w : RISCVConversion;
  defm vfncvt_rtz_x_f_w : RISCVConversion;
  defm vfncvt_f_f_w : RISCVConversion;
  defm vfncvt_rod_f_f_w : RISCVConversion;

  // Output: (vector)
  // Input: (mask type input, vl)
  def int_riscv_viota : Intrinsic<[llvm_anyvector_ty],
                                  [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                   llvm_anyint_ty],
                                  [IntrNoMem]>, RISCVVIntrinsic;
  // Output: (vector)
  // Input: (maskedoff, mask type vector_in, mask, vl)
  def int_riscv_viota_mask : Intrinsic<[llvm_anyvector_ty],
                                       [LLVMMatchType<0>,
                                        LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                        LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                        llvm_anyint_ty],
                                       [IntrNoMem]>, RISCVVIntrinsic;
  // Output: (vector)
  // Input: (vl)
  def int_riscv_vid : RISCVNullaryIntrinsic;
  // Output: (vector)
  // Input: (maskedoff, mask, vl)
  def int_riscv_vid_mask : Intrinsic<[llvm_anyvector_ty],
                                     [LLVMMatchType<0>,
                                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                      llvm_anyint_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic;

  foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
    defm vlseg # nf : RISCVUSSegLoad<nf>;
    defm vlseg # nf # ff : RISCVUSSegLoadFF<nf>;
    defm vlsseg # nf : RISCVSSegLoad<nf>;
    defm vloxseg # nf : RISCVISegLoad<nf>;
    defm vluxseg # nf : RISCVISegLoad<nf>;
    defm vsseg # nf : RISCVUSSegStore<nf>;
    defm vssseg # nf : RISCVSSegStore<nf>;
    defm vsoxseg # nf : RISCVISegStore<nf>;
    defm vsuxseg # nf : RISCVISegStore<nf>;
  }
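  // Illustrative: the foreach expands each defm for nf = 2..8; e.g. nf = 2
  // yields int_riscv_vlseg2, int_riscv_vlseg2_mask, int_riscv_vlseg2ff,
  // int_riscv_vlseg2ff_mask, int_riscv_vlsseg2, and so on through
  // int_riscv_vsuxseg2_mask.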
} // TargetPrefix = "riscv"