//===-- RISCVInstrInfoV.td - RISC-V 'V' instructions -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file describes the RISC-V instructions from the standard 'V' Vector
/// extension, version 1.0.
///
//===----------------------------------------------------------------------===//

include "RISCVInstrFormatsV.td"

//===----------------------------------------------------------------------===//
// Operand and SDNode transformation definitions.
//===----------------------------------------------------------------------===//
// Assembly operand class for the vtype immediate. VTypeINum is the bit-width
// of the immediate encoding.
class VTypeIAsmOperand<int VTypeINum> : AsmOperandClass {
  let Name = "VTypeI" # VTypeINum;
  let ParserMethod = "parseVTypeI";
  let DiagnosticType = "InvalidVTypeI";
  let RenderMethod = "addVTypeIOperands";
}

// MC/CodeGen operand for a VTypeINum-bit vtype immediate.
class VTypeIOp<int VTypeINum> : Operand<XLenVT> {
  let ParserMatchClass = VTypeIAsmOperand<VTypeINum>;
  let PrintMethod = "printVTypeI";
  let DecoderMethod = "decodeUImmOperand<"#VTypeINum#">";
  let OperandType = "OPERAND_VTYPEI" # VTypeINum;
  let OperandNamespace = "RISCVOp";
  // Accept either a constant that fits in VTypeINum bits or a bare symbol
  // reference (resolved later by a fixup).
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isUInt<VTypeINum>(Imm);
    return MCOp.isBareSymbolRef();
  }];
}

def VTypeIOp10 : VTypeIOp<10>;
def VTypeIOp11 : VTypeIOp<11>;
// Assembly operand class for the vector mask register. The operand is
// optional in assembly (IsOptional = 1); when it is omitted the parser
// supplies a default via defaultMaskRegOp. Only v0 is accepted (isV0Reg).
def VMaskAsmOperand : AsmOperandClass {
  let Name = "RVVMaskRegOpOperand";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isV0Reg";
  let ParserMethod = "parseMaskReg";
  let IsOptional = 1;
  let DefaultMethod = "defaultMaskRegOp";
  let DiagnosticType = "InvalidVMaskRegister";
}

// Register operand for the optional trailing mask ($vm) of masked vector
// instructions, constrained to the VMV0 register class.
def VMaskOp : RegisterOperand<VMV0> {
  let ParserMatchClass = VMaskAsmOperand;
  let PrintMethod = "printVMaskReg";
  let EncoderMethod = "getVMaskReg";
  let DecoderMethod = "decodeVMaskReg";
}
// simm5: 5-bit signed immediate in [-16, 15], used as the default operand
// type of the .vi instruction forms below.
def simm5 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isInt<5>(Imm);}]> {
  let ParserMatchClass = SImmAsmOperand<5>;
  let EncoderMethod = "getImmOpValue";
  let DecoderMethod = "decodeSImmOperand<5>";
  let OperandType = "OPERAND_SIMM5";
  let OperandNamespace = "RISCVOp";
  // Accept a constant in the simm5 range or a bare symbol reference.
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isInt<5>(Imm);
    return MCOp.isBareSymbolRef();
  }];
}

def SImm5Plus1AsmOperand : AsmOperandClass {
  let Name = "SImm5Plus1";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidSImm5Plus1";
}

// simm5_plus1: immediate in [-15, 16] — the simm5 range shifted up by one.
def simm5_plus1 : Operand<XLenVT>, ImmLeaf<XLenVT,
    [{return (isInt<5>(Imm) && Imm != -16) || Imm == 16;}]> {
  let ParserMatchClass = SImm5Plus1AsmOperand;
  let OperandType = "OPERAND_SIMM5_PLUS1";
  let OperandNamespace = "RISCVOp";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return (isInt<5>(Imm) && Imm != -16) || Imm == 16;
    return MCOp.isBareSymbolRef();
  }];
}

// Same range as simm5_plus1 but additionally excluding zero.
def simm5_plus1_nonzero : ImmLeaf<XLenVT,
  [{return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);}]>;
//===----------------------------------------------------------------------===//
// Scheduling definitions.
//===----------------------------------------------------------------------===//

// The classes below attach scheduling information by assembling
// SchedReadWrite resource names from their parameters:
//   n  - element width (EEW) in bits
//   o  - "U" (unordered) or "O" (ordered) for indexed accesses
//   mx - LMUL suffix string appended after "_"

// Whole-register move (vmv<n>r.v).
class VMVRSched<int n> : Sched<[
  !cast<SchedReadWrite>("WriteVMov" #n #"V"),
  !cast<SchedReadWrite>("ReadVMov" #n #"V")
]>;

// Unit-stride load: reads the base address and the mask.
class VLESched<string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVLDE_" #mx),
  !cast<SchedReadWrite>("ReadVLDX_" #mx), ReadVMask
]>;

// Unit-stride store: reads the store data, base address and mask.
class VSESched<string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVSTE_" #mx),
  !cast<SchedReadWrite>("ReadVSTEV_" #mx),
  !cast<SchedReadWrite>("ReadVSTX_" #mx), ReadVMask
]>;

// Strided load: additionally reads the stride register.
class VLSSched<int n, string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVLDS" #n #"_" #mx),
  !cast<SchedReadWrite>("ReadVLDX_" #mx),
  !cast<SchedReadWrite>("ReadVLDSX_" #mx), ReadVMask
]>;

// Strided store.
class VSSSched<int n, string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVSTS" #n #"_" #mx),
  !cast<SchedReadWrite>("ReadVSTS" #n #"V_" #mx),
  !cast<SchedReadWrite>("ReadVSTX_" #mx),
  !cast<SchedReadWrite>("ReadVSTSX_" #mx), ReadVMask
]>;

// Indexed load: reads the base address and the index vector.
class VLXSched<int n, string o, string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVLD" #o #"X" #n #"_" #mx),
  !cast<SchedReadWrite>("ReadVLDX_" #mx),
  !cast<SchedReadWrite>("ReadVLD" #o #"XV_" #mx), ReadVMask
]>;

// Indexed store.
class VSXSched<int n, string o, string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVST" #o #"X" #n #"_" #mx),
  !cast<SchedReadWrite>("ReadVST" #o #"X" #n #"_" #mx),
  !cast<SchedReadWrite>("ReadVSTX_" #mx),
  !cast<SchedReadWrite>("ReadVST" #o #"XV_" #mx), ReadVMask
]>;

// Unit-stride fault-only-first load.
class VLFSched<string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVLDFF_" #mx),
  !cast<SchedReadWrite>("ReadVLDX_" #mx), ReadVMask
]>;
// Segment-access scheduling classes. Parameters follow the same convention as
// above; nf is the number of fields per segment and eew the element width.

// Unit-Stride Segment Loads and Stores
class VLSEGSched<int nf, int eew, string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVLSEG" #nf #"e" #eew #"_" #mx),
  !cast<SchedReadWrite>("ReadVLDX_" #mx), ReadVMask
]>;
class VSSEGSched<int nf, int eew, string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVSSEG" #nf #"e" #eew #"_" #mx),
  !cast<SchedReadWrite>("ReadVSTEV_" #mx),
  !cast<SchedReadWrite>("ReadVSTX_" #mx), ReadVMask
]>;
// Fault-only-first segment load.
class VLSEGFFSched<int nf, int eew, string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVLSEGFF" #nf #"e" #eew #"_" #mx),
  !cast<SchedReadWrite>("ReadVLDX_" #mx), ReadVMask
]>;

// Strided Segment Loads and Stores
class VLSSEGSched<int nf, int eew, string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVLSSEG" #nf #"e" #eew #"_" #mx),
  !cast<SchedReadWrite>("ReadVLDX_" #mx),
  !cast<SchedReadWrite>("ReadVLDSX_" #mx), ReadVMask
]>;
class VSSSEGSched<int nf, int eew, string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVSSSEG" #nf #"e" #eew #"_" #mx),
  !cast<SchedReadWrite>("ReadVSTS" #eew #"V" #"_" #mx),
  !cast<SchedReadWrite>("ReadVSTX_" #mx),
  !cast<SchedReadWrite>("ReadVSTSX_" #mx), ReadVMask
]>;

// Indexed Segment Loads and Stores
class VLXSEGSched<int nf, int eew, string o, string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVL" #o #"XSEG" #nf #"e" #eew #"_" #mx),
  !cast<SchedReadWrite>("ReadVLDX_" #mx),
  !cast<SchedReadWrite>("ReadVLD" #o #"XV" #"_" #mx), ReadVMask
]>;
class VSXSEGSched<int nf, int eew, string o, string mx> : Sched<[
  !cast<SchedReadWrite>("WriteVS" #o #"XSEG" #nf #"e" #eew #"_" #mx),
  !cast<SchedReadWrite>("ReadVST" #o #"X" #eew # "_" # mx),
  !cast<SchedReadWrite>("ReadVSTX_" #mx),
  !cast<SchedReadWrite>("ReadVST" #o #"XV" # "_" # mx), ReadVMask
]>;
//===----------------------------------------------------------------------===//
// Instruction class templates
//===----------------------------------------------------------------------===//

// Vector load instruction formats: memory readers with no other side effects.
let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
// unit-stride load vd, (rs1), vm
class VUnitStrideLoad<RISCVWidth width, string opcodestr>
    : RVInstVLU<0b000, width.Value{3}, LUMOPUnitStride, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMem:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

// Whole-register and mask loads are always unmasked (vm = 1) and carry no
// destination-overlap constraint.
let vm = 1, RVVConstraint = NoConstraint in {
// unit-stride whole register load vl<nf>r.v vd, (rs1)
class VWholeLoad<bits<3> nf, RISCVWidth width, string opcodestr, RegisterClass VRC>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideWholeReg,
                width.Value{2-0}, (outs VRC:$vd), (ins GPRMem:$rs1),
                opcodestr, "$vd, (${rs1})"> {
  // Drop any implicit uses inherited from the base class.
  let Uses = [];
}

// unit-stride mask load vd, (rs1)
// Mask loads always use EEW=8 encoding (LSWidth8).
class VUnitStrideLoadMask<string opcodestr>
    : RVInstVLU<0b000, LSWidth8.Value{3}, LUMOPUnitStrideMask, LSWidth8.Value{2-0},
                (outs VR:$vd),
                (ins GPRMem:$rs1), opcodestr, "$vd, (${rs1})">;
} // vm = 1, RVVConstraint = NoConstraint

// unit-stride fault-only-first load vd, (rs1), vm
class VUnitStrideLoadFF<RISCVWidth width, string opcodestr>
    : RVInstVLU<0b000, width.Value{3}, LUMOPUnitStrideFF, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMem:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

// strided load vd, (rs1), rs2, vm
class VStridedLoad<RISCVWidth width, string opcodestr>
    : RVInstVLS<0b000, width.Value{3}, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMem:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $rs2$vm">;

// indexed load vd, (rs1), vs2, vm
class VIndexedLoad<RISCVMOP mop, RISCVWidth width, string opcodestr>
    : RVInstVLX<0b000, width.Value{3}, mop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMem:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $vs2$vm">;

// Segment variants of the loads above: nf encodes the number of fields.

// unit-stride segment load vd, (rs1), vm
class VUnitStrideSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStride, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMem:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

// segment fault-only-first load vd, (rs1), vm
class VUnitStrideSegmentLoadFF<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideFF, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMem:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

// strided segment load vd, (rs1), rs2, vm
class VStridedSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLS<nf, width.Value{3}, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMem:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $rs2$vm">;

// indexed segment load vd, (rs1), vs2, vm
class VIndexedSegmentLoad<bits<3> nf, RISCVMOP mop, RISCVWidth width,
                          string opcodestr>
    : RVInstVLX<nf, width.Value{3}, mop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMem:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 1, mayStore = 0
// Vector store instruction formats: memory writers with no other side
// effects.
let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
// unit-stride store vd, vs3, (rs1), vm
class VUnitStrideStore<RISCVWidth width, string opcodestr>
    : RVInstVSU<0b000, width.Value{3}, SUMOPUnitStride, width.Value{2-0},
                (outs), (ins VR:$vs3, GPRMem:$rs1, VMaskOp:$vm), opcodestr,
                "$vs3, (${rs1})$vm">;

// Whole-register and mask stores are always unmasked.
let vm = 1 in {
// vs<nf>r.v vd, (rs1)
// Note: unlike VWholeLoad, the width bits are hard-coded (0 / 0b000).
class VWholeStore<bits<3> nf, string opcodestr, RegisterClass VRC>
    : RVInstVSU<nf, 0, SUMOPUnitStrideWholeReg,
                0b000, (outs), (ins VRC:$vs3, GPRMem:$rs1),
                opcodestr, "$vs3, (${rs1})"> {
  // Drop any implicit uses inherited from the base class.
  let Uses = [];
}

// unit-stride mask store vd, vs3, (rs1)
// Mask stores always use EEW=8 encoding (LSWidth8).
class VUnitStrideStoreMask<string opcodestr>
    : RVInstVSU<0b000, LSWidth8.Value{3}, SUMOPUnitStrideMask, LSWidth8.Value{2-0},
                (outs), (ins VR:$vs3, GPRMem:$rs1), opcodestr,
                "$vs3, (${rs1})">;
} // vm = 1

// strided store vd, vs3, (rs1), rs2, vm
class VStridedStore<RISCVWidth width, string opcodestr>
    : RVInstVSS<0b000, width.Value{3}, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPRMem:$rs1, GPR:$rs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $rs2$vm">;

// indexed store vd, vs3, (rs1), vs2, vm
class VIndexedStore<RISCVMOP mop, RISCVWidth width, string opcodestr>
    : RVInstVSX<0b000, width.Value{3}, mop, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPRMem:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $vs2$vm">;

// Segment variants: nf encodes the number of fields.

// segment store vd, vs3, (rs1), vm
class VUnitStrideSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVSU<nf, width.Value{3}, SUMOPUnitStride, width.Value{2-0},
                (outs), (ins VR:$vs3, GPRMem:$rs1, VMaskOp:$vm), opcodestr,
                "$vs3, (${rs1})$vm">;

// segment store vd, vs3, (rs1), rs2, vm
class VStridedSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVSS<nf, width.Value{3}, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPRMem:$rs1, GPR:$rs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $rs2$vm">;

// segment store vd, vs3, (rs1), vs2, vm
class VIndexedSegmentStore<bits<3> nf, RISCVMOP mop, RISCVWidth width,
                           string opcodestr>
    : RVInstVSX<nf, width.Value{3}, mop, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPRMem:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 1
// Vector arithmetic instruction formats: pure computation, no memory access.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
// op vd, vs2, vs1, vm
class VALUVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, VR:$vs1, VMaskOp:$vm),
               opcodestr, "$vd, $vs2, $vs1$vm">;

// op vd, vs2, vs1, v0 (without mask, use v0 as carry input)
class VALUmVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, VR:$vs1, VMV0:$v0),
               opcodestr, "$vd, $vs2, $vs1, v0"> {
  // vm = 0 selects the carry form of the encoding.
  let vm = 0;
}

// op vd, vs1, vs2, vm (reverse the order of vs1 and vs2)
class VALUrVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
               (ins VR:$vs1, VR:$vs2, VMaskOp:$vm),
               opcodestr, "$vd, $vs1, $vs2$vm">;

// op vd, vs2, vs1
class VALUVVNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, VR:$vs1),
               opcodestr, "$vd, $vs2, $vs1"> {
  // Always unmasked.
  let vm = 1;
}

// op vd, vs2, rs1, vm
class VALUVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
               opcodestr, "$vd, $vs2, $rs1$vm">;

// op vd, vs2, rs1, v0 (without mask, use v0 as carry input)
class VALUmVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, GPR:$rs1, VMV0:$v0),
               opcodestr, "$vd, $vs2, $rs1, v0"> {
  let vm = 0;
}

// op vd, rs1, vs2, vm (reverse the order of rs1 and vs2)
class VALUrVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
               (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm),
               opcodestr, "$vd, $rs1, $vs2$vm">;

// op vd, vs1, vs2
class VALUVXNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, GPR:$rs1),
               opcodestr, "$vd, $vs2, $rs1"> {
  let vm = 1;
}

// op vd, vs2, imm, vm
class VALUVI<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $imm$vm">;

// op vd, vs2, imm, v0 (without mask, use v0 as carry input)
class VALUmVI<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm, VMV0:$v0),
                opcodestr, "$vd, $vs2, $imm, v0"> {
  let vm = 0;
}

// op vd, vs2, imm, vm
class VALUVINoVm<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm),
                opcodestr, "$vd, $vs2, $imm"> {
  let vm = 1;
}

// op vd, vs2, rs1, vm (Float)
class VALUVF<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, FPR32:$rs1, VMaskOp:$vm),
               opcodestr, "$vd, $vs2, $rs1$vm">;

// op vd, rs1, vs2, vm (Float) (with mask, reverse the order of rs1 and vs2)
class VALUrVF<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
               (ins FPR32:$rs1, VR:$vs2, VMaskOp:$vm),
               opcodestr, "$vd, $rs1, $vs2$vm">;

// op vd, vs2, vm (use vs1 as instruction encoding)
class VALUVs2<bits<6> funct6, bits<5> vs1, RISCVVFormat opv, string opcodestr>
    : RVInstV<funct6, vs1, opv, (outs VR:$vd),
              (ins VR:$vs2, VMaskOp:$vm),
              opcodestr, "$vd, $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
//===----------------------------------------------------------------------===//
// Combination of instruction classes.
// Use these multiclasses to define instructions more easily.
//===----------------------------------------------------------------------===//

// For every element width in EEWList, defines the four indexed memory
// instructions: unordered/ordered load (vluxei/vloxei) and unordered/ordered
// store (vsuxei/vsoxei).
multiclass VIndexLoadStore<list<int> EEWList> {
  foreach n = EEWList in {
    defvar w = !cast<RISCVWidth>("LSWidth" # n);
    def VLUXEI # n # _V :
      VIndexedLoad<MOPLDIndexedUnord, w, "vluxei" # n # ".v">,
      VLXSched<n, "U", UpperBoundLMUL>;
    def VLOXEI # n # _V :
      VIndexedLoad<MOPLDIndexedOrder, w, "vloxei" # n # ".v">,
      VLXSched<n, "O", UpperBoundLMUL>;
    def VSUXEI # n # _V :
      VIndexedStore<MOPSTIndexedUnord, w, "vsuxei" # n # ".v">,
      VSXSched<n, "U", UpperBoundLMUL>;
    def VSOXEI # n # _V :
      VIndexedStore<MOPSTIndexedOrder, w, "vsoxei" # n # ".v">,
      VSXSched<n, "O", UpperBoundLMUL>;
  }
}
// Integer ALU: .vv, .vx and .vi forms.
multiclass VALU_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVIALUV_UpperBound, ReadVIALUV_UpperBound,
                 ReadVIALUV_UpperBound, ReadVMask]>;
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIALUX_UpperBound, ReadVIALUV_UpperBound,
                 ReadVIALUX_UpperBound, ReadVMask]>;
  def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
          Sched<[WriteVIALUI_UpperBound, ReadVIALUV_UpperBound,
                 ReadVMask]>;
}

// Integer ALU: .vv and .vx forms only.
multiclass VALU_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVIALUV_UpperBound, ReadVIALUV_UpperBound,
                 ReadVIALUV_UpperBound, ReadVMask]>;
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIALUX_UpperBound, ReadVIALUV_UpperBound,
                 ReadVIALUX_UpperBound, ReadVMask]>;
}

// Integer ALU: .vx and .vi forms only.
multiclass VALU_IV_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  // NOTE(review): the X def here uses WriteVIALUV_UpperBound where
  // VALU_IV_V_X_I's X def uses WriteVIALUX_UpperBound — confirm intentional.
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIALUV_UpperBound, ReadVIALUV_UpperBound,
                 ReadVIALUX_UpperBound, ReadVMask]>;
  def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
          Sched<[WriteVIALUI_UpperBound, ReadVIALUV_UpperBound,
                 ReadVMask]>;
}

// Widening integer ALU (OPMVV/OPMVX encodings).
multiclass VALU_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVIWALUV_UpperBound, ReadVIWALUV_UpperBound,
                 ReadVIWALUV_UpperBound, ReadVMask]>;
  def X : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIWALUX_UpperBound, ReadVIWALUV_UpperBound,
                 ReadVIWALUX_UpperBound, ReadVMask]>;
}

// Integer multiply-add; uses the operand-reversed (VALUr*) formats.
multiclass VMAC_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVIMulAddV_UpperBound, ReadVIMulAddV_UpperBound,
                 ReadVIMulAddV_UpperBound, ReadVMask]>;
  def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIMulAddX_UpperBound, ReadVIMulAddV_UpperBound,
                 ReadVIMulAddX_UpperBound, ReadVMask]>;
}

// Widening integer multiply-add.
multiclass VWMAC_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVIWMulAddV_UpperBound, ReadVIWMulAddV_UpperBound,
                 ReadVIWMulAddV_UpperBound, ReadVMask]>;
  def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIWMulAddX_UpperBound, ReadVIWMulAddV_UpperBound,
                 ReadVIWMulAddX_UpperBound, ReadVMask]>;
}

// Widening integer multiply-add, scalar form only.
multiclass VWMAC_MV_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIWMulAddX_UpperBound, ReadVIWMulAddV_UpperBound,
                 ReadVIWMulAddX_UpperBound, ReadVMask]>;
}

// Single-source ops (e.g. extensions) where vs1 is a fixed encoding field.
multiclass VALU_MV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>,
           Sched<[WriteVExtV_UpperBound, ReadVExtV_UpperBound, ReadVMask]>;
}
// Carry-in ops (.vvm/.vxm/.vim): unmasked encodings that read v0 as a carry
// input.
multiclass VALUm_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">,
           Sched<[WriteVICALUV_UpperBound, ReadVICALUV_UpperBound,
                  ReadVICALUV_UpperBound, ReadVMask]>;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">,
           Sched<[WriteVICALUX_UpperBound, ReadVICALUV_UpperBound,
                  ReadVICALUX_UpperBound, ReadVMask]>;
  def IM : VALUmVI<funct6, opcodestr # ".vim">,
           Sched<[WriteVICALUI_UpperBound, ReadVICALUV_UpperBound,
                  ReadVMask]>;
}

// Merge ops: same formats as the carry-in ops but with merge scheduling
// resources.
multiclass VMRG_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">,
           Sched<[WriteVIMergeV_UpperBound, ReadVIMergeV_UpperBound,
                  ReadVIMergeV_UpperBound, ReadVMask]>;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">,
           Sched<[WriteVIMergeX_UpperBound, ReadVIMergeV_UpperBound,
                  ReadVIMergeX_UpperBound, ReadVMask]>;
  def IM : VALUmVI<funct6, opcodestr # ".vim">,
           Sched<[WriteVIMergeI_UpperBound, ReadVIMergeV_UpperBound,
                  ReadVMask]>;
}

// Carry-in ops without an immediate form.
multiclass VALUm_IV_V_X<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">,
           Sched<[WriteVICALUV_UpperBound, ReadVICALUV_UpperBound,
                  ReadVICALUV_UpperBound, ReadVMask]>;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">,
           Sched<[WriteVICALUX_UpperBound, ReadVICALUV_UpperBound,
                  ReadVICALUX_UpperBound, ReadVMask]>;
}

// Always-unmasked ops (.vv/.vx/.vi with vm forced to 1, no ReadVMask).
multiclass VALUNoVm_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5> {
  def V : VALUVVNoVm<funct6, OPIVV, opcodestr # ".vv">,
          Sched<[WriteVICALUV_UpperBound, ReadVICALUV_UpperBound,
                 ReadVICALUV_UpperBound]>;
  def X : VALUVXNoVm<funct6, OPIVX, opcodestr # ".vx">,
          Sched<[WriteVICALUX_UpperBound, ReadVICALUV_UpperBound,
                 ReadVICALUX_UpperBound]>;
  def I : VALUVINoVm<funct6, opcodestr # ".vi", optype>,
          Sched<[WriteVICALUI_UpperBound, ReadVICALUV_UpperBound]>;
}

// Always-unmasked ops without an immediate form.
multiclass VALUNoVm_IV_V_X<string opcodestr, bits<6> funct6> {
  def V : VALUVVNoVm<funct6, OPIVV, opcodestr # ".vv">,
          Sched<[WriteVICALUV_UpperBound, ReadVICALUV_UpperBound,
                 ReadVICALUV_UpperBound]>;
  def X : VALUVXNoVm<funct6, OPIVX, opcodestr # ".vx">,
          Sched<[WriteVICALUX_UpperBound, ReadVICALUV_UpperBound,
                 ReadVICALUX_UpperBound]>;
}
  489. multiclass VALU_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  490. def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
  491. Sched<[WriteVFALUV_UpperBound, ReadVFALUV_UpperBound,
  492. ReadVFALUV_UpperBound, ReadVMask]>;
  493. def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
  494. Sched<[WriteVFALUF_UpperBound, ReadVFALUV_UpperBound,
  495. ReadVFALUF_UpperBound, ReadVMask]>;
  496. }
// FP ALU, scalar-operand form only: vfop.vf (used for ops such as vfrsub
// that have no .vv encoding).
multiclass VALU_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFALUF_UpperBound, ReadVFALUV_UpperBound,
                 ReadVFALUF_UpperBound, ReadVMask]>;
}
// Widening FP add/subtract: .vv and .vf forms, on the FP-widening
// scheduling resources.
multiclass VWALU_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFWALUV_UpperBound, ReadVFWALUV_UpperBound,
                 ReadVFWALUV_UpperBound, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFWALUF_UpperBound, ReadVFWALUV_UpperBound,
                 ReadVFWALUF_UpperBound, ReadVMask]>;
}
// FP multiply: .vv and .vf forms.
multiclass VMUL_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFMulV_UpperBound, ReadVFMulV_UpperBound,
                 ReadVFMulV_UpperBound, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFMulF_UpperBound, ReadVFMulV_UpperBound,
                 ReadVFMulF_UpperBound, ReadVMask]>;
}
// FP divide: .vv and .vf forms.
multiclass VDIV_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFDivV_UpperBound, ReadVFDivV_UpperBound,
                 ReadVFDivV_UpperBound, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFDivF_UpperBound, ReadVFDivV_UpperBound,
                 ReadVFDivF_UpperBound, ReadVMask]>;
}
// Reverse FP divide: scalar-operand form only (vfrdiv.vf).
multiclass VRDIV_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFDivF_UpperBound, ReadVFDivV_UpperBound,
                 ReadVFDivF_UpperBound, ReadVMask]>;
}
// Widening FP multiply: .vv and .vf forms.
multiclass VWMUL_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFWMulV_UpperBound, ReadVFWMulV_UpperBound,
                 ReadVFWMulV_UpperBound, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFWMulF_UpperBound, ReadVFWMulV_UpperBound,
                 ReadVFWMulF_UpperBound, ReadVMask]>;
}
// FP fused multiply-add family. Uses the VALUr* classes: vd is also read as
// a source operand (accumulator).
multiclass VMAC_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFMulAddV_UpperBound, ReadVFMulAddV_UpperBound,
                 ReadVFMulAddV_UpperBound, ReadVMask]>;
  def F : VALUrVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFMulAddF_UpperBound, ReadVFMulAddV_UpperBound,
                 ReadVFMulAddF_UpperBound, ReadVMask]>;
}
// Widening FP fused multiply-add family (accumulator is the wide vd).
multiclass VWMAC_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFWMulAddV_UpperBound, ReadVFWMulAddV_UpperBound,
                 ReadVFWMulAddV_UpperBound, ReadVMask]>;
  def F : VALUrVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFWMulAddF_UpperBound, ReadVFWMulAddV_UpperBound,
                 ReadVFWMulAddF_UpperBound, ReadVMask]>;
}
// Unary FP op encoded in the vs1 field (e.g. vfsqrt.v): single def, square
// root scheduling resources.
multiclass VSQR_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFSqrtV_UpperBound, ReadVFSqrtV_UpperBound,
                  ReadVMask]>;
}
// Unary FP estimate op encoded in vs1 (e.g. vfrsqrt7/vfrec7).
multiclass VRCP_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFRecpV_UpperBound, ReadVFRecpV_UpperBound,
                  ReadVMask]>;
}
// FP compare producing a mask: .vv and .vf forms.
multiclass VCMP_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFCmpV_UpperBound, ReadVFCmpV_UpperBound,
                 ReadVFCmpV_UpperBound, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFCmpF_UpperBound, ReadVFCmpV_UpperBound,
                 ReadVFCmpF_UpperBound, ReadVMask]>;
}
// FP compare, scalar-operand form only (e.g. vmfgt/vmfge).
multiclass VCMP_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFCmpF_UpperBound, ReadVFCmpV_UpperBound,
                 ReadVFCmpF_UpperBound, ReadVMask]>;
}
// FP sign-injection: .vv and .vf forms.
multiclass VSGNJ_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFSgnjV_UpperBound, ReadVFSgnjV_UpperBound,
                 ReadVFSgnjV_UpperBound, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFSgnjF_UpperBound, ReadVFSgnjV_UpperBound,
                 ReadVFSgnjF_UpperBound, ReadVMask]>;
}
// The multiclasses below each wrap one unary op whose sub-opcode lives in
// the vs1 field, differing only in the scheduling resources attached.

// vfclass.v.
multiclass VCLS_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFClassV_UpperBound, ReadVFClassV_UpperBound,
                  ReadVMask]>;
}
// Single-width int -> FP conversion.
multiclass VCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFCvtIToFV_UpperBound, ReadVFCvtIToFV_UpperBound,
                  ReadVMask]>;
}
// Single-width FP -> int conversion.
multiclass VCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFCvtFToIV_UpperBound, ReadVFCvtFToIV_UpperBound,
                  ReadVMask]>;
}
// Widening int -> FP conversion.
multiclass VWCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFWCvtIToFV_UpperBound, ReadVFWCvtIToFV_UpperBound,
                  ReadVMask]>;
}
// Widening FP -> int conversion.
multiclass VWCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFWCvtFToIV_UpperBound, ReadVFWCvtFToIV_UpperBound,
                  ReadVMask]>;
}
// Widening FP -> FP conversion.
multiclass VWCVTF_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFWCvtFToFV_UpperBound, ReadVFWCvtFToFV_UpperBound,
                  ReadVMask]>;
}
// Narrowing int -> FP conversion.
multiclass VNCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFNCvtIToFV_UpperBound, ReadVFNCvtIToFV_UpperBound,
                  ReadVMask]>;
}
// Narrowing FP -> int conversion.
multiclass VNCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFNCvtFToIV_UpperBound, ReadVFNCvtFToIV_UpperBound,
                  ReadVMask]>;
}
// Narrowing FP -> FP conversion.
multiclass VNCVTF_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFNCvtFToFV_UpperBound, ReadVFNCvtFToFV_UpperBound,
                  ReadVMask]>;
}
// Reduction multiclasses: one .vs def each. The second source (vd/vs1,
// element 0) is modeled with the dedicated Read*Red*0 resource.

// Integer reduction.
multiclass VRED_MV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPMVV, opcodestr # ".vs">,
            Sched<[WriteVIRedV, ReadVIRedV, ReadVIRedV0, ReadVMask]>;
}
// Widening integer reduction.
multiclass VWRED_IV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPIVV, opcodestr # ".vs">,
            Sched<[WriteVIWRedV, ReadVIWRedV, ReadVIWRedV0, ReadVMask]>;
}
// Unordered FP reduction.
multiclass VRED_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFRedV, ReadVFRedV, ReadVFRedV0, ReadVMask]>;
}
// Ordered FP reduction.
multiclass VREDO_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFRedOV, ReadVFRedOV, ReadVFRedOV0, ReadVMask]>;
}
// Unordered widening FP reduction.
multiclass VWRED_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFWRedV, ReadVFWRedV, ReadVFWRedV0, ReadVMask]>;
}
// Ordered widening FP reduction.
multiclass VWREDO_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFWRedOV, ReadVFWRedOV, ReadVFWRedOV0, ReadVMask]>;
}
// Mask-register logical op (.mm): never takes a mask operand itself, hence
// the NoVm class and the absence of ReadVMask.
multiclass VMALU_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> {
  def M : VALUVVNoVm<funct6, OPMVV, opcodestr #"." #vm #"m">,
          Sched<[WriteVMALUV_UpperBound, ReadVMALUV_UpperBound,
                 ReadVMALUV_UpperBound]>;
}
// Mask set-bit ops (sub-opcode in vs1), e.g. vmsbf/vmsif/vmsof.
multiclass VMSFS_MV_V<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>,
           Sched<[WriteVMSFSV_UpperBound, ReadVMSFSV_UpperBound, ReadVMask]>;
}
// Mask iota/id ops (sub-opcode in vs1).
multiclass VMIOT_MV_V<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>,
           Sched<[WriteVMIotV_UpperBound, ReadVMIotV_UpperBound, ReadVMask]>;
}
// Single-width shift: .vv, .vx and .vi forms. Shift immediates are unsigned,
// so instantiations pass uimm5 as optype.
multiclass VSHT_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVShiftV_UpperBound, ReadVShiftV_UpperBound,
                 ReadVShiftV_UpperBound, ReadVMask]>;
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVShiftX_UpperBound, ReadVShiftV_UpperBound,
                 ReadVShiftX_UpperBound, ReadVMask]>;
  def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
          Sched<[WriteVShiftI_UpperBound, ReadVShiftV_UpperBound,
                 ReadVMask]>;
}
// Narrowing shift (.wv/.wx/.wi when instantiated with vw = "w").
multiclass VNSHT_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVNShiftV_UpperBound, ReadVNShiftV_UpperBound,
                 ReadVNShiftV_UpperBound, ReadVMask]>;
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVNShiftX_UpperBound, ReadVNShiftV_UpperBound,
                 ReadVNShiftX_UpperBound, ReadVMask]>;
  def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
          Sched<[WriteVNShiftI_UpperBound, ReadVNShiftV_UpperBound,
                 ReadVMask]>;
}
// Integer compare producing a mask: .vv, .vx and .vi forms. Each form writes
// through its own port (VICmpV/VICmpX/VICmpI).
multiclass VCMP_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVICmpV_UpperBound, ReadVICmpV_UpperBound,
                 ReadVICmpV_UpperBound, ReadVMask]>;
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVICmpX_UpperBound, ReadVICmpV_UpperBound,
                 ReadVICmpX_UpperBound, ReadVMask]>;
  def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
          Sched<[WriteVICmpI_UpperBound, ReadVICmpV_UpperBound,
                 ReadVMask]>;
}
  701. multiclass VCMP_IV_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  702. def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
  703. Sched<[WriteVICmpV_UpperBound, ReadVICmpV_UpperBound,
  704. ReadVICmpX_UpperBound, ReadVMask]>;
  705. def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
  706. Sched<[WriteVICmpI_UpperBound, ReadVICmpV_UpperBound,
  707. ReadVMask]>;
  708. }
// Integer compare, vector and scalar forms only (no .vi): used for
// vmslt(u)/vmin/vmax-style ops instantiated below.
multiclass VCMP_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVICmpV_UpperBound, ReadVICmpV_UpperBound,
                 ReadVICmpV_UpperBound, ReadVMask]>;
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVICmpX_UpperBound, ReadVICmpV_UpperBound,
                 ReadVICmpX_UpperBound, ReadVMask]>;
}
// Integer multiply: .vv and .vx forms.
multiclass VMUL_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVIMulV_UpperBound, ReadVIMulV_UpperBound,
                 ReadVIMulV_UpperBound, ReadVMask]>;
  def X : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIMulX_UpperBound, ReadVIMulV_UpperBound,
                 ReadVIMulX_UpperBound, ReadVMask]>;
}
// Widening integer multiply: .vv and .vx forms.
multiclass VWMUL_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVIWMulV_UpperBound, ReadVIWMulV_UpperBound,
                 ReadVIWMulV_UpperBound, ReadVMask]>;
  def X : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIWMulX_UpperBound, ReadVIWMulV_UpperBound,
                 ReadVIWMulX_UpperBound, ReadVMask]>;
}
// Integer divide/remainder: .vv and .vx forms.
multiclass VDIV_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVIDivV_UpperBound, ReadVIDivV_UpperBound,
                 ReadVIDivV_UpperBound, ReadVMask]>;
  def X : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIDivX_UpperBound, ReadVIDivV_UpperBound,
                 ReadVIDivX_UpperBound, ReadVMask]>;
}
// Saturating add/subtract: .vv, .vx and .vi forms.
multiclass VSALU_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVSALUV_UpperBound, ReadVSALUV_UpperBound,
                 ReadVSALUV_UpperBound, ReadVMask]>;
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVSALUX_UpperBound, ReadVSALUV_UpperBound,
                 ReadVSALUX_UpperBound, ReadVMask]>;
  def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
          Sched<[WriteVSALUI_UpperBound, ReadVSALUV_UpperBound,
                 ReadVMask]>;
}
// Saturating add/subtract without an immediate form (vssubu/vssub).
multiclass VSALU_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVSALUV_UpperBound, ReadVSALUV_UpperBound,
                 ReadVSALUV_UpperBound, ReadVMask]>;
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVSALUX_UpperBound, ReadVSALUV_UpperBound,
                 ReadVSALUX_UpperBound, ReadVMask]>;
}
// Averaging add/subtract: .vv and .vx forms.
multiclass VAALU_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVAALUV_UpperBound, ReadVAALUV_UpperBound,
                 ReadVAALUV_UpperBound, ReadVMask]>;
  def X : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVAALUX_UpperBound, ReadVAALUV_UpperBound,
                 ReadVAALUX_UpperBound, ReadVMask]>;
}
// Fractional multiply with rounding/saturation: .vv and .vx forms.
multiclass VSMUL_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVSMulV_UpperBound, ReadVSMulV_UpperBound,
                 ReadVSMulV_UpperBound, ReadVMask]>;
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVSMulX_UpperBound, ReadVSMulV_UpperBound,
                 ReadVSMulX_UpperBound, ReadVMask]>;
}
// Scaling shift: .vv, .vx and .vi forms (instantiated with uimm5).
multiclass VSSHF_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVSShiftV_UpperBound, ReadVSShiftV_UpperBound,
                 ReadVSShiftV_UpperBound, ReadVMask]>;
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVSShiftX_UpperBound, ReadVSShiftV_UpperBound,
                 ReadVSShiftX_UpperBound, ReadVMask]>;
  def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
          Sched<[WriteVSShiftI_UpperBound, ReadVSShiftV_UpperBound,
                 ReadVMask]>;
}
// Narrowing fixed-point clip: .wv/.wx/.wi when instantiated with vw = "w".
multiclass VNCLP_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVNClipV_UpperBound, ReadVNClipV_UpperBound,
                 ReadVNClipV_UpperBound, ReadVMask]>;
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVNClipX_UpperBound, ReadVNClipV_UpperBound,
                 ReadVNClipX_UpperBound, ReadVMask]>;
  def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
          Sched<[WriteVNClipI_UpperBound, ReadVNClipV_UpperBound,
                 ReadVMask]>;
}
// Slide up/down by scalar or immediate (no .vv form).
multiclass VSLD_IV_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVISlideX_UpperBound, ReadVISlideV_UpperBound,
                 ReadVISlideX_UpperBound, ReadVMask]>;
  def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
          Sched<[WriteVISlideI_UpperBound, ReadVISlideV_UpperBound,
                 ReadVMask]>;
}
// Integer slide1up/slide1down (scalar operand from a GPR).
multiclass VSLD1_MV_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def X : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVISlide1X_UpperBound, ReadVISlideV_UpperBound,
                 ReadVISlideX_UpperBound, ReadVMask]>;
}
// FP slide1up/slide1down (scalar operand from an FPR).
multiclass VSLD1_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFSlide1F_UpperBound, ReadVFSlideV_UpperBound,
                 ReadVFSlideF_UpperBound, ReadVMask]>;
}
// Register gather: .vv, .vx and .vi forms.
multiclass VGTR_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVGatherV_UpperBound, ReadVGatherV_UpperBound,
                 ReadVGatherV_UpperBound, ReadVMask]>;
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVGatherX_UpperBound, ReadVGatherV_UpperBound,
                 ReadVGatherX_UpperBound, ReadVMask]>;
  def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
          Sched<[WriteVGatherI_UpperBound, ReadVGatherV_UpperBound,
                 ReadVMask]>;
}
// vcompress.vm: the mask register is a data operand here, so the NoVm class
// is used and no ReadVMask resource is attached.
multiclass VCPR_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> {
  def M : VALUVVNoVm<funct6, OPMVV, opcodestr # "." # vm # "m">,
          Sched<[WriteVCompressV_UpperBound, ReadVCompressV_UpperBound,
                 ReadVCompressV_UpperBound]>;
}
// Whole-register load for EEW 8/16/32: emits <prefix>E<l>_V defs and derives
// the WriteVLD<N>R SchedWrite from the register count (nf + 1).
multiclass VWholeLoadN<bits<3> nf, string opcodestr, RegisterClass VRC> {
  foreach l = [8, 16, 32] in {
    defvar w = !cast<RISCVWidth>("LSWidth" # l);
    defvar s = !cast<SchedWrite>("WriteVLD" # !add(nf, 1) # "R");
    def E # l # _V : VWholeLoad<nf, w, opcodestr # "e" # l # ".v", VRC>,
                     Sched<[s, ReadVLDX_UpperBound]>;
  }
}
// EEW=64 whole-register load; the SchedWrite is supplied by the caller
// since this variant is defined under a separate predicate.
multiclass VWholeLoadEEW64<bits<3> nf, string opcodestr, RegisterClass VRC, SchedReadWrite schedrw> {
  def E64_V : VWholeLoad<nf, LSWidth64, opcodestr # "e64.v", VRC>,
              Sched<[schedrw, ReadVLDX_UpperBound]>;
}
  844. //===----------------------------------------------------------------------===//
  845. // Instructions
  846. //===----------------------------------------------------------------------===//
let Predicates = [HasVInstructions] in {
// Configuration-setting instructions, modeled with hasSideEffects = 1.
let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
def VSETVLI : RVInstSetVLi<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp11:$vtypei),
                           "vsetvli", "$rd, $rs1, $vtypei">,
              Sched<[WriteVSETVLI, ReadVSETVLI]>;
def VSETIVLI : RVInstSetiVLi<(outs GPR:$rd), (ins uimm5:$uimm, VTypeIOp10:$vtypei),
                             "vsetivli", "$rd, $uimm, $vtypei">,
               Sched<[WriteVSETIVLI]>;
def VSETVL : RVInstSetVL<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2),
                         "vsetvl", "$rd, $rs1, $rs2">,
             Sched<[WriteVSETVL, ReadVSETVL, ReadVSETVL]>;
} // hasSideEffects = 1, mayLoad = 0, mayStore = 0

// Unit-stride, fault-only-first and strided loads/stores for EEW 8/16/32.
// The strided scheduling classes are parameterized by the element width.
foreach eew = [8, 16, 32] in {
  defvar w = !cast<RISCVWidth>("LSWidth" # eew);

  // Vector Unit-Stride Instructions
  def VLE#eew#_V : VUnitStrideLoad<w, "vle"#eew#".v">, VLESched<UpperBoundLMUL>;
  def VSE#eew#_V : VUnitStrideStore<w, "vse"#eew#".v">, VSESched<UpperBoundLMUL>;

  // Vector Unit-Stride Fault-only-First Loads
  def VLE#eew#FF_V : VUnitStrideLoadFF<w, "vle"#eew#"ff.v">, VLFSched<UpperBoundLMUL>;

  // Vector Strided Instructions
  def VLSE#eew#_V : VStridedLoad<w, "vlse"#eew#".v">, VLSSched<eew, UpperBoundLMUL>;
  def VSSE#eew#_V : VStridedStore<w, "vsse"#eew#".v">, VSSSched<eew, UpperBoundLMUL>;
}

defm "" : VIndexLoadStore<[8, 16, 32]>;
} // Predicates = [HasVInstructions]
let Predicates = [HasVInstructions] in {
// Vector mask load/store.
def VLM_V : VUnitStrideLoadMask<"vlm.v">,
            Sched<[WriteVLDM_UpperBound, ReadVLDX_UpperBound]>;
def VSM_V : VUnitStrideStoreMask<"vsm.v">,
            Sched<[WriteVSTM_UpperBound, ReadVSTM_UpperBound, ReadVSTX_UpperBound]>;
// Alternate spellings for the mask load/store; the trailing 0 keeps them
// from being used for printing.
def : InstAlias<"vle1.v $vd, (${rs1})",
                (VLM_V VR:$vd, GPR:$rs1), 0>;
def : InstAlias<"vse1.v $vs3, (${rs1})",
                (VSM_V VR:$vs3, GPR:$rs1), 0>;

// Whole-register loads (EEW 8/16/32 encodings via VWholeLoadN).
defm VL1R : VWholeLoadN<0, "vl1r", VR>;
defm VL2R : VWholeLoadN<1, "vl2r", VRM2>;
defm VL4R : VWholeLoadN<3, "vl4r", VRM4>;
defm VL8R : VWholeLoadN<7, "vl8r", VRM8>;

// Whole-register stores.
def VS1R_V : VWholeStore<0, "vs1r.v", VR>,
             Sched<[WriteVST1R, ReadVST1R, ReadVSTX_UpperBound]>;
def VS2R_V : VWholeStore<1, "vs2r.v", VRM2>,
             Sched<[WriteVST2R, ReadVST2R, ReadVSTX_UpperBound]>;
def VS4R_V : VWholeStore<3, "vs4r.v", VRM4>,
             Sched<[WriteVST4R, ReadVST4R, ReadVSTX_UpperBound]>;
def VS8R_V : VWholeStore<7, "vs8r.v", VRM8>,
             Sched<[WriteVST8R, ReadVST8R, ReadVSTX_UpperBound]>;

// vlNr.v aliases map to the EEW=8 whole-register encodings.
def : InstAlias<"vl1r.v $vd, (${rs1})", (VL1RE8_V VR:$vd, GPR:$rs1)>;
def : InstAlias<"vl2r.v $vd, (${rs1})", (VL2RE8_V VRM2:$vd, GPR:$rs1)>;
def : InstAlias<"vl4r.v $vd, (${rs1})", (VL4RE8_V VRM4:$vd, GPR:$rs1)>;
def : InstAlias<"vl8r.v $vd, (${rs1})", (VL8RE8_V VRM8:$vd, GPR:$rs1)>;
} // Predicates = [HasVInstructions]
  898. let Predicates = [HasVInstructionsI64] in {
  899. // Vector Unit-Stride Instructions
  900. def VLE64_V : VUnitStrideLoad<LSWidth64, "vle64.v">,
  901. VLESched<UpperBoundLMUL>;
  902. def VLE64FF_V : VUnitStrideLoadFF<LSWidth64, "vle64ff.v">,
  903. VLFSched<UpperBoundLMUL>;
  904. def VSE64_V : VUnitStrideStore<LSWidth64, "vse64.v">,
  905. VSESched<UpperBoundLMUL>;
  906. // Vector Strided Instructions
  907. def VLSE64_V : VStridedLoad<LSWidth64, "vlse64.v">,
  908. VLSSched<32, UpperBoundLMUL>;
  909. def VSSE64_V : VStridedStore<LSWidth64, "vsse64.v">,
  910. VSSSched<64, UpperBoundLMUL>;
  911. defm VL1R: VWholeLoadEEW64<0, "vl1r", VR, WriteVLD1R>;
  912. defm VL2R: VWholeLoadEEW64<1, "vl2r", VRM2, WriteVLD2R>;
  913. defm VL4R: VWholeLoadEEW64<3, "vl4r", VRM4, WriteVLD4R>;
  914. defm VL8R: VWholeLoadEEW64<7, "vl8r", VRM8, WriteVLD8R>;
  915. } // Predicates = [HasVInstructionsI64]
let Predicates = [IsRV64, HasVInstructionsI64] in {
// Vector Indexed Instructions
// EEW=64 indexed forms additionally require RV64.
defm "" : VIndexLoadStore<[64]>;
} // [IsRV64, HasVInstructionsI64]
let Predicates = [HasVInstructions] in {
// Vector Single-Width Integer Add and Subtract
defm VADD_V : VALU_IV_V_X_I<"vadd", 0b000000>;
defm VSUB_V : VALU_IV_V_X<"vsub", 0b000010>;
defm VRSUB_V : VALU_IV_X_I<"vrsub", 0b000011>;

// vneg.v is a reverse-subtract from x0.
def : InstAlias<"vneg.v $vd, $vs$vm", (VRSUB_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vneg.v $vd, $vs", (VRSUB_VX VR:$vd, VR:$vs, X0, zero_reg)>;

// Vector Widening Integer Add/Subtract
// Refer to 11.2 Widening Vector Arithmetic Instructions
// The destination vector register group cannot overlap a source vector
// register group of a different element width (including the mask register
// if masked), otherwise an illegal instruction exception is raised.
let Constraints = "@earlyclobber $vd" in {
let RVVConstraint = WidenV in {
defm VWADDU_V : VALU_MV_V_X<"vwaddu", 0b110000>;
defm VWSUBU_V : VALU_MV_V_X<"vwsubu", 0b110010>;
defm VWADD_V : VALU_MV_V_X<"vwadd", 0b110001>;
defm VWSUB_V : VALU_MV_V_X<"vwsub", 0b110011>;
} // RVVConstraint = WidenV
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
let RVVConstraint = WidenW in {
defm VWADDU_W : VALU_MV_V_X<"vwaddu", 0b110100, "w">;
defm VWSUBU_W : VALU_MV_V_X<"vwsubu", 0b110110, "w">;
defm VWADD_W : VALU_MV_V_X<"vwadd", 0b110101, "w">;
defm VWSUB_W : VALU_MV_V_X<"vwsub", 0b110111, "w">;
} // RVVConstraint = WidenW
} // Constraints = "@earlyclobber $vd"

// Widening conversions are spelled as a widening add of x0.
def : InstAlias<"vwcvt.x.x.v $vd, $vs$vm",
                (VWADD_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vwcvt.x.x.v $vd, $vs",
                (VWADD_VX VR:$vd, VR:$vs, X0, zero_reg)>;
def : InstAlias<"vwcvtu.x.x.v $vd, $vs$vm",
                (VWADDU_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vwcvtu.x.x.v $vd, $vs",
                (VWADDU_VX VR:$vd, VR:$vs, X0, zero_reg)>;

// Vector Integer Extension
defm VZEXT_VF8 : VALU_MV_VS2<"vzext.vf8", 0b010010, 0b00010>;
defm VSEXT_VF8 : VALU_MV_VS2<"vsext.vf8", 0b010010, 0b00011>;
defm VZEXT_VF4 : VALU_MV_VS2<"vzext.vf4", 0b010010, 0b00100>;
defm VSEXT_VF4 : VALU_MV_VS2<"vsext.vf4", 0b010010, 0b00101>;
defm VZEXT_VF2 : VALU_MV_VS2<"vzext.vf2", 0b010010, 0b00110>;
defm VSEXT_VF2 : VALU_MV_VS2<"vsext.vf2", 0b010010, 0b00111>;

// Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
defm VADC_V : VALUm_IV_V_X_I<"vadc", 0b010000>;
// vmadc/vmsbc exist in both mask-carried (VALUm) and no-mask (VALUNoVm)
// encodings; both defm share the same prefix so the suffixes distinguish them.
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
defm VMADC_V : VALUm_IV_V_X_I<"vmadc", 0b010001>;
defm VMADC_V : VALUNoVm_IV_V_X_I<"vmadc", 0b010001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
defm VSBC_V : VALUm_IV_V_X<"vsbc", 0b010010>;
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
defm VMSBC_V : VALUm_IV_V_X<"vmsbc", 0b010011>;
defm VMSBC_V : VALUNoVm_IV_V_X<"vmsbc", 0b010011>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint

// Vector Bitwise Logical Instructions
defm VAND_V : VALU_IV_V_X_I<"vand", 0b001001>;
defm VOR_V : VALU_IV_V_X_I<"vor", 0b001010>;
defm VXOR_V : VALU_IV_V_X_I<"vxor", 0b001011>;

// vnot.v is an XOR with all-ones.
def : InstAlias<"vnot.v $vd, $vs$vm",
                (VXOR_VI VR:$vd, VR:$vs, -1, VMaskOp:$vm)>;
def : InstAlias<"vnot.v $vd, $vs",
                (VXOR_VI VR:$vd, VR:$vs, -1, zero_reg)>;

// Vector Single-Width Bit Shift Instructions
defm VSLL_V : VSHT_IV_V_X_I<"vsll", 0b100101, uimm5>;
defm VSRL_V : VSHT_IV_V_X_I<"vsrl", 0b101000, uimm5>;
defm VSRA_V : VSHT_IV_V_X_I<"vsra", 0b101001, uimm5>;

// Vector Narrowing Integer Right Shift Instructions
// Refer to 11.3. Narrowing Vector Arithmetic Instructions
// The destination vector register group cannot overlap the first source
// vector register group (specified by vs2). The destination vector register
// group cannot overlap the mask register if used, unless LMUL=1.
let Constraints = "@earlyclobber $vd" in {
defm VNSRL_W : VNSHT_IV_V_X_I<"vnsrl", 0b101100, uimm5, "w">;
defm VNSRA_W : VNSHT_IV_V_X_I<"vnsra", 0b101101, uimm5, "w">;
} // Constraints = "@earlyclobber $vd"

// Narrowing conversion is a narrowing shift-right by x0.
def : InstAlias<"vncvt.x.x.w $vd, $vs$vm",
                (VNSRL_WX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vncvt.x.x.w $vd, $vs",
                (VNSRL_WX VR:$vd, VR:$vs, X0, zero_reg)>;

// Vector Integer Comparison Instructions
let RVVConstraint = NoConstraint in {
defm VMSEQ_V : VCMP_IV_V_X_I<"vmseq", 0b011000>;
defm VMSNE_V : VCMP_IV_V_X_I<"vmsne", 0b011001>;
defm VMSLTU_V : VCMP_IV_V_X<"vmsltu", 0b011010>;
defm VMSLT_V : VCMP_IV_V_X<"vmslt", 0b011011>;
defm VMSLEU_V : VCMP_IV_V_X_I<"vmsleu", 0b011100>;
defm VMSLE_V : VCMP_IV_V_X_I<"vmsle", 0b011101>;
defm VMSGTU_V : VCMP_IV_X_I<"vmsgtu", 0b011110>;
defm VMSGT_V : VCMP_IV_X_I<"vmsgt", 0b011111>;
} // RVVConstraint = NoConstraint

// Greater-than(-or-equal) .vv spellings map to the less-than family with
// swapped operands; priority 0 keeps them assembly-only.
def : InstAlias<"vmsgtu.vv $vd, $va, $vb$vm",
                (VMSLTU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsgt.vv $vd, $va, $vb$vm",
                (VMSLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsgeu.vv $vd, $va, $vb$vm",
                (VMSLEU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsge.vv $vd, $va, $vb$vm",
                (VMSLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;

let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0,
    mayStore = 0 in {
// For unsigned comparisons we need to special case 0 immediate to maintain
// the always true/false semantics we would invert if we just decremented the
// immediate like we do for signed. To match the GNU assembler we will use
// vmseq/vmsne.vv with the same register for both operands which we can't do
// from an InstAlias.
def PseudoVMSGEU_VI : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                             [], "vmsgeu.vi", "$vd, $vs2, $imm$vm">;
def PseudoVMSLTU_VI : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                             [], "vmsltu.vi", "$vd, $vs2, $imm$vm">;
// Handle signed with pseudos as well for more consistency in the
// implementation.
def PseudoVMSGE_VI : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                            [], "vmsge.vi", "$vd, $vs2, $imm$vm">;
def PseudoVMSLT_VI : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                            [], "vmslt.vi", "$vd, $vs2, $imm$vm">;
}

// vmsge(u).vx has no direct encoding; these assembler-only pseudos are
// expanded later (the _M_T forms carry an explicit scratch register).
let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0,
    mayStore = 0 in {
def PseudoVMSGEU_VX : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, GPR:$rs1),
                             [], "vmsgeu.vx", "$vd, $vs2, $rs1">;
def PseudoVMSGE_VX : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, GPR:$rs1),
                            [], "vmsge.vx", "$vd, $vs2, $rs1">;
def PseudoVMSGEU_VX_M : Pseudo<(outs VRNoV0:$vd),
                               (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                               [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm">;
def PseudoVMSGE_VX_M : Pseudo<(outs VRNoV0:$vd),
                              (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                              [], "vmsge.vx", "$vd, $vs2, $rs1$vm">;
def PseudoVMSGEU_VX_M_T : Pseudo<(outs VR:$vd, VRNoV0:$scratch),
                                 (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                                 [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm, $scratch">;
def PseudoVMSGE_VX_M_T : Pseudo<(outs VR:$vd, VRNoV0:$scratch),
                                (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                                [], "vmsge.vx", "$vd, $vs2, $rs1$vm, $scratch">;
}

// Vector Integer Min/Max Instructions
defm VMINU_V : VCMP_IV_V_X<"vminu", 0b000100>;
defm VMIN_V : VCMP_IV_V_X<"vmin", 0b000101>;
defm VMAXU_V : VCMP_IV_V_X<"vmaxu", 0b000110>;
defm VMAX_V : VCMP_IV_V_X<"vmax", 0b000111>;

// Vector Single-Width Integer Multiply Instructions
defm VMUL_V : VMUL_MV_V_X<"vmul", 0b100101>;
defm VMULH_V : VMUL_MV_V_X<"vmulh", 0b100111>;
defm VMULHU_V : VMUL_MV_V_X<"vmulhu", 0b100100>;
defm VMULHSU_V : VMUL_MV_V_X<"vmulhsu", 0b100110>;

// Vector Integer Divide Instructions
defm VDIVU_V : VDIV_MV_V_X<"vdivu", 0b100000>;
defm VDIV_V : VDIV_MV_V_X<"vdiv", 0b100001>;
defm VREMU_V : VDIV_MV_V_X<"vremu", 0b100010>;
defm VREM_V : VDIV_MV_V_X<"vrem", 0b100011>;

// Vector Widening Integer Multiply Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VWMUL_V : VWMUL_MV_V_X<"vwmul", 0b111011>;
defm VWMULU_V : VWMUL_MV_V_X<"vwmulu", 0b111000>;
defm VWMULSU_V : VWMUL_MV_V_X<"vwmulsu", 0b111010>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Single-Width Integer Multiply-Add Instructions
defm VMACC_V : VMAC_MV_V_X<"vmacc", 0b101101>;
defm VNMSAC_V : VMAC_MV_V_X<"vnmsac", 0b101111>;
defm VMADD_V : VMAC_MV_V_X<"vmadd", 0b101001>;
defm VNMSUB_V : VMAC_MV_V_X<"vnmsub", 0b101011>;

// Vector Widening Integer Multiply-Add Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VWMACCU_V : VWMAC_MV_V_X<"vwmaccu", 0b111100>;
defm VWMACC_V : VWMAC_MV_V_X<"vwmacc", 0b111101>;
defm VWMACCSU_V : VWMAC_MV_V_X<"vwmaccsu", 0b111111>;
defm VWMACCUS_V : VWMAC_MV_X<"vwmaccus", 0b111110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Integer Merge Instructions
defm VMERGE_V : VMRG_IV_V_X_I<"vmerge", 0b010111>;

// Vector Integer Move Instructions
// vmv.v.* reuses the vmerge encoding with vm = 1 and vs2 = v0.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vs2 = 0, vm = 1,
    RVVConstraint = NoConstraint in {
// op vd, vs1
def VMV_V_V : RVInstVV<0b010111, OPIVV, (outs VR:$vd),
                       (ins VR:$vs1), "vmv.v.v", "$vd, $vs1">,
              Sched<[WriteVIMovV_UpperBound, ReadVIMovV_UpperBound]>;
// op vd, rs1
def VMV_V_X : RVInstVX<0b010111, OPIVX, (outs VR:$vd),
                       (ins GPR:$rs1), "vmv.v.x", "$vd, $rs1">,
              Sched<[WriteVIMovX_UpperBound, ReadVIMovX_UpperBound]>;
// op vd, imm
def VMV_V_I : RVInstIVI<0b010111, (outs VR:$vd),
                        (ins simm5:$imm), "vmv.v.i", "$vd, $imm">,
              Sched<[WriteVIMovI_UpperBound]>;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

// Vector Fixed-Point Arithmetic Instructions
defm VSADDU_V : VSALU_IV_V_X_I<"vsaddu", 0b100000>;
defm VSADD_V : VSALU_IV_V_X_I<"vsadd", 0b100001>;
defm VSSUBU_V : VSALU_IV_V_X<"vssubu", 0b100010>;
defm VSSUB_V : VSALU_IV_V_X<"vssub", 0b100011>;

// Vector Single-Width Averaging Add and Subtract
defm VAADDU_V : VAALU_MV_V_X<"vaaddu", 0b001000>;
defm VAADD_V : VAALU_MV_V_X<"vaadd", 0b001001>;
defm VASUBU_V : VAALU_MV_V_X<"vasubu", 0b001010>;
defm VASUB_V : VAALU_MV_V_X<"vasub", 0b001011>;

// Vector Single-Width Fractional Multiply with Rounding and Saturation
defm VSMUL_V : VSMUL_IV_V_X<"vsmul", 0b100111>;

// Vector Single-Width Scaling Shift Instructions
defm VSSRL_V : VSSHF_IV_V_X_I<"vssrl", 0b101010, uimm5>;
defm VSSRA_V : VSSHF_IV_V_X_I<"vssra", 0b101011, uimm5>;

// Vector Narrowing Fixed-Point Clip Instructions
let Constraints = "@earlyclobber $vd" in {
defm VNCLIPU_W : VNCLP_IV_V_X_I<"vnclipu", 0b101110, uimm5, "w">;
defm VNCLIP_W : VNCLP_IV_V_X_I<"vnclip", 0b101111, uimm5, "w">;
} // Constraints = "@earlyclobber $vd"
} // Predicates = [HasVInstructions]
  1135. let Predicates = [HasVInstructionsAnyF] in {
  1136. // Vector Single-Width Floating-Point Add/Subtract Instructions
  1137. let Uses = [FRM], mayRaiseFPException = true in {
  1138. defm VFADD_V : VALU_FV_V_F<"vfadd", 0b000000>;
  1139. defm VFSUB_V : VALU_FV_V_F<"vfsub", 0b000010>;
  1140. defm VFRSUB_V : VALU_FV_F<"vfrsub", 0b100111>;
  1141. }
  1142. // Vector Widening Floating-Point Add/Subtract Instructions
  1143. let Constraints = "@earlyclobber $vd",
  1144. Uses = [FRM],
  1145. mayRaiseFPException = true in {
  1146. let RVVConstraint = WidenV in {
  1147. defm VFWADD_V : VWALU_FV_V_F<"vfwadd", 0b110000>;
  1148. defm VFWSUB_V : VWALU_FV_V_F<"vfwsub", 0b110010>;
  1149. } // RVVConstraint = WidenV
  1150. // Set earlyclobber for following instructions for second and mask operands.
  1151. // This has the downside that the earlyclobber constraint is too coarse and
  1152. // will impose unnecessary restrictions by not allowing the destination to
  1153. // overlap with the first (wide) operand.
  1154. let RVVConstraint = WidenW in {
  1155. defm VFWADD_W : VWALU_FV_V_F<"vfwadd", 0b110100, "w">;
  1156. defm VFWSUB_W : VWALU_FV_V_F<"vfwsub", 0b110110, "w">;
  1157. } // RVVConstraint = WidenW
  1158. } // Constraints = "@earlyclobber $vd", Uses = [FRM], mayRaiseFPException = true
  1159. // Vector Single-Width Floating-Point Multiply/Divide Instructions
  1160. let Uses = [FRM], mayRaiseFPException = true in {
  1161. defm VFMUL_V : VMUL_FV_V_F<"vfmul", 0b100100>;
  1162. defm VFDIV_V : VDIV_FV_V_F<"vfdiv", 0b100000>;
  1163. defm VFRDIV_V : VRDIV_FV_F<"vfrdiv", 0b100001>;
  1164. }
  1165. // Vector Widening Floating-Point Multiply
  1166. let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV,
  1167. Uses = [FRM], mayRaiseFPException = true in {
  1168. defm VFWMUL_V : VWMUL_FV_V_F<"vfwmul", 0b111000>;
  1169. } // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV, Uses = [FRM], mayRaiseFPException = true
  1170. // Vector Single-Width Floating-Point Fused Multiply-Add Instructions
  1171. let Uses = [FRM], mayRaiseFPException = true in {
  1172. defm VFMACC_V : VMAC_FV_V_F<"vfmacc", 0b101100>;
  1173. defm VFNMACC_V : VMAC_FV_V_F<"vfnmacc", 0b101101>;
  1174. defm VFMSAC_V : VMAC_FV_V_F<"vfmsac", 0b101110>;
  1175. defm VFNMSAC_V : VMAC_FV_V_F<"vfnmsac", 0b101111>;
  1176. defm VFMADD_V : VMAC_FV_V_F<"vfmadd", 0b101000>;
  1177. defm VFNMADD_V : VMAC_FV_V_F<"vfnmadd", 0b101001>;
  1178. defm VFMSUB_V : VMAC_FV_V_F<"vfmsub", 0b101010>;
  1179. defm VFNMSUB_V : VMAC_FV_V_F<"vfnmsub", 0b101011>;
  1180. }
  1181. // Vector Widening Floating-Point Fused Multiply-Add Instructions
  1182. let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV,
  1183. Uses = [FRM], mayRaiseFPException = true in {
  1184. defm VFWMACC_V : VWMAC_FV_V_F<"vfwmacc", 0b111100>;
  1185. defm VFWNMACC_V : VWMAC_FV_V_F<"vfwnmacc", 0b111101>;
  1186. defm VFWMSAC_V : VWMAC_FV_V_F<"vfwmsac", 0b111110>;
  1187. defm VFWNMSAC_V : VWMAC_FV_V_F<"vfwnmsac", 0b111111>;
  1188. } // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV, Uses = [FRM], mayRaiseFPException = true
  1189. // Vector Floating-Point Square-Root Instruction
  1190. let Uses = [FRM], mayRaiseFPException = true in {
  1191. defm VFSQRT_V : VSQR_FV_VS2<"vfsqrt.v", 0b010011, 0b00000>;
  1192. defm VFREC7_V : VRCP_FV_VS2<"vfrec7.v", 0b010011, 0b00101>;
  1193. }
  1194. let mayRaiseFPException = true in
  1195. defm VFRSQRT7_V : VRCP_FV_VS2<"vfrsqrt7.v", 0b010011, 0b00100>;
  1196. // Vector Floating-Point MIN/MAX Instructions
  1197. let mayRaiseFPException = true in {
  1198. defm VFMIN_V : VCMP_FV_V_F<"vfmin", 0b000100>;
  1199. defm VFMAX_V : VCMP_FV_V_F<"vfmax", 0b000110>;
  1200. }
  1201. // Vector Floating-Point Sign-Injection Instructions
  1202. defm VFSGNJ_V : VSGNJ_FV_V_F<"vfsgnj", 0b001000>;
  1203. defm VFSGNJN_V : VSGNJ_FV_V_F<"vfsgnjn", 0b001001>;
  1204. defm VFSGNJX_V : VSGNJ_FV_V_F<"vfsgnjx", 0b001010>;
  1205. def : InstAlias<"vfneg.v $vd, $vs$vm",
  1206. (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;
  1207. def : InstAlias<"vfneg.v $vd, $vs",
  1208. (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, zero_reg)>;
  1209. def : InstAlias<"vfabs.v $vd, $vs$vm",
  1210. (VFSGNJX_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;
  1211. def : InstAlias<"vfabs.v $vd, $vs",
  1212. (VFSGNJX_VV VR:$vd, VR:$vs, VR:$vs, zero_reg)>;
  1213. // Vector Floating-Point Compare Instructions
  1214. let RVVConstraint = NoConstraint, mayRaiseFPException = true in {
  1215. defm VMFEQ_V : VCMP_FV_V_F<"vmfeq", 0b011000>;
  1216. defm VMFNE_V : VCMP_FV_V_F<"vmfne", 0b011100>;
  1217. defm VMFLT_V : VCMP_FV_V_F<"vmflt", 0b011011>;
  1218. defm VMFLE_V : VCMP_FV_V_F<"vmfle", 0b011001>;
  1219. defm VMFGT_V : VCMP_FV_F<"vmfgt", 0b011101>;
  1220. defm VMFGE_V : VCMP_FV_F<"vmfge", 0b011111>;
  1221. } // RVVConstraint = NoConstraint, mayRaiseFPException = true
  1222. def : InstAlias<"vmfgt.vv $vd, $va, $vb$vm",
  1223. (VMFLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
  1224. def : InstAlias<"vmfge.vv $vd, $va, $vb$vm",
  1225. (VMFLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
  1226. // Vector Floating-Point Classify Instruction
  1227. defm VFCLASS_V : VCLS_FV_VS2<"vfclass.v", 0b010011, 0b10000>;
  1228. let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
  1229. // Vector Floating-Point Merge Instruction
  1230. let vm = 0 in
  1231. def VFMERGE_VFM : RVInstVX<0b010111, OPFVF, (outs VR:$vd),
  1232. (ins VR:$vs2, FPR32:$rs1, VMV0:$v0),
  1233. "vfmerge.vfm", "$vd, $vs2, $rs1, v0">,
  1234. Sched<[WriteVFMergeV_UpperBound, ReadVFMergeV_UpperBound,
  1235. ReadVFMergeF_UpperBound, ReadVMask]>;
  1236. // Vector Floating-Point Move Instruction
  1237. let RVVConstraint = NoConstraint in
  1238. let vm = 1, vs2 = 0 in
  1239. def VFMV_V_F : RVInstVX<0b010111, OPFVF, (outs VR:$vd),
  1240. (ins FPR32:$rs1), "vfmv.v.f", "$vd, $rs1">,
  1241. Sched<[WriteVFMovV_UpperBound, ReadVFMovF_UpperBound]>;
  1242. } // hasSideEffects = 0, mayLoad = 0, mayStore = 0
  1243. // Single-Width Floating-Point/Integer Type-Convert Instructions
  1244. let mayRaiseFPException = true in {
  1245. let Uses = [FRM] in {
  1246. defm VFCVT_XU_F_V : VCVTI_FV_VS2<"vfcvt.xu.f.v", 0b010010, 0b00000>;
  1247. defm VFCVT_X_F_V : VCVTI_FV_VS2<"vfcvt.x.f.v", 0b010010, 0b00001>;
  1248. }
  1249. defm VFCVT_RTZ_XU_F_V : VCVTI_FV_VS2<"vfcvt.rtz.xu.f.v", 0b010010, 0b00110>;
  1250. defm VFCVT_RTZ_X_F_V : VCVTI_FV_VS2<"vfcvt.rtz.x.f.v", 0b010010, 0b00111>;
  1251. let Uses = [FRM] in {
  1252. defm VFCVT_F_XU_V : VCVTF_IV_VS2<"vfcvt.f.xu.v", 0b010010, 0b00010>;
  1253. defm VFCVT_F_X_V : VCVTF_IV_VS2<"vfcvt.f.x.v", 0b010010, 0b00011>;
  1254. }
  1255. } // mayRaiseFPException = true
  1256. // Widening Floating-Point/Integer Type-Convert Instructions
  1257. let Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt,
  1258. mayRaiseFPException = true in {
  1259. let Uses = [FRM] in {
  1260. defm VFWCVT_XU_F_V : VWCVTI_FV_VS2<"vfwcvt.xu.f.v", 0b010010, 0b01000>;
  1261. defm VFWCVT_X_F_V : VWCVTI_FV_VS2<"vfwcvt.x.f.v", 0b010010, 0b01001>;
  1262. }
  1263. defm VFWCVT_RTZ_XU_F_V : VWCVTI_FV_VS2<"vfwcvt.rtz.xu.f.v", 0b010010, 0b01110>;
  1264. defm VFWCVT_RTZ_X_F_V : VWCVTI_FV_VS2<"vfwcvt.rtz.x.f.v", 0b010010, 0b01111>;
  1265. defm VFWCVT_F_XU_V : VWCVTF_IV_VS2<"vfwcvt.f.xu.v", 0b010010, 0b01010>;
  1266. defm VFWCVT_F_X_V : VWCVTF_IV_VS2<"vfwcvt.f.x.v", 0b010010, 0b01011>;
  1267. defm VFWCVT_F_F_V : VWCVTF_FV_VS2<"vfwcvt.f.f.v", 0b010010, 0b01100>;
  1268. } // Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt
  1269. // Narrowing Floating-Point/Integer Type-Convert Instructions
  1270. let Constraints = "@earlyclobber $vd", mayRaiseFPException = true in {
  1271. let Uses = [FRM] in {
  1272. defm VFNCVT_XU_F_W : VNCVTI_FV_VS2<"vfncvt.xu.f.w", 0b010010, 0b10000>;
  1273. defm VFNCVT_X_F_W : VNCVTI_FV_VS2<"vfncvt.x.f.w", 0b010010, 0b10001>;
  1274. }
  1275. defm VFNCVT_RTZ_XU_F_W : VNCVTI_FV_VS2<"vfncvt.rtz.xu.f.w", 0b010010, 0b10110>;
  1276. defm VFNCVT_RTZ_X_F_W : VNCVTI_FV_VS2<"vfncvt.rtz.x.f.w", 0b010010, 0b10111>;
  1277. let Uses = [FRM] in {
  1278. defm VFNCVT_F_XU_W : VNCVTF_IV_VS2<"vfncvt.f.xu.w", 0b010010, 0b10010>;
  1279. defm VFNCVT_F_X_W : VNCVTF_IV_VS2<"vfncvt.f.x.w", 0b010010, 0b10011>;
  1280. defm VFNCVT_F_F_W : VNCVTF_FV_VS2<"vfncvt.f.f.w", 0b010010, 0b10100>;
  1281. }
  1282. defm VFNCVT_ROD_F_F_W : VNCVTF_FV_VS2<"vfncvt.rod.f.f.w", 0b010010, 0b10101>;
  1283. } // Constraints = "@earlyclobber $vd", mayRaiseFPException = true
  1284. } // Predicates = HasVInstructionsAnyF]
  1285. let Predicates = [HasVInstructions] in {
  1286. // Vector Single-Width Integer Reduction Instructions
  1287. let RVVConstraint = NoConstraint in {
  1288. defm VREDSUM : VRED_MV_V<"vredsum", 0b000000>;
  1289. defm VREDMAXU : VRED_MV_V<"vredmaxu", 0b000110>;
  1290. defm VREDMAX : VRED_MV_V<"vredmax", 0b000111>;
  1291. defm VREDMINU : VRED_MV_V<"vredminu", 0b000100>;
  1292. defm VREDMIN : VRED_MV_V<"vredmin", 0b000101>;
  1293. defm VREDAND : VRED_MV_V<"vredand", 0b000001>;
  1294. defm VREDOR : VRED_MV_V<"vredor", 0b000010>;
  1295. defm VREDXOR : VRED_MV_V<"vredxor", 0b000011>;
  1296. } // RVVConstraint = NoConstraint
  1297. // Vector Widening Integer Reduction Instructions
  1298. let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
  1299. // Set earlyclobber for following instructions for second and mask operands.
  1300. // This has the downside that the earlyclobber constraint is too coarse and
  1301. // will impose unnecessary restrictions by not allowing the destination to
  1302. // overlap with the first (wide) operand.
  1303. defm VWREDSUMU : VWRED_IV_V<"vwredsumu", 0b110000>;
  1304. defm VWREDSUM : VWRED_IV_V<"vwredsum", 0b110001>;
  1305. } // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
  1306. } // Predicates = [HasVInstructions]
  1307. let Predicates = [HasVInstructionsAnyF] in {
  1308. // Vector Single-Width Floating-Point Reduction Instructions
  1309. let RVVConstraint = NoConstraint in {
  1310. let Uses = [FRM], mayRaiseFPException = true in {
  1311. defm VFREDOSUM : VREDO_FV_V<"vfredosum", 0b000011>;
  1312. defm VFREDUSUM : VRED_FV_V<"vfredusum", 0b000001>;
  1313. }
  1314. let mayRaiseFPException = true in {
  1315. defm VFREDMAX : VRED_FV_V<"vfredmax", 0b000111>;
  1316. defm VFREDMIN : VRED_FV_V<"vfredmin", 0b000101>;
  1317. }
  1318. } // RVVConstraint = NoConstraint
  1319. def : InstAlias<"vfredsum.vs $vd, $vs2, $vs1$vm",
  1320. (VFREDUSUM_VS VR:$vd, VR:$vs2, VR:$vs1, VMaskOp:$vm), 0>;
  1321. // Vector Widening Floating-Point Reduction Instructions
  1322. let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
  1323. // Set earlyclobber for following instructions for second and mask operands.
  1324. // This has the downside that the earlyclobber constraint is too coarse and
  1325. // will impose unnecessary restrictions by not allowing the destination to
  1326. // overlap with the first (wide) operand.
  1327. let Uses = [FRM], mayRaiseFPException = true in {
  1328. defm VFWREDOSUM : VWREDO_FV_V<"vfwredosum", 0b110011>;
  1329. defm VFWREDUSUM : VWRED_FV_V<"vfwredusum", 0b110001>;
  1330. }
  1331. } // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
  1332. def : InstAlias<"vfwredsum.vs $vd, $vs2, $vs1$vm",
  1333. (VFWREDUSUM_VS VR:$vd, VR:$vs2, VR:$vs1, VMaskOp:$vm), 0>;
  1334. } // Predicates = [HasVInstructionsAnyF]
  1335. let Predicates = [HasVInstructions] in {
  1336. // Vector Mask-Register Logical Instructions
  1337. let RVVConstraint = NoConstraint in {
  1338. defm VMAND_M : VMALU_MV_Mask<"vmand", 0b011001, "m">;
  1339. defm VMNAND_M : VMALU_MV_Mask<"vmnand", 0b011101, "m">;
  1340. defm VMANDN_M : VMALU_MV_Mask<"vmandn", 0b011000, "m">;
  1341. defm VMXOR_M : VMALU_MV_Mask<"vmxor", 0b011011, "m">;
  1342. defm VMOR_M : VMALU_MV_Mask<"vmor", 0b011010, "m">;
  1343. defm VMNOR_M : VMALU_MV_Mask<"vmnor", 0b011110, "m">;
  1344. defm VMORN_M : VMALU_MV_Mask<"vmorn", 0b011100, "m">;
  1345. defm VMXNOR_M : VMALU_MV_Mask<"vmxnor", 0b011111, "m">;
  1346. }
  1347. def : InstAlias<"vmmv.m $vd, $vs",
  1348. (VMAND_MM VR:$vd, VR:$vs, VR:$vs)>;
  1349. def : InstAlias<"vmclr.m $vd",
  1350. (VMXOR_MM VR:$vd, VR:$vd, VR:$vd)>;
  1351. def : InstAlias<"vmset.m $vd",
  1352. (VMXNOR_MM VR:$vd, VR:$vd, VR:$vd)>;
  1353. def : InstAlias<"vmnot.m $vd, $vs",
  1354. (VMNAND_MM VR:$vd, VR:$vs, VR:$vs)>;
  1355. def : InstAlias<"vmandnot.mm $vd, $vs2, $vs1",
  1356. (VMANDN_MM VR:$vd, VR:$vs2, VR:$vs1), 0>;
  1357. def : InstAlias<"vmornot.mm $vd, $vs2, $vs1",
  1358. (VMORN_MM VR:$vd, VR:$vs2, VR:$vs1), 0>;
  1359. let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
  1360. RVVConstraint = NoConstraint in {
  1361. // Vector mask population count vcpop
  1362. def VCPOP_M : RVInstV<0b010000, 0b10000, OPMVV, (outs GPR:$vd),
  1363. (ins VR:$vs2, VMaskOp:$vm),
  1364. "vcpop.m", "$vd, $vs2$vm">,
  1365. Sched<[WriteVMPopV_UpperBound, ReadVMPopV_UpperBound,
  1366. ReadVMask]>;
  1367. // vfirst find-first-set mask bit
  1368. def VFIRST_M : RVInstV<0b010000, 0b10001, OPMVV, (outs GPR:$vd),
  1369. (ins VR:$vs2, VMaskOp:$vm),
  1370. "vfirst.m", "$vd, $vs2$vm">,
  1371. Sched<[WriteVMFFSV_UpperBound, ReadVMFFSV_UpperBound,
  1372. ReadVMask]>;
  1373. } // hasSideEffects = 0, mayLoad = 0, mayStore = 0
  1374. def : InstAlias<"vpopc.m $vd, $vs2$vm",
  1375. (VCPOP_M GPR:$vd, VR:$vs2, VMaskOp:$vm), 0>;
  1376. let Constraints = "@earlyclobber $vd", RVVConstraint = Iota in {
  1377. // vmsbf.m set-before-first mask bit
  1378. defm VMSBF_M : VMSFS_MV_V<"vmsbf.m", 0b010100, 0b00001>;
  1379. // vmsif.m set-including-first mask bit
  1380. defm VMSIF_M : VMSFS_MV_V<"vmsif.m", 0b010100, 0b00011>;
  1381. // vmsof.m set-only-first mask bit
  1382. defm VMSOF_M : VMSFS_MV_V<"vmsof.m", 0b010100, 0b00010>;
  1383. // Vector Iota Instruction
  1384. defm VIOTA_M : VMIOT_MV_V<"viota.m", 0b010100, 0b10000>;
  1385. } // Constraints = "@earlyclobber $vd", RVVConstraint = Iota
  1386. // Vector Element Index Instruction
  1387. let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
  1388. let vs2 = 0 in
  1389. def VID_V : RVInstV<0b010100, 0b10001, OPMVV, (outs VR:$vd),
  1390. (ins VMaskOp:$vm), "vid.v", "$vd$vm">,
  1391. Sched<[WriteVMIdxV_UpperBound, ReadVMask]>;
  1392. // Integer Scalar Move Instructions
  1393. let vm = 1, RVVConstraint = NoConstraint in {
  1394. def VMV_X_S : RVInstV<0b010000, 0b00000, OPMVV, (outs GPR:$vd),
  1395. (ins VR:$vs2), "vmv.x.s", "$vd, $vs2">,
  1396. Sched<[WriteVIMovVX_UpperBound, ReadVIMovVX_UpperBound]>;
  1397. let Constraints = "$vd = $vd_wb" in
  1398. def VMV_S_X : RVInstV2<0b010000, 0b00000, OPMVX, (outs VR:$vd_wb),
  1399. (ins VR:$vd, GPR:$rs1), "vmv.s.x", "$vd, $rs1">,
  1400. Sched<[WriteVIMovXV_UpperBound, ReadVIMovXV_UpperBound,
  1401. ReadVIMovXX_UpperBound]>;
  1402. }
  1403. } // hasSideEffects = 0, mayLoad = 0, mayStore = 0
  1404. } // Predicates = [HasVInstructions]
  1405. let Predicates = [HasVInstructionsAnyF] in {
  1406. let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1,
  1407. RVVConstraint = NoConstraint in {
  1408. // Floating-Point Scalar Move Instructions
  1409. def VFMV_F_S : RVInstV<0b010000, 0b00000, OPFVV, (outs FPR32:$vd),
  1410. (ins VR:$vs2), "vfmv.f.s", "$vd, $vs2">,
  1411. Sched<[WriteVFMovVF_UpperBound, ReadVFMovVF_UpperBound]>;
  1412. let Constraints = "$vd = $vd_wb" in
  1413. def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VR:$vd_wb),
  1414. (ins VR:$vd, FPR32:$rs1), "vfmv.s.f", "$vd, $rs1">,
  1415. Sched<[WriteVFMovFV_UpperBound, ReadVFMovFV_UpperBound,
  1416. ReadVFMovFX_UpperBound]>;
  1417. } // hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1
  1418. } // Predicates = [HasVInstructionsAnyF]
  1419. let Predicates = [HasVInstructions] in {
  1420. // Vector Slide Instructions
  1421. let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
  1422. defm VSLIDEUP_V : VSLD_IV_X_I<"vslideup", 0b001110, uimm5>;
  1423. defm VSLIDE1UP_V : VSLD1_MV_X<"vslide1up", 0b001110>;
  1424. } // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
  1425. defm VSLIDEDOWN_V : VSLD_IV_X_I<"vslidedown", 0b001111, uimm5>;
  1426. defm VSLIDE1DOWN_V : VSLD1_MV_X<"vslide1down", 0b001111>;
  1427. } // Predicates = [HasVInstructions]
  1428. let Predicates = [HasVInstructionsAnyF] in {
  1429. let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
  1430. defm VFSLIDE1UP_V : VSLD1_FV_F<"vfslide1up", 0b001110>;
  1431. } // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
  1432. defm VFSLIDE1DOWN_V : VSLD1_FV_F<"vfslide1down", 0b001111>;
  1433. } // Predicates = [HasVInstructionsAnyF]
  1434. let Predicates = [HasVInstructions] in {
  1435. // Vector Register Gather Instruction
  1436. let Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather in {
  1437. defm VRGATHER_V : VGTR_IV_V_X_I<"vrgather", 0b001100, uimm5>;
  1438. def VRGATHEREI16_VV : VALUVV<0b001110, OPIVV, "vrgatherei16.vv">,
  1439. Sched<[WriteVGatherV_UpperBound, ReadVGatherV_UpperBound,
  1440. ReadVGatherV_UpperBound]>;
  1441. } // Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather
  1442. // Vector Compress Instruction
  1443. let Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress in {
  1444. defm VCOMPRESS_V : VCPR_MV_Mask<"vcompress", 0b010111>;
  1445. } // Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress
  1446. let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
  1447. RVVConstraint = NoConstraint in {
  1448. // A future extension may relax the vector register alignment restrictions.
  1449. foreach n = [1, 2, 4, 8] in {
  1450. defvar vrc = !cast<VReg>(!if(!eq(n, 1), "VR", "VRM"#n));
  1451. def VMV#n#R_V : RVInstV<0b100111, !add(n, -1), OPIVI, (outs vrc:$vd),
  1452. (ins vrc:$vs2), "vmv" # n # "r.v", "$vd, $vs2">,
  1453. VMVRSched<n> {
  1454. let Uses = [];
  1455. let vm = 1;
  1456. }
  1457. }
  1458. } // hasSideEffects = 0, mayLoad = 0, mayStore = 0
  1459. } // Predicates = [HasVInstructions]
  1460. let Predicates = [HasVInstructions] in {
  1461. foreach nf=2-8 in {
  1462. foreach eew = [8, 16, 32] in {
  1463. defvar w = !cast<RISCVWidth>("LSWidth"#eew);
  1464. def VLSEG#nf#E#eew#_V :
  1465. VUnitStrideSegmentLoad<!add(nf, -1), w, "vlseg"#nf#"e"#eew#".v">,
  1466. VLSEGSched<nf, eew, UpperBoundLMUL>;
  1467. def VLSEG#nf#E#eew#FF_V :
  1468. VUnitStrideSegmentLoadFF<!add(nf, -1), w, "vlseg"#nf#"e"#eew#"ff.v">,
  1469. VLSEGFFSched<nf, eew, UpperBoundLMUL>;
  1470. def VSSEG#nf#E#eew#_V :
  1471. VUnitStrideSegmentStore<!add(nf, -1), w, "vsseg"#nf#"e"#eew#".v">,
  1472. VSSEGSched<nf, eew, UpperBoundLMUL>;
  1473. // Vector Strided Instructions
  1474. def VLSSEG#nf#E#eew#_V :
  1475. VStridedSegmentLoad<!add(nf, -1), w, "vlsseg"#nf#"e"#eew#".v">,
  1476. VLSSEGSched<nf, eew, UpperBoundLMUL>;
  1477. def VSSSEG#nf#E#eew#_V :
  1478. VStridedSegmentStore<!add(nf, -1), w, "vssseg"#nf#"e"#eew#".v">,
  1479. VSSSEGSched<nf, eew, UpperBoundLMUL>;
  1480. // Vector Indexed Instructions
  1481. def VLUXSEG#nf#EI#eew#_V :
  1482. VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, w,
  1483. "vluxseg"#nf#"ei"#eew#".v">,
  1484. VLXSEGSched<nf, eew, "U", UpperBoundLMUL>;
  1485. def VLOXSEG#nf#EI#eew#_V :
  1486. VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, w,
  1487. "vloxseg"#nf#"ei"#eew#".v">,
  1488. VLXSEGSched<nf, eew, "O", UpperBoundLMUL>;
  1489. def VSUXSEG#nf#EI#eew#_V :
  1490. VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, w,
  1491. "vsuxseg"#nf#"ei"#eew#".v">,
  1492. VSXSEGSched<nf, eew, "U", UpperBoundLMUL>;
  1493. def VSOXSEG#nf#EI#eew#_V :
  1494. VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, w,
  1495. "vsoxseg"#nf#"ei"#eew#".v">,
  1496. VSXSEGSched<nf, eew, "O", UpperBoundLMUL>;
  1497. }
  1498. }
  1499. } // Predicates = [HasVInstructions]
  1500. let Predicates = [HasVInstructionsI64] in {
  1501. foreach nf=2-8 in {
  1502. // Vector Unit-strided Segment Instructions
  1503. def VLSEG#nf#E64_V :
  1504. VUnitStrideSegmentLoad<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64.v">,
  1505. VLSEGSched<nf, 64, UpperBoundLMUL>;
  1506. def VLSEG#nf#E64FF_V :
  1507. VUnitStrideSegmentLoadFF<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64ff.v">,
  1508. VLSEGFFSched<nf, 64, UpperBoundLMUL>;
  1509. def VSSEG#nf#E64_V :
  1510. VUnitStrideSegmentStore<!add(nf, -1), LSWidth64, "vsseg"#nf#"e64.v">,
  1511. VSSEGSched<nf, 64, UpperBoundLMUL>;
  1512. // Vector Strided Segment Instructions
  1513. def VLSSEG#nf#E64_V :
  1514. VStridedSegmentLoad<!add(nf, -1), LSWidth64, "vlsseg"#nf#"e64.v">,
  1515. VLSSEGSched<nf, 64, UpperBoundLMUL>;
  1516. def VSSSEG#nf#E64_V :
  1517. VStridedSegmentStore<!add(nf, -1), LSWidth64, "vssseg"#nf#"e64.v">,
  1518. VSSSEGSched<nf, 64, UpperBoundLMUL>;
  1519. }
  1520. } // Predicates = [HasVInstructionsI64]
  1521. let Predicates = [HasVInstructionsI64, IsRV64] in {
  1522. foreach nf = 2 - 8 in {
  1523. // Vector Indexed Segment Instructions
  1524. def VLUXSEG #nf #EI64_V
  1525. : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, LSWidth64,
  1526. "vluxseg" #nf #"ei64.v">,
  1527. VLXSEGSched<nf, 64, "U", UpperBoundLMUL>;
  1528. def VLOXSEG #nf #EI64_V
  1529. : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, LSWidth64,
  1530. "vloxseg" #nf #"ei64.v">,
  1531. VLXSEGSched<nf, 64, "O", UpperBoundLMUL>;
  1532. def VSUXSEG #nf #EI64_V
  1533. : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, LSWidth64,
  1534. "vsuxseg" #nf #"ei64.v">,
  1535. VSXSEGSched<nf, 64, "U", UpperBoundLMUL>;
  1536. def VSOXSEG #nf #EI64_V
  1537. : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, LSWidth64,
  1538. "vsoxseg" #nf #"ei64.v">,
  1539. VSXSEGSched<nf, 64, "O", UpperBoundLMUL>;
  1540. }
  1541. } // Predicates = [HasVInstructionsI64, IsRV64]
  1542. include "RISCVInstrInfoVPseudos.td"