  1. //===- RISCVInstrInfoVVLPatterns.td - RVV VL patterns ------*- tablegen -*-===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. ///
  9. /// This file contains the required infrastructure and VL patterns to
  10. /// support code generation for the standard 'V' (Vector) extension,
  11. /// version 1.0.
  12. ///
  13. /// This file is included from and depends upon RISCVInstrInfoVPseudos.td
  14. ///
  15. /// Note: the patterns for RVV intrinsics are found in
  16. /// RISCVInstrInfoVPseudos.td.
  17. ///
  18. //===----------------------------------------------------------------------===//
  19. //===----------------------------------------------------------------------===//
  20. // Helpers to define the VL patterns.
  21. //===----------------------------------------------------------------------===//
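// Note: the *_VL SDNodes defined below share a common trailing operand
// layout. Taking SDT_RISCVIntBinOp_VL as the example, operands 1-2 are the
// vector sources, operand 3 is the merge (passthru) value with the same type
// as the result, operand 4 is an i1 mask vector with the same element count
// as the result, and operand 5 is the VL operand of type XLenVT.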
  22. def SDT_RISCVIntBinOp_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
  23. SDTCisSameAs<0, 2>,
  24. SDTCisVec<0>, SDTCisInt<0>,
  25. SDTCisSameAs<0, 3>,
  26. SDTCVecEltisVT<4, i1>,
  27. SDTCisSameNumEltsAs<0, 4>,
  28. SDTCisVT<5, XLenVT>]>;
  29. def SDT_RISCVFPUnOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
  30. SDTCisVec<0>, SDTCisFP<0>,
  31. SDTCVecEltisVT<2, i1>,
  32. SDTCisSameNumEltsAs<0, 2>,
  33. SDTCisVT<3, XLenVT>]>;
  34. def SDT_RISCVFPBinOp_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
  35. SDTCisSameAs<0, 2>,
  36. SDTCisVec<0>, SDTCisFP<0>,
  37. SDTCisSameAs<0, 3>,
  38. SDTCVecEltisVT<4, i1>,
  39. SDTCisSameNumEltsAs<0, 4>,
  40. SDTCisVT<5, XLenVT>]>;
  41. def SDT_RISCVCopySign_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
  42. SDTCisSameAs<0, 2>,
  43. SDTCisVec<0>, SDTCisFP<0>,
  44. SDTCisSameAs<0, 3>,
  45. SDTCVecEltisVT<4, i1>,
  46. SDTCisSameNumEltsAs<0, 4>,
  47. SDTCisVT<5, XLenVT>]>;
  48. def riscv_vmv_v_x_vl : SDNode<"RISCVISD::VMV_V_X_VL",
  49. SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<0>,
  50. SDTCisSameAs<0, 1>,
  51. SDTCisVT<2, XLenVT>,
  52. SDTCisVT<3, XLenVT>]>>;
  53. def riscv_vfmv_v_f_vl : SDNode<"RISCVISD::VFMV_V_F_VL",
  54. SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisFP<0>,
  55. SDTCisSameAs<0, 1>,
  56. SDTCisEltOfVec<2, 0>,
  57. SDTCisVT<3, XLenVT>]>>;
  58. def riscv_vmv_s_x_vl : SDNode<"RISCVISD::VMV_S_X_VL",
  59. SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
  60. SDTCisInt<0>,
  61. SDTCisVT<2, XLenVT>,
  62. SDTCisVT<3, XLenVT>]>>;
  63. def riscv_vfmv_s_f_vl : SDNode<"RISCVISD::VFMV_S_F_VL",
  64. SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
  65. SDTCisFP<0>,
  66. SDTCisEltOfVec<2, 0>,
  67. SDTCisVT<3, XLenVT>]>>;
  68. def riscv_add_vl : SDNode<"RISCVISD::ADD_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
  69. def riscv_sub_vl : SDNode<"RISCVISD::SUB_VL", SDT_RISCVIntBinOp_VL>;
  70. def riscv_mul_vl : SDNode<"RISCVISD::MUL_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
  71. def riscv_mulhs_vl : SDNode<"RISCVISD::MULHS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
  72. def riscv_mulhu_vl : SDNode<"RISCVISD::MULHU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
  73. def riscv_and_vl : SDNode<"RISCVISD::AND_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
  74. def riscv_or_vl : SDNode<"RISCVISD::OR_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
  75. def riscv_xor_vl : SDNode<"RISCVISD::XOR_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
  76. def riscv_sdiv_vl : SDNode<"RISCVISD::SDIV_VL", SDT_RISCVIntBinOp_VL>;
  77. def riscv_srem_vl : SDNode<"RISCVISD::SREM_VL", SDT_RISCVIntBinOp_VL>;
  78. def riscv_udiv_vl : SDNode<"RISCVISD::UDIV_VL", SDT_RISCVIntBinOp_VL>;
  79. def riscv_urem_vl : SDNode<"RISCVISD::UREM_VL", SDT_RISCVIntBinOp_VL>;
  80. def riscv_shl_vl : SDNode<"RISCVISD::SHL_VL", SDT_RISCVIntBinOp_VL>;
  81. def riscv_sra_vl : SDNode<"RISCVISD::SRA_VL", SDT_RISCVIntBinOp_VL>;
  82. def riscv_srl_vl : SDNode<"RISCVISD::SRL_VL", SDT_RISCVIntBinOp_VL>;
  83. def riscv_smin_vl : SDNode<"RISCVISD::SMIN_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
  84. def riscv_smax_vl : SDNode<"RISCVISD::SMAX_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
  85. def riscv_umin_vl : SDNode<"RISCVISD::UMIN_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
  86. def riscv_umax_vl : SDNode<"RISCVISD::UMAX_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
  87. def riscv_saddsat_vl : SDNode<"RISCVISD::SADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
  88. def riscv_uaddsat_vl : SDNode<"RISCVISD::UADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
  89. def riscv_ssubsat_vl : SDNode<"RISCVISD::SSUBSAT_VL", SDT_RISCVIntBinOp_VL>;
  90. def riscv_usubsat_vl : SDNode<"RISCVISD::USUBSAT_VL", SDT_RISCVIntBinOp_VL>;
  91. def riscv_fadd_vl : SDNode<"RISCVISD::FADD_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;
  92. def riscv_fsub_vl : SDNode<"RISCVISD::FSUB_VL", SDT_RISCVFPBinOp_VL>;
  93. def riscv_fmul_vl : SDNode<"RISCVISD::FMUL_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;
  94. def riscv_fdiv_vl : SDNode<"RISCVISD::FDIV_VL", SDT_RISCVFPBinOp_VL>;
  95. def riscv_fneg_vl : SDNode<"RISCVISD::FNEG_VL", SDT_RISCVFPUnOp_VL>;
  96. def riscv_fabs_vl : SDNode<"RISCVISD::FABS_VL", SDT_RISCVFPUnOp_VL>;
  97. def riscv_fsqrt_vl : SDNode<"RISCVISD::FSQRT_VL", SDT_RISCVFPUnOp_VL>;
  98. def riscv_fcopysign_vl : SDNode<"RISCVISD::FCOPYSIGN_VL", SDT_RISCVCopySign_VL>;
  99. def riscv_fminnum_vl : SDNode<"RISCVISD::FMINNUM_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;
  100. def riscv_fmaxnum_vl : SDNode<"RISCVISD::FMAXNUM_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;
  101. def SDT_RISCVVecFMA_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
  102. SDTCisSameAs<0, 2>,
  103. SDTCisSameAs<0, 3>,
  104. SDTCisVec<0>, SDTCisFP<0>,
  105. SDTCVecEltisVT<4, i1>,
  106. SDTCisSameNumEltsAs<0, 4>,
  107. SDTCisVT<5, XLenVT>]>;
  108. def riscv_vfmadd_vl : SDNode<"RISCVISD::VFMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>;
  109. def riscv_vfnmadd_vl : SDNode<"RISCVISD::VFNMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>;
  110. def riscv_vfmsub_vl : SDNode<"RISCVISD::VFMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>;
  111. def riscv_vfnmsub_vl : SDNode<"RISCVISD::VFNMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>;
  112. def SDT_RISCVFPRoundOp_VL : SDTypeProfile<1, 3, [
  113. SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<0, 1>, SDTCisSameNumEltsAs<0, 1>,
  114. SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
  115. ]>;
  116. def SDT_RISCVFPExtendOp_VL : SDTypeProfile<1, 3, [
  117. SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<1, 0>, SDTCisSameNumEltsAs<0, 1>,
  118. SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
  119. ]>;
  120. def riscv_fpround_vl : SDNode<"RISCVISD::FP_ROUND_VL", SDT_RISCVFPRoundOp_VL>;
  121. def riscv_fpextend_vl : SDNode<"RISCVISD::FP_EXTEND_VL", SDT_RISCVFPExtendOp_VL>;
  122. def riscv_fncvt_rod_vl : SDNode<"RISCVISD::VFNCVT_ROD_VL", SDT_RISCVFPRoundOp_VL>;
  123. def SDT_RISCVFP2IOp_VL : SDTypeProfile<1, 3, [
  124. SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>,
  125. SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
  126. ]>;
  127. def SDT_RISCVFP2IOp_RM_VL : SDTypeProfile<1, 4, [
  128. SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>,
  129. SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>,
  130. SDTCisVT<4, XLenVT> // Rounding mode
  131. ]>;
  132. def SDT_RISCVI2FPOp_VL : SDTypeProfile<1, 3, [
  133. SDTCisFP<0>, SDTCisInt<1>, SDTCisSameNumEltsAs<0, 1>,
  134. SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
  135. ]>;
  136. def SDT_RISCVI2FPOp_RM_VL : SDTypeProfile<1, 4, [
  137. SDTCisFP<0>, SDTCisInt<1>, SDTCisSameNumEltsAs<0, 1>,
  138. SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>,
  139. SDTCisVT<4, XLenVT> // Rounding mode
  140. ]>;
  141. // Float -> Int
  142. def riscv_vfcvt_xu_f_vl : SDNode<"RISCVISD::VFCVT_XU_F_VL", SDT_RISCVFP2IOp_VL>;
  143. def riscv_vfcvt_x_f_vl : SDNode<"RISCVISD::VFCVT_X_F_VL", SDT_RISCVFP2IOp_VL>;
  144. def riscv_vfcvt_rm_xu_f_vl : SDNode<"RISCVISD::VFCVT_RM_XU_F_VL", SDT_RISCVFP2IOp_RM_VL>;
  145. def riscv_vfcvt_rm_x_f_vl : SDNode<"RISCVISD::VFCVT_RM_X_F_VL", SDT_RISCVFP2IOp_RM_VL>;
  146. def riscv_vfcvt_rtz_xu_f_vl : SDNode<"RISCVISD::VFCVT_RTZ_XU_F_VL", SDT_RISCVFP2IOp_VL>;
  147. def riscv_vfcvt_rtz_x_f_vl : SDNode<"RISCVISD::VFCVT_RTZ_X_F_VL", SDT_RISCVFP2IOp_VL>;
  148. // Int -> Float
  149. def riscv_sint_to_fp_vl : SDNode<"RISCVISD::SINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>;
  150. def riscv_uint_to_fp_vl : SDNode<"RISCVISD::UINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>;
  151. def riscv_vfcvt_rm_f_xu_vl : SDNode<"RISCVISD::VFCVT_RM_F_XU_VL", SDT_RISCVI2FPOp_RM_VL>;
  152. def riscv_vfcvt_rm_f_x_vl : SDNode<"RISCVISD::VFCVT_RM_F_X_VL", SDT_RISCVI2FPOp_RM_VL>;
  153. def riscv_vfround_noexcept_vl: SDNode<"RISCVISD::VFROUND_NOEXCEPT_VL", SDT_RISCVFPUnOp_VL>;
  154. def riscv_setcc_vl : SDNode<"RISCVISD::SETCC_VL",
  155. SDTypeProfile<1, 6, [SDTCVecEltisVT<0, i1>,
  156. SDTCisVec<1>,
  157. SDTCisSameNumEltsAs<0, 1>,
  158. SDTCisSameAs<1, 2>,
  159. SDTCisVT<3, OtherVT>,
  160. SDTCisSameAs<0, 4>,
  161. SDTCisSameAs<0, 5>,
  162. SDTCisVT<6, XLenVT>]>>;
  163. def riscv_vrgather_vx_vl : SDNode<"RISCVISD::VRGATHER_VX_VL",
  164. SDTypeProfile<1, 5, [SDTCisVec<0>,
  165. SDTCisSameAs<0, 1>,
  166. SDTCisVT<2, XLenVT>,
  167. SDTCisSameAs<0, 3>,
  168. SDTCVecEltisVT<4, i1>,
  169. SDTCisSameNumEltsAs<0, 4>,
  170. SDTCisVT<5, XLenVT>]>>;
  171. def riscv_vrgather_vv_vl : SDNode<"RISCVISD::VRGATHER_VV_VL",
  172. SDTypeProfile<1, 5, [SDTCisVec<0>,
  173. SDTCisSameAs<0, 1>,
  174. SDTCisInt<2>,
  175. SDTCisSameNumEltsAs<0, 2>,
  176. SDTCisSameSizeAs<0, 2>,
  177. SDTCisSameAs<0, 3>,
  178. SDTCVecEltisVT<4, i1>,
  179. SDTCisSameNumEltsAs<0, 4>,
  180. SDTCisVT<5, XLenVT>]>>;
  181. def riscv_vrgatherei16_vv_vl : SDNode<"RISCVISD::VRGATHEREI16_VV_VL",
  182. SDTypeProfile<1, 5, [SDTCisVec<0>,
  183. SDTCisSameAs<0, 1>,
  184. SDTCisInt<2>,
  185. SDTCVecEltisVT<2, i16>,
  186. SDTCisSameNumEltsAs<0, 2>,
  187. SDTCisSameAs<0, 3>,
  188. SDTCVecEltisVT<4, i1>,
  189. SDTCisSameNumEltsAs<0, 4>,
  190. SDTCisVT<5, XLenVT>]>>;
  191. def SDT_RISCVSelect_VL : SDTypeProfile<1, 4, [
  192. SDTCisVec<0>, SDTCisVec<1>, SDTCisSameNumEltsAs<0, 1>, SDTCVecEltisVT<1, i1>,
  193. SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>, SDTCisVT<4, XLenVT>
  194. ]>;
  195. def riscv_vselect_vl : SDNode<"RISCVISD::VSELECT_VL", SDT_RISCVSelect_VL>;
  196. def riscv_vp_merge_vl : SDNode<"RISCVISD::VP_MERGE_VL", SDT_RISCVSelect_VL>;
  197. def SDT_RISCVVMSETCLR_VL : SDTypeProfile<1, 1, [SDTCVecEltisVT<0, i1>,
  198. SDTCisVT<1, XLenVT>]>;
  199. def riscv_vmclr_vl : SDNode<"RISCVISD::VMCLR_VL", SDT_RISCVVMSETCLR_VL>;
  200. def riscv_vmset_vl : SDNode<"RISCVISD::VMSET_VL", SDT_RISCVVMSETCLR_VL>;
  201. def SDT_RISCVMaskBinOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
  202. SDTCisSameAs<0, 2>,
  203. SDTCVecEltisVT<0, i1>,
  204. SDTCisVT<3, XLenVT>]>;
  205. def riscv_vmand_vl : SDNode<"RISCVISD::VMAND_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>;
  206. def riscv_vmor_vl : SDNode<"RISCVISD::VMOR_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>;
  207. def riscv_vmxor_vl : SDNode<"RISCVISD::VMXOR_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>;
  208. def true_mask : PatLeaf<(riscv_vmset_vl (XLenVT srcvalue))>;
  209. def riscv_vmnot_vl : PatFrag<(ops node:$rs, node:$vl),
  210. (riscv_vmxor_vl node:$rs, true_mask, node:$vl)>;
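// true_mask matches a VMSET_VL with any VL, i.e. an all-ones mask; patterns
// use it to recognize the effectively unmasked case. riscv_vmnot_vl expresses
// mask negation as an XOR with that all-ones mask.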
  211. def riscv_vcpop_vl : SDNode<"RISCVISD::VCPOP_VL",
  212. SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>,
  213. SDTCisVec<1>, SDTCisInt<1>,
  214. SDTCVecEltisVT<2, i1>,
  215. SDTCisSameNumEltsAs<1, 2>,
  216. SDTCisVT<3, XLenVT>]>>;
  217. def riscv_vfirst_vl : SDNode<"RISCVISD::VFIRST_VL",
  218. SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>,
  219. SDTCisVec<1>, SDTCisInt<1>,
  220. SDTCVecEltisVT<2, i1>,
  221. SDTCisSameNumEltsAs<1, 2>,
  222. SDTCisVT<3, XLenVT>]>>;
  223. def SDT_RISCVVEXTEND_VL : SDTypeProfile<1, 3, [SDTCisVec<0>,
  224. SDTCisSameNumEltsAs<0, 1>,
  225. SDTCisSameNumEltsAs<1, 2>,
  226. SDTCVecEltisVT<2, i1>,
  227. SDTCisVT<3, XLenVT>]>;
  228. def riscv_sext_vl : SDNode<"RISCVISD::VSEXT_VL", SDT_RISCVVEXTEND_VL>;
  229. def riscv_zext_vl : SDNode<"RISCVISD::VZEXT_VL", SDT_RISCVVEXTEND_VL>;
  230. def riscv_trunc_vector_vl : SDNode<"RISCVISD::TRUNCATE_VECTOR_VL",
  231. SDTypeProfile<1, 3, [SDTCisVec<0>,
  232. SDTCisSameNumEltsAs<0, 1>,
  233. SDTCisSameNumEltsAs<0, 2>,
  234. SDTCVecEltisVT<2, i1>,
  235. SDTCisVT<3, XLenVT>]>>;
  236. def SDT_RISCVVWBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>,
  237. SDTCisSameNumEltsAs<0, 1>,
  238. SDTCisSameAs<1, 2>,
  239. SDTCisSameAs<0, 3>,
  240. SDTCisSameNumEltsAs<1, 4>,
  241. SDTCVecEltisVT<4, i1>,
  242. SDTCisVT<5, XLenVT>]>;
  243. def riscv_vwmul_vl : SDNode<"RISCVISD::VWMUL_VL", SDT_RISCVVWBinOp_VL, [SDNPCommutative]>;
  244. def riscv_vwmulu_vl : SDNode<"RISCVISD::VWMULU_VL", SDT_RISCVVWBinOp_VL, [SDNPCommutative]>;
  245. def riscv_vwmulsu_vl : SDNode<"RISCVISD::VWMULSU_VL", SDT_RISCVVWBinOp_VL>;
  246. def riscv_vwadd_vl : SDNode<"RISCVISD::VWADD_VL", SDT_RISCVVWBinOp_VL, [SDNPCommutative]>;
  247. def riscv_vwaddu_vl : SDNode<"RISCVISD::VWADDU_VL", SDT_RISCVVWBinOp_VL, [SDNPCommutative]>;
  248. def riscv_vwsub_vl : SDNode<"RISCVISD::VWSUB_VL", SDT_RISCVVWBinOp_VL, []>;
  249. def riscv_vwsubu_vl : SDNode<"RISCVISD::VWSUBU_VL", SDT_RISCVVWBinOp_VL, []>;
  250. def SDT_RISCVVNBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>,
  251. SDTCisSameNumEltsAs<0, 1>,
  252. SDTCisOpSmallerThanOp<0, 1>,
  253. SDTCisSameAs<0, 2>,
  254. SDTCisSameAs<0, 3>,
  255. SDTCisSameNumEltsAs<0, 4>,
  256. SDTCVecEltisVT<4, i1>,
  257. SDTCisVT<5, XLenVT>]>;
  258. def riscv_vnsrl_vl : SDNode<"RISCVISD::VNSRL_VL", SDT_RISCVVNBinOp_VL>;
  259. def SDT_RISCVVWBinOpW_VL : SDTypeProfile<1, 5, [SDTCisVec<0>,
  260. SDTCisSameAs<0, 1>,
  261. SDTCisSameNumEltsAs<1, 2>,
  262. SDTCisOpSmallerThanOp<2, 1>,
  263. SDTCisSameAs<0, 3>,
  264. SDTCisSameNumEltsAs<1, 4>,
  265. SDTCVecEltisVT<4, i1>,
  266. SDTCisVT<5, XLenVT>]>;
  267. def riscv_vwadd_w_vl : SDNode<"RISCVISD::VWADD_W_VL", SDT_RISCVVWBinOpW_VL>;
  268. def riscv_vwaddu_w_vl : SDNode<"RISCVISD::VWADDU_W_VL", SDT_RISCVVWBinOpW_VL>;
  269. def riscv_vwsub_w_vl : SDNode<"RISCVISD::VWSUB_W_VL", SDT_RISCVVWBinOpW_VL>;
  270. def riscv_vwsubu_w_vl : SDNode<"RISCVISD::VWSUBU_W_VL", SDT_RISCVVWBinOpW_VL>;
  271. def SDTRVVVecReduce : SDTypeProfile<1, 5, [
  272. SDTCisVec<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisSameAs<0, 3>,
  273. SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<2, 4>, SDTCisVT<5, XLenVT>
  274. ]>;
  275. def riscv_add_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
  276. node:$E),
  277. (riscv_add_vl node:$A, node:$B, node:$C,
  278. node:$D, node:$E), [{
  279. return N->hasOneUse();
  280. }]>;
  281. def riscv_sub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
  282. node:$E),
  283. (riscv_sub_vl node:$A, node:$B, node:$C,
  284. node:$D, node:$E), [{
  285. return N->hasOneUse();
  286. }]>;
  287. def riscv_mul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
  288. node:$E),
  289. (riscv_mul_vl node:$A, node:$B, node:$C,
  290. node:$D, node:$E), [{
  291. return N->hasOneUse();
  292. }]>;
  293. def riscv_vwmul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
  294. node:$E),
  295. (riscv_vwmul_vl node:$A, node:$B, node:$C,
  296. node:$D, node:$E), [{
  297. return N->hasOneUse();
  298. }]>;
  299. def riscv_vwmulu_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
  300. node:$E),
  301. (riscv_vwmulu_vl node:$A, node:$B, node:$C,
  302. node:$D, node:$E), [{
  303. return N->hasOneUse();
  304. }]>;
  305. def riscv_vwmulsu_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
  306. node:$E),
  307. (riscv_vwmulsu_vl node:$A, node:$B, node:$C,
  308. node:$D, node:$E), [{
  309. return N->hasOneUse();
  310. }]>;
  311. def riscv_sext_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C),
  312. (riscv_sext_vl node:$A, node:$B, node:$C), [{
  313. return N->hasOneUse();
  314. }]>;
  315. def riscv_zext_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C),
  316. (riscv_zext_vl node:$A, node:$B, node:$C), [{
  317. return N->hasOneUse();
  318. }]>;
  319. def riscv_fpextend_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C),
  320. (riscv_fpextend_vl node:$A, node:$B, node:$C), [{
  321. return N->hasOneUse();
  322. }]>;
  323. def riscv_vfmadd_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
  324. node:$E),
  325. (riscv_vfmadd_vl node:$A, node:$B,
  326. node:$C, node:$D, node:$E), [{
  327. return N->hasOneUse();
  328. }]>;
  329. def riscv_vfnmadd_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
  330. node:$E),
  331. (riscv_vfnmadd_vl node:$A, node:$B,
  332. node:$C, node:$D, node:$E), [{
  333. return N->hasOneUse();
  334. }]>;
  335. def riscv_vfmsub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
  336. node:$E),
  337. (riscv_vfmsub_vl node:$A, node:$B,
  338. node:$C, node:$D, node:$E), [{
  339. return N->hasOneUse();
  340. }]>;
  341. def riscv_vfnmsub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
  342. node:$E),
  343. (riscv_vfnmsub_vl node:$A, node:$B,
  344. node:$C, node:$D, node:$E), [{
  345. return N->hasOneUse();
  346. }]>;
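// The *_oneuse fragments layer a hasOneUse() predicate on top of the plain
// nodes. The fused patterns later in this file (e.g. the multiply-add
// patterns built from riscv_mul_vl_oneuse) use them so an intermediate result
// is only folded into a fused instruction when nothing else consumes it;
// otherwise the intermediate value would have to be computed anyway.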
  347. foreach kind = ["ADD", "UMAX", "SMAX", "UMIN", "SMIN", "AND", "OR", "XOR",
  348. "FADD", "SEQ_FADD", "FMIN", "FMAX"] in
  349. def rvv_vecreduce_#kind#_vl : SDNode<"RISCVISD::VECREDUCE_"#kind#"_VL", SDTRVVVecReduce>;
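// The foreach above expands to one SDNode per reduction kind, e.g.
// rvv_vecreduce_ADD_vl for RISCVISD::VECREDUCE_ADD_VL and
// rvv_vecreduce_SEQ_FADD_vl for RISCVISD::VECREDUCE_SEQ_FADD_VL, all sharing
// the SDTRVVVecReduce profile.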
  350. // Give explicit Complexity to prefer simm5/uimm5.
  351. def SplatPat : ComplexPattern<vAny, 1, "selectVSplat", [], [], 1>;
  352. def SplatPat_simm5 : ComplexPattern<vAny, 1, "selectVSplatSimm5", [], [], 2>;
  353. def SplatPat_uimm5 : ComplexPattern<vAny, 1, "selectVSplatUimm5", [], [], 2>;
  354. def SplatPat_simm5_plus1
  355. : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1", [], [], 2>;
  356. def SplatPat_simm5_plus1_nonzero
  357. : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1NonZero", [], [], 2>;
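// With the generic SplatPat at complexity 1 and the immediate variants at
// complexity 2, a splat of a small constant (say 3) is matched by the
// immediate matcher first, so selection prefers the .vi encoding over the
// .vx encoding whenever both patterns would otherwise apply.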
  358. // Ignore the vl operand.
  359. def SplatFPOp : PatFrag<(ops node:$op),
  360. (riscv_vfmv_v_f_vl undef, node:$op, srcvalue)>;
  361. def sew8simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<8>", []>;
  362. def sew16simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<16>", []>;
  363. def sew32simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<32>", []>;
  364. def sew64simm5 : ComplexPattern<XLenVT, 1, "selectRVVSimm5<64>", []>;
  365. multiclass VPatBinaryVL_V<SDNode vop,
  366. string instruction_name,
  367. string suffix,
  368. ValueType result_type,
  369. ValueType op1_type,
  370. ValueType op2_type,
  371. ValueType mask_type,
  372. int sew,
  373. LMULInfo vlmul,
  374. VReg result_reg_class,
  375. VReg op1_reg_class,
  376. VReg op2_reg_class> {
  377. def : Pat<(result_type (vop
  378. (op1_type op1_reg_class:$rs1),
  379. (op2_type op2_reg_class:$rs2),
  380. (result_type result_reg_class:$merge),
  381. (mask_type V0),
  382. VLOpFrag)),
  383. (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_MASK")
  384. result_reg_class:$merge,
  385. op1_reg_class:$rs1,
  386. op2_reg_class:$rs2,
  387. (mask_type V0), GPR:$vl, sew, TAIL_AGNOSTIC)>;
  388. }
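// Illustration: instantiated with, say, instruction_name = "PseudoVADD",
// suffix = "VV" and an LMUL=1 type, the !cast above would resolve to
// PseudoVADD_VV_M1_MASK, with operands (merge, rs1, rs2, mask, vl, sew,
// policy) and a tail-agnostic policy. "PseudoVADD" is only an illustrative
// argument here; the real instantiations supply the concrete pseudo names.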
  389. multiclass VPatTiedBinaryNoMaskVL_V<SDNode vop,
  390. string instruction_name,
  391. string suffix,
  392. ValueType result_type,
  393. ValueType op2_type,
  394. int sew,
  395. LMULInfo vlmul,
  396. VReg result_reg_class,
  397. VReg op2_reg_class> {
  398. def : Pat<(result_type (vop
  399. (result_type result_reg_class:$rs1),
  400. (op2_type op2_reg_class:$rs2),
  401. srcvalue,
  402. true_mask,
  403. VLOpFrag)),
  404. (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_TIED")
  405. result_reg_class:$rs1,
  406. op2_reg_class:$rs2,
  407. GPR:$vl, sew, TAIL_AGNOSTIC)>;
  408. // Tail undisturbed
  409. def : Pat<(riscv_vp_merge_vl true_mask,
  410. (result_type (vop
  411. result_reg_class:$rs1,
  412. (op2_type op2_reg_class:$rs2),
  413. srcvalue,
  414. true_mask,
  415. VLOpFrag)),
  416. result_reg_class:$rs1, VLOpFrag),
  417. (!cast<Instruction>(instruction_name#"_"#suffix#"_"# vlmul.MX#"_TIED")
  418. result_reg_class:$rs1,
  419. op2_reg_class:$rs2,
  420. GPR:$vl, sew, TAIL_UNDISTURBED_MASK_UNDISTURBED)>;
  421. }
  422. multiclass VPatBinaryVL_XI<SDNode vop,
  423. string instruction_name,
  424. string suffix,
  425. ValueType result_type,
  426. ValueType vop1_type,
  427. ValueType vop2_type,
  428. ValueType mask_type,
  429. int sew,
  430. LMULInfo vlmul,
  431. VReg result_reg_class,
  432. VReg vop_reg_class,
  433. ComplexPattern SplatPatKind,
  434. DAGOperand xop_kind> {
  435. def : Pat<(result_type (vop
  436. (vop1_type vop_reg_class:$rs1),
  437. (vop2_type (SplatPatKind (XLenVT xop_kind:$rs2))),
  438. (result_type result_reg_class:$merge),
  439. (mask_type V0),
  440. VLOpFrag)),
  441. (!cast<Instruction>(instruction_name#_#suffix#_# vlmul.MX#"_MASK")
  442. result_reg_class:$merge,
  443. vop_reg_class:$rs1,
  444. xop_kind:$rs2,
  445. (mask_type V0), GPR:$vl, sew, TAIL_AGNOSTIC)>;
  446. }
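// The scalar operand is matched through SplatPatKind, so this one multiclass
// covers both the register (.vx) and immediate (.vi) scalar forms: callers
// pass SplatPat with GPR for the former and an immediate splat matcher such
// as SplatPat_simm5 with simm5 for the latter (see VPatBinaryVL_VV_VX and
// VPatBinaryVL_VV_VX_VI below).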
  447. multiclass VPatBinaryVL_VV_VX<SDNode vop, string instruction_name> {
  448. foreach vti = AllIntegerVectors in {
  449. defm : VPatBinaryVL_V<vop, instruction_name, "VV",
  450. vti.Vector, vti.Vector, vti.Vector, vti.Mask,
  451. vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
  452. vti.RegClass>;
  453. defm : VPatBinaryVL_XI<vop, instruction_name, "VX",
  454. vti.Vector, vti.Vector, vti.Vector, vti.Mask,
  455. vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
  456. SplatPat, GPR>;
  457. }
  458. }
  459. multiclass VPatBinaryVL_VV_VX_VI<SDNode vop, string instruction_name,
  460. Operand ImmType = simm5>
  461. : VPatBinaryVL_VV_VX<vop, instruction_name> {
  462. foreach vti = AllIntegerVectors in {
  463. defm : VPatBinaryVL_XI<vop, instruction_name, "VI",
  464. vti.Vector, vti.Vector, vti.Vector, vti.Mask,
  465. vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
  466. !cast<ComplexPattern>(SplatPat#_#ImmType),
  467. ImmType>;
  468. }
  469. }
  470. multiclass VPatBinaryWVL_VV_VX<SDNode vop, string instruction_name> {
  471. foreach VtiToWti = AllWidenableIntVectors in {
  472. defvar vti = VtiToWti.Vti;
  473. defvar wti = VtiToWti.Wti;
  474. defm : VPatBinaryVL_V<vop, instruction_name, "VV",
  475. wti.Vector, vti.Vector, vti.Vector, vti.Mask,
  476. vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass,
  477. vti.RegClass>;
  478. defm : VPatBinaryVL_XI<vop, instruction_name, "VX",
  479. wti.Vector, vti.Vector, vti.Vector, vti.Mask,
  480. vti.Log2SEW, vti.LMul, wti.RegClass, vti.RegClass,
  481. SplatPat, GPR>;
  482. }
  483. }
  484. multiclass VPatBinaryWVL_VV_VX_WV_WX<SDNode vop, SDNode vop_w,
  485. string instruction_name>
  486. : VPatBinaryWVL_VV_VX<vop, instruction_name> {
  487. foreach VtiToWti = AllWidenableIntVectors in {
  488. defvar vti = VtiToWti.Vti;
  489. defvar wti = VtiToWti.Wti;
  490. defm : VPatTiedBinaryNoMaskVL_V<vop_w, instruction_name, "WV",
  491. wti.Vector, vti.Vector, vti.Log2SEW,
  492. vti.LMul, wti.RegClass, vti.RegClass>;
  493. defm : VPatBinaryVL_V<vop_w, instruction_name, "WV",
  494. wti.Vector, wti.Vector, vti.Vector, vti.Mask,
  495. vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass,
  496. vti.RegClass>;
  497. defm : VPatBinaryVL_XI<vop_w, instruction_name, "WX",
  498. wti.Vector, wti.Vector, vti.Vector, vti.Mask,
  499. vti.Log2SEW, vti.LMul, wti.RegClass, wti.RegClass,
  500. SplatPat, GPR>;
  501. }
  502. }
  503. multiclass VPatBinaryNVL_WV_WX_WI<SDNode vop, string instruction_name> {
  504. foreach VtiToWti = AllWidenableIntVectors in {
  505. defvar vti = VtiToWti.Vti;
  506. defvar wti = VtiToWti.Wti;
  507. defm : VPatBinaryVL_V<vop, instruction_name, "WV",
  508. vti.Vector, wti.Vector, vti.Vector, vti.Mask,
  509. vti.Log2SEW, vti.LMul, vti.RegClass, wti.RegClass,
  510. vti.RegClass>;
  511. defm : VPatBinaryVL_XI<vop, instruction_name, "WX",
  512. vti.Vector, wti.Vector, vti.Vector, vti.Mask,
  513. vti.Log2SEW, vti.LMul, vti.RegClass, wti.RegClass,
  514. SplatPat, GPR>;
  515. defm : VPatBinaryVL_XI<vop, instruction_name, "WI",
  516. vti.Vector, wti.Vector, vti.Vector, vti.Mask,
  517. vti.Log2SEW, vti.LMul, vti.RegClass, wti.RegClass,
  518. !cast<ComplexPattern>(SplatPat#_#uimm5),
  519. uimm5>;
  520. }
  521. }
  522. multiclass VPatBinaryVL_VF<SDNode vop,
  523. string instruction_name,
  524. ValueType result_type,
  525. ValueType vop_type,
  526. ValueType mask_type,
  527. int sew,
  528. LMULInfo vlmul,
  529. VReg result_reg_class,
  530. VReg vop_reg_class,
  531. RegisterClass scalar_reg_class> {
  532. def : Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
  533. (vop_type (SplatFPOp scalar_reg_class:$rs2)),
  534. (result_type result_reg_class:$merge),
  535. (mask_type V0),
  536. VLOpFrag)),
  537. (!cast<Instruction>(instruction_name#"_"#vlmul.MX#"_MASK")
  538. result_reg_class:$merge,
  539. vop_reg_class:$rs1,
  540. scalar_reg_class:$rs2,
  541. (mask_type V0), GPR:$vl, sew, TAIL_AGNOSTIC)>;
  542. }
  543. multiclass VPatBinaryFPVL_VV_VF<SDNode vop, string instruction_name> {
  544. foreach vti = AllFloatVectors in {
  545. defm : VPatBinaryVL_V<vop, instruction_name, "VV",
  546. vti.Vector, vti.Vector, vti.Vector, vti.Mask,
  547. vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
  548. vti.RegClass>;
  549. defm : VPatBinaryVL_VF<vop, instruction_name#"_V"#vti.ScalarSuffix,
  550. vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
  551. vti.LMul, vti.RegClass, vti.RegClass,
  552. vti.ScalarRegClass>;
  553. }
  554. }
  555. multiclass VPatBinaryFPVL_R_VF<SDNode vop, string instruction_name> {
  556. foreach fvti = AllFloatVectors in {
  557. def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2),
  558. fvti.RegClass:$rs1,
  559. (fvti.Vector fvti.RegClass:$merge),
  560. (fvti.Mask V0),
  561. VLOpFrag)),
  562. (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")
  563. fvti.RegClass:$merge,
  564. fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
  565. (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
  566. }
  567. }
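// VPatBinaryFPVL_VV_VF handles the (vector, splat-scalar) operand order;
// VPatBinaryFPVL_R_VF handles the reversed (splat-scalar, vector) order,
// which is what the reverse scalar instructions (e.g. vfrsub.vf, vfrdiv.vf)
// provide for non-commutative operations.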
  568. multiclass VPatIntegerSetCCVL_VV<VTypeInfo vti, string instruction_name,
  569. CondCode cc> {
  570. def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
  571. vti.RegClass:$rs2, cc,
  572. VR:$merge,
  573. (vti.Mask V0),
  574. VLOpFrag)),
  575. (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK")
  576. VR:$merge,
  577. vti.RegClass:$rs1,
  578. vti.RegClass:$rs2,
  579. (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
  580. }
  581. // Inherits from VPatIntegerSetCCVL_VV and adds a pattern with operands swapped.
  582. multiclass VPatIntegerSetCCVL_VV_Swappable<VTypeInfo vti, string instruction_name,
  583. CondCode cc, CondCode invcc>
  584. : VPatIntegerSetCCVL_VV<vti, instruction_name, cc> {
  585. def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs2),
  586. vti.RegClass:$rs1, invcc,
  587. VR:$merge,
  588. (vti.Mask V0),
  589. VLOpFrag)),
  590. (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK")
  591. VR:$merge, vti.RegClass:$rs1,
  592. vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
  593. }
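// The swapped pattern lets one compare instruction also cover its inverse
// condition: a setcc with the operands in the other order and invcc (for
// instance SETGT when cc is SETLT) selects the same instruction with rs1 and
// rs2 exchanged, so greater-than style vector-vector compares can reuse the
// less-than instructions.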
  594. multiclass VPatIntegerSetCCVL_VX_Swappable<VTypeInfo vti, string instruction_name,
  595. CondCode cc, CondCode invcc> {
  596. defvar instruction_masked = !cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX#"_MASK");
  597. def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
  598. (SplatPat (XLenVT GPR:$rs2)), cc,
  599. VR:$merge,
  600. (vti.Mask V0),
  601. VLOpFrag)),
  602. (instruction_masked VR:$merge, vti.RegClass:$rs1,
  603. GPR:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
  604. def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat (XLenVT GPR:$rs2)),
  605. (vti.Vector vti.RegClass:$rs1), invcc,
  606. VR:$merge,
  607. (vti.Mask V0),
  608. VLOpFrag)),
  609. (instruction_masked VR:$merge, vti.RegClass:$rs1,
  610. GPR:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
  611. }
  612. multiclass VPatIntegerSetCCVL_VI_Swappable<VTypeInfo vti, string instruction_name,
  613. CondCode cc, CondCode invcc> {
  614. defvar instruction_masked = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK");
  615. def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
  616. (SplatPat_simm5 simm5:$rs2), cc,
  617. VR:$merge,
  618. (vti.Mask V0),
  619. VLOpFrag)),
  620. (instruction_masked VR:$merge, vti.RegClass:$rs1,
  621. XLenVT:$rs2, (vti.Mask V0), GPR:$vl,
  622. vti.Log2SEW)>;
  623. // FIXME: Can do some canonicalization to remove these patterns.
  624. def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat_simm5 simm5:$rs2),
  625. (vti.Vector vti.RegClass:$rs1), invcc,
  626. VR:$merge,
  627. (vti.Mask V0),
  628. VLOpFrag)),
  629. (instruction_masked VR:$merge, vti.RegClass:$rs1,
  630. simm5:$rs2, (vti.Mask V0), GPR:$vl,
  631. vti.Log2SEW)>;
  632. }
  633. multiclass VPatIntegerSetCCVL_VIPlus1_Swappable<VTypeInfo vti,
  634. string instruction_name,
  635. CondCode cc, CondCode invcc,
  636. ComplexPattern splatpat_kind> {
  637. defvar instruction_masked = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK");
  638. def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
  639. (splatpat_kind simm5:$rs2), cc,
  640. VR:$merge,
  641. (vti.Mask V0),
  642. VLOpFrag)),
  643. (instruction_masked VR:$merge, vti.RegClass:$rs1,
  644. (DecImm simm5:$rs2), (vti.Mask V0), GPR:$vl,
  645. vti.Log2SEW)>;
  646. // FIXME: Can do some canonicalization to remove these patterns.
  647. def : Pat<(vti.Mask (riscv_setcc_vl (splatpat_kind simm5:$rs2),
  648. (vti.Vector vti.RegClass:$rs1), invcc,
  649. VR:$merge,
  650. (vti.Mask V0),
  651. VLOpFrag)),
  652. (instruction_masked VR:$merge, vti.RegClass:$rs1,
  653. (DecImm simm5:$rs2), (vti.Mask V0), GPR:$vl,
  654. vti.Log2SEW)>;
  655. }
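// The *_VIPlus1 variant matches a splat of C whose decrement fits in simm5
// (via the selectVSplatSimm5Plus1* matchers) and emits the immediate
// decremented by DecImm. This allows, for example, a signed "x < C" compare
// to be encoded as a less-than-or-equal compare against C-1, covering
// immediate compare forms the ISA does not provide directly.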
  656. multiclass VPatFPSetCCVL_VV_VF_FV<CondCode cc,
  657. string inst_name,
  658. string swapped_op_inst_name> {
  659. foreach fvti = AllFloatVectors in {
  660. def : Pat<(fvti.Mask (riscv_setcc_vl (fvti.Vector fvti.RegClass:$rs1),
  661. fvti.RegClass:$rs2,
  662. cc,
  663. VR:$merge,
  664. (fvti.Mask V0),
  665. VLOpFrag)),
  666. (!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX#"_MASK")
  667. VR:$merge, fvti.RegClass:$rs1,
  668. fvti.RegClass:$rs2, (fvti.Mask V0),
  669. GPR:$vl, fvti.Log2SEW)>;
  670. def : Pat<(fvti.Mask (riscv_setcc_vl (fvti.Vector fvti.RegClass:$rs1),
  671. (SplatFPOp fvti.ScalarRegClass:$rs2),
  672. cc,
  673. VR:$merge,
  674. (fvti.Mask V0),
  675. VLOpFrag)),
  676. (!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")
  677. VR:$merge, fvti.RegClass:$rs1,
  678. fvti.ScalarRegClass:$rs2, (fvti.Mask V0),
  679. GPR:$vl, fvti.Log2SEW)>;
  680. def : Pat<(fvti.Mask (riscv_setcc_vl (SplatFPOp fvti.ScalarRegClass:$rs2),
  681. (fvti.Vector fvti.RegClass:$rs1),
  682. cc,
  683. VR:$merge,
  684. (fvti.Mask V0),
  685. VLOpFrag)),
  686. (!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")
  687. VR:$merge, fvti.RegClass:$rs1,
  688. fvti.ScalarRegClass:$rs2, (fvti.Mask V0),
  689. GPR:$vl, fvti.Log2SEW)>;
  690. }
  691. }
  692. multiclass VPatExtendVL_V<SDNode vop, string inst_name, string suffix,
  693. list <VTypeInfoToFraction> fraction_list> {
  694. foreach vtiTofti = fraction_list in {
  695. defvar vti = vtiTofti.Vti;
  696. defvar fti = vtiTofti.Fti;
  697. def : Pat<(vti.Vector (vop (fti.Vector fti.RegClass:$rs2),
  698. (fti.Mask V0), VLOpFrag)),
  699. (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX#"_MASK")
  700. (vti.Vector (IMPLICIT_DEF)),
  701. fti.RegClass:$rs2,
  702. (fti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
  703. }
  704. }
  705. // Single width converting
  706. multiclass VPatConvertFP2IVL_V<SDNode vop, string instruction_name> {
  707. foreach fvti = AllFloatVectors in {
  708. defvar ivti = GetIntVTypeInfo<fvti>.Vti;
  709. def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
  710. (fvti.Mask V0),
  711. VLOpFrag)),
  712. (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK")
  713. (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
  714. (fvti.Mask V0), GPR:$vl, ivti.Log2SEW, TA_MA)>;
  715. }
  716. }
  717. multiclass VPatConvertFP2I_RM_VL_V<SDNode vop, string instruction_name> {
  718. foreach fvti = AllFloatVectors in {
  719. defvar ivti = GetIntVTypeInfo<fvti>.Vti;
  720. def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
  721. (fvti.Mask V0), (XLenVT timm:$frm),
  722. VLOpFrag)),
  723. (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK")
  724. (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
  725. (fvti.Mask V0), timm:$frm, GPR:$vl, ivti.Log2SEW,
  726. TA_MA)>;
  727. }
  728. }
  729. multiclass VPatConvertI2FPVL_V<SDNode vop, string instruction_name> {
  730. foreach fvti = AllFloatVectors in {
  731. defvar ivti = GetIntVTypeInfo<fvti>.Vti;
  732. def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1),
  733. (ivti.Mask V0),
  734. VLOpFrag)),
  735. (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
  736. (fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1,
  737. (ivti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>;
  738. }
  739. }
  740. multiclass VPatConvertI2FP_RM_VL_V<SDNode vop, string instruction_name> {
  741. foreach fvti = AllFloatVectors in {
  742. defvar ivti = GetIntVTypeInfo<fvti>.Vti;
  743. def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1),
  744. (ivti.Mask V0), (XLenVT timm:$frm),
  745. VLOpFrag)),
  746. (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
  747. (fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1,
  748. (ivti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>;
  749. }
  750. }
  751. // Widening converting
  752. multiclass VPatWConvertFP2IVL_V<SDNode vop, string instruction_name> {
  753. foreach fvtiToFWti = AllWidenableFloatVectors in {
  754. defvar fvti = fvtiToFWti.Vti;
  755. defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
  756. def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
  757. (fvti.Mask V0),
  758. VLOpFrag)),
  759. (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
  760. (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
  761. (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>;
  762. }
  763. }
  764. multiclass VPatWConvertFP2I_RM_VL_V<SDNode vop, string instruction_name> {
  765. foreach fvtiToFWti = AllWidenableFloatVectors in {
  766. defvar fvti = fvtiToFWti.Vti;
  767. defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
  768. def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1),
  769. (fvti.Mask V0), (XLenVT timm:$frm),
  770. VLOpFrag)),
  771. (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
  772. (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
  773. (fvti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>;
  774. }
  775. }
  776. multiclass VPatWConvertI2FPVL_V<SDNode vop, string instruction_name> {
  777. foreach vtiToWti = AllWidenableIntToFloatVectors in {
  778. defvar ivti = vtiToWti.Vti;
  779. defvar fwti = vtiToWti.Wti;
  780. def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1),
  781. (ivti.Mask V0),
  782. VLOpFrag)),
  783. (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK")
  784. (fwti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1,
  785. (ivti.Mask V0), GPR:$vl, ivti.Log2SEW, TA_MA)>;
  786. }
  787. }
  788. multiclass VPatWConvertI2FP_RM_VL_V<SDNode vop, string instruction_name> {
  789. foreach vtiToWti = AllWidenableIntToFloatVectors in {
  790. defvar ivti = vtiToWti.Vti;
  791. defvar fwti = vtiToWti.Wti;
  792. def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1),
  793. (ivti.Mask V0), (XLenVT timm:$frm),
  794. VLOpFrag)),
  795. (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX#"_MASK")
  796. (fwti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1,
  797. (ivti.Mask V0), timm:$frm, GPR:$vl, ivti.Log2SEW, TA_MA)>;
  798. }
  799. }
  800. // Narrowing converting
  801. multiclass VPatNConvertFP2IVL_V<SDNode vop, string instruction_name> {
  802. // Reuse the same list of types used in the widening nodes, but just swap the
  803. // direction of types around so we're converting from Wti -> Vti
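// For example, for the (i32, f64) pairing in that list, vti is the i32
// vector type and fwti the f64 vector type, so the pattern below converts a
// wide FP source into a narrow integer result (a vfncvt-style narrowing).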
  804. foreach vtiToWti = AllWidenableIntToFloatVectors in {
  805. defvar vti = vtiToWti.Vti;
  806. defvar fwti = vtiToWti.Wti;
  807. def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1),
  808. (fwti.Mask V0),
  809. VLOpFrag)),
  810. (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK")
  811. (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
  812. (fwti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
  813. }
  814. }
  815. multiclass VPatNConvertFP2I_RM_VL_V<SDNode vop, string instruction_name> {
  816. foreach vtiToWti = AllWidenableIntToFloatVectors in {
  817. defvar vti = vtiToWti.Vti;
  818. defvar fwti = vtiToWti.Wti;
  819. def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1),
  820. (fwti.Mask V0), (XLenVT timm:$frm),
  821. VLOpFrag)),
  822. (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX#"_MASK")
  823. (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
  824. (fwti.Mask V0), timm:$frm, GPR:$vl, vti.Log2SEW, TA_MA)>;
  825. }
  826. }
  827. multiclass VPatNConvertI2FPVL_V<SDNode vop, string instruction_name> {
  828. foreach fvtiToFWti = AllWidenableFloatVectors in {
  829. defvar fvti = fvtiToFWti.Vti;
  830. defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
  831. def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1),
  832. (iwti.Mask V0),
  833. VLOpFrag)),
  834. (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
  835. (fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1,
  836. (iwti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>;
  837. }
  838. }
  839. multiclass VPatNConvertI2FP_RM_VL_V<SDNode vop, string instruction_name> {
  840. foreach fvtiToFWti = AllWidenableFloatVectors in {
  841. defvar fvti = fvtiToFWti.Vti;
  842. defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
  843. def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1),
  844. (iwti.Mask V0), (XLenVT timm:$frm),
  845. VLOpFrag)),
  846. (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX#"_MASK")
  847. (fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1,
  848. (iwti.Mask V0), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>;
  849. }
  850. }
  851. multiclass VPatReductionVL<SDNode vop, string instruction_name, bit is_float> {
  852. foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in {
  853. defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # vti.SEW # "M1");
  854. def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), VR:$rs2,
  855. (vti.Mask true_mask),
  856. VLOpFrag)),
  857. (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX)
  858. (vti_m1.Vector VR:$merge),
  859. (vti.Vector vti.RegClass:$rs1),
  860. (vti_m1.Vector VR:$rs2),
  861. GPR:$vl, vti.Log2SEW)>;
  862. def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1), VR:$rs2,
  863. (vti.Mask V0), VLOpFrag)),
  864. (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_MASK")
  865. (vti_m1.Vector VR:$merge),
  866. (vti.Vector vti.RegClass:$rs1),
  867. (vti_m1.Vector VR:$rs2),
  868. (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
  869. }
  870. }
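// Both reduction patterns keep the scalar accumulator and result in an
// LMUL=1 register class (vti_m1): the first covers the all-ones-mask case
// and selects the unmasked pseudo, the second covers the general case under
// V0 and selects the _MASK form.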
  871. multiclass VPatBinaryExtVL_WV_WX<SDNode op, PatFrags extop, string instruction_name> {
  872. foreach vtiToWti = AllWidenableIntVectors in {
  873. defvar vti = vtiToWti.Vti;
  874. defvar wti = vtiToWti.Wti;
  875. def : Pat<
  876. (vti.Vector
  877. (riscv_trunc_vector_vl
  878. (op (wti.Vector wti.RegClass:$rs2),
  879. (wti.Vector (extop (vti.Vector vti.RegClass:$rs1)))),
  880. (vti.Mask true_mask),
  881. VLOpFrag)),
  882. (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX)
  883. wti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
  884. def : Pat<
  885. (vti.Vector
  886. (riscv_trunc_vector_vl
  887. (op (wti.Vector wti.RegClass:$rs2),
  888. (wti.Vector (extop (vti.Vector (SplatPat GPR:$rs1))))),
  889. (vti.Mask true_mask),
  890. VLOpFrag)),
  891. (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX)
  892. wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW)>;
  893. }
  894. }
  895. multiclass VPatBinaryVL_WV_WX_WI<SDNode op, string instruction_name> {
  896. defm : VPatBinaryExtVL_WV_WX<op, sext_oneuse, instruction_name>;
  897. defm : VPatBinaryExtVL_WV_WX<op, zext_oneuse, instruction_name>;
  898. foreach vtiToWti = AllWidenableIntVectors in {
  899. defvar vti = vtiToWti.Vti;
  900. defvar wti = vtiToWti.Wti;
  901. def : Pat<
  902. (vti.Vector
  903. (riscv_trunc_vector_vl
  904. (op (wti.Vector wti.RegClass:$rs2),
  905. (wti.Vector (SplatPat_uimm5 uimm5:$rs1))), (vti.Mask true_mask),
  906. VLOpFrag)),
  907. (!cast<Instruction>(instruction_name#"_WI_"#vti.LMul.MX)
  908. wti.RegClass:$rs2, uimm5:$rs1, GPR:$vl, vti.Log2SEW)>;
  909. }
  910. }
  911. multiclass VPatWidenReductionVL<SDNode vop, PatFrags extop, string instruction_name, bit is_float> {
  912. foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in {
  913. defvar vti = vtiToWti.Vti;
  914. defvar wti = vtiToWti.Wti;
  915. defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1");
  916. def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
  917. (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))),
  918. VR:$rs2, (vti.Mask true_mask), VLOpFrag)),
  919. (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX)
  920. (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
  921. (wti_m1.Vector VR:$rs2), GPR:$vl, vti.Log2SEW)>;
  922. def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
  923. (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))),
  924. VR:$rs2, (vti.Mask V0), VLOpFrag)),
  925. (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_MASK")
  926. (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
  927. (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
  928. }
  929. }
  930. multiclass VPatWidenReductionVL_Ext_VL<SDNode vop, PatFrags extop, string instruction_name, bit is_float> {
  931. foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in {
  932. defvar vti = vtiToWti.Vti;
  933. defvar wti = vtiToWti.Wti;
  934. defvar wti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # wti.SEW # "M1");
  935. def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
  936. (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)),
  937. VR:$rs2, (vti.Mask true_mask), VLOpFrag)),
  938. (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX)
  939. (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
  940. (wti_m1.Vector VR:$rs2), GPR:$vl, vti.Log2SEW)>;
  941. def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$merge),
  942. (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)),
  943. VR:$rs2, (vti.Mask V0), VLOpFrag)),
  944. (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX#"_MASK")
  945. (wti_m1.Vector VR:$merge), (vti.Vector vti.RegClass:$rs1),
  946. (wti_m1.Vector VR:$rs2), (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
  947. }
  948. }
  949. multiclass VPatWidenBinaryFPVL_VV_VF<SDNode op, PatFrags extop, string instruction_name> {
  950. foreach fvtiToFWti = AllWidenableFloatVectors in {
  951. defvar fvti = fvtiToFWti.Vti;
  952. defvar fwti = fvtiToFWti.Wti;
  953. def : Pat<(fwti.Vector (op (fwti.Vector (extop (fvti.Vector fvti.RegClass:$rs2),
  954. (fvti.Mask true_mask), VLOpFrag)),
  955. (fwti.Vector (extop (fvti.Vector fvti.RegClass:$rs1),
  956. (fvti.Mask true_mask), VLOpFrag)),
  957. srcvalue, (fwti.Mask true_mask), VLOpFrag)),
  958. (!cast<Instruction>(instruction_name#"_VV_"#fvti.LMul.MX)
  959. fvti.RegClass:$rs2, fvti.RegClass:$rs1,
  960. GPR:$vl, fvti.Log2SEW)>;
  961. def : Pat<(fwti.Vector (op (fwti.Vector (extop (fvti.Vector fvti.RegClass:$rs2),
  962. (fvti.Mask true_mask), VLOpFrag)),
  963. (fwti.Vector (extop (fvti.Vector (SplatFPOp fvti.ScalarRegClass:$rs1)),
  964. (fvti.Mask true_mask), VLOpFrag)),
  965. srcvalue, (fwti.Mask true_mask), VLOpFrag)),
  966. (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
  967. fvti.RegClass:$rs2, fvti.ScalarRegClass:$rs1,
  968. GPR:$vl, fvti.Log2SEW)>;
  969. }
  970. }
  971. multiclass VPatWidenBinaryFPVL_WV_WF<SDNode op, PatFrags extop, string instruction_name> {
  972. foreach fvtiToFWti = AllWidenableFloatVectors in {
  973. defvar fvti = fvtiToFWti.Vti;
  974. defvar fwti = fvtiToFWti.Wti;
  975. def : Pat<(fwti.Vector (op (fwti.Vector fwti.RegClass:$rs2),
  976. (fwti.Vector (extop (fvti.Vector fvti.RegClass:$rs1),
  977. (fvti.Mask true_mask), VLOpFrag)),
  978. srcvalue, (fwti.Mask true_mask), VLOpFrag)),
  979. (!cast<Instruction>(instruction_name#"_WV_"#fvti.LMul.MX#"_TIED")
  980. fwti.RegClass:$rs2, fvti.RegClass:$rs1,
  981. GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
  982. def : Pat<(fwti.Vector (op (fwti.Vector fwti.RegClass:$rs2),
  983. (fwti.Vector (extop (fvti.Vector (SplatFPOp fvti.ScalarRegClass:$rs1)),
  984. (fvti.Mask true_mask), VLOpFrag)),
  985. srcvalue, (fwti.Mask true_mask), VLOpFrag)),
  986. (!cast<Instruction>(instruction_name#"_W"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
  987. fwti.RegClass:$rs2, fvti.ScalarRegClass:$rs1,
  988. GPR:$vl, fvti.Log2SEW)>;
  989. }
  990. }
  991. multiclass VPatWidenBinaryFPVL_VV_VF_WV_WF<SDNode op, string instruction_name> {
  992. defm : VPatWidenBinaryFPVL_VV_VF<op, riscv_fpextend_vl_oneuse, instruction_name>;
  993. defm : VPatWidenBinaryFPVL_WV_WF<op, riscv_fpextend_vl_oneuse, instruction_name>;
  994. }
  995. multiclass VPatNarrowShiftSplatExt_WX<SDNode op, PatFrags extop, string instruction_name> {
  996. foreach vtiToWti = AllWidenableIntVectors in {
  997. defvar vti = vtiToWti.Vti;
  998. defvar wti = vtiToWti.Wti;
  999. def : Pat<
  1000. (vti.Vector
  1001. (riscv_trunc_vector_vl
  1002. (op (wti.Vector wti.RegClass:$rs2),
  1003. (wti.Vector (extop (vti.Vector (SplatPat GPR:$rs1)),
  1004. (vti.Mask true_mask), VLOpFrag)),
  1005. srcvalue, (wti.Mask true_mask), VLOpFrag),
  1006. (vti.Mask true_mask), VLOpFrag)),
  1007. (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX)
  1008. wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW)>;
  1009. }
  1010. }
  1011. multiclass VPatMultiplyAddVL_VV_VX<SDNode op, string instruction_name> {
  1012. foreach vti = AllIntegerVectors in {
  1013. defvar suffix = vti.LMul.MX;
  1014. // NOTE: We choose VMADD because it has the most commuting freedom. So it
  1015. // works best with how TwoAddressInstructionPass tries commuting.
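// (For reference, per the RVV spec: vmacc.vv vd, vs1, vs2 computes
// vd = vs1 * vs2 + vd, while vmadd.vv vd, vs1, vs2 computes
// vd = vs1 * vd + vs2, so the two forms differ only in which source is tied
// to vd.)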
  1016. def : Pat<(vti.Vector
  1017. (op vti.RegClass:$rs2,
  1018. (riscv_mul_vl_oneuse vti.RegClass:$rs1,
  1019. vti.RegClass:$rd,
  1020. srcvalue, (vti.Mask true_mask), VLOpFrag),
  1021. srcvalue, (vti.Mask true_mask), VLOpFrag)),
  1022. (!cast<Instruction>(instruction_name#"_VV_"# suffix)
  1023. vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
  1024. GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  1025. // The choice of VMADD here is arbitrary, vmadd.vx and vmacc.vx are equally
  1026. // commutable.
  1027. def : Pat<(vti.Vector
  1028. (op vti.RegClass:$rs2,
  1029. (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1),
  1030. vti.RegClass:$rd,
  1031. srcvalue, (vti.Mask true_mask), VLOpFrag),
  1032. srcvalue, (vti.Mask true_mask), VLOpFrag)),
  1033. (!cast<Instruction>(instruction_name#"_VX_" # suffix)
  1034. vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
  1035. GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  1036. }
  1037. }
multiclass VPatMultiplyAccVL_VV_VX<PatFrag op, string instruction_name> {
  foreach vti = AllIntegerVectors in {
    defvar suffix = vti.LMul.MX;
    def : Pat<(riscv_vp_merge_vl (vti.Mask true_mask),
                (vti.Vector (op vti.RegClass:$rd,
                              (riscv_mul_vl_oneuse vti.RegClass:$rs1, vti.RegClass:$rs2,
                                srcvalue, (vti.Mask true_mask), VLOpFrag),
                              srcvalue, (vti.Mask true_mask), VLOpFrag)),
                vti.RegClass:$rd, VLOpFrag),
              (!cast<Instruction>(instruction_name#"_VV_"# suffix)
                vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>;
    def : Pat<(riscv_vp_merge_vl (vti.Mask V0),
                (vti.Vector (op vti.RegClass:$rd,
                              (riscv_mul_vl_oneuse vti.RegClass:$rs1, vti.RegClass:$rs2,
                                srcvalue, (vti.Mask true_mask), VLOpFrag),
                              srcvalue, (vti.Mask true_mask), VLOpFrag)),
                vti.RegClass:$rd, VLOpFrag),
              (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>;
    def : Pat<(riscv_vp_merge_vl (vti.Mask true_mask),
                (vti.Vector (op vti.RegClass:$rd,
                              (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rs2,
                                srcvalue, (vti.Mask true_mask), VLOpFrag),
                              srcvalue, (vti.Mask true_mask), VLOpFrag)),
                vti.RegClass:$rd, VLOpFrag),
              (!cast<Instruction>(instruction_name#"_VX_"# suffix)
                vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>;
    def : Pat<(riscv_vp_merge_vl (vti.Mask V0),
                (vti.Vector (op vti.RegClass:$rd,
                              (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rs2,
                                srcvalue, (vti.Mask true_mask), VLOpFrag),
                              srcvalue, (vti.Mask true_mask), VLOpFrag)),
                vti.RegClass:$rd, VLOpFrag),
              (!cast<Instruction>(instruction_name#"_VX_"# suffix #"_MASK")
                vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>;
    def : Pat<(riscv_vselect_vl (vti.Mask V0),
                (vti.Vector (op vti.RegClass:$rd,
                              (riscv_mul_vl_oneuse vti.RegClass:$rs1, vti.RegClass:$rs2,
                                srcvalue, (vti.Mask true_mask), VLOpFrag),
                              srcvalue, (vti.Mask true_mask), VLOpFrag)),
                vti.RegClass:$rd, VLOpFrag),
              (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(riscv_vselect_vl (vti.Mask V0),
                (vti.Vector (op vti.RegClass:$rd,
                              (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rs2,
                                srcvalue, (vti.Mask true_mask), VLOpFrag),
                              srcvalue, (vti.Mask true_mask), VLOpFrag)),
                vti.RegClass:$rd, VLOpFrag),
              (!cast<Instruction>(instruction_name#"_VX_"# suffix #"_MASK")
                vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}
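
// Widening multiply-add: a single-use widening multiply feeding an add of the
// wide accumulator.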
multiclass VPatWidenMultiplyAddVL_VV_VX<PatFrag op1, string instruction_name> {
  foreach vtiTowti = AllWidenableIntVectors in {
    defvar vti = vtiTowti.Vti;
    defvar wti = vtiTowti.Wti;
    def : Pat<(wti.Vector
                (riscv_add_vl wti.RegClass:$rd,
                  (op1 vti.RegClass:$rs1,
                       (vti.Vector vti.RegClass:$rs2),
                       srcvalue, (vti.Mask true_mask), VLOpFrag),
                  srcvalue, (vti.Mask true_mask), VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_VV_" # vti.LMul.MX)
                wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(wti.Vector
                (riscv_add_vl wti.RegClass:$rd,
                  (op1 (SplatPat XLenVT:$rs1),
                       (vti.Vector vti.RegClass:$rs2),
                       srcvalue, (vti.Mask true_mask), VLOpFrag),
                  srcvalue, (vti.Mask true_mask), VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_VX_" # vti.LMul.MX)
                wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}
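
// Narrowing shifts by a splatted scalar or a 5-bit immediate shift amount.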
multiclass VPatNarrowShiftSplat_WX_WI<SDNode op, string instruction_name> {
  foreach vtiTowti = AllWidenableIntVectors in {
    defvar vti = vtiTowti.Vti;
    defvar wti = vtiTowti.Wti;
    def : Pat<(vti.Vector (riscv_trunc_vector_vl
                (wti.Vector (op wti.RegClass:$rs1, (SplatPat XLenVT:$rs2),
                                srcvalue, true_mask, VLOpFrag)), true_mask, VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX)
                wti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
    def : Pat<(vti.Vector (riscv_trunc_vector_vl
                (wti.Vector (op wti.RegClass:$rs1, (SplatPat_uimm5 uimm5:$rs2),
                                srcvalue, true_mask, VLOpFrag)), true_mask, VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_WI_"#vti.LMul.MX)
                wti.RegClass:$rs1, uimm5:$rs2, GPR:$vl, vti.Log2SEW)>;
  }
}
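
// Floating-point fused multiply-add patterns, .vv and .vf forms, masked and
// unmasked.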
multiclass VPatFPMulAddVL_VV_VF<SDNode vop, string instruction_name> {
  foreach vti = AllFloatVectors in {
    defvar suffix = vti.LMul.MX;
    def : Pat<(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rd,
                               vti.RegClass:$rs2, (vti.Mask true_mask),
                               VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_VV_"# suffix)
                vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rd,
                               vti.RegClass:$rs2, (vti.Mask V0),
                               VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1),
                               vti.RegClass:$rd, vti.RegClass:$rs2,
                               (vti.Mask true_mask),
                               VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix)
                vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1),
                               vti.RegClass:$rd, vti.RegClass:$rs2,
                               (vti.Mask V0),
                               VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
                vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}
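
// Floating-point multiply-accumulate patterns where a vmerge or vselect picks
// between the accumulator and the FMA result.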
multiclass VPatFPMulAccVL_VV_VF<PatFrag vop, string instruction_name> {
  foreach vti = AllFloatVectors in {
    defvar suffix = vti.LMul.MX;
    def : Pat<(riscv_vp_merge_vl (vti.Mask true_mask),
                (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2,
                                 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                vti.RegClass:$rd, VLOpFrag),
              (!cast<Instruction>(instruction_name#"_VV_"# suffix)
                vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>;
    def : Pat<(riscv_vp_merge_vl (vti.Mask V0),
                (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2,
                                 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                vti.RegClass:$rd, VLOpFrag),
              (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>;
    def : Pat<(riscv_vp_merge_vl (vti.Mask true_mask),
                (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2,
                                 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                vti.RegClass:$rd, VLOpFrag),
              (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix)
                vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>;
    def : Pat<(riscv_vp_merge_vl (vti.Mask V0),
                (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2,
                                 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                vti.RegClass:$rd, VLOpFrag),
              (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
                vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>;
    def : Pat<(riscv_vselect_vl (vti.Mask V0),
                (vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rs2,
                                 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                vti.RegClass:$rd, VLOpFrag),
              (!cast<Instruction>(instruction_name#"_VV_"# suffix #"_MASK")
                vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(riscv_vselect_vl (vti.Mask V0),
                (vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rs2,
                                 vti.RegClass:$rd, (vti.Mask true_mask), VLOpFrag)),
                vti.RegClass:$rd, VLOpFrag),
              (!cast<Instruction>(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
                vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}
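
// Widening floating-point multiply-add: both multiplicands are single-use
// fpextends of the narrower type.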
multiclass VPatWidenFPMulAccVL_VV_VF<SDNode vop, string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    def : Pat<(vop
                (wti.Vector (riscv_fpextend_vl_oneuse
                              (vti.Vector vti.RegClass:$rs1),
                              (vti.Mask true_mask), VLOpFrag)),
                (wti.Vector (riscv_fpextend_vl_oneuse
                              (vti.Vector vti.RegClass:$rs2),
                              (vti.Mask true_mask), VLOpFrag)),
                (wti.Vector wti.RegClass:$rd), (vti.Mask true_mask),
                VLOpFrag),
              (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(vop
                (wti.Vector (riscv_fpextend_vl_oneuse
                              (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
                              (vti.Mask true_mask), VLOpFrag)),
                (wti.Vector (riscv_fpextend_vl_oneuse
                              (vti.Vector vti.RegClass:$rs2),
                              (vti.Mask true_mask), VLOpFrag)),
                (wti.Vector wti.RegClass:$rd), (vti.Mask true_mask),
                VLOpFrag),
              (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

//===----------------------------------------------------------------------===//
// Patterns.
//===----------------------------------------------------------------------===//

let Predicates = [HasVInstructions] in {

// 11. Vector Integer Arithmetic Instructions

// 11.1. Vector Single-Width Integer Add and Subtract
defm : VPatBinaryVL_VV_VX_VI<riscv_add_vl, "PseudoVADD">;
defm : VPatBinaryVL_VV_VX<riscv_sub_vl, "PseudoVSUB">;
// Handle VRSUB specially since it's the only integer binary op with reversed
// pattern operands
foreach vti = AllIntegerVectors in {
  def : Pat<(riscv_sub_vl (vti.Vector (SplatPat (XLenVT GPR:$rs2))),
                          (vti.Vector vti.RegClass:$rs1),
                          vti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
            (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX#"_MASK")
              vti.RegClass:$merge, vti.RegClass:$rs1, GPR:$rs2,
              (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(riscv_sub_vl (vti.Vector (SplatPat_simm5 simm5:$rs2)),
                          (vti.Vector vti.RegClass:$rs1),
                          vti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
            (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX#"_MASK")
              vti.RegClass:$merge, vti.RegClass:$rs1, simm5:$rs2,
              (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}

// 11.2. Vector Widening Integer Add/Subtract
defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwadd_vl, riscv_vwadd_w_vl, "PseudoVWADD">;
defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwaddu_vl, riscv_vwaddu_w_vl, "PseudoVWADDU">;
defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwsub_vl, riscv_vwsub_w_vl, "PseudoVWSUB">;
defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwsubu_vl, riscv_vwsubu_w_vl, "PseudoVWSUBU">;

// 11.3. Vector Integer Extension
defm : VPatExtendVL_V<riscv_zext_vl, "PseudoVZEXT", "VF2",
                      AllFractionableVF2IntVectors>;
defm : VPatExtendVL_V<riscv_sext_vl, "PseudoVSEXT", "VF2",
                      AllFractionableVF2IntVectors>;
defm : VPatExtendVL_V<riscv_zext_vl, "PseudoVZEXT", "VF4",
                      AllFractionableVF4IntVectors>;
defm : VPatExtendVL_V<riscv_sext_vl, "PseudoVSEXT", "VF4",
                      AllFractionableVF4IntVectors>;
defm : VPatExtendVL_V<riscv_zext_vl, "PseudoVZEXT", "VF8",
                      AllFractionableVF8IntVectors>;
defm : VPatExtendVL_V<riscv_sext_vl, "PseudoVSEXT", "VF8",
                      AllFractionableVF8IntVectors>;

// 11.5. Vector Bitwise Logical Instructions
defm : VPatBinaryVL_VV_VX_VI<riscv_and_vl, "PseudoVAND">;
defm : VPatBinaryVL_VV_VX_VI<riscv_or_vl, "PseudoVOR">;
defm : VPatBinaryVL_VV_VX_VI<riscv_xor_vl, "PseudoVXOR">;

// 11.6. Vector Single-Width Bit Shift Instructions
defm : VPatBinaryVL_VV_VX_VI<riscv_shl_vl, "PseudoVSLL", uimm5>;
defm : VPatBinaryVL_VV_VX_VI<riscv_srl_vl, "PseudoVSRL", uimm5>;
defm : VPatBinaryVL_VV_VX_VI<riscv_sra_vl, "PseudoVSRA", uimm5>;

foreach vti = AllIntegerVectors in {
  // Emit shift by 1 as an add since it might be faster.
  def : Pat<(riscv_shl_vl (vti.Vector vti.RegClass:$rs1),
                          (riscv_vmv_v_x_vl (vti.Vector undef), 1, (XLenVT srcvalue)),
                          srcvalue, (vti.Mask true_mask), VLOpFrag),
            (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX)
              vti.RegClass:$rs1, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
}

// 11.7. Vector Narrowing Integer Right Shift Instructions
defm : VPatBinaryVL_WV_WX_WI<srl, "PseudoVNSRL">;
defm : VPatBinaryVL_WV_WX_WI<sra, "PseudoVNSRA">;
defm : VPatNarrowShiftSplat_WX_WI<riscv_sra_vl, "PseudoVNSRA">;
defm : VPatNarrowShiftSplat_WX_WI<riscv_srl_vl, "PseudoVNSRL">;
defm : VPatNarrowShiftSplatExt_WX<riscv_sra_vl, riscv_sext_vl_oneuse, "PseudoVNSRA">;
defm : VPatNarrowShiftSplatExt_WX<riscv_sra_vl, riscv_zext_vl_oneuse, "PseudoVNSRA">;
defm : VPatNarrowShiftSplatExt_WX<riscv_srl_vl, riscv_sext_vl_oneuse, "PseudoVNSRL">;
defm : VPatNarrowShiftSplatExt_WX<riscv_srl_vl, riscv_zext_vl_oneuse, "PseudoVNSRL">;
defm : VPatBinaryNVL_WV_WX_WI<riscv_vnsrl_vl, "PseudoVNSRL">;
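
// Truncation is matched as a narrowing shift right by 0 (vnsrl.wi with
// immediate 0).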
foreach vtiTowti = AllWidenableIntVectors in {
  defvar vti = vtiTowti.Vti;
  defvar wti = vtiTowti.Wti;
  def : Pat<(vti.Vector (riscv_trunc_vector_vl (wti.Vector wti.RegClass:$rs1),
                                               (vti.Mask V0),
                                               VLOpFrag)),
            (!cast<Instruction>("PseudoVNSRL_WI_"#vti.LMul.MX#"_MASK")
              (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, 0,
              (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
}

// 11.8. Vector Integer Comparison Instructions
foreach vti = AllIntegerVectors in {
  defm : VPatIntegerSetCCVL_VV<vti, "PseudoVMSEQ", SETEQ>;
  defm : VPatIntegerSetCCVL_VV<vti, "PseudoVMSNE", SETNE>;
  defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLT", SETLT, SETGT>;
  defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLTU", SETULT, SETUGT>;
  defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>;
  defm : VPatIntegerSetCCVL_VV_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSEQ", SETEQ, SETEQ>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSNE", SETNE, SETNE>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLT", SETLT, SETGT>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLTU", SETULT, SETUGT>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSGT", SETGT, SETLT>;
  defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSGTU", SETUGT, SETULT>;
  // There is no VMSGE(U)_VX instruction
  defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSEQ", SETEQ, SETEQ>;
  defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSNE", SETNE, SETNE>;
  defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>;
  defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>;
  defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSGT", SETGT, SETLT>;
  defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSGTU", SETUGT, SETULT>;
  defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSLE", SETLT, SETGT,
                                              SplatPat_simm5_plus1_nonzero>;
  defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSLEU", SETULT, SETUGT,
                                              SplatPat_simm5_plus1_nonzero>;
  defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSGT", SETGE, SETLE,
                                              SplatPat_simm5_plus1>;
  defm : VPatIntegerSetCCVL_VIPlus1_Swappable<vti, "PseudoVMSGTU", SETUGE, SETULE,
                                              SplatPat_simm5_plus1_nonzero>;
} // foreach vti = AllIntegerVectors

// 11.9. Vector Integer Min/Max Instructions
defm : VPatBinaryVL_VV_VX<riscv_umin_vl, "PseudoVMINU">;
defm : VPatBinaryVL_VV_VX<riscv_smin_vl, "PseudoVMIN">;
defm : VPatBinaryVL_VV_VX<riscv_umax_vl, "PseudoVMAXU">;
defm : VPatBinaryVL_VV_VX<riscv_smax_vl, "PseudoVMAX">;

// 11.10. Vector Single-Width Integer Multiply Instructions
defm : VPatBinaryVL_VV_VX<riscv_mul_vl, "PseudoVMUL">;
defm : VPatBinaryVL_VV_VX<riscv_mulhs_vl, "PseudoVMULH">;
defm : VPatBinaryVL_VV_VX<riscv_mulhu_vl, "PseudoVMULHU">;

// 11.11. Vector Integer Divide Instructions
defm : VPatBinaryVL_VV_VX<riscv_udiv_vl, "PseudoVDIVU">;
defm : VPatBinaryVL_VV_VX<riscv_sdiv_vl, "PseudoVDIV">;
defm : VPatBinaryVL_VV_VX<riscv_urem_vl, "PseudoVREMU">;
defm : VPatBinaryVL_VV_VX<riscv_srem_vl, "PseudoVREM">;

// 11.12. Vector Widening Integer Multiply Instructions
defm : VPatBinaryWVL_VV_VX<riscv_vwmul_vl, "PseudoVWMUL">;
defm : VPatBinaryWVL_VV_VX<riscv_vwmulu_vl, "PseudoVWMULU">;
defm : VPatBinaryWVL_VV_VX<riscv_vwmulsu_vl, "PseudoVWMULSU">;

// 11.13 Vector Single-Width Integer Multiply-Add Instructions
defm : VPatMultiplyAddVL_VV_VX<riscv_add_vl, "PseudoVMADD">;
defm : VPatMultiplyAddVL_VV_VX<riscv_sub_vl, "PseudoVNMSUB">;
defm : VPatMultiplyAccVL_VV_VX<riscv_add_vl_oneuse, "PseudoVMACC">;
defm : VPatMultiplyAccVL_VV_VX<riscv_sub_vl_oneuse, "PseudoVNMSAC">;

// 11.14. Vector Widening Integer Multiply-Add Instructions
defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmul_vl_oneuse, "PseudoVWMACC">;
defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmulu_vl_oneuse, "PseudoVWMACCU">;
defm : VPatWidenMultiplyAddVL_VV_VX<riscv_vwmulsu_vl_oneuse, "PseudoVWMACCSU">;
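// Special case for vwmaccus.vx: match vwmulsu with the splatted scalar
// operand and swap the operands when emitting the pseudo.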
foreach vtiTowti = AllWidenableIntVectors in {
  defvar vti = vtiTowti.Vti;
  defvar wti = vtiTowti.Wti;
  def : Pat<(wti.Vector
              (riscv_add_vl wti.RegClass:$rd,
                (riscv_vwmulsu_vl_oneuse (vti.Vector vti.RegClass:$rs1),
                                         (SplatPat XLenVT:$rs2),
                                         srcvalue,
                                         (vti.Mask true_mask),
                                         VLOpFrag),
                srcvalue, (vti.Mask true_mask), VLOpFrag)),
            (!cast<Instruction>("PseudoVWMACCUS_VX_" # vti.LMul.MX)
              wti.RegClass:$rd, vti.ScalarRegClass:$rs2, vti.RegClass:$rs1,
              GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}

// 11.15. Vector Integer Merge Instructions
foreach vti = AllIntegerVectors in {
  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
                                          vti.RegClass:$rs1,
                                          vti.RegClass:$rs2,
                                          VLOpFrag)),
            (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
              vti.RegClass:$rs2, vti.RegClass:$rs1, (vti.Mask V0),
              GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
                                          (SplatPat XLenVT:$rs1),
                                          vti.RegClass:$rs2,
                                          VLOpFrag)),
            (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
              vti.RegClass:$rs2, GPR:$rs1, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask V0),
                                          (SplatPat_simm5 simm5:$rs1),
                                          vti.RegClass:$rs2,
                                          VLOpFrag)),
            (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
              vti.RegClass:$rs2, simm5:$rs1, (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Vector (riscv_vp_merge_vl (vti.Mask V0),
                                           vti.RegClass:$rs1,
                                           vti.RegClass:$rs2,
                                           VLOpFrag)),
            (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX#"_TU")
              vti.RegClass:$rs2, vti.RegClass:$rs2, vti.RegClass:$rs1,
              (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Vector (riscv_vp_merge_vl (vti.Mask V0),
                                           (SplatPat XLenVT:$rs1),
                                           vti.RegClass:$rs2,
                                           VLOpFrag)),
            (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX#"_TU")
              vti.RegClass:$rs2, vti.RegClass:$rs2, GPR:$rs1,
              (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Vector (riscv_vp_merge_vl (vti.Mask V0),
                                           (SplatPat_simm5 simm5:$rs1),
                                           vti.RegClass:$rs2,
                                           VLOpFrag)),
            (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX#"_TU")
              vti.RegClass:$rs2, vti.RegClass:$rs2, simm5:$rs1,
              (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
}

// 11.16. Vector Integer Move Instructions
foreach vti = AllIntegerVectors in {
  def : Pat<(vti.Vector (riscv_vmv_v_x_vl (vti.Vector undef), GPR:$rs2, VLOpFrag)),
            (!cast<Instruction>("PseudoVMV_V_X_"#vti.LMul.MX)
              $rs2, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Vector (riscv_vmv_v_x_vl vti.Vector:$passthru, GPR:$rs2, VLOpFrag)),
            (!cast<Instruction>("PseudoVMV_V_X_"#vti.LMul.MX#"_TU")
              $passthru, $rs2, GPR:$vl, vti.Log2SEW)>;
  defvar ImmPat = !cast<ComplexPattern>("sew"#vti.SEW#"simm5");
  def : Pat<(vti.Vector (riscv_vmv_v_x_vl (vti.Vector undef), (ImmPat XLenVT:$imm5),
                                          VLOpFrag)),
            (!cast<Instruction>("PseudoVMV_V_I_"#vti.LMul.MX)
              XLenVT:$imm5, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Vector (riscv_vmv_v_x_vl vti.Vector:$passthru, (ImmPat XLenVT:$imm5),
                                          VLOpFrag)),
            (!cast<Instruction>("PseudoVMV_V_I_"#vti.LMul.MX#"_TU")
              $passthru, XLenVT:$imm5, GPR:$vl, vti.Log2SEW)>;
}

// 12. Vector Fixed-Point Arithmetic Instructions

// 12.1. Vector Single-Width Saturating Add and Subtract
defm : VPatBinaryVL_VV_VX_VI<riscv_saddsat_vl, "PseudoVSADD">;
defm : VPatBinaryVL_VV_VX_VI<riscv_uaddsat_vl, "PseudoVSADDU">;
defm : VPatBinaryVL_VV_VX<riscv_ssubsat_vl, "PseudoVSSUB">;
defm : VPatBinaryVL_VV_VX<riscv_usubsat_vl, "PseudoVSSUBU">;

} // Predicates = [HasVInstructions]

// 13. Vector Floating-Point Instructions

let Predicates = [HasVInstructionsAnyF] in {

// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
defm : VPatBinaryFPVL_VV_VF<riscv_fadd_vl, "PseudoVFADD">;
defm : VPatBinaryFPVL_VV_VF<riscv_fsub_vl, "PseudoVFSUB">;
defm : VPatBinaryFPVL_R_VF<riscv_fsub_vl, "PseudoVFRSUB">;

// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
defm : VPatWidenBinaryFPVL_VV_VF_WV_WF<riscv_fadd_vl, "PseudoVFWADD">;
defm : VPatWidenBinaryFPVL_VV_VF_WV_WF<riscv_fsub_vl, "PseudoVFWSUB">;

// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
defm : VPatBinaryFPVL_VV_VF<riscv_fmul_vl, "PseudoVFMUL">;
defm : VPatBinaryFPVL_VV_VF<riscv_fdiv_vl, "PseudoVFDIV">;
defm : VPatBinaryFPVL_R_VF<riscv_fdiv_vl, "PseudoVFRDIV">;

// 13.5. Vector Widening Floating-Point Multiply Instructions
defm : VPatWidenBinaryFPVL_VV_VF<riscv_fmul_vl, riscv_fpextend_vl_oneuse, "PseudoVFWMUL">;

// 13.6 Vector Single-Width Floating-Point Fused Multiply-Add Instructions.
defm : VPatFPMulAddVL_VV_VF<riscv_vfmadd_vl, "PseudoVFMADD">;
defm : VPatFPMulAddVL_VV_VF<riscv_vfmsub_vl, "PseudoVFMSUB">;
defm : VPatFPMulAddVL_VV_VF<riscv_vfnmadd_vl, "PseudoVFNMADD">;
defm : VPatFPMulAddVL_VV_VF<riscv_vfnmsub_vl, "PseudoVFNMSUB">;
defm : VPatFPMulAccVL_VV_VF<riscv_vfmadd_vl_oneuse, "PseudoVFMACC">;
defm : VPatFPMulAccVL_VV_VF<riscv_vfmsub_vl_oneuse, "PseudoVFMSAC">;
defm : VPatFPMulAccVL_VV_VF<riscv_vfnmadd_vl_oneuse, "PseudoVFNMACC">;
defm : VPatFPMulAccVL_VV_VF<riscv_vfnmsub_vl_oneuse, "PseudoVFNMSAC">;

// 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
defm : VPatWidenFPMulAccVL_VV_VF<riscv_vfmadd_vl, "PseudoVFWMACC">;
defm : VPatWidenFPMulAccVL_VV_VF<riscv_vfnmadd_vl, "PseudoVFWNMACC">;
defm : VPatWidenFPMulAccVL_VV_VF<riscv_vfmsub_vl, "PseudoVFWMSAC">;
defm : VPatWidenFPMulAccVL_VV_VF<riscv_vfnmsub_vl, "PseudoVFWNMSAC">;

// 13.11. Vector Floating-Point MIN/MAX Instructions
defm : VPatBinaryFPVL_VV_VF<riscv_fminnum_vl, "PseudoVFMIN">;
defm : VPatBinaryFPVL_VV_VF<riscv_fmaxnum_vl, "PseudoVFMAX">;

// 13.13. Vector Floating-Point Compare Instructions
defm : VPatFPSetCCVL_VV_VF_FV<SETEQ, "PseudoVMFEQ", "PseudoVMFEQ">;
defm : VPatFPSetCCVL_VV_VF_FV<SETOEQ, "PseudoVMFEQ", "PseudoVMFEQ">;
defm : VPatFPSetCCVL_VV_VF_FV<SETNE, "PseudoVMFNE", "PseudoVMFNE">;
defm : VPatFPSetCCVL_VV_VF_FV<SETUNE, "PseudoVMFNE", "PseudoVMFNE">;
defm : VPatFPSetCCVL_VV_VF_FV<SETLT, "PseudoVMFLT", "PseudoVMFGT">;
defm : VPatFPSetCCVL_VV_VF_FV<SETOLT, "PseudoVMFLT", "PseudoVMFGT">;
defm : VPatFPSetCCVL_VV_VF_FV<SETLE, "PseudoVMFLE", "PseudoVMFGE">;
defm : VPatFPSetCCVL_VV_VF_FV<SETOLE, "PseudoVMFLE", "PseudoVMFGE">;

foreach vti = AllFloatVectors in {
  // 13.8. Vector Floating-Point Square-Root Instruction
  def : Pat<(riscv_fsqrt_vl (vti.Vector vti.RegClass:$rs2), (vti.Mask V0),
                            VLOpFrag),
            (!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX #"_MASK")
              (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
              (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;

  // 13.12. Vector Floating-Point Sign-Injection Instructions
  def : Pat<(riscv_fabs_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0),
                           VLOpFrag),
            (!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX #"_MASK")
              (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs,
              vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
              TA_MA)>;
  // Handle fneg with VFSGNJN using the same input for both operands.
  def : Pat<(riscv_fneg_vl (vti.Vector vti.RegClass:$rs), (vti.Mask V0),
                           VLOpFrag),
            (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX #"_MASK")
              (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs,
              vti.RegClass:$rs, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
              TA_MA)>;
  def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                (vti.Vector vti.RegClass:$rs2),
                                vti.RegClass:$merge,
                                (vti.Mask V0),
                                VLOpFrag),
            (!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX#"_MASK")
              vti.RegClass:$merge, vti.RegClass:$rs1,
              vti.RegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
              TAIL_AGNOSTIC)>;
  def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                (riscv_fneg_vl vti.RegClass:$rs2,
                                               (vti.Mask true_mask),
                                               VLOpFrag),
                                srcvalue,
                                (vti.Mask true_mask),
                                VLOpFrag),
            (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
              vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                (SplatFPOp vti.ScalarRegClass:$rs2),
                                vti.RegClass:$merge,
                                (vti.Mask V0),
                                VLOpFrag),
            (!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"# vti.LMul.MX#"_MASK")
              vti.RegClass:$merge, vti.RegClass:$rs1,
              vti.ScalarRegClass:$rs2, (vti.Mask V0), GPR:$vl, vti.Log2SEW,
              TAIL_AGNOSTIC)>;

  // Rounding without exception to implement nearbyint.
  def : Pat<(riscv_vfround_noexcept_vl (vti.Vector vti.RegClass:$rs1),
                                       (vti.Mask V0), VLOpFrag),
            (!cast<Instruction>("PseudoVFROUND_NOEXCEPT_V_" # vti.LMul.MX #"_MASK")
              (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1,
              (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
}

foreach fvti = AllFloatVectors in {
  // Floating-point vselects:
  // 11.15. Vector Integer Merge Instructions
  // 13.15. Vector Floating-Point Merge Instruction
  def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0),
                                           fvti.RegClass:$rs1,
                                           fvti.RegClass:$rs2,
                                           VLOpFrag)),
            (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
              fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
              GPR:$vl, fvti.Log2SEW)>;
  def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0),
                                           (SplatFPOp fvti.ScalarRegClass:$rs1),
                                           fvti.RegClass:$rs2,
                                           VLOpFrag)),
            (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
              fvti.RegClass:$rs2,
              (fvti.Scalar fvti.ScalarRegClass:$rs1),
              (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
  def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0),
                                           (SplatFPOp (fvti.Scalar fpimm0)),
                                           fvti.RegClass:$rs2,
                                           VLOpFrag)),
            (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
              fvti.RegClass:$rs2, 0, (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
  def : Pat<(fvti.Vector (riscv_vp_merge_vl (fvti.Mask V0),
                                            fvti.RegClass:$rs1,
                                            fvti.RegClass:$rs2,
                                            VLOpFrag)),
            (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX#"_TU")
              fvti.RegClass:$rs2, fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
              GPR:$vl, fvti.Log2SEW)>;
  def : Pat<(fvti.Vector (riscv_vp_merge_vl (fvti.Mask V0),
                                            (SplatFPOp fvti.ScalarRegClass:$rs1),
                                            fvti.RegClass:$rs2,
                                            VLOpFrag)),
            (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX#"_TU")
              fvti.RegClass:$rs2, fvti.RegClass:$rs2,
              (fvti.Scalar fvti.ScalarRegClass:$rs1),
              (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
  def : Pat<(fvti.Vector (riscv_vp_merge_vl (fvti.Mask V0),
                                            (SplatFPOp (fvti.Scalar fpimm0)),
                                            fvti.RegClass:$rs2,
                                            VLOpFrag)),
            (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX#"_TU")
              fvti.RegClass:$rs2, fvti.RegClass:$rs2, 0, (fvti.Mask V0),
              GPR:$vl, fvti.Log2SEW)>;

  // 13.16. Vector Floating-Point Move Instruction
  // If we're splatting fpimm0, use vmv.v.x vd, x0.
  def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
                           (fvti.Vector undef), (fvti.Scalar (fpimm0)), VLOpFrag)),
            (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX)
              0, GPR:$vl, fvti.Log2SEW)>;
  def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
                           fvti.Vector:$passthru, (fvti.Scalar (fpimm0)), VLOpFrag)),
            (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX#"_TU")
              $passthru, 0, GPR:$vl, fvti.Log2SEW)>;
  def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
                           (fvti.Vector undef), (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)),
            (!cast<Instruction>("PseudoVFMV_V_" # fvti.ScalarSuffix # "_" #
                                fvti.LMul.MX)
              (fvti.Scalar fvti.ScalarRegClass:$rs2),
              GPR:$vl, fvti.Log2SEW)>;
  def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
                           fvti.Vector:$passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)),
            (!cast<Instruction>("PseudoVFMV_V_" # fvti.ScalarSuffix # "_" #
                                fvti.LMul.MX # "_TU")
              $passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2),
              GPR:$vl, fvti.Log2SEW)>;

  // 13.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions
  defm : VPatConvertFP2IVL_V<riscv_vfcvt_xu_f_vl, "PseudoVFCVT_XU_F_V">;
  defm : VPatConvertFP2IVL_V<riscv_vfcvt_x_f_vl, "PseudoVFCVT_X_F_V">;
  defm : VPatConvertFP2I_RM_VL_V<riscv_vfcvt_rm_xu_f_vl, "PseudoVFCVT_RM_XU_F_V">;
  defm : VPatConvertFP2I_RM_VL_V<riscv_vfcvt_rm_x_f_vl, "PseudoVFCVT_RM_X_F_V">;
  defm : VPatConvertFP2IVL_V<riscv_vfcvt_rtz_xu_f_vl, "PseudoVFCVT_RTZ_XU_F_V">;
  defm : VPatConvertFP2IVL_V<riscv_vfcvt_rtz_x_f_vl, "PseudoVFCVT_RTZ_X_F_V">;
  defm : VPatConvertI2FPVL_V<riscv_uint_to_fp_vl, "PseudoVFCVT_F_XU_V">;
  defm : VPatConvertI2FPVL_V<riscv_sint_to_fp_vl, "PseudoVFCVT_F_X_V">;
  defm : VPatConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_xu_vl, "PseudoVFCVT_RM_F_XU_V">;
  defm : VPatConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_x_vl, "PseudoVFCVT_RM_F_X_V">;

  // 13.18. Widening Floating-Point/Integer Type-Convert Instructions
  defm : VPatWConvertFP2IVL_V<riscv_vfcvt_xu_f_vl, "PseudoVFWCVT_XU_F_V">;
  defm : VPatWConvertFP2IVL_V<riscv_vfcvt_x_f_vl, "PseudoVFWCVT_X_F_V">;
  defm : VPatWConvertFP2I_RM_VL_V<riscv_vfcvt_rm_xu_f_vl, "PseudoVFWCVT_RM_XU_F_V">;
  defm : VPatWConvertFP2I_RM_VL_V<riscv_vfcvt_rm_x_f_vl, "PseudoVFWCVT_RM_X_F_V">;
  defm : VPatWConvertFP2IVL_V<riscv_vfcvt_rtz_xu_f_vl, "PseudoVFWCVT_RTZ_XU_F_V">;
  defm : VPatWConvertFP2IVL_V<riscv_vfcvt_rtz_x_f_vl, "PseudoVFWCVT_RTZ_X_F_V">;
  defm : VPatWConvertI2FPVL_V<riscv_uint_to_fp_vl, "PseudoVFWCVT_F_XU_V">;
  defm : VPatWConvertI2FPVL_V<riscv_sint_to_fp_vl, "PseudoVFWCVT_F_X_V">;
  defm : VPatWConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_xu_vl, "PseudoVFWCVT_RM_F_XU_V">;
  defm : VPatWConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_x_vl, "PseudoVFWCVT_RM_F_X_V">;
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar fwti = fvtiToFWti.Wti;
    def : Pat<(fwti.Vector (riscv_fpextend_vl (fvti.Vector fvti.RegClass:$rs1),
                                              (fvti.Mask V0),
                                              VLOpFrag)),
              (!cast<Instruction>("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX#"_MASK")
                (fwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
                (fvti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>;
  }

  // 13.19 Narrowing Floating-Point/Integer Type-Convert Instructions
  defm : VPatNConvertFP2IVL_V<riscv_vfcvt_xu_f_vl, "PseudoVFNCVT_XU_F_W">;
  defm : VPatNConvertFP2IVL_V<riscv_vfcvt_x_f_vl, "PseudoVFNCVT_X_F_W">;
  defm : VPatNConvertFP2I_RM_VL_V<riscv_vfcvt_rm_xu_f_vl, "PseudoVFNCVT_RM_XU_F_W">;
  defm : VPatNConvertFP2I_RM_VL_V<riscv_vfcvt_rm_x_f_vl, "PseudoVFNCVT_RM_X_F_W">;
  defm : VPatNConvertFP2IVL_V<riscv_vfcvt_rtz_xu_f_vl, "PseudoVFNCVT_RTZ_XU_F_W">;
  defm : VPatNConvertFP2IVL_V<riscv_vfcvt_rtz_x_f_vl, "PseudoVFNCVT_RTZ_X_F_W">;
  defm : VPatNConvertI2FPVL_V<riscv_uint_to_fp_vl, "PseudoVFNCVT_F_XU_W">;
  defm : VPatNConvertI2FPVL_V<riscv_sint_to_fp_vl, "PseudoVFNCVT_F_X_W">;
  defm : VPatNConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_xu_vl, "PseudoVFNCVT_RM_F_XU_W">;
  defm : VPatNConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_x_vl, "PseudoVFNCVT_RM_F_X_W">;
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar fwti = fvtiToFWti.Wti;
    def : Pat<(fvti.Vector (riscv_fpround_vl (fwti.Vector fwti.RegClass:$rs1),
                                             (fwti.Mask V0),
                                             VLOpFrag)),
              (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX#"_MASK")
                (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
                (fwti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>;
    def : Pat<(fvti.Vector (riscv_fncvt_rod_vl (fwti.Vector fwti.RegClass:$rs1),
                                               (fwti.Mask V0),
                                               VLOpFrag)),
              (!cast<Instruction>("PseudoVFNCVT_ROD_F_F_W_"#fvti.LMul.MX#"_MASK")
                (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
                (fwti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>;
  }
}

} // Predicates = [HasVInstructionsAnyF]

// 14. Vector Reduction Operations

// 14.1. Vector Single-Width Integer Reduction Instructions
let Predicates = [HasVInstructions] in {
defm : VPatReductionVL<rvv_vecreduce_ADD_vl,  "PseudoVREDSUM", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_UMAX_vl, "PseudoVREDMAXU", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_SMAX_vl, "PseudoVREDMAX", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_UMIN_vl, "PseudoVREDMINU", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_SMIN_vl, "PseudoVREDMIN", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_AND_vl,  "PseudoVREDAND", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_OR_vl,   "PseudoVREDOR", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_XOR_vl,  "PseudoVREDXOR", /*is_float*/0>;

// 14.2. Vector Widening Integer Reduction Instructions
defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, anyext_oneuse, "PseudoVWREDSUMU", /*is_float*/0>;
defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, zext_oneuse, "PseudoVWREDSUMU", /*is_float*/0>;
defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_ADD_vl, riscv_zext_vl_oneuse, "PseudoVWREDSUMU", /*is_float*/0>;
defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, sext_oneuse, "PseudoVWREDSUM", /*is_float*/0>;
defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_ADD_vl, riscv_sext_vl_oneuse, "PseudoVWREDSUM", /*is_float*/0>;
} // Predicates = [HasVInstructions]

// 14.3. Vector Single-Width Floating-Point Reduction Instructions
let Predicates = [HasVInstructionsAnyF] in {
defm : VPatReductionVL<rvv_vecreduce_SEQ_FADD_vl, "PseudoVFREDOSUM", /*is_float*/1>;
defm : VPatReductionVL<rvv_vecreduce_FADD_vl,     "PseudoVFREDUSUM", /*is_float*/1>;
defm : VPatReductionVL<rvv_vecreduce_FMIN_vl,     "PseudoVFREDMIN", /*is_float*/1>;
defm : VPatReductionVL<rvv_vecreduce_FMAX_vl,     "PseudoVFREDMAX", /*is_float*/1>;

// 14.4. Vector Widening Floating-Point Reduction Instructions
defm : VPatWidenReductionVL<rvv_vecreduce_SEQ_FADD_vl, fpext_oneuse, "PseudoVFWREDOSUM", /*is_float*/1>;
defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_SEQ_FADD_vl, riscv_fpextend_vl_oneuse, "PseudoVFWREDOSUM", /*is_float*/1>;
defm : VPatWidenReductionVL<rvv_vecreduce_FADD_vl, fpext_oneuse, "PseudoVFWREDUSUM", /*is_float*/1>;
defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_FADD_vl, riscv_fpextend_vl_oneuse, "PseudoVFWREDUSUM", /*is_float*/1>;
} // Predicates = [HasVInstructionsAnyF]

// 15. Vector Mask Instructions

let Predicates = [HasVInstructions] in {

foreach mti = AllMasks in {
  // 15.1 Vector Mask-Register Logical Instructions
  def : Pat<(mti.Mask (riscv_vmset_vl VLOpFrag)),
            (!cast<Instruction>("PseudoVMSET_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>;
  def : Pat<(mti.Mask (riscv_vmclr_vl VLOpFrag)),
            (!cast<Instruction>("PseudoVMCLR_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>;

  def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1, VR:$rs2, VLOpFrag)),
            (!cast<Instruction>("PseudoVMAND_MM_" # mti.LMul.MX)
              VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
  def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1, VR:$rs2, VLOpFrag)),
            (!cast<Instruction>("PseudoVMOR_MM_" # mti.LMul.MX)
              VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
  def : Pat<(mti.Mask (riscv_vmxor_vl VR:$rs1, VR:$rs2, VLOpFrag)),
            (!cast<Instruction>("PseudoVMXOR_MM_" # mti.LMul.MX)
              VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;

  def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1,
                                      (riscv_vmnot_vl VR:$rs2, VLOpFrag),
                                      VLOpFrag)),
            (!cast<Instruction>("PseudoVMANDN_MM_" # mti.LMul.MX)
              VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
  def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1,
                                     (riscv_vmnot_vl VR:$rs2, VLOpFrag),
                                     VLOpFrag)),
            (!cast<Instruction>("PseudoVMORN_MM_" # mti.LMul.MX)
              VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
  // XOR is associative so we need 2 patterns for VMXNOR.
  def : Pat<(mti.Mask (riscv_vmxor_vl (riscv_vmnot_vl VR:$rs1,
                                                      VLOpFrag),
                                      VR:$rs2, VLOpFrag)),
            (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX)
              VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;

  def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmand_vl VR:$rs1, VR:$rs2,
                                                      VLOpFrag),
                                      VLOpFrag)),
            (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
              VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
  def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmor_vl VR:$rs1, VR:$rs2,
                                                     VLOpFrag),
                                      VLOpFrag)),
            (!cast<Instruction>("PseudoVMNOR_MM_" # mti.LMul.MX)
              VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
  def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmxor_vl VR:$rs1, VR:$rs2,
                                                      VLOpFrag),
                                      VLOpFrag)),
            (!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX)
              VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;

  // Match the not idiom to the vmnot.m pseudo.
  def : Pat<(mti.Mask (riscv_vmnot_vl VR:$rs, VLOpFrag)),
            (!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
              VR:$rs, VR:$rs, GPR:$vl, mti.Log2SEW)>;

  // 15.2 Vector count population in mask vcpop.m
  def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask true_mask),
                                    VLOpFrag)),
            (!cast<Instruction>("PseudoVCPOP_M_" # mti.BX)
              VR:$rs2, GPR:$vl, mti.Log2SEW)>;
  def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask V0),
                                    VLOpFrag)),
            (!cast<Instruction>("PseudoVCPOP_M_" # mti.BX # "_MASK")
              VR:$rs2, (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;

  // 15.3 vfirst find-first-set mask bit
  def : Pat<(XLenVT (riscv_vfirst_vl (mti.Mask VR:$rs2), (mti.Mask true_mask),
                                     VLOpFrag)),
            (!cast<Instruction>("PseudoVFIRST_M_" # mti.BX)
              VR:$rs2, GPR:$vl, mti.Log2SEW)>;
  def : Pat<(XLenVT (riscv_vfirst_vl (mti.Mask VR:$rs2), (mti.Mask V0),
                                     VLOpFrag)),
            (!cast<Instruction>("PseudoVFIRST_M_" # mti.BX # "_MASK")
              VR:$rs2, (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;
}

} // Predicates = [HasVInstructions]

// 16. Vector Permutation Instructions

let Predicates = [HasVInstructions] in {
// 16.1. Integer Scalar Move Instructions
// 16.4. Vector Register Gather Instruction
foreach vti = AllIntegerVectors in {
  def : Pat<(vti.Vector (riscv_vmv_s_x_vl (vti.Vector vti.RegClass:$merge),
                                          vti.ScalarRegClass:$rs1,
                                          VLOpFrag)),
            (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX)
              vti.RegClass:$merge,
              (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>;

  def : Pat<(vti.Vector (riscv_vrgather_vv_vl vti.RegClass:$rs2,
                                              vti.RegClass:$rs1,
                                              vti.RegClass:$merge,
                                              (vti.Mask V0),
                                              VLOpFrag)),
            (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_MASK")
              vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
              (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
                                              vti.RegClass:$merge,
                                              (vti.Mask V0),
                                              VLOpFrag)),
            (!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK")
              vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
              (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2,
                                              uimm5:$imm,
                                              vti.RegClass:$merge,
                                              (vti.Mask V0),
                                              VLOpFrag)),
            (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK")
              vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm,
              (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

  // emul = lmul * 16 / sew
  defvar vlmul = vti.LMul;
  defvar octuple_lmul = vlmul.octuple;
  defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW);
  if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
    defvar emul_str = octuple_to_str<octuple_emul>.ret;
    defvar ivti = !cast<VTypeInfo>("VI16" # emul_str);
    defvar inst = "PseudoVRGATHEREI16_VV_" # vti.LMul.MX # "_" # emul_str;
    def : Pat<(vti.Vector
                (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2,
                                          (ivti.Vector ivti.RegClass:$rs1),
                                          vti.RegClass:$merge,
                                          (vti.Mask V0),
                                          VLOpFrag)),
              (!cast<Instruction>(inst#"_MASK")
                vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
                (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}
} // Predicates = [HasVInstructions]

let Predicates = [HasVInstructionsAnyF] in {
// 16.2. Floating-Point Scalar Move Instructions
foreach vti = AllFloatVectors in {
  def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
                                           (vti.Scalar (fpimm0)),
                                           VLOpFrag)),
            (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX)
              vti.RegClass:$merge, X0, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
                                           vti.ScalarRegClass:$rs1,
                                           VLOpFrag)),
            (!cast<Instruction>("PseudoVFMV_S_"#vti.ScalarSuffix#"_"#vti.LMul.MX)
              vti.RegClass:$merge,
              (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>;

  defvar ivti = GetIntVTypeInfo<vti>.Vti;
  def : Pat<(vti.Vector
              (riscv_vrgather_vv_vl vti.RegClass:$rs2,
                                    (ivti.Vector vti.RegClass:$rs1),
                                    vti.RegClass:$merge,
                                    (vti.Mask V0),
                                    VLOpFrag)),
            (!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_MASK")
              vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
              (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
                                              vti.RegClass:$merge,
                                              (vti.Mask V0),
                                              VLOpFrag)),
            (!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK")
              vti.RegClass:$merge, vti.RegClass:$rs2, GPR:$rs1,
              (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(vti.Vector
              (riscv_vrgather_vx_vl vti.RegClass:$rs2,
                                    uimm5:$imm,
                                    vti.RegClass:$merge,
                                    (vti.Mask V0),
                                    VLOpFrag)),
            (!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK")
              vti.RegClass:$merge, vti.RegClass:$rs2, uimm5:$imm,
              (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;

  defvar vlmul = vti.LMul;
  defvar octuple_lmul = vlmul.octuple;
  defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW);
  if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
    defvar emul_str = octuple_to_str<octuple_emul>.ret;
    defvar ivti = !cast<VTypeInfo>("VI16" # emul_str);
    defvar inst = "PseudoVRGATHEREI16_VV_" # vti.LMul.MX # "_" # emul_str;
    def : Pat<(vti.Vector
                (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2,
                                          (ivti.Vector ivti.RegClass:$rs1),
                                          vti.RegClass:$merge,
                                          (vti.Mask V0),
                                          VLOpFrag)),
              (!cast<Instruction>(inst#"_MASK")
                vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
                (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}
} // Predicates = [HasVInstructionsAnyF]

//===----------------------------------------------------------------------===//
// Miscellaneous RISCVISD SDNodes
//===----------------------------------------------------------------------===//

def riscv_vid_vl : SDNode<"RISCVISD::VID_VL", SDTypeProfile<1, 2,
                          [SDTCisVec<0>, SDTCVecEltisVT<1, i1>,
                           SDTCisSameNumEltsAs<0, 1>, SDTCisVT<2, XLenVT>]>, []>;

def SDTRVVSlide : SDTypeProfile<1, 6, [
  SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisVT<3, XLenVT>,
  SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT>,
  SDTCisVT<6, XLenVT>
]>;
def SDTRVVSlide1 : SDTypeProfile<1, 5, [
  SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisInt<0>,
  SDTCisVT<3, XLenVT>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>,
  SDTCisVT<5, XLenVT>
]>;

def riscv_slideup_vl    : SDNode<"RISCVISD::VSLIDEUP_VL", SDTRVVSlide, []>;
def riscv_slide1up_vl   : SDNode<"RISCVISD::VSLIDE1UP_VL", SDTRVVSlide1, []>;
def riscv_slidedown_vl  : SDNode<"RISCVISD::VSLIDEDOWN_VL", SDTRVVSlide, []>;
def riscv_slide1down_vl : SDNode<"RISCVISD::VSLIDE1DOWN_VL", SDTRVVSlide1, []>;

let Predicates = [HasVInstructions] in {
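// VID and VSLIDE1UP/VSLIDE1DOWN patterns; the _TU pseudos are selected when
// the passthru operand is not undef.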
foreach vti = AllIntegerVectors in {
  def : Pat<(vti.Vector (riscv_vid_vl (vti.Mask true_mask),
                                      VLOpFrag)),
            (!cast<Instruction>("PseudoVID_V_"#vti.LMul.MX) GPR:$vl, vti.Log2SEW)>;

  def : Pat<(vti.Vector (riscv_slide1up_vl (vti.Vector undef),
                                           (vti.Vector vti.RegClass:$rs1),
                                           GPR:$rs2, (vti.Mask true_mask),
                                           VLOpFrag)),
            (!cast<Instruction>("PseudoVSLIDE1UP_VX_"#vti.LMul.MX)
              vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Vector (riscv_slide1up_vl (vti.Vector vti.RegClass:$rd),
                                           (vti.Vector vti.RegClass:$rs1),
                                           GPR:$rs2, (vti.Mask true_mask),
                                           VLOpFrag)),
            (!cast<Instruction>("PseudoVSLIDE1UP_VX_"#vti.LMul.MX#"_TU")
              vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Vector (riscv_slide1down_vl (vti.Vector undef),
                                             (vti.Vector vti.RegClass:$rs1),
                                             GPR:$rs2, (vti.Mask true_mask),
                                             VLOpFrag)),
            (!cast<Instruction>("PseudoVSLIDE1DOWN_VX_"#vti.LMul.MX)
              vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
  def : Pat<(vti.Vector (riscv_slide1down_vl (vti.Vector vti.RegClass:$rd),
                                             (vti.Vector vti.RegClass:$rs1),
                                             GPR:$rs2, (vti.Mask true_mask),
                                             VLOpFrag)),
            (!cast<Instruction>("PseudoVSLIDE1DOWN_VX_"#vti.LMul.MX#"_TU")
              vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
}

foreach vti = !listconcat(AllIntegerVectors, AllFloatVectors) in {
  def : Pat<(vti.Vector (riscv_slideup_vl (vti.Vector vti.RegClass:$rs3),
                                          (vti.Vector vti.RegClass:$rs1),
                                          uimm5:$rs2, (vti.Mask true_mask),
                                          VLOpFrag, (XLenVT timm:$policy))),
            (!cast<Instruction>("PseudoVSLIDEUP_VI_"#vti.LMul.MX)
              vti.RegClass:$rs3, vti.RegClass:$rs1, uimm5:$rs2,
              GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
  def : Pat<(vti.Vector (riscv_slideup_vl (vti.Vector vti.RegClass:$rs3),
                                          (vti.Vector vti.RegClass:$rs1),
                                          GPR:$rs2, (vti.Mask true_mask),
                                          VLOpFrag, (XLenVT timm:$policy))),
            (!cast<Instruction>("PseudoVSLIDEUP_VX_"#vti.LMul.MX)
              vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2,
              GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
  def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector vti.RegClass:$rs3),
                                            (vti.Vector vti.RegClass:$rs1),
                                            uimm5:$rs2, (vti.Mask true_mask),
                                            VLOpFrag, (XLenVT timm:$policy))),
            (!cast<Instruction>("PseudoVSLIDEDOWN_VI_"#vti.LMul.MX)
              vti.RegClass:$rs3, vti.RegClass:$rs1, uimm5:$rs2,
              GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
  def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector vti.RegClass:$rs3),
                                            (vti.Vector vti.RegClass:$rs1),
                                            GPR:$rs2, (vti.Mask true_mask),
                                            VLOpFrag, (XLenVT timm:$policy))),
            (!cast<Instruction>("PseudoVSLIDEDOWN_VX_"#vti.LMul.MX)
              vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2,
              GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
}
} // Predicates = [HasVInstructions]