  1. //===-- RISCVISelDAGToDAG.cpp - A dag to dag inst selector for RISCV ------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file defines an instruction selector for the RISCV target.
  10. //
  11. //===----------------------------------------------------------------------===//
  12. #include "RISCVISelDAGToDAG.h"
  13. #include "MCTargetDesc/RISCVMCTargetDesc.h"
  14. #include "MCTargetDesc/RISCVMatInt.h"
  15. #include "RISCVISelLowering.h"
  16. #include "RISCVMachineFunctionInfo.h"
  17. #include "llvm/CodeGen/MachineFrameInfo.h"
  18. #include "llvm/IR/IntrinsicsRISCV.h"
  19. #include "llvm/Support/Alignment.h"
  20. #include "llvm/Support/Debug.h"
  21. #include "llvm/Support/MathExtras.h"
  22. #include "llvm/Support/raw_ostream.h"
  23. #include <optional>
  24. using namespace llvm;
  25. #define DEBUG_TYPE "riscv-isel"
  26. #define PASS_NAME "RISCV DAG->DAG Pattern Instruction Selection"
  27. namespace llvm::RISCV {
  28. #define GET_RISCVVSSEGTable_IMPL
  29. #define GET_RISCVVLSEGTable_IMPL
  30. #define GET_RISCVVLXSEGTable_IMPL
  31. #define GET_RISCVVSXSEGTable_IMPL
  32. #define GET_RISCVVLETable_IMPL
  33. #define GET_RISCVVSETable_IMPL
  34. #define GET_RISCVVLXTable_IMPL
  35. #define GET_RISCVVSXTable_IMPL
  36. #define GET_RISCVMaskedPseudosTable_IMPL
  37. #include "RISCVGenSearchableTables.inc"
  38. } // namespace llvm::RISCV
  39. static unsigned getLastNonGlueOrChainOpIdx(const SDNode *Node) {
  40. assert(Node->getNumOperands() > 0 && "Node with no operands");
  41. unsigned LastOpIdx = Node->getNumOperands() - 1;
  42. if (Node->getOperand(LastOpIdx).getValueType() == MVT::Glue)
  43. --LastOpIdx;
  44. if (Node->getOperand(LastOpIdx).getValueType() == MVT::Other)
  45. --LastOpIdx;
  46. return LastOpIdx;
  47. }
  48. static unsigned getVecPolicyOpIdx(const SDNode *Node, const MCInstrDesc &MCID) {
  49. assert(RISCVII::hasVecPolicyOp(MCID.TSFlags));
  50. (void)MCID;
  51. return getLastNonGlueOrChainOpIdx(Node);
  52. }
  53. void RISCVDAGToDAGISel::PreprocessISelDAG() {
  54. SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
  55. bool MadeChange = false;
  56. while (Position != CurDAG->allnodes_begin()) {
  57. SDNode *N = &*--Position;
  58. if (N->use_empty())
  59. continue;
  60. SDValue Result;
  61. switch (N->getOpcode()) {
  62. case ISD::SPLAT_VECTOR: {
  63. // Convert integer SPLAT_VECTOR to VMV_V_X_VL and floating-point
  64. // SPLAT_VECTOR to VFMV_V_F_VL to reduce isel burden.
  65. MVT VT = N->getSimpleValueType(0);
  66. unsigned Opc =
  67. VT.isInteger() ? RISCVISD::VMV_V_X_VL : RISCVISD::VFMV_V_F_VL;
  68. SDLoc DL(N);
  69. SDValue VL = CurDAG->getRegister(RISCV::X0, Subtarget->getXLenVT());
  70. Result = CurDAG->getNode(Opc, DL, VT, CurDAG->getUNDEF(VT),
  71. N->getOperand(0), VL);
  72. break;
  73. }
  74. case RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL: {
  75. // Lower SPLAT_VECTOR_SPLIT_I64 to two scalar stores and a stride 0 vector
  76. // load. Done after lowering and combining so that we have a chance to
  77. // optimize this to VMV_V_X_VL when the upper bits aren't needed.
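// For example, on RV32 the emitted sequence is roughly
//   sw lo, 0(slot); sw hi, 4(slot); vlse64.v vd, (slot), zero
// where the x0 stride makes every element reload the same 8-byte slot.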
  78. assert(N->getNumOperands() == 4 && "Unexpected number of operands");
  79. MVT VT = N->getSimpleValueType(0);
  80. SDValue Passthru = N->getOperand(0);
  81. SDValue Lo = N->getOperand(1);
  82. SDValue Hi = N->getOperand(2);
  83. SDValue VL = N->getOperand(3);
  84. assert(VT.getVectorElementType() == MVT::i64 && VT.isScalableVector() &&
  85. Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
  86. "Unexpected VTs!");
  87. MachineFunction &MF = CurDAG->getMachineFunction();
  88. RISCVMachineFunctionInfo *FuncInfo =
  89. MF.getInfo<RISCVMachineFunctionInfo>();
  90. SDLoc DL(N);
  91. // We reuse the frame index used for moving two i32s into a 64-bit FPR,
  92. // since this is an analogous operation.
  93. int FI = FuncInfo->getMoveF64FrameIndex(MF);
  94. MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
  95. const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
  96. SDValue StackSlot =
  97. CurDAG->getFrameIndex(FI, TLI.getPointerTy(CurDAG->getDataLayout()));
  98. SDValue Chain = CurDAG->getEntryNode();
  99. Lo = CurDAG->getStore(Chain, DL, Lo, StackSlot, MPI, Align(8));
  100. SDValue OffsetSlot =
  101. CurDAG->getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), DL);
  102. Hi = CurDAG->getStore(Chain, DL, Hi, OffsetSlot, MPI.getWithOffset(4),
  103. Align(8));
  104. Chain = CurDAG->getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
  105. SDVTList VTs = CurDAG->getVTList({VT, MVT::Other});
  106. SDValue IntID =
  107. CurDAG->getTargetConstant(Intrinsic::riscv_vlse, DL, MVT::i64);
  108. SDValue Ops[] = {Chain,
  109. IntID,
  110. Passthru,
  111. StackSlot,
  112. CurDAG->getRegister(RISCV::X0, MVT::i64),
  113. VL};
  114. Result = CurDAG->getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
  115. MVT::i64, MPI, Align(8),
  116. MachineMemOperand::MOLoad);
  117. break;
  118. }
  119. }
  120. if (Result) {
  121. LLVM_DEBUG(dbgs() << "RISCV DAG preprocessing replacing:\nOld: ");
  122. LLVM_DEBUG(N->dump(CurDAG));
  123. LLVM_DEBUG(dbgs() << "\nNew: ");
  124. LLVM_DEBUG(Result->dump(CurDAG));
  125. LLVM_DEBUG(dbgs() << "\n");
  126. CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
  127. MadeChange = true;
  128. }
  129. }
  130. if (MadeChange)
  131. CurDAG->RemoveDeadNodes();
  132. }
  133. void RISCVDAGToDAGISel::PostprocessISelDAG() {
  134. HandleSDNode Dummy(CurDAG->getRoot());
  135. SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
  136. bool MadeChange = false;
  137. while (Position != CurDAG->allnodes_begin()) {
  138. SDNode *N = &*--Position;
  139. // Skip dead nodes and any non-machine opcodes.
  140. if (N->use_empty() || !N->isMachineOpcode())
  141. continue;
  142. MadeChange |= doPeepholeSExtW(N);
  143. MadeChange |= doPeepholeMaskedRVV(N);
  144. }
  145. CurDAG->setRoot(Dummy.getValue());
  146. MadeChange |= doPeepholeMergeVVMFold();
  147. if (MadeChange)
  148. CurDAG->RemoveDeadNodes();
  149. }
  150. static SDNode *selectImmSeq(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
  151. RISCVMatInt::InstSeq &Seq) {
  152. SDNode *Result = nullptr;
  153. SDValue SrcReg = CurDAG->getRegister(RISCV::X0, VT);
  154. for (RISCVMatInt::Inst &Inst : Seq) {
  155. SDValue SDImm = CurDAG->getTargetConstant(Inst.getImm(), DL, VT);
  156. switch (Inst.getOpndKind()) {
  157. case RISCVMatInt::Imm:
  158. Result = CurDAG->getMachineNode(Inst.getOpcode(), DL, VT, SDImm);
  159. break;
  160. case RISCVMatInt::RegX0:
  161. Result = CurDAG->getMachineNode(Inst.getOpcode(), DL, VT, SrcReg,
  162. CurDAG->getRegister(RISCV::X0, VT));
  163. break;
  164. case RISCVMatInt::RegReg:
  165. Result = CurDAG->getMachineNode(Inst.getOpcode(), DL, VT, SrcReg, SrcReg);
  166. break;
  167. case RISCVMatInt::RegImm:
  168. Result = CurDAG->getMachineNode(Inst.getOpcode(), DL, VT, SrcReg, SDImm);
  169. break;
  170. }
  171. // Only the first instruction has X0 as its source.
  172. SrcReg = SDValue(Result, 0);
  173. }
  174. return Result;
  175. }
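// For example, on RV32 selectImm with Imm = 0x12345 emits
//   LUI  rd, 0x12
//   ADDI rd, rd, 0x345
// with the ADDI taking the LUI result through SrcReg in selectImmSeq above.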
  176. static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
  177. int64_t Imm, const RISCVSubtarget &Subtarget) {
  178. RISCVMatInt::InstSeq Seq =
  179. RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());
  180. return selectImmSeq(CurDAG, DL, VT, Seq);
  181. }
  182. static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
  183. unsigned NF, RISCVII::VLMUL LMUL) {
  184. static const unsigned M1TupleRegClassIDs[] = {
  185. RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
  186. RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
  187. RISCV::VRN8M1RegClassID};
  188. static const unsigned M2TupleRegClassIDs[] = {RISCV::VRN2M2RegClassID,
  189. RISCV::VRN3M2RegClassID,
  190. RISCV::VRN4M2RegClassID};
  191. assert(Regs.size() >= 2 && Regs.size() <= 8);
  192. unsigned RegClassID;
  193. unsigned SubReg0;
  194. switch (LMUL) {
  195. default:
  196. llvm_unreachable("Invalid LMUL.");
  197. case RISCVII::VLMUL::LMUL_F8:
  198. case RISCVII::VLMUL::LMUL_F4:
  199. case RISCVII::VLMUL::LMUL_F2:
  200. case RISCVII::VLMUL::LMUL_1:
  201. static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
  202. "Unexpected subreg numbering");
  203. SubReg0 = RISCV::sub_vrm1_0;
  204. RegClassID = M1TupleRegClassIDs[NF - 2];
  205. break;
  206. case RISCVII::VLMUL::LMUL_2:
  207. static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
  208. "Unexpected subreg numbering");
  209. SubReg0 = RISCV::sub_vrm2_0;
  210. RegClassID = M2TupleRegClassIDs[NF - 2];
  211. break;
  212. case RISCVII::VLMUL::LMUL_4:
  213. static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
  214. "Unexpected subreg numbering");
  215. SubReg0 = RISCV::sub_vrm4_0;
  216. RegClassID = RISCV::VRN2M4RegClassID;
  217. break;
  218. }
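// For NF=2 at LMUL=1, for example, the loop below builds
//   REG_SEQUENCE VRN2M1, Regs[0], sub_vrm1_0, Regs[1], sub_vrm1_1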
  219. SDLoc DL(Regs[0]);
  220. SmallVector<SDValue, 8> Ops;
  221. Ops.push_back(CurDAG.getTargetConstant(RegClassID, DL, MVT::i32));
  222. for (unsigned I = 0; I < Regs.size(); ++I) {
  223. Ops.push_back(Regs[I]);
  224. Ops.push_back(CurDAG.getTargetConstant(SubReg0 + I, DL, MVT::i32));
  225. }
  226. SDNode *N =
  227. CurDAG.getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
  228. return SDValue(N, 0);
  229. }
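// Appends the operands shared by the vector load/store pseudos in the order
//   base, [stride/index], [v0 mask], VL, SEW, [policy (masked loads only)],
//   chain, [glue]
// where the bracketed operands are optional.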
  230. void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
  231. SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
  232. bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
  233. bool IsLoad, MVT *IndexVT) {
  234. SDValue Chain = Node->getOperand(0);
  235. SDValue Glue;
  236. Operands.push_back(Node->getOperand(CurOp++)); // Base pointer.
  237. if (IsStridedOrIndexed) {
  238. Operands.push_back(Node->getOperand(CurOp++)); // Index.
  239. if (IndexVT)
  240. *IndexVT = Operands.back()->getSimpleValueType(0);
  241. }
  242. if (IsMasked) {
  243. // Mask needs to be copied to V0.
  244. SDValue Mask = Node->getOperand(CurOp++);
  245. Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
  246. Glue = Chain.getValue(1);
  247. Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
  248. }
  249. SDValue VL;
  250. selectVLOp(Node->getOperand(CurOp++), VL);
  251. Operands.push_back(VL);
  252. MVT XLenVT = Subtarget->getXLenVT();
  253. SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
  254. Operands.push_back(SEWOp);
  255. // Masked load has the tail policy argument.
  256. if (IsMasked && IsLoad) {
  257. // Policy must be a constant.
  258. uint64_t Policy = Node->getConstantOperandVal(CurOp++);
  259. SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT);
  260. Operands.push_back(PolicyOp);
  261. }
  262. Operands.push_back(Chain); // Chain.
  263. if (Glue)
  264. Operands.push_back(Glue);
  265. }
  266. static bool isAllUndef(ArrayRef<SDValue> Values) {
  267. return llvm::all_of(Values, [](SDValue V) { return V->isUndef(); });
  268. }
  269. void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
  270. bool IsStrided) {
  271. SDLoc DL(Node);
  272. unsigned NF = Node->getNumValues() - 1;
  273. MVT VT = Node->getSimpleValueType(0);
  274. unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  275. RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  276. unsigned CurOp = 2;
  277. SmallVector<SDValue, 8> Operands;
  278. SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
  279. Node->op_begin() + CurOp + NF);
  280. bool IsTU = IsMasked || !isAllUndef(Regs);
  281. if (IsTU) {
  282. SDValue Merge = createTuple(*CurDAG, Regs, NF, LMUL);
  283. Operands.push_back(Merge);
  284. }
  285. CurOp += NF;
  286. addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
  287. Operands, /*IsLoad=*/true);
  288. const RISCV::VLSEGPseudo *P =
  289. RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW,
  290. static_cast<unsigned>(LMUL));
  291. MachineSDNode *Load =
  292. CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
  293. if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  294. CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
  295. SDValue SuperReg = SDValue(Load, 0);
  296. for (unsigned I = 0; I < NF; ++I) {
  297. unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
  298. ReplaceUses(SDValue(Node, I),
  299. CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  300. }
  301. ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
  302. CurDAG->RemoveDeadNode(Node);
  303. }
  304. void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
  305. SDLoc DL(Node);
  306. unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
  307. MVT VT = Node->getSimpleValueType(0);
  308. MVT XLenVT = Subtarget->getXLenVT();
  309. unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  310. RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  311. unsigned CurOp = 2;
  312. SmallVector<SDValue, 7> Operands;
  313. SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
  314. Node->op_begin() + CurOp + NF);
  315. bool IsTU = IsMasked || !isAllUndef(Regs);
  316. if (IsTU) {
  317. SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
  318. Operands.push_back(MaskedOff);
  319. }
  320. CurOp += NF;
  321. addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
  322. /*IsStridedOrIndexed*/ false, Operands,
  323. /*IsLoad=*/true);
  324. const RISCV::VLSEGPseudo *P =
  325. RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
  326. Log2SEW, static_cast<unsigned>(LMUL));
  327. MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped,
  328. XLenVT, MVT::Other, Operands);
  329. if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  330. CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
  331. SDValue SuperReg = SDValue(Load, 0);
  332. for (unsigned I = 0; I < NF; ++I) {
  333. unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
  334. ReplaceUses(SDValue(Node, I),
  335. CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  336. }
  337. ReplaceUses(SDValue(Node, NF), SDValue(Load, 1)); // VL
  338. ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 2)); // Chain
  339. CurDAG->RemoveDeadNode(Node);
  340. }
  341. void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
  342. bool IsOrdered) {
  343. SDLoc DL(Node);
  344. unsigned NF = Node->getNumValues() - 1;
  345. MVT VT = Node->getSimpleValueType(0);
  346. unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  347. RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  348. unsigned CurOp = 2;
  349. SmallVector<SDValue, 8> Operands;
  350. SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
  351. Node->op_begin() + CurOp + NF);
  352. bool IsTU = IsMasked || !isAllUndef(Regs);
  353. if (IsTU) {
  354. SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
  355. Operands.push_back(MaskedOff);
  356. }
  357. CurOp += NF;
  358. MVT IndexVT;
  359. addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
  360. /*IsStridedOrIndexed*/ true, Operands,
  361. /*IsLoad=*/true, &IndexVT);
  362. assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
  363. "Element count mismatch");
  364. RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
  365. unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
  366. if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
  367. report_fatal_error("The V extension does not support EEW=64 for index "
  368. "values when XLEN=32");
  369. }
  370. const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
  371. NF, IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
  372. static_cast<unsigned>(IndexLMUL));
  373. MachineSDNode *Load =
  374. CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
  375. if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  376. CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
  377. SDValue SuperReg = SDValue(Load, 0);
  378. for (unsigned I = 0; I < NF; ++I) {
  379. unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
  380. ReplaceUses(SDValue(Node, I),
  381. CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  382. }
  383. ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
  384. CurDAG->RemoveDeadNode(Node);
  385. }
  386. void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
  387. bool IsStrided) {
  388. SDLoc DL(Node);
  389. unsigned NF = Node->getNumOperands() - 4;
  390. if (IsStrided)
  391. NF--;
  392. if (IsMasked)
  393. NF--;
  394. MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  395. unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  396. RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  397. SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
  398. SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
  399. SmallVector<SDValue, 8> Operands;
  400. Operands.push_back(StoreVal);
  401. unsigned CurOp = 2 + NF;
  402. addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
  403. Operands);
  404. const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
  405. NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
  406. MachineSDNode *Store =
  407. CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
  408. if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  409. CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
  410. ReplaceNode(Node, Store);
  411. }
  412. void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
  413. bool IsOrdered) {
  414. SDLoc DL(Node);
  415. unsigned NF = Node->getNumOperands() - 5;
  416. if (IsMasked)
  417. --NF;
  418. MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  419. unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  420. RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  421. SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
  422. SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
  423. SmallVector<SDValue, 8> Operands;
  424. Operands.push_back(StoreVal);
  425. unsigned CurOp = 2 + NF;
  426. MVT IndexVT;
  427. addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
  428. /*IsStridedOrIndexed*/ true, Operands,
  429. /*IsLoad=*/false, &IndexVT);
  430. assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
  431. "Element count mismatch");
  432. RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
  433. unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
  434. if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
  435. report_fatal_error("The V extension does not support EEW=64 for index "
  436. "values when XLEN=32");
  437. }
  438. const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
  439. NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
  440. static_cast<unsigned>(IndexLMUL));
  441. MachineSDNode *Store =
  442. CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
  443. if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  444. CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
  445. ReplaceNode(Node, Store);
  446. }
  447. void RISCVDAGToDAGISel::selectVSETVLI(SDNode *Node) {
  448. if (!Subtarget->hasVInstructions())
  449. return;
  450. assert((Node->getOpcode() == ISD::INTRINSIC_W_CHAIN ||
  451. Node->getOpcode() == ISD::INTRINSIC_WO_CHAIN) &&
  452. "Unexpected opcode");
  453. SDLoc DL(Node);
  454. MVT XLenVT = Subtarget->getXLenVT();
  455. bool HasChain = Node->getOpcode() == ISD::INTRINSIC_W_CHAIN;
  456. unsigned IntNoOffset = HasChain ? 1 : 0;
  457. unsigned IntNo = Node->getConstantOperandVal(IntNoOffset);
  458. assert((IntNo == Intrinsic::riscv_vsetvli ||
  459. IntNo == Intrinsic::riscv_vsetvlimax ||
  460. IntNo == Intrinsic::riscv_vsetvli_opt ||
  461. IntNo == Intrinsic::riscv_vsetvlimax_opt) &&
  462. "Unexpected vsetvli intrinsic");
  463. bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax ||
  464. IntNo == Intrinsic::riscv_vsetvlimax_opt;
  465. unsigned Offset = IntNoOffset + (VLMax ? 1 : 2);
  466. assert(Node->getNumOperands() == Offset + 2 &&
  467. "Unexpected number of operands");
  468. unsigned SEW =
  469. RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
  470. RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
  471. Node->getConstantOperandVal(Offset + 1) & 0x7);
  472. unsigned VTypeI = RISCVVType::encodeVTYPE(VLMul, SEW, /*TailAgnostic*/ true,
  473. /*MaskAgnostic*/ false);
  474. SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);
  475. SmallVector<EVT, 2> VTs = {XLenVT};
  476. if (HasChain)
  477. VTs.push_back(MVT::Other);
  478. SDValue VLOperand;
  479. unsigned Opcode = RISCV::PseudoVSETVLI;
  480. if (VLMax) {
  481. VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
  482. Opcode = RISCV::PseudoVSETVLIX0;
  483. } else {
  484. VLOperand = Node->getOperand(IntNoOffset + 1);
  485. if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
  486. uint64_t AVL = C->getZExtValue();
  487. if (isUInt<5>(AVL)) {
  488. SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
  489. SmallVector<SDValue, 3> Ops = {VLImm, VTypeIOp};
  490. if (HasChain)
  491. Ops.push_back(Node->getOperand(0));
  492. ReplaceNode(
  493. Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, VTs, Ops));
  494. return;
  495. }
  496. }
  497. }
  498. SmallVector<SDValue, 3> Ops = {VLOperand, VTypeIOp};
  499. if (HasChain)
  500. Ops.push_back(Node->getOperand(0));
  501. ReplaceNode(Node, CurDAG->getMachineNode(Opcode, DL, VTs, Ops));
  502. }
  503. bool RISCVDAGToDAGISel::tryShrinkShlLogicImm(SDNode *Node) {
  504. MVT VT = Node->getSimpleValueType(0);
  505. unsigned Opcode = Node->getOpcode();
  506. assert((Opcode == ISD::AND || Opcode == ISD::OR || Opcode == ISD::XOR) &&
  507. "Unexpected opcode");
  508. SDLoc DL(Node);
  509. // For operations of the form (x << C1) op C2, check if we can use
  510. // ANDI/ORI/XORI by transforming it into (x op (C2>>C1)) << C1.
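// For example, (x << 4) & 0x3450 cannot use ANDI directly (0x3450 is not a
// simm12), but (x & 0x345) << 4 computes the same value with ANDI + SLLI.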
  511. SDValue N0 = Node->getOperand(0);
  512. SDValue N1 = Node->getOperand(1);
  513. ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
  514. if (!Cst)
  515. return false;
  516. int64_t Val = Cst->getSExtValue();
  517. // Check if immediate can already use ANDI/ORI/XORI.
  518. if (isInt<12>(Val))
  519. return false;
  520. SDValue Shift = N0;
  521. // If Val is simm32 and we have a sext_inreg from i32, then the binop
  522. // produces at least 33 sign bits. We can peek through the sext_inreg and use
  523. // a SLLIW at the end.
  524. bool SignExt = false;
  525. if (isInt<32>(Val) && N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
  526. N0.hasOneUse() && cast<VTSDNode>(N0.getOperand(1))->getVT() == MVT::i32) {
  527. SignExt = true;
  528. Shift = N0.getOperand(0);
  529. }
  530. if (Shift.getOpcode() != ISD::SHL || !Shift.hasOneUse())
  531. return false;
  532. ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
  533. if (!ShlCst)
  534. return false;
  535. uint64_t ShAmt = ShlCst->getZExtValue();
  536. // Make sure that we don't change the operation by removing bits.
  537. // This only matters for OR and XOR, AND is unaffected.
  538. uint64_t RemovedBitsMask = maskTrailingOnes<uint64_t>(ShAmt);
  539. if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
  540. return false;
  541. int64_t ShiftedVal = Val >> ShAmt;
  542. if (!isInt<12>(ShiftedVal))
  543. return false;
  544. // If we peeked through a sext_inreg, make sure the shift is valid for SLLIW.
  545. if (SignExt && ShAmt >= 32)
  546. return false;
  547. // Ok, we can reorder to get a smaller immediate.
  548. unsigned BinOpc;
  549. switch (Opcode) {
  550. default: llvm_unreachable("Unexpected opcode");
  551. case ISD::AND: BinOpc = RISCV::ANDI; break;
  552. case ISD::OR: BinOpc = RISCV::ORI; break;
  553. case ISD::XOR: BinOpc = RISCV::XORI; break;
  554. }
  555. unsigned ShOpc = SignExt ? RISCV::SLLIW : RISCV::SLLI;
  556. SDNode *BinOp =
  557. CurDAG->getMachineNode(BinOpc, DL, VT, Shift.getOperand(0),
  558. CurDAG->getTargetConstant(ShiftedVal, DL, VT));
  559. SDNode *SLLI =
  560. CurDAG->getMachineNode(ShOpc, DL, VT, SDValue(BinOp, 0),
  561. CurDAG->getTargetConstant(ShAmt, DL, VT));
  562. ReplaceNode(Node, SLLI);
  563. return true;
  564. }
  565. void RISCVDAGToDAGISel::Select(SDNode *Node) {
  566. // If we have a custom node, we have already selected.
  567. if (Node->isMachineOpcode()) {
  568. LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
  569. Node->setNodeId(-1);
  570. return;
  571. }
  572. // Instruction Selection not handled by the auto-generated tablegen selection
  573. // should be handled here.
  574. unsigned Opcode = Node->getOpcode();
  575. MVT XLenVT = Subtarget->getXLenVT();
  576. SDLoc DL(Node);
  577. MVT VT = Node->getSimpleValueType(0);
  578. switch (Opcode) {
  579. case ISD::Constant: {
  580. auto *ConstNode = cast<ConstantSDNode>(Node);
  581. if (VT == XLenVT && ConstNode->isZero()) {
  582. SDValue New =
  583. CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, XLenVT);
  584. ReplaceNode(Node, New.getNode());
  585. return;
  586. }
  587. int64_t Imm = ConstNode->getSExtValue();
  588. // If the upper XLen-16 bits are not used, try to convert this to a simm12
  589. // by sign extending bit 15.
  590. if (isUInt<16>(Imm) && isInt<12>(SignExtend64<16>(Imm)) &&
  591. hasAllHUsers(Node))
  592. Imm = SignExtend64<16>(Imm);
  593. // If the upper 32 bits are not used, try to convert this into a simm32 by
  594. // sign extending bit 31.
  595. if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
  596. Imm = SignExtend64<32>(Imm);
  597. ReplaceNode(Node, selectImm(CurDAG, DL, VT, Imm, *Subtarget));
  598. return;
  599. }
  600. case ISD::SHL: {
  601. auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
  602. if (!N1C)
  603. break;
  604. SDValue N0 = Node->getOperand(0);
  605. if (N0.getOpcode() != ISD::AND || !N0.hasOneUse() ||
  606. !isa<ConstantSDNode>(N0.getOperand(1)))
  607. break;
  608. unsigned ShAmt = N1C->getZExtValue();
  609. uint64_t Mask = N0.getConstantOperandVal(1);
  610. // Optimize (shl (and X, C2), C) -> (slli (srliw X, C3), C3+C) where C2 has
  611. // 32 leading zeros and C3 trailing zeros.
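// For example, on RV64 (shl (and X, 0xFFFF0000), 8) has C2 with 32 leading
// and 16 trailing zeros, so it becomes (slli (srliw X, 16), 24).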
  612. if (ShAmt <= 32 && isShiftedMask_64(Mask)) {
  613. unsigned XLen = Subtarget->getXLen();
  614. unsigned LeadingZeros = XLen - llvm::bit_width(Mask);
  615. unsigned TrailingZeros = countTrailingZeros(Mask);
  616. if (TrailingZeros > 0 && LeadingZeros == 32) {
  617. SDNode *SRLIW = CurDAG->getMachineNode(
  618. RISCV::SRLIW, DL, VT, N0->getOperand(0),
  619. CurDAG->getTargetConstant(TrailingZeros, DL, VT));
  620. SDNode *SLLI = CurDAG->getMachineNode(
  621. RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
  622. CurDAG->getTargetConstant(TrailingZeros + ShAmt, DL, VT));
  623. ReplaceNode(Node, SLLI);
  624. return;
  625. }
  626. }
  627. break;
  628. }
  629. case ISD::SRL: {
  630. auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
  631. if (!N1C)
  632. break;
  633. SDValue N0 = Node->getOperand(0);
  634. if (N0.getOpcode() != ISD::AND || !isa<ConstantSDNode>(N0.getOperand(1)))
  635. break;
  636. unsigned ShAmt = N1C->getZExtValue();
  637. uint64_t Mask = N0.getConstantOperandVal(1);
  638. // Optimize (srl (and X, C2), C) -> (slli (srliw X, C3), C3-C) where C2 has
  639. // 32 leading zeros and C3 trailing zeros.
  640. if (isShiftedMask_64(Mask) && N0.hasOneUse()) {
  641. unsigned XLen = Subtarget->getXLen();
  642. unsigned LeadingZeros = XLen - llvm::bit_width(Mask);
  643. unsigned TrailingZeros = countTrailingZeros(Mask);
  644. if (LeadingZeros == 32 && TrailingZeros > ShAmt) {
  645. SDNode *SRLIW = CurDAG->getMachineNode(
  646. RISCV::SRLIW, DL, VT, N0->getOperand(0),
  647. CurDAG->getTargetConstant(TrailingZeros, DL, VT));
  648. SDNode *SLLI = CurDAG->getMachineNode(
  649. RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
  650. CurDAG->getTargetConstant(TrailingZeros - ShAmt, DL, VT));
  651. ReplaceNode(Node, SLLI);
  652. return;
  653. }
  654. }
  655. // Optimize (srl (and X, C2), C) ->
  656. // (srli (slli X, XLen-C3), (XLen-C3) + C)
  657. // Where C2 is a mask with C3 trailing ones.
  658. // Taking into account that the C2 may have had lower bits unset by
  659. // SimplifyDemandedBits. This avoids materializing the C2 immediate.
  660. // This pattern occurs when type legalizing right shifts for types with
  661. // less than XLen bits.
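// For example, on RV64 (srl (and X, 0xFFFF), 3) has C3 = 16 trailing ones,
// so it becomes (srli (slli X, 48), 51) and never materializes 0xFFFF.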
  662. Mask |= maskTrailingOnes<uint64_t>(ShAmt);
  663. if (!isMask_64(Mask))
  664. break;
  665. unsigned TrailingOnes = countTrailingOnes(Mask);
  666. if (ShAmt >= TrailingOnes)
  667. break;
  668. // If the mask has 32 trailing ones, use SRLIW.
  669. if (TrailingOnes == 32) {
  670. SDNode *SRLIW =
  671. CurDAG->getMachineNode(RISCV::SRLIW, DL, VT, N0->getOperand(0),
  672. CurDAG->getTargetConstant(ShAmt, DL, VT));
  673. ReplaceNode(Node, SRLIW);
  674. return;
  675. }
  676. // Only do the remaining transforms if the shift has one use.
  677. if (!N0.hasOneUse())
  678. break;
  679. // If C2 is (1 << ShAmt) use bexti if possible.
  680. if (Subtarget->hasStdExtZbs() && ShAmt + 1 == TrailingOnes) {
  681. SDNode *BEXTI =
  682. CurDAG->getMachineNode(RISCV::BEXTI, DL, VT, N0->getOperand(0),
  683. CurDAG->getTargetConstant(ShAmt, DL, VT));
  684. ReplaceNode(Node, BEXTI);
  685. return;
  686. }
  687. unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
  688. SDNode *SLLI =
  689. CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
  690. CurDAG->getTargetConstant(LShAmt, DL, VT));
  691. SDNode *SRLI = CurDAG->getMachineNode(
  692. RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
  693. CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
  694. ReplaceNode(Node, SRLI);
  695. return;
  696. }
  697. case ISD::SRA: {
  698. // Optimize (sra (sext_inreg X, i16), C) ->
  699. // (srai (slli X, XLen-16), (XLen-16) + C)
  700. // And (sra (sext_inreg X, i8), C) ->
  701. // (srai (slli X, XLen-8), (XLen-8) + C)
  702. // This can occur when Zbb is enabled, which makes sext_inreg i16/i8 legal.
  703. // This transform matches the code we get without Zbb. The shifts are more
  704. // compressible, and this can help expose CSE opportunities in the sdiv by
  705. // constant optimization.
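// For example, on RV64 (sra (sext_inreg X, i8), 2) becomes
//   (srai (slli X, 56), 58)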
  706. auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
  707. if (!N1C)
  708. break;
  709. SDValue N0 = Node->getOperand(0);
  710. if (N0.getOpcode() != ISD::SIGN_EXTEND_INREG || !N0.hasOneUse())
  711. break;
  712. unsigned ShAmt = N1C->getZExtValue();
  713. unsigned ExtSize =
  714. cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
  715. // ExtSize of 32 should use sraiw via tablegen pattern.
  716. if (ExtSize >= 32 || ShAmt >= ExtSize)
  717. break;
  718. unsigned LShAmt = Subtarget->getXLen() - ExtSize;
  719. SDNode *SLLI =
  720. CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
  721. CurDAG->getTargetConstant(LShAmt, DL, VT));
  722. SDNode *SRAI = CurDAG->getMachineNode(
  723. RISCV::SRAI, DL, VT, SDValue(SLLI, 0),
  724. CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
  725. ReplaceNode(Node, SRAI);
  726. return;
  727. }
  728. case ISD::OR:
  729. case ISD::XOR:
  730. if (tryShrinkShlLogicImm(Node))
  731. return;
  732. break;
  733. case ISD::AND: {
  734. auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
  735. if (!N1C)
  736. break;
  737. SDValue N0 = Node->getOperand(0);
  738. bool LeftShift = N0.getOpcode() == ISD::SHL;
  739. if (LeftShift || N0.getOpcode() == ISD::SRL) {
  740. auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
  741. if (!C)
  742. break;
  743. unsigned C2 = C->getZExtValue();
  744. unsigned XLen = Subtarget->getXLen();
  745. assert((C2 > 0 && C2 < XLen) && "Unexpected shift amount!");
  746. uint64_t C1 = N1C->getZExtValue();
  747. // Keep track of whether this is a c.andi. If we can't use c.andi, the
  748. // shift pair might offer more compression opportunities.
  749. // TODO: We could check for C extension here, but we don't have many lit
  750. // tests with the C extension enabled so not checking gets better
  751. // coverage.
  752. // TODO: What if ANDI is faster than the shift?
  753. bool IsCANDI = isInt<6>(N1C->getSExtValue());
  754. // Clear irrelevant bits in the mask.
  755. if (LeftShift)
  756. C1 &= maskTrailingZeros<uint64_t>(C2);
  757. else
  758. C1 &= maskTrailingOnes<uint64_t>(XLen - C2);
  759. // Some transforms should only be done if the shift has a single use or
  760. // the AND would become (srli (slli X, 32), 32)
  761. bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);
  762. SDValue X = N0.getOperand(0);
  763. // Turn (and (srl x, c2) c1) -> (srli (slli x, c3-c2), c3) if c1 is a mask
  764. // with c3 leading zeros.
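// For example, on RV64 (and (srl x, 4), 0x00FFFFFFFFFFFFFF) has c3 = 8
// leading zeros, so (when the srl has one use) it becomes
//   (srli (slli x, 4), 8)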
  765. if (!LeftShift && isMask_64(C1)) {
  766. unsigned Leading = XLen - llvm::bit_width(C1);
  767. if (C2 < Leading) {
  768. // If the number of leading zeros is C2+32 this can be SRLIW.
  769. if (C2 + 32 == Leading) {
  770. SDNode *SRLIW = CurDAG->getMachineNode(
  771. RISCV::SRLIW, DL, VT, X, CurDAG->getTargetConstant(C2, DL, VT));
  772. ReplaceNode(Node, SRLIW);
  773. return;
  774. }
  775. // (and (srl (sexti32 Y), c2), c1) -> (srliw (sraiw Y, 31), c3 - 32)
  776. // if c1 is a mask with c3 leading zeros and c2 >= 32 and c3-c2==1.
  777. //
  778. // This pattern occurs when (i32 (srl (sra Y, 31), c3 - 32)) is type
  779. // legalized and goes through DAG combine.
  780. if (C2 >= 32 && (Leading - C2) == 1 && N0.hasOneUse() &&
  781. X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
  782. cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32) {
  783. SDNode *SRAIW =
  784. CurDAG->getMachineNode(RISCV::SRAIW, DL, VT, X.getOperand(0),
  785. CurDAG->getTargetConstant(31, DL, VT));
  786. SDNode *SRLIW = CurDAG->getMachineNode(
  787. RISCV::SRLIW, DL, VT, SDValue(SRAIW, 0),
  788. CurDAG->getTargetConstant(Leading - 32, DL, VT));
  789. ReplaceNode(Node, SRLIW);
  790. return;
  791. }
  792. // (srli (slli x, c3-c2), c3).
  793. // Skip if we could use (zext.w (sraiw X, C2)).
  794. bool Skip = Subtarget->hasStdExtZba() && Leading == 32 &&
  795. X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
  796. cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32;
  797. // Also Skip if we can use bexti.
  798. Skip |= Subtarget->hasStdExtZbs() && Leading == XLen - 1;
  799. if (OneUseOrZExtW && !Skip) {
  800. SDNode *SLLI = CurDAG->getMachineNode(
  801. RISCV::SLLI, DL, VT, X,
  802. CurDAG->getTargetConstant(Leading - C2, DL, VT));
  803. SDNode *SRLI = CurDAG->getMachineNode(
  804. RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
  805. CurDAG->getTargetConstant(Leading, DL, VT));
  806. ReplaceNode(Node, SRLI);
  807. return;
  808. }
  809. }
  810. }
  811. // Turn (and (shl x, c2), c1) -> (srli (slli x, c2+c3), c3) if c1 is a mask
  812. // shifted by c2 bits with c3 leading zeros.
  813. if (LeftShift && isShiftedMask_64(C1)) {
  814. unsigned Leading = XLen - llvm::bit_width(C1);
  815. if (C2 + Leading < XLen &&
  816. C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + Leading)) << C2)) {
  817. // Use slli.uw when possible.
  818. if ((XLen - (C2 + Leading)) == 32 && Subtarget->hasStdExtZba()) {
  819. SDNode *SLLI_UW =
  820. CurDAG->getMachineNode(RISCV::SLLI_UW, DL, VT, X,
  821. CurDAG->getTargetConstant(C2, DL, VT));
  822. ReplaceNode(Node, SLLI_UW);
  823. return;
  824. }
  825. // (srli (slli x, c2+c3), c3)
  826. if (OneUseOrZExtW && !IsCANDI) {
  827. SDNode *SLLI = CurDAG->getMachineNode(
  828. RISCV::SLLI, DL, VT, X,
  829. CurDAG->getTargetConstant(C2 + Leading, DL, VT));
  830. SDNode *SRLI = CurDAG->getMachineNode(
  831. RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
  832. CurDAG->getTargetConstant(Leading, DL, VT));
  833. ReplaceNode(Node, SRLI);
  834. return;
  835. }
  836. }
  837. }
  838. // Turn (and (shr x, c2), c1) -> (slli (srli x, c2+c3), c3) if c1 is a
  839. // shifted mask with c2 leading zeros and c3 trailing zeros.
  840. if (!LeftShift && isShiftedMask_64(C1)) {
  841. unsigned Leading = XLen - llvm::bit_width(C1);
  842. unsigned Trailing = countTrailingZeros(C1);
  843. if (Leading == C2 && C2 + Trailing < XLen && OneUseOrZExtW &&
  844. !IsCANDI) {
  845. unsigned SrliOpc = RISCV::SRLI;
  846. // If the input is zexti32 we should use SRLIW.
  847. if (X.getOpcode() == ISD::AND &&
  848. isa<ConstantSDNode>(X.getOperand(1)) &&
  849. X.getConstantOperandVal(1) == UINT64_C(0xFFFFFFFF)) {
  850. SrliOpc = RISCV::SRLIW;
  851. X = X.getOperand(0);
  852. }
  853. SDNode *SRLI = CurDAG->getMachineNode(
  854. SrliOpc, DL, VT, X,
  855. CurDAG->getTargetConstant(C2 + Trailing, DL, VT));
  856. SDNode *SLLI = CurDAG->getMachineNode(
  857. RISCV::SLLI, DL, VT, SDValue(SRLI, 0),
  858. CurDAG->getTargetConstant(Trailing, DL, VT));
  859. ReplaceNode(Node, SLLI);
  860. return;
  861. }
  862. // If the leading zero count is C2+32, we can use SRLIW instead of SRLI.
  863. if (Leading > 32 && (Leading - 32) == C2 && C2 + Trailing < 32 &&
  864. OneUseOrZExtW && !IsCANDI) {
  865. SDNode *SRLIW = CurDAG->getMachineNode(
  866. RISCV::SRLIW, DL, VT, X,
  867. CurDAG->getTargetConstant(C2 + Trailing, DL, VT));
  868. SDNode *SLLI = CurDAG->getMachineNode(
  869. RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
  870. CurDAG->getTargetConstant(Trailing, DL, VT));
  871. ReplaceNode(Node, SLLI);
  872. return;
  873. }
  874. }
  875. // Turn (and (shl x, c2), c1) -> (slli (srli x, c3-c2), c3) if c1 is a
  876. // shifted mask with no leading zeros and c3 trailing zeros.
  877. if (LeftShift && isShiftedMask_64(C1)) {
  878. unsigned Leading = XLen - llvm::bit_width(C1);
  879. unsigned Trailing = countTrailingZeros(C1);
  880. if (Leading == 0 && C2 < Trailing && OneUseOrZExtW && !IsCANDI) {
  881. SDNode *SRLI = CurDAG->getMachineNode(
  882. RISCV::SRLI, DL, VT, X,
  883. CurDAG->getTargetConstant(Trailing - C2, DL, VT));
  884. SDNode *SLLI = CurDAG->getMachineNode(
  885. RISCV::SLLI, DL, VT, SDValue(SRLI, 0),
  886. CurDAG->getTargetConstant(Trailing, DL, VT));
  887. ReplaceNode(Node, SLLI);
  888. return;
  889. }
  890. // If we have (32-C2) leading zeros, we can use SRLIW instead of SRLI.
  891. if (C2 < Trailing && Leading + C2 == 32 && OneUseOrZExtW && !IsCANDI) {
  892. SDNode *SRLIW = CurDAG->getMachineNode(
  893. RISCV::SRLIW, DL, VT, X,
  894. CurDAG->getTargetConstant(Trailing - C2, DL, VT));
  895. SDNode *SLLI = CurDAG->getMachineNode(
  896. RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
  897. CurDAG->getTargetConstant(Trailing, DL, VT));
  898. ReplaceNode(Node, SLLI);
  899. return;
  900. }
  901. }
  902. }
  903. if (tryShrinkShlLogicImm(Node))
  904. return;
  905. break;
  906. }
  907. case ISD::MUL: {
  908. // Special case for calculating (mul (and X, C2), C1) where the full product
  909. // fits in XLen bits. We can shift X left by the number of leading zeros in
  910. // C2 and shift C1 left by XLen-lzcnt(C2). This will ensure the final
  911. // product has XLen trailing zeros, putting it in the output of MULHU. This
  912. // can avoid materializing a constant in a register for C2.
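// For example, on RV64 (mul (and X, 0xFFF), 100) becomes
//   (mulhu (slli X, 52), 100 << 12)
// since the slli discards the masked-off bits and the full product appears
// in the high half read back by mulhu.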
  913. // RHS should be a constant.
  914. auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
  915. if (!N1C || !N1C->hasOneUse())
  916. break;
  917. // LHS should be an AND with constant.
  918. SDValue N0 = Node->getOperand(0);
  919. if (N0.getOpcode() != ISD::AND || !isa<ConstantSDNode>(N0.getOperand(1)))
  920. break;
  921. uint64_t C2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
  922. // Constant should be a mask.
  923. if (!isMask_64(C2))
  924. break;
  925. // If this can be an ANDI, ZEXT.H or ZEXT.W, don't do this if the ANDI/ZEXT
  926. // has multiple users or the constant is a simm12. This prevents inserting
  927. // a shift while still having uses of the AND/ZEXT. Shifting a simm12 will
  928. // likely make it more costly to materialize. Otherwise, using a SLLI
  929. // might allow it to be compressed.
  930. bool IsANDIOrZExt =
  931. isInt<12>(C2) ||
  932. (C2 == UINT64_C(0xFFFF) && Subtarget->hasStdExtZbb()) ||
  933. (C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba());
  934. if (IsANDIOrZExt && (isInt<12>(N1C->getSExtValue()) || !N0.hasOneUse()))
  935. break;
  936. // We need to shift left the AND input and C1 by a total of XLen bits.
  937. // How far left do we need to shift the AND input?
  938. unsigned XLen = Subtarget->getXLen();
  939. unsigned LeadingZeros = XLen - llvm::bit_width(C2);
  940. // The constant gets shifted by the remaining amount unless that would
  941. // shift bits out.
  942. uint64_t C1 = N1C->getZExtValue();
  943. unsigned ConstantShift = XLen - LeadingZeros;
  944. if (ConstantShift > (XLen - llvm::bit_width(C1)))
  945. break;
  946. uint64_t ShiftedC1 = C1 << ConstantShift;
  947. // If this is RV32, we need to sign extend the constant.
  948. if (XLen == 32)
  949. ShiftedC1 = SignExtend64<32>(ShiftedC1);
  950. // Create (mulhu (slli X, lzcnt(C2)), C1 << (XLen - lzcnt(C2))).
  951. SDNode *Imm = selectImm(CurDAG, DL, VT, ShiftedC1, *Subtarget);
  952. SDNode *SLLI =
  953. CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0.getOperand(0),
  954. CurDAG->getTargetConstant(LeadingZeros, DL, VT));
  955. SDNode *MULHU = CurDAG->getMachineNode(RISCV::MULHU, DL, VT,
  956. SDValue(SLLI, 0), SDValue(Imm, 0));
  957. ReplaceNode(Node, MULHU);
  958. return;
  959. }
  960. case ISD::INTRINSIC_WO_CHAIN: {
  961. unsigned IntNo = Node->getConstantOperandVal(0);
  962. switch (IntNo) {
  963. // By default we do not custom select any intrinsic.
  964. default:
  965. break;
  966. case Intrinsic::riscv_vmsgeu:
  967. case Intrinsic::riscv_vmsge: {
  968. SDValue Src1 = Node->getOperand(1);
  969. SDValue Src2 = Node->getOperand(2);
  970. bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
  971. bool IsCmpUnsignedZero = false;
  972. // Only custom select scalar second operand.
  973. if (Src2.getValueType() != XLenVT)
  974. break;
  975. // Small constants are handled with patterns.
  976. if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
  977. int64_t CVal = C->getSExtValue();
  978. if (CVal >= -15 && CVal <= 16) {
  979. if (!IsUnsigned || CVal != 0)
  980. break;
  981. IsCmpUnsignedZero = true;
  982. }
  983. }
  984. MVT Src1VT = Src1.getSimpleValueType();
  985. unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode;
  986. switch (RISCVTargetLowering::getLMUL(Src1VT)) {
  987. default:
  988. llvm_unreachable("Unexpected LMUL!");
  989. #define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b) \
  990. case RISCVII::VLMUL::lmulenum: \
  991. VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
  992. : RISCV::PseudoVMSLT_VX_##suffix; \
  993. VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix; \
  994. VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b; \
  995. break;
  996. CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F8, MF8, B1)
  997. CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F4, MF4, B2)
  998. CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F2, MF2, B4)
  999. CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_1, M1, B8)
  1000. CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_2, M2, B16)
  1001. CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_4, M4, B32)
  1002. CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_8, M8, B64)
  1003. #undef CASE_VMSLT_VMNAND_VMSET_OPCODES
  1004. }
  1005. SDValue SEW = CurDAG->getTargetConstant(
  1006. Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
  1007. SDValue VL;
  1008. selectVLOp(Node->getOperand(3), VL);
  1009. // If vmsgeu with 0 immediate, expand it to vmset.
  1010. if (IsCmpUnsignedZero) {
  1011. ReplaceNode(Node, CurDAG->getMachineNode(VMSetOpcode, DL, VT, VL, SEW));
  1012. return;
  1013. }
  1014. // Expand to
  1015. // vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
  1016. SDValue Cmp = SDValue(
  1017. CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
  1018. 0);
  1019. ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
  1020. {Cmp, Cmp, VL, SEW}));
  1021. return;
  1022. }
  1023. case Intrinsic::riscv_vmsgeu_mask:
  1024. case Intrinsic::riscv_vmsge_mask: {
  1025. SDValue Src1 = Node->getOperand(2);
  1026. SDValue Src2 = Node->getOperand(3);
  1027. bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
  1028. bool IsCmpUnsignedZero = false;
  1029. // Only custom select scalar second operand.
  1030. if (Src2.getValueType() != XLenVT)
  1031. break;
  1032. // Small constants are handled with patterns.
  1033. if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
  1034. int64_t CVal = C->getSExtValue();
  1035. if (CVal >= -15 && CVal <= 16) {
  1036. if (!IsUnsigned || CVal != 0)
  1037. break;
  1038. IsCmpUnsignedZero = true;
  1039. }
  1040. }
  1041. MVT Src1VT = Src1.getSimpleValueType();
  1042. unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
  1043. VMOROpcode;
  1044. switch (RISCVTargetLowering::getLMUL(Src1VT)) {
  1045. default:
  1046. llvm_unreachable("Unexpected LMUL!");
  1047. #define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b) \
  1048. case RISCVII::VLMUL::lmulenum: \
  1049. VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
  1050. : RISCV::PseudoVMSLT_VX_##suffix; \
  1051. VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK \
  1052. : RISCV::PseudoVMSLT_VX_##suffix##_MASK; \
  1053. break;
  1054. CASE_VMSLT_OPCODES(LMUL_F8, MF8, B1)
  1055. CASE_VMSLT_OPCODES(LMUL_F4, MF4, B2)
  1056. CASE_VMSLT_OPCODES(LMUL_F2, MF2, B4)
  1057. CASE_VMSLT_OPCODES(LMUL_1, M1, B8)
  1058. CASE_VMSLT_OPCODES(LMUL_2, M2, B16)
  1059. CASE_VMSLT_OPCODES(LMUL_4, M4, B32)
  1060. CASE_VMSLT_OPCODES(LMUL_8, M8, B64)
  1061. #undef CASE_VMSLT_OPCODES
  1062. }
  1063. // Mask operations use the LMUL from the mask type.
  1064. switch (RISCVTargetLowering::getLMUL(VT)) {
  1065. default:
  1066. llvm_unreachable("Unexpected LMUL!");
  1067. #define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix) \
  1068. case RISCVII::VLMUL::lmulenum: \
  1069. VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix; \
  1070. VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix; \
  1071. VMOROpcode = RISCV::PseudoVMOR_MM_##suffix; \
  1072. break;
  1073. CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F8, MF8)
  1074. CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F4, MF4)
  1075. CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F2, MF2)
  1076. CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_1, M1)
  1077. CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_2, M2)
  1078. CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_4, M4)
  1079. CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_8, M8)
  1080. #undef CASE_VMXOR_VMANDN_VMOR_OPCODES
  1081. }
  1082. SDValue SEW = CurDAG->getTargetConstant(
  1083. Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
  1084. SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
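// The mask logical pseudos below take a fixed log2-SEW operand of 0; they
// operate on individual mask bits and do not depend on the element width.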
  1085. SDValue VL;
  1086. selectVLOp(Node->getOperand(5), VL);
  1087. SDValue MaskedOff = Node->getOperand(1);
  1088. SDValue Mask = Node->getOperand(4);
  1089. // If vmsgeu_mask with 0 immediate, expand it to vmor mask, maskedoff.
  1090. if (IsCmpUnsignedZero) {
  1091. // We don't need vmor if the MaskedOff and the Mask are the same
  1092. // value.
  1093. if (Mask == MaskedOff) {
  1094. ReplaceUses(Node, Mask.getNode());
  1095. return;
  1096. }
  1097. ReplaceNode(Node,
  1098. CurDAG->getMachineNode(VMOROpcode, DL, VT,
  1099. {Mask, MaskedOff, VL, MaskSEW}));
  1100. return;
  1101. }
  1102. // If the MaskedOff value and the Mask are the same value use
  1103. // vmslt{u}.vx vt, va, x; vmandn.mm vd, vd, vt
  1104. // This avoids needing to copy v0 to vd before starting the next sequence.
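// With MaskedOff == Mask the desired result is mask[i] ? (Src1 >= Src2) :
// mask[i], which simplifies to mask & ~(Src1 < Src2), i.e. a vmandn of the
// mask with the vmslt result.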
  1105. if (Mask == MaskedOff) {
  1106. SDValue Cmp = SDValue(
  1107. CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
  1108. 0);
  1109. ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOpcode, DL, VT,
  1110. {Mask, Cmp, VL, MaskSEW}));
  1111. return;
  1112. }
  1113. // Mask needs to be copied to V0.
  1114. SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL,
  1115. RISCV::V0, Mask, SDValue());
  1116. SDValue Glue = Chain.getValue(1);
  1117. SDValue V0 = CurDAG->getRegister(RISCV::V0, VT);
  1118. // Otherwise use
  1119. // vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
  1120. // The result is mask undisturbed.
  1121. // We use the same instructions to emulate mask agnostic behavior, because
  1122. // the agnostic result can be either undisturbed or all 1.
  1123. SDValue Cmp = SDValue(
  1124. CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
  1125. {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
  1126. 0);
  1127. // vmxor.mm vd, vd, v0 is used to update active value.
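// Active lanes hold (Src1 < Src2) from the masked compare and flip to
// (Src1 >= Src2) when XORed with the set mask bit; inactive lanes hold
// MaskedOff and are XORed with 0, so they are left unchanged.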
  1128. ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
  1129. {Cmp, Mask, VL, MaskSEW}));
  1130. return;
  1131. }
  1132. case Intrinsic::riscv_vsetvli_opt:
  1133. case Intrinsic::riscv_vsetvlimax_opt:
  1134. return selectVSETVLI(Node);
  1135. }
  1136. break;
  1137. }
  1138. case ISD::INTRINSIC_W_CHAIN: {
  1139. unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
  1140. switch (IntNo) {
  1141. // By default we do not custom select any intrinsic.
  1142. default:
  1143. break;
  1144. case Intrinsic::riscv_vsetvli:
  1145. case Intrinsic::riscv_vsetvlimax:
  1146. return selectVSETVLI(Node);
  1147. case Intrinsic::riscv_vlseg2:
  1148. case Intrinsic::riscv_vlseg3:
  1149. case Intrinsic::riscv_vlseg4:
  1150. case Intrinsic::riscv_vlseg5:
  1151. case Intrinsic::riscv_vlseg6:
  1152. case Intrinsic::riscv_vlseg7:
  1153. case Intrinsic::riscv_vlseg8: {
  1154. selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
  1155. return;
  1156. }
  1157. case Intrinsic::riscv_vlseg2_mask:
  1158. case Intrinsic::riscv_vlseg3_mask:
  1159. case Intrinsic::riscv_vlseg4_mask:
  1160. case Intrinsic::riscv_vlseg5_mask:
  1161. case Intrinsic::riscv_vlseg6_mask:
  1162. case Intrinsic::riscv_vlseg7_mask:
  1163. case Intrinsic::riscv_vlseg8_mask: {
  1164. selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
  1165. return;
  1166. }
  1167. case Intrinsic::riscv_vlsseg2:
  1168. case Intrinsic::riscv_vlsseg3:
  1169. case Intrinsic::riscv_vlsseg4:
  1170. case Intrinsic::riscv_vlsseg5:
  1171. case Intrinsic::riscv_vlsseg6:
  1172. case Intrinsic::riscv_vlsseg7:
  1173. case Intrinsic::riscv_vlsseg8: {
  1174. selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
  1175. return;
  1176. }
  1177. case Intrinsic::riscv_vlsseg2_mask:
  1178. case Intrinsic::riscv_vlsseg3_mask:
  1179. case Intrinsic::riscv_vlsseg4_mask:
  1180. case Intrinsic::riscv_vlsseg5_mask:
  1181. case Intrinsic::riscv_vlsseg6_mask:
  1182. case Intrinsic::riscv_vlsseg7_mask:
  1183. case Intrinsic::riscv_vlsseg8_mask: {
  1184. selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
  1185. return;
  1186. }
  1187. case Intrinsic::riscv_vloxseg2:
  1188. case Intrinsic::riscv_vloxseg3:
  1189. case Intrinsic::riscv_vloxseg4:
  1190. case Intrinsic::riscv_vloxseg5:
  1191. case Intrinsic::riscv_vloxseg6:
  1192. case Intrinsic::riscv_vloxseg7:
  1193. case Intrinsic::riscv_vloxseg8:
  1194. selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
  1195. return;
  1196. case Intrinsic::riscv_vluxseg2:
  1197. case Intrinsic::riscv_vluxseg3:
  1198. case Intrinsic::riscv_vluxseg4:
  1199. case Intrinsic::riscv_vluxseg5:
  1200. case Intrinsic::riscv_vluxseg6:
  1201. case Intrinsic::riscv_vluxseg7:
  1202. case Intrinsic::riscv_vluxseg8:
  1203. selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
  1204. return;
  1205. case Intrinsic::riscv_vloxseg2_mask:
  1206. case Intrinsic::riscv_vloxseg3_mask:
  1207. case Intrinsic::riscv_vloxseg4_mask:
  1208. case Intrinsic::riscv_vloxseg5_mask:
  1209. case Intrinsic::riscv_vloxseg6_mask:
  1210. case Intrinsic::riscv_vloxseg7_mask:
  1211. case Intrinsic::riscv_vloxseg8_mask:
  1212. selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
  1213. return;
  1214. case Intrinsic::riscv_vluxseg2_mask:
  1215. case Intrinsic::riscv_vluxseg3_mask:
  1216. case Intrinsic::riscv_vluxseg4_mask:
  1217. case Intrinsic::riscv_vluxseg5_mask:
  1218. case Intrinsic::riscv_vluxseg6_mask:
  1219. case Intrinsic::riscv_vluxseg7_mask:
  1220. case Intrinsic::riscv_vluxseg8_mask:
  1221. selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
  1222. return;
  1223. case Intrinsic::riscv_vlseg8ff:
  1224. case Intrinsic::riscv_vlseg7ff:
  1225. case Intrinsic::riscv_vlseg6ff:
  1226. case Intrinsic::riscv_vlseg5ff:
  1227. case Intrinsic::riscv_vlseg4ff:
  1228. case Intrinsic::riscv_vlseg3ff:
  1229. case Intrinsic::riscv_vlseg2ff: {
  1230. selectVLSEGFF(Node, /*IsMasked*/ false);
  1231. return;
  1232. }
  1233. case Intrinsic::riscv_vlseg8ff_mask:
  1234. case Intrinsic::riscv_vlseg7ff_mask:
  1235. case Intrinsic::riscv_vlseg6ff_mask:
  1236. case Intrinsic::riscv_vlseg5ff_mask:
  1237. case Intrinsic::riscv_vlseg4ff_mask:
  1238. case Intrinsic::riscv_vlseg3ff_mask:
  1239. case Intrinsic::riscv_vlseg2ff_mask: {
  1240. selectVLSEGFF(Node, /*IsMasked*/ true);
  1241. return;
  1242. }
  1243. case Intrinsic::riscv_vloxei:
  1244. case Intrinsic::riscv_vloxei_mask:
  1245. case Intrinsic::riscv_vluxei:
  1246. case Intrinsic::riscv_vluxei_mask: {
  1247. bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
  1248. IntNo == Intrinsic::riscv_vluxei_mask;
  1249. bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
  1250. IntNo == Intrinsic::riscv_vloxei_mask;
  1251. MVT VT = Node->getSimpleValueType(0);
  1252. unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
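// For INTRINSIC_W_CHAIN nodes operand 0 is the chain and operand 1 the
// intrinsic ID, so the intrinsic's own operands start at index 2.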
  1253. unsigned CurOp = 2;
1254. // Masked intrinsics only have TU version pseudo instructions.
  1255. bool IsTU = IsMasked || !Node->getOperand(CurOp).isUndef();
  1256. SmallVector<SDValue, 8> Operands;
  1257. if (IsTU)
  1258. Operands.push_back(Node->getOperand(CurOp++));
  1259. else
  1260. // Skip the undef passthru operand for nomask TA version pseudo
  1261. CurOp++;
  1262. MVT IndexVT;
  1263. addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
  1264. /*IsStridedOrIndexed*/ true, Operands,
  1265. /*IsLoad=*/true, &IndexVT);
  1266. assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
  1267. "Element count mismatch");
  1268. RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  1269. RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
  1270. unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
  1271. if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
  1272. report_fatal_error("The V extension does not support EEW=64 for index "
  1273. "values when XLEN=32");
  1274. }
  1275. const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
  1276. IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
  1277. static_cast<unsigned>(IndexLMUL));
  1278. MachineSDNode *Load =
  1279. CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
  1280. if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  1281. CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
  1282. ReplaceNode(Node, Load);
  1283. return;
  1284. }
  1285. case Intrinsic::riscv_vlm:
  1286. case Intrinsic::riscv_vle:
  1287. case Intrinsic::riscv_vle_mask:
  1288. case Intrinsic::riscv_vlse:
  1289. case Intrinsic::riscv_vlse_mask: {
  1290. bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
  1291. IntNo == Intrinsic::riscv_vlse_mask;
  1292. bool IsStrided =
  1293. IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
  1294. MVT VT = Node->getSimpleValueType(0);
  1295. unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  1296. unsigned CurOp = 2;
1297. // The riscv_vlm intrinsic is always tail agnostic and has no passthru operand.
1298. bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;
1299. // Masked intrinsics only have TU version pseudo instructions.
  1300. bool IsTU = HasPassthruOperand &&
  1301. (IsMasked || !Node->getOperand(CurOp).isUndef());
  1302. SmallVector<SDValue, 8> Operands;
  1303. if (IsTU)
  1304. Operands.push_back(Node->getOperand(CurOp++));
  1305. else if (HasPassthruOperand)
  1306. // Skip the undef passthru operand for nomask TA version pseudo
  1307. CurOp++;
  1308. addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
  1309. Operands, /*IsLoad=*/true);
  1310. RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  1311. const RISCV::VLEPseudo *P =
  1312. RISCV::getVLEPseudo(IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW,
  1313. static_cast<unsigned>(LMUL));
  1314. MachineSDNode *Load =
  1315. CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
  1316. if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  1317. CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
  1318. ReplaceNode(Node, Load);
  1319. return;
  1320. }
  1321. case Intrinsic::riscv_vleff:
  1322. case Intrinsic::riscv_vleff_mask: {
  1323. bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
  1324. MVT VT = Node->getSimpleValueType(0);
  1325. unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  1326. unsigned CurOp = 2;
1327. // Masked intrinsics only have TU version pseudo instructions.
  1328. bool IsTU = IsMasked || !Node->getOperand(CurOp).isUndef();
  1329. SmallVector<SDValue, 7> Operands;
  1330. if (IsTU)
  1331. Operands.push_back(Node->getOperand(CurOp++));
  1332. else
  1333. // Skip the undef passthru operand for nomask TA version pseudo
  1334. CurOp++;
  1335. addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
  1336. /*IsStridedOrIndexed*/ false, Operands,
  1337. /*IsLoad=*/true);
  1338. RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  1339. const RISCV::VLEPseudo *P =
  1340. RISCV::getVLEPseudo(IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
  1341. Log2SEW, static_cast<unsigned>(LMUL));
  1342. MachineSDNode *Load = CurDAG->getMachineNode(
  1343. P->Pseudo, DL, Node->getVTList(), Operands);
  1344. if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  1345. CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
  1346. ReplaceNode(Node, Load);
  1347. return;
  1348. }
  1349. }
  1350. break;
  1351. }
  1352. case ISD::INTRINSIC_VOID: {
  1353. unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
  1354. switch (IntNo) {
  1355. case Intrinsic::riscv_vsseg2:
  1356. case Intrinsic::riscv_vsseg3:
  1357. case Intrinsic::riscv_vsseg4:
  1358. case Intrinsic::riscv_vsseg5:
  1359. case Intrinsic::riscv_vsseg6:
  1360. case Intrinsic::riscv_vsseg7:
  1361. case Intrinsic::riscv_vsseg8: {
  1362. selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
  1363. return;
  1364. }
  1365. case Intrinsic::riscv_vsseg2_mask:
  1366. case Intrinsic::riscv_vsseg3_mask:
  1367. case Intrinsic::riscv_vsseg4_mask:
  1368. case Intrinsic::riscv_vsseg5_mask:
  1369. case Intrinsic::riscv_vsseg6_mask:
  1370. case Intrinsic::riscv_vsseg7_mask:
  1371. case Intrinsic::riscv_vsseg8_mask: {
  1372. selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
  1373. return;
  1374. }
  1375. case Intrinsic::riscv_vssseg2:
  1376. case Intrinsic::riscv_vssseg3:
  1377. case Intrinsic::riscv_vssseg4:
  1378. case Intrinsic::riscv_vssseg5:
  1379. case Intrinsic::riscv_vssseg6:
  1380. case Intrinsic::riscv_vssseg7:
  1381. case Intrinsic::riscv_vssseg8: {
  1382. selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
  1383. return;
  1384. }
  1385. case Intrinsic::riscv_vssseg2_mask:
  1386. case Intrinsic::riscv_vssseg3_mask:
  1387. case Intrinsic::riscv_vssseg4_mask:
  1388. case Intrinsic::riscv_vssseg5_mask:
  1389. case Intrinsic::riscv_vssseg6_mask:
  1390. case Intrinsic::riscv_vssseg7_mask:
  1391. case Intrinsic::riscv_vssseg8_mask: {
  1392. selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
  1393. return;
  1394. }
  1395. case Intrinsic::riscv_vsoxseg2:
  1396. case Intrinsic::riscv_vsoxseg3:
  1397. case Intrinsic::riscv_vsoxseg4:
  1398. case Intrinsic::riscv_vsoxseg5:
  1399. case Intrinsic::riscv_vsoxseg6:
  1400. case Intrinsic::riscv_vsoxseg7:
  1401. case Intrinsic::riscv_vsoxseg8:
  1402. selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
  1403. return;
  1404. case Intrinsic::riscv_vsuxseg2:
  1405. case Intrinsic::riscv_vsuxseg3:
  1406. case Intrinsic::riscv_vsuxseg4:
  1407. case Intrinsic::riscv_vsuxseg5:
  1408. case Intrinsic::riscv_vsuxseg6:
  1409. case Intrinsic::riscv_vsuxseg7:
  1410. case Intrinsic::riscv_vsuxseg8:
  1411. selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
  1412. return;
  1413. case Intrinsic::riscv_vsoxseg2_mask:
  1414. case Intrinsic::riscv_vsoxseg3_mask:
  1415. case Intrinsic::riscv_vsoxseg4_mask:
  1416. case Intrinsic::riscv_vsoxseg5_mask:
  1417. case Intrinsic::riscv_vsoxseg6_mask:
  1418. case Intrinsic::riscv_vsoxseg7_mask:
  1419. case Intrinsic::riscv_vsoxseg8_mask:
  1420. selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
  1421. return;
  1422. case Intrinsic::riscv_vsuxseg2_mask:
  1423. case Intrinsic::riscv_vsuxseg3_mask:
  1424. case Intrinsic::riscv_vsuxseg4_mask:
  1425. case Intrinsic::riscv_vsuxseg5_mask:
  1426. case Intrinsic::riscv_vsuxseg6_mask:
  1427. case Intrinsic::riscv_vsuxseg7_mask:
  1428. case Intrinsic::riscv_vsuxseg8_mask:
  1429. selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
  1430. return;
  1431. case Intrinsic::riscv_vsoxei:
  1432. case Intrinsic::riscv_vsoxei_mask:
  1433. case Intrinsic::riscv_vsuxei:
  1434. case Intrinsic::riscv_vsuxei_mask: {
  1435. bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
  1436. IntNo == Intrinsic::riscv_vsuxei_mask;
  1437. bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
  1438. IntNo == Intrinsic::riscv_vsoxei_mask;
  1439. MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  1440. unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  1441. unsigned CurOp = 2;
  1442. SmallVector<SDValue, 8> Operands;
  1443. Operands.push_back(Node->getOperand(CurOp++)); // Store value.
  1444. MVT IndexVT;
  1445. addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
  1446. /*IsStridedOrIndexed*/ true, Operands,
  1447. /*IsLoad=*/false, &IndexVT);
  1448. assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
  1449. "Element count mismatch");
  1450. RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  1451. RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
  1452. unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
  1453. if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
  1454. report_fatal_error("The V extension does not support EEW=64 for index "
  1455. "values when XLEN=32");
  1456. }
  1457. const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
  1458. IsMasked, /*TU*/ false, IsOrdered, IndexLog2EEW,
  1459. static_cast<unsigned>(LMUL), static_cast<unsigned>(IndexLMUL));
  1460. MachineSDNode *Store =
  1461. CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
  1462. if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  1463. CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
  1464. ReplaceNode(Node, Store);
  1465. return;
  1466. }
  1467. case Intrinsic::riscv_vsm:
  1468. case Intrinsic::riscv_vse:
  1469. case Intrinsic::riscv_vse_mask:
  1470. case Intrinsic::riscv_vsse:
  1471. case Intrinsic::riscv_vsse_mask: {
  1472. bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
  1473. IntNo == Intrinsic::riscv_vsse_mask;
  1474. bool IsStrided =
  1475. IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
  1476. MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  1477. unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  1478. unsigned CurOp = 2;
  1479. SmallVector<SDValue, 8> Operands;
  1480. Operands.push_back(Node->getOperand(CurOp++)); // Store value.
  1481. addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
  1482. Operands);
  1483. RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  1484. const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
  1485. IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
  1486. MachineSDNode *Store =
  1487. CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
  1488. if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  1489. CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
  1490. ReplaceNode(Node, Store);
  1491. return;
  1492. }
  1493. }
  1494. break;
  1495. }
  1496. case ISD::BITCAST: {
  1497. MVT SrcVT = Node->getOperand(0).getSimpleValueType();
  1498. // Just drop bitcasts between vectors if both are fixed or both are
  1499. // scalable.
  1500. if ((VT.isScalableVector() && SrcVT.isScalableVector()) ||
  1501. (VT.isFixedLengthVector() && SrcVT.isFixedLengthVector())) {
  1502. ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
  1503. CurDAG->RemoveDeadNode(Node);
  1504. return;
  1505. }
  1506. break;
  1507. }
  1508. case ISD::INSERT_SUBVECTOR: {
  1509. SDValue V = Node->getOperand(0);
  1510. SDValue SubV = Node->getOperand(1);
  1511. SDLoc DL(SubV);
  1512. auto Idx = Node->getConstantOperandVal(2);
  1513. MVT SubVecVT = SubV.getSimpleValueType();
  1514. const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
  1515. MVT SubVecContainerVT = SubVecVT;
  1516. // Establish the correct scalable-vector types for any fixed-length type.
  1517. if (SubVecVT.isFixedLengthVector())
  1518. SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
  1519. if (VT.isFixedLengthVector())
  1520. VT = TLI.getContainerForFixedLengthVector(VT);
  1521. const auto *TRI = Subtarget->getRegisterInfo();
  1522. unsigned SubRegIdx;
  1523. std::tie(SubRegIdx, Idx) =
  1524. RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
  1525. VT, SubVecContainerVT, Idx, TRI);
  1526. // If the Idx hasn't been completely eliminated then this is a subvector
  1527. // insert which doesn't naturally align to a vector register. These must
  1528. // be handled using instructions to manipulate the vector registers.
  1529. if (Idx != 0)
  1530. break;
  1531. RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
  1532. bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
  1533. SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
  1534. SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
  1535. (void)IsSubVecPartReg; // Silence unused variable warning without asserts.
  1536. assert((!IsSubVecPartReg || V.isUndef()) &&
  1537. "Expecting lowering to have created legal INSERT_SUBVECTORs when "
  1538. "the subvector is smaller than a full-sized register");
  1539. // If we haven't set a SubRegIdx, then we must be going between
  1540. // equally-sized LMUL groups (e.g. VR -> VR). This can be done as a copy.
  1541. if (SubRegIdx == RISCV::NoSubRegister) {
  1542. unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT);
  1543. assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
  1544. InRegClassID &&
  1545. "Unexpected subvector extraction");
  1546. SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
  1547. SDNode *NewNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
  1548. DL, VT, SubV, RC);
  1549. ReplaceNode(Node, NewNode);
  1550. return;
  1551. }
  1552. SDValue Insert = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, VT, V, SubV);
  1553. ReplaceNode(Node, Insert.getNode());
  1554. return;
  1555. }
  1556. case ISD::EXTRACT_SUBVECTOR: {
  1557. SDValue V = Node->getOperand(0);
  1558. auto Idx = Node->getConstantOperandVal(1);
  1559. MVT InVT = V.getSimpleValueType();
  1560. SDLoc DL(V);
  1561. const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
  1562. MVT SubVecContainerVT = VT;
  1563. // Establish the correct scalable-vector types for any fixed-length type.
  1564. if (VT.isFixedLengthVector())
  1565. SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
  1566. if (InVT.isFixedLengthVector())
  1567. InVT = TLI.getContainerForFixedLengthVector(InVT);
  1568. const auto *TRI = Subtarget->getRegisterInfo();
  1569. unsigned SubRegIdx;
  1570. std::tie(SubRegIdx, Idx) =
  1571. RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
  1572. InVT, SubVecContainerVT, Idx, TRI);
  1573. // If the Idx hasn't been completely eliminated then this is a subvector
  1574. // extract which doesn't naturally align to a vector register. These must
  1575. // be handled using instructions to manipulate the vector registers.
  1576. if (Idx != 0)
  1577. break;
  1578. // If we haven't set a SubRegIdx, then we must be going between
  1579. // equally-sized LMUL types (e.g. VR -> VR). This can be done as a copy.
  1580. if (SubRegIdx == RISCV::NoSubRegister) {
  1581. unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(InVT);
  1582. assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
  1583. InRegClassID &&
  1584. "Unexpected subvector extraction");
  1585. SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
  1586. SDNode *NewNode =
  1587. CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
  1588. ReplaceNode(Node, NewNode);
  1589. return;
  1590. }
  1591. SDValue Extract = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, V);
  1592. ReplaceNode(Node, Extract.getNode());
  1593. return;
  1594. }
  1595. case RISCVISD::VMV_S_X_VL:
  1596. case RISCVISD::VFMV_S_F_VL:
  1597. case RISCVISD::VMV_V_X_VL:
  1598. case RISCVISD::VFMV_V_F_VL: {
  1599. // Only if we have optimized zero-stride vector load.
  1600. if (!Subtarget->hasOptimizedZeroStrideLoad())
  1601. break;
  1602. // Try to match splat of a scalar load to a strided load with stride of x0.
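// e.g. a splat of (load p) becomes "vlse32.v vd, (p), zero" for SEW=32; with
// a stride of x0 every element is read from the same address.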
  1603. bool IsScalarMove = Node->getOpcode() == RISCVISD::VMV_S_X_VL ||
  1604. Node->getOpcode() == RISCVISD::VFMV_S_F_VL;
  1605. if (!Node->getOperand(0).isUndef())
  1606. break;
  1607. SDValue Src = Node->getOperand(1);
  1608. auto *Ld = dyn_cast<LoadSDNode>(Src);
  1609. if (!Ld)
  1610. break;
  1611. EVT MemVT = Ld->getMemoryVT();
  1612. // The memory VT should be the same size as the element type.
  1613. if (MemVT.getStoreSize() != VT.getVectorElementType().getStoreSize())
  1614. break;
  1615. if (!IsProfitableToFold(Src, Node, Node) ||
  1616. !IsLegalToFold(Src, Node, Node, TM.getOptLevel()))
  1617. break;
  1618. SDValue VL;
  1619. if (IsScalarMove) {
  1620. // We could deal with more VL if we update the VSETVLI insert pass to
  1621. // avoid introducing more VSETVLI.
  1622. if (!isOneConstant(Node->getOperand(2)))
  1623. break;
  1624. selectVLOp(Node->getOperand(2), VL);
  1625. } else
  1626. selectVLOp(Node->getOperand(2), VL);
  1627. unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  1628. SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
  1629. SDValue Operands[] = {Ld->getBasePtr(),
  1630. CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
  1631. Ld->getChain()};
  1632. RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  1633. const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
  1634. /*IsMasked*/ false, /*IsTU*/ false, /*IsStrided*/ true, /*FF*/ false,
  1635. Log2SEW, static_cast<unsigned>(LMUL));
  1636. MachineSDNode *Load =
  1637. CurDAG->getMachineNode(P->Pseudo, DL, {VT, MVT::Other}, Operands);
  1638. // Update the chain.
  1639. ReplaceUses(Src.getValue(1), SDValue(Load, 1));
  1640. // Record the mem-refs
  1641. CurDAG->setNodeMemRefs(Load, {Ld->getMemOperand()});
  1642. // Replace the splat with the vlse.
  1643. ReplaceNode(Node, Load);
  1644. return;
  1645. }
  1646. }
  1647. // Select the default instruction.
  1648. SelectCode(Node);
  1649. }
  1650. bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
  1651. const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
  1652. switch (ConstraintID) {
  1653. case InlineAsm::Constraint_m:
  1654. // We just support simple memory operands that have a single address
  1655. // operand and need no special handling.
  1656. OutOps.push_back(Op);
  1657. return false;
  1658. case InlineAsm::Constraint_A:
  1659. OutOps.push_back(Op);
  1660. return false;
  1661. default:
  1662. break;
  1663. }
  1664. return true;
  1665. }
  1666. bool RISCVDAGToDAGISel::SelectAddrFrameIndex(SDValue Addr, SDValue &Base,
  1667. SDValue &Offset) {
  1668. if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
  1669. Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
  1670. Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), Subtarget->getXLenVT());
  1671. return true;
  1672. }
  1673. return false;
  1674. }
  1675. // Select a frame index and an optional immediate offset from an ADD or OR.
  1676. bool RISCVDAGToDAGISel::SelectFrameAddrRegImm(SDValue Addr, SDValue &Base,
  1677. SDValue &Offset) {
  1678. if (SelectAddrFrameIndex(Addr, Base, Offset))
  1679. return true;
  1680. if (!CurDAG->isBaseWithConstantOffset(Addr))
  1681. return false;
  1682. if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
  1683. int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
  1684. if (isInt<12>(CVal)) {
  1685. Base = CurDAG->getTargetFrameIndex(FIN->getIndex(),
  1686. Subtarget->getXLenVT());
  1687. Offset = CurDAG->getTargetConstant(CVal, SDLoc(Addr),
  1688. Subtarget->getXLenVT());
  1689. return true;
  1690. }
  1691. }
  1692. return false;
  1693. }
  1694. // Fold constant addresses.
  1695. static bool selectConstantAddr(SelectionDAG *CurDAG, const SDLoc &DL,
  1696. const MVT VT, const RISCVSubtarget *Subtarget,
  1697. SDValue Addr, SDValue &Base, SDValue &Offset) {
  1698. if (!isa<ConstantSDNode>(Addr))
  1699. return false;
  1700. int64_t CVal = cast<ConstantSDNode>(Addr)->getSExtValue();
  1701. // If the constant is a simm12, we can fold the whole constant and use X0 as
  1702. // the base. If the constant can be materialized with LUI+simm12, use LUI as
  1703. // the base. We can't use generateInstSeq because it favors LUI+ADDIW.
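// e.g. for Addr = 0x12345678, Lo12 = 0x678 and Hi20 = 0x12345, so the base
// becomes "lui rX, 0x12345" and 0x678 is folded into the memory offset.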
  1704. int64_t Lo12 = SignExtend64<12>(CVal);
  1705. int64_t Hi = (uint64_t)CVal - (uint64_t)Lo12;
  1706. if (!Subtarget->is64Bit() || isInt<32>(Hi)) {
  1707. if (Hi) {
  1708. int64_t Hi20 = (Hi >> 12) & 0xfffff;
  1709. Base = SDValue(
  1710. CurDAG->getMachineNode(RISCV::LUI, DL, VT,
  1711. CurDAG->getTargetConstant(Hi20, DL, VT)),
  1712. 0);
  1713. } else {
  1714. Base = CurDAG->getRegister(RISCV::X0, VT);
  1715. }
  1716. Offset = CurDAG->getTargetConstant(Lo12, DL, VT);
  1717. return true;
  1718. }
  1719. // Ask how constant materialization would handle this constant.
  1720. RISCVMatInt::InstSeq Seq =
  1721. RISCVMatInt::generateInstSeq(CVal, Subtarget->getFeatureBits());
  1722. // If the last instruction would be an ADDI, we can fold its immediate and
  1723. // emit the rest of the sequence as the base.
  1724. if (Seq.back().getOpcode() != RISCV::ADDI)
  1725. return false;
  1726. Lo12 = Seq.back().getImm();
  1727. // Drop the last instruction.
  1728. Seq.pop_back();
  1729. assert(!Seq.empty() && "Expected more instructions in sequence");
  1730. Base = SDValue(selectImmSeq(CurDAG, DL, VT, Seq), 0);
  1731. Offset = CurDAG->getTargetConstant(Lo12, DL, VT);
  1732. return true;
  1733. }
  1734. // Is this ADD instruction only used as the base pointer of scalar loads and
  1735. // stores?
  1736. static bool isWorthFoldingAdd(SDValue Add) {
  1737. for (auto *Use : Add->uses()) {
  1738. if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
  1739. Use->getOpcode() != ISD::ATOMIC_LOAD &&
  1740. Use->getOpcode() != ISD::ATOMIC_STORE)
  1741. return false;
  1742. EVT VT = cast<MemSDNode>(Use)->getMemoryVT();
  1743. if (!VT.isScalarInteger() && VT != MVT::f16 && VT != MVT::f32 &&
  1744. VT != MVT::f64)
  1745. return false;
  1746. // Don't allow stores of the value. It must be used as the address.
  1747. if (Use->getOpcode() == ISD::STORE &&
  1748. cast<StoreSDNode>(Use)->getValue() == Add)
  1749. return false;
  1750. if (Use->getOpcode() == ISD::ATOMIC_STORE &&
  1751. cast<AtomicSDNode>(Use)->getVal() == Add)
  1752. return false;
  1753. }
  1754. return true;
  1755. }
  1756. bool RISCVDAGToDAGISel::SelectAddrRegImm(SDValue Addr, SDValue &Base,
  1757. SDValue &Offset) {
  1758. if (SelectAddrFrameIndex(Addr, Base, Offset))
  1759. return true;
  1760. SDLoc DL(Addr);
  1761. MVT VT = Addr.getSimpleValueType();
  1762. if (Addr.getOpcode() == RISCVISD::ADD_LO) {
  1763. Base = Addr.getOperand(0);
  1764. Offset = Addr.getOperand(1);
  1765. return true;
  1766. }
  1767. if (CurDAG->isBaseWithConstantOffset(Addr)) {
  1768. int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
  1769. if (isInt<12>(CVal)) {
  1770. Base = Addr.getOperand(0);
  1771. if (Base.getOpcode() == RISCVISD::ADD_LO) {
  1772. SDValue LoOperand = Base.getOperand(1);
  1773. if (auto *GA = dyn_cast<GlobalAddressSDNode>(LoOperand)) {
  1774. // If the Lo in (ADD_LO hi, lo) is a global variable's address
  1775. // (its low part, really), then we can rely on the alignment of that
1776. // variable to provide a margin of safety before the low part can overflow
1777. // the 12 bits of the load/store offset. Check whether CVal falls within
1778. // that margin; if so, (low part + CVal) can't overflow.
  1779. const DataLayout &DL = CurDAG->getDataLayout();
  1780. Align Alignment = commonAlignment(
  1781. GA->getGlobal()->getPointerAlignment(DL), GA->getOffset());
  1782. if (CVal == 0 || Alignment > CVal) {
  1783. int64_t CombinedOffset = CVal + GA->getOffset();
  1784. Base = Base.getOperand(0);
  1785. Offset = CurDAG->getTargetGlobalAddress(
  1786. GA->getGlobal(), SDLoc(LoOperand), LoOperand.getValueType(),
  1787. CombinedOffset, GA->getTargetFlags());
  1788. return true;
  1789. }
  1790. }
  1791. }
  1792. if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))
  1793. Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), VT);
  1794. Offset = CurDAG->getTargetConstant(CVal, DL, VT);
  1795. return true;
  1796. }
  1797. }
  1798. // Handle ADD with large immediates.
  1799. if (Addr.getOpcode() == ISD::ADD && isa<ConstantSDNode>(Addr.getOperand(1))) {
  1800. int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
  1801. assert(!isInt<12>(CVal) && "simm12 not already handled?");
  1802. // Handle immediates in the range [-4096,-2049] or [2048, 4094]. We can use
  1803. // an ADDI for part of the offset and fold the rest into the load/store.
  1804. // This mirrors the AddiPair PatFrag in RISCVInstrInfo.td.
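// e.g. an offset of 3000 becomes "addi base, base, 2047" with the remaining
// 953 folded into the load/store offset.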
  1805. if (isInt<12>(CVal / 2) && isInt<12>(CVal - CVal / 2)) {
  1806. int64_t Adj = CVal < 0 ? -2048 : 2047;
  1807. Base = SDValue(
  1808. CurDAG->getMachineNode(RISCV::ADDI, DL, VT, Addr.getOperand(0),
  1809. CurDAG->getTargetConstant(Adj, DL, VT)),
  1810. 0);
  1811. Offset = CurDAG->getTargetConstant(CVal - Adj, DL, VT);
  1812. return true;
  1813. }
  1814. // For larger immediates, we might be able to save one instruction from
  1815. // constant materialization by folding the Lo12 bits of the immediate into
  1816. // the address. We should only do this if the ADD is only used by loads and
1817. // stores that can fold the lo12 bits. Otherwise, the ADD will be selected
1818. // separately with the fully materialized immediate, creating extra
1819. // instructions.
  1820. if (isWorthFoldingAdd(Addr) &&
  1821. selectConstantAddr(CurDAG, DL, VT, Subtarget, Addr.getOperand(1), Base,
  1822. Offset)) {
  1823. // Insert an ADD instruction with the materialized Hi52 bits.
  1824. Base = SDValue(
  1825. CurDAG->getMachineNode(RISCV::ADD, DL, VT, Addr.getOperand(0), Base),
  1826. 0);
  1827. return true;
  1828. }
  1829. }
  1830. if (selectConstantAddr(CurDAG, DL, VT, Subtarget, Addr, Base, Offset))
  1831. return true;
  1832. Base = Addr;
  1833. Offset = CurDAG->getTargetConstant(0, DL, VT);
  1834. return true;
  1835. }
  1836. bool RISCVDAGToDAGISel::selectShiftMask(SDValue N, unsigned ShiftWidth,
  1837. SDValue &ShAmt) {
  1838. ShAmt = N;
  1839. // Shift instructions on RISCV only read the lower 5 or 6 bits of the shift
  1840. // amount. If there is an AND on the shift amount, we can bypass it if it
  1841. // doesn't affect any of those bits.
  1842. if (ShAmt.getOpcode() == ISD::AND && isa<ConstantSDNode>(ShAmt.getOperand(1))) {
  1843. const APInt &AndMask = ShAmt.getConstantOperandAPInt(1);
  1844. // Since the max shift amount is a power of 2 we can subtract 1 to make a
  1845. // mask that covers the bits needed to represent all shift amounts.
  1846. assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
  1847. APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
  1848. if (ShMask.isSubsetOf(AndMask)) {
  1849. ShAmt = ShAmt.getOperand(0);
  1850. } else {
  1851. // SimplifyDemandedBits may have optimized the mask so try restoring any
  1852. // bits that are known zero.
  1853. KnownBits Known = CurDAG->computeKnownBits(ShAmt.getOperand(0));
  1854. if (!ShMask.isSubsetOf(AndMask | Known.Zero))
  1855. return true;
  1856. ShAmt = ShAmt.getOperand(0);
  1857. }
  1858. }
  1859. if (ShAmt.getOpcode() == ISD::ADD &&
  1860. isa<ConstantSDNode>(ShAmt.getOperand(1))) {
  1861. uint64_t Imm = ShAmt.getConstantOperandVal(1);
  1862. // If we are shifting by X+N where N == 0 mod Size, then just shift by X
  1863. // to avoid the ADD.
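// e.g. for a 64-bit shift, amounts x and x+64 give the same result because
// the hardware only reads the low 6 bits of the shift amount.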
  1864. if (Imm != 0 && Imm % ShiftWidth == 0) {
  1865. ShAmt = ShAmt.getOperand(0);
  1866. return true;
  1867. }
  1868. } else if (ShAmt.getOpcode() == ISD::SUB &&
  1869. isa<ConstantSDNode>(ShAmt.getOperand(0))) {
  1870. uint64_t Imm = ShAmt.getConstantOperandVal(0);
  1871. // If we are shifting by N-X where N == 0 mod Size, then just shift by -X to
  1872. // generate a NEG instead of a SUB of a constant.
  1873. if (Imm != 0 && Imm % ShiftWidth == 0) {
  1874. SDLoc DL(ShAmt);
  1875. EVT VT = ShAmt.getValueType();
  1876. SDValue Zero = CurDAG->getRegister(RISCV::X0, VT);
  1877. unsigned NegOpc = VT == MVT::i64 ? RISCV::SUBW : RISCV::SUB;
  1878. MachineSDNode *Neg = CurDAG->getMachineNode(NegOpc, DL, VT, Zero,
  1879. ShAmt.getOperand(1));
  1880. ShAmt = SDValue(Neg, 0);
  1881. return true;
  1882. }
  1883. // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
  1884. // to generate a NOT instead of a SUB of a constant.
  1885. if (Imm % ShiftWidth == ShiftWidth - 1) {
  1886. SDLoc DL(ShAmt);
  1887. EVT VT = ShAmt.getValueType();
  1888. MachineSDNode *Not =
  1889. CurDAG->getMachineNode(RISCV::XORI, DL, VT, ShAmt.getOperand(1),
  1890. CurDAG->getTargetConstant(-1, DL, VT));
  1891. ShAmt = SDValue(Not, 0);
  1892. return true;
  1893. }
  1894. }
  1895. return true;
  1896. }
  1897. bool RISCVDAGToDAGISel::selectSExti32(SDValue N, SDValue &Val) {
  1898. if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
  1899. cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
  1900. Val = N.getOperand(0);
  1901. return true;
  1902. }
  1903. MVT VT = N.getSimpleValueType();
  1904. if (CurDAG->ComputeNumSignBits(N) > (VT.getSizeInBits() - 32)) {
  1905. Val = N;
  1906. return true;
  1907. }
  1908. return false;
  1909. }
  1910. bool RISCVDAGToDAGISel::selectZExtBits(SDValue N, unsigned Bits, SDValue &Val) {
  1911. if (N.getOpcode() == ISD::AND) {
  1912. auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
  1913. if (C && C->getZExtValue() == maskTrailingOnes<uint64_t>(Bits)) {
  1914. Val = N.getOperand(0);
  1915. return true;
  1916. }
  1917. }
  1918. MVT VT = N.getSimpleValueType();
  1919. APInt Mask = APInt::getBitsSetFrom(VT.getSizeInBits(), Bits);
  1920. if (CurDAG->MaskedValueIsZero(N, Mask)) {
  1921. Val = N;
  1922. return true;
  1923. }
  1924. return false;
  1925. }
  1926. /// Look for various patterns that can be done with a SHL that can be folded
  1927. /// into a SHXADD. \p ShAmt contains 1, 2, or 3 and is set based on which
  1928. /// SHXADD we are trying to match.
  1929. bool RISCVDAGToDAGISel::selectSHXADDOp(SDValue N, unsigned ShAmt,
  1930. SDValue &Val) {
  1931. if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
  1932. SDValue N0 = N.getOperand(0);
  1933. bool LeftShift = N0.getOpcode() == ISD::SHL;
  1934. if ((LeftShift || N0.getOpcode() == ISD::SRL) &&
  1935. isa<ConstantSDNode>(N0.getOperand(1))) {
  1936. uint64_t Mask = N.getConstantOperandVal(1);
  1937. unsigned C2 = N0.getConstantOperandVal(1);
  1938. unsigned XLen = Subtarget->getXLen();
  1939. if (LeftShift)
  1940. Mask &= maskTrailingZeros<uint64_t>(C2);
  1941. else
  1942. Mask &= maskTrailingOnes<uint64_t>(XLen - C2);
  1943. // Look for (and (shl y, c2), c1) where c1 is a shifted mask with no
1944. // leading zeros and c3 trailing zeros. We can use an SRLI by c3-c2
  1945. // followed by a SHXADD with c3 for the X amount.
  1946. if (isShiftedMask_64(Mask)) {
  1947. unsigned Leading = XLen - llvm::bit_width(Mask);
  1948. unsigned Trailing = countTrailingZeros(Mask);
  1949. if (LeftShift && Leading == 0 && C2 < Trailing && Trailing == ShAmt) {
  1950. SDLoc DL(N);
  1951. EVT VT = N.getValueType();
  1952. Val = SDValue(CurDAG->getMachineNode(
  1953. RISCV::SRLI, DL, VT, N0.getOperand(0),
  1954. CurDAG->getTargetConstant(Trailing - C2, DL, VT)),
  1955. 0);
  1956. return true;
  1957. }
  1958. // Look for (and (shr y, c2), c1) where c1 is a shifted mask with c2
1959. // leading zeros and c3 trailing zeros. We can use an SRLI by c2+c3
  1960. // followed by a SHXADD using c3 for the X amount.
  1961. if (!LeftShift && Leading == C2 && Trailing == ShAmt) {
  1962. SDLoc DL(N);
  1963. EVT VT = N.getValueType();
  1964. Val = SDValue(
  1965. CurDAG->getMachineNode(
  1966. RISCV::SRLI, DL, VT, N0.getOperand(0),
  1967. CurDAG->getTargetConstant(Leading + Trailing, DL, VT)),
  1968. 0);
  1969. return true;
  1970. }
  1971. }
  1972. }
  1973. }
  1974. bool LeftShift = N.getOpcode() == ISD::SHL;
  1975. if ((LeftShift || N.getOpcode() == ISD::SRL) &&
  1976. isa<ConstantSDNode>(N.getOperand(1))) {
  1977. SDValue N0 = N.getOperand(0);
  1978. if (N0.getOpcode() == ISD::AND && N0.hasOneUse() &&
  1979. isa<ConstantSDNode>(N0.getOperand(1))) {
  1980. uint64_t Mask = N0.getConstantOperandVal(1);
  1981. if (isShiftedMask_64(Mask)) {
  1982. unsigned C1 = N.getConstantOperandVal(1);
  1983. unsigned XLen = Subtarget->getXLen();
  1984. unsigned Leading = XLen - llvm::bit_width(Mask);
  1985. unsigned Trailing = countTrailingZeros(Mask);
  1986. // Look for (shl (and X, Mask), C1) where Mask has 32 leading zeros and
  1987. // C3 trailing zeros. If C1+C3==ShAmt we can use SRLIW+SHXADD.
  1988. if (LeftShift && Leading == 32 && Trailing > 0 &&
  1989. (Trailing + C1) == ShAmt) {
  1990. SDLoc DL(N);
  1991. EVT VT = N.getValueType();
  1992. Val = SDValue(CurDAG->getMachineNode(
  1993. RISCV::SRLIW, DL, VT, N0.getOperand(0),
  1994. CurDAG->getTargetConstant(Trailing, DL, VT)),
  1995. 0);
  1996. return true;
  1997. }
  1998. // Look for (srl (and X, Mask), C1) where Mask has 32 leading zeros and
  1999. // C3 trailing zeros. If C3-C1==ShAmt we can use SRLIW+SHXADD.
  2000. if (!LeftShift && Leading == 32 && Trailing > C1 &&
  2001. (Trailing - C1) == ShAmt) {
  2002. SDLoc DL(N);
  2003. EVT VT = N.getValueType();
  2004. Val = SDValue(CurDAG->getMachineNode(
  2005. RISCV::SRLIW, DL, VT, N0.getOperand(0),
  2006. CurDAG->getTargetConstant(Trailing, DL, VT)),
  2007. 0);
  2008. return true;
  2009. }
  2010. }
  2011. }
  2012. }
  2013. return false;
  2014. }
  2015. /// Look for various patterns that can be done with a SHL that can be folded
  2016. /// into a SHXADD_UW. \p ShAmt contains 1, 2, or 3 and is set based on which
  2017. /// SHXADD_UW we are trying to match.
  2018. bool RISCVDAGToDAGISel::selectSHXADD_UWOp(SDValue N, unsigned ShAmt,
  2019. SDValue &Val) {
  2020. if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1)) &&
  2021. N.hasOneUse()) {
  2022. SDValue N0 = N.getOperand(0);
  2023. if (N0.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N0.getOperand(1)) &&
  2024. N0.hasOneUse()) {
  2025. uint64_t Mask = N.getConstantOperandVal(1);
  2026. unsigned C2 = N0.getConstantOperandVal(1);
  2027. Mask &= maskTrailingZeros<uint64_t>(C2);
  2028. // Look for (and (shl y, c2), c1) where c1 is a shifted mask with
  2029. // 32-ShAmt leading zeros and c2 trailing zeros. We can use SLLI by
  2030. // c2-ShAmt followed by SHXADD_UW with ShAmt for the X amount.
  2031. if (isShiftedMask_64(Mask)) {
  2032. unsigned Leading = countLeadingZeros(Mask);
  2033. unsigned Trailing = countTrailingZeros(Mask);
  2034. if (Leading == 32 - ShAmt && Trailing == C2 && Trailing > ShAmt) {
  2035. SDLoc DL(N);
  2036. EVT VT = N.getValueType();
  2037. Val = SDValue(CurDAG->getMachineNode(
  2038. RISCV::SLLI, DL, VT, N0.getOperand(0),
  2039. CurDAG->getTargetConstant(C2 - ShAmt, DL, VT)),
  2040. 0);
  2041. return true;
  2042. }
  2043. }
  2044. }
  2045. }
  2046. return false;
  2047. }
  2048. // Return true if all users of this SDNode* only consume the lower \p Bits.
  2049. // This can be used to form W instructions for add/sub/mul/shl even when the
  2050. // root isn't a sext_inreg. This can allow the ADDW/SUBW/MULW/SLLIW to CSE if
  2051. // SimplifyDemandedBits has made it so some users see a sext_inreg and some
  2052. // don't. The sext_inreg+add/sub/mul/shl will get selected, but still leave
  2053. // the add/sub/mul/shl to become non-W instructions. By checking the users we
  2054. // may be able to use a W instruction and CSE with the other instruction if
  2055. // this has happened. We could try to detect that the CSE opportunity exists
  2056. // before doing this, but that would be more complicated.
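// For example, if an i64 ADD is only consumed by SW stores (of its value) and
// ADDW users, only its low 32 bits are observable, so the ADD itself can be
// selected as ADDW.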
  2057. bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits,
  2058. const unsigned Depth) const {
  2059. assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB ||
  2060. Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL ||
  2061. Node->getOpcode() == ISD::SRL || Node->getOpcode() == ISD::AND ||
  2062. Node->getOpcode() == ISD::OR || Node->getOpcode() == ISD::XOR ||
  2063. Node->getOpcode() == ISD::SIGN_EXTEND_INREG ||
  2064. isa<ConstantSDNode>(Node) || Depth != 0) &&
  2065. "Unexpected opcode");
  2066. if (Depth >= SelectionDAG::MaxRecursionDepth)
  2067. return false;
  2068. for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
  2069. SDNode *User = *UI;
  2070. // Users of this node should have already been instruction selected
  2071. if (!User->isMachineOpcode())
  2072. return false;
  2073. // TODO: Add more opcodes?
  2074. switch (User->getMachineOpcode()) {
  2075. default:
  2076. return false;
  2077. case RISCV::ADDW:
  2078. case RISCV::ADDIW:
  2079. case RISCV::SUBW:
  2080. case RISCV::MULW:
  2081. case RISCV::SLLW:
  2082. case RISCV::SLLIW:
  2083. case RISCV::SRAW:
  2084. case RISCV::SRAIW:
  2085. case RISCV::SRLW:
  2086. case RISCV::SRLIW:
  2087. case RISCV::DIVW:
  2088. case RISCV::DIVUW:
  2089. case RISCV::REMW:
  2090. case RISCV::REMUW:
  2091. case RISCV::ROLW:
  2092. case RISCV::RORW:
  2093. case RISCV::RORIW:
  2094. case RISCV::CLZW:
  2095. case RISCV::CTZW:
  2096. case RISCV::CPOPW:
  2097. case RISCV::SLLI_UW:
  2098. case RISCV::FMV_W_X:
  2099. case RISCV::FCVT_H_W:
  2100. case RISCV::FCVT_H_WU:
  2101. case RISCV::FCVT_S_W:
  2102. case RISCV::FCVT_S_WU:
  2103. case RISCV::FCVT_D_W:
  2104. case RISCV::FCVT_D_WU:
  2105. if (Bits < 32)
  2106. return false;
  2107. break;
  2108. case RISCV::SLL:
  2109. case RISCV::SRA:
  2110. case RISCV::SRL:
  2111. case RISCV::ROL:
  2112. case RISCV::ROR:
  2113. case RISCV::BSET:
  2114. case RISCV::BCLR:
  2115. case RISCV::BINV:
  2116. // Shift amount operands only use log2(Xlen) bits.
  2117. if (UI.getOperandNo() != 1 || Bits < Log2_32(Subtarget->getXLen()))
  2118. return false;
  2119. break;
  2120. case RISCV::SLLI:
  2121. // SLLI only uses the lower (XLen - ShAmt) bits.
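// e.g. with XLen=64, "slli rd, rs, 40" only reads bits [23:0] of rs.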
  2122. if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))
  2123. return false;
  2124. break;
  2125. case RISCV::ANDI:
  2126. if (Bits >= (unsigned)llvm::bit_width(User->getConstantOperandVal(1)))
  2127. break;
  2128. goto RecCheck;
  2129. case RISCV::ORI: {
  2130. uint64_t Imm = cast<ConstantSDNode>(User->getOperand(1))->getSExtValue();
  2131. if (Bits >= (unsigned)llvm::bit_width<uint64_t>(~Imm))
  2132. break;
  2133. [[fallthrough]];
  2134. }
  2135. case RISCV::AND:
  2136. case RISCV::OR:
  2137. case RISCV::XOR:
  2138. case RISCV::XORI:
  2139. case RISCV::ANDN:
  2140. case RISCV::ORN:
  2141. case RISCV::XNOR:
  2142. case RISCV::SH1ADD:
  2143. case RISCV::SH2ADD:
  2144. case RISCV::SH3ADD:
  2145. RecCheck:
  2146. if (hasAllNBitUsers(User, Bits, Depth + 1))
  2147. break;
  2148. return false;
  2149. case RISCV::SRLI: {
  2150. unsigned ShAmt = User->getConstantOperandVal(1);
  2151. // If we are shifting right by less than Bits, and users don't demand any
  2152. // bits that were shifted into [Bits-1:0], then we can consider this as an
  2153. // N-Bit user.
  2154. if (Bits > ShAmt && hasAllNBitUsers(User, Bits - ShAmt, Depth + 1))
  2155. break;
  2156. return false;
  2157. }
  2158. case RISCV::SEXT_B:
  2159. case RISCV::PACKH:
  2160. if (Bits < 8)
  2161. return false;
  2162. break;
  2163. case RISCV::SEXT_H:
  2164. case RISCV::FMV_H_X:
  2165. case RISCV::ZEXT_H_RV32:
  2166. case RISCV::ZEXT_H_RV64:
  2167. case RISCV::PACKW:
  2168. if (Bits < 16)
  2169. return false;
  2170. break;
  2171. case RISCV::PACK:
  2172. if (Bits < (Subtarget->getXLen() / 2))
  2173. return false;
  2174. break;
  2175. case RISCV::ADD_UW:
  2176. case RISCV::SH1ADD_UW:
  2177. case RISCV::SH2ADD_UW:
  2178. case RISCV::SH3ADD_UW:
  2179. // The first operand to add.uw/shXadd.uw is implicitly zero extended from
  2180. // 32 bits.
  2181. if (UI.getOperandNo() != 0 || Bits < 32)
  2182. return false;
  2183. break;
  2184. case RISCV::SB:
  2185. if (UI.getOperandNo() != 0 || Bits < 8)
  2186. return false;
  2187. break;
  2188. case RISCV::SH:
  2189. if (UI.getOperandNo() != 0 || Bits < 16)
  2190. return false;
  2191. break;
  2192. case RISCV::SW:
  2193. if (UI.getOperandNo() != 0 || Bits < 32)
  2194. return false;
  2195. break;
  2196. }
  2197. }
  2198. return true;
  2199. }
2200. // Select VL as a 5-bit immediate or a value that will become a register. This
2201. // allows us to choose between VSETIVLI and VSETVLI later.
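// An all-ones constant and an explicit X0 register both denote VLMAX and are
// canonicalized to VLMaxSentinel below.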
  2202. bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
  2203. auto *C = dyn_cast<ConstantSDNode>(N);
  2204. if (C && isUInt<5>(C->getZExtValue())) {
  2205. VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
  2206. N->getValueType(0));
  2207. } else if (C && C->isAllOnesValue()) {
  2208. // Treat all ones as VLMax.
  2209. VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
  2210. N->getValueType(0));
  2211. } else if (isa<RegisterSDNode>(N) &&
  2212. cast<RegisterSDNode>(N)->getReg() == RISCV::X0) {
  2213. // All our VL operands use an operand that allows GPRNoX0 or an immediate
  2214. // as the register class. Convert X0 to a special immediate to pass the
  2215. // MachineVerifier. This is recognized specially by the vsetvli insertion
  2216. // pass.
  2217. VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
  2218. N->getValueType(0));
  2219. } else {
  2220. VL = N;
  2221. }
  2222. return true;
  2223. }
  2224. bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
  2225. if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef())
  2226. return false;
  2227. assert(N.getNumOperands() == 3 && "Unexpected number of operands");
  2228. SplatVal = N.getOperand(1);
  2229. return true;
  2230. }
  2231. using ValidateFn = bool (*)(int64_t);
  2232. static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
  2233. SelectionDAG &DAG,
  2234. const RISCVSubtarget &Subtarget,
  2235. ValidateFn ValidateImm) {
  2236. if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
  2237. !isa<ConstantSDNode>(N.getOperand(1)))
  2238. return false;
  2239. assert(N.getNumOperands() == 3 && "Unexpected number of operands");
  2240. int64_t SplatImm =
  2241. cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
  2242. // The semantics of RISCVISD::VMV_V_X_VL is that when the operand
  2243. // type is wider than the resulting vector element type: an implicit
  2244. // truncation first takes place. Therefore, perform a manual
  2245. // truncation/sign-extension in order to ignore any truncated bits and catch
  2246. // any zero-extended immediate.
  2247. // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
  2248. // sign-extending to (XLenVT -1).
  2249. MVT XLenVT = Subtarget.getXLenVT();
  2250. assert(XLenVT == N.getOperand(1).getSimpleValueType() &&
  2251. "Unexpected splat operand type");
  2252. MVT EltVT = N.getSimpleValueType().getVectorElementType();
  2253. if (EltVT.bitsLT(XLenVT))
  2254. SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());
  2255. if (!ValidateImm(SplatImm))
  2256. return false;
  2257. SplatVal = DAG.getTargetConstant(SplatImm, SDLoc(N), XLenVT);
  2258. return true;
  2259. }
  2260. bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
  2261. return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
  2262. [](int64_t Imm) { return isInt<5>(Imm); });
  2263. }
  2264. bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal) {
  2265. return selectVSplatSimmHelper(
  2266. N, SplatVal, *CurDAG, *Subtarget,
  2267. [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
  2268. }
  2269. bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero(SDValue N,
  2270. SDValue &SplatVal) {
  2271. return selectVSplatSimmHelper(
  2272. N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
  2273. return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
  2274. });
  2275. }
  2276. bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
  2277. if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
  2278. !isa<ConstantSDNode>(N.getOperand(1)))
  2279. return false;
  2280. int64_t SplatImm =
  2281. cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
  2282. if (!isUInt<5>(SplatImm))
  2283. return false;
  2284. SplatVal =
  2285. CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());
  2286. return true;
  2287. }
  2288. bool RISCVDAGToDAGISel::selectRVVSimm5(SDValue N, unsigned Width,
  2289. SDValue &Imm) {
  2290. if (auto *C = dyn_cast<ConstantSDNode>(N)) {
  2291. int64_t ImmVal = SignExtend64(C->getSExtValue(), Width);
  2292. if (!isInt<5>(ImmVal))
  2293. return false;
  2294. Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), Subtarget->getXLenVT());
  2295. return true;
  2296. }
  2297. return false;
  2298. }
  2299. // Try to remove sext.w if the input is a W instruction or can be made into
  2300. // a W instruction cheaply.
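// For example, (ADDIW (ADD a, b), 0) is replaced with (ADDW a, b), and an
// ADDIW whose input already produces a sign-extended result (ADDW, MULW, ...)
// is removed entirely.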
  2301. bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
  2302. // Look for the sext.w pattern, addiw rd, rs1, 0.
  2303. if (N->getMachineOpcode() != RISCV::ADDIW ||
  2304. !isNullConstant(N->getOperand(1)))
  2305. return false;
  2306. SDValue N0 = N->getOperand(0);
  2307. if (!N0.isMachineOpcode())
  2308. return false;
  2309. switch (N0.getMachineOpcode()) {
  2310. default:
  2311. break;
  2312. case RISCV::ADD:
  2313. case RISCV::ADDI:
  2314. case RISCV::SUB:
  2315. case RISCV::MUL:
  2316. case RISCV::SLLI: {
  2317. // Convert sext.w+add/sub/mul to their W instructions. This will create
  2318. // a new independent instruction. This improves latency.
  2319. unsigned Opc;
  2320. switch (N0.getMachineOpcode()) {
  2321. default:
  2322. llvm_unreachable("Unexpected opcode!");
  2323. case RISCV::ADD: Opc = RISCV::ADDW; break;
  2324. case RISCV::ADDI: Opc = RISCV::ADDIW; break;
  2325. case RISCV::SUB: Opc = RISCV::SUBW; break;
  2326. case RISCV::MUL: Opc = RISCV::MULW; break;
  2327. case RISCV::SLLI: Opc = RISCV::SLLIW; break;
  2328. }
  2329. SDValue N00 = N0.getOperand(0);
  2330. SDValue N01 = N0.getOperand(1);
  2331. // Shift amount needs to be uimm5.
  2332. if (N0.getMachineOpcode() == RISCV::SLLI &&
  2333. !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
  2334. break;
  2335. SDNode *Result =
  2336. CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0),
  2337. N00, N01);
  2338. ReplaceUses(N, Result);
  2339. return true;
  2340. }
  2341. case RISCV::ADDW:
  2342. case RISCV::ADDIW:
  2343. case RISCV::SUBW:
  2344. case RISCV::MULW:
  2345. case RISCV::SLLIW:
  2346. case RISCV::PACKW:
  2347. // Result is already sign extended just remove the sext.w.
  2348. // NOTE: We only handle the nodes that are selected with hasAllWUsers.
  2349. ReplaceUses(N, N0.getNode());
  2350. return true;
  2351. }
  2352. return false;
  2353. }
  2354. // Return true if we can make sure mask of N is all-ones mask.
static bool usesAllOnesMask(SDNode *N, unsigned MaskOpIdx) {
  // Check that we're using V0 as a mask register.
  if (!isa<RegisterSDNode>(N->getOperand(MaskOpIdx)) ||
      cast<RegisterSDNode>(N->getOperand(MaskOpIdx))->getReg() != RISCV::V0)
    return false;

  // The glued user defines V0.
  const auto *Glued = N->getGluedNode();
  if (!Glued || Glued->getOpcode() != ISD::CopyToReg)
    return false;

  // Check that we're defining V0 as a mask register.
  if (!isa<RegisterSDNode>(Glued->getOperand(1)) ||
      cast<RegisterSDNode>(Glued->getOperand(1))->getReg() != RISCV::V0)
    return false;

  // Check the instruction defining V0; it needs to be a VMSET pseudo.
  SDValue MaskSetter = Glued->getOperand(2);

  const auto IsVMSet = [](unsigned Opc) {
    return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
           Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
           Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
           Opc == RISCV::PseudoVMSET_M_B8;
  };

  // TODO: Check that the VMSET is the expected bitwidth? The pseudo has
  // undefined behaviour if it's the wrong bitwidth, so we could choose to
  // assume that it's all-ones? Same applies to its VL.
  return MaskSetter->isMachineOpcode() &&
         IsVMSet(MaskSetter.getMachineOpcode());
}

// Optimize masked RVV pseudo instructions with a known all-ones mask to their
// corresponding "unmasked" pseudo versions. The mask we're interested in will
// take the form of a V0 physical register operand, with a glued
// register-setting instruction.
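// For example, with an all-ones V0 a masked pseudo such as
// PseudoVADD_VV_M1_MASK is rewritten to the plain PseudoVADD_VV_M1 (or its TU
// variant), dropping the mask, policy, and glue operands in the process.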
bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(SDNode *N) {
  const RISCV::RISCVMaskedPseudoInfo *I =
      RISCV::getMaskedPseudoInfo(N->getMachineOpcode());
  if (!I)
    return false;

  unsigned MaskOpIdx = I->MaskOpIdx;
  if (!usesAllOnesMask(N, MaskOpIdx))
    return false;

  // Retrieve the tail policy operand index, if any.
  std::optional<unsigned> TailPolicyOpIdx;
  const RISCVInstrInfo &TII = *Subtarget->getInstrInfo();
  const MCInstrDesc &MaskedMCID = TII.get(N->getMachineOpcode());

  bool IsTA = true;
  if (RISCVII::hasVecPolicyOp(MaskedMCID.TSFlags)) {
    TailPolicyOpIdx = getVecPolicyOpIdx(N, MaskedMCID);
    if (!(N->getConstantOperandVal(*TailPolicyOpIdx) &
          RISCVII::TAIL_AGNOSTIC)) {
      // Keep the true-masked instruction when there is no unmasked TU
      // instruction.
      if (I->UnmaskedTUPseudo == I->MaskedPseudo && !N->getOperand(0).isUndef())
        return false;
      // We can't use TA if the tie-operand is not IMPLICIT_DEF.
      if (!N->getOperand(0).isUndef())
        IsTA = false;
    }
  }

  unsigned Opc = IsTA ? I->UnmaskedPseudo : I->UnmaskedTUPseudo;

  // Check that we're dropping the mask operand and any policy operand
  // when we transform to this unmasked pseudo. Additionally, if this
  // instruction is tail agnostic, the unmasked instruction should not have a
  // merge op.
  uint64_t TSFlags = TII.get(Opc).TSFlags;
  assert((IsTA != RISCVII::hasMergeOp(TSFlags)) &&
         RISCVII::hasDummyMaskOp(TSFlags) &&
         !RISCVII::hasVecPolicyOp(TSFlags) &&
         "Unexpected pseudo to transform to");
  (void)TSFlags;

  SmallVector<SDValue, 8> Ops;
  // Skip the merge operand at index 0 if IsTA.
  for (unsigned I = IsTA, E = N->getNumOperands(); I != E; I++) {
    // Skip the mask, the policy, and the Glue.
    SDValue Op = N->getOperand(I);
    if (I == MaskOpIdx || I == TailPolicyOpIdx ||
        Op.getValueType() == MVT::Glue)
      continue;
    Ops.push_back(Op);
  }

  // Transitively apply any node glued to our new node.
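  // N's glue comes from the CopyToReg that defines V0. That glue operand was
  // dropped above along with the mask, so carry over anything that was glued
  // to the CopyToReg itself.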
  const auto *Glued = N->getGluedNode();
  if (auto *TGlued = Glued->getGluedNode())
    Ops.push_back(SDValue(TGlued, TGlued->getNumValues() - 1));

  SDNode *Result = CurDAG->getMachineNode(Opc, SDLoc(N), N->getVTList(), Ops);
  Result->setFlags(N->getFlags());
  ReplaceUses(N, Result);

  return true;
}

// Try to fold a VMERGE_VVM with an unmasked intrinsic into a masked intrinsic.
// Both the TA and TU forms of VMERGE_VVM are handled; for the TU form, the
// peephole currently requires that the merge operand be the same as the false
// operand. E.g. (VMERGE_VVM_M1_TU False, False, (VADD_M1 ...),
// ...) -> (VADD_VV_M1_MASK)
bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N, bool IsTA) {
  unsigned Offset = IsTA ? 0 : 1;
  uint64_t Policy = IsTA ? RISCVII::TAIL_AGNOSTIC : /*TUMU*/ 0;
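  // The TU form carries a merge operand at index 0, so the remaining operands
  // are shifted down by one relative to the TA form.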
  SDValue False = N->getOperand(0 + Offset);
  SDValue True = N->getOperand(1 + Offset);
  SDValue Mask = N->getOperand(2 + Offset);
  SDValue VL = N->getOperand(3 + Offset);

  assert(True.getResNo() == 0 &&
         "Expect True is the first output of an instruction.");

  // N must be the only user of True.
  if (!True.hasOneUse())
    return false;

  if (!True.isMachineOpcode())
    return false;

  unsigned TrueOpc = True.getMachineOpcode();

  // Skip if True has a merge operand.
  // TODO: Deal with True having the same merge operand as N.
  if (RISCVII::hasMergeOp(TII->get(TrueOpc).TSFlags))
    return false;

  // Skip if True has side effects.
  // TODO: Support vleff and vlsegff.
  if (TII->get(TrueOpc).hasUnmodeledSideEffects())
    return false;

  // Only deal with True when it is an unmasked intrinsic, for now.
  const RISCV::RISCVMaskedPseudoInfo *Info =
      RISCV::lookupMaskedIntrinsicByUnmaskedTA(TrueOpc);
  if (!Info)
    return false;

  // The last operand of an unmasked intrinsic should be SEW or the chain.
  bool HasChainOp =
      True.getOperand(True.getNumOperands() - 1).getValueType() == MVT::Other;

  if (HasChainOp) {
    // Avoid creating cycles in the DAG. We must ensure that none of the other
    // operands depend on True through its Chain.
    SmallVector<const SDNode *, 4> LoopWorklist;
    SmallPtrSet<const SDNode *, 16> Visited;
    LoopWorklist.push_back(False.getNode());
    LoopWorklist.push_back(Mask.getNode());
    LoopWorklist.push_back(VL.getNode());
    if (SDNode *Glued = N->getGluedNode())
      LoopWorklist.push_back(Glued);
    if (SDNode::hasPredecessorHelper(True.getNode(), Visited, LoopWorklist))
      return false;
  }

  // True must have the same VL as N.
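  // The operands of an unmasked pseudo end with (..., vl, sew[, chain]), so
  // step back over the SEW operand and any chain to locate VL.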
  unsigned TrueVLIndex = True.getNumOperands() - HasChainOp - 2;
  SDValue TrueVL = True.getOperand(TrueVLIndex);

  auto IsNoFPExcept = [this](SDValue N) {
    return !this->mayRaiseFPException(N.getNode()) ||
           N->getFlags().hasNoFPExcept();
  };

  // Allow the peephole for non-exception True with VLMAX vector length, since
  // all the result elements past the VL of N come from the merge operand.
  // VLMAX should be lowered to (XLenVT -1).
  if (TrueVL != VL && !(IsNoFPExcept(True) && isAllOnesConstant(TrueVL)))
    return false;

  SDLoc DL(N);
  unsigned MaskedOpc = Info->MaskedPseudo;
  assert(RISCVII::hasVecPolicyOp(TII->get(MaskedOpc).TSFlags) &&
         "Expected instructions with mask have policy operand.");
  assert(RISCVII::hasMergeOp(TII->get(MaskedOpc).TSFlags) &&
         "Expected instructions with mask have merge operand.");

  SmallVector<SDValue, 8> Ops;
  Ops.push_back(False);
  Ops.append(True->op_begin(), True->op_begin() + TrueVLIndex);
  Ops.append({Mask, VL, /* SEW */ True.getOperand(TrueVLIndex + 1)});
  Ops.push_back(CurDAG->getTargetConstant(Policy, DL, Subtarget->getXLenVT()));

  // The result node should take the chain operand of True.
  if (HasChainOp)
    Ops.push_back(True.getOperand(True.getNumOperands() - 1));

  // The result node should take over the glued node of N.
  if (N->getGluedNode())
    Ops.push_back(N->getOperand(N->getNumOperands() - 1));

  SDNode *Result =
      CurDAG->getMachineNode(MaskedOpc, DL, True->getVTList(), Ops);
  Result->setFlags(True->getFlags());

  // Replace the vmerge.vvm node with Result.
  ReplaceUses(SDValue(N, 0), SDValue(Result, 0));
  // Replace the other results of True, e.g. the chain and VL outputs.
  for (unsigned Idx = 1; Idx < True->getNumValues(); ++Idx)
    ReplaceUses(True.getValue(Idx), SDValue(Result, Idx));

  // Try to transform Result to an unmasked intrinsic.
  doPeepholeMaskedRVV(Result);
  return true;
}

// Transform (VMERGE_VVM_<LMUL>_TU false, false, true, allones, vl, sew) to
// (VADD_VI_<LMUL>_TU false, true, 0, vl, sew). It may decrease uses of VMSET.
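// With an all-ones mask the vmerge selects its true operand for every body
// element, and vadd.vi with immediate 0 copies that same value while the TU
// semantics keep the false operand in the tail. Crucially, the vadd does not
// consume V0, so the VMSET feeding it may become dead.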
bool RISCVDAGToDAGISel::performVMergeToVAdd(SDNode *N) {
  unsigned NewOpc;
  switch (N->getMachineOpcode()) {
  default:
    llvm_unreachable("Expected VMERGE_VVM_<LMUL>_TU instruction.");
  case RISCV::PseudoVMERGE_VVM_MF8_TU:
    NewOpc = RISCV::PseudoVADD_VI_MF8_TU;
    break;
  case RISCV::PseudoVMERGE_VVM_MF4_TU:
    NewOpc = RISCV::PseudoVADD_VI_MF4_TU;
    break;
  case RISCV::PseudoVMERGE_VVM_MF2_TU:
    NewOpc = RISCV::PseudoVADD_VI_MF2_TU;
    break;
  case RISCV::PseudoVMERGE_VVM_M1_TU:
    NewOpc = RISCV::PseudoVADD_VI_M1_TU;
    break;
  case RISCV::PseudoVMERGE_VVM_M2_TU:
    NewOpc = RISCV::PseudoVADD_VI_M2_TU;
    break;
  case RISCV::PseudoVMERGE_VVM_M4_TU:
    NewOpc = RISCV::PseudoVADD_VI_M4_TU;
    break;
  case RISCV::PseudoVMERGE_VVM_M8_TU:
    NewOpc = RISCV::PseudoVADD_VI_M8_TU;
    break;
  }

  if (!usesAllOnesMask(N, /* MaskOpIdx */ 3))
    return false;

  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  SDValue Ops[] = {N->getOperand(1), N->getOperand(2),
                   CurDAG->getTargetConstant(0, DL, Subtarget->getXLenVT()),
                   N->getOperand(4), N->getOperand(5)};
  SDNode *Result = CurDAG->getMachineNode(NewOpc, DL, VT, Ops);
  ReplaceUses(N, Result);
  return true;
}

bool RISCVDAGToDAGISel::doPeepholeMergeVVMFold() {
  bool MadeChange = false;
  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();

  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    if (N->use_empty() || !N->isMachineOpcode())
      continue;

    auto IsVMergeTU = [](unsigned Opcode) {
      return Opcode == RISCV::PseudoVMERGE_VVM_MF8_TU ||
             Opcode == RISCV::PseudoVMERGE_VVM_MF4_TU ||
             Opcode == RISCV::PseudoVMERGE_VVM_MF2_TU ||
             Opcode == RISCV::PseudoVMERGE_VVM_M1_TU ||
             Opcode == RISCV::PseudoVMERGE_VVM_M2_TU ||
             Opcode == RISCV::PseudoVMERGE_VVM_M4_TU ||
             Opcode == RISCV::PseudoVMERGE_VVM_M8_TU;
    };

    auto IsVMergeTA = [](unsigned Opcode) {
      return Opcode == RISCV::PseudoVMERGE_VVM_MF8 ||
             Opcode == RISCV::PseudoVMERGE_VVM_MF4 ||
             Opcode == RISCV::PseudoVMERGE_VVM_MF2 ||
             Opcode == RISCV::PseudoVMERGE_VVM_M1 ||
             Opcode == RISCV::PseudoVMERGE_VVM_M2 ||
             Opcode == RISCV::PseudoVMERGE_VVM_M4 ||
             Opcode == RISCV::PseudoVMERGE_VVM_M8;
    };

    unsigned Opc = N->getMachineOpcode();
    // The following optimizations require that the merge operand of N is the
    // same as the false operand of N.
    if ((IsVMergeTU(Opc) && N->getOperand(0) == N->getOperand(1)) ||
        IsVMergeTA(Opc))
      MadeChange |= performCombineVMergeAndVOps(N, IsVMergeTA(Opc));
    if (IsVMergeTU(Opc) && N->getOperand(0) == N->getOperand(1))
      MadeChange |= performVMergeToVAdd(N);
  }
  return MadeChange;
}

// This pass converts a legalized DAG into a RISCV-specific DAG, ready
// for instruction scheduling.
FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM,
                                       CodeGenOpt::Level OptLevel) {
  return new RISCVDAGToDAGISel(TM, OptLevel);
}

char RISCVDAGToDAGISel::ID = 0;

INITIALIZE_PASS(RISCVDAGToDAGISel, DEBUG_TYPE, PASS_NAME, false, false)