RISCVInstrInfo.td

  1. //===-- RISCVInstrInfo.td - Target Description for RISCV ---*- tablegen -*-===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file describes the RISC-V instructions in TableGen format.
  10. //
  11. //===----------------------------------------------------------------------===//
  12. //===----------------------------------------------------------------------===//
  13. // RISC-V specific DAG Nodes.
  14. //===----------------------------------------------------------------------===//
  15. // Target-independent type requirements, but with target-specific formats.
  16. def SDT_CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>,
  17. SDTCisVT<1, i32>]>;
  18. def SDT_CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>,
  19. SDTCisVT<1, i32>]>;
  20. // Target-dependent type requirements.
  21. def SDT_RISCVCall : SDTypeProfile<0, -1, [SDTCisVT<0, XLenVT>]>;
  22. def SDT_RISCVSelectCC : SDTypeProfile<1, 5, [SDTCisSameAs<1, 2>,
  23. SDTCisVT<3, OtherVT>,
  24. SDTCisSameAs<0, 4>,
  25. SDTCisSameAs<4, 5>]>;
  26. def SDT_RISCVBrCC : SDTypeProfile<0, 4, [SDTCisSameAs<0, 1>,
  27. SDTCisVT<2, OtherVT>,
  28. SDTCisVT<3, OtherVT>]>;
  29. def SDT_RISCVReadCSR : SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisInt<1>]>;
  30. def SDT_RISCVWriteCSR : SDTypeProfile<0, 2, [SDTCisInt<0>, SDTCisInt<1>]>;
  31. def SDT_RISCVSwapCSR : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
  32. SDTCisInt<2>]>;
  33. def SDT_RISCVReadCycleWide : SDTypeProfile<2, 0, [SDTCisVT<0, i32>,
  34. SDTCisVT<1, i32>]>;
  35. def SDT_RISCVIntUnaryOpW : SDTypeProfile<1, 1, [
  36. SDTCisSameAs<0, 1>, SDTCisVT<0, i64>
  37. ]>;
  38. def SDT_RISCVIntBinOpW : SDTypeProfile<1, 2, [
  39. SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisVT<0, i64>
  40. ]>;
  41. def SDT_RISCVIntShiftDOpW : SDTypeProfile<1, 3, [
  42. SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisVT<0, i64>, SDTCisVT<3, i64>
  43. ]>;
  44. // Target-independent nodes, but with target-specific formats.
  45. def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_CallSeqStart,
  46. [SDNPHasChain, SDNPOutGlue]>;
  47. def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_CallSeqEnd,
  48. [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
  49. // Target-dependent nodes.
  50. def riscv_call : SDNode<"RISCVISD::CALL", SDT_RISCVCall,
  51. [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
  52. SDNPVariadic]>;
  53. def riscv_ret_flag : SDNode<"RISCVISD::RET_FLAG", SDTNone,
  54. [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
  55. def riscv_uret_flag : SDNode<"RISCVISD::URET_FLAG", SDTNone,
  56. [SDNPHasChain, SDNPOptInGlue]>;
  57. def riscv_sret_flag : SDNode<"RISCVISD::SRET_FLAG", SDTNone,
  58. [SDNPHasChain, SDNPOptInGlue]>;
  59. def riscv_mret_flag : SDNode<"RISCVISD::MRET_FLAG", SDTNone,
  60. [SDNPHasChain, SDNPOptInGlue]>;
  61. def riscv_selectcc : SDNode<"RISCVISD::SELECT_CC", SDT_RISCVSelectCC>;
  62. def riscv_brcc : SDNode<"RISCVISD::BR_CC", SDT_RISCVBrCC,
  63. [SDNPHasChain]>;
  64. def riscv_tail : SDNode<"RISCVISD::TAIL", SDT_RISCVCall,
  65. [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
  66. SDNPVariadic]>;
  67. def riscv_sllw : SDNode<"RISCVISD::SLLW", SDT_RISCVIntBinOpW>;
  68. def riscv_sraw : SDNode<"RISCVISD::SRAW", SDT_RISCVIntBinOpW>;
  69. def riscv_srlw : SDNode<"RISCVISD::SRLW", SDT_RISCVIntBinOpW>;
  70. def riscv_read_csr : SDNode<"RISCVISD::READ_CSR", SDT_RISCVReadCSR,
  71. [SDNPHasChain]>;
  72. def riscv_write_csr : SDNode<"RISCVISD::WRITE_CSR", SDT_RISCVWriteCSR,
  73. [SDNPHasChain]>;
  74. def riscv_swap_csr : SDNode<"RISCVISD::SWAP_CSR", SDT_RISCVSwapCSR,
  75. [SDNPHasChain]>;
  76. def riscv_read_cycle_wide : SDNode<"RISCVISD::READ_CYCLE_WIDE",
  77. SDT_RISCVReadCycleWide,
  78. [SDNPHasChain, SDNPSideEffect]>;
  79. def riscv_add_lo : SDNode<"RISCVISD::ADD_LO", SDTIntBinOp>;
  80. def riscv_hi : SDNode<"RISCVISD::HI", SDTIntUnaryOp>;
  81. def riscv_lla : SDNode<"RISCVISD::LLA", SDTIntUnaryOp>;
  82. def riscv_add_tprel : SDNode<"RISCVISD::ADD_TPREL",
  83. SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
  84. SDTCisSameAs<0, 2>,
  85. SDTCisSameAs<0, 3>,
  86. SDTCisInt<0>]>>;
  87. def riscv_la : SDNode<"RISCVISD::LA", SDTLoad,
  88. [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
  89. def riscv_la_tls_ie : SDNode<"RISCVISD::LA_TLS_IE", SDTLoad,
  90. [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
  91. def riscv_la_tls_gd : SDNode<"RISCVISD::LA_TLS_GD", SDTIntUnaryOp>;
  92. //===----------------------------------------------------------------------===//
  93. // Operand and SDNode transformation definitions.
  94. //===----------------------------------------------------------------------===//
  95. class ImmXLenAsmOperand<string prefix, string suffix = ""> : AsmOperandClass {
  96. let Name = prefix # "ImmXLen" # suffix;
  97. let RenderMethod = "addImmOperands";
  98. let DiagnosticType = !strconcat("Invalid", Name);
  99. }
  100. class ImmAsmOperand<string prefix, int width, string suffix> : AsmOperandClass {
  101. let Name = prefix # "Imm" # width # suffix;
  102. let RenderMethod = "addImmOperands";
  103. let DiagnosticType = !strconcat("Invalid", Name);
  104. }
  105. def ImmZeroAsmOperand : AsmOperandClass {
  106. let Name = "ImmZero";
  107. let RenderMethod = "addImmOperands";
  108. let DiagnosticType = !strconcat("Invalid", Name);
  109. }
  110. // A parse method for (${gpr}) or 0(${gpr}), where the 0 is silently ignored.
  111. def ZeroOffsetMemOpOperand : AsmOperandClass {
  112. let Name = "ZeroOffsetMemOpOperand";
  113. let RenderMethod = "addRegOperands";
  114. let PredicateMethod = "isGPR";
  115. let ParserMethod = "parseZeroOffsetMemOp";
  116. }
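// Illustrative note (not from the original file): with this operand class the
// assembler accepts both "hlv.w a0, (a1)" and "hlv.w a0, 0(a1)" and produces
// the same instruction; any non-zero offset is rejected.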
  117. class MemOperand<RegisterClass regClass> : RegisterOperand<regClass>{
  118. let OperandType = "OPERAND_MEMORY";
  119. }
  120. def GPRMemZeroOffset : MemOperand<GPR> {
  121. let ParserMatchClass = ZeroOffsetMemOpOperand;
  122. let PrintMethod = "printZeroOffsetMemOp";
  123. }
  124. def GPRMem : MemOperand<GPR>;
  125. def SPMem : MemOperand<SP>;
  126. def GPRCMem : MemOperand<GPRC>;
  127. class SImmAsmOperand<int width, string suffix = "">
  128. : ImmAsmOperand<"S", width, suffix> {
  129. }
  130. class UImmAsmOperand<int width, string suffix = "">
  131. : ImmAsmOperand<"U", width, suffix> {
  132. }
  133. def FenceArg : AsmOperandClass {
  134. let Name = "FenceArg";
  135. let RenderMethod = "addFenceArgOperands";
  136. let DiagnosticType = "InvalidFenceArg";
  137. }
  138. def fencearg : Operand<XLenVT> {
  139. let ParserMatchClass = FenceArg;
  140. let PrintMethod = "printFenceArg";
  141. let DecoderMethod = "decodeUImmOperand<4>";
  142. let OperandType = "OPERAND_UIMM4";
  143. let OperandNamespace = "RISCVOp";
  144. }
  145. def UImmLog2XLenAsmOperand : AsmOperandClass {
  146. let Name = "UImmLog2XLen";
  147. let RenderMethod = "addImmOperands";
  148. let DiagnosticType = "InvalidUImmLog2XLen";
  149. }
  150. def uimmlog2xlen : Operand<XLenVT>, ImmLeaf<XLenVT, [{
  151. if (Subtarget->is64Bit())
  152. return isUInt<6>(Imm);
  153. return isUInt<5>(Imm);
  154. }]> {
  155. let ParserMatchClass = UImmLog2XLenAsmOperand;
  156. // TODO: should ensure invalid shamt is rejected when decoding.
  157. let DecoderMethod = "decodeUImmOperand<6>";
  158. let MCOperandPredicate = [{
  159. int64_t Imm;
  160. if (!MCOp.evaluateAsConstantImm(Imm))
  161. return false;
  162. if (STI.getTargetTriple().isArch64Bit())
  163. return isUInt<6>(Imm);
  164. return isUInt<5>(Imm);
  165. }];
  166. let OperandType = "OPERAND_UIMMLOG2XLEN";
  167. let OperandNamespace = "RISCVOp";
  168. }
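// Illustrative note (not from the original file): this operand accepts shift
// amounts 0-31 on RV32 and 0-63 on RV64, so "slli a0, a0, 40" assembles only
// when targeting RV64.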
  169. def uimm2 : Operand<XLenVT> {
  170. let ParserMatchClass = UImmAsmOperand<2>;
  171. let DecoderMethod = "decodeUImmOperand<2>";
  172. let OperandType = "OPERAND_UIMM2";
  173. let OperandNamespace = "RISCVOp";
  174. }
  175. def uimm3 : Operand<XLenVT> {
  176. let ParserMatchClass = UImmAsmOperand<3>;
  177. let DecoderMethod = "decodeUImmOperand<3>";
  178. let OperandType = "OPERAND_UIMM3";
  179. let OperandNamespace = "RISCVOp";
  180. }
  181. def uimm5 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isUInt<5>(Imm);}]> {
  182. let ParserMatchClass = UImmAsmOperand<5>;
  183. let DecoderMethod = "decodeUImmOperand<5>";
  184. let OperandType = "OPERAND_UIMM5";
  185. let OperandNamespace = "RISCVOp";
  186. }
  187. def InsnDirectiveOpcode : AsmOperandClass {
  188. let Name = "InsnDirectiveOpcode";
  189. let ParserMethod = "parseInsnDirectiveOpcode";
  190. let RenderMethod = "addImmOperands";
  191. let PredicateMethod = "isImm";
  192. }
  193. def uimm7_opcode : Operand<XLenVT> {
  194. let ParserMatchClass = InsnDirectiveOpcode;
  195. let DecoderMethod = "decodeUImmOperand<7>";
  196. let OperandType = "OPERAND_UIMM7";
  197. let OperandNamespace = "RISCVOp";
  198. }
  199. def uimm7 : Operand<XLenVT> {
  200. let ParserMatchClass = UImmAsmOperand<7>;
  201. let DecoderMethod = "decodeUImmOperand<7>";
  202. let OperandType = "OPERAND_UIMM7";
  203. let OperandNamespace = "RISCVOp";
  204. }
  205. def simm12 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isInt<12>(Imm);}]> {
  206. let ParserMatchClass = SImmAsmOperand<12>;
  207. let EncoderMethod = "getImmOpValue";
  208. let DecoderMethod = "decodeSImmOperand<12>";
  209. let MCOperandPredicate = [{
  210. int64_t Imm;
  211. if (MCOp.evaluateAsConstantImm(Imm))
  212. return isInt<12>(Imm);
  213. return MCOp.isBareSymbolRef();
  214. }];
  215. let OperandType = "OPERAND_SIMM12";
  216. let OperandNamespace = "RISCVOp";
  217. }
  218. // A 12-bit signed immediate that does not fit in a 6-bit signed immediate
  219. // and whose negation also fits in 12 bits.
  220. def simm12_no6 : ImmLeaf<XLenVT, [{
  221. return isInt<12>(Imm) && !isInt<6>(Imm) && isInt<12>(-Imm);}]>;
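// Illustrative examples (not from the original file): 2047 matches (it needs
// 12 bits, not 6, and -2047 still fits in 12 bits); 31 is rejected because it
// fits in 6 bits; -2048 is rejected because its negation, 2048, does not fit
// in a signed 12-bit immediate.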
  222. // A 13-bit signed immediate where the least significant bit is zero.
  223. def simm13_lsb0 : Operand<OtherVT> {
  224. let ParserMatchClass = SImmAsmOperand<13, "Lsb0">;
  225. let PrintMethod = "printBranchOperand";
  226. let EncoderMethod = "getImmOpValueAsr1";
  227. let DecoderMethod = "decodeSImmOperandAndLsl1<13>";
  228. let MCOperandPredicate = [{
  229. int64_t Imm;
  230. if (MCOp.evaluateAsConstantImm(Imm))
  231. return isShiftedInt<12, 1>(Imm);
  232. return MCOp.isBareSymbolRef();
  233. }];
  234. let OperandType = "OPERAND_PCREL";
  235. }
  236. class UImm20Operand : Operand<XLenVT> {
  237. let EncoderMethod = "getImmOpValue";
  238. let DecoderMethod = "decodeUImmOperand<20>";
  239. let MCOperandPredicate = [{
  240. int64_t Imm;
  241. if (MCOp.evaluateAsConstantImm(Imm))
  242. return isUInt<20>(Imm);
  243. return MCOp.isBareSymbolRef();
  244. }];
  245. let OperandType = "OPERAND_UIMM20";
  246. let OperandNamespace = "RISCVOp";
  247. }
  248. def uimm20_lui : UImm20Operand {
  249. let ParserMatchClass = UImmAsmOperand<20, "LUI">;
  250. }
  251. def uimm20_auipc : UImm20Operand {
  252. let ParserMatchClass = UImmAsmOperand<20, "AUIPC">;
  253. }
  254. def Simm21Lsb0JALAsmOperand : SImmAsmOperand<21, "Lsb0JAL"> {
  255. let ParserMethod = "parseJALOffset";
  256. }
  257. // A 21-bit signed immediate where the least significant bit is zero.
  258. def simm21_lsb0_jal : Operand<OtherVT> {
  259. let ParserMatchClass = Simm21Lsb0JALAsmOperand;
  260. let PrintMethod = "printBranchOperand";
  261. let EncoderMethod = "getImmOpValueAsr1";
  262. let DecoderMethod = "decodeSImmOperandAndLsl1<21>";
  263. let MCOperandPredicate = [{
  264. int64_t Imm;
  265. if (MCOp.evaluateAsConstantImm(Imm))
  266. return isShiftedInt<20, 1>(Imm);
  267. return MCOp.isBareSymbolRef();
  268. }];
  269. let OperandType = "OPERAND_PCREL";
  270. }
  271. def BareSymbol : AsmOperandClass {
  272. let Name = "BareSymbol";
  273. let RenderMethod = "addImmOperands";
  274. let DiagnosticType = "InvalidBareSymbol";
  275. let ParserMethod = "parseBareSymbol";
  276. }
  277. // A bare symbol.
  278. def bare_symbol : Operand<XLenVT> {
  279. let ParserMatchClass = BareSymbol;
  280. }
  281. def CallSymbol : AsmOperandClass {
  282. let Name = "CallSymbol";
  283. let RenderMethod = "addImmOperands";
  284. let DiagnosticType = "InvalidCallSymbol";
  285. let ParserMethod = "parseCallSymbol";
  286. }
  287. // A bare symbol used in call/tail only.
  288. def call_symbol : Operand<XLenVT> {
  289. let ParserMatchClass = CallSymbol;
  290. }
  291. def PseudoJumpSymbol : AsmOperandClass {
  292. let Name = "PseudoJumpSymbol";
  293. let RenderMethod = "addImmOperands";
  294. let DiagnosticType = "InvalidPseudoJumpSymbol";
  295. let ParserMethod = "parsePseudoJumpSymbol";
  296. }
  297. // A bare symbol used for pseudo jumps only.
  298. def pseudo_jump_symbol : Operand<XLenVT> {
  299. let ParserMatchClass = PseudoJumpSymbol;
  300. }
  301. def TPRelAddSymbol : AsmOperandClass {
  302. let Name = "TPRelAddSymbol";
  303. let RenderMethod = "addImmOperands";
  304. let DiagnosticType = "InvalidTPRelAddSymbol";
  305. let ParserMethod = "parseOperandWithModifier";
  306. }
  307. // A bare symbol with the %tprel_add variant.
  308. def tprel_add_symbol : Operand<XLenVT> {
  309. let ParserMatchClass = TPRelAddSymbol;
  310. }
  311. def CSRSystemRegister : AsmOperandClass {
  312. let Name = "CSRSystemRegister";
  313. let ParserMethod = "parseCSRSystemRegister";
  314. let DiagnosticType = "InvalidCSRSystemRegister";
  315. }
  316. def csr_sysreg : Operand<XLenVT> {
  317. let ParserMatchClass = CSRSystemRegister;
  318. let PrintMethod = "printCSRSystemRegister";
  319. let DecoderMethod = "decodeUImmOperand<12>";
  320. let OperandType = "OPERAND_UIMM12";
  321. let OperandNamespace = "RISCVOp";
  322. }
  323. // A parameterized register class alternative to i32imm/i64imm from Target.td.
  324. def ixlenimm : Operand<XLenVT>;
  325. def ixlenimm_li : Operand<XLenVT> {
  326. let ParserMatchClass = ImmXLenAsmOperand<"", "LI">;
  327. }
  328. // Standalone (codegen-only) immleaf patterns.
  329. // A 12-bit signed immediate plus one where the imm range will be [-2047, 2048].
  330. def simm12_plus1 : ImmLeaf<XLenVT,
  331. [{return (isInt<12>(Imm) && Imm != -2048) || Imm == 2048;}]>;
  332. // A 6-bit constant greater than 32.
  333. def uimm6gt32 : ImmLeaf<XLenVT, [{
  334. return isUInt<6>(Imm) && Imm > 32;
  335. }]>;
  336. // Addressing modes.
  337. // Necessary because a frameindex can't be matched directly in a pattern.
  338. def FrameAddrRegImm : ComplexPattern<iPTR, 2, "SelectFrameAddrRegImm",
  339. [frameindex, or, add]>;
  340. def AddrRegImm : ComplexPattern<iPTR, 2, "SelectAddrRegImm">;
  341. // Return the negation of an immediate value.
  342. def NegImm : SDNodeXForm<imm, [{
  343. return CurDAG->getTargetConstant(-N->getSExtValue(), SDLoc(N),
  344. N->getValueType(0));
  345. }]>;
  346. // Return an immediate value minus 32.
  347. def ImmSub32 : SDNodeXForm<imm, [{
  348. return CurDAG->getTargetConstant(N->getSExtValue() - 32, SDLoc(N),
  349. N->getValueType(0));
  350. }]>;
  351. // Return an immediate subtracted from XLen.
  352. def ImmSubFromXLen : SDNodeXForm<imm, [{
  353. uint64_t XLen = Subtarget->getXLen();
  354. return CurDAG->getTargetConstant(XLen - N->getZExtValue(), SDLoc(N),
  355. N->getValueType(0));
  356. }]>;
  357. // Return an immediate subtracted from 32.
  358. def ImmSubFrom32 : SDNodeXForm<imm, [{
  359. return CurDAG->getTargetConstant(32 - N->getZExtValue(), SDLoc(N),
  360. N->getValueType(0));
  361. }]>;
  362. // Check if (add r, imm) can be optimized to (ADDI (ADDI r, imm0), imm1),
  363. // in which imm = imm0 + imm1 and both imm0 and imm1 are simm12. We make imm0
  364. // as large as possible and imm1 as small as possible so that we might be able
  365. // to use c.addi for the small immediate.
  366. def AddiPair : PatLeaf<(imm), [{
  367. if (!N->hasOneUse())
  368. return false;
  369. // The immediate operand must be in range [-4096,-2049] or [2048,4094].
  370. int64_t Imm = N->getSExtValue();
  371. return (-4096 <= Imm && Imm <= -2049) || (2048 <= Imm && Imm <= 4094);
  372. }]>;
  373. // Return imm - (imm < 0 ? -2048 : 2047).
  374. def AddiPairImmSmall : SDNodeXForm<imm, [{
  375. int64_t Imm = N->getSExtValue();
  376. int64_t Adj = N->getSExtValue() < 0 ? -2048 : 2047;
  377. return CurDAG->getTargetConstant(Imm - Adj, SDLoc(N),
  378. N->getValueType(0));
  379. }]>;
  380. // Return -2048 if the immediate is negative or 2047 if positive. These are
  381. // the simm12 values with the largest magnitude.
  382. def AddiPairImmLarge : SDNodeXForm<imm, [{
  383. int64_t Imm = N->getSExtValue() < 0 ? -2048 : 2047;
  384. return CurDAG->getTargetConstant(Imm, SDLoc(N),
  385. N->getValueType(0));
  386. }]>;
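// Illustrative example (not from the original file): imm = 4000 is an
// AddiPair and splits into 2047 (AddiPairImmLarge) plus 1953
// (AddiPairImmSmall); imm = -3000 splits into -2048 plus -952.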
  387. def TrailingZeros : SDNodeXForm<imm, [{
  388. return CurDAG->getTargetConstant(countTrailingZeros(N->getZExtValue()),
  389. SDLoc(N), N->getValueType(0));
  390. }]>;
  391. def XLenSubTrailingOnes : SDNodeXForm<imm, [{
  392. uint64_t XLen = Subtarget->getXLen();
  393. uint64_t TrailingOnes = countTrailingOnes(N->getZExtValue());
  394. return CurDAG->getTargetConstant(XLen - TrailingOnes, SDLoc(N),
  395. N->getValueType(0));
  396. }]>;
  397. // Checks if this mask is a non-empty sequence of ones starting at the
  398. // most/least significant bit with the remainder zero and exceeds simm32/simm12.
  399. def LeadingOnesMask : PatLeaf<(imm), [{
  400. if (!N->hasOneUse())
  401. return false;
  402. return !isInt<32>(N->getSExtValue()) && isMask_64(~N->getSExtValue());
  403. }], TrailingZeros>;
  404. def TrailingOnesMask : PatLeaf<(imm), [{
  405. if (!N->hasOneUse())
  406. return false;
  407. return !isInt<12>(N->getSExtValue()) && isMask_64(N->getZExtValue());
  408. }], XLenSubTrailingOnes>;
  409. // Similar to LeadingOnesMask, but only consider leading ones in the lower 32
  410. // bits.
  411. def LeadingOnesWMask : PatLeaf<(imm), [{
  412. if (!N->hasOneUse())
  413. return false;
  414. // If the value is a uint32 but not an int32, it must have bit 31 set and
  415. // bits 63:32 cleared. After that we're looking for a shifted mask but not
  416. // an all ones mask.
  417. int64_t Imm = N->getSExtValue();
  418. return !isInt<32>(Imm) && isUInt<32>(Imm) && isShiftedMask_64(Imm) &&
  419. Imm != UINT64_C(0xffffffff);
  420. }], TrailingZeros>;
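// Illustrative examples (not from the original file): 0xffffffff00000000 is a
// LeadingOnesMask (32 leading ones), 0x0000000000ffffff is a TrailingOnesMask
// (24 trailing ones), and 0x00000000fff00000 is a LeadingOnesWMask (leading
// ones within the low 32 bits, with bit 31 set).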
  421. //===----------------------------------------------------------------------===//
  422. // Instruction Formats
  423. //===----------------------------------------------------------------------===//
  424. include "RISCVInstrFormats.td"
  425. //===----------------------------------------------------------------------===//
  426. // Instruction Class Templates
  427. //===----------------------------------------------------------------------===//
  428. let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
  429. class BranchCC_rri<bits<3> funct3, string opcodestr>
  430. : RVInstB<funct3, OPC_BRANCH, (outs),
  431. (ins GPR:$rs1, GPR:$rs2, simm13_lsb0:$imm12),
  432. opcodestr, "$rs1, $rs2, $imm12">,
  433. Sched<[WriteJmp, ReadJmp, ReadJmp]> {
  434. let isBranch = 1;
  435. let isTerminator = 1;
  436. }
  437. let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
  438. class Load_ri<bits<3> funct3, string opcodestr>
  439. : RVInstI<funct3, OPC_LOAD, (outs GPR:$rd), (ins GPRMem:$rs1, simm12:$imm12),
  440. opcodestr, "$rd, ${imm12}(${rs1})">;
  441. class HLoad_r<bits<7> funct7, bits<5> funct5, string opcodestr>
  442. : RVInstR<funct7, 0b100, OPC_SYSTEM, (outs GPR:$rd),
  443. (ins GPRMemZeroOffset:$rs1), opcodestr, "$rd, $rs1"> {
  444. let rs2 = funct5;
  445. }
  446. }
  447. // Operands for stores are in the order srcreg, base, offset rather than
  448. // reflecting the order these fields are specified in the instruction
  449. // encoding.
  450. let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
  451. class Store_rri<bits<3> funct3, string opcodestr>
  452. : RVInstS<funct3, OPC_STORE, (outs),
  453. (ins GPR:$rs2, GPRMem:$rs1, simm12:$imm12),
  454. opcodestr, "$rs2, ${imm12}(${rs1})">;
  455. class HStore_rr<bits<7> funct7, string opcodestr>
  456. : RVInstR<funct7, 0b100, OPC_SYSTEM, (outs),
  457. (ins GPR:$rs2, GPRMemZeroOffset:$rs1),
  458. opcodestr, "$rs2, $rs1"> {
  459. let rd = 0;
  460. }
  461. }
  462. let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
  463. class ALU_ri<bits<3> funct3, string opcodestr>
  464. : RVInstI<funct3, OPC_OP_IMM, (outs GPR:$rd), (ins GPR:$rs1, simm12:$imm12),
  465. opcodestr, "$rd, $rs1, $imm12">,
  466. Sched<[WriteIALU, ReadIALU]>;
  467. let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
  468. class Shift_ri<bits<5> imm11_7, bits<3> funct3, string opcodestr>
  469. : RVInstIShift<imm11_7, funct3, OPC_OP_IMM, (outs GPR:$rd),
  470. (ins GPR:$rs1, uimmlog2xlen:$shamt), opcodestr,
  471. "$rd, $rs1, $shamt">,
  472. Sched<[WriteShiftImm, ReadShiftImm]>;
  473. let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
  474. class ALU_rr<bits<7> funct7, bits<3> funct3, string opcodestr,
  475. bit Commutable = 0>
  476. : RVInstR<funct7, funct3, OPC_OP, (outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2),
  477. opcodestr, "$rd, $rs1, $rs2"> {
  478. let isCommutable = Commutable;
  479. }
  480. let hasNoSchedulingInfo = 1,
  481. hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
  482. class CSR_ir<bits<3> funct3, string opcodestr>
  483. : RVInstI<funct3, OPC_SYSTEM, (outs GPR:$rd), (ins csr_sysreg:$imm12, GPR:$rs1),
  484. opcodestr, "$rd, $imm12, $rs1">, Sched<[WriteCSR, ReadCSR]>;
  485. let hasNoSchedulingInfo = 1,
  486. hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
  487. class CSR_ii<bits<3> funct3, string opcodestr>
  488. : RVInstI<funct3, OPC_SYSTEM, (outs GPR:$rd),
  489. (ins csr_sysreg:$imm12, uimm5:$rs1),
  490. opcodestr, "$rd, $imm12, $rs1">, Sched<[WriteCSR]>;
  491. let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
  492. class ShiftW_ri<bits<7> imm11_5, bits<3> funct3, string opcodestr>
  493. : RVInstIShiftW<imm11_5, funct3, OPC_OP_IMM_32, (outs GPR:$rd),
  494. (ins GPR:$rs1, uimm5:$shamt), opcodestr,
  495. "$rd, $rs1, $shamt">,
  496. Sched<[WriteShiftImm32, ReadShiftImm32]>;
  497. let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
  498. class ALUW_rr<bits<7> funct7, bits<3> funct3, string opcodestr,
  499. bit Commutable = 0>
  500. : RVInstR<funct7, funct3, OPC_OP_32, (outs GPR:$rd),
  501. (ins GPR:$rs1, GPR:$rs2), opcodestr, "$rd, $rs1, $rs2"> {
  502. let isCommutable = Commutable;
  503. }
  504. let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
  505. class Priv<string opcodestr, bits<7> funct7>
  506. : RVInstR<funct7, 0b000, OPC_SYSTEM, (outs), (ins GPR:$rs1, GPR:$rs2),
  507. opcodestr, "">;
  508. let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
  509. class Priv_rr<string opcodestr, bits<7> funct7>
  510. : RVInstR<funct7, 0b000, OPC_SYSTEM, (outs), (ins GPR:$rs1, GPR:$rs2),
  511. opcodestr, "$rs1, $rs2"> {
  512. let rd = 0;
  513. }
  514. //===----------------------------------------------------------------------===//
  515. // Instructions
  516. //===----------------------------------------------------------------------===//
  517. let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
  518. let isReMaterializable = 1, isAsCheapAsAMove = 1,
  519. IsSignExtendingOpW = 1 in
  520. def LUI : RVInstU<OPC_LUI, (outs GPR:$rd), (ins uimm20_lui:$imm20),
  521. "lui", "$rd, $imm20">, Sched<[WriteIALU]>;
  522. def AUIPC : RVInstU<OPC_AUIPC, (outs GPR:$rd), (ins uimm20_auipc:$imm20),
  523. "auipc", "$rd, $imm20">, Sched<[WriteIALU]>;
  524. let isCall = 1 in
  525. def JAL : RVInstJ<OPC_JAL, (outs GPR:$rd), (ins simm21_lsb0_jal:$imm20),
  526. "jal", "$rd, $imm20">, Sched<[WriteJal]>;
  527. let isCall = 1 in
  528. def JALR : RVInstI<0b000, OPC_JALR, (outs GPR:$rd),
  529. (ins GPR:$rs1, simm12:$imm12),
  530. "jalr", "$rd, ${imm12}(${rs1})">,
  531. Sched<[WriteJalr, ReadJalr]>;
  532. } // hasSideEffects = 0, mayLoad = 0, mayStore = 0
  533. def BEQ : BranchCC_rri<0b000, "beq">;
  534. def BNE : BranchCC_rri<0b001, "bne">;
  535. def BLT : BranchCC_rri<0b100, "blt">;
  536. def BGE : BranchCC_rri<0b101, "bge">;
  537. def BLTU : BranchCC_rri<0b110, "bltu">;
  538. def BGEU : BranchCC_rri<0b111, "bgeu">;
  539. let IsSignExtendingOpW = 1 in {
  540. def LB : Load_ri<0b000, "lb">, Sched<[WriteLDB, ReadMemBase]>;
  541. def LH : Load_ri<0b001, "lh">, Sched<[WriteLDH, ReadMemBase]>;
  542. def LW : Load_ri<0b010, "lw">, Sched<[WriteLDW, ReadMemBase]>;
  543. def LBU : Load_ri<0b100, "lbu">, Sched<[WriteLDB, ReadMemBase]>;
  544. def LHU : Load_ri<0b101, "lhu">, Sched<[WriteLDH, ReadMemBase]>;
  545. }
  546. def SB : Store_rri<0b000, "sb">, Sched<[WriteSTB, ReadStoreData, ReadMemBase]>;
  547. def SH : Store_rri<0b001, "sh">, Sched<[WriteSTH, ReadStoreData, ReadMemBase]>;
  548. def SW : Store_rri<0b010, "sw">, Sched<[WriteSTW, ReadStoreData, ReadMemBase]>;
  549. // ADDI isn't always rematerializable, but isReMaterializable will be used as
  550. // a hint which is verified in isReallyTriviallyReMaterializable.
  551. let isReMaterializable = 1, isAsCheapAsAMove = 1 in
  552. def ADDI : ALU_ri<0b000, "addi">;
  553. let IsSignExtendingOpW = 1 in {
  554. def SLTI : ALU_ri<0b010, "slti">;
  555. def SLTIU : ALU_ri<0b011, "sltiu">;
  556. }
  557. let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
  558. def XORI : ALU_ri<0b100, "xori">;
  559. def ORI : ALU_ri<0b110, "ori">;
  560. }
  561. def ANDI : ALU_ri<0b111, "andi">;
  562. def SLLI : Shift_ri<0b00000, 0b001, "slli">;
  563. def SRLI : Shift_ri<0b00000, 0b101, "srli">;
  564. def SRAI : Shift_ri<0b01000, 0b101, "srai">;
  565. def ADD : ALU_rr<0b0000000, 0b000, "add", /*Commutable*/1>,
  566. Sched<[WriteIALU, ReadIALU, ReadIALU]>;
  567. def SUB : ALU_rr<0b0100000, 0b000, "sub">,
  568. Sched<[WriteIALU, ReadIALU, ReadIALU]>;
  569. def SLL : ALU_rr<0b0000000, 0b001, "sll">,
  570. Sched<[WriteShiftReg, ReadShiftReg, ReadShiftReg]>;
  571. let IsSignExtendingOpW = 1 in {
  572. def SLT : ALU_rr<0b0000000, 0b010, "slt">,
  573. Sched<[WriteIALU, ReadIALU, ReadIALU]>;
  574. def SLTU : ALU_rr<0b0000000, 0b011, "sltu">,
  575. Sched<[WriteIALU, ReadIALU, ReadIALU]>;
  576. }
  577. def XOR : ALU_rr<0b0000000, 0b100, "xor", /*Commutable*/1>,
  578. Sched<[WriteIALU, ReadIALU, ReadIALU]>;
  579. def SRL : ALU_rr<0b0000000, 0b101, "srl">,
  580. Sched<[WriteShiftReg, ReadShiftReg, ReadShiftReg]>;
  581. def SRA : ALU_rr<0b0100000, 0b101, "sra">,
  582. Sched<[WriteShiftReg, ReadShiftReg, ReadShiftReg]>;
  583. def OR : ALU_rr<0b0000000, 0b110, "or", /*Commutable*/1>,
  584. Sched<[WriteIALU, ReadIALU, ReadIALU]>;
  585. def AND : ALU_rr<0b0000000, 0b111, "and", /*Commutable*/1>,
  586. Sched<[WriteIALU, ReadIALU, ReadIALU]>;
  587. let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
  588. def FENCE : RVInstI<0b000, OPC_MISC_MEM, (outs),
  589. (ins fencearg:$pred, fencearg:$succ),
  590. "fence", "$pred, $succ">, Sched<[]> {
  591. bits<4> pred;
  592. bits<4> succ;
  593. let rs1 = 0;
  594. let rd = 0;
  595. let imm12 = {0b0000,pred,succ};
  596. }
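// Illustrative example (not from the original file): for "fence rw, w" the
// parser produces pred = 0b0011 (r|w) and succ = 0b0001 (w), so imm12 becomes
// 0b0000_0011_0001; fence.tso below hard-codes fm = 0b1000 with pred = succ = rw.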
  597. def FENCE_TSO : RVInstI<0b000, OPC_MISC_MEM, (outs), (ins), "fence.tso", "">, Sched<[]> {
  598. let rs1 = 0;
  599. let rd = 0;
  600. let imm12 = {0b1000,0b0011,0b0011};
  601. }
  602. def FENCE_I : RVInstI<0b001, OPC_MISC_MEM, (outs), (ins), "fence.i", "">, Sched<[]> {
  603. let rs1 = 0;
  604. let rd = 0;
  605. let imm12 = 0;
  606. }
  607. def ECALL : RVInstI<0b000, OPC_SYSTEM, (outs), (ins), "ecall", "">, Sched<[WriteJmp]> {
  608. let rs1 = 0;
  609. let rd = 0;
  610. let imm12 = 0;
  611. }
  612. def EBREAK : RVInstI<0b000, OPC_SYSTEM, (outs), (ins), "ebreak", "">,
  613. Sched<[]> {
  614. let rs1 = 0;
  615. let rd = 0;
  616. let imm12 = 1;
  617. }
  618. // This is a de facto standard (as set by GNU binutils) 32-bit unimplemented
  619. // instruction (i.e., it should always trap, if your implementation has invalid
  620. // instruction traps).
  621. def UNIMP : RVInstI<0b001, OPC_SYSTEM, (outs), (ins), "unimp", "">,
  622. Sched<[]> {
  623. let rs1 = 0;
  624. let rd = 0;
  625. let imm12 = 0b110000000000;
  626. }
  627. let Predicates = [HasStdExtZawrs] in {
  628. def WRS_NTO : RVInstI<0b000, OPC_SYSTEM, (outs), (ins), "wrs.nto", "">,
  629. Sched<[]> {
  630. let rs1 = 0;
  631. let rd = 0;
  632. let imm12 = 0b000000001101;
  633. }
  634. def WRS_STO : RVInstI<0b000, OPC_SYSTEM, (outs), (ins), "wrs.sto", "">,
  635. Sched<[]> {
  636. let rs1 = 0;
  637. let rd = 0;
  638. let imm12 = 0b000000011101;
  639. }
  640. } // Predicates = [HasStdExtZawrs]
  641. } // hasSideEffects = 1, mayLoad = 0, mayStore = 0
  642. def CSRRW : CSR_ir<0b001, "csrrw">;
  643. def CSRRS : CSR_ir<0b010, "csrrs">;
  644. def CSRRC : CSR_ir<0b011, "csrrc">;
  645. def CSRRWI : CSR_ii<0b101, "csrrwi">;
  646. def CSRRSI : CSR_ii<0b110, "csrrsi">;
  647. def CSRRCI : CSR_ii<0b111, "csrrci">;
  648. /// RV64I instructions
  649. let Predicates = [IsRV64] in {
  650. def LWU : Load_ri<0b110, "lwu">, Sched<[WriteLDW, ReadMemBase]>;
  651. def LD : Load_ri<0b011, "ld">, Sched<[WriteLDD, ReadMemBase]>;
  652. def SD : Store_rri<0b011, "sd">, Sched<[WriteSTD, ReadStoreData, ReadMemBase]>;
  653. let IsSignExtendingOpW = 1 in {
  654. let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
  655. def ADDIW : RVInstI<0b000, OPC_OP_IMM_32, (outs GPR:$rd),
  656. (ins GPR:$rs1, simm12:$imm12),
  657. "addiw", "$rd, $rs1, $imm12">,
  658. Sched<[WriteIALU32, ReadIALU32]>;
  659. def SLLIW : ShiftW_ri<0b0000000, 0b001, "slliw">;
  660. def SRLIW : ShiftW_ri<0b0000000, 0b101, "srliw">;
  661. def SRAIW : ShiftW_ri<0b0100000, 0b101, "sraiw">;
  662. def ADDW : ALUW_rr<0b0000000, 0b000, "addw", /*Commutable*/1>,
  663. Sched<[WriteIALU32, ReadIALU32, ReadIALU32]>;
  664. def SUBW : ALUW_rr<0b0100000, 0b000, "subw">,
  665. Sched<[WriteIALU32, ReadIALU32, ReadIALU32]>;
  666. def SLLW : ALUW_rr<0b0000000, 0b001, "sllw">,
  667. Sched<[WriteShiftReg32, ReadShiftReg32, ReadShiftReg32]>;
  668. def SRLW : ALUW_rr<0b0000000, 0b101, "srlw">,
  669. Sched<[WriteShiftReg32, ReadShiftReg32, ReadShiftReg32]>;
  670. def SRAW : ALUW_rr<0b0100000, 0b101, "sraw">,
  671. Sched<[WriteShiftReg32, ReadShiftReg32, ReadShiftReg32]>;
  672. } // IsSignExtendingOpW = 1
  673. } // Predicates = [IsRV64]
  674. //===----------------------------------------------------------------------===//
  675. // Privileged instructions
  676. //===----------------------------------------------------------------------===//
  677. let isBarrier = 1, isReturn = 1, isTerminator = 1 in {
  678. def URET : Priv<"uret", 0b0000000>, Sched<[]> {
  679. let rd = 0;
  680. let rs1 = 0;
  681. let rs2 = 0b00010;
  682. }
  683. def SRET : Priv<"sret", 0b0001000>, Sched<[]> {
  684. let rd = 0;
  685. let rs1 = 0;
  686. let rs2 = 0b00010;
  687. }
  688. def MRET : Priv<"mret", 0b0011000>, Sched<[]> {
  689. let rd = 0;
  690. let rs1 = 0;
  691. let rs2 = 0b00010;
  692. }
  693. } // isBarrier = 1, isReturn = 1, isTerminator = 1
  694. def WFI : Priv<"wfi", 0b0001000>, Sched<[]> {
  695. let rd = 0;
  696. let rs1 = 0;
  697. let rs2 = 0b00101;
  698. }
  699. let Predicates = [HasStdExtSvinval] in {
  700. def SFENCE_W_INVAL : Priv<"sfence.w.inval", 0b0001100>, Sched<[]> {
  701. let rd = 0;
  702. let rs1 = 0;
  703. let rs2 = 0;
  704. }
  705. def SFENCE_INVAL_IR : Priv<"sfence.inval.ir", 0b0001100>, Sched<[]> {
  706. let rd = 0;
  707. let rs1 = 0;
  708. let rs2 = 0b00001;
  709. }
  710. def SINVAL_VMA : Priv_rr<"sinval.vma", 0b0001011>, Sched<[]>;
  711. def HINVAL_VVMA : Priv_rr<"hinval.vvma", 0b0010011>, Sched<[]>;
  712. def HINVAL_GVMA : Priv_rr<"hinval.gvma", 0b0110011>, Sched<[]>;
  713. } // Predicates = [HasStdExtSvinval]
  714. def SFENCE_VMA : Priv_rr<"sfence.vma", 0b0001001>, Sched<[]>;
  715. let Predicates = [HasStdExtH] in {
  716. def HFENCE_VVMA : Priv_rr<"hfence.vvma", 0b0010001>, Sched<[]>;
  717. def HFENCE_GVMA : Priv_rr<"hfence.gvma", 0b0110001>, Sched<[]>;
  718. def HLV_B : HLoad_r<0b0110000, 0b00000, "hlv.b">, Sched<[]>;
  719. def HLV_BU : HLoad_r<0b0110000, 0b00001, "hlv.bu">, Sched<[]>;
  720. def HLV_H : HLoad_r<0b0110010, 0b00000, "hlv.h">, Sched<[]>;
  721. def HLV_HU : HLoad_r<0b0110010, 0b00001, "hlv.hu">, Sched<[]>;
  722. def HLVX_HU : HLoad_r<0b0110010, 0b00011, "hlvx.hu">, Sched<[]>;
  723. def HLV_W : HLoad_r<0b0110100, 0b00000, "hlv.w">, Sched<[]>;
  724. def HLVX_WU : HLoad_r<0b0110100, 0b00011, "hlvx.wu">, Sched<[]>;
  725. def HSV_B : HStore_rr<0b0110001, "hsv.b">, Sched<[]>;
  726. def HSV_H : HStore_rr<0b0110011, "hsv.h">, Sched<[]>;
  727. def HSV_W : HStore_rr<0b0110101, "hsv.w">, Sched<[]>;
  728. }
  729. let Predicates = [IsRV64, HasStdExtH] in {
  730. def HLV_WU : HLoad_r<0b0110100, 0b00001, "hlv.wu">, Sched<[]>;
  731. def HLV_D : HLoad_r<0b0110110, 0b00000, "hlv.d">, Sched<[]>;
  732. def HSV_D : HStore_rr<0b0110111, "hsv.d">, Sched<[]>;
  733. }
  734. //===----------------------------------------------------------------------===//
  735. // Debug instructions
  736. //===----------------------------------------------------------------------===//
  737. let isBarrier = 1, isReturn = 1, isTerminator = 1 in {
  738. def DRET : Priv<"dret", 0b0111101>, Sched<[]> {
  739. let rd = 0;
  740. let rs1 = 0;
  741. let rs2 = 0b10010;
  742. }
  743. } // isBarrier = 1, isReturn = 1, isTerminator = 1
  744. //===----------------------------------------------------------------------===//
  745. // Assembler Pseudo Instructions (User-Level ISA, Version 2.2, Chapter 20)
  746. //===----------------------------------------------------------------------===//
  747. def : InstAlias<"nop", (ADDI X0, X0, 0)>;
  748. // Note that the size is 32 because up to 8 32-bit instructions are needed to
  749. // generate an arbitrary 64-bit immediate. However, the size does not really
  750. // matter since PseudoLI is currently only used in the AsmParser where it gets
  751. // expanded to real instructions immediately.
  752. let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 32,
  753. isCodeGenOnly = 0, isAsmParserOnly = 1 in
  754. def PseudoLI : Pseudo<(outs GPR:$rd), (ins ixlenimm_li:$imm), [],
  755. "li", "$rd, $imm">;
  756. def PseudoLB : PseudoLoad<"lb">;
  757. def PseudoLBU : PseudoLoad<"lbu">;
  758. def PseudoLH : PseudoLoad<"lh">;
  759. def PseudoLHU : PseudoLoad<"lhu">;
  760. def PseudoLW : PseudoLoad<"lw">;
  761. def PseudoSB : PseudoStore<"sb">;
  762. def PseudoSH : PseudoStore<"sh">;
  763. def PseudoSW : PseudoStore<"sw">;
  764. let Predicates = [IsRV64] in {
  765. def PseudoLWU : PseudoLoad<"lwu">;
  766. def PseudoLD : PseudoLoad<"ld">;
  767. def PseudoSD : PseudoStore<"sd">;
  768. } // Predicates = [IsRV64]
  769. def : InstAlias<"li $rd, $imm", (ADDI GPR:$rd, X0, simm12:$imm)>;
  770. def : InstAlias<"mv $rd, $rs", (ADDI GPR:$rd, GPR:$rs, 0)>;
  771. def : InstAlias<"not $rd, $rs", (XORI GPR:$rd, GPR:$rs, -1)>;
  772. def : InstAlias<"neg $rd, $rs", (SUB GPR:$rd, X0, GPR:$rs)>;
  773. let Predicates = [IsRV64] in {
  774. def : InstAlias<"negw $rd, $rs", (SUBW GPR:$rd, X0, GPR:$rs)>;
  775. def : InstAlias<"sext.w $rd, $rs", (ADDIW GPR:$rd, GPR:$rs, 0)>;
  776. } // Predicates = [IsRV64]
  777. def : InstAlias<"seqz $rd, $rs", (SLTIU GPR:$rd, GPR:$rs, 1)>;
  778. def : InstAlias<"snez $rd, $rs", (SLTU GPR:$rd, X0, GPR:$rs)>;
  779. def : InstAlias<"sltz $rd, $rs", (SLT GPR:$rd, GPR:$rs, X0)>;
  780. def : InstAlias<"sgtz $rd, $rs", (SLT GPR:$rd, X0, GPR:$rs)>;
  781. // sgt/sgtu are recognised by the GNU assembler but the canonical slt/sltu
  782. // form will always be printed. Therefore, set a zero weight.
  783. def : InstAlias<"sgt $rd, $rs, $rt", (SLT GPR:$rd, GPR:$rt, GPR:$rs), 0>;
  784. def : InstAlias<"sgtu $rd, $rs, $rt", (SLTU GPR:$rd, GPR:$rt, GPR:$rs), 0>;
  785. def : InstAlias<"beqz $rs, $offset",
  786. (BEQ GPR:$rs, X0, simm13_lsb0:$offset)>;
  787. def : InstAlias<"bnez $rs, $offset",
  788. (BNE GPR:$rs, X0, simm13_lsb0:$offset)>;
  789. def : InstAlias<"blez $rs, $offset",
  790. (BGE X0, GPR:$rs, simm13_lsb0:$offset)>;
  791. def : InstAlias<"bgez $rs, $offset",
  792. (BGE GPR:$rs, X0, simm13_lsb0:$offset)>;
  793. def : InstAlias<"bltz $rs, $offset",
  794. (BLT GPR:$rs, X0, simm13_lsb0:$offset)>;
  795. def : InstAlias<"bgtz $rs, $offset",
  796. (BLT X0, GPR:$rs, simm13_lsb0:$offset)>;
  797. // Always output the canonical mnemonic for the pseudo branch instructions.
  798. // The GNU tools emit the canonical mnemonic for the branch pseudo instructions
  799. // as well (e.g. "bgt" will be recognised by the assembler but never printed by
  800. // objdump). Match this behaviour by setting a zero weight.
  801. def : InstAlias<"bgt $rs, $rt, $offset",
  802. (BLT GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>;
  803. def : InstAlias<"ble $rs, $rt, $offset",
  804. (BGE GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>;
  805. def : InstAlias<"bgtu $rs, $rt, $offset",
  806. (BLTU GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>;
  807. def : InstAlias<"bleu $rs, $rt, $offset",
  808. (BGEU GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>;
  809. def : InstAlias<"j $offset", (JAL X0, simm21_lsb0_jal:$offset)>;
  810. def : InstAlias<"jal $offset", (JAL X1, simm21_lsb0_jal:$offset)>;
  811. // Non-zero offset aliases of "jalr" are the lowest weight, followed by the
  812. // two-register form, then the one-register forms and finally "ret".
  813. def : InstAlias<"jr $rs", (JALR X0, GPR:$rs, 0), 3>;
  814. def : InstAlias<"jr ${offset}(${rs})", (JALR X0, GPR:$rs, simm12:$offset)>;
  815. def : InstAlias<"jalr $rs", (JALR X1, GPR:$rs, 0), 3>;
  816. def : InstAlias<"jalr ${offset}(${rs})", (JALR X1, GPR:$rs, simm12:$offset)>;
  817. def : InstAlias<"jalr $rd, $rs", (JALR GPR:$rd, GPR:$rs, 0), 2>;
  818. def : InstAlias<"ret", (JALR X0, X1, 0), 4>;
  819. // Non-canonical forms for jump targets also accepted by the assembler.
  820. def : InstAlias<"jr $rs, $offset", (JALR X0, GPR:$rs, simm12:$offset), 0>;
  821. def : InstAlias<"jalr $rs, $offset", (JALR X1, GPR:$rs, simm12:$offset), 0>;
  822. def : InstAlias<"jalr $rd, $rs, $offset", (JALR GPR:$rd, GPR:$rs, simm12:$offset), 0>;
  823. def : InstAlias<"fence", (FENCE 0xF, 0xF)>; // 0xF == iorw
  824. let Predicates = [HasStdExtZihintpause] in
  825. def : InstAlias<"pause", (FENCE 0x1, 0x0)>; // 0x1 == w
  826. def : InstAlias<"rdinstret $rd", (CSRRS GPR:$rd, INSTRET.Encoding, X0)>;
  827. def : InstAlias<"rdcycle $rd", (CSRRS GPR:$rd, CYCLE.Encoding, X0)>;
  828. def : InstAlias<"rdtime $rd", (CSRRS GPR:$rd, TIME.Encoding, X0)>;
  829. let Predicates = [IsRV32] in {
  830. def : InstAlias<"rdinstreth $rd", (CSRRS GPR:$rd, INSTRETH.Encoding, X0)>;
  831. def : InstAlias<"rdcycleh $rd", (CSRRS GPR:$rd, CYCLEH.Encoding, X0)>;
  832. def : InstAlias<"rdtimeh $rd", (CSRRS GPR:$rd, TIMEH.Encoding, X0)>;
  833. } // Predicates = [IsRV32]
  834. def : InstAlias<"csrr $rd, $csr", (CSRRS GPR:$rd, csr_sysreg:$csr, X0)>;
  835. def : InstAlias<"csrw $csr, $rs", (CSRRW X0, csr_sysreg:$csr, GPR:$rs)>;
  836. def : InstAlias<"csrs $csr, $rs", (CSRRS X0, csr_sysreg:$csr, GPR:$rs)>;
  837. def : InstAlias<"csrc $csr, $rs", (CSRRC X0, csr_sysreg:$csr, GPR:$rs)>;
  838. def : InstAlias<"csrwi $csr, $imm", (CSRRWI X0, csr_sysreg:$csr, uimm5:$imm)>;
  839. def : InstAlias<"csrsi $csr, $imm", (CSRRSI X0, csr_sysreg:$csr, uimm5:$imm)>;
  840. def : InstAlias<"csrci $csr, $imm", (CSRRCI X0, csr_sysreg:$csr, uimm5:$imm)>;
  841. let EmitPriority = 0 in {
  842. def : InstAlias<"csrw $csr, $imm", (CSRRWI X0, csr_sysreg:$csr, uimm5:$imm)>;
  843. def : InstAlias<"csrs $csr, $imm", (CSRRSI X0, csr_sysreg:$csr, uimm5:$imm)>;
  844. def : InstAlias<"csrc $csr, $imm", (CSRRCI X0, csr_sysreg:$csr, uimm5:$imm)>;
  845. def : InstAlias<"csrrw $rd, $csr, $imm", (CSRRWI GPR:$rd, csr_sysreg:$csr, uimm5:$imm)>;
  846. def : InstAlias<"csrrs $rd, $csr, $imm", (CSRRSI GPR:$rd, csr_sysreg:$csr, uimm5:$imm)>;
  847. def : InstAlias<"csrrc $rd, $csr, $imm", (CSRRCI GPR:$rd, csr_sysreg:$csr, uimm5:$imm)>;
  848. }
  849. def : InstAlias<"sfence.vma", (SFENCE_VMA X0, X0)>;
  850. def : InstAlias<"sfence.vma $rs", (SFENCE_VMA GPR:$rs, X0)>;
  851. def : InstAlias<"hfence.gvma", (HFENCE_GVMA X0, X0)>;
  852. def : InstAlias<"hfence.gvma $rs", (HFENCE_GVMA GPR:$rs, X0)>;
  853. def : InstAlias<"hfence.vvma", (HFENCE_VVMA X0, X0)>;
  854. def : InstAlias<"hfence.vvma $rs", (HFENCE_VVMA GPR:$rs, X0)>;
  855. let Predicates = [HasStdExtZihintntl] in {
  856. def : InstAlias<"ntl.p1", (ADD X0, X0, X2)>;
  857. def : InstAlias<"ntl.pall", (ADD X0, X0, X3)>;
  858. def : InstAlias<"ntl.s1", (ADD X0, X0, X4)>;
  859. def : InstAlias<"ntl.all", (ADD X0, X0, X5)>;
  860. } // Predicates = [HasStdExtZihintntl]
  861. let EmitPriority = 0 in {
  862. def : InstAlias<"lb $rd, (${rs1})",
  863. (LB GPR:$rd, GPR:$rs1, 0)>;
  864. def : InstAlias<"lh $rd, (${rs1})",
  865. (LH GPR:$rd, GPR:$rs1, 0)>;
  866. def : InstAlias<"lw $rd, (${rs1})",
  867. (LW GPR:$rd, GPR:$rs1, 0)>;
  868. def : InstAlias<"lbu $rd, (${rs1})",
  869. (LBU GPR:$rd, GPR:$rs1, 0)>;
  870. def : InstAlias<"lhu $rd, (${rs1})",
  871. (LHU GPR:$rd, GPR:$rs1, 0)>;
  872. def : InstAlias<"sb $rs2, (${rs1})",
  873. (SB GPR:$rs2, GPR:$rs1, 0)>;
  874. def : InstAlias<"sh $rs2, (${rs1})",
  875. (SH GPR:$rs2, GPR:$rs1, 0)>;
  876. def : InstAlias<"sw $rs2, (${rs1})",
  877. (SW GPR:$rs2, GPR:$rs1, 0)>;
  878. def : InstAlias<"add $rd, $rs1, $imm12",
  879. (ADDI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
  880. def : InstAlias<"and $rd, $rs1, $imm12",
  881. (ANDI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
  882. def : InstAlias<"xor $rd, $rs1, $imm12",
  883. (XORI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
  884. def : InstAlias<"or $rd, $rs1, $imm12",
  885. (ORI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
  886. def : InstAlias<"sll $rd, $rs1, $shamt",
  887. (SLLI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt)>;
  888. def : InstAlias<"srl $rd, $rs1, $shamt",
  889. (SRLI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt)>;
  890. def : InstAlias<"sra $rd, $rs1, $shamt",
  891. (SRAI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt)>;
  892. let Predicates = [IsRV64] in {
  893. def : InstAlias<"lwu $rd, (${rs1})",
  894. (LWU GPR:$rd, GPR:$rs1, 0)>;
  895. def : InstAlias<"ld $rd, (${rs1})",
  896. (LD GPR:$rd, GPR:$rs1, 0)>;
  897. def : InstAlias<"sd $rs2, (${rs1})",
  898. (SD GPR:$rs2, GPR:$rs1, 0)>;
  899. def : InstAlias<"addw $rd, $rs1, $imm12",
  900. (ADDIW GPR:$rd, GPR:$rs1, simm12:$imm12)>;
  901. def : InstAlias<"sllw $rd, $rs1, $shamt",
  902. (SLLIW GPR:$rd, GPR:$rs1, uimm5:$shamt)>;
  903. def : InstAlias<"srlw $rd, $rs1, $shamt",
  904. (SRLIW GPR:$rd, GPR:$rs1, uimm5:$shamt)>;
  905. def : InstAlias<"sraw $rd, $rs1, $shamt",
  906. (SRAIW GPR:$rd, GPR:$rs1, uimm5:$shamt)>;
  907. } // Predicates = [IsRV64]
  908. def : InstAlias<"slt $rd, $rs1, $imm12",
  909. (SLTI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
  910. def : InstAlias<"sltu $rd, $rs1, $imm12",
  911. (SLTIU GPR:$rd, GPR:$rs1, simm12:$imm12)>;
  912. }
  913. def : MnemonicAlias<"move", "mv">;
  914. // The SCALL and SBREAK instructions were renamed to ECALL and EBREAK in
  915. // version 2.1 of the user-level ISA. Like the GNU toolchain, we still accept
  916. // the old name for backwards compatibility.
  917. def : MnemonicAlias<"scall", "ecall">;
  918. def : MnemonicAlias<"sbreak", "ebreak">;
  919. // This alias was added to the spec in December 2020. Don't print it by default
  920. // to allow assembly we print to be compatible with versions of GNU assembler
  921. // that don't support this alias.
  922. def : InstAlias<"zext.b $rd, $rs", (ANDI GPR:$rd, GPR:$rs, 0xFF), 0>;
  923. //===----------------------------------------------------------------------===//
  924. // .insn directive instructions
  925. //===----------------------------------------------------------------------===//
  926. // isCodeGenOnly = 1 to hide them from the tablegened assembly parser.
  927. let isCodeGenOnly = 1, hasSideEffects = 1, mayLoad = 1, mayStore = 1,
  928. hasNoSchedulingInfo = 1 in {
  929. def InsnR : DirectiveInsnR<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode, uimm3:$funct3,
  930. uimm7:$funct7, AnyReg:$rs1,
  931. AnyReg:$rs2),
  932. "$opcode, $funct3, $funct7, $rd, $rs1, $rs2">;
  933. def InsnR4 : DirectiveInsnR4<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode,
  934. uimm3:$funct3,
  935. uimm2:$funct2,
  936. AnyReg:$rs1, AnyReg:$rs2,
  937. AnyReg:$rs3),
  938. "$opcode, $funct3, $funct2, $rd, $rs1, $rs2, $rs3">;
  939. def InsnI : DirectiveInsnI<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode, uimm3:$funct3,
  940. AnyReg:$rs1, simm12:$imm12),
  941. "$opcode, $funct3, $rd, $rs1, $imm12">;
  942. def InsnI_Mem : DirectiveInsnI<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode,
  943. uimm3:$funct3,
  944. AnyReg:$rs1,
  945. simm12:$imm12),
  946. "$opcode, $funct3, $rd, ${imm12}(${rs1})">;
  947. def InsnB : DirectiveInsnB<(outs), (ins uimm7_opcode:$opcode, uimm3:$funct3,
  948. AnyReg:$rs1, AnyReg:$rs2,
  949. simm13_lsb0:$imm12),
  950. "$opcode, $funct3, $rs1, $rs2, $imm12">;
  951. def InsnU : DirectiveInsnU<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode,
  952. uimm20_lui:$imm20),
  953. "$opcode, $rd, $imm20">;
  954. def InsnJ : DirectiveInsnJ<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode,
  955. simm21_lsb0_jal:$imm20),
  956. "$opcode, $rd, $imm20">;
  957. def InsnS : DirectiveInsnS<(outs), (ins uimm7_opcode:$opcode, uimm3:$funct3,
  958. AnyReg:$rs2, AnyReg:$rs1,
  959. simm12:$imm12),
  960. "$opcode, $funct3, $rs2, ${imm12}(${rs1})">;
  961. }
  962. // Use InstAliases to match these so that we can combine the insn and format
  963. // into a mnemonic to use as the key for the tablegened asm matcher table. The
  964. // parser will take care of creating these fake mnemonics and will only do it
  965. // for known formats.
  966. let EmitPriority = 0 in {
  967. def : InstAlias<".insn_r $opcode, $funct3, $funct7, $rd, $rs1, $rs2",
  968. (InsnR AnyReg:$rd, uimm7_opcode:$opcode, uimm3:$funct3, uimm7:$funct7,
  969. AnyReg:$rs1, AnyReg:$rs2)>;
  970. // Accept 4 register form of ".insn r" as alias for ".insn r4".
  971. def : InstAlias<".insn_r $opcode, $funct3, $funct2, $rd, $rs1, $rs2, $rs3",
  972. (InsnR4 AnyReg:$rd, uimm7_opcode:$opcode, uimm3:$funct3, uimm2:$funct2,
  973. AnyReg:$rs1, AnyReg:$rs2, AnyReg:$rs3)>;
  974. def : InstAlias<".insn_r4 $opcode, $funct3, $funct2, $rd, $rs1, $rs2, $rs3",
  975. (InsnR4 AnyReg:$rd, uimm7_opcode:$opcode, uimm3:$funct3, uimm2:$funct2,
  976. AnyReg:$rs1, AnyReg:$rs2, AnyReg:$rs3)>;
  977. def : InstAlias<".insn_i $opcode, $funct3, $rd, $rs1, $imm12",
  978. (InsnI AnyReg:$rd, uimm7_opcode:$opcode, uimm3:$funct3, AnyReg:$rs1,
  979. simm12:$imm12)>;
  980. def : InstAlias<".insn_i $opcode, $funct3, $rd, ${imm12}(${rs1})",
  981. (InsnI_Mem AnyReg:$rd, uimm7_opcode:$opcode, uimm3:$funct3,
  982. AnyReg:$rs1, simm12:$imm12)>;
  983. def : InstAlias<".insn_b $opcode, $funct3, $rs1, $rs2, $imm12",
  984. (InsnB uimm7_opcode:$opcode, uimm3:$funct3, AnyReg:$rs1,
  985. AnyReg:$rs2, simm13_lsb0:$imm12)>;
  986. // Accept sb as an alias for b.
  987. def : InstAlias<".insn_sb $opcode, $funct3, $rs1, $rs2, $imm12",
  988. (InsnB uimm7_opcode:$opcode, uimm3:$funct3, AnyReg:$rs1,
  989. AnyReg:$rs2, simm13_lsb0:$imm12)>;
  990. def : InstAlias<".insn_u $opcode, $rd, $imm20",
  991. (InsnU AnyReg:$rd, uimm7_opcode:$opcode, uimm20_lui:$imm20)>;
  992. def : InstAlias<".insn_j $opcode, $rd, $imm20",
  993. (InsnJ AnyReg:$rd, uimm7_opcode:$opcode, simm21_lsb0_jal:$imm20)>;
  994. // Accept uj as an alias for j.
  995. def : InstAlias<".insn_uj $opcode, $rd, $imm20",
  996. (InsnJ AnyReg:$rd, uimm7_opcode:$opcode, simm21_lsb0_jal:$imm20)>;
  997. def : InstAlias<".insn_s $opcode, $funct3, $rs2, ${imm12}(${rs1})",
  998. (InsnS uimm7_opcode:$opcode, uimm3:$funct3, AnyReg:$rs2,
  999. AnyReg:$rs1, simm12:$imm12)>;
  1000. }
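// Illustrative example (not from the original file): ".insn r 0x33, 0, 0,
// a0, a1, a2" maps to the InsnR instruction above and encodes the same 32-bit
// word as "add a0, a1, a2" (opcode OP = 0x33, funct3 = 0, funct7 = 0).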
  1001. //===----------------------------------------------------------------------===//
  1002. // Pseudo-instructions and codegen patterns
  1003. //
  1004. // Naming convention: For 'generic' pattern classes, we use the naming
  1005. // convention PatTy1Ty2. For pattern classes which offer a more complex
  1006. // expansion, prefix the class name, e.g. BccPat.
  1007. //===----------------------------------------------------------------------===//
  1008. /// Generic pattern classes
  1009. class PatGpr<SDPatternOperator OpNode, RVInst Inst>
  1010. : Pat<(OpNode GPR:$rs1), (Inst GPR:$rs1)>;
  1011. class PatGprGpr<SDPatternOperator OpNode, RVInst Inst>
  1012. : Pat<(OpNode GPR:$rs1, GPR:$rs2), (Inst GPR:$rs1, GPR:$rs2)>;
  1013. class PatGprImm<SDPatternOperator OpNode, RVInst Inst, ImmLeaf ImmType>
  1014. : Pat<(XLenVT (OpNode (XLenVT GPR:$rs1), ImmType:$imm)),
  1015. (Inst GPR:$rs1, ImmType:$imm)>;
  1016. class PatGprSimm12<SDPatternOperator OpNode, RVInstI Inst>
  1017. : PatGprImm<OpNode, Inst, simm12>;
  1018. class PatGprUimmLog2XLen<SDPatternOperator OpNode, RVInstIShift Inst>
  1019. : PatGprImm<OpNode, Inst, uimmlog2xlen>;
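// Illustrative note (not from the original file): for instance,
// PatGprGpr<add, ADD> (instantiated below) expands to
// Pat<(add GPR:$rs1, GPR:$rs2), (ADD GPR:$rs1, GPR:$rs2)>.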

/// Predicates

def assertsexti32 : PatFrag<(ops node:$src), (assertsext node:$src), [{
  return cast<VTSDNode>(N->getOperand(1))->getVT().bitsLE(MVT::i32);
}]>;
def sexti32 : ComplexPattern<i64, 1, "selectSExti32">;
def assertzexti32 : PatFrag<(ops node:$src), (assertzext node:$src), [{
  return cast<VTSDNode>(N->getOperand(1))->getVT().bitsLE(MVT::i32);
}]>;
def zexti32 : ComplexPattern<i64, 1, "selectZExtBits<32>">;
def zexti16 : ComplexPattern<XLenVT, 1, "selectZExtBits<16>">;
def zexti8 : ComplexPattern<XLenVT, 1, "selectZExtBits<8>">;

class binop_oneuse<SDPatternOperator operator>
    : PatFrag<(ops node:$A, node:$B),
              (operator node:$A, node:$B), [{
  return N->hasOneUse();
}]>;

def and_oneuse : binop_oneuse<and>;
def add_oneuse : binop_oneuse<add>;
def mul_oneuse : binop_oneuse<mul>;

def mul_const_oneuse : PatFrag<(ops node:$A, node:$B),
                               (mul node:$A, node:$B), [{
  if (auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    if (N1C->hasOneUse())
      return true;
  return false;
}]>;

class unop_oneuse<SDPatternOperator operator>
    : PatFrag<(ops node:$A),
              (operator node:$A), [{
  return N->hasOneUse();
}]>;

def sext_oneuse : unop_oneuse<sext>;
def zext_oneuse : unop_oneuse<zext>;
def anyext_oneuse : unop_oneuse<anyext>;
def fpext_oneuse : unop_oneuse<any_fpextend>;

/// Simple arithmetic operations

def : PatGprGpr<add, ADD>;
def : PatGprSimm12<add, ADDI>;
def : PatGprGpr<sub, SUB>;
def : PatGprGpr<or, OR>;
def : PatGprSimm12<or, ORI>;
def : PatGprGpr<and, AND>;
def : PatGprSimm12<and, ANDI>;
def : PatGprGpr<xor, XOR>;
def : PatGprSimm12<xor, XORI>;
def : PatGprUimmLog2XLen<shl, SLLI>;
def : PatGprUimmLog2XLen<srl, SRLI>;
def : PatGprUimmLog2XLen<sra, SRAI>;

// Select 'or' as ADDI if the immediate bits are known to be 0 in $rs1. This
// can improve compressibility.
def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
  KnownBits Known0 = CurDAG->computeKnownBits(N->getOperand(0), 0);
  KnownBits Known1 = CurDAG->computeKnownBits(N->getOperand(1), 0);
  return KnownBits::haveNoCommonBitsSet(Known0, Known1);
}]>;
def : PatGprSimm12<or_is_add, ADDI>;
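
// For illustration (the operand values are made up): in (or (shl X, 4), 3)
// the two operands share no set bits, so or_is_add matches and the 'or' can
// be selected as "addi rd, rs, 3", which has a compressed form (c.addi) when
// rd == rs, unlike ORI.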

// Negating the low bit can be done with two (compressible) shifts. The negate
// itself is never compressible, since rs1 and rd can't be the same register.
def : Pat<(XLenVT (sub 0, (and_oneuse GPR:$rs, 1))),
          (SRAI (SLLI $rs, (ImmSubFromXLen (XLenVT 1))),
                (ImmSubFromXLen (XLenVT 1)))>;
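
// Illustrative RV64 expansion (register names are arbitrary): -(a0 & 1)
// becomes
//   slli a0, a0, 63
//   srai a0, a0, 63
// i.e. the low bit is moved to the sign position and arithmetically shifted
// back, producing 0 or -1.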

// AND with leading/trailing ones mask exceeding simm32/simm12.
def : Pat<(i64 (and GPR:$rs, LeadingOnesMask:$mask)),
          (SLLI (SRLI $rs, LeadingOnesMask:$mask), LeadingOnesMask:$mask)>;
def : Pat<(XLenVT (and GPR:$rs, TrailingOnesMask:$mask)),
          (SRLI (SLLI $rs, TrailingOnesMask:$mask), TrailingOnesMask:$mask)>;
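
// For example (a sketch, assuming the LeadingOnesMask operand is lowered to
// the shift amount, here 32): on RV64, "and" with 0xffffffff00000000 fits
// neither ANDI nor a LUI-built mask, but the leading-ones pattern selects
//   srli a0, a0, 32
//   slli a0, a0, 32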

// Match both a plain shift and one where the shift amount is masked (this is
// typically introduced when the legalizer promotes the shift amount and
// zero-extends it). For RISC-V, the mask is unnecessary as shifts in the base
// ISA only read the least significant 5 bits (RV32I) or 6 bits (RV64I).
def shiftMaskXLen : ComplexPattern<XLenVT, 1, "selectShiftMaskXLen", [], [], 0>;
def shiftMask32 : ComplexPattern<i64, 1, "selectShiftMask32", [], [], 0>;

class shiftop<SDPatternOperator operator>
    : PatFrag<(ops node:$val, node:$count),
              (operator node:$val, (XLenVT (shiftMaskXLen node:$count)))>;
class shiftopw<SDPatternOperator operator>
    : PatFrag<(ops node:$val, node:$count),
              (operator node:$val, (i64 (shiftMask32 node:$count)))>;

def : PatGprGpr<shiftop<shl>, SLL>;
def : PatGprGpr<shiftop<srl>, SRL>;
def : PatGprGpr<shiftop<sra>, SRA>;
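
// For illustration (register names are arbitrary): on RV32 both
//   (shl a0, a1)   and   (shl a0, (and a1, 31))
// are selected as a single "sll a0, a0, a1", because SLL already ignores all
// but the low 5 bits of the shift amount.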

// This is a special case of the ADD instruction used to facilitate the use of a
// fourth operand to emit a relocation on a symbol relating to this instruction.
// The relocation does not affect any bits of the instruction itself but is used
// as a hint to the linker.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 0 in
def PseudoAddTPRel : Pseudo<(outs GPR:$rd),
                            (ins GPR:$rs1, GPR:$rs2, tprel_add_symbol:$src), [],
                            "add", "$rd, $rs1, $rs2, $src">;

/// FrameIndex calculations

def : Pat<(FrameAddrRegImm GPR:$rs1, simm12:$imm12),
          (ADDI GPR:$rs1, simm12:$imm12)>;

/// HI and ADD_LO address nodes.

def : Pat<(riscv_hi tglobaladdr:$in), (LUI tglobaladdr:$in)>;
def : Pat<(riscv_hi tblockaddress:$in), (LUI tblockaddress:$in)>;
def : Pat<(riscv_hi tjumptable:$in), (LUI tjumptable:$in)>;
def : Pat<(riscv_hi tconstpool:$in), (LUI tconstpool:$in)>;

def : Pat<(riscv_add_lo GPR:$hi, tglobaladdr:$lo),
          (ADDI GPR:$hi, tglobaladdr:$lo)>;
def : Pat<(riscv_add_lo GPR:$hi, tblockaddress:$lo),
          (ADDI GPR:$hi, tblockaddress:$lo)>;
def : Pat<(riscv_add_lo GPR:$hi, tjumptable:$lo),
          (ADDI GPR:$hi, tjumptable:$lo)>;
def : Pat<(riscv_add_lo GPR:$hi, tconstpool:$lo),
          (ADDI GPR:$hi, tconstpool:$lo)>;

/// TLS address nodes.

def : Pat<(riscv_hi tglobaltlsaddr:$in), (LUI tglobaltlsaddr:$in)>;
def : Pat<(riscv_add_tprel GPR:$rs1, GPR:$rs2, tglobaltlsaddr:$src),
          (PseudoAddTPRel GPR:$rs1, GPR:$rs2, tglobaltlsaddr:$src)>;
def : Pat<(riscv_add_lo GPR:$src, tglobaltlsaddr:$lo),
          (ADDI GPR:$src, tglobaltlsaddr:$lo)>;

/// Setcc

def : PatGprGpr<setlt, SLT>;
def : PatGprSimm12<setlt, SLTI>;
def : PatGprGpr<setult, SLTU>;
def : PatGprSimm12<setult, SLTIU>;

// Define pattern expansions for setcc operations that aren't directly
// handled by a RISC-V instruction.
def : Pat<(seteq GPR:$rs1, 0), (SLTIU GPR:$rs1, 1)>;
def : Pat<(seteq GPR:$rs1, GPR:$rs2), (SLTIU (XOR GPR:$rs1, GPR:$rs2), 1)>;
def : Pat<(seteq GPR:$rs1, simm12_plus1:$imm12),
          (SLTIU (ADDI GPR:$rs1, (NegImm simm12_plus1:$imm12)), 1)>;
def : Pat<(seteq GPR:$rs1, -2048),
          (SLTIU (XORI GPR:$rs1, -2048), 1)>;
def : Pat<(setne GPR:$rs1, 0), (SLTU X0, GPR:$rs1)>;
def : Pat<(setne GPR:$rs1, GPR:$rs2), (SLTU X0, (XOR GPR:$rs1, GPR:$rs2))>;
def : Pat<(setne GPR:$rs1, simm12_plus1:$imm12),
          (SLTU X0, (ADDI GPR:$rs1, (NegImm simm12_plus1:$imm12)))>;
def : Pat<(setne GPR:$rs1, -2048),
          (SLTU X0, (XORI GPR:$rs1, -2048))>;
def : Pat<(setne GPR:$rs1, -1), (SLTIU GPR:$rs1, -1)>;
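
// Worked example (register names are arbitrary): "a0 == a1" has no direct
// instruction, so the register-register seteq pattern above yields
//   xor   a2, a0, a1
//   sltiu a2, a2, 1      # 1 iff a0 == a1
// The -2048 special cases use XORI because negating -2048 does not fit in a
// simm12, so the generic ADDI-based form cannot be used.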

def IntCCtoRISCVCC : SDNodeXForm<riscv_selectcc, [{
  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
  RISCVCC::CondCode BrCC = getRISCVCCForIntCC(CC);
  return CurDAG->getTargetConstant(BrCC, SDLoc(N), Subtarget->getXLenVT());
}]>;

def riscv_selectcc_frag : PatFrag<(ops node:$lhs, node:$rhs, node:$cc,
                                       node:$truev, node:$falsev),
                                  (riscv_selectcc node:$lhs, node:$rhs,
                                                  node:$cc, node:$truev,
                                                  node:$falsev), [{}],
                                  IntCCtoRISCVCC>;

let Predicates = [HasShortForwardBranchOpt], isSelect = 1,
    Constraints = "$dst = $falsev", isCommutable = 1, Size = 8 in {
// This instruction moves $truev to $dst when the condition is true. It will
// be expanded to control flow in RISCVExpandPseudoInsts.
def PseudoCCMOVGPR : Pseudo<(outs GPR:$dst),
                            (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc,
                                 GPR:$falsev, GPR:$truev),
                            [(set GPR:$dst,
                              (riscv_selectcc_frag:$cc GPR:$lhs, GPR:$rhs,
                                                       cond, GPR:$truev,
                                                       GPR:$falsev))]>,
                     Sched<[WriteSFB, ReadSFB, ReadSFB, ReadSFB, ReadSFB]>;
}

// Conditional binops that update $dst to (op $rs1, $rs2) when the condition
// is true and leave it as $falsev otherwise (sketched after this block).
// Selected by optimizeSelect.
// TODO: Can we use DefaultOperands on the regular binop to accomplish this
// more like how ARM does predication?
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 8,
    Constraints = "$dst = $falsev" in {
def PseudoCCADD : Pseudo<(outs GPR:$dst),
                         (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc,
                              GPR:$falsev, GPR:$rs1, GPR:$rs2), []>,
                  Sched<[WriteSFB, ReadSFB, ReadSFB, ReadSFB, ReadSFB, ReadSFB]>;
def PseudoCCSUB : Pseudo<(outs GPR:$dst),
                         (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc,
                              GPR:$falsev, GPR:$rs1, GPR:$rs2), []>,
                  Sched<[WriteSFB, ReadSFB, ReadSFB, ReadSFB, ReadSFB, ReadSFB]>;
def PseudoCCAND : Pseudo<(outs GPR:$dst),
                         (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc,
                              GPR:$falsev, GPR:$rs1, GPR:$rs2), []>,
                  Sched<[WriteSFB, ReadSFB, ReadSFB, ReadSFB, ReadSFB, ReadSFB]>;
def PseudoCCOR : Pseudo<(outs GPR:$dst),
                        (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc,
                             GPR:$falsev, GPR:$rs1, GPR:$rs2), []>,
                 Sched<[WriteSFB, ReadSFB, ReadSFB, ReadSFB, ReadSFB, ReadSFB]>;
def PseudoCCXOR : Pseudo<(outs GPR:$dst),
                         (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc,
                              GPR:$falsev, GPR:$rs1, GPR:$rs2), []>,
                  Sched<[WriteSFB, ReadSFB, ReadSFB, ReadSFB, ReadSFB, ReadSFB]>;

// RV64I instructions
def PseudoCCADDW : Pseudo<(outs GPR:$dst),
                          (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc,
                               GPR:$falsev, GPR:$rs1, GPR:$rs2), []>,
                   Sched<[WriteSFB, ReadSFB, ReadSFB, ReadSFB, ReadSFB, ReadSFB]>;
def PseudoCCSUBW : Pseudo<(outs GPR:$dst),
                          (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc,
                               GPR:$falsev, GPR:$rs1, GPR:$rs2), []>,
                   Sched<[WriteSFB, ReadSFB, ReadSFB, ReadSFB, ReadSFB, ReadSFB]>;
}
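
// Illustrative post-RA expansion of PseudoCCADD (a sketch only; the actual
// expansion is done elsewhere, presumably alongside PseudoCCMOVGPR, and the
// registers/condition here are arbitrary). For a condition of "lhs == rhs":
//   bne a0, a1, 1f       # skip the op when the condition is false
//   add a2, a3, a4       # $dst receives (add $rs1, $rs2)
// 1:                     # otherwise $dst keeps $falsev ($dst == $falsev)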

multiclass SelectCC_GPR_rrirr<RegisterClass valty> {
  let usesCustomInserter = 1 in
  def _Using_CC_GPR : Pseudo<(outs valty:$dst),
                             (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc,
                                  valty:$truev, valty:$falsev),
                             [(set valty:$dst,
                               (riscv_selectcc_frag:$cc GPR:$lhs, GPR:$rhs, cond,
                                                        valty:$truev, valty:$falsev))]>;

  // Explicitly select 0 in the condition to X0. The register coalescer doesn't
  // always do it.
  def : Pat<(riscv_selectcc_frag:$cc GPR:$lhs, 0, cond, valty:$truev,
             valty:$falsev),
            (!cast<Instruction>(NAME#"_Using_CC_GPR") GPR:$lhs, X0,
             (IntCCtoRISCVCC $cc), valty:$truev, valty:$falsev)>;
}

let Predicates = [NoShortForwardBranchOpt] in
defm Select_GPR : SelectCC_GPR_rrirr<GPR>;

class SelectCompressOpt<CondCode Cond>
    : Pat<(riscv_selectcc_frag:$select GPR:$lhs, simm12_no6:$Constant, Cond,
           GPR:$truev, GPR:$falsev),
          (Select_GPR_Using_CC_GPR (ADDI GPR:$lhs, (NegImm simm12:$Constant)), X0,
           (IntCCtoRISCVCC $select), GPR:$truev, GPR:$falsev)>;

def OptForMinSize : Predicate<"MF ? MF->getFunction().hasMinSize() : false">;

let Predicates = [HasStdExtC, OptForMinSize] in {
  def : SelectCompressOpt<SETEQ>;
  def : SelectCompressOpt<SETNE>;
}

/// Branches and jumps

// Match `riscv_brcc` and lower to the appropriate RISC-V branch instruction.
multiclass BccPat<CondCode Cond, RVInstB Inst> {
  def : Pat<(riscv_brcc GPR:$rs1, GPR:$rs2, Cond, bb:$imm12),
            (Inst GPR:$rs1, GPR:$rs2, simm13_lsb0:$imm12)>;
  // Explicitly select 0 to X0. The register coalescer doesn't always do it.
  def : Pat<(riscv_brcc GPR:$rs1, 0, Cond, bb:$imm12),
            (Inst GPR:$rs1, X0, simm13_lsb0:$imm12)>;
}

class BrccCompressOpt<CondCode Cond, RVInstB Inst>
    : Pat<(riscv_brcc GPR:$lhs, simm12_no6:$Constant, Cond, bb:$place),
          (Inst (ADDI GPR:$lhs, (NegImm simm12:$Constant)), X0, bb:$place)>;

defm : BccPat<SETEQ, BEQ>;
defm : BccPat<SETNE, BNE>;
defm : BccPat<SETLT, BLT>;
defm : BccPat<SETGE, BGE>;
defm : BccPat<SETULT, BLTU>;
defm : BccPat<SETUGE, BGEU>;

let Predicates = [HasStdExtC, OptForMinSize] in {
  def : BrccCompressOpt<SETEQ, BEQ>;
  def : BrccCompressOpt<SETNE, BNE>;
}

let isBarrier = 1, isBranch = 1, isTerminator = 1 in
def PseudoBR : Pseudo<(outs), (ins simm21_lsb0_jal:$imm20), [(br bb:$imm20)]>,
               PseudoInstExpansion<(JAL X0, simm21_lsb0_jal:$imm20)>;

let isBarrier = 1, isBranch = 1, isIndirectBranch = 1, isTerminator = 1 in
def PseudoBRIND : Pseudo<(outs), (ins GPRJALR:$rs1, simm12:$imm12), []>,
                  PseudoInstExpansion<(JALR X0, GPR:$rs1, simm12:$imm12)>;

def : Pat<(brind GPRJALR:$rs1), (PseudoBRIND GPRJALR:$rs1, 0)>;
def : Pat<(brind (add GPRJALR:$rs1, simm12:$imm12)),
          (PseudoBRIND GPRJALR:$rs1, simm12:$imm12)>;

// PseudoCALLReg is a generic pseudo instruction for calls which will eventually
// expand to auipc and jalr while encoding, with any given register used as the
// destination.
// Define AsmString to print "call" when compiling with the -S flag.
// Define isCodeGenOnly = 0 to support parsing the assembly "call" instruction.
let isCall = 1, isBarrier = 1, isCodeGenOnly = 0, Size = 8, hasSideEffects = 0,
    mayStore = 0, mayLoad = 0 in
def PseudoCALLReg : Pseudo<(outs GPR:$rd), (ins call_symbol:$func), []>,
                    Sched<[WriteIALU, WriteJalr, ReadJalr]> {
  let AsmString = "call\t$rd, $func";
}

// PseudoCALL is a pseudo instruction which will eventually expand to auipc
// and jalr while encoding. This is desirable, as an auipc+jalr pair with
// R_RISCV_CALL and R_RISCV_RELAX relocations can be relaxed by the linker
// if the offset fits in a signed 21-bit immediate.
// Define AsmString to print "call" when compiling with the -S flag.
// Define isCodeGenOnly = 0 to support parsing the assembly "call" instruction.
let isCall = 1, Defs = [X1], isCodeGenOnly = 0, Size = 8 in
def PseudoCALL : Pseudo<(outs), (ins call_symbol:$func), []>,
                 Sched<[WriteIALU, WriteJalr, ReadJalr]> {
  let AsmString = "call\t$func";
}
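
// Illustrative expansion of "call func" (a sketch; "func" is a placeholder):
//   auipc ra, 0          # carries the R_RISCV_CALL (+ R_RISCV_RELAX) pair
//   jalr  ra, 0(ra)
// As noted above, the linker may relax this pair to a single "jal ra, func"
// when the target fits in the signed 21-bit immediate.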

def : Pat<(riscv_call tglobaladdr:$func), (PseudoCALL tglobaladdr:$func)>;
def : Pat<(riscv_call texternalsym:$func), (PseudoCALL texternalsym:$func)>;

def : Pat<(riscv_uret_flag), (URET X0, X0)>;
def : Pat<(riscv_sret_flag), (SRET X0, X0)>;
def : Pat<(riscv_mret_flag), (MRET X0, X0)>;

let isCall = 1, Defs = [X1] in
def PseudoCALLIndirect : Pseudo<(outs), (ins GPRJALR:$rs1),
                                [(riscv_call GPRJALR:$rs1)]>,
                         PseudoInstExpansion<(JALR X1, GPR:$rs1, 0)>;

let isBarrier = 1, isReturn = 1, isTerminator = 1 in
def PseudoRET : Pseudo<(outs), (ins), [(riscv_ret_flag)]>,
                PseudoInstExpansion<(JALR X0, X1, 0)>;

// PseudoTAIL is a pseudo instruction similar to PseudoCALL and will eventually
// expand to auipc and jalr while encoding.
// Define AsmString to print "tail" when compiling with the -S flag.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [X2],
    Size = 8, isCodeGenOnly = 0 in
def PseudoTAIL : Pseudo<(outs), (ins call_symbol:$dst), []>,
                 Sched<[WriteIALU, WriteJalr, ReadJalr]> {
  let AsmString = "tail\t$dst";
}
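
// Illustrative expansion of "tail func" (a sketch; "func" is a placeholder and
// t1 is the scratch register conventionally used for this sequence):
//   auipc t1, 0
//   jalr  zero, 0(t1)    # no link register is written, so this is a tail call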

let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [X2] in
def PseudoTAILIndirect : Pseudo<(outs), (ins GPRTC:$rs1),
                                [(riscv_tail GPRTC:$rs1)]>,
                         PseudoInstExpansion<(JALR X0, GPR:$rs1, 0)>;

def : Pat<(riscv_tail (iPTR tglobaladdr:$dst)),
          (PseudoTAIL tglobaladdr:$dst)>;
def : Pat<(riscv_tail (iPTR texternalsym:$dst)),
          (PseudoTAIL texternalsym:$dst)>;

let isCall = 0, isBarrier = 1, isBranch = 1, isTerminator = 1, Size = 8,
    isCodeGenOnly = 0, hasSideEffects = 0, mayStore = 0, mayLoad = 0 in
def PseudoJump : Pseudo<(outs GPR:$rd), (ins pseudo_jump_symbol:$target), []>,
                 Sched<[WriteIALU, WriteJalr, ReadJalr]> {
  let AsmString = "jump\t$target, $rd";
}

let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 8, isCodeGenOnly = 0,
    isAsmParserOnly = 1 in
def PseudoLLA : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                       "lla", "$dst, $src">;

def : Pat<(riscv_lla tglobaladdr:$in), (PseudoLLA tglobaladdr:$in)>;
def : Pat<(riscv_lla tblockaddress:$in), (PseudoLLA tblockaddress:$in)>;
def : Pat<(riscv_lla tjumptable:$in), (PseudoLLA tjumptable:$in)>;
def : Pat<(riscv_lla tconstpool:$in), (PseudoLLA tconstpool:$in)>;
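
// Illustrative expansion of "lla a0, sym" (a sketch; symbol and register are
// placeholders): a PC-relative pair
//   auipc a0, %pcrel_hi(sym)
//   addi  a0, a0, %pcrel_lo(...)
// The "la" pseudo below behaves the same for non-PIC code, but loads the
// address from the GOT under PIC, which is why it is marked mayLoad = 1.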

let hasSideEffects = 0, mayLoad = 1, mayStore = 0, Size = 8, isCodeGenOnly = 0,
    isAsmParserOnly = 1 in
def PseudoLA : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                      "la", "$dst, $src">;
def : Pat<(riscv_la tglobaladdr:$in), (PseudoLA tglobaladdr:$in)>;

let hasSideEffects = 0, mayLoad = 1, mayStore = 0, Size = 8, isCodeGenOnly = 0,
    isAsmParserOnly = 1 in
def PseudoLA_TLS_IE : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                             "la.tls.ie", "$dst, $src">;
def : Pat<(riscv_la_tls_ie tglobaltlsaddr:$in),
          (PseudoLA_TLS_IE tglobaltlsaddr:$in)>;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 8, isCodeGenOnly = 0,
    isAsmParserOnly = 1 in
def PseudoLA_TLS_GD : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                             "la.tls.gd", "$dst, $src">;
def : Pat<(riscv_la_tls_gd tglobaltlsaddr:$in),
          (PseudoLA_TLS_GD tglobaltlsaddr:$in)>;

/// Sign/Zero Extends

// There are single-instruction versions of these in Zbb, so disable these
// Pseudos if that extension is present.
let hasSideEffects = 0, mayLoad = 0,
    mayStore = 0, isCodeGenOnly = 0, isAsmParserOnly = 1 in {
def PseudoSEXT_B : Pseudo<(outs GPR:$rd), (ins GPR:$rs), [], "sext.b", "$rd, $rs">;
def PseudoSEXT_H : Pseudo<(outs GPR:$rd), (ins GPR:$rs), [], "sext.h", "$rd, $rs">;
// rv64's sext.w is defined above, using InstAlias<"sext.w ...
// zext.b is defined above, using InstAlias<"zext.b ...
def PseudoZEXT_H : Pseudo<(outs GPR:$rd), (ins GPR:$rs), [], "zext.h", "$rd, $rs">;
} // hasSideEffects = 0, ...

let Predicates = [IsRV64], hasSideEffects = 0, mayLoad = 0, mayStore = 0,
    isCodeGenOnly = 0, isAsmParserOnly = 1 in {
def PseudoZEXT_W : Pseudo<(outs GPR:$rd), (ins GPR:$rs), [], "zext.w", "$rd, $rs">;
} // Predicates = [IsRV64], ...
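
// Illustrative assembler expansions of the pseudos above when Zbb is not
// available (a sketch; rd/rs are placeholders):
//   sext.b rd, rs  ->  slli rd, rs, XLEN-8  ; srai rd, rd, XLEN-8
//   zext.h rd, rs  ->  slli rd, rs, XLEN-16 ; srli rd, rd, XLEN-16
//   zext.w rd, rs  ->  slli rd, rs, 32      ; srli rd, rd, 32   (RV64 only)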

/// Loads

multiclass LdPat<PatFrag LoadOp, RVInst Inst, ValueType vt = XLenVT> {
  def : Pat<(vt (LoadOp (AddrRegImm GPR:$rs1, simm12:$imm12))),
            (Inst GPR:$rs1, simm12:$imm12)>;
}

defm : LdPat<sextloadi8, LB>;
defm : LdPat<extloadi8, LB>;
defm : LdPat<sextloadi16, LH>;
defm : LdPat<extloadi16, LH>;
defm : LdPat<load, LW, i32>, Requires<[IsRV32]>;
defm : LdPat<zextloadi8, LBU>;
defm : LdPat<zextloadi16, LHU>;

/// Stores

multiclass StPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy,
                 ValueType vt> {
  def : Pat<(StoreOp (vt StTy:$rs2), (AddrRegImm GPR:$rs1, simm12:$imm12)),
            (Inst StTy:$rs2, GPR:$rs1, simm12:$imm12)>;
}

defm : StPat<truncstorei8, SB, GPR, XLenVT>;
defm : StPat<truncstorei16, SH, GPR, XLenVT>;
defm : StPat<store, SW, GPR, i32>, Requires<[IsRV32]>;

/// Fences

// Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
// Manual: Volume I.

// fence acquire -> fence r, rw
def : Pat<(atomic_fence (XLenVT 4), (timm)), (FENCE 0b10, 0b11)>;

// fence release -> fence rw, w
def : Pat<(atomic_fence (XLenVT 5), (timm)), (FENCE 0b11, 0b1)>;

// fence acq_rel -> fence.tso
def : Pat<(atomic_fence (XLenVT 6), (timm)), (FENCE_TSO)>;

// fence seq_cst -> fence rw, rw
def : Pat<(atomic_fence (XLenVT 7), (timm)), (FENCE 0b11, 0b11)>;
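
// For reference (a sketch of how the operands above read): the FENCE
// predecessor/successor fields are bitmasks with r = 0b10 and w = 0b01, so
// "fence r, rw" is (FENCE 0b10, 0b11) and "fence rw, w" is (FENCE 0b11, 0b1).
// The XLenVT constants 4-7 are the AtomicOrdering values for acquire,
// release, acq_rel and seq_cst respectively.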

// Lowering for atomic load and store is defined in RISCVInstrInfoA.td.
// Although these are lowered to fence+load/store instructions defined in the
// base RV32I/RV64I ISA, this lowering is only used when the A extension is
// present. This is necessary as it isn't valid to mix __atomic_* libcalls
// with inline atomic operations for the same object.

/// Access to system registers

// Helpers for defining specific operations. They are defined for each system
// register separately. hasSideEffects is cleared because the dependencies are
// expressed via use-def (Uses/Defs) properties instead.
class ReadSysReg<SysReg SR, list<Register> Regs>
    : Pseudo<(outs GPR:$rd), (ins),
             [(set GPR:$rd, (riscv_read_csr (XLenVT SR.Encoding)))]>,
      PseudoInstExpansion<(CSRRS GPR:$rd, SR.Encoding, X0)> {
  let hasSideEffects = 0;
  let Uses = Regs;
}

class WriteSysReg<SysReg SR, list<Register> Regs>
    : Pseudo<(outs), (ins GPR:$val),
             [(riscv_write_csr (XLenVT SR.Encoding), GPR:$val)]>,
      PseudoInstExpansion<(CSRRW X0, SR.Encoding, GPR:$val)> {
  let hasSideEffects = 0;
  let Defs = Regs;
}

class WriteSysRegImm<SysReg SR, list<Register> Regs>
    : Pseudo<(outs), (ins uimm5:$val),
             [(riscv_write_csr (XLenVT SR.Encoding), uimm5:$val)]>,
      PseudoInstExpansion<(CSRRWI X0, SR.Encoding, uimm5:$val)> {
  let hasSideEffects = 0;
  let Defs = Regs;
}

class SwapSysReg<SysReg SR, list<Register> Regs>
    : Pseudo<(outs GPR:$rd), (ins GPR:$val),
             [(set GPR:$rd, (riscv_swap_csr (XLenVT SR.Encoding), GPR:$val))]>,
      PseudoInstExpansion<(CSRRW GPR:$rd, SR.Encoding, GPR:$val)> {
  let hasSideEffects = 0;
  let Uses = Regs;
  let Defs = Regs;
}

class SwapSysRegImm<SysReg SR, list<Register> Regs>
    : Pseudo<(outs GPR:$rd), (ins uimm5:$val),
             [(set GPR:$rd, (riscv_swap_csr (XLenVT SR.Encoding), uimm5:$val))]>,
      PseudoInstExpansion<(CSRRWI GPR:$rd, SR.Encoding, uimm5:$val)> {
  let hasSideEffects = 0;
  let Uses = Regs;
  let Defs = Regs;
}

def ReadFRM : ReadSysReg<SysRegFRM, [FRM]>;
def WriteFRM : WriteSysReg<SysRegFRM, [FRM]>;
def WriteFRMImm : WriteSysRegImm<SysRegFRM, [FRM]>;
def SwapFRMImm : SwapSysRegImm<SysRegFRM, [FRM]>;

let hasSideEffects = true in {
def ReadFFLAGS : ReadSysReg<SysRegFFLAGS, [FFLAGS]>;
def WriteFFLAGS : WriteSysReg<SysRegFFLAGS, [FFLAGS]>;
}
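
// For illustration (a sketch of what these helpers produce): ReadFRM models
// the FRM dependency through its Uses list and is printed/encoded as its
// PseudoInstExpansion
//   csrrs rd, frm, zero
// while WriteFRM expands to "csrrw zero, frm, rs".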

/// Other pseudo-instructions

// Pessimistically assume the stack pointer will be clobbered
let Defs = [X2], Uses = [X2] in {
def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                              [(callseq_start timm:$amt1, timm:$amt2)]>;
def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                            [(callseq_end timm:$amt1, timm:$amt2)]>;
} // Defs = [X2], Uses = [X2]

/// RV64 patterns

let Predicates = [IsRV64, NotHasStdExtZba] in {
def : Pat<(i64 (and GPR:$rs1, 0xffffffff)), (SRLI (SLLI GPR:$rs1, 32), 32)>;
// If we're shifting a 32-bit zero-extended value left by 0-31 bits, use 2
// shifts instead of 3. This can occur when an unsigned value is used to index
// an array.
def : Pat<(i64 (shl (and GPR:$rs1, 0xffffffff), uimm5:$shamt)),
          (SRLI (SLLI GPR:$rs1, 32), (ImmSubFrom32 uimm5:$shamt))>;
}
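
// Worked example (register names are arbitrary): zero-extending a 32-bit
// index and scaling it by 8, i.e. (shl (and a0, 0xffffffff), 3), would
// naively need three shifts; the pattern above folds it to
//   slli a0, a0, 32
//   srli a0, a0, 29      # 32 - 3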

// PatFrag to allow ADDW/SUBW/MULW/SLLW to be selected from i64 add/sub/mul/shl
// if only the lower 32 bits of their result are used.
class binop_allwusers<SDPatternOperator operator>
    : PatFrag<(ops node:$lhs, node:$rhs),
              (i64 (operator node:$lhs, node:$rhs)), [{
  return hasAllWUsers(Node);
}]>;

def sexti32_allwusers : PatFrag<(ops node:$src),
                                (sext_inreg node:$src, i32), [{
  return hasAllWUsers(Node);
}]>;

def ImmSExt32 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(SignExtend64<32>(N->getSExtValue()),
                                   SDLoc(N), N->getValueType(0));
}]>;

// Look for constants where the upper 32 bits are 0, but sign-extending bit 31
// would yield a simm12.
def u32simm12 : ImmLeaf<XLenVT, [{
  return isUInt<32>(Imm) && isInt<12>(SignExtend64<32>(Imm));
}], ImmSExt32>;
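
// Worked example: Imm = 0x00000000fffff800 has its upper 32 bits clear, and
// sign-extending from bit 31 gives -2048, which fits in a simm12. u32simm12
// therefore matches and ImmSExt32 rewrites the operand to -2048 so that the
// W-form ANDI/ORI/XORI patterns below can use it directly.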

let Predicates = [IsRV64] in {
def : Pat<(i64 (and GPR:$rs, LeadingOnesWMask:$mask)),
          (SLLI (SRLIW $rs, LeadingOnesWMask:$mask), LeadingOnesWMask:$mask)>;

/// sext and zext

// Sign extend is not needed if all users are W instructions.
def : Pat<(sexti32_allwusers GPR:$rs1), (XLenVT GPR:$rs1)>;

def : Pat<(sext_inreg GPR:$rs1, i32), (ADDIW GPR:$rs1, 0)>;

/// ALU operations

def : Pat<(i64 (srl (and GPR:$rs1, 0xffffffff), uimm5:$shamt)),
          (SRLIW GPR:$rs1, uimm5:$shamt)>;
def : Pat<(i64 (srl (shl GPR:$rs1, (i64 32)), uimm6gt32:$shamt)),
          (SRLIW GPR:$rs1, (ImmSub32 uimm6gt32:$shamt))>;
def : Pat<(sra (sext_inreg GPR:$rs1, i32), uimm5:$shamt),
          (SRAIW GPR:$rs1, uimm5:$shamt)>;
def : Pat<(i64 (sra (shl GPR:$rs1, (i64 32)), uimm6gt32:$shamt)),
          (SRAIW GPR:$rs1, (ImmSub32 uimm6gt32:$shamt))>;

def : PatGprGpr<shiftopw<riscv_sllw>, SLLW>;
def : PatGprGpr<shiftopw<riscv_srlw>, SRLW>;
def : PatGprGpr<shiftopw<riscv_sraw>, SRAW>;

// Select W instructions if only the lower 32 bits of the result are used.
def : PatGprGpr<binop_allwusers<add>, ADDW>;
def : PatGprSimm12<binop_allwusers<add>, ADDIW>;
def : PatGprGpr<binop_allwusers<sub>, SUBW>;
def : PatGprImm<binop_allwusers<shl>, SLLIW, uimm5>;

// If this is a shr of a value sign extended from i32, and all the users only
// use the lower 32 bits, we can use an sraiw to remove the sext_inreg. This
// occurs because SimplifyDemandedBits prefers srl over sra.
def : Pat<(binop_allwusers<srl> (sext_inreg GPR:$rs1, i32), uimm5:$shamt),
          (SRAIW GPR:$rs1, uimm5:$shamt)>;

// Use binop_allwusers to recover immediates that may have been broken by
// SimplifyDemandedBits.
def : Pat<(binop_allwusers<and> GPR:$rs1, u32simm12:$imm),
          (ANDI GPR:$rs1, u32simm12:$imm)>;
def : Pat<(binop_allwusers<or> GPR:$rs1, u32simm12:$imm),
          (ORI GPR:$rs1, u32simm12:$imm)>;
def : Pat<(binop_allwusers<xor> GPR:$rs1, u32simm12:$imm),
          (XORI GPR:$rs1, u32simm12:$imm)>;

/// Loads

defm : LdPat<sextloadi32, LW, i64>;
defm : LdPat<extloadi32, LW, i64>;
defm : LdPat<zextloadi32, LWU, i64>;
defm : LdPat<load, LD, i64>;

/// Stores

defm : StPat<truncstorei32, SW, GPR, i64>;
defm : StPat<store, SD, GPR, i64>;
} // Predicates = [IsRV64]

/// readcyclecounter

// On RV64, we can directly read the 64-bit "cycle" CSR.
let Predicates = [IsRV64] in
def : Pat<(i64 (readcyclecounter)), (CSRRS CYCLE.Encoding, X0)>;
// On RV32, ReadCycleWide will be expanded to the suggested loop reading both
// halves of the 64-bit "cycle" CSR.
let Predicates = [IsRV32], usesCustomInserter = 1, hasNoSchedulingInfo = 1 in
def ReadCycleWide : Pseudo<(outs GPR:$lo, GPR:$hi), (ins),
                           [(set GPR:$lo, GPR:$hi, (riscv_read_cycle_wide))],
                           "", "">;

/// traps

// We lower `trap` to `unimp`, as this causes a hard exception on nearly all
// systems.
def : Pat<(trap), (UNIMP)>;

// We lower `debugtrap` to `ebreak`, as this will get the attention of the
// debugger if possible.
def : Pat<(debugtrap), (EBREAK)>;

let Predicates = [IsRV64], Uses = [X5],
    Defs = [X1, X6, X7, X28, X29, X30, X31] in
def HWASAN_CHECK_MEMACCESS_SHORTGRANULES
    : Pseudo<(outs), (ins GPRJALR:$ptr, i32imm:$accessinfo),
             [(int_hwasan_check_memaccess_shortgranules X5, GPRJALR:$ptr,
               (i32 timm:$accessinfo))]>;

/// Simple optimization

def : Pat<(add GPR:$rs1, (AddiPair:$rs2)),
          (ADDI (ADDI GPR:$rs1, (AddiPairImmLarge AddiPair:$rs2)),
                (AddiPairImmSmall GPR:$rs2))>;
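
// Worked example (assuming AddiPair accepts immediates that split into two
// simm12 values, with the "large" half saturated at +/-2047/-2048): adding
// 4000 does not fit a single ADDI, so it becomes
//   addi a0, a0, 2047
//   addi a0, a0, 1953    # 2047 + 1953 == 4000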

let Predicates = [IsRV64] in {
// Select W instructions if only the lower 32 bits of the result are used.
def : Pat<(binop_allwusers<add> GPR:$rs1, (AddiPair:$rs2)),
          (ADDIW (ADDIW GPR:$rs1, (AddiPairImmLarge AddiPair:$rs2)),
                 (AddiPairImmSmall AddiPair:$rs2))>;
}

//===----------------------------------------------------------------------===//
// Standard extensions
//===----------------------------------------------------------------------===//

include "RISCVInstrInfoM.td"
include "RISCVInstrInfoA.td"
include "RISCVInstrInfoF.td"
include "RISCVInstrInfoD.td"
include "RISCVInstrInfoC.td"
include "RISCVInstrInfoZb.td"
include "RISCVInstrInfoZk.td"
include "RISCVInstrInfoV.td"
include "RISCVInstrInfoZfh.td"
include "RISCVInstrInfoZicbo.td"

//===----------------------------------------------------------------------===//
// Vendor extensions
//===----------------------------------------------------------------------===//

include "RISCVInstrInfoXVentana.td"
include "RISCVInstrInfoXTHead.td"