//=- AArch64RegisterInfo.td - Describe the AArch64 Registers -*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//
// Base class for all AArch64 registers. Records the hardware encoding used in
// instruction register fields, the assembly name, any sub-registers, and
// optional alternate names (e.g. the "v0" vector spelling of FP registers).
class AArch64Reg<bits<16> enc, string n, list<Register> subregs = [],
                 list<string> altNames = []>
        : Register<n, altNames> {
  let HWEncoding = enc;         // Encoding emitted into instruction fields.
  let Namespace = "AArch64";
  let SubRegs = subregs;
}
// Sub-register indices. The definition order is significant: several groups
// below are explicitly relied upon to have consecutive index numbers, so do
// not reorder or insert new defs inside those groups.
let Namespace = "AArch64" in {
  // Low 32 bits of a 64-bit GPR (Wn inside Xn).
  def sub_32 : SubRegIndex<32>;

  // Scalar FP/SIMD sub-views: 8/16/32/64-bit lanes of the vector registers.
  def bsub : SubRegIndex<8>;
  def hsub : SubRegIndex<16>;
  def ssub : SubRegIndex<32>;
  def dsub : SubRegIndex<64>;
  // Even/odd halves of 64- and 128-bit register pairs.
  def sube32 : SubRegIndex<32>;
  def subo32 : SubRegIndex<32>;
  def sube64 : SubRegIndex<64>;
  def subo64 : SubRegIndex<64>;
  // SVE
  def zsub : SubRegIndex<128>;
  // Note: zsub_hi should never be used directly because it represents
  // the scalable part of the SVE vector and cannot be manipulated as a
  // subvector in the same way the lower 128bits can.
  def zsub_hi : SubRegIndex<128>;
  // 64-bit elements of D-register tuples (DD/DDD/DDDD).
  // Note: Code depends on these having consecutive numbers
  def dsub0 : SubRegIndex<64>;
  def dsub1 : SubRegIndex<64>;
  def dsub2 : SubRegIndex<64>;
  def dsub3 : SubRegIndex<64>;
  // 128-bit elements of Q-register tuples (QQ/QQQ/QQQQ).
  // Note: Code depends on these having consecutive numbers
  def qsub0 : SubRegIndex<128>;
  def qsub1 : SubRegIndex<128>;
  def qsub2 : SubRegIndex<128>;
  def qsub3 : SubRegIndex<128>;
  // SME ZA tile views; sizes assume the minimum 16x16-byte ZA array.
  // Note: Code depends on these having consecutive numbers
  def zasubb  : SubRegIndex<2048>; // (16 x 16)/1 bytes  = 2048 bits
  def zasubh0 : SubRegIndex<1024>; // (16 x 16)/2 bytes  = 1024 bits
  def zasubh1 : SubRegIndex<1024>; // (16 x 16)/2 bytes  = 1024 bits
  def zasubs0 : SubRegIndex<512>;  // (16 x 16)/4 bytes  = 512 bits
  def zasubs1 : SubRegIndex<512>;  // (16 x 16)/4 bytes  = 512 bits
  def zasubd0 : SubRegIndex<256>;  // (16 x 16)/8 bytes  = 256 bits
  def zasubd1 : SubRegIndex<256>;  // (16 x 16)/8 bytes  = 256 bits
  def zasubq0 : SubRegIndex<128>;  // (16 x 16)/16 bytes = 128 bits
  def zasubq1 : SubRegIndex<128>;  // (16 x 16)/16 bytes = 128 bits
}
// Alternate-name indices used by the D/Q register defs below:
// "vreg" selects the vN spelling, "vlist1" the bare vector-list spelling.
let Namespace = "AArch64" in {
  def vreg : RegAltNameIndex;
  def vlist1 : RegAltNameIndex;
}
//===----------------------------------------------------------------------===//
// Registers
//===----------------------------------------------------------------------===//
// 32-bit general purpose registers. DWARF numbers 0-31 match the encodings.
def W0    : AArch64Reg<0,   "w0" >, DwarfRegNum<[0]>;
def W1    : AArch64Reg<1,   "w1" >, DwarfRegNum<[1]>;
def W2    : AArch64Reg<2,   "w2" >, DwarfRegNum<[2]>;
def W3    : AArch64Reg<3,   "w3" >, DwarfRegNum<[3]>;
def W4    : AArch64Reg<4,   "w4" >, DwarfRegNum<[4]>;
def W5    : AArch64Reg<5,   "w5" >, DwarfRegNum<[5]>;
def W6    : AArch64Reg<6,   "w6" >, DwarfRegNum<[6]>;
def W7    : AArch64Reg<7,   "w7" >, DwarfRegNum<[7]>;
def W8    : AArch64Reg<8,   "w8" >, DwarfRegNum<[8]>;
def W9    : AArch64Reg<9,   "w9" >, DwarfRegNum<[9]>;
def W10   : AArch64Reg<10, "w10">, DwarfRegNum<[10]>;
def W11   : AArch64Reg<11, "w11">, DwarfRegNum<[11]>;
def W12   : AArch64Reg<12, "w12">, DwarfRegNum<[12]>;
def W13   : AArch64Reg<13, "w13">, DwarfRegNum<[13]>;
def W14   : AArch64Reg<14, "w14">, DwarfRegNum<[14]>;
def W15   : AArch64Reg<15, "w15">, DwarfRegNum<[15]>;
def W16   : AArch64Reg<16, "w16">, DwarfRegNum<[16]>;
def W17   : AArch64Reg<17, "w17">, DwarfRegNum<[17]>;
def W18   : AArch64Reg<18, "w18">, DwarfRegNum<[18]>;
def W19   : AArch64Reg<19, "w19">, DwarfRegNum<[19]>;
def W20   : AArch64Reg<20, "w20">, DwarfRegNum<[20]>;
def W21   : AArch64Reg<21, "w21">, DwarfRegNum<[21]>;
def W22   : AArch64Reg<22, "w22">, DwarfRegNum<[22]>;
def W23   : AArch64Reg<23, "w23">, DwarfRegNum<[23]>;
def W24   : AArch64Reg<24, "w24">, DwarfRegNum<[24]>;
def W25   : AArch64Reg<25, "w25">, DwarfRegNum<[25]>;
def W26   : AArch64Reg<26, "w26">, DwarfRegNum<[26]>;
def W27   : AArch64Reg<27, "w27">, DwarfRegNum<[27]>;
def W28   : AArch64Reg<28, "w28">, DwarfRegNum<[28]>;
def W29   : AArch64Reg<29, "w29">, DwarfRegNum<[29]>;
def W30   : AArch64Reg<30, "w30">, DwarfRegNum<[30]>;
def WSP   : AArch64Reg<31, "wsp">, DwarfRegNum<[31]>;
// The zero register shares encoding 31 with WSP; which one an instruction
// refers to is determined by the instruction, not the encoding.
let isConstant = true in
def WZR   : AArch64Reg<31, "wzr">, DwarfRegAlias<WSP>;

// 64-bit general purpose registers. Each Xn contains the matching Wn as its
// low-32-bit sub-register and shares its DWARF number.
let SubRegIndices = [sub_32] in {
def X0    : AArch64Reg<0,   "x0", [W0]>, DwarfRegAlias<W0>;
def X1    : AArch64Reg<1,   "x1", [W1]>, DwarfRegAlias<W1>;
def X2    : AArch64Reg<2,   "x2", [W2]>, DwarfRegAlias<W2>;
def X3    : AArch64Reg<3,   "x3", [W3]>, DwarfRegAlias<W3>;
def X4    : AArch64Reg<4,   "x4", [W4]>, DwarfRegAlias<W4>;
def X5    : AArch64Reg<5,   "x5", [W5]>, DwarfRegAlias<W5>;
def X6    : AArch64Reg<6,   "x6", [W6]>, DwarfRegAlias<W6>;
def X7    : AArch64Reg<7,   "x7", [W7]>, DwarfRegAlias<W7>;
def X8    : AArch64Reg<8,   "x8", [W8]>, DwarfRegAlias<W8>;
def X9    : AArch64Reg<9,   "x9", [W9]>, DwarfRegAlias<W9>;
def X10   : AArch64Reg<10, "x10", [W10]>, DwarfRegAlias<W10>;
def X11   : AArch64Reg<11, "x11", [W11]>, DwarfRegAlias<W11>;
def X12   : AArch64Reg<12, "x12", [W12]>, DwarfRegAlias<W12>;
def X13   : AArch64Reg<13, "x13", [W13]>, DwarfRegAlias<W13>;
def X14   : AArch64Reg<14, "x14", [W14]>, DwarfRegAlias<W14>;
def X15   : AArch64Reg<15, "x15", [W15]>, DwarfRegAlias<W15>;
def X16   : AArch64Reg<16, "x16", [W16]>, DwarfRegAlias<W16>;
def X17   : AArch64Reg<17, "x17", [W17]>, DwarfRegAlias<W17>;
def X18   : AArch64Reg<18, "x18", [W18]>, DwarfRegAlias<W18>;
def X19   : AArch64Reg<19, "x19", [W19]>, DwarfRegAlias<W19>;
def X20   : AArch64Reg<20, "x20", [W20]>, DwarfRegAlias<W20>;
def X21   : AArch64Reg<21, "x21", [W21]>, DwarfRegAlias<W21>;
def X22   : AArch64Reg<22, "x22", [W22]>, DwarfRegAlias<W22>;
def X23   : AArch64Reg<23, "x23", [W23]>, DwarfRegAlias<W23>;
def X24   : AArch64Reg<24, "x24", [W24]>, DwarfRegAlias<W24>;
def X25   : AArch64Reg<25, "x25", [W25]>, DwarfRegAlias<W25>;
def X26   : AArch64Reg<26, "x26", [W26]>, DwarfRegAlias<W26>;
def X27   : AArch64Reg<27, "x27", [W27]>, DwarfRegAlias<W27>;
def X28   : AArch64Reg<28, "x28", [W28]>, DwarfRegAlias<W28>;
// x29/x30 get dedicated names for their frame-pointer / link-register roles.
def FP    : AArch64Reg<29, "x29", [W29]>, DwarfRegAlias<W29>;
def LR    : AArch64Reg<30, "x30", [W30]>, DwarfRegAlias<W30>;
def SP    : AArch64Reg<31,  "sp", [WSP]>, DwarfRegAlias<WSP>;
// XZR shares encoding 31 with SP; its DWARF number aliases WSP (there is no
// distinct DWARF number for the zero register).
let isConstant = true in
def XZR   : AArch64Reg<31, "xzr", [WZR]>, DwarfRegAlias<WSP>;
}
// Condition code register.
def NZCV  : AArch64Reg<0, "nzcv">;

// First fault status register (SVE first-fault loads).
def FFR   : AArch64Reg<0, "ffr">, DwarfRegNum<[47]>;

// Purely virtual Vector Granule (VG) Dwarf register
def VG    : AArch64Reg<0, "vg">, DwarfRegNum<[46]>;

// Floating-point control register
def FPCR  : AArch64Reg<0, "fpcr">;
// GPR register classes with the intersections of GPR32/GPR32sp and
// GPR64/GPR64sp for use by the coalescer.
// The AltOrders rotation by 8 skips the argument registers (x0-x7) in the
// alternate allocation order; AltOrderSelect unconditionally picks it.
def GPR32common : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 0, 30)> {
  let AltOrders = [(rotl GPR32common, 8)];
  let AltOrderSelect = [{ return 1; }];
}
def GPR64common : RegisterClass<"AArch64", [i64], 64,
                                (add (sequence "X%u", 0, 28), FP, LR)> {
  let AltOrders = [(rotl GPR64common, 8)];
  let AltOrderSelect = [{ return 1; }];
}
// GPR register classes which exclude SP/WSP.
def GPR32 : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WZR)> {
  let AltOrders = [(rotl GPR32, 8)];
  let AltOrderSelect = [{ return 1; }];
}
def GPR64 : RegisterClass<"AArch64", [i64], 64, (add GPR64common, XZR)> {
  let AltOrders = [(rotl GPR64, 8)];
  let AltOrderSelect = [{ return 1; }];
}
// GPR register classes which include SP/WSP.
def GPR32sp : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WSP)> {
  let AltOrders = [(rotl GPR32sp, 8)];
  let AltOrderSelect = [{ return 1; }];
}
def GPR64sp : RegisterClass<"AArch64", [i64], 64, (add GPR64common, SP)> {
  let AltOrders = [(rotl GPR64sp, 8)];
  let AltOrderSelect = [{ return 1; }];
}
// Single-register classes containing only the stack pointer views.
def GPR32sponly : RegisterClass<"AArch64", [i32], 32, (add WSP)>;
def GPR64sponly : RegisterClass<"AArch64", [i64], 64, (add SP)>;
// Asm operand for a GPR64sp register that may be written "xN, #0" in
// assembly (e.g. LDAR-style addressing); the parser accepts the ", #0".
def GPR64spPlus0Operand : AsmOperandClass {
  let Name = "GPR64sp0";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isGPR64<AArch64::GPR64spRegClassID>";
  let ParserMethod = "tryParseGPR64sp0Operand";
}
def GPR64sp0 : RegisterOperand<GPR64sp> {
  let ParserMatchClass = GPR64spPlus0Operand;
}
// GPR32/GPR64 but with zero-register substitution enabled.
// TODO: Roll this out to GPR32/GPR64/GPR32all/GPR64all.
def GPR32z : RegisterOperand<GPR32> {
  let GIZeroRegister = WZR;
}
def GPR64z : RegisterOperand<GPR64> {
  let GIZeroRegister = XZR;
}

// GPR argument registers (first eight, per the AAPCS64 argument sequence).
def GPR32arg : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 0, 7)>;
def GPR64arg : RegisterClass<"AArch64", [i64], 64, (sequence "X%u", 0, 7)>;

// GPR register classes which include WZR/XZR AND SP/WSP. This is not a
// constraint used by any instructions, it is used as a common super-class.
def GPR32all : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WZR, WSP)>;
def GPR64all : RegisterClass<"AArch64", [i64], 64, (add GPR64common, XZR, SP)>;
// For tail calls, we can't use callee-saved registers, as they are restored
// to the saved value before the tail call, which would clobber a call address.
// This is for indirect tail calls to store the address of the destination.
def tcGPR64 : RegisterClass<"AArch64", [i64], 64, (sub GPR64common, X19, X20, X21,
                                                   X22, X23, X24, X25, X26,
                                                   X27, X28, FP, LR)>;

// Restricted set of tail call registers, for use when branch target
// enforcement is enabled. These are the only registers which can be used to
// indirectly branch (not call) to the "BTI c" instruction at the start of a
// BTI-protected function.
def rtcGPR64 : RegisterClass<"AArch64", [i64], 64, (add X16, X17)>;

// Register set that excludes registers that are reserved for procedure calls.
// This is used for pseudo-instructions that are actually implemented using a
// procedure call.
def GPR64noip : RegisterClass<"AArch64", [i64], 64, (sub GPR64, X16, X17, LR)>;
// GPR register classes for post increment amount of vector load/store that
// has alternate printing when Rm=31 and prints a constant immediate value
// equal to the total number of bytes transferred.
// FIXME: TableGen *should* be able to do these itself now. There appears to be
// a bug in counting how many operands a Post-indexed MCInst should have which
// means the aliases don't trigger.
// The template argument is the byte count printed when Rm is XZR (encoding 31).
def GPR64pi1  : RegisterOperand<GPR64, "printPostIncOperand<1>">;
def GPR64pi2  : RegisterOperand<GPR64, "printPostIncOperand<2>">;
def GPR64pi3  : RegisterOperand<GPR64, "printPostIncOperand<3>">;
def GPR64pi4  : RegisterOperand<GPR64, "printPostIncOperand<4>">;
def GPR64pi6  : RegisterOperand<GPR64, "printPostIncOperand<6>">;
def GPR64pi8  : RegisterOperand<GPR64, "printPostIncOperand<8>">;
def GPR64pi12 : RegisterOperand<GPR64, "printPostIncOperand<12>">;
def GPR64pi16 : RegisterOperand<GPR64, "printPostIncOperand<16>">;
def GPR64pi24 : RegisterOperand<GPR64, "printPostIncOperand<24>">;
def GPR64pi32 : RegisterOperand<GPR64, "printPostIncOperand<32>">;
def GPR64pi48 : RegisterOperand<GPR64, "printPostIncOperand<48>">;
def GPR64pi64 : RegisterOperand<GPR64, "printPostIncOperand<64>">;
// Condition code regclass.
def CCR : RegisterClass<"AArch64", [i32], 32, (add NZCV)> {
  let CopyCost = -1;      // Don't allow copying of status registers.

  // CCR is not allocatable.
  let isAllocatable = 0;
}
//===----------------------------------------------------------------------===//
// Floating Point Scalar Registers
//===----------------------------------------------------------------------===//
// 8-bit scalar views of the FP/SIMD registers. DWARF numbers 64-95 are the
// AAPCS64 numbers for v0-v31; the B/H/S/D/Q views all alias them.
def B0    : AArch64Reg<0,   "b0">, DwarfRegNum<[64]>;
def B1    : AArch64Reg<1,   "b1">, DwarfRegNum<[65]>;
def B2    : AArch64Reg<2,   "b2">, DwarfRegNum<[66]>;
def B3    : AArch64Reg<3,   "b3">, DwarfRegNum<[67]>;
def B4    : AArch64Reg<4,   "b4">, DwarfRegNum<[68]>;
def B5    : AArch64Reg<5,   "b5">, DwarfRegNum<[69]>;
def B6    : AArch64Reg<6,   "b6">, DwarfRegNum<[70]>;
def B7    : AArch64Reg<7,   "b7">, DwarfRegNum<[71]>;
def B8    : AArch64Reg<8,   "b8">, DwarfRegNum<[72]>;
def B9    : AArch64Reg<9,   "b9">, DwarfRegNum<[73]>;
def B10   : AArch64Reg<10, "b10">, DwarfRegNum<[74]>;
def B11   : AArch64Reg<11, "b11">, DwarfRegNum<[75]>;
def B12   : AArch64Reg<12, "b12">, DwarfRegNum<[76]>;
def B13   : AArch64Reg<13, "b13">, DwarfRegNum<[77]>;
def B14   : AArch64Reg<14, "b14">, DwarfRegNum<[78]>;
def B15   : AArch64Reg<15, "b15">, DwarfRegNum<[79]>;
def B16   : AArch64Reg<16, "b16">, DwarfRegNum<[80]>;
def B17   : AArch64Reg<17, "b17">, DwarfRegNum<[81]>;
def B18   : AArch64Reg<18, "b18">, DwarfRegNum<[82]>;
def B19   : AArch64Reg<19, "b19">, DwarfRegNum<[83]>;
def B20   : AArch64Reg<20, "b20">, DwarfRegNum<[84]>;
def B21   : AArch64Reg<21, "b21">, DwarfRegNum<[85]>;
def B22   : AArch64Reg<22, "b22">, DwarfRegNum<[86]>;
def B23   : AArch64Reg<23, "b23">, DwarfRegNum<[87]>;
def B24   : AArch64Reg<24, "b24">, DwarfRegNum<[88]>;
def B25   : AArch64Reg<25, "b25">, DwarfRegNum<[89]>;
def B26   : AArch64Reg<26, "b26">, DwarfRegNum<[90]>;
def B27   : AArch64Reg<27, "b27">, DwarfRegNum<[91]>;
def B28   : AArch64Reg<28, "b28">, DwarfRegNum<[92]>;
def B29   : AArch64Reg<29, "b29">, DwarfRegNum<[93]>;
def B30   : AArch64Reg<30, "b30">, DwarfRegNum<[94]>;
def B31   : AArch64Reg<31, "b31">, DwarfRegNum<[95]>;
// 16-bit scalar views; each Hn contains Bn as its low byte.
let SubRegIndices = [bsub] in {
def H0    : AArch64Reg<0,   "h0", [B0]>, DwarfRegAlias<B0>;
def H1    : AArch64Reg<1,   "h1", [B1]>, DwarfRegAlias<B1>;
def H2    : AArch64Reg<2,   "h2", [B2]>, DwarfRegAlias<B2>;
def H3    : AArch64Reg<3,   "h3", [B3]>, DwarfRegAlias<B3>;
def H4    : AArch64Reg<4,   "h4", [B4]>, DwarfRegAlias<B4>;
def H5    : AArch64Reg<5,   "h5", [B5]>, DwarfRegAlias<B5>;
def H6    : AArch64Reg<6,   "h6", [B6]>, DwarfRegAlias<B6>;
def H7    : AArch64Reg<7,   "h7", [B7]>, DwarfRegAlias<B7>;
def H8    : AArch64Reg<8,   "h8", [B8]>, DwarfRegAlias<B8>;
def H9    : AArch64Reg<9,   "h9", [B9]>, DwarfRegAlias<B9>;
def H10   : AArch64Reg<10, "h10", [B10]>, DwarfRegAlias<B10>;
def H11   : AArch64Reg<11, "h11", [B11]>, DwarfRegAlias<B11>;
def H12   : AArch64Reg<12, "h12", [B12]>, DwarfRegAlias<B12>;
def H13   : AArch64Reg<13, "h13", [B13]>, DwarfRegAlias<B13>;
def H14   : AArch64Reg<14, "h14", [B14]>, DwarfRegAlias<B14>;
def H15   : AArch64Reg<15, "h15", [B15]>, DwarfRegAlias<B15>;
def H16   : AArch64Reg<16, "h16", [B16]>, DwarfRegAlias<B16>;
def H17   : AArch64Reg<17, "h17", [B17]>, DwarfRegAlias<B17>;
def H18   : AArch64Reg<18, "h18", [B18]>, DwarfRegAlias<B18>;
def H19   : AArch64Reg<19, "h19", [B19]>, DwarfRegAlias<B19>;
def H20   : AArch64Reg<20, "h20", [B20]>, DwarfRegAlias<B20>;
def H21   : AArch64Reg<21, "h21", [B21]>, DwarfRegAlias<B21>;
def H22   : AArch64Reg<22, "h22", [B22]>, DwarfRegAlias<B22>;
def H23   : AArch64Reg<23, "h23", [B23]>, DwarfRegAlias<B23>;
def H24   : AArch64Reg<24, "h24", [B24]>, DwarfRegAlias<B24>;
def H25   : AArch64Reg<25, "h25", [B25]>, DwarfRegAlias<B25>;
def H26   : AArch64Reg<26, "h26", [B26]>, DwarfRegAlias<B26>;
def H27   : AArch64Reg<27, "h27", [B27]>, DwarfRegAlias<B27>;
def H28   : AArch64Reg<28, "h28", [B28]>, DwarfRegAlias<B28>;
def H29   : AArch64Reg<29, "h29", [B29]>, DwarfRegAlias<B29>;
def H30   : AArch64Reg<30, "h30", [B30]>, DwarfRegAlias<B30>;
def H31   : AArch64Reg<31, "h31", [B31]>, DwarfRegAlias<B31>;
}
// 32-bit scalar views; each Sn contains Hn as its low half.
let SubRegIndices = [hsub] in {
def S0    : AArch64Reg<0,   "s0", [H0]>, DwarfRegAlias<B0>;
def S1    : AArch64Reg<1,   "s1", [H1]>, DwarfRegAlias<B1>;
def S2    : AArch64Reg<2,   "s2", [H2]>, DwarfRegAlias<B2>;
def S3    : AArch64Reg<3,   "s3", [H3]>, DwarfRegAlias<B3>;
def S4    : AArch64Reg<4,   "s4", [H4]>, DwarfRegAlias<B4>;
def S5    : AArch64Reg<5,   "s5", [H5]>, DwarfRegAlias<B5>;
def S6    : AArch64Reg<6,   "s6", [H6]>, DwarfRegAlias<B6>;
def S7    : AArch64Reg<7,   "s7", [H7]>, DwarfRegAlias<B7>;
def S8    : AArch64Reg<8,   "s8", [H8]>, DwarfRegAlias<B8>;
def S9    : AArch64Reg<9,   "s9", [H9]>, DwarfRegAlias<B9>;
def S10   : AArch64Reg<10, "s10", [H10]>, DwarfRegAlias<B10>;
def S11   : AArch64Reg<11, "s11", [H11]>, DwarfRegAlias<B11>;
def S12   : AArch64Reg<12, "s12", [H12]>, DwarfRegAlias<B12>;
def S13   : AArch64Reg<13, "s13", [H13]>, DwarfRegAlias<B13>;
def S14   : AArch64Reg<14, "s14", [H14]>, DwarfRegAlias<B14>;
def S15   : AArch64Reg<15, "s15", [H15]>, DwarfRegAlias<B15>;
def S16   : AArch64Reg<16, "s16", [H16]>, DwarfRegAlias<B16>;
def S17   : AArch64Reg<17, "s17", [H17]>, DwarfRegAlias<B17>;
def S18   : AArch64Reg<18, "s18", [H18]>, DwarfRegAlias<B18>;
def S19   : AArch64Reg<19, "s19", [H19]>, DwarfRegAlias<B19>;
def S20   : AArch64Reg<20, "s20", [H20]>, DwarfRegAlias<B20>;
def S21   : AArch64Reg<21, "s21", [H21]>, DwarfRegAlias<B21>;
def S22   : AArch64Reg<22, "s22", [H22]>, DwarfRegAlias<B22>;
def S23   : AArch64Reg<23, "s23", [H23]>, DwarfRegAlias<B23>;
def S24   : AArch64Reg<24, "s24", [H24]>, DwarfRegAlias<B24>;
def S25   : AArch64Reg<25, "s25", [H25]>, DwarfRegAlias<B25>;
def S26   : AArch64Reg<26, "s26", [H26]>, DwarfRegAlias<B26>;
def S27   : AArch64Reg<27, "s27", [H27]>, DwarfRegAlias<B27>;
def S28   : AArch64Reg<28, "s28", [H28]>, DwarfRegAlias<B28>;
def S29   : AArch64Reg<29, "s29", [H29]>, DwarfRegAlias<B29>;
def S30   : AArch64Reg<30, "s30", [H30]>, DwarfRegAlias<B30>;
def S31   : AArch64Reg<31, "s31", [H31]>, DwarfRegAlias<B31>;
}
// 64-bit views; each Dn contains Sn as its low half. From this size up the
// registers carry alternate "vN" names used for the vector syntax (the
// vlist1 alternate is empty: vector lists print the bare register).
let SubRegIndices = [ssub], RegAltNameIndices = [vreg, vlist1] in {
def D0    : AArch64Reg<0,   "d0", [S0], ["v0", ""]>, DwarfRegAlias<B0>;
def D1    : AArch64Reg<1,   "d1", [S1], ["v1", ""]>, DwarfRegAlias<B1>;
def D2    : AArch64Reg<2,   "d2", [S2], ["v2", ""]>, DwarfRegAlias<B2>;
def D3    : AArch64Reg<3,   "d3", [S3], ["v3", ""]>, DwarfRegAlias<B3>;
def D4    : AArch64Reg<4,   "d4", [S4], ["v4", ""]>, DwarfRegAlias<B4>;
def D5    : AArch64Reg<5,   "d5", [S5], ["v5", ""]>, DwarfRegAlias<B5>;
def D6    : AArch64Reg<6,   "d6", [S6], ["v6", ""]>, DwarfRegAlias<B6>;
def D7    : AArch64Reg<7,   "d7", [S7], ["v7", ""]>, DwarfRegAlias<B7>;
def D8    : AArch64Reg<8,   "d8", [S8], ["v8", ""]>, DwarfRegAlias<B8>;
def D9    : AArch64Reg<9,   "d9", [S9], ["v9", ""]>, DwarfRegAlias<B9>;
def D10   : AArch64Reg<10, "d10", [S10], ["v10", ""]>, DwarfRegAlias<B10>;
def D11   : AArch64Reg<11, "d11", [S11], ["v11", ""]>, DwarfRegAlias<B11>;
def D12   : AArch64Reg<12, "d12", [S12], ["v12", ""]>, DwarfRegAlias<B12>;
def D13   : AArch64Reg<13, "d13", [S13], ["v13", ""]>, DwarfRegAlias<B13>;
def D14   : AArch64Reg<14, "d14", [S14], ["v14", ""]>, DwarfRegAlias<B14>;
def D15   : AArch64Reg<15, "d15", [S15], ["v15", ""]>, DwarfRegAlias<B15>;
def D16   : AArch64Reg<16, "d16", [S16], ["v16", ""]>, DwarfRegAlias<B16>;
def D17   : AArch64Reg<17, "d17", [S17], ["v17", ""]>, DwarfRegAlias<B17>;
def D18   : AArch64Reg<18, "d18", [S18], ["v18", ""]>, DwarfRegAlias<B18>;
def D19   : AArch64Reg<19, "d19", [S19], ["v19", ""]>, DwarfRegAlias<B19>;
def D20   : AArch64Reg<20, "d20", [S20], ["v20", ""]>, DwarfRegAlias<B20>;
def D21   : AArch64Reg<21, "d21", [S21], ["v21", ""]>, DwarfRegAlias<B21>;
def D22   : AArch64Reg<22, "d22", [S22], ["v22", ""]>, DwarfRegAlias<B22>;
def D23   : AArch64Reg<23, "d23", [S23], ["v23", ""]>, DwarfRegAlias<B23>;
def D24   : AArch64Reg<24, "d24", [S24], ["v24", ""]>, DwarfRegAlias<B24>;
def D25   : AArch64Reg<25, "d25", [S25], ["v25", ""]>, DwarfRegAlias<B25>;
def D26   : AArch64Reg<26, "d26", [S26], ["v26", ""]>, DwarfRegAlias<B26>;
def D27   : AArch64Reg<27, "d27", [S27], ["v27", ""]>, DwarfRegAlias<B27>;
def D28   : AArch64Reg<28, "d28", [S28], ["v28", ""]>, DwarfRegAlias<B28>;
def D29   : AArch64Reg<29, "d29", [S29], ["v29", ""]>, DwarfRegAlias<B29>;
def D30   : AArch64Reg<30, "d30", [S30], ["v30", ""]>, DwarfRegAlias<B30>;
def D31   : AArch64Reg<31, "d31", [S31], ["v31", ""]>, DwarfRegAlias<B31>;
}
// Full 128-bit SIMD&FP registers; each Qn contains Dn as its low half.
let SubRegIndices = [dsub], RegAltNameIndices = [vreg, vlist1] in {
def Q0    : AArch64Reg<0,   "q0", [D0], ["v0", ""]>, DwarfRegAlias<B0>;
def Q1    : AArch64Reg<1,   "q1", [D1], ["v1", ""]>, DwarfRegAlias<B1>;
def Q2    : AArch64Reg<2,   "q2", [D2], ["v2", ""]>, DwarfRegAlias<B2>;
def Q3    : AArch64Reg<3,   "q3", [D3], ["v3", ""]>, DwarfRegAlias<B3>;
def Q4    : AArch64Reg<4,   "q4", [D4], ["v4", ""]>, DwarfRegAlias<B4>;
def Q5    : AArch64Reg<5,   "q5", [D5], ["v5", ""]>, DwarfRegAlias<B5>;
def Q6    : AArch64Reg<6,   "q6", [D6], ["v6", ""]>, DwarfRegAlias<B6>;
def Q7    : AArch64Reg<7,   "q7", [D7], ["v7", ""]>, DwarfRegAlias<B7>;
def Q8    : AArch64Reg<8,   "q8", [D8], ["v8", ""]>, DwarfRegAlias<B8>;
def Q9    : AArch64Reg<9,   "q9", [D9], ["v9", ""]>, DwarfRegAlias<B9>;
def Q10   : AArch64Reg<10, "q10", [D10], ["v10", ""]>, DwarfRegAlias<B10>;
def Q11   : AArch64Reg<11, "q11", [D11], ["v11", ""]>, DwarfRegAlias<B11>;
def Q12   : AArch64Reg<12, "q12", [D12], ["v12", ""]>, DwarfRegAlias<B12>;
def Q13   : AArch64Reg<13, "q13", [D13], ["v13", ""]>, DwarfRegAlias<B13>;
def Q14   : AArch64Reg<14, "q14", [D14], ["v14", ""]>, DwarfRegAlias<B14>;
def Q15   : AArch64Reg<15, "q15", [D15], ["v15", ""]>, DwarfRegAlias<B15>;
def Q16   : AArch64Reg<16, "q16", [D16], ["v16", ""]>, DwarfRegAlias<B16>;
def Q17   : AArch64Reg<17, "q17", [D17], ["v17", ""]>, DwarfRegAlias<B17>;
def Q18   : AArch64Reg<18, "q18", [D18], ["v18", ""]>, DwarfRegAlias<B18>;
def Q19   : AArch64Reg<19, "q19", [D19], ["v19", ""]>, DwarfRegAlias<B19>;
def Q20   : AArch64Reg<20, "q20", [D20], ["v20", ""]>, DwarfRegAlias<B20>;
def Q21   : AArch64Reg<21, "q21", [D21], ["v21", ""]>, DwarfRegAlias<B21>;
def Q22   : AArch64Reg<22, "q22", [D22], ["v22", ""]>, DwarfRegAlias<B22>;
def Q23   : AArch64Reg<23, "q23", [D23], ["v23", ""]>, DwarfRegAlias<B23>;
def Q24   : AArch64Reg<24, "q24", [D24], ["v24", ""]>, DwarfRegAlias<B24>;
def Q25   : AArch64Reg<25, "q25", [D25], ["v25", ""]>, DwarfRegAlias<B25>;
def Q26   : AArch64Reg<26, "q26", [D26], ["v26", ""]>, DwarfRegAlias<B26>;
def Q27   : AArch64Reg<27, "q27", [D27], ["v27", ""]>, DwarfRegAlias<B27>;
def Q28   : AArch64Reg<28, "q28", [D28], ["v28", ""]>, DwarfRegAlias<B28>;
def Q29   : AArch64Reg<29, "q29", [D29], ["v29", ""]>, DwarfRegAlias<B29>;
def Q30   : AArch64Reg<30, "q30", [D30], ["v30", ""]>, DwarfRegAlias<B30>;
def Q31   : AArch64Reg<31, "q31", [D31], ["v31", ""]>, DwarfRegAlias<B31>;
}
// Scalar FP/SIMD register classes at each access width.
def FPR8  : RegisterClass<"AArch64", [untyped], 8, (sequence "B%u", 0, 31)> {
  let Size = 8;
}
def FPR16 : RegisterClass<"AArch64", [f16, bf16], 16, (sequence "H%u", 0, 31)> {
  let Size = 16;
}
// Lower 16 H registers only; some indexed-element instructions are limited
// to this range.
def FPR16_lo : RegisterClass<"AArch64", [f16], 16, (trunc FPR16, 16)> {
  let Size = 16;
}
def FPR32 : RegisterClass<"AArch64", [f32, i32], 32,(sequence "S%u", 0, 31)>;
def FPR64 : RegisterClass<"AArch64", [f64, i64, v2f32, v1f64, v8i8, v4i16, v2i32,
                                      v1i64, v4f16, v4bf16],
                                     64, (sequence "D%u", 0, 31)>;
def FPR64_lo : RegisterClass<"AArch64",
                             [v8i8, v4i16, v2i32, v1i64, v4f16, v4bf16, v2f32,
                              v1f64],
                             64, (trunc FPR64, 16)>;

// We don't (yet) have an f128 legal type, so don't use that here. We
// normalize 128-bit vectors to v2f64 for arg passing and such, so use
// that here.
def FPR128 : RegisterClass<"AArch64",
                           [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, f128,
                            v8f16, v8bf16],
                           128, (sequence "Q%u", 0, 31)>;

// The lower 16 vector registers.  Some instructions can only take registers
// in this range.
def FPR128_lo : RegisterClass<"AArch64",
                              [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, v8f16,
                               v8bf16],
                              128, (trunc FPR128, 16)>;
// Pairs, triples, and quads of 64-bit vector registers. The rotl offsets
// pair each Dn with its wrapping successors (Dn, Dn+1, ...), giving the
// consecutive-register tuples used by the structured load/store instructions.
def DSeqPairs : RegisterTuples<[dsub0, dsub1], [(rotl FPR64, 0), (rotl FPR64, 1)]>;
def DSeqTriples : RegisterTuples<[dsub0, dsub1, dsub2],
                                 [(rotl FPR64, 0), (rotl FPR64, 1),
                                  (rotl FPR64, 2)]>;
def DSeqQuads : RegisterTuples<[dsub0, dsub1, dsub2, dsub3],
                               [(rotl FPR64, 0), (rotl FPR64, 1),
                                (rotl FPR64, 2), (rotl FPR64, 3)]>;
def DD : RegisterClass<"AArch64", [untyped], 64, (add DSeqPairs)> {
  let Size = 128;
}
def DDD : RegisterClass<"AArch64", [untyped], 64, (add DSeqTriples)> {
  let Size = 192;
}
def DDDD : RegisterClass<"AArch64", [untyped], 64, (add DSeqQuads)> {
  let Size = 256;
}
  452. // Pairs, triples, and quads of 128-bit vector registers.
  453. def QSeqPairs : RegisterTuples<[qsub0, qsub1], [(rotl FPR128, 0), (rotl FPR128, 1)]>;
  454. def QSeqTriples : RegisterTuples<[qsub0, qsub1, qsub2],
  455. [(rotl FPR128, 0), (rotl FPR128, 1),
  456. (rotl FPR128, 2)]>;
  457. def QSeqQuads : RegisterTuples<[qsub0, qsub1, qsub2, qsub3],
  458. [(rotl FPR128, 0), (rotl FPR128, 1),
  459. (rotl FPR128, 2), (rotl FPR128, 3)]>;
  460. def QQ : RegisterClass<"AArch64", [untyped], 128, (add QSeqPairs)> {
  461. let Size = 256;
  462. }
  463. def QQQ : RegisterClass<"AArch64", [untyped], 128, (add QSeqTriples)> {
  464. let Size = 384;
  465. }
  466. def QQQQ : RegisterClass<"AArch64", [untyped], 128, (add QSeqQuads)> {
  467. let Size = 512;
  468. }
  469. // Vector operand versions of the FP registers. Alternate name printing and
  470. // assembler matching.
  471. def VectorReg64AsmOperand : AsmOperandClass {
  472. let Name = "VectorReg64";
  473. let PredicateMethod = "isNeonVectorReg";
  474. }
  475. def VectorReg128AsmOperand : AsmOperandClass {
  476. let Name = "VectorReg128";
  477. let PredicateMethod = "isNeonVectorReg";
  478. }
  479. def V64 : RegisterOperand<FPR64, "printVRegOperand"> {
  480. let ParserMatchClass = VectorReg64AsmOperand;
  481. }
  482. def V128 : RegisterOperand<FPR128, "printVRegOperand"> {
  483. let ParserMatchClass = VectorReg128AsmOperand;
  484. }
  485. def VectorRegLoAsmOperand : AsmOperandClass {
  486. let Name = "VectorRegLo";
  487. let PredicateMethod = "isNeonVectorRegLo";
  488. }
  489. def V64_lo : RegisterOperand<FPR64_lo, "printVRegOperand"> {
  490. let ParserMatchClass = VectorRegLoAsmOperand;
  491. }
  492. def V128_lo : RegisterOperand<FPR128_lo, "printVRegOperand"> {
  493. let ParserMatchClass = VectorRegLoAsmOperand;
  494. }
  495. class TypedVecListAsmOperand<int count, string vecty, int lanes, int eltsize>
  496. : AsmOperandClass {
  497. let Name = "TypedVectorList" # count # "_" # lanes # eltsize;
  498. let PredicateMethod
  499. = "isTypedVectorList<RegKind::NeonVector, " # count # ", " # lanes # ", " # eltsize # ">";
  500. let RenderMethod = "addVectorListOperands<" # vecty # ", " # count # ">";
  501. }
  502. class TypedVecListRegOperand<RegisterClass Reg, int lanes, string eltsize>
  503. : RegisterOperand<Reg, "printTypedVectorList<" # lanes # ", '"
  504. # eltsize # "'>">;
  505. multiclass VectorList<int count, RegisterClass Reg64, RegisterClass Reg128> {
  506. // With implicit types (probably on instruction instead). E.g. { v0, v1 }
  507. def _64AsmOperand : AsmOperandClass {
  508. let Name = NAME # "64";
  509. let PredicateMethod = "isImplicitlyTypedVectorList<RegKind::NeonVector, " # count # ">";
  510. let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_DReg, " # count # ">";
  511. }
  512. def "64" : RegisterOperand<Reg64, "printImplicitlyTypedVectorList"> {
  513. let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_64AsmOperand");
  514. }
  515. def _128AsmOperand : AsmOperandClass {
  516. let Name = NAME # "128";
  517. let PredicateMethod = "isImplicitlyTypedVectorList<RegKind::NeonVector, " # count # ">";
  518. let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_QReg, " # count # ">";
  519. }
  520. def "128" : RegisterOperand<Reg128, "printImplicitlyTypedVectorList"> {
  521. let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_128AsmOperand");
  522. }
  523. // 64-bit register lists with explicit type.
  524. // { v0.8b, v1.8b }
  525. def _8bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 8, 8>;
  526. def "8b" : TypedVecListRegOperand<Reg64, 8, "b"> {
  527. let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_8bAsmOperand");
  528. }
  529. // { v0.4h, v1.4h }
  530. def _4hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 4, 16>;
  531. def "4h" : TypedVecListRegOperand<Reg64, 4, "h"> {
  532. let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_4hAsmOperand");
  533. }
  534. // { v0.2s, v1.2s }
  535. def _2sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 2, 32>;
  536. def "2s" : TypedVecListRegOperand<Reg64, 2, "s"> {
  537. let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_2sAsmOperand");
  538. }
  539. // { v0.1d, v1.1d }
  540. def _1dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 1, 64>;
  541. def "1d" : TypedVecListRegOperand<Reg64, 1, "d"> {
  542. let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_1dAsmOperand");
  543. }
  544. // 128-bit register lists with explicit type
  545. // { v0.16b, v1.16b }
  546. def _16bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 16, 8>;
  547. def "16b" : TypedVecListRegOperand<Reg128, 16, "b"> {
  548. let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_16bAsmOperand");
  549. }
  550. // { v0.8h, v1.8h }
  551. def _8hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 8, 16>;
  552. def "8h" : TypedVecListRegOperand<Reg128, 8, "h"> {
  553. let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_8hAsmOperand");
  554. }
  555. // { v0.4s, v1.4s }
  556. def _4sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 4, 32>;
  557. def "4s" : TypedVecListRegOperand<Reg128, 4, "s"> {
  558. let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_4sAsmOperand");
  559. }
  560. // { v0.2d, v1.2d }
  561. def _2dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 2, 64>;
  562. def "2d" : TypedVecListRegOperand<Reg128, 2, "d"> {
  563. let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_2dAsmOperand");
  564. }
  565. // { v0.b, v1.b }
  566. def _bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 8>;
  567. def "b" : TypedVecListRegOperand<Reg128, 0, "b"> {
  568. let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_bAsmOperand");
  569. }
  570. // { v0.h, v1.h }
  571. def _hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 16>;
  572. def "h" : TypedVecListRegOperand<Reg128, 0, "h"> {
  573. let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_hAsmOperand");
  574. }
  575. // { v0.s, v1.s }
  576. def _sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 32>;
  577. def "s" : TypedVecListRegOperand<Reg128, 0, "s"> {
  578. let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_sAsmOperand");
  579. }
  580. // { v0.d, v1.d }
  581. def _dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 64>;
  582. def "d" : TypedVecListRegOperand<Reg128, 0, "d"> {
  583. let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_dAsmOperand");
  584. }
  585. }
  586. defm VecListOne : VectorList<1, FPR64, FPR128>;
  587. defm VecListTwo : VectorList<2, DD, QQ>;
  588. defm VecListThree : VectorList<3, DDD, QQQ>;
  589. defm VecListFour : VectorList<4, DDDD, QQQQ>;
  590. class FPRAsmOperand<string RC> : AsmOperandClass {
  591. let Name = "FPRAsmOperand" # RC;
  592. let PredicateMethod = "isGPR64<AArch64::" # RC # "RegClassID>";
  593. let RenderMethod = "addRegOperands";
  594. }
  595. // Register operand versions of the scalar FP registers.
  596. def FPR8Op : RegisterOperand<FPR8, "printOperand"> {
  597. let ParserMatchClass = FPRAsmOperand<"FPR8">;
  598. }
  599. def FPR16Op : RegisterOperand<FPR16, "printOperand"> {
  600. let ParserMatchClass = FPRAsmOperand<"FPR16">;
  601. }
  602. def FPR16Op_lo : RegisterOperand<FPR16_lo, "printOperand"> {
  603. let ParserMatchClass = FPRAsmOperand<"FPR16_lo">;
  604. }
  605. def FPR32Op : RegisterOperand<FPR32, "printOperand"> {
  606. let ParserMatchClass = FPRAsmOperand<"FPR32">;
  607. }
  608. def FPR64Op : RegisterOperand<FPR64, "printOperand"> {
  609. let ParserMatchClass = FPRAsmOperand<"FPR64">;
  610. }
  611. def FPR128Op : RegisterOperand<FPR128, "printOperand"> {
  612. let ParserMatchClass = FPRAsmOperand<"FPR128">;
  613. }
  614. //===----------------------------------------------------------------------===//
  615. // ARMv8.1a atomic CASP register operands
  616. def WSeqPairs : RegisterTuples<[sube32, subo32],
  617. [(decimate (rotl GPR32, 0), 2),
  618. (decimate (rotl GPR32, 1), 2)]>;
  619. def XSeqPairs : RegisterTuples<[sube64, subo64],
  620. [(decimate (rotl GPR64, 0), 2),
  621. (decimate (rotl GPR64, 1), 2)]>;
  622. def WSeqPairsClass : RegisterClass<"AArch64", [untyped], 32,
  623. (add WSeqPairs)>{
  624. let Size = 64;
  625. }
  626. def XSeqPairsClass : RegisterClass<"AArch64", [untyped], 64,
  627. (add XSeqPairs)>{
  628. let Size = 128;
  629. }
  630. let RenderMethod = "addRegOperands", ParserMethod="tryParseGPRSeqPair" in {
  631. def WSeqPairsAsmOperandClass : AsmOperandClass { let Name = "WSeqPair"; }
  632. def XSeqPairsAsmOperandClass : AsmOperandClass { let Name = "XSeqPair"; }
  633. }
  634. def WSeqPairClassOperand :
  635. RegisterOperand<WSeqPairsClass, "printGPRSeqPairsClassOperand<32>"> {
  636. let ParserMatchClass = WSeqPairsAsmOperandClass;
  637. }
  638. def XSeqPairClassOperand :
  639. RegisterOperand<XSeqPairsClass, "printGPRSeqPairsClassOperand<64>"> {
  640. let ParserMatchClass = XSeqPairsAsmOperandClass;
  641. }
  642. // Reuse the parsing and register numbers from XSeqPairs, but encoding is different.
  643. def MrrsMssrPairClassOperand :
  644. RegisterOperand<XSeqPairsClass, "printGPRSeqPairsClassOperand<64>"> {
  645. let ParserMatchClass = XSeqPairsAsmOperandClass;
  646. }
  647. def SyspXzrPairOperandMatcherClass : AsmOperandClass {
  648. let Name = "SyspXzrPair";
  649. let RenderMethod = "addSyspXzrPairOperand";
  650. let ParserMethod = "tryParseSyspXzrPair";
  651. }
  652. def SyspXzrPairOperand :
  653. RegisterOperand<GPR64, "printSyspXzrPair"> { // needed to allow alias with XZR operand
  654. let ParserMatchClass = SyspXzrPairOperandMatcherClass;
  655. }
  656. //===----- END: v8.1a atomic CASP register operands -----------------------===//
  657. //===----------------------------------------------------------------------===//
  658. // Armv8.7a accelerator extension register operands: 8 consecutive GPRs
  659. // starting with an even one
  660. let Namespace = "AArch64" in {
  661. foreach i = 0-7 in
  662. def "x8sub_"#i : SubRegIndex<64, !mul(64, i)>;
  663. }
  664. def Tuples8X : RegisterTuples<
  665. !foreach(i, [0,1,2,3,4,5,6,7], !cast<SubRegIndex>("x8sub_"#i)),
  666. !foreach(i, [0,1,2,3,4,5,6,7], (trunc (decimate (rotl GPR64, i), 2), 12))>;
  667. def GPR64x8Class : RegisterClass<"AArch64", [i64x8], 512, (trunc Tuples8X, 12)> {
  668. let Size = 512;
  669. }
  670. def GPR64x8AsmOp : AsmOperandClass {
  671. let Name = "GPR64x8";
  672. let ParserMethod = "tryParseGPR64x8";
  673. let RenderMethod = "addRegOperands";
  674. }
  675. def GPR64x8 : RegisterOperand<GPR64x8Class, "printGPR64x8"> {
  676. let ParserMatchClass = GPR64x8AsmOp;
  677. let PrintMethod = "printGPR64x8";
  678. }
  679. //===----- END: v8.7a accelerator extension register operands -------------===//
  680. // SVE predicate registers
  681. def P0 : AArch64Reg<0, "p0">, DwarfRegNum<[48]>;
  682. def P1 : AArch64Reg<1, "p1">, DwarfRegNum<[49]>;
  683. def P2 : AArch64Reg<2, "p2">, DwarfRegNum<[50]>;
  684. def P3 : AArch64Reg<3, "p3">, DwarfRegNum<[51]>;
  685. def P4 : AArch64Reg<4, "p4">, DwarfRegNum<[52]>;
  686. def P5 : AArch64Reg<5, "p5">, DwarfRegNum<[53]>;
  687. def P6 : AArch64Reg<6, "p6">, DwarfRegNum<[54]>;
  688. def P7 : AArch64Reg<7, "p7">, DwarfRegNum<[55]>;
  689. def P8 : AArch64Reg<8, "p8">, DwarfRegNum<[56]>;
  690. def P9 : AArch64Reg<9, "p9">, DwarfRegNum<[57]>;
  691. def P10 : AArch64Reg<10, "p10">, DwarfRegNum<[58]>;
  692. def P11 : AArch64Reg<11, "p11">, DwarfRegNum<[59]>;
  693. def P12 : AArch64Reg<12, "p12">, DwarfRegNum<[60]>;
  694. def P13 : AArch64Reg<13, "p13">, DwarfRegNum<[61]>;
  695. def P14 : AArch64Reg<14, "p14">, DwarfRegNum<[62]>;
  696. def P15 : AArch64Reg<15, "p15">, DwarfRegNum<[63]>;
  697. // The part of SVE registers that don't overlap Neon registers.
  698. // These are only used as part of clobber lists.
  699. def Z0_HI : AArch64Reg<0, "z0_hi">;
  700. def Z1_HI : AArch64Reg<1, "z1_hi">;
  701. def Z2_HI : AArch64Reg<2, "z2_hi">;
  702. def Z3_HI : AArch64Reg<3, "z3_hi">;
  703. def Z4_HI : AArch64Reg<4, "z4_hi">;
  704. def Z5_HI : AArch64Reg<5, "z5_hi">;
  705. def Z6_HI : AArch64Reg<6, "z6_hi">;
  706. def Z7_HI : AArch64Reg<7, "z7_hi">;
  707. def Z8_HI : AArch64Reg<8, "z8_hi">;
  708. def Z9_HI : AArch64Reg<9, "z9_hi">;
  709. def Z10_HI : AArch64Reg<10, "z10_hi">;
  710. def Z11_HI : AArch64Reg<11, "z11_hi">;
  711. def Z12_HI : AArch64Reg<12, "z12_hi">;
  712. def Z13_HI : AArch64Reg<13, "z13_hi">;
  713. def Z14_HI : AArch64Reg<14, "z14_hi">;
  714. def Z15_HI : AArch64Reg<15, "z15_hi">;
  715. def Z16_HI : AArch64Reg<16, "z16_hi">;
  716. def Z17_HI : AArch64Reg<17, "z17_hi">;
  717. def Z18_HI : AArch64Reg<18, "z18_hi">;
  718. def Z19_HI : AArch64Reg<19, "z19_hi">;
  719. def Z20_HI : AArch64Reg<20, "z20_hi">;
  720. def Z21_HI : AArch64Reg<21, "z21_hi">;
  721. def Z22_HI : AArch64Reg<22, "z22_hi">;
  722. def Z23_HI : AArch64Reg<23, "z23_hi">;
  723. def Z24_HI : AArch64Reg<24, "z24_hi">;
  724. def Z25_HI : AArch64Reg<25, "z25_hi">;
  725. def Z26_HI : AArch64Reg<26, "z26_hi">;
  726. def Z27_HI : AArch64Reg<27, "z27_hi">;
  727. def Z28_HI : AArch64Reg<28, "z28_hi">;
  728. def Z29_HI : AArch64Reg<29, "z29_hi">;
  729. def Z30_HI : AArch64Reg<30, "z30_hi">;
  730. def Z31_HI : AArch64Reg<31, "z31_hi">;
  731. // SVE variable-size vector registers
  732. let SubRegIndices = [zsub,zsub_hi] in {
  733. def Z0 : AArch64Reg<0, "z0", [Q0, Z0_HI]>, DwarfRegNum<[96]>;
  734. def Z1 : AArch64Reg<1, "z1", [Q1, Z1_HI]>, DwarfRegNum<[97]>;
  735. def Z2 : AArch64Reg<2, "z2", [Q2, Z2_HI]>, DwarfRegNum<[98]>;
  736. def Z3 : AArch64Reg<3, "z3", [Q3, Z3_HI]>, DwarfRegNum<[99]>;
  737. def Z4 : AArch64Reg<4, "z4", [Q4, Z4_HI]>, DwarfRegNum<[100]>;
  738. def Z5 : AArch64Reg<5, "z5", [Q5, Z5_HI]>, DwarfRegNum<[101]>;
  739. def Z6 : AArch64Reg<6, "z6", [Q6, Z6_HI]>, DwarfRegNum<[102]>;
  740. def Z7 : AArch64Reg<7, "z7", [Q7, Z7_HI]>, DwarfRegNum<[103]>;
  741. def Z8 : AArch64Reg<8, "z8", [Q8, Z8_HI]>, DwarfRegNum<[104]>;
  742. def Z9 : AArch64Reg<9, "z9", [Q9, Z9_HI]>, DwarfRegNum<[105]>;
  743. def Z10 : AArch64Reg<10, "z10", [Q10, Z10_HI]>, DwarfRegNum<[106]>;
  744. def Z11 : AArch64Reg<11, "z11", [Q11, Z11_HI]>, DwarfRegNum<[107]>;
  745. def Z12 : AArch64Reg<12, "z12", [Q12, Z12_HI]>, DwarfRegNum<[108]>;
  746. def Z13 : AArch64Reg<13, "z13", [Q13, Z13_HI]>, DwarfRegNum<[109]>;
  747. def Z14 : AArch64Reg<14, "z14", [Q14, Z14_HI]>, DwarfRegNum<[110]>;
  748. def Z15 : AArch64Reg<15, "z15", [Q15, Z15_HI]>, DwarfRegNum<[111]>;
  749. def Z16 : AArch64Reg<16, "z16", [Q16, Z16_HI]>, DwarfRegNum<[112]>;
  750. def Z17 : AArch64Reg<17, "z17", [Q17, Z17_HI]>, DwarfRegNum<[113]>;
  751. def Z18 : AArch64Reg<18, "z18", [Q18, Z18_HI]>, DwarfRegNum<[114]>;
  752. def Z19 : AArch64Reg<19, "z19", [Q19, Z19_HI]>, DwarfRegNum<[115]>;
  753. def Z20 : AArch64Reg<20, "z20", [Q20, Z20_HI]>, DwarfRegNum<[116]>;
  754. def Z21 : AArch64Reg<21, "z21", [Q21, Z21_HI]>, DwarfRegNum<[117]>;
  755. def Z22 : AArch64Reg<22, "z22", [Q22, Z22_HI]>, DwarfRegNum<[118]>;
  756. def Z23 : AArch64Reg<23, "z23", [Q23, Z23_HI]>, DwarfRegNum<[119]>;
  757. def Z24 : AArch64Reg<24, "z24", [Q24, Z24_HI]>, DwarfRegNum<[120]>;
  758. def Z25 : AArch64Reg<25, "z25", [Q25, Z25_HI]>, DwarfRegNum<[121]>;
  759. def Z26 : AArch64Reg<26, "z26", [Q26, Z26_HI]>, DwarfRegNum<[122]>;
  760. def Z27 : AArch64Reg<27, "z27", [Q27, Z27_HI]>, DwarfRegNum<[123]>;
  761. def Z28 : AArch64Reg<28, "z28", [Q28, Z28_HI]>, DwarfRegNum<[124]>;
  762. def Z29 : AArch64Reg<29, "z29", [Q29, Z29_HI]>, DwarfRegNum<[125]>;
  763. def Z30 : AArch64Reg<30, "z30", [Q30, Z30_HI]>, DwarfRegNum<[126]>;
  764. def Z31 : AArch64Reg<31, "z31", [Q31, Z31_HI]>, DwarfRegNum<[127]>;
  765. }
  766. // Enum describing the element size for destructive
  767. // operations.
  768. class ElementSizeEnum<bits<3> val> {
  769. bits<3> Value = val;
  770. }
  771. def ElementSizeNone : ElementSizeEnum<0>;
  772. def ElementSizeB : ElementSizeEnum<1>;
  773. def ElementSizeH : ElementSizeEnum<2>;
  774. def ElementSizeS : ElementSizeEnum<3>;
  775. def ElementSizeD : ElementSizeEnum<4>;
  776. def ElementSizeQ : ElementSizeEnum<5>; // Unused
  777. class SVERegOp <string Suffix, AsmOperandClass C,
  778. ElementSizeEnum Size,
  779. RegisterClass RC> : RegisterOperand<RC> {
  780. ElementSizeEnum ElementSize;
  781. let ElementSize = Size;
  782. let PrintMethod = !if(!eq(Suffix, ""),
  783. "printSVERegOp<>",
  784. "printSVERegOp<'" # Suffix # "'>");
  785. let ParserMatchClass = C;
  786. }
  787. class PPRRegOp <string Suffix, AsmOperandClass C, ElementSizeEnum Size,
  788. RegisterClass RC> : SVERegOp<Suffix, C, Size, RC> {}
  789. class ZPRRegOp <string Suffix, AsmOperandClass C, ElementSizeEnum Size,
  790. RegisterClass RC> : SVERegOp<Suffix, C, Size, RC> {}
  791. //******************************************************************************
  792. // SVE predicate register classes.
  793. class PPRClass<int firstreg, int lastreg> : RegisterClass<
  794. "AArch64",
  795. [ nxv16i1, nxv8i1, nxv4i1, nxv2i1, nxv1i1 ], 16,
  796. (sequence "P%u", firstreg, lastreg)> {
  797. let Size = 16;
  798. }
  799. def PPR : PPRClass<0, 15>;
  800. def PPR_3b : PPRClass<0, 7>; // Restricted 3 bit SVE predicate register class.
  801. def PPR_p8to15 : PPRClass<8, 15>;
  802. class PPRAsmOperand <string name, string RegClass, int Width>: AsmOperandClass {
  803. let Name = "SVE" # name # "Reg";
  804. let PredicateMethod = "isSVEPredicateVectorRegOfWidth<"
  805. # Width # ", " # "AArch64::" # RegClass # "RegClassID>";
  806. let DiagnosticType = "InvalidSVE" # name # "Reg";
  807. let RenderMethod = "addRegOperands";
  808. let ParserMethod = "tryParseSVEPredicateVector<RegKind::SVEPredicateVector>";
  809. }
  810. def PPRAsmOpAny : PPRAsmOperand<"PredicateAny", "PPR", 0>;
  811. def PPRAsmOp8 : PPRAsmOperand<"PredicateB", "PPR", 8>;
  812. def PPRAsmOp16 : PPRAsmOperand<"PredicateH", "PPR", 16>;
  813. def PPRAsmOp32 : PPRAsmOperand<"PredicateS", "PPR", 32>;
  814. def PPRAsmOp64 : PPRAsmOperand<"PredicateD", "PPR", 64>;
  815. def PPRAny : PPRRegOp<"", PPRAsmOpAny, ElementSizeNone, PPR>;
  816. def PPR8 : PPRRegOp<"b", PPRAsmOp8, ElementSizeB, PPR>;
  817. def PPR16 : PPRRegOp<"h", PPRAsmOp16, ElementSizeH, PPR>;
  818. def PPR32 : PPRRegOp<"s", PPRAsmOp32, ElementSizeS, PPR>;
  819. def PPR64 : PPRRegOp<"d", PPRAsmOp64, ElementSizeD, PPR>;
  820. def PPRAsmOp3bAny : PPRAsmOperand<"Predicate3bAny", "PPR_3b", 0>;
  821. def PPR3bAny : PPRRegOp<"", PPRAsmOp3bAny, ElementSizeNone, PPR_3b>;
  822. // SVE predicate-as-counter operand
  823. class PNRAsmOperand<string name, string RegClass, int Width>
  824. : PPRAsmOperand<name, RegClass, Width> {
  825. let PredicateMethod = "isSVEPredicateAsCounterRegOfWidth<"
  826. # Width # ", " # "AArch64::"
  827. # RegClass # "RegClassID>";
  828. let DiagnosticType = "InvalidSVE" # name # "Reg";
  829. let ParserMethod = "tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>";
  830. }
  831. class PNRRegOp<string Suffix, AsmOperandClass C, int EltSize, RegisterClass RC>
  832. : PPRRegOp<Suffix, C, ElementSizeNone, RC> {
  833. let PrintMethod = "printPredicateAsCounter<" # EltSize # ">";
  834. }
  835. def PNRAsmOpAny: PNRAsmOperand<"PNPredicateAny", "PPR", 0>;
  836. def PNRAsmOp8 : PNRAsmOperand<"PNPredicateB", "PPR", 8>;
  837. def PNRAsmOp16 : PNRAsmOperand<"PNPredicateH", "PPR", 16>;
  838. def PNRAsmOp32 : PNRAsmOperand<"PNPredicateS", "PPR", 32>;
  839. def PNRAsmOp64 : PNRAsmOperand<"PNPredicateD", "PPR", 64>;
  840. def PNRAny : PNRRegOp<"", PNRAsmOpAny, 0, PPR>;
  841. def PNR8 : PNRRegOp<"b", PNRAsmOp8, 8, PPR>;
  842. def PNR16 : PNRRegOp<"h", PNRAsmOp16, 16, PPR>;
  843. def PNR32 : PNRRegOp<"s", PNRAsmOp32, 32, PPR>;
  844. def PNR64 : PNRRegOp<"d", PNRAsmOp64, 64, PPR>;
  845. class PNRP8to15RegOp<string Suffix, AsmOperandClass C, int EltSize, RegisterClass RC>
  846. : PPRRegOp<Suffix, C, ElementSizeNone, RC> {
  847. let PrintMethod = "printPredicateAsCounter<" # EltSize # ">";
  848. let EncoderMethod = "EncodePPR_p8to15";
  849. let DecoderMethod = "DecodePPR_p8to15RegisterClass";
  850. }
  851. def PNRAsmAny_p8to15 : PNRAsmOperand<"PNPredicateAny_p8to15", "PPR_p8to15", 0>;
  852. def PNRAsmOp8_p8to15 : PNRAsmOperand<"PNPredicateB_p8to15", "PPR_p8to15", 8>;
  853. def PNRAsmOp16_p8to15 : PNRAsmOperand<"PNPredicateH_p8to15", "PPR_p8to15", 16>;
  854. def PNRAsmOp32_p8to15 : PNRAsmOperand<"PNPredicateS_p8to15", "PPR_p8to15", 32>;
  855. def PNRAsmOp64_p8to15 : PNRAsmOperand<"PNPredicateD_p8to15", "PPR_p8to15", 64>;
  856. def PNRAny_p8to15 : PNRP8to15RegOp<"", PNRAsmAny_p8to15, 0, PPR_p8to15>;
  857. def PNR8_p8to15 : PNRP8to15RegOp<"b", PNRAsmOp8_p8to15, 8, PPR_p8to15>;
  858. def PNR16_p8to15 : PNRP8to15RegOp<"h", PNRAsmOp16_p8to15, 16, PPR_p8to15>;
  859. def PNR32_p8to15 : PNRP8to15RegOp<"s", PNRAsmOp32_p8to15, 32, PPR_p8to15>;
  860. def PNR64_p8to15 : PNRP8to15RegOp<"d", PNRAsmOp64_p8to15, 64, PPR_p8to15>;
  861. let Namespace = "AArch64" in {
  862. def psub0 : SubRegIndex<16, -1>;
  863. def psub1 : SubRegIndex<16, -1>;
  864. }
  865. // Pairs of SVE predicate vector registers.
  866. def PSeqPairs : RegisterTuples<[psub0, psub1], [(rotl PPR, 0), (rotl PPR, 1)]>;
  867. def PPR2 : RegisterClass<"AArch64", [untyped], 16, (add PSeqPairs)> {
  868. let Size = 32;
  869. }
  870. class PPRVectorList<int ElementWidth, int NumRegs> : AsmOperandClass {
  871. let Name = "SVEPredicateList" # NumRegs # "x" # ElementWidth;
  872. let ParserMethod = "tryParseVectorList<RegKind::SVEPredicateVector>";
  873. let PredicateMethod = "isTypedVectorList<RegKind::SVEPredicateVector, "
  874. # NumRegs #", 0, "#ElementWidth #">";
  875. let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_PReg, "
  876. # NumRegs #">";
  877. }
  878. def PP_b : RegisterOperand<PPR2, "printTypedVectorList<0,'b'>"> {
  879. let ParserMatchClass = PPRVectorList<8, 2>;
  880. }
  881. def PP_h : RegisterOperand<PPR2, "printTypedVectorList<0,'h'>"> {
  882. let ParserMatchClass = PPRVectorList<16, 2>;
  883. }
  884. def PP_s : RegisterOperand<PPR2, "printTypedVectorList<0,'s'>"> {
  885. let ParserMatchClass = PPRVectorList<32, 2>;
  886. }
  887. def PP_d : RegisterOperand<PPR2, "printTypedVectorList<0,'d'>"> {
  888. let ParserMatchClass = PPRVectorList<64, 2>;
  889. }
  890. // SVE2 multiple-of-2 multi-predicate-vector operands
  891. def PPR2Mul2 : RegisterClass<"AArch64", [untyped], 16, (add (decimate PSeqPairs, 2))> {
  892. let Size = 32;
  893. }
  894. class PPRVectorListMul<int ElementWidth, int NumRegs> : PPRVectorList<ElementWidth, NumRegs> {
  895. let Name = "SVEPredicateListMul" # NumRegs # "x" # ElementWidth;
  896. let DiagnosticType = "Invalid" # Name;
  897. let PredicateMethod =
  898. "isTypedVectorListMultiple<RegKind::SVEPredicateVector, " # NumRegs # ", 0, "
  899. # ElementWidth # ">";
  900. }
  901. let EncoderMethod = "EncodeRegAsMultipleOf<2>",
  902. DecoderMethod = "DecodePPR2Mul2RegisterClass" in {
  903. def PP_b_mul_r : RegisterOperand<PPR2Mul2, "printTypedVectorList<0,'b'>"> {
  904. let ParserMatchClass = PPRVectorListMul<8, 2>;
  905. }
  906. def PP_h_mul_r : RegisterOperand<PPR2Mul2, "printTypedVectorList<0,'h'>"> {
  907. let ParserMatchClass = PPRVectorListMul<16, 2>;
  908. }
  909. def PP_s_mul_r : RegisterOperand<PPR2Mul2, "printTypedVectorList<0,'s'>"> {
  910. let ParserMatchClass = PPRVectorListMul<32, 2>;
  911. }
  912. def PP_d_mul_r : RegisterOperand<PPR2Mul2, "printTypedVectorList<0,'d'>"> {
  913. let ParserMatchClass = PPRVectorListMul<64, 2>;
  914. }
  915. } // end let EncoderMethod/DecoderMethod
  916. //******************************************************************************
  917. // SVE vector register classes
  918. class ZPRClass<int lastreg> : RegisterClass<"AArch64",
  919. [nxv16i8, nxv8i16, nxv4i32, nxv2i64,
  920. nxv2f16, nxv4f16, nxv8f16,
  921. nxv2bf16, nxv4bf16, nxv8bf16,
  922. nxv2f32, nxv4f32,
  923. nxv2f64],
  924. 128, (sequence "Z%u", 0, lastreg)> {
  925. let Size = 128;
  926. }
  927. def ZPR : ZPRClass<31>;
  928. def ZPR_4b : ZPRClass<15>; // Restricted 4 bit SVE vector register class.
  929. def ZPR_3b : ZPRClass<7>; // Restricted 3 bit SVE vector register class.
  930. class ZPRAsmOperand<string name, int Width, string RegClassSuffix = "">
  931. : AsmOperandClass {
  932. let Name = "SVE" # name # "Reg";
  933. let PredicateMethod = "isSVEDataVectorRegOfWidth<"
  934. # Width # ", AArch64::ZPR"
  935. # RegClassSuffix # "RegClassID>";
  936. let RenderMethod = "addRegOperands";
  937. let DiagnosticType = "InvalidZPR" # RegClassSuffix # Width;
  938. let ParserMethod = "tryParseSVEDataVector<false, "
  939. # !if(!eq(Width, 0), "false", "true") # ">";
  940. }
  941. def ZPRAsmOpAny : ZPRAsmOperand<"VectorAny", 0>;
  942. def ZPRAsmOp8 : ZPRAsmOperand<"VectorB", 8>;
  943. def ZPRAsmOp16 : ZPRAsmOperand<"VectorH", 16>;
  944. def ZPRAsmOp32 : ZPRAsmOperand<"VectorS", 32>;
  945. def ZPRAsmOp64 : ZPRAsmOperand<"VectorD", 64>;
  946. def ZPRAsmOp128 : ZPRAsmOperand<"VectorQ", 128>;
  947. def ZPRAny : ZPRRegOp<"", ZPRAsmOpAny, ElementSizeNone, ZPR>;
  948. def ZPR8 : ZPRRegOp<"b", ZPRAsmOp8, ElementSizeB, ZPR>;
  949. def ZPR16 : ZPRRegOp<"h", ZPRAsmOp16, ElementSizeH, ZPR>;
  950. def ZPR32 : ZPRRegOp<"s", ZPRAsmOp32, ElementSizeS, ZPR>;
  951. def ZPR64 : ZPRRegOp<"d", ZPRAsmOp64, ElementSizeD, ZPR>;
  952. def ZPR128 : ZPRRegOp<"q", ZPRAsmOp128, ElementSizeQ, ZPR>;
  953. def ZPRAsmOp3b8 : ZPRAsmOperand<"Vector3bB", 8, "_3b">;
  954. def ZPRAsmOp3b16 : ZPRAsmOperand<"Vector3bH", 16, "_3b">;
  955. def ZPRAsmOp3b32 : ZPRAsmOperand<"Vector3bS", 32, "_3b">;
  956. def ZPR3b8 : ZPRRegOp<"b", ZPRAsmOp3b8, ElementSizeB, ZPR_3b>;
  957. def ZPR3b16 : ZPRRegOp<"h", ZPRAsmOp3b16, ElementSizeH, ZPR_3b>;
  958. def ZPR3b32 : ZPRRegOp<"s", ZPRAsmOp3b32, ElementSizeS, ZPR_3b>;
  959. def ZPRAsmOp4b8 : ZPRAsmOperand<"Vector4bB", 8, "_4b">;
  960. def ZPRAsmOp4b16 : ZPRAsmOperand<"Vector4bH", 16, "_4b">;
  961. def ZPRAsmOp4b32 : ZPRAsmOperand<"Vector4bS", 32, "_4b">;
  962. def ZPRAsmOp4b64 : ZPRAsmOperand<"Vector4bD", 64, "_4b">;
  963. def ZPR4b8 : ZPRRegOp<"b", ZPRAsmOp4b8, ElementSizeB, ZPR_4b>;
  964. def ZPR4b16 : ZPRRegOp<"h", ZPRAsmOp4b16, ElementSizeH, ZPR_4b>;
  965. def ZPR4b32 : ZPRRegOp<"s", ZPRAsmOp4b32, ElementSizeS, ZPR_4b>;
  966. def ZPR4b64 : ZPRRegOp<"d", ZPRAsmOp4b64, ElementSizeD, ZPR_4b>;
  967. class FPRasZPR<int Width> : AsmOperandClass{
  968. let Name = "FPR" # Width # "asZPR";
  969. let PredicateMethod = "isFPRasZPR<AArch64::FPR" # Width # "RegClassID>";
  970. let RenderMethod = "addFPRasZPRRegOperands<" # Width # ">";
  971. }
  972. class FPRasZPROperand<int Width> : RegisterOperand<ZPR> {
  973. let ParserMatchClass = FPRasZPR<Width>;
  974. let PrintMethod = "printZPRasFPR<" # Width # ">";
  975. }
  976. def FPR8asZPR : FPRasZPROperand<8>;
  977. def FPR16asZPR : FPRasZPROperand<16>;
  978. def FPR32asZPR : FPRasZPROperand<32>;
  979. def FPR64asZPR : FPRasZPROperand<64>;
  980. def FPR128asZPR : FPRasZPROperand<128>;
  981. let Namespace = "AArch64" in {
  982. def zsub0 : SubRegIndex<128, -1>;
  983. def zsub1 : SubRegIndex<128, -1>;
  984. def zsub2 : SubRegIndex<128, -1>;
  985. def zsub3 : SubRegIndex<128, -1>;
  986. }
  987. // Pairs, triples, and quads of SVE vector registers.
  988. def ZSeqPairs : RegisterTuples<[zsub0, zsub1], [(rotl ZPR, 0), (rotl ZPR, 1)]>;
  989. def ZSeqTriples : RegisterTuples<[zsub0, zsub1, zsub2], [(rotl ZPR, 0), (rotl ZPR, 1), (rotl ZPR, 2)]>;
  990. def ZSeqQuads : RegisterTuples<[zsub0, zsub1, zsub2, zsub3], [(rotl ZPR, 0), (rotl ZPR, 1), (rotl ZPR, 2), (rotl ZPR, 3)]>;
  991. def ZPR2 : RegisterClass<"AArch64", [untyped], 128, (add ZSeqPairs)> {
  992. let Size = 256;
  993. }
  994. def ZPR3 : RegisterClass<"AArch64", [untyped], 128, (add ZSeqTriples)> {
  995. let Size = 384;
  996. }
  997. def ZPR4 : RegisterClass<"AArch64", [untyped], 128, (add ZSeqQuads)> {
  998. let Size = 512;
  999. }
  1000. class ZPRVectorList<int ElementWidth, int NumRegs> : AsmOperandClass {
  1001. let Name = "SVEVectorList" # NumRegs # ElementWidth;
  1002. let ParserMethod = "tryParseVectorList<RegKind::SVEDataVector>";
  1003. let PredicateMethod =
  1004. "isTypedVectorList<RegKind::SVEDataVector, " #NumRegs #", 0, " #ElementWidth #">";
  1005. let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_ZReg, " # NumRegs # ">";
  1006. }
  1007. def Z_b : RegisterOperand<ZPR, "printTypedVectorList<0,'b'>"> {
  1008. let ParserMatchClass = ZPRVectorList<8, 1>;
  1009. }
  1010. def Z_h : RegisterOperand<ZPR, "printTypedVectorList<0,'h'>"> {
  1011. let ParserMatchClass = ZPRVectorList<16, 1>;
  1012. }
  1013. def Z_s : RegisterOperand<ZPR, "printTypedVectorList<0,'s'>"> {
  1014. let ParserMatchClass = ZPRVectorList<32, 1>;
  1015. }
  1016. def Z_d : RegisterOperand<ZPR, "printTypedVectorList<0,'d'>"> {
  1017. let ParserMatchClass = ZPRVectorList<64, 1>;
  1018. }
  1019. def Z_q : RegisterOperand<ZPR, "printTypedVectorList<0,'q'>"> {
  1020. let ParserMatchClass = ZPRVectorList<128, 1>;
  1021. }
  1022. def ZZ_b : RegisterOperand<ZPR2, "printTypedVectorList<0,'b'>"> {
  1023. let ParserMatchClass = ZPRVectorList<8, 2>;
  1024. }
  1025. def ZZ_h : RegisterOperand<ZPR2, "printTypedVectorList<0,'h'>"> {
  1026. let ParserMatchClass = ZPRVectorList<16, 2>;
  1027. }
  1028. def ZZ_s : RegisterOperand<ZPR2, "printTypedVectorList<0,'s'>"> {
  1029. let ParserMatchClass = ZPRVectorList<32, 2>;
  1030. }
  1031. def ZZ_d : RegisterOperand<ZPR2, "printTypedVectorList<0,'d'>"> {
  1032. let ParserMatchClass = ZPRVectorList<64, 2>;
  1033. }
  1034. def ZZ_q : RegisterOperand<ZPR2, "printTypedVectorList<0,'q'>"> {
  1035. let ParserMatchClass = ZPRVectorList<128, 2>;
  1036. }
  1037. def ZZZ_b : RegisterOperand<ZPR3, "printTypedVectorList<0,'b'>"> {
  1038. let ParserMatchClass = ZPRVectorList<8, 3>;
  1039. }
  1040. def ZZZ_h : RegisterOperand<ZPR3, "printTypedVectorList<0,'h'>"> {
  1041. let ParserMatchClass = ZPRVectorList<16, 3>;
  1042. }
  1043. def ZZZ_s : RegisterOperand<ZPR3, "printTypedVectorList<0,'s'>"> {
  1044. let ParserMatchClass = ZPRVectorList<32, 3>;
  1045. }
  1046. def ZZZ_d : RegisterOperand<ZPR3, "printTypedVectorList<0,'d'>"> {
  1047. let ParserMatchClass = ZPRVectorList<64, 3>;
  1048. }
  1049. def ZZZ_q : RegisterOperand<ZPR3, "printTypedVectorList<0,'q'>"> {
  1050. let ParserMatchClass = ZPRVectorList<128, 3>;
  1051. }
  1052. def ZZZZ_b : RegisterOperand<ZPR4, "printTypedVectorList<0,'b'>"> {
  1053. let ParserMatchClass = ZPRVectorList<8, 4>;
  1054. }
  1055. def ZZZZ_h : RegisterOperand<ZPR4, "printTypedVectorList<0,'h'>"> {
  1056. let ParserMatchClass = ZPRVectorList<16, 4>;
  1057. }
  1058. def ZZZZ_s : RegisterOperand<ZPR4, "printTypedVectorList<0,'s'>"> {
  1059. let ParserMatchClass = ZPRVectorList<32, 4>;
  1060. }
  1061. def ZZZZ_d : RegisterOperand<ZPR4, "printTypedVectorList<0,'d'>"> {
  1062. let ParserMatchClass = ZPRVectorList<64, 4>;
  1063. }
  1064. def ZZZZ_q : RegisterOperand<ZPR4, "printTypedVectorList<0,'q'>"> {
  1065. let ParserMatchClass = ZPRVectorList<128, 4>;
  1066. }
  1067. // SME2 multiple-of-2 or 4 multi-vector operands
  1068. def ZPR2Mul2 : RegisterClass<"AArch64", [untyped], 128, (add (decimate ZSeqPairs, 2))> {
  1069. let Size = 256;
  1070. }
  1071. def ZPR4Mul4 : RegisterClass<"AArch64", [untyped], 128, (add (decimate ZSeqQuads, 4))> {
  1072. let Size = 512;
  1073. }
  1074. class ZPRVectorListMul<int ElementWidth, int NumRegs> : ZPRVectorList<ElementWidth, NumRegs> {
  1075. let Name = "SVEVectorListMul" # NumRegs # "x" # ElementWidth;
  1076. let DiagnosticType = "Invalid" # Name;
  1077. let PredicateMethod =
  1078. "isTypedVectorListMultiple<RegKind::SVEDataVector, " # NumRegs # ", 0, "
  1079. # ElementWidth # ">";
  1080. }
  1081. let EncoderMethod = "EncodeRegAsMultipleOf<2>",
  1082. DecoderMethod = "DecodeZPR2Mul2RegisterClass" in {
  1083. def ZZ_b_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,'b'>"> {
  1084. let ParserMatchClass = ZPRVectorListMul<8, 2>;
  1085. }
  1086. def ZZ_h_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,'h'>"> {
  1087. let ParserMatchClass = ZPRVectorListMul<16, 2>;
  1088. }
  1089. def ZZ_s_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,'s'>"> {
  1090. let ParserMatchClass = ZPRVectorListMul<32, 2>;
  1091. }
  1092. def ZZ_d_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,'d'>"> {
  1093. let ParserMatchClass = ZPRVectorListMul<64, 2>;
  1094. }
  1095. def ZZ_q_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,'q'>"> {
  1096. let ParserMatchClass = ZPRVectorListMul<128, 2>;
  1097. }
  1098. } // end let EncoderMethod/DecoderMethod
  1099. let EncoderMethod = "EncodeRegAsMultipleOf<4>",
  1100. DecoderMethod = "DecodeZPR4Mul4RegisterClass" in {
  1101. def ZZZZ_b_mul_r : RegisterOperand<ZPR4Mul4, "printTypedVectorList<0,'b'>"> {
  1102. let ParserMatchClass = ZPRVectorListMul<8, 4>;
  1103. }
  1104. def ZZZZ_h_mul_r : RegisterOperand<ZPR4Mul4, "printTypedVectorList<0,'h'>"> {
  1105. let ParserMatchClass = ZPRVectorListMul<16, 4>;
  1106. }
  1107. def ZZZZ_s_mul_r : RegisterOperand<ZPR4Mul4, "printTypedVectorList<0,'s'>"> {
  1108. let ParserMatchClass = ZPRVectorListMul<32, 4>;
  1109. }
  1110. def ZZZZ_d_mul_r : RegisterOperand<ZPR4Mul4, "printTypedVectorList<0,'d'>"> {
  1111. let ParserMatchClass = ZPRVectorListMul<64, 4>;
  1112. }
  1113. def ZZZZ_q_mul_r : RegisterOperand<ZPR4Mul4, "printTypedVectorList<0,'q'>"> {
  1114. let ParserMatchClass = ZPRVectorListMul<128, 4>;
  1115. }
  1116. } // end let EncoderMethod/DecoderMethod
  1117. // SME2 strided multi-vector operands
  1118. // ZStridedPairs
  1119. //
  1120. // A group of two Z vectors with strided numbering consisting of:
  1121. // Zn+0.T and Zn+8.T
  1122. // where n is in the range 0 to 7 and 16 to 23 inclusive, and T is one of B, H,
  1123. // S, or D.
  1124. // Z0_Z8, Z1_Z9, Z2_Z10, Z3_Z11, Z4_Z12, Z5_Z13, Z6_Z14, Z7_Z15
  1125. def ZStridedPairsLo : RegisterTuples<[zsub0, zsub1], [
  1126. (trunc (rotl ZPR, 0), 8), (trunc (rotl ZPR, 8), 8)
  1127. ]>;
  1128. // Z16_Z24, Z17_Z25, Z18_Z26, Z19_Z27, Z20_Z28, Z21_Z29, Z22_Z30, Z23_Z31
  1129. def ZStridedPairsHi : RegisterTuples<[zsub0, zsub1], [
  1130. (trunc (rotl ZPR, 16), 8), (trunc (rotl ZPR, 24), 8)
  1131. ]>;
  1132. // ZStridedQuads
  1133. //
  1134. // A group of four Z vectors with strided numbering consisting of:
  1135. // Zn+0.T, Zn+4.T, Zn+8.T and Zn+12.T
  1136. // where n is in the range 0 to 3 and 16 to 19 inclusive, and T is one of B, H,
  1137. // S, or D.
  1138. // Z0_Z4_Z8_Z12, Z1_Z5_Z9_Z13, Z2_Z6_Z10_Z14, Z3_Z7_Z11_Z15
  1139. def ZStridedQuadsLo : RegisterTuples<[zsub0, zsub1, zsub2, zsub3], [
  1140. (trunc (rotl ZPR, 0), 4), (trunc (rotl ZPR, 4), 4),
  1141. (trunc (rotl ZPR, 8), 4), (trunc (rotl ZPR, 12), 4)
  1142. ]>;
  1143. // Z16_Z20_Z24_Z28, Z17_Z21_Z25_Z29, Z18_Z22_Z26_Z30, Z19_Z23_Z27_Z31
  1144. def ZStridedQuadsHi : RegisterTuples<[zsub0, zsub1, zsub2, zsub3], [
  1145. (trunc (rotl ZPR, 16), 4), (trunc (rotl ZPR, 20), 4),
  1146. (trunc (rotl ZPR, 24), 4), (trunc (rotl ZPR, 28), 4)
  1147. ]>;
  1148. def ZPR2Strided : RegisterClass<"AArch64", [untyped], 256,
  1149. (add ZStridedPairsLo, ZStridedPairsHi)> {
  1150. let Size = 256;
  1151. }
  1152. def ZPR4Strided : RegisterClass<"AArch64", [untyped], 512,
  1153. (add ZStridedQuadsLo, ZStridedQuadsHi)> {
  1154. let Size = 512;
  1155. }
  1156. class ZPRVectorListStrided<int ElementWidth, int NumRegs, int Stride>
  1157. : ZPRVectorList<ElementWidth, NumRegs> {
  1158. let Name = "SVEVectorListStrided" # NumRegs # "x" # ElementWidth;
  1159. let DiagnosticType = "Invalid" # Name;
  1160. let PredicateMethod = "isTypedVectorListStrided<RegKind::SVEDataVector, "
  1161. # NumRegs # "," # Stride # "," # ElementWidth # ">";
  1162. let RenderMethod = "addStridedVectorListOperands<" # NumRegs # ">";
  1163. }
  1164. let EncoderMethod = "EncodeZPR2StridedRegisterClass",
  1165. DecoderMethod = "DecodeZPR2StridedRegisterClass" in {
  1166. def ZZ_b_strided
  1167. : RegisterOperand<ZPR2Strided, "printTypedVectorList<0, 'b'>"> {
  1168. let ParserMatchClass = ZPRVectorListStrided<8, 2, 8>;
  1169. }
  1170. def ZZ_h_strided
  1171. : RegisterOperand<ZPR2Strided, "printTypedVectorList<0, 'h'>"> {
  1172. let ParserMatchClass = ZPRVectorListStrided<16, 2, 8>;
  1173. }
  1174. def ZZ_s_strided
  1175. : RegisterOperand<ZPR2Strided, "printTypedVectorList<0,'s'>"> {
  1176. let ParserMatchClass = ZPRVectorListStrided<32, 2, 8>;
  1177. }
  1178. def ZZ_d_strided
  1179. : RegisterOperand<ZPR2Strided, "printTypedVectorList<0,'d'>"> {
  1180. let ParserMatchClass = ZPRVectorListStrided<64, 2, 8>;
  1181. }
  1182. }
  1183. let EncoderMethod = "EncodeZPR4StridedRegisterClass",
  1184. DecoderMethod = "DecodeZPR4StridedRegisterClass" in {
  1185. def ZZZZ_b_strided
  1186. : RegisterOperand<ZPR4Strided, "printTypedVectorList<0,'b'>"> {
  1187. let ParserMatchClass = ZPRVectorListStrided<8, 4, 4>;
  1188. }
  1189. def ZZZZ_h_strided
  1190. : RegisterOperand<ZPR4Strided, "printTypedVectorList<0,'h'>"> {
  1191. let ParserMatchClass = ZPRVectorListStrided<16, 4, 4>;
  1192. }
  1193. def ZZZZ_s_strided
  1194. : RegisterOperand<ZPR4Strided, "printTypedVectorList<0,'s'>"> {
  1195. let ParserMatchClass = ZPRVectorListStrided<32, 4, 4>;
  1196. }
  1197. def ZZZZ_d_strided
  1198. : RegisterOperand<ZPR4Strided, "printTypedVectorList<0,'d'>"> {
  1199. let ParserMatchClass = ZPRVectorListStrided<64, 4, 4>;
  1200. }
  1201. }
  1202. class ZPRExtendAsmOperand<string ShiftExtend, int RegWidth, int Scale,
  1203. bit ScaleAlwaysSame = 0b0> : AsmOperandClass {
  1204. let Name = "ZPRExtend" # ShiftExtend # RegWidth # Scale
  1205. # !if(ScaleAlwaysSame, "Only", "");
  1206. let PredicateMethod = "isSVEDataVectorRegWithShiftExtend<"
  1207. # RegWidth # ", AArch64::ZPRRegClassID, "
  1208. # "AArch64_AM::" # ShiftExtend # ", "
  1209. # Scale # ", "
  1210. # !if(ScaleAlwaysSame, "true", "false")
  1211. # ">";
  1212. let DiagnosticType = "InvalidZPR" # RegWidth # ShiftExtend # Scale;
  1213. let RenderMethod = "addRegOperands";
  1214. let ParserMethod = "tryParseSVEDataVector<true, true>";
  1215. }
  1216. class ZPRExtendRegisterOperand<bit SignExtend, bit IsLSL, string Repr,
  1217. int RegWidth, int Scale, string Suffix = "">
  1218. : RegisterOperand<ZPR> {
  1219. let ParserMatchClass =
  1220. !cast<AsmOperandClass>("ZPR" # RegWidth # "AsmOpndExt" # Repr # Scale # Suffix);
  1221. let PrintMethod = "printRegWithShiftExtend<"
  1222. # !if(SignExtend, "true", "false") # ", "
  1223. # Scale # ", "
  1224. # !if(IsLSL, "'x'", "'w'") # ", "
  1225. # !if(!eq(RegWidth, 32), "'s'", "'d'") # ">";
  1226. }
  1227. foreach RegWidth = [32, 64] in {
  1228. // UXTW(8|16|32|64)
  1229. def ZPR#RegWidth#AsmOpndExtUXTW8Only : ZPRExtendAsmOperand<"UXTW", RegWidth, 8, 0b1>;
  1230. def ZPR#RegWidth#AsmOpndExtUXTW8 : ZPRExtendAsmOperand<"UXTW", RegWidth, 8>;
  1231. def ZPR#RegWidth#AsmOpndExtUXTW16 : ZPRExtendAsmOperand<"UXTW", RegWidth, 16>;
  1232. def ZPR#RegWidth#AsmOpndExtUXTW32 : ZPRExtendAsmOperand<"UXTW", RegWidth, 32>;
  1233. def ZPR#RegWidth#AsmOpndExtUXTW64 : ZPRExtendAsmOperand<"UXTW", RegWidth, 64>;
  1234. def ZPR#RegWidth#ExtUXTW8Only : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 8, "Only">;
  1235. def ZPR#RegWidth#ExtUXTW8 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 8>;
  1236. def ZPR#RegWidth#ExtUXTW16 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 16>;
  1237. def ZPR#RegWidth#ExtUXTW32 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 32>;
  1238. def ZPR#RegWidth#ExtUXTW64 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 64>;
  1239. // SXTW(8|16|32|64)
  1240. def ZPR#RegWidth#AsmOpndExtSXTW8Only : ZPRExtendAsmOperand<"SXTW", RegWidth, 8, 0b1>;
  1241. def ZPR#RegWidth#AsmOpndExtSXTW8 : ZPRExtendAsmOperand<"SXTW", RegWidth, 8>;
  1242. def ZPR#RegWidth#AsmOpndExtSXTW16 : ZPRExtendAsmOperand<"SXTW", RegWidth, 16>;
  1243. def ZPR#RegWidth#AsmOpndExtSXTW32 : ZPRExtendAsmOperand<"SXTW", RegWidth, 32>;
  1244. def ZPR#RegWidth#AsmOpndExtSXTW64 : ZPRExtendAsmOperand<"SXTW", RegWidth, 64>;
  1245. def ZPR#RegWidth#ExtSXTW8Only : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 8, "Only">;
  1246. def ZPR#RegWidth#ExtSXTW8 : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 8>;
  1247. def ZPR#RegWidth#ExtSXTW16 : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 16>;
  1248. def ZPR#RegWidth#ExtSXTW32 : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 32>;
  1249. def ZPR#RegWidth#ExtSXTW64 : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 64>;
  1250. // LSL(8|16|32|64)
  1251. def ZPR#RegWidth#AsmOpndExtLSL8 : ZPRExtendAsmOperand<"LSL", RegWidth, 8>;
  1252. def ZPR#RegWidth#AsmOpndExtLSL16 : ZPRExtendAsmOperand<"LSL", RegWidth, 16>;
  1253. def ZPR#RegWidth#AsmOpndExtLSL32 : ZPRExtendAsmOperand<"LSL", RegWidth, 32>;
  1254. def ZPR#RegWidth#AsmOpndExtLSL64 : ZPRExtendAsmOperand<"LSL", RegWidth, 64>;
  1255. def ZPR#RegWidth#ExtLSL8 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 8>;
  1256. def ZPR#RegWidth#ExtLSL16 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 16>;
  1257. def ZPR#RegWidth#ExtLSL32 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 32>;
  1258. def ZPR#RegWidth#ExtLSL64 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 64>;
  1259. }
  1260. class GPR64ShiftExtendAsmOperand <string AsmOperandName, int Scale, string RegClass> : AsmOperandClass {
  1261. let Name = AsmOperandName # Scale;
  1262. let PredicateMethod = "isGPR64WithShiftExtend<AArch64::"#RegClass#"RegClassID, " # Scale # ">";
  1263. let DiagnosticType = "Invalid" # AsmOperandName # Scale;
  1264. let RenderMethod = "addRegOperands";
  1265. let ParserMethod = "tryParseGPROperand<true>";
  1266. }
  1267. class GPR64ExtendRegisterOperand<string Name, int Scale, RegisterClass RegClass> : RegisterOperand<RegClass>{
  1268. let ParserMatchClass = !cast<AsmOperandClass>(Name);
  1269. let PrintMethod = "printRegWithShiftExtend<false, " # Scale # ", 'x', 0>";
  1270. }
  1271. foreach Scale = [8, 16, 32, 64, 128] in {
  1272. def GPR64shiftedAsmOpnd # Scale : GPR64ShiftExtendAsmOperand<"GPR64shifted", Scale, "GPR64">;
  1273. def GPR64shifted # Scale : GPR64ExtendRegisterOperand<"GPR64shiftedAsmOpnd" # Scale, Scale, GPR64>;
  1274. def GPR64NoXZRshiftedAsmOpnd # Scale : GPR64ShiftExtendAsmOperand<"GPR64NoXZRshifted", Scale, "GPR64common">;
  1275. def GPR64NoXZRshifted # Scale : GPR64ExtendRegisterOperand<"GPR64NoXZRshiftedAsmOpnd" # Scale, Scale, GPR64common>;
  1276. }
  1277. // Accumulator array tiles.
  1278. def ZAQ0 : AArch64Reg<0, "za0.q">;
  1279. def ZAQ1 : AArch64Reg<1, "za1.q">;
  1280. def ZAQ2 : AArch64Reg<2, "za2.q">;
  1281. def ZAQ3 : AArch64Reg<3, "za3.q">;
  1282. def ZAQ4 : AArch64Reg<4, "za4.q">;
  1283. def ZAQ5 : AArch64Reg<5, "za5.q">;
  1284. def ZAQ6 : AArch64Reg<6, "za6.q">;
  1285. def ZAQ7 : AArch64Reg<7, "za7.q">;
  1286. def ZAQ8 : AArch64Reg<8, "za8.q">;
  1287. def ZAQ9 : AArch64Reg<9, "za9.q">;
  1288. def ZAQ10 : AArch64Reg<10, "za10.q">;
  1289. def ZAQ11 : AArch64Reg<11, "za11.q">;
  1290. def ZAQ12 : AArch64Reg<12, "za12.q">;
  1291. def ZAQ13 : AArch64Reg<13, "za13.q">;
  1292. def ZAQ14 : AArch64Reg<14, "za14.q">;
  1293. def ZAQ15 : AArch64Reg<15, "za15.q">;
  1294. let SubRegIndices = [zasubq0, zasubq1] in {
  1295. def ZAD0 : AArch64Reg<0, "za0.d", [ZAQ0, ZAQ8]>;
  1296. def ZAD1 : AArch64Reg<1, "za1.d", [ZAQ1, ZAQ9]>;
  1297. def ZAD2 : AArch64Reg<2, "za2.d", [ZAQ2, ZAQ10]>;
  1298. def ZAD3 : AArch64Reg<3, "za3.d", [ZAQ3, ZAQ11]>;
  1299. def ZAD4 : AArch64Reg<4, "za4.d", [ZAQ4, ZAQ12]>;
  1300. def ZAD5 : AArch64Reg<5, "za5.d", [ZAQ5, ZAQ13]>;
  1301. def ZAD6 : AArch64Reg<6, "za6.d", [ZAQ6, ZAQ14]>;
  1302. def ZAD7 : AArch64Reg<7, "za7.d", [ZAQ7, ZAQ15]>;
  1303. }
  1304. let SubRegIndices = [zasubd0, zasubd1] in {
  1305. def ZAS0 : AArch64Reg<0, "za0.s", [ZAD0, ZAD4]>;
  1306. def ZAS1 : AArch64Reg<1, "za1.s", [ZAD1, ZAD5]>;
  1307. def ZAS2 : AArch64Reg<2, "za2.s", [ZAD2, ZAD6]>;
  1308. def ZAS3 : AArch64Reg<3, "za3.s", [ZAD3, ZAD7]>;
  1309. }
  1310. let SubRegIndices = [zasubs0, zasubs1] in {
  1311. def ZAH0 : AArch64Reg<0, "za0.h", [ZAS0, ZAS2]>;
  1312. def ZAH1 : AArch64Reg<1, "za1.h", [ZAS1, ZAS3]>;
  1313. }
  1314. let SubRegIndices = [zasubh0, zasubh1] in {
  1315. def ZAB0 : AArch64Reg<0, "za0.b", [ZAH0, ZAH1]>;
  1316. }
  1317. let SubRegIndices = [zasubb] in {
  1318. def ZA : AArch64Reg<0, "za", [ZAB0]>;
  1319. }
  1320. def ZT0 : AArch64Reg<0, "zt0">;
  1321. // SME Register Classes
  1322. let isAllocatable = 0 in {
  1323. // Accumulator array
  1324. def MPR : RegisterClass<"AArch64", [untyped], 2048, (add ZA)> {
  1325. let Size = 2048;
  1326. }
  1327. // Accumulator array as single tiles
  1328. def MPR8 : RegisterClass<"AArch64", [untyped], 2048, (add (sequence "ZAB%u", 0, 0))> {
  1329. let Size = 2048;
  1330. }
  1331. def MPR16 : RegisterClass<"AArch64", [untyped], 1024, (add (sequence "ZAH%u", 0, 1))> {
  1332. let Size = 1024;
  1333. }
  1334. def MPR32 : RegisterClass<"AArch64", [untyped], 512, (add (sequence "ZAS%u", 0, 3))> {
  1335. let Size = 512;
  1336. }
  1337. def MPR64 : RegisterClass<"AArch64", [untyped], 256, (add (sequence "ZAD%u", 0, 7))> {
  1338. let Size = 256;
  1339. }
  1340. def MPR128 : RegisterClass<"AArch64", [untyped], 128, (add (sequence "ZAQ%u", 0, 15))> {
  1341. let Size = 128;
  1342. }
  1343. }
  1344. def ZTR : RegisterClass<"AArch64", [untyped], 512, (add ZT0)> {
  1345. let Size = 512;
  1346. let DiagnosticType = "InvalidLookupTable";
  1347. }
  1348. // SME Register Operands
  1349. // There are three types of SME matrix register operands:
  1350. // * Tiles:
  1351. //
  1352. // These tiles make up the larger accumulator matrix. The tile representation
  1353. // has an element type suffix, e.g. za0.b or za15.q and can be any of the
  1354. // registers:
  1355. // ZAQ0..ZAQ15
  1356. // ZAD0..ZAD7
  1357. // ZAS0..ZAS3
  1358. // ZAH0..ZAH1
  1359. // or ZAB0
  1360. //
  1361. // * Tile vectors:
  1362. //
  1363. // Their representation is similar to regular tiles, but they have an extra
  1364. // 'h' or 'v' to tell how the vector at [reg+offset] is layed out in the tile,
  1365. // horizontally or vertically.
  1366. //
  1367. // e.g. za1h.h or za15v.q, which corresponds to vectors in registers ZAH1 and
  1368. // ZAQ15, respectively. The horizontal/vertical is more a property of the
  1369. // instruction, than a property of the asm-operand itself, or its register.
  1370. // The distinction is required for the parsing/printing of the operand,
  1371. // as from a compiler's perspective, the whole tile is read/written.
  1372. //
  1373. // * Accumulator matrix:
  1374. //
  1375. // This is the entire matrix accumulator register ZA (<=> ZAB0), printed as
  1376. // 'za'.
  1377. //
  1378. // Tiles
  1379. //
  1380. class MatrixTileAsmOperand<string RC, int EltSize> : AsmOperandClass {
  1381. let Name = "MatrixTile" # EltSize;
  1382. let DiagnosticType = "Invalid" # Name;
  1383. let ParserMethod = "tryParseMatrixRegister";
  1384. let RenderMethod = "addMatrixOperands";
  1385. let PredicateMethod = "isMatrixRegOperand<"
  1386. # "MatrixKind::Tile" # ", "
  1387. # EltSize # ", AArch64::" # RC # "RegClassID>";
  1388. }
  1389. class MatrixTileOperand<int EltSize, int NumBitsForTile, RegisterClass RC>
  1390. : RegisterOperand<RC> {
  1391. let ParserMatchClass = MatrixTileAsmOperand<!cast<string>(RC), EltSize>;
  1392. let DecoderMethod = "DecodeMatrixTile<" # NumBitsForTile # ">";
  1393. let PrintMethod = "printMatrixTile";
  1394. }
  1395. def TileOp16 : MatrixTileOperand<16, 1, MPR16>;
  1396. def TileOp32 : MatrixTileOperand<32, 2, MPR32>;
  1397. def TileOp64 : MatrixTileOperand<64, 3, MPR64>;
  1398. //
  1399. // Tile vectors (horizontal and vertical)
  1400. //
  1401. class MatrixTileVectorAsmOperand<string RC, int EltSize, int IsVertical>
  1402. : AsmOperandClass {
  1403. let Name = "MatrixTileVector" # !if(IsVertical, "V", "H") # EltSize;
  1404. let DiagnosticType = "Invalid" # Name;
  1405. let ParserMethod = "tryParseMatrixRegister";
  1406. let RenderMethod = "addMatrixOperands";
  1407. let PredicateMethod = "isMatrixRegOperand<"
  1408. # "MatrixKind::"
  1409. # !if(IsVertical, "Col", "Row") # ", "
  1410. # EltSize # ", AArch64::" # RC # "RegClassID>";
  1411. }
  1412. class MatrixTileVectorOperand<int EltSize, int NumBitsForTile,
  1413. RegisterClass RC, int IsVertical>
  1414. : RegisterOperand<RC> {
  1415. let ParserMatchClass = MatrixTileVectorAsmOperand<!cast<string>(RC), EltSize,
  1416. IsVertical>;
  1417. let DecoderMethod = "DecodeMatrixTile<" # NumBitsForTile # ">";
  1418. let PrintMethod = "printMatrixTileVector<" # IsVertical # ">";
  1419. }
  1420. def TileVectorOpH8 : MatrixTileVectorOperand< 8, 0, MPR8, 0>;
  1421. def TileVectorOpH16 : MatrixTileVectorOperand< 16, 1, MPR16, 0>;
  1422. def TileVectorOpH32 : MatrixTileVectorOperand< 32, 2, MPR32, 0>;
  1423. def TileVectorOpH64 : MatrixTileVectorOperand< 64, 3, MPR64, 0>;
  1424. def TileVectorOpH128 : MatrixTileVectorOperand<128, 4, MPR128, 0>;
  1425. def TileVectorOpV8 : MatrixTileVectorOperand< 8, 0, MPR8, 1>;
  1426. def TileVectorOpV16 : MatrixTileVectorOperand< 16, 1, MPR16, 1>;
  1427. def TileVectorOpV32 : MatrixTileVectorOperand< 32, 2, MPR32, 1>;
  1428. def TileVectorOpV64 : MatrixTileVectorOperand< 64, 3, MPR64, 1>;
  1429. def TileVectorOpV128 : MatrixTileVectorOperand<128, 4, MPR128, 1>;
  1430. //
  1431. // Accumulator matrix
  1432. //
  1433. class MatrixAsmOperand<string RC, int EltSize> : AsmOperandClass {
  1434. let Name = "Matrix" # !if(EltSize, !cast<string>(EltSize), "");
  1435. let DiagnosticType = "Invalid" # Name;
  1436. let ParserMethod = "tryParseMatrixRegister";
  1437. let RenderMethod = "addMatrixOperands";
  1438. let PredicateMethod = "isMatrixRegOperand<"
  1439. # "MatrixKind::Array" # ", "
  1440. # EltSize # ", AArch64::" # RC # "RegClassID>";
  1441. }
  1442. class MatrixOperand<RegisterClass RC, int EltSize> : RegisterOperand<RC> {
  1443. let ParserMatchClass = MatrixAsmOperand<!cast<string>(RC), EltSize>;
  1444. let PrintMethod = "printMatrix<" # EltSize # ">";
  1445. }
  1446. def MatrixOp : MatrixOperand<MPR, 0>;
  1447. // SME2 register operands and classes
  1448. def MatrixOp8 : MatrixOperand<MPR, 8>;
  1449. def MatrixOp16 : MatrixOperand<MPR, 16>;
  1450. def MatrixOp32 : MatrixOperand<MPR, 32>;
  1451. def MatrixOp64 : MatrixOperand<MPR, 64>;
  1452. class MatrixTileListAsmOperand : AsmOperandClass {
  1453. let Name = "MatrixTileList";
  1454. let ParserMethod = "tryParseMatrixTileList";
  1455. let RenderMethod = "addMatrixTileListOperands";
  1456. let PredicateMethod = "isMatrixTileList";
  1457. }
  1458. class MatrixTileListOperand : Operand<i8> {
  1459. let ParserMatchClass = MatrixTileListAsmOperand<>;
  1460. let DecoderMethod = "DecodeMatrixTileListRegisterClass";
  1461. let EncoderMethod = "EncodeMatrixTileListRegisterClass";
  1462. let PrintMethod = "printMatrixTileList";
  1463. }
  1464. def MatrixTileList : MatrixTileListOperand<>;
  1465. def MatrixIndexGPR32_8_11 : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 8, 11)> {
  1466. let DiagnosticType = "InvalidMatrixIndexGPR32_8_11";
  1467. }
  1468. def MatrixIndexGPR32_12_15 : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 12, 15)> {
  1469. let DiagnosticType = "InvalidMatrixIndexGPR32_12_15";
  1470. }
  1471. def MatrixIndexGPR32Op8_11 : RegisterOperand<MatrixIndexGPR32_8_11> {
  1472. let EncoderMethod = "encodeMatrixIndexGPR32<AArch64::W8>";
  1473. }
  1474. def MatrixIndexGPR32Op12_15 : RegisterOperand<MatrixIndexGPR32_12_15> {
  1475. let EncoderMethod = "encodeMatrixIndexGPR32<AArch64::W12>";
  1476. }
  1477. def SVCROperand : AsmOperandClass {
  1478. let Name = "SVCR";
  1479. let ParserMethod = "tryParseSVCR";
  1480. let DiagnosticType = "Invalid" # Name;
  1481. }
  1482. def svcr_op : Operand<i32>, TImmLeaf<i32, [{
  1483. return AArch64SVCR::lookupSVCRByEncoding(Imm) != nullptr;
  1484. }]> {
  1485. let ParserMatchClass = SVCROperand;
  1486. let PrintMethod = "printSVCROp";
  1487. let DecoderMethod = "DecodeSVCROp";
  1488. let MCOperandPredicate = [{
  1489. if (!MCOp.isImm())
  1490. return false;
  1491. return AArch64SVCR::lookupSVCRByEncoding(MCOp.getImm()) != nullptr;
  1492. }];
  1493. }
  1494. //===----------------------------------------------------------------------===//
  1495. // Register categories.
  1496. //
  1497. def GeneralPurposeRegisters : RegisterCategory<[GPR64, GPR32]>;
  1498. def FIXED_REGS : RegisterClass<"AArch64", [i64], 64, (add FP, SP, VG, FFR)>;
  1499. def FixedRegisters : RegisterCategory<[CCR, FIXED_REGS]>;