// PPCRegisterInfo.td
  1. //===-- PPCRegisterInfo.td - The PowerPC Register File -----*- tablegen -*-===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. //
  10. //===----------------------------------------------------------------------===//
  11. let Namespace = "PPC" in {
  12. def sub_lt : SubRegIndex<1>;
  13. def sub_gt : SubRegIndex<1, 1>;
  14. def sub_eq : SubRegIndex<1, 2>;
  15. def sub_un : SubRegIndex<1, 3>;
  16. def sub_32 : SubRegIndex<32>;
  17. def sub_64 : SubRegIndex<64>;
  18. def sub_vsx0 : SubRegIndex<128>;
  19. def sub_vsx1 : SubRegIndex<128, 128>;
  20. def sub_pair0 : SubRegIndex<256>;
  21. def sub_pair1 : SubRegIndex<256, 256>;
  22. def sub_gp8_x0 : SubRegIndex<64>;
  23. def sub_gp8_x1 : SubRegIndex<64, 64>;
  24. }
  25. class PPCReg<string n> : Register<n> {
  26. let Namespace = "PPC";
  27. }
  28. // We identify all our registers with a 5-bit ID, for consistency's sake.
  29. // GPR - One of the 32 32-bit general-purpose registers
  30. class GPR<bits<5> num, string n> : PPCReg<n> {
  31. let HWEncoding{4-0} = num;
  32. }
  33. // GP8 - One of the 32 64-bit general-purpose registers
  34. class GP8<GPR SubReg, string n> : PPCReg<n> {
  35. let HWEncoding = SubReg.HWEncoding;
  36. let SubRegs = [SubReg];
  37. let SubRegIndices = [sub_32];
  38. }
  39. // SPE - One of the 32 64-bit general-purpose registers (SPE)
  40. class SPE<GPR SubReg, string n> : PPCReg<n> {
  41. let HWEncoding = SubReg.HWEncoding;
  42. let SubRegs = [SubReg];
  43. let SubRegIndices = [sub_32];
  44. }
  45. // SPR - One of the 32-bit special-purpose registers
  46. class SPR<bits<10> num, string n> : PPCReg<n> {
  47. let HWEncoding{9-0} = num;
  48. }
  49. // FPR - One of the 32 64-bit floating-point registers
  50. class FPR<bits<5> num, string n> : PPCReg<n> {
  51. let HWEncoding{4-0} = num;
  52. }
  53. // VF - One of the 32 64-bit floating-point subregisters of the vector
  54. // registers (used by VSX).
  55. class VF<bits<5> num, string n> : PPCReg<n> {
  56. let HWEncoding{4-0} = num;
  57. let HWEncoding{5} = 1;
  58. }
  59. // VR - One of the 32 128-bit vector registers
  60. class VR<VF SubReg, string n> : PPCReg<n> {
  61. let HWEncoding{4-0} = SubReg.HWEncoding{4-0};
  62. let HWEncoding{5} = 0;
  63. let SubRegs = [SubReg];
  64. let SubRegIndices = [sub_64];
  65. }
  66. // VSRL - One of the 32 128-bit VSX registers that overlap with the scalar
  67. // floating-point registers.
  68. class VSRL<FPR SubReg, string n> : PPCReg<n> {
  69. let HWEncoding = SubReg.HWEncoding;
  70. let SubRegs = [SubReg];
  71. let SubRegIndices = [sub_64];
  72. }
  73. // VSXReg - One of the VSX registers in the range vs32-vs63 with numbering
  74. // and encoding to match.
  75. class VSXReg<bits<6> num, string n> : PPCReg<n> {
  76. let HWEncoding{5-0} = num;
  77. }
  78. // CR - One of the 8 4-bit condition registers
  79. class CR<bits<3> num, string n, list<Register> subregs> : PPCReg<n> {
  80. let HWEncoding{2-0} = num;
  81. let SubRegs = subregs;
  82. }
  83. // CRBIT - One of the 32 1-bit condition register fields
  84. class CRBIT<bits<5> num, string n> : PPCReg<n> {
  85. let HWEncoding{4-0} = num;
  86. }
  87. // ACC - One of the 8 512-bit VSX accumulators.
  88. class ACC<bits<3> num, string n, list<Register> subregs> : PPCReg<n> {
  89. let HWEncoding{2-0} = num;
  90. let SubRegs = subregs;
  91. }
  92. // UACC - One of the 8 512-bit VSX accumulators prior to being primed.
  93. // Without using this register class, the register allocator has no way to
  94. // differentiate a primed accumulator from an unprimed accumulator.
  95. // This may result in invalid copies between primed and unprimed accumulators.
  96. class UACC<bits<3> num, string n, list<Register> subregs> : PPCReg<n> {
  97. let HWEncoding{2-0} = num;
  98. let SubRegs = subregs;
  99. }
  100. // VSR Pairs - One of the 32 paired even-odd consecutive VSRs.
  101. class VSRPair<bits<5> num, string n, list<Register> subregs> : PPCReg<n> {
  102. let HWEncoding{4-0} = num;
  103. let SubRegs = subregs;
  104. }
  105. // GP8Pair - Consecutive even-odd paired GP8.
  106. class GP8Pair<string n, bits<5> EvenIndex> : PPCReg<n> {
  107. assert !eq(EvenIndex{0}, 0), "Index should be even.";
  108. let HWEncoding{4-0} = EvenIndex;
  109. let SubRegs = [!cast<GP8>("X"#EvenIndex), !cast<GP8>("X"#!add(EvenIndex, 1))];
  110. let DwarfNumbers = [-1, -1];
  111. let SubRegIndices = [sub_gp8_x0, sub_gp8_x1];
  112. }
  113. // General-purpose registers
  114. foreach Index = 0-31 in {
  115. def R#Index : GPR<Index, "r"#Index>, DwarfRegNum<[-2, Index]>;
  116. }
  117. // 64-bit General-purpose registers
  118. foreach Index = 0-31 in {
  119. def X#Index : GP8<!cast<GPR>("R"#Index), "r"#Index>,
  120. DwarfRegNum<[Index, -2]>;
  121. }
  122. // SPE registers
  123. foreach Index = 0-31 in {
  124. def S#Index : SPE<!cast<GPR>("R"#Index), "r"#Index>,
  125. DwarfRegNum<[!add(Index, 1200), !add(Index, 1200)]>;
  126. }
  127. // Floating-point registers
  128. foreach Index = 0-31 in {
  129. def F#Index : FPR<Index, "f"#Index>,
  130. DwarfRegNum<[!add(Index, 32), !add(Index, 32)]>;
  131. }
  132. // 64-bit Floating-point subregisters of Altivec registers
  133. // Note: the register names are v0-v31 or vs32-vs63 depending on the use.
  134. // Custom C++ code is used to produce the correct name and encoding.
  135. foreach Index = 0-31 in {
  136. def VF#Index : VF<Index, "v" #Index>,
  137. DwarfRegNum<[!add(Index, 77), !add(Index, 77)]>;
  138. }
  139. // Vector registers
  140. foreach Index = 0-31 in {
  141. def V#Index : VR<!cast<VF>("VF"#Index), "v"#Index>,
  142. DwarfRegNum<[!add(Index, 77), !add(Index, 77)]>;
  143. }
  144. // VSX registers
  145. foreach Index = 0-31 in {
  146. def VSL#Index : VSRL<!cast<FPR>("F"#Index), "vs"#Index>,
  147. DwarfRegAlias<!cast<FPR>("F"#Index)>;
  148. }
  149. // Dummy VSX registers, this defines string: "vs32"-"vs63", and is only used for
  150. // asm printing.
  151. foreach Index = 32-63 in {
  152. def VSX#Index : VSXReg<Index, "vs"#Index>;
  153. }
  154. let SubRegIndices = [sub_vsx0, sub_vsx1] in {
  155. // VSR pairs 0 - 15 (corresponding to VSRs 0 - 30 paired with 1 - 31).
  156. foreach Index = { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30 } in {
  157. def VSRp#!srl(Index, 1) : VSRPair<!srl(Index, 1), "vsp"#Index,
  158. [!cast<VSRL>("VSL"#Index), !cast<VSRL>("VSL"#!add(Index, 1))]>,
  159. DwarfRegNum<[-1, -1]>;
  160. }
  161. // VSR pairs 16 - 31 (corresponding to VSRs 32 - 62 paired with 33 - 63).
  162. foreach Index = { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30 } in {
  163. def VSRp#!add(!srl(Index, 1), 16) :
  164. VSRPair<!add(!srl(Index, 1), 16), "vsp"#!add(Index, 32),
  165. [!cast<VR>("V"#Index), !cast<VR>("V"#!add(Index, 1))]>,
  166. DwarfRegNum<[-1, -1]>;
  167. }
  168. }
  169. // 16 paired even-odd consecutive GP8s.
  170. foreach Index = { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30 } in {
  171. def G8p#!srl(Index, 1) : GP8Pair<"r"#Index, Index>;
  172. }
  173. // The representation of r0 when treated as the constant 0.
  174. def ZERO : GPR<0, "0">, DwarfRegAlias<R0>;
  175. def ZERO8 : GP8<ZERO, "0">, DwarfRegAlias<X0>;
  176. // Representations of the frame pointer used by ISD::FRAMEADDR.
  177. def FP : GPR<0 /* arbitrary */, "**FRAME POINTER**">;
  178. def FP8 : GP8<FP, "**FRAME POINTER**">;
  179. // Representations of the base pointer used by setjmp.
  180. def BP : GPR<0 /* arbitrary */, "**BASE POINTER**">;
  181. def BP8 : GP8<BP, "**BASE POINTER**">;
  182. // Condition register bits
  183. def CR0LT : CRBIT< 0, "0">;
  184. def CR0GT : CRBIT< 1, "1">;
  185. def CR0EQ : CRBIT< 2, "2">;
  186. def CR0UN : CRBIT< 3, "3">;
  187. def CR1LT : CRBIT< 4, "4">;
  188. def CR1GT : CRBIT< 5, "5">;
  189. def CR1EQ : CRBIT< 6, "6">;
  190. def CR1UN : CRBIT< 7, "7">;
  191. def CR2LT : CRBIT< 8, "8">;
  192. def CR2GT : CRBIT< 9, "9">;
  193. def CR2EQ : CRBIT<10, "10">;
  194. def CR2UN : CRBIT<11, "11">;
  195. def CR3LT : CRBIT<12, "12">;
  196. def CR3GT : CRBIT<13, "13">;
  197. def CR3EQ : CRBIT<14, "14">;
  198. def CR3UN : CRBIT<15, "15">;
  199. def CR4LT : CRBIT<16, "16">;
  200. def CR4GT : CRBIT<17, "17">;
  201. def CR4EQ : CRBIT<18, "18">;
  202. def CR4UN : CRBIT<19, "19">;
  203. def CR5LT : CRBIT<20, "20">;
  204. def CR5GT : CRBIT<21, "21">;
  205. def CR5EQ : CRBIT<22, "22">;
  206. def CR5UN : CRBIT<23, "23">;
  207. def CR6LT : CRBIT<24, "24">;
  208. def CR6GT : CRBIT<25, "25">;
  209. def CR6EQ : CRBIT<26, "26">;
  210. def CR6UN : CRBIT<27, "27">;
  211. def CR7LT : CRBIT<28, "28">;
  212. def CR7GT : CRBIT<29, "29">;
  213. def CR7EQ : CRBIT<30, "30">;
  214. def CR7UN : CRBIT<31, "31">;
  215. // Condition registers
  216. let SubRegIndices = [sub_lt, sub_gt, sub_eq, sub_un] in {
  217. def CR0 : CR<0, "cr0", [CR0LT, CR0GT, CR0EQ, CR0UN]>, DwarfRegNum<[68, 68]>;
  218. def CR1 : CR<1, "cr1", [CR1LT, CR1GT, CR1EQ, CR1UN]>, DwarfRegNum<[69, 69]>;
  219. def CR2 : CR<2, "cr2", [CR2LT, CR2GT, CR2EQ, CR2UN]>, DwarfRegNum<[70, 70]>;
  220. def CR3 : CR<3, "cr3", [CR3LT, CR3GT, CR3EQ, CR3UN]>, DwarfRegNum<[71, 71]>;
  221. def CR4 : CR<4, "cr4", [CR4LT, CR4GT, CR4EQ, CR4UN]>, DwarfRegNum<[72, 72]>;
  222. def CR5 : CR<5, "cr5", [CR5LT, CR5GT, CR5EQ, CR5UN]>, DwarfRegNum<[73, 73]>;
  223. def CR6 : CR<6, "cr6", [CR6LT, CR6GT, CR6EQ, CR6UN]>, DwarfRegNum<[74, 74]>;
  224. def CR7 : CR<7, "cr7", [CR7LT, CR7GT, CR7EQ, CR7UN]>, DwarfRegNum<[75, 75]>;
  225. }
  226. // Link register
  227. def LR : SPR<8, "lr">, DwarfRegNum<[-2, 65]>;
  228. //let Aliases = [LR] in
  229. def LR8 : SPR<8, "lr">, DwarfRegNum<[65, -2]>;
  230. // Count register
  231. def CTR : SPR<9, "ctr">, DwarfRegNum<[-2, 66]>;
  232. def CTR8 : SPR<9, "ctr">, DwarfRegNum<[66, -2]>;
  233. // VRsave register
  234. def VRSAVE: SPR<256, "vrsave">, DwarfRegNum<[109]>;
  235. // SPE extra registers
  236. // SPE Accumulator for multiply-accumulate SPE operations. Never directly
  237. // accessed, so there's no real encoding for it.
  238. def SPEACC: DwarfRegNum<[99, 111]>;
  239. def SPEFSCR: SPR<512, "spefscr">, DwarfRegNum<[612, 112]>;
  240. def XER: SPR<1, "xer">, DwarfRegNum<[76]>;
  241. // Carry bit. In the architecture this is really bit 0 of the XER register
  242. // (which really is SPR register 1); this is the only bit interesting to a
  243. // compiler.
  244. def CARRY: SPR<1, "xer">, DwarfRegNum<[76]> {
  245. let Aliases = [XER];
  246. }
  247. // FP rounding mode: bits 30 and 31 of the FP status and control register
  248. // This is not allocated as a normal register; it appears only in
  249. // Uses and Defs. The ABI says it needs to be preserved by a function,
  250. // but this is not achieved by saving and restoring it as with
  251. // most registers, it has to be done in code; to make this work all the
  252. // return and call instructions are described as Uses of RM, so instructions
  253. // that do nothing but change RM will not get deleted.
  254. def RM: PPCReg<"**ROUNDING MODE**">;
  255. /// Register classes
  256. // Allocate volatiles first
  257. // then nonvolatiles in reverse order since stmw/lmw save from rN to r31
  258. def GPRC : RegisterClass<"PPC", [i32,f32], 32, (add (sequence "R%u", 2, 12),
  259. (sequence "R%u", 30, 13),
  260. R31, R0, R1, FP, BP)> {
  261. // On non-Darwin PPC64 systems, R2 can be allocated, but must be restored, so
  262. // put it at the end of the list.
  263. // On AIX, CSRs are allocated starting from R31 according to:
  264. // https://www.ibm.com/docs/en/ssw_aix_72/assembler/assembler_pdf.pdf.
  265. // This also helps setting the correct `NumOfGPRsSaved' in traceback table.
  266. let AltOrders = [(add (sub GPRC, R2), R2),
  267. (add (sequence "R%u", 2, 12),
  268. (sequence "R%u", 31, 13), R0, R1, FP, BP)];
  269. let AltOrderSelect = [{
  270. return MF.getSubtarget<PPCSubtarget>().getGPRAllocationOrderIdx();
  271. }];
  272. }
  273. def G8RC : RegisterClass<"PPC", [i64], 64, (add (sequence "X%u", 2, 12),
  274. (sequence "X%u", 30, 14),
  275. X31, X13, X0, X1, FP8, BP8)> {
  276. // On non-Darwin PPC64 systems, R2 can be allocated, but must be restored, so
  277. // put it at the end of the list.
  278. let AltOrders = [(add (sub G8RC, X2), X2),
  279. (add (sequence "X%u", 2, 12),
  280. (sequence "X%u", 31, 13), X0, X1, FP8, BP8)];
  281. let AltOrderSelect = [{
  282. return MF.getSubtarget<PPCSubtarget>().getGPRAllocationOrderIdx();
  283. }];
  284. }
  285. // For some instructions r0 is special (representing the value 0 instead of
  286. // the value in the r0 register), and we use these register subclasses to
  287. // prevent r0 from being allocated for use by those instructions.
  288. def GPRC_NOR0 : RegisterClass<"PPC", [i32,f32], 32, (add (sub GPRC, R0), ZERO)> {
  289. // On non-Darwin PPC64 systems, R2 can be allocated, but must be restored, so
  290. // put it at the end of the list.
  291. let AltOrders = [(add (sub GPRC_NOR0, R2), R2),
  292. (add (sequence "R%u", 2, 12),
  293. (sequence "R%u", 31, 13), R1, FP, BP, ZERO)];
  294. let AltOrderSelect = [{
  295. return MF.getSubtarget<PPCSubtarget>().getGPRAllocationOrderIdx();
  296. }];
  297. }
  298. def G8RC_NOX0 : RegisterClass<"PPC", [i64], 64, (add (sub G8RC, X0), ZERO8)> {
  299. // On non-Darwin PPC64 systems, R2 can be allocated, but must be restored, so
  300. // put it at the end of the list.
  301. let AltOrders = [(add (sub G8RC_NOX0, X2), X2),
  302. (add (sequence "X%u", 2, 12),
  303. (sequence "X%u", 31, 13), X1, FP8, BP8, ZERO8)];
  304. let AltOrderSelect = [{
  305. return MF.getSubtarget<PPCSubtarget>().getGPRAllocationOrderIdx();
  306. }];
  307. }
  308. def SPERC : RegisterClass<"PPC", [f64], 64, (add (sequence "S%u", 2, 12),
  309. (sequence "S%u", 30, 13),
  310. S31, S0, S1)>;
  311. // Allocate volatiles first, then non-volatiles in reverse order. With the SVR4
  312. // ABI the size of the Floating-point register save area is determined by the
  313. // allocated non-volatile register with the lowest register number, as FP
  314. // register N is spilled to offset 8 * (32 - N) below the back chain word of the
  315. // previous stack frame. By allocating non-volatiles in reverse order we make
  316. // sure that the Floating-point register save area is always as small as
  317. // possible because there aren't any unused spill slots.
  318. def F8RC : RegisterClass<"PPC", [f64], 64, (add (sequence "F%u", 0, 13),
  319. (sequence "F%u", 31, 14))>;
  320. def F4RC : RegisterClass<"PPC", [f32], 32, (add F8RC)>;
  321. def VRRC : RegisterClass<"PPC",
  322. [v16i8,v8i16,v4i32,v2i64,v1i128,v4f32,v2f64, f128],
  323. 128,
  324. (add V2, V3, V4, V5, V0, V1, V6, V7, V8, V9, V10, V11,
  325. V12, V13, V14, V15, V16, V17, V18, V19, V31, V30,
  326. V29, V28, V27, V26, V25, V24, V23, V22, V21, V20)>;
  327. // VSX register classes (the allocation order mirrors that of the corresponding
  328. // subregister classes).
  329. def VSLRC : RegisterClass<"PPC", [v4i32,v4f32,v2f64,v2i64], 128,
  330. (add (sequence "VSL%u", 0, 13),
  331. (sequence "VSL%u", 31, 14))>;
  332. def VSRC : RegisterClass<"PPC", [v4i32,v4f32,v2f64,v2i64], 128,
  333. (add VSLRC, VRRC)>;
  334. // Register classes for the 64-bit "scalar" VSX subregisters.
  335. def VFRC : RegisterClass<"PPC", [f64], 64,
  336. (add VF2, VF3, VF4, VF5, VF0, VF1, VF6, VF7,
  337. VF8, VF9, VF10, VF11, VF12, VF13, VF14,
  338. VF15, VF16, VF17, VF18, VF19, VF31, VF30,
  339. VF29, VF28, VF27, VF26, VF25, VF24, VF23,
  340. VF22, VF21, VF20)>;
  341. def VSFRC : RegisterClass<"PPC", [f64], 64, (add F8RC, VFRC)>;
  342. // Allow spilling GPR's into caller-saved VSR's.
  343. def SPILLTOVSRRC : RegisterClass<"PPC", [i64, f64], 64, (add G8RC, (sub VSFRC,
  344. (sequence "VF%u", 31, 20),
  345. (sequence "F%u", 31, 14)))>;
  346. // Register class for single precision scalars in VSX registers
  347. def VSSRC : RegisterClass<"PPC", [f32], 32, (add VSFRC)>;
  348. def CRBITRC : RegisterClass<"PPC", [i1], 32,
  349. (add CR2LT, CR2GT, CR2EQ, CR2UN,
  350. CR3LT, CR3GT, CR3EQ, CR3UN,
  351. CR4LT, CR4GT, CR4EQ, CR4UN,
  352. CR5LT, CR5GT, CR5EQ, CR5UN,
  353. CR6LT, CR6GT, CR6EQ, CR6UN,
  354. CR7LT, CR7GT, CR7EQ, CR7UN,
  355. CR1LT, CR1GT, CR1EQ, CR1UN,
  356. CR0LT, CR0GT, CR0EQ, CR0UN)> {
  357. let Size = 32;
  358. let AltOrders = [(sub CRBITRC, CR2LT, CR2GT, CR2EQ, CR2UN, CR3LT, CR3GT,
  359. CR3EQ, CR3UN, CR4LT, CR4GT, CR4EQ, CR4UN)];
  360. let AltOrderSelect = [{
  361. return MF.getSubtarget<PPCSubtarget>().isELFv2ABI() &&
  362. MF.getInfo<PPCFunctionInfo>()->isNonVolatileCRDisabled();
  363. }];
  364. }
  365. def CRRC : RegisterClass<"PPC", [i32], 32,
  366. (add CR0, CR1, CR5, CR6,
  367. CR7, CR2, CR3, CR4)> {
  368. let AltOrders = [(sub CRRC, CR2, CR3, CR4)];
  369. let AltOrderSelect = [{
  370. return MF.getSubtarget<PPCSubtarget>().isELFv2ABI() &&
  371. MF.getInfo<PPCFunctionInfo>()->isNonVolatileCRDisabled();
  372. }];
  373. }
  374. // The CTR registers are not allocatable because they're used by the
  375. // decrement-and-branch instructions, and thus need to stay live across
  376. // multiple basic blocks.
  377. def CTRRC : RegisterClass<"PPC", [i32], 32, (add CTR)> {
  378. let isAllocatable = 0;
  379. }
  380. def CTRRC8 : RegisterClass<"PPC", [i64], 64, (add CTR8)> {
  381. let isAllocatable = 0;
  382. }
  383. def LRRC : RegisterClass<"PPC", [i32], 32, (add LR)> {
  384. let isAllocatable = 0;
  385. }
  386. def LR8RC : RegisterClass<"PPC", [i64], 64, (add LR8)> {
  387. let isAllocatable = 0;
  388. }
  389. def VRSAVERC : RegisterClass<"PPC", [i32], 32, (add VRSAVE)>;
  390. def CARRYRC : RegisterClass<"PPC", [i32], 32, (add CARRY, XER)> {
  391. let CopyCost = -1;
  392. }
  393. let SubRegIndices = [sub_pair0, sub_pair1] in {
  394. def ACC0 : ACC<0, "acc0", [VSRp0, VSRp1]>, DwarfRegNum<[-1, -1]>;
  395. def ACC1 : ACC<1, "acc1", [VSRp2, VSRp3]>, DwarfRegNum<[-1, -1]>;
  396. def ACC2 : ACC<2, "acc2", [VSRp4, VSRp5]>, DwarfRegNum<[-1, -1]>;
  397. def ACC3 : ACC<3, "acc3", [VSRp6, VSRp7]>, DwarfRegNum<[-1, -1]>;
  398. def ACC4 : ACC<4, "acc4", [VSRp8, VSRp9]>, DwarfRegNum<[-1, -1]>;
  399. def ACC5 : ACC<5, "acc5", [VSRp10, VSRp11]>, DwarfRegNum<[-1, -1]>;
  400. def ACC6 : ACC<6, "acc6", [VSRp12, VSRp13]>, DwarfRegNum<[-1, -1]>;
  401. def ACC7 : ACC<7, "acc7", [VSRp14, VSRp15]>, DwarfRegNum<[-1, -1]>;
  402. }
  403. def ACCRC : RegisterClass<"PPC", [v512i1], 128, (add ACC0, ACC1, ACC2, ACC3,
  404. ACC4, ACC5, ACC6, ACC7)> {
  405. // The AllocationPriority is in the range [0, 63]. Assigned the ACC registers
  406. // the highest possible priority in this range to force the register allocator
  407. // to assign these registers first. This is done because the ACC registers
  408. // must represent 4 advacent vector registers. For example ACC1 must be
  409. // VS4 - VS7. The value here must be at least 32 as we want to allocate
  410. // these registers even before we allocate global ranges.
  411. let AllocationPriority = 63;
  412. let Size = 512;
  413. }
  414. let SubRegIndices = [sub_pair0, sub_pair1] in {
  415. def UACC0 : UACC<0, "acc0", [VSRp0, VSRp1]>, DwarfRegNum<[-1, -1]>;
  416. def UACC1 : UACC<1, "acc1", [VSRp2, VSRp3]>, DwarfRegNum<[-1, -1]>;
  417. def UACC2 : UACC<2, "acc2", [VSRp4, VSRp5]>, DwarfRegNum<[-1, -1]>;
  418. def UACC3 : UACC<3, "acc3", [VSRp6, VSRp7]>, DwarfRegNum<[-1, -1]>;
  419. def UACC4 : UACC<4, "acc4", [VSRp8, VSRp9]>, DwarfRegNum<[-1, -1]>;
  420. def UACC5 : UACC<5, "acc5", [VSRp10, VSRp11]>, DwarfRegNum<[-1, -1]>;
  421. def UACC6 : UACC<6, "acc6", [VSRp12, VSRp13]>, DwarfRegNum<[-1, -1]>;
  422. def UACC7 : UACC<7, "acc7", [VSRp14, VSRp15]>, DwarfRegNum<[-1, -1]>;
  423. }
  424. def UACCRC : RegisterClass<"PPC", [v512i1], 128,
  425. (add UACC0, UACC1, UACC2, UACC3,
  426. UACC4, UACC5, UACC6, UACC7)> {
  427. // The AllocationPriority for the UACC registers is still high and must be at
  428. // least 32 as we want to allocate these registers before we allocate other
  429. // global ranges. The value must be less than the AllocationPriority of the
  430. // ACC registers.
  431. let AllocationPriority = 36;
  432. let Size = 512;
  433. }
  434. // FIXME: This allocation order may increase stack frame size when allocating
  435. // non-volatile registers.
  436. //
  437. // Placing Altivec registers first and allocate the rest as underlying VSX
  438. // ones, to reduce interference with accumulator registers (lower 32 VSRs).
  439. // This reduces copies when loading for accumulators, which is common use for
  440. // paired VSX registers.
  441. def VSRpRC :
  442. RegisterClass<"PPC", [v256i1], 128,
  443. (add VSRp17, VSRp18, VSRp16, VSRp19, VSRp20, VSRp21,
  444. VSRp22, VSRp23, VSRp24, VSRp25, VSRp31, VSRp30,
  445. VSRp29, VSRp28, VSRp27, VSRp26,
  446. (sequence "VSRp%u", 0, 6),
  447. (sequence "VSRp%u", 15, 7))> {
  448. // Give the VSRp registers a non-zero AllocationPriority. The value is less
  449. // than 32 as these registers should not always be allocated before global
  450. // ranges and the value should be less than the AllocationPriority - 32 for
  451. // the UACC registers. Even global VSRp registers should be allocated after
  452. // the UACC registers have been chosen.
  453. let AllocationPriority = 2;
  454. let Size = 256;
  455. }
  456. // Make AllocationOrder as similar as G8RC's to avoid potential spilling.
  457. // Similarly, we have an AltOrder for 64-bit ELF ABI which r2 is allocated
  458. // at last.
  459. def G8pRC :
  460. RegisterClass<"PPC", [i128], 128,
  461. (add (sequence "G8p%u", 1, 5),
  462. (sequence "G8p%u", 14, 7),
  463. G8p15, G8p6, G8p0)> {
  464. let AltOrders = [(add (sub G8pRC, G8p1), G8p1)];
  465. let AltOrderSelect = [{
  466. return MF.getSubtarget<PPCSubtarget>().is64BitELFABI();
  467. }];
  468. let Size = 128;
  469. }