//===- PPCCallingConv.td - Calling Conventions for PowerPC -*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the PowerPC 32- and 64-bit
// architectures.
//
//===----------------------------------------------------------------------===//
/// CCIfSubtarget - Match if the current subtarget has a feature F.
/// F is spliced verbatim into generated C++ as a PPCSubtarget member call
/// (e.g. "isPPC64()").
class CCIfSubtarget<string F, CCAction A>
    : CCIf<!strconcat("static_cast<const PPCSubtarget&>"
                         "(State.getMachineFunction().getSubtarget()).",
                      F),
           A>;

/// CCIfNotSubtarget - Match if the current subtarget does NOT have feature F.
class CCIfNotSubtarget<string F, CCAction A>
    : CCIf<!strconcat("!static_cast<const PPCSubtarget&>"
                         "(State.getMachineFunction().getSubtarget()).",
                      F),
           A>;

/// CCIfOrigArgWasNotPPCF128 - Match if the original (pre-legalization) IR
/// argument for ValNo was not a ppc_fp128, as recorded in the PPC-specific
/// CCState.
class CCIfOrigArgWasNotPPCF128<CCAction A>
    : CCIf<"!static_cast<PPCCCState *>(&State)->WasOriginalArgPPCF128(ValNo)",
           A>;

/// CCIfOrigArgWasPPCF128 - Match if the original IR argument for ValNo was a
/// ppc_fp128.
class CCIfOrigArgWasPPCF128<CCAction A>
    : CCIf<"static_cast<PPCCCState *>(&State)->WasOriginalArgPPCF128(ValNo)",
           A>;
//===----------------------------------------------------------------------===//
// Return Value Calling Convention
//===----------------------------------------------------------------------===//

// PPC64 AnyReg return-value convention. No explicit register is specified for
// the return-value. The register allocator is allowed and expected to choose
// any free register.
//
// This calling convention is currently only supported by the stackmap and
// patchpoint intrinsics. All other uses will result in an assert on Debug
// builds. On Release builds we fallback to the PPC C calling convention.
def RetCC_PPC64_AnyReg : CallingConv<[
  // Any attempt to actually assign a value through this convention reports an
  // error via the custom handler.
  CCCustom<"CC_PPC_AnyReg_Error">
]>;
// Return-value convention for PowerPC coldcc.
// NOTE(review): 'let Entry = 1' appears to export the convention for direct
// use from C++ lowering code — confirm against CallingConvEmitter.
let Entry = 1 in
def RetCC_PPC_Cold : CallingConv<[
  // Use the same return registers as RetCC_PPC, but limited to only
  // one return value. The remaining return values will be saved to
  // the stack.
  CCIfType<[i32, i1], CCIfSubtarget<"isPPC64()", CCPromoteToType<i64>>>,
  CCIfType<[i1], CCIfNotSubtarget<"isPPC64()", CCPromoteToType<i32>>>,
  CCIfType<[i32], CCAssignToReg<[R3]>>,
  CCIfType<[i64], CCAssignToReg<[X3]>>,
  CCIfType<[i128], CCAssignToReg<[X3]>>,
  CCIfType<[f32], CCAssignToReg<[F1]>>,
  CCIfType<[f64], CCAssignToReg<[F1]>>,
  // f128 and vectors are only returnable when AltiVec registers exist.
  CCIfType<[f128], CCIfSubtarget<"hasAltivec()", CCAssignToReg<[V2]>>>,
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2]>>>
]>;
// Return-value convention for PowerPC (the default C convention).
let Entry = 1 in
def RetCC_PPC : CallingConv<[
  // Stackmap/patchpoint AnyReg calls delegate to the error-reporting
  // convention above.
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_PPC64_AnyReg>>,

  // On PPC64, integer return values are always promoted to i64.
  CCIfType<[i32, i1], CCIfSubtarget<"isPPC64()", CCPromoteToType<i64>>>,
  CCIfType<[i1], CCIfNotSubtarget<"isPPC64()", CCPromoteToType<i32>>>,

  CCIfType<[i32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>,
  CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6]>>,
  CCIfType<[i128], CCAssignToReg<[X3, X4, X5, X6]>>,

  // Floating point types returned as "direct" go into F1 .. F8; note that
  // only the ELFv2 ABI fully utilizes all these registers.
  CCIfNotSubtarget<"hasSPE()",
       CCIfType<[f32], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>>,
  CCIfNotSubtarget<"hasSPE()",
       CCIfType<[f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>>,
  // With SPE there are no separate FPRs: f32 returns in GPRs, and f64 goes
  // through a custom handler.
  CCIfSubtarget<"hasSPE()",
       CCIfType<[f32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>>,
  CCIfSubtarget<"hasSPE()",
       CCIfType<[f64], CCCustom<"CC_PPC32_SPE_RetF64">>>,

  // For P9, f128 are passed in vector registers.
  CCIfType<[f128],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>,

  // Vector types returned as "direct" go into V2 .. V9; note that only the
  // ELFv2 ABI fully utilizes all these registers.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>
]>;
// No explicit register is specified for the AnyReg calling convention. The
// register allocator may assign the arguments to any free register.
//
// This calling convention is currently only supported by the stackmap and
// patchpoint intrinsics. All other uses will result in an assert on Debug
// builds. On Release builds we fallback to the PPC C calling convention.
def CC_PPC64_AnyReg : CallingConv<[
  // Argument-side counterpart of RetCC_PPC64_AnyReg: reports an error via the
  // custom handler for any unsupported use.
  CCCustom<"CC_PPC_AnyReg_Error">
]>;
// Note that we don't currently have calling conventions for 64-bit
// PowerPC, but handle all the complexities of the ABI in the lowering
// logic. FIXME: See if the logic can be simplified with use of CCs.
// This may require some extensions to current table generation.

// Simple calling convention for 64-bit ELF PowerPC fast isel.
// Only handle ints and floats. All ints are promoted to i64.
// Vector types and quadword ints are not handled.
let Entry = 1 in
def CC_PPC64_ELF_FIS : CallingConv<[
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<CC_PPC64_AnyReg>>,
  // All small integer types widen to i64 before register assignment.
  CCIfType<[i1], CCPromoteToType<i64>>,
  CCIfType<[i8], CCPromoteToType<i64>>,
  CCIfType<[i16], CCPromoteToType<i64>>,
  CCIfType<[i32], CCPromoteToType<i64>>,
  // First eight integer args in X3-X10; first eight FP args in F1-F8.
  CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6, X7, X8, X9, X10]>>,
  CCIfType<[f32, f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>
]>;
// Simple return-value convention for 64-bit ELF PowerPC fast isel.
// All small ints are promoted to i64. Vector types, quadword ints,
// and multiple register returns are "supported" to avoid compile
// errors, but none are handled by the fast selector.
let Entry = 1 in
def RetCC_PPC64_ELF_FIS : CallingConv<[
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_PPC64_AnyReg>>,
  CCIfType<[i1], CCPromoteToType<i64>>,
  CCIfType<[i8], CCPromoteToType<i64>>,
  CCIfType<[i16], CCPromoteToType<i64>>,
  CCIfType<[i32], CCPromoteToType<i64>>,
  CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6]>>,
  CCIfType<[i128], CCAssignToReg<[X3, X4, X5, X6]>>,
  CCIfType<[f32], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
  CCIfType<[f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
  // f128 and vector returns need AltiVec; register list mirrors RetCC_PPC.
  CCIfType<[f128],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>,
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>
]>;
//===----------------------------------------------------------------------===//
// PowerPC System V Release 4 32-bit ABI
//===----------------------------------------------------------------------===//

// Argument-assignment rules shared by the 32-bit SVR4 conventions below.
def CC_PPC32_SVR4_Common : CallingConv<[
  CCIfType<[i1], CCPromoteToType<i32>>,

  // The ABI requires i64 to be passed in two adjacent registers with the first
  // register having an odd register number.
  CCIfType<[i32],
           CCIfSplit<CCIfSubtarget<"useSoftFloat()",
                     CCIfOrigArgWasNotPPCF128<
                       CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>>>,

  CCIfType<[i32],
           CCIfSplit<CCIfNotSubtarget<"useSoftFloat()",
                     CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>>,

  // SPE passes f64 in a GPR pair, which needs the same pair alignment.
  CCIfType<[f64],
           CCIfSubtarget<"hasSPE()",
                         CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>,

  // Under soft float, trailing pieces of an original ppc_fp128 that cannot
  // complete a register pair are skipped by a custom handler.
  CCIfSplit<CCIfSubtarget<"useSoftFloat()",
            CCIfOrigArgWasPPCF128<CCCustom<
              "CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128">>>>,

  // The 'nest' parameter, if any, is passed in R11.
  CCIfNest<CCAssignToReg<[R11]>>,

  // The first 8 integer arguments are passed in integer registers.
  CCIfType<[i32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>,

  // Make sure the i64 words from a long double are either both passed in
  // registers or both passed on the stack.
  CCIfType<[f64], CCIfSplit<CCCustom<"CC_PPC32_SVR4_Custom_AlignFPArgRegs">>>,

  // FP values are passed in F1 - F8.
  CCIfType<[f32, f64],
           CCIfNotSubtarget<"hasSPE()",
                            CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>>,
  // With SPE: f64 is split across a GPR pair by a custom handler and f32 is
  // passed directly in GPRs.
  CCIfType<[f64],
           CCIfSubtarget<"hasSPE()",
                         CCCustom<"CC_PPC32_SPE_CustomSplitFP64">>>,
  CCIfType<[f32],
           CCIfSubtarget<"hasSPE()",
                         CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>>,

  // Split arguments have an alignment of 8 bytes on the stack.
  CCIfType<[i32], CCIfSplit<CCAssignToStack<4, 8>>>,

  CCIfType<[i32], CCAssignToStack<4, 4>>,

  // Floats are stored in double precision format, thus they have the same
  // alignment and size as doubles.
  // With SPE floats are stored as single precision, so have alignment and
  // size of int.
  CCIfType<[f32,f64], CCIfNotSubtarget<"hasSPE()", CCAssignToStack<8, 8>>>,
  CCIfType<[f32], CCIfSubtarget<"hasSPE()", CCAssignToStack<4, 4>>>,
  CCIfType<[f64], CCIfSubtarget<"hasSPE()", CCAssignToStack<8, 8>>>,

  // Vectors and float128 get 16-byte stack slots that are 16-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v4f32, v2f64, v2i64], CCAssignToStack<16, 16>>,
  CCIfType<[f128], CCIfSubtarget<"hasAltivec()", CCAssignToStack<16, 16>>>
]>;
// This calling convention puts vector arguments always on the stack. It is used
// to assign vector arguments which belong to the variable portion of the
// parameter list of a variable argument function.
let Entry = 1 in
def CC_PPC32_SVR4_VarArg : CallingConv<[
  // No vector-register rules here: everything falls straight through to the
  // common convention, so vectors take the 16-byte stack slots defined there.
  CCDelegateTo<CC_PPC32_SVR4_Common>
]>;
// In contrast to CC_PPC32_SVR4_VarArg, this calling convention first tries to
// put vector arguments in vector registers before putting them on the stack.
let Entry = 1 in
def CC_PPC32_SVR4 : CallingConv<[
  // The first 12 Vector arguments are passed in AltiVec registers.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
           CCIfSubtarget<"hasAltivec()", CCAssignToReg<[V2, V3, V4, V5, V6, V7,
                                                        V8, V9, V10, V11, V12,
                                                        V13]>>>,
  // Float128 types treated as vector arguments.
  CCIfType<[f128],
           CCIfSubtarget<"hasAltivec()", CCAssignToReg<[V2, V3, V4, V5, V6, V7,
                                                        V8, V9, V10, V11, V12,
                                                        V13]>>>,
  // Everything else follows the common SVR4 rules.
  CCDelegateTo<CC_PPC32_SVR4_Common>
]>;
// Helper "calling convention" to handle aggregate by value arguments.
// Aggregate by value arguments are always placed in the local variable space
// of the caller. This calling convention is only used to assign those stack
// offsets in the callers stack frame.
//
// Still, the address of the aggregate copy in the callers stack frame is passed
// in a GPR (or in the parameter list area if all GPRs are allocated) from the
// caller to the callee. The location for the address argument is assigned by
// the CC_PPC32_SVR4 calling convention.
//
// The only purpose of CC_PPC32_SVR4_Custom_Dummy is to skip arguments which are
// not passed by value.
let Entry = 1 in
def CC_PPC32_SVR4_ByVal : CallingConv<[
  // Each by-value aggregate gets a 4-byte-aligned slot of (at least) 4 bytes.
  CCIfByVal<CCPassByVal<4, 4>>,

  CCCustom<"CC_PPC32_SVR4_Custom_Dummy">
]>;
// Callee-saved AltiVec registers (V20-V31), shared by several ABIs below.
def CSR_Altivec : CalleeSavedRegs<(add V20, V21, V22, V23, V24, V25, V26, V27,
                                       V28, V29, V30, V31)>;

// SPE does not use FPRs, so break out the common register set as base.
def CSR_SVR432_COMM : CalleeSavedRegs<(add R14, R15, R16, R17, R18, R19, R20,
                                           R21, R22, R23, R24, R25, R26, R27,
                                           R28, R29, R30, R31, CR2, CR3, CR4
                                      )>;

// SVR4 32-bit hard-float: common set plus the non-volatile FPRs F14-F31.
def CSR_SVR432 : CalleeSavedRegs<(add CSR_SVR432_COMM, F14, F15, F16, F17, F18,
                                      F19, F20, F21, F22, F23, F24, F25, F26,
                                      F27, F28, F29, F30, F31
                                 )>;

// SPE callee-saved registers S14-S31.
def CSR_SPE : CalleeSavedRegs<(add S14, S15, S16, S17, S18, S19, S20, S21, S22,
                                   S23, S24, S25, S26, S27, S28, S29, S30, S31
                              )>;

def CSR_SVR432_Altivec : CalleeSavedRegs<(add CSR_SVR432, CSR_Altivec)>;

def CSR_SVR432_SPE : CalleeSavedRegs<(add CSR_SVR432_COMM, CSR_SPE)>;
// AIX 32-bit callee-saved set: like SVR4 but R13 is also non-volatile.
def CSR_AIX32 : CalleeSavedRegs<(add R13, R14, R15, R16, R17, R18, R19, R20,
                                     R21, R22, R23, R24, R25, R26, R27, R28,
                                     R29, R30, R31, F14, F15, F16, F17, F18,
                                     F19, F20, F21, F22, F23, F24, F25, F26,
                                     F27, F28, F29, F30, F31, CR2, CR3, CR4
                                )>;

def CSR_AIX32_Altivec : CalleeSavedRegs<(add CSR_AIX32, CSR_Altivec)>;
// Common CalleeSavedRegs for SVR4 and AIX.
def CSR_PPC64 : CalleeSavedRegs<(add X14, X15, X16, X17, X18, X19, X20,
                                     X21, X22, X23, X24, X25, X26, X27, X28,
                                     X29, X30, X31, F14, F15, F16, F17, F18,
                                     F19, F20, F21, F22, F23, F24, F25, F26,
                                     F27, F28, F29, F30, F31, CR2, CR3, CR4
                                )>;

def CSR_PPC64_Altivec : CalleeSavedRegs<(add CSR_PPC64, CSR_Altivec)>;

// _R2 variants additionally preserve X2 (the TOC pointer; see the coldcc
// notes below on when r2 is added).
def CSR_PPC64_R2 : CalleeSavedRegs<(add CSR_PPC64, X2)>;

def CSR_PPC64_R2_Altivec : CalleeSavedRegs<(add CSR_PPC64_Altivec, X2)>;

// Empty set: no registers are treated as callee-saved.
def CSR_NoRegs : CalleeSavedRegs<(add)>;
// coldcc calling convention marks most registers as non-volatile.
// Do not include r1 since the stack pointer is never considered a CSR.
// Do not include r2, since it is the TOC register and is added depending
// on whether or not the function uses the TOC and is a non-leaf.
// Do not include r0,r11,r13 as they are optional in functional linkage
// and value may be altered by inter-library calls.
// Do not include r12 as it is used as a scratch register.
// Do not include return registers r3, f1, v2.
def CSR_SVR32_ColdCC_Common : CalleeSavedRegs<(add (sequence "R%u", 4, 10),
                                                   (sequence "R%u", 14, 31),
                                                   (sequence "CR%u", 0, 7))>;

// Hard-float coldcc: common set plus every FPR except the return register F1.
def CSR_SVR32_ColdCC : CalleeSavedRegs<(add CSR_SVR32_ColdCC_Common,
                                            F0, (sequence "F%u", 2, 31))>;

// AltiVec coldcc: also save all vector registers except the return register V2.
def CSR_SVR32_ColdCC_Altivec : CalleeSavedRegs<(add CSR_SVR32_ColdCC,
                                                    (sequence "V%u", 0, 1),
                                                    (sequence "V%u", 3, 31))>;

// SPE coldcc: S-register counterparts of the saved GPRs instead of FPRs.
def CSR_SVR32_ColdCC_SPE : CalleeSavedRegs<(add CSR_SVR32_ColdCC_Common,
                                                (sequence "S%u", 4, 10),
                                                (sequence "S%u", 14, 31))>;

// 64-bit coldcc follows the same scheme using the 64-bit GPRs.
def CSR_SVR64_ColdCC : CalleeSavedRegs<(add (sequence "X%u", 4, 10),
                                            (sequence "X%u", 14, 31),
                                            F0, (sequence "F%u", 2, 31),
                                            (sequence "CR%u", 0, 7))>;

// _R2 variants additionally preserve X2 (the TOC pointer).
def CSR_SVR64_ColdCC_R2: CalleeSavedRegs<(add CSR_SVR64_ColdCC, X2)>;

def CSR_SVR64_ColdCC_Altivec : CalleeSavedRegs<(add CSR_SVR64_ColdCC,
                                                    (sequence "V%u", 0, 1),
                                                    (sequence "V%u", 3, 31))>;

def CSR_SVR64_ColdCC_R2_Altivec : CalleeSavedRegs<(add CSR_SVR64_ColdCC_Altivec, X2)>;

// "All registers" sets: every GPR/FPR/CR field that is not reserved is saved
// (used with AnyReg-style conventions; X0/X3-X10 included, X1/X2/X11-X13 not).
def CSR_64_AllRegs: CalleeSavedRegs<(add X0, (sequence "X%u", 3, 10),
                                         (sequence "X%u", 14, 31),
                                         (sequence "F%u", 0, 31),
                                         (sequence "CR%u", 0, 7))>;

def CSR_64_AllRegs_Altivec : CalleeSavedRegs<(add CSR_64_AllRegs,
                                                  (sequence "V%u", 0, 31))>;

// AIX default (Dflt) variants restrict the vector set to V0-V19.
def CSR_64_AllRegs_AIX_Dflt_Altivec : CalleeSavedRegs<(add CSR_64_AllRegs,
                                                           (sequence "V%u", 0, 19))>;

def CSR_64_AllRegs_VSX : CalleeSavedRegs<(add CSR_64_AllRegs_Altivec,
                                              (sequence "VSL%u", 0, 31))>;

def CSR_64_AllRegs_AIX_Dflt_VSX : CalleeSavedRegs<(add CSR_64_AllRegs_Altivec,
                                                       (sequence "VSL%u", 0, 19))>;
  301. (sequence "VSL%u", 0, 19))>;