  1. //===-- X86CallingConv.td - Calling Conventions X86 32/64 --*- tablegen -*-===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This describes the calling conventions for the X86-32 and X86-64
  10. // architectures.
  11. //
  12. //===----------------------------------------------------------------------===//
/// CCIfSubtarget - Match if the current subtarget has a feature F.
class CCIfSubtarget<string F, CCAction A>
    : CCIf<!strconcat("static_cast<const X86Subtarget&>"
                      "(State.getMachineFunction().getSubtarget()).", F),
           A>;
/// CCIfNotSubtarget - Match if the current subtarget doesn't have a feature F.
class CCIfNotSubtarget<string F, CCAction A>
    : CCIf<!strconcat("!static_cast<const X86Subtarget&>"
                      "(State.getMachineFunction().getSubtarget()).", F),
           A>;
/// CCIfIsVarArgOnWin - Match if this is a vararg call on a Windows 32-bit
/// (MSVC environment) target.
class CCIfIsVarArgOnWin<CCAction A>
    : CCIf<"State.isVarArg() && "
           "State.getMachineFunction().getSubtarget().getTargetTriple()."
           "isWindowsMSVCEnvironment()",
           A>;
// Register classes for RegCall. Concrete regcall variants (32-bit, Win64,
// SysV64) override these lists with the registers they actually use.
class RC_X86_RegCall {
  list<Register> GPR_8 = [];
  list<Register> GPR_16 = [];
  list<Register> GPR_32 = [];
  list<Register> GPR_64 = [];
  list<Register> FP_CALL = [FP0];
  list<Register> FP_RET = [FP0, FP1];
  list<Register> XMM = [];
  list<Register> YMM = [];
  list<Register> ZMM = [];
}
// RegCall register classes for 32 bits
def RC_X86_32_RegCall : RC_X86_RegCall {
  let GPR_8 = [AL, CL, DL, DIL, SIL];
  let GPR_16 = [AX, CX, DX, DI, SI];
  let GPR_32 = [EAX, ECX, EDX, EDI, ESI];
  let GPR_64 = [RAX]; ///< Not actually used, but AssignToReg can't handle []
                      ///< \todo Fix AssignToReg to enable empty lists
  let XMM = [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7];
  let YMM = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7];
  let ZMM = [ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6, ZMM7];
}
// Common vector register lists shared by the 64-bit regcall variants;
// the GPR lists differ per ABI and are set in the Win/SysV subclasses.
class RC_X86_64_RegCall : RC_X86_RegCall {
  let XMM = [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
             XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15];
  let YMM = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
             YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15];
  let ZMM = [ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6, ZMM7,
             ZMM8, ZMM9, ZMM10, ZMM11, ZMM12, ZMM13, ZMM14, ZMM15];
}
// Win64 regcall GPR lists (note: R13 is excluded here).
def RC_X86_64_RegCall_Win : RC_X86_64_RegCall {
  let GPR_8 = [AL, CL, DL, DIL, SIL, R8B, R9B, R10B, R11B, R12B, R14B, R15B];
  let GPR_16 = [AX, CX, DX, DI, SI, R8W, R9W, R10W, R11W, R12W, R14W, R15W];
  let GPR_32 = [EAX, ECX, EDX, EDI, ESI, R8D, R9D, R10D, R11D, R12D, R14D, R15D];
  let GPR_64 = [RAX, RCX, RDX, RDI, RSI, R8, R9, R10, R11, R12, R14, R15];
}
// SysV64 regcall GPR lists (note: R10 and R11 are excluded here).
def RC_X86_64_RegCall_SysV : RC_X86_64_RegCall {
  let GPR_8 = [AL, CL, DL, DIL, SIL, R8B, R9B, R12B, R13B, R14B, R15B];
  let GPR_16 = [AX, CX, DX, DI, SI, R8W, R9W, R12W, R13W, R14W, R15W];
  let GPR_32 = [EAX, ECX, EDX, EDI, ESI, R8D, R9D, R12D, R13D, R14D, R15D];
  let GPR_64 = [RAX, RCX, RDX, RDI, RSI, R8, R9, R12, R13, R14, R15];
}
// X86-64 Intel regcall calling convention.
// Instantiating this multiclass with a register-class bundle RC produces both
// the argument convention (CC_<name>) and the return convention (RetCC_<name>).
// NOTE: the rules below are matched in order; the first matching rule wins.
multiclass X86_RegCall_base<RC_X86_RegCall RC> {
def CC_#NAME : CallingConv<[
  // Handles byval parameters.
  CCIfSubtarget<"is64Bit()", CCIfByVal<CCPassByVal<8, 8>>>,
  CCIfByVal<CCPassByVal<4, 4>>,

  // Promote i1/i8/i16/v1i1 arguments to i32.
  CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,

  // Promote v8i1/v16i1/v32i1 arguments to i32.
  CCIfType<[v8i1, v16i1, v32i1], CCPromoteToType<i32>>,

  // bool, char, int, enum, long, pointer --> GPR
  CCIfType<[i32], CCAssignToReg<RC.GPR_32>>,

  // long long, __int64 --> GPR
  CCIfType<[i64], CCAssignToReg<RC.GPR_64>>,

  // __mmask64 (v64i1) --> GPR64 (for x64) or 2 x GPR32 (for IA32)
  CCIfType<[v64i1], CCPromoteToType<i64>>,
  CCIfSubtarget<"is64Bit()", CCIfType<[i64],
    CCAssignToReg<RC.GPR_64>>>,
  CCIfSubtarget<"is32Bit()", CCIfType<[i64],
    CCCustom<"CC_X86_32_RegCall_Assign2Regs">>>,

  // float, double, float128 --> XMM
  // In the case of SSE disabled --> save to stack
  CCIfType<[f32, f64, f128],
    CCIfSubtarget<"hasSSE1()", CCAssignToReg<RC.XMM>>>,

  // long double --> FP
  CCIfType<[f80], CCAssignToReg<RC.FP_CALL>>,

  // __m128, __m128i, __m128d --> XMM
  // In the case of SSE disabled --> save to stack
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
    CCIfSubtarget<"hasSSE1()", CCAssignToReg<RC.XMM>>>,

  // __m256, __m256i, __m256d --> YMM
  // In the case of SSE disabled --> save to stack
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
    CCIfSubtarget<"hasAVX()", CCAssignToReg<RC.YMM>>>,

  // __m512, __m512i, __m512d --> ZMM
  // In the case of SSE disabled --> save to stack
  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    CCIfSubtarget<"hasAVX512()", CCAssignToReg<RC.ZMM>>>,

  // If no register was found -> assign to stack

  // In 64 bit, assign 64/32 bit values to 8 byte stack
  CCIfSubtarget<"is64Bit()", CCIfType<[i32, i64, f32, f64],
    CCAssignToStack<8, 8>>>,

  // In 32 bit, assign 64/32 bit values to 8/4 byte stack
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
  CCIfType<[i64, f64], CCAssignToStack<8, 4>>,

  // MMX type gets 8 byte slot in stack, while alignment depends on target
  CCIfSubtarget<"is64Bit()", CCIfType<[x86mmx], CCAssignToStack<8, 8>>>,
  CCIfType<[x86mmx], CCAssignToStack<8, 4>>,

  // f80 and f128 get stack slots whose size and alignment depend
  // on the subtarget.
  CCIfType<[f80, f128], CCAssignToStack<0, 0>>,

  // Vectors get 16-byte stack slots that are 16-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
    CCAssignToStack<16, 16>>,

  // 256-bit vectors get 32-byte stack slots that are 32-byte aligned.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
    CCAssignToStack<32, 32>>,

  // 512-bit vectors get 64-byte stack slots that are 64-byte aligned.
  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    CCAssignToStack<64, 64>>
]>;

def RetCC_#NAME : CallingConv<[
  // Promote i1, v1i1, v8i1 arguments to i8.
  CCIfType<[i1, v1i1, v8i1], CCPromoteToType<i8>>,

  // Promote v16i1 arguments to i16.
  CCIfType<[v16i1], CCPromoteToType<i16>>,

  // Promote v32i1 arguments to i32.
  CCIfType<[v32i1], CCPromoteToType<i32>>,

  // bool, char, int, enum, long, pointer --> GPR
  CCIfType<[i8], CCAssignToReg<RC.GPR_8>>,
  CCIfType<[i16], CCAssignToReg<RC.GPR_16>>,
  CCIfType<[i32], CCAssignToReg<RC.GPR_32>>,

  // long long, __int64 --> GPR
  CCIfType<[i64], CCAssignToReg<RC.GPR_64>>,

  // __mmask64 (v64i1) --> GPR64 (for x64) or 2 x GPR32 (for IA32)
  CCIfType<[v64i1], CCPromoteToType<i64>>,
  CCIfSubtarget<"is64Bit()", CCIfType<[i64],
    CCAssignToReg<RC.GPR_64>>>,
  CCIfSubtarget<"is32Bit()", CCIfType<[i64],
    CCCustom<"CC_X86_32_RegCall_Assign2Regs">>>,

  // long double --> FP
  CCIfType<[f80], CCAssignToReg<RC.FP_RET>>,

  // float, double, float128 --> XMM
  CCIfType<[f32, f64, f128],
    CCIfSubtarget<"hasSSE1()", CCAssignToReg<RC.XMM>>>,

  // __m128, __m128i, __m128d --> XMM
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
    CCIfSubtarget<"hasSSE1()", CCAssignToReg<RC.XMM>>>,

  // __m256, __m256i, __m256d --> YMM
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
    CCIfSubtarget<"hasAVX()", CCAssignToReg<RC.YMM>>>,

  // __m512, __m512i, __m512d --> ZMM
  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    CCIfSubtarget<"hasAVX512()", CCAssignToReg<RC.ZMM>>>
]>;
}
//===----------------------------------------------------------------------===//
// Return Value Calling Conventions
//===----------------------------------------------------------------------===//

// Return-value conventions common to all X86 CC's.
def RetCC_X86Common : CallingConv<[
  // Scalar values are returned in AX first, then DX.  For i8, the ABI
  // requires the values to be in AL and AH, however this code uses AL and DL
  // instead. This is because using AH for the second register conflicts with
  // the way LLVM does multiple return values -- a return of {i16,i8} would end
  // up in AX and AH, which overlap. Front-ends wishing to conform to the ABI
  // for functions that return two i8 values are currently expected to pack the
  // values into an i16 (which uses AX, and thus AL:AH).
  //
  // For code that doesn't care about the ABI, we allow returning more than two
  // integer values in registers.
  CCIfType<[v1i1],  CCPromoteToType<i8>>,
  CCIfType<[i1],  CCPromoteToType<i8>>,
  CCIfType<[i8] , CCAssignToReg<[AL, DL, CL]>>,
  CCIfType<[i16], CCAssignToReg<[AX, DX, CX]>>,
  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>,
  CCIfType<[i64], CCAssignToReg<[RAX, RDX, RCX]>>,

  // Boolean vectors of AVX-512 are returned in SIMD registers.
  // The call from AVX to AVX-512 function should work,
  // since the boolean types in AVX/AVX2 are promoted by default.
  CCIfType<[v2i1],  CCPromoteToType<v2i64>>,
  CCIfType<[v4i1],  CCPromoteToType<v4i32>>,
  CCIfType<[v8i1],  CCPromoteToType<v8i16>>,
  CCIfType<[v16i1], CCPromoteToType<v16i8>>,
  CCIfType<[v32i1], CCPromoteToType<v32i8>>,
  CCIfType<[v64i1], CCPromoteToType<v64i8>>,

  // Vector types are returned in XMM0 and XMM1, when they fit.  XMM2 and XMM3
  // can only be used by ABI non-compliant code. If the target doesn't have XMM
  // registers, it won't have vector types.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
            CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,

  // 256-bit vectors are returned in YMM0 and YMM1, when they fit.  YMM2 and
  // YMM3 can only be used by ABI non-compliant code. This vector type is only
  // supported while using the AVX target feature.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
            CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,

  // 512-bit vectors are returned in ZMM0 and ZMM1, when they fit.  ZMM2 and
  // ZMM3 can only be used by ABI non-compliant code. This vector type is only
  // supported while using the AVX-512 target feature.
  CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
            CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,

  // MMX vector types are always returned in MM0. If the target doesn't have
  // MM0, it doesn't support these vector types.
  CCIfType<[x86mmx], CCAssignToReg<[MM0]>>,

  // Long double types are always returned in FP0 (even with SSE),
  // except on Win64.
  CCIfNotSubtarget<"isTargetWin64()", CCIfType<[f80], CCAssignToReg<[FP0, FP1]>>>
]>;
// X86-32 C return-value convention.
def RetCC_X86_32_C : CallingConv<[
  // The X86-32 calling convention returns FP values in FP0, unless marked
  // with "inreg" (used here to distinguish one kind of reg from another,
  // weirdly; this is really the sse-regparm calling convention) in which
  // case they use XMM0, otherwise it is the same as the common X86 calling
  // conv.
  CCIfInReg<CCIfSubtarget<"hasSSE2()",
    CCIfType<[f32, f64], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
  CCIfSubtarget<"hasX87()",
    CCIfType<[f32, f64], CCAssignToReg<[FP0, FP1]>>>,
  // Without x87, f32 is returned in integer registers instead.
  CCIfNotSubtarget<"hasX87()",
    CCIfType<[f32], CCAssignToReg<[EAX, EDX, ECX]>>>,
  CCIfType<[f16], CCAssignToReg<[XMM0,XMM1,XMM2]>>,
  CCDelegateTo<RetCC_X86Common>
]>;
// X86-32 FastCC return-value convention.
def RetCC_X86_32_Fast : CallingConv<[
  // The X86-32 fastcc returns 1, 2, or 3 FP values in XMM0-2 if the target has
  // SSE2.
  // This can happen when a float, 2 x float, or 3 x float vector is split by
  // target lowering, and is returned in 1-3 sse regs.
  CCIfType<[f32], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
  CCIfType<[f64], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,

  // For integers, ECX can be used as an extra return register
  CCIfType<[i8],  CCAssignToReg<[AL, DL, CL]>>,
  CCIfType<[i16], CCAssignToReg<[AX, DX, CX]>>,
  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>,

  // Otherwise, it is the same as the common X86 calling convention.
  CCDelegateTo<RetCC_X86Common>
]>;
// Intel_OCL_BI return-value convention.
def RetCC_Intel_OCL_BI : CallingConv<[
  // Vector types are returned in XMM0, XMM1, XMM2 and XMM3.
  CCIfType<[f32, f64, v4i32, v2i64, v4f32, v2f64],
    CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,

  // 256-bit FP vectors
  // No more than 4 registers
  CCIfType<[v8f32, v4f64, v8i32, v4i64],
    CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,

  // 512-bit FP vectors
  CCIfType<[v16f32, v8f64, v16i32, v8i64],
    CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,

  // i32, i64 in the standard way
  CCDelegateTo<RetCC_X86Common>
]>;
// X86-32 HiPE return-value convention.
def RetCC_X86_32_HiPE : CallingConv<[
  // Promote all types to i32
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Return: HP, P, VAL1, VAL2
  CCIfType<[i32], CCAssignToReg<[ESI, EBP, EAX, EDX]>>
]>;
// X86-32 Vectorcall return-value convention.
def RetCC_X86_32_VectorCall : CallingConv<[
  // Floating Point types are returned in XMM0, XMM1, XMM2 and XMM3.
  CCIfType<[f32, f64, f128],
    CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,

  // Return integers in the standard way.
  CCDelegateTo<RetCC_X86Common>
]>;
// X86-64 C return-value convention.
def RetCC_X86_64_C : CallingConv<[
  // The X86-64 calling convention always returns FP values in XMM0.
  CCIfType<[f16],  CCAssignToReg<[XMM0, XMM1]>>,
  CCIfType<[f32],  CCAssignToReg<[XMM0, XMM1]>>,
  CCIfType<[f64],  CCAssignToReg<[XMM0, XMM1]>>,
  CCIfType<[f128], CCAssignToReg<[XMM0, XMM1]>>,

  // MMX vector types are always returned in XMM0.
  CCIfType<[x86mmx], CCAssignToReg<[XMM0, XMM1]>>,

  // Pointers are always returned in full 64-bit registers.
  CCIfPtr<CCCustom<"CC_X86_64_Pointer">>,

  // A swifterror value is returned in the callee-saved register R12.
  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[R12]>>>,

  CCDelegateTo<RetCC_X86Common>
]>;
// X86-Win64 C return-value convention.
def RetCC_X86_Win64_C : CallingConv<[
  // The X86-Win64 calling convention always returns __m64 values in RAX.
  CCIfType<[x86mmx], CCBitConvertToType<i64>>,

  // GCC returns FP values in RAX on Win64.
  CCIfType<[f32], CCIfNotSubtarget<"hasSSE1()", CCBitConvertToType<i32>>>,
  CCIfType<[f64], CCIfNotSubtarget<"hasSSE1()", CCBitConvertToType<i64>>>,

  // Otherwise, everything is the same as 'normal' X86-64 C CC.
  CCDelegateTo<RetCC_X86_64_C>
]>;
// X86-64 vectorcall return-value convention.
def RetCC_X86_64_Vectorcall : CallingConv<[
  // Vectorcall calling convention always returns FP values in XMMs.
  CCIfType<[f32, f64, f128],
    CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,

  // Otherwise, everything is the same as Windows X86-64 C CC.
  CCDelegateTo<RetCC_X86_Win64_C>
]>;
// X86-64 HiPE return-value convention.
def RetCC_X86_64_HiPE : CallingConv<[
  // Promote all types to i64
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Return: HP, P, VAL1, VAL2
  CCIfType<[i64], CCAssignToReg<[R15, RBP, RAX, RDX]>>
]>;
// X86-64 WebKit_JS return-value convention.
def RetCC_X86_64_WebKit_JS : CallingConv<[
  // Promote all types to i64
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Return: RAX
  CCIfType<[i64], CCAssignToReg<[RAX]>>
]>;
// X86-64 Swift return-value convention.
def RetCC_X86_64_Swift : CallingConv<[
  // A swifterror value is returned in the callee-saved register R12.
  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[R12]>>>,

  // For integers, ECX, R8D can be used as extra return registers.
  CCIfType<[v1i1],  CCPromoteToType<i8>>,
  CCIfType<[i1],  CCPromoteToType<i8>>,
  CCIfType<[i8] , CCAssignToReg<[AL, DL, CL, R8B]>>,
  CCIfType<[i16], CCAssignToReg<[AX, DX, CX, R8W]>>,
  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX, R8D]>>,
  CCIfType<[i64], CCAssignToReg<[RAX, RDX, RCX, R8]>>,

  // XMM0, XMM1, XMM2 and XMM3 can be used to return FP values.
  CCIfType<[f32], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
  CCIfType<[f64], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
  CCIfType<[f128], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,

  // MMX vector types are returned in XMM0, XMM1, XMM2 and XMM3.
  CCIfType<[x86mmx], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
  CCDelegateTo<RetCC_X86Common>
]>;
// X86-64 AnyReg return-value convention. No explicit register is specified for
// the return-value. The register allocator is allowed and expected to choose
// any free register.
//
// This calling convention is currently only supported by the stackmap and
// patchpoint intrinsics. All other uses will result in an assert on Debug
// builds. On Release builds we fallback to the X86 C calling convention.
def RetCC_X86_64_AnyReg : CallingConv<[
  CCCustom<"CC_X86_AnyReg_Error">
]>;
// X86-64 HHVM return-value convention.
def RetCC_X86_64_HHVM: CallingConv<[
  // Promote all types to i64
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Return: could return in any GP register save RSP and R12.
  CCIfType<[i64], CCAssignToReg<[RBX, RBP, RDI, RSI, RDX, RCX, R8, R9,
                                 RAX, R10, R11, R13, R14, R15]>>
]>;
// Instantiate CC_/RetCC_ conventions for the three regcall variants.
defm X86_32_RegCall :
         X86_RegCall_base<RC_X86_32_RegCall>;
defm X86_Win64_RegCall :
     X86_RegCall_base<RC_X86_64_RegCall_Win>;
defm X86_SysV64_RegCall :
     X86_RegCall_base<RC_X86_64_RegCall_SysV>;
// This is the root return-value convention for the X86-32 backend.
def RetCC_X86_32 : CallingConv<[
  // If FastCC, use RetCC_X86_32_Fast.
  CCIfCC<"CallingConv::Fast", CCDelegateTo<RetCC_X86_32_Fast>>,
  CCIfCC<"CallingConv::Tail", CCDelegateTo<RetCC_X86_32_Fast>>,
  // CFGuard_Check never returns a value so does not need a RetCC.
  // If HiPE, use RetCC_X86_32_HiPE.
  CCIfCC<"CallingConv::HiPE", CCDelegateTo<RetCC_X86_32_HiPE>>,
  CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<RetCC_X86_32_VectorCall>>,
  CCIfCC<"CallingConv::X86_RegCall", CCDelegateTo<RetCC_X86_32_RegCall>>,

  // Otherwise, use RetCC_X86_32_C.
  CCDelegateTo<RetCC_X86_32_C>
]>;
// This is the root return-value convention for the X86-64 backend.
def RetCC_X86_64 : CallingConv<[
  // HiPE uses RetCC_X86_64_HiPE
  CCIfCC<"CallingConv::HiPE", CCDelegateTo<RetCC_X86_64_HiPE>>,

  // Handle JavaScript calls.
  CCIfCC<"CallingConv::WebKit_JS", CCDelegateTo<RetCC_X86_64_WebKit_JS>>,
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_X86_64_AnyReg>>,

  // Handle Swift calls.
  CCIfCC<"CallingConv::Swift", CCDelegateTo<RetCC_X86_64_Swift>>,
  CCIfCC<"CallingConv::SwiftTail", CCDelegateTo<RetCC_X86_64_Swift>>,

  // Handle explicit CC selection
  CCIfCC<"CallingConv::Win64", CCDelegateTo<RetCC_X86_Win64_C>>,
  CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<RetCC_X86_64_C>>,

  // Handle Vectorcall CC
  CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<RetCC_X86_64_Vectorcall>>,

  // Handle HHVM calls.
  CCIfCC<"CallingConv::HHVM", CCDelegateTo<RetCC_X86_64_HHVM>>,

  // Regcall picks the Win64 or SysV64 variant based on the target OS.
  CCIfCC<"CallingConv::X86_RegCall",
          CCIfSubtarget<"isTargetWin64()",
                        CCDelegateTo<RetCC_X86_Win64_RegCall>>>,
  CCIfCC<"CallingConv::X86_RegCall", CCDelegateTo<RetCC_X86_SysV64_RegCall>>,

  // Mingw64 and native Win64 use Win64 CC
  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<RetCC_X86_Win64_C>>,

  // Otherwise, drop to normal X86-64 CC
  CCDelegateTo<RetCC_X86_64_C>
]>;
// This is the return-value convention used for the entire X86 backend.
let Entry = 1 in
def RetCC_X86 : CallingConv<[
  // Check if this is the Intel OpenCL built-ins calling convention
  CCIfCC<"CallingConv::Intel_OCL_BI", CCDelegateTo<RetCC_Intel_OCL_BI>>,

  CCIfSubtarget<"is64Bit()", CCDelegateTo<RetCC_X86_64>>,
  CCDelegateTo<RetCC_X86_32>
]>;
//===----------------------------------------------------------------------===//
// X86-64 Argument Calling Conventions
//===----------------------------------------------------------------------===//

def CC_X86_64_C : CallingConv<[
  // Handles byval parameters.
  CCIfByVal<CCPassByVal<8, 8>>,

  // Promote i1/i8/i16/v1i1 arguments to i32.
  CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in R10.
  CCIfNest<CCIfSubtarget<"isTarget64BitILP32()", CCAssignToReg<[R10D]>>>,
  CCIfNest<CCAssignToReg<[R10]>>,

  // Pass SwiftSelf in a callee saved register.
  CCIfSwiftSelf<CCIfType<[i64], CCAssignToReg<[R13]>>>,

  // A SwiftError is passed in R12.
  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[R12]>>>,

  // Pass SwiftAsync in an otherwise callee saved register so that calls to
  // normal functions don't need to save it somewhere.
  CCIfSwiftAsync<CCIfType<[i64], CCAssignToReg<[R14]>>>,

  // For Swift Calling Conventions, pass sret in %rax.
  CCIfCC<"CallingConv::Swift",
    CCIfSRet<CCIfType<[i64], CCAssignToReg<[RAX]>>>>,
  CCIfCC<"CallingConv::SwiftTail",
    CCIfSRet<CCIfType<[i64], CCAssignToReg<[RAX]>>>>,

  // Pointers are always passed in full 64-bit registers.
  CCIfPtr<CCCustom<"CC_X86_64_Pointer">>,

  // The first 6 integer arguments are passed in integer registers.
  CCIfType<[i32], CCAssignToReg<[EDI, ESI, EDX, ECX, R8D, R9D]>>,
  CCIfType<[i64], CCAssignToReg<[RDI, RSI, RDX, RCX, R8 , R9 ]>>,

  // The first 8 MMX vector arguments are passed in XMM registers on Darwin.
  CCIfType<[x86mmx],
            CCIfSubtarget<"isTargetDarwin()",
            CCIfSubtarget<"hasSSE2()",
            CCPromoteToType<v2i64>>>>,

  // Boolean vectors of AVX-512 are passed in SIMD registers.
  // The call from AVX to AVX-512 function should work,
  // since the boolean types in AVX/AVX2 are promoted by default.
  CCIfType<[v2i1],  CCPromoteToType<v2i64>>,
  CCIfType<[v4i1],  CCPromoteToType<v4i32>>,
  CCIfType<[v8i1],  CCPromoteToType<v8i16>>,
  CCIfType<[v16i1], CCPromoteToType<v16i8>>,
  CCIfType<[v32i1], CCPromoteToType<v32i8>>,
  CCIfType<[v64i1], CCPromoteToType<v64i8>>,

  // The first 8 FP/Vector arguments are passed in XMM registers.
  CCIfType<[f16, f32, f64, f128, v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
            CCIfSubtarget<"hasSSE1()",
            CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>,

  // The first 8 256-bit vector arguments are passed in YMM registers, unless
  // this is a vararg function.
  // FIXME: This isn't precisely correct; the x86-64 ABI document says that
  // fixed arguments to vararg functions are supposed to be passed in
  // registers.  Actually modeling that would be a lot of work, though.
  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
                          CCIfSubtarget<"hasAVX()",
                          CCAssignToReg<[YMM0, YMM1, YMM2, YMM3,
                                         YMM4, YMM5, YMM6, YMM7]>>>>,

  // The first 8 512-bit vector arguments are passed in ZMM registers.
  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
            CCIfSubtarget<"hasAVX512()",
            CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6, ZMM7]>>>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i32, i64, f16, f32, f64], CCAssignToStack<8, 8>>,

  // Long doubles get stack slots whose size and alignment depends on the
  // subtarget.
  CCIfType<[f80, f128], CCAssignToStack<0, 0>>,

  // Vectors get 16-byte stack slots that are 16-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64], CCAssignToStack<16, 16>>,

  // 256-bit vectors get 32-byte stack slots that are 32-byte aligned.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
           CCAssignToStack<32, 32>>,

  // 512-bit vectors get 64-byte stack slots that are 64-byte aligned.
  CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
           CCAssignToStack<64, 64>>
]>;
// Calling convention for X86-64 HHVM.
def CC_X86_64_HHVM : CallingConv<[
  // Use all/any GP registers for args, except RSP.
  CCIfType<[i64], CCAssignToReg<[RBX, R12, RBP, R15,
                                 RDI, RSI, RDX, RCX, R8, R9,
                                 RAX, R10, R11, R13, R14]>>
]>;
// Calling convention for helper functions in HHVM.
def CC_X86_64_HHVM_C : CallingConv<[
  // Pass the first argument in RBP.
  CCIfType<[i64], CCAssignToReg<[RBP]>>,

  // Otherwise it's the same as the regular C calling convention.
  CCDelegateTo<CC_X86_64_C>
]>;
// Calling convention used on Win64
def CC_X86_Win64_C : CallingConv<[
  // FIXME: Handle varargs.

  // Byval aggregates are passed by pointer
  CCIfByVal<CCPassIndirect<i64>>,

  // Promote i1/v1i1 arguments to i8.
  CCIfType<[i1, v1i1], CCPromoteToType<i8>>,

  // The 'nest' parameter, if any, is passed in R10.
  CCIfNest<CCAssignToReg<[R10]>>,

  // A SwiftError is passed in R12.
  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[R12]>>>,

  // Pass SwiftSelf in a callee saved register.
  CCIfSwiftSelf<CCIfType<[i64], CCAssignToReg<[R13]>>>,

  // Pass SwiftAsync in an otherwise callee saved register so that calls to
  // normal functions don't need to save it somewhere.
  CCIfSwiftAsync<CCIfType<[i64], CCAssignToReg<[R14]>>>,

  // The 'CFGuardTarget' parameter, if any, is passed in RAX.
  CCIfCFGuardTarget<CCAssignToReg<[RAX]>>,

  // 128 bit vectors are passed by pointer
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64], CCPassIndirect<i64>>,

  // 256 bit vectors are passed by pointer
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64], CCPassIndirect<i64>>,

  // 512 bit vectors are passed by pointer
  CCIfType<[v64i8, v32i16, v16i32, v32f16, v16f32, v8f64, v8i64], CCPassIndirect<i64>>,

  // Long doubles are passed by pointer
  CCIfType<[f80], CCPassIndirect<i64>>,

  // The first 4 MMX vector arguments are passed in GPRs.
  CCIfType<[x86mmx], CCBitConvertToType<i64>>,

  // If SSE was disabled, pass FP values smaller than 64-bits as integers in
  // GPRs or on the stack.
  CCIfType<[f32], CCIfNotSubtarget<"hasSSE1()", CCBitConvertToType<i32>>>,
  CCIfType<[f64], CCIfNotSubtarget<"hasSSE1()", CCBitConvertToType<i64>>>,

  // The first 4 FP/Vector arguments are passed in XMM registers.
  CCIfType<[f16, f32, f64],
           CCAssignToRegWithShadow<[XMM0, XMM1, XMM2, XMM3],
                                   [RCX , RDX , R8 , R9 ]>>,

  // The first 4 integer arguments are passed in integer registers.
  CCIfType<[i8 ], CCAssignToRegWithShadow<[CL  , DL  , R8B , R9B ],
                                          [XMM0, XMM1, XMM2, XMM3]>>,
  CCIfType<[i16], CCAssignToRegWithShadow<[CX  , DX  , R8W , R9W ],
                                          [XMM0, XMM1, XMM2, XMM3]>>,
  CCIfType<[i32], CCAssignToRegWithShadow<[ECX , EDX , R8D , R9D ],
                                          [XMM0, XMM1, XMM2, XMM3]>>,

  // Do not pass the sret argument in RCX, the Win64 thiscall calling
  // convention requires "this" to be passed in RCX.
  CCIfCC<"CallingConv::X86_ThisCall",
    CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[RDX , R8 , R9 ],
                                                     [XMM1, XMM2, XMM3]>>>>,

  CCIfType<[i64], CCAssignToRegWithShadow<[RCX , RDX , R8  , R9  ],
                                          [XMM0, XMM1, XMM2, XMM3]>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i8, i16, i32, i64, f16, f32, f64], CCAssignToStack<8, 8>>
]>;
  557. def CC_X86_Win64_VectorCall : CallingConv<[
  558. CCCustom<"CC_X86_64_VectorCall">,
  559. // Delegate to fastcall to handle integer types.
  560. CCDelegateTo<CC_X86_Win64_C>
  561. ]>;
  562. def CC_X86_64_GHC : CallingConv<[
  563. // Promote i8/i16/i32 arguments to i64.
  564. CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
  565. // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, SpLim
  566. CCIfType<[i64],
  567. CCAssignToReg<[R13, RBP, R12, RBX, R14, RSI, RDI, R8, R9, R15]>>,
  568. // Pass in STG registers: F1, F2, F3, F4, D1, D2
  569. CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
  570. CCIfSubtarget<"hasSSE1()",
  571. CCAssignToReg<[XMM1, XMM2, XMM3, XMM4, XMM5, XMM6]>>>,
  572. // AVX
  573. CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
  574. CCIfSubtarget<"hasAVX()",
  575. CCAssignToReg<[YMM1, YMM2, YMM3, YMM4, YMM5, YMM6]>>>,
  576. // AVX-512
  577. CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
  578. CCIfSubtarget<"hasAVX512()",
  579. CCAssignToReg<[ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6]>>>
  580. ]>;
  581. def CC_X86_64_HiPE : CallingConv<[
  582. // Promote i8/i16/i32 arguments to i64.
  583. CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
  584. // Pass in VM's registers: HP, P, ARG0, ARG1, ARG2, ARG3
  585. CCIfType<[i64], CCAssignToReg<[R15, RBP, RSI, RDX, RCX, R8]>>,
  586. // Integer/FP values get stored in stack slots that are 8 bytes in size and
  587. // 8-byte aligned if there are no more registers to hold them.
  588. CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>
  589. ]>;
  590. def CC_X86_64_WebKit_JS : CallingConv<[
  591. // Promote i8/i16 arguments to i32.
  592. CCIfType<[i8, i16], CCPromoteToType<i32>>,
  593. // Only the first integer argument is passed in register.
  594. CCIfType<[i32], CCAssignToReg<[EAX]>>,
  595. CCIfType<[i64], CCAssignToReg<[RAX]>>,
  596. // The remaining integer arguments are passed on the stack. 32bit integer and
  597. // floating-point arguments are aligned to 4 byte and stored in 4 byte slots.
  598. // 64bit integer and floating-point arguments are aligned to 8 byte and stored
  599. // in 8 byte stack slots.
  600. CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
  601. CCIfType<[i64, f64], CCAssignToStack<8, 8>>
  602. ]>;
  603. // No explicit register is specified for the AnyReg calling convention. The
  604. // register allocator may assign the arguments to any free register.
  605. //
  606. // This calling convention is currently only supported by the stackmap and
  607. // patchpoint intrinsics. All other uses will result in an assert on Debug
  608. // builds. On Release builds we fallback to the X86 C calling convention.
  609. def CC_X86_64_AnyReg : CallingConv<[
  610. CCCustom<"CC_X86_AnyReg_Error">
  611. ]>;
  612. //===----------------------------------------------------------------------===//
  613. // X86 C Calling Convention
  614. //===----------------------------------------------------------------------===//
  615. /// CC_X86_32_Vector_Common - In all X86-32 calling conventions, extra vector
  616. /// values are spilled on the stack.
  617. def CC_X86_32_Vector_Common : CallingConv<[
  618. // Other SSE vectors get 16-byte stack slots that are 16-byte aligned.
  619. CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
  620. CCAssignToStack<16, 16>>,
  621. // 256-bit AVX vectors get 32-byte stack slots that are 32-byte aligned.
  622. CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
  623. CCAssignToStack<32, 32>>,
  624. // 512-bit AVX 512-bit vectors get 64-byte stack slots that are 64-byte aligned.
  625. CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
  626. CCAssignToStack<64, 64>>
  627. ]>;
  628. /// CC_X86_Win32_Vector - In X86 Win32 calling conventions, extra vector
  629. /// values are spilled on the stack.
  630. def CC_X86_Win32_Vector : CallingConv<[
  631. // Other SSE vectors get 16-byte stack slots that are 4-byte aligned.
  632. CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
  633. CCAssignToStack<16, 4>>,
  634. // 256-bit AVX vectors get 32-byte stack slots that are 4-byte aligned.
  635. CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
  636. CCAssignToStack<32, 4>>,
  637. // 512-bit AVX 512-bit vectors get 64-byte stack slots that are 4-byte aligned.
  638. CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
  639. CCAssignToStack<64, 4>>
  640. ]>;
  641. // CC_X86_32_Vector_Standard - The first 3 vector arguments are passed in
  642. // vector registers
  643. def CC_X86_32_Vector_Standard : CallingConv<[
  644. // SSE vector arguments are passed in XMM registers.
  645. CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
  646. CCAssignToReg<[XMM0, XMM1, XMM2]>>>,
  647. // AVX 256-bit vector arguments are passed in YMM registers.
  648. CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
  649. CCIfSubtarget<"hasAVX()",
  650. CCAssignToReg<[YMM0, YMM1, YMM2]>>>>,
  651. // AVX 512-bit vector arguments are passed in ZMM registers.
  652. CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
  653. CCAssignToReg<[ZMM0, ZMM1, ZMM2]>>>,
  654. CCIfIsVarArgOnWin<CCDelegateTo<CC_X86_Win32_Vector>>,
  655. CCDelegateTo<CC_X86_32_Vector_Common>
  656. ]>;
  657. // CC_X86_32_Vector_Darwin - The first 4 vector arguments are passed in
  658. // vector registers.
  659. def CC_X86_32_Vector_Darwin : CallingConv<[
  660. // SSE vector arguments are passed in XMM registers.
  661. CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
  662. CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>>,
  663. // AVX 256-bit vector arguments are passed in YMM registers.
  664. CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
  665. CCIfSubtarget<"hasAVX()",
  666. CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>>>,
  667. // AVX 512-bit vector arguments are passed in ZMM registers.
  668. CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
  669. CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3]>>>,
  670. CCDelegateTo<CC_X86_32_Vector_Common>
  671. ]>;
  672. /// CC_X86_32_Common - In all X86-32 calling conventions, extra integers and FP
  673. /// values are spilled on the stack.
  674. def CC_X86_32_Common : CallingConv<[
  675. // Handles byval/preallocated parameters.
  676. CCIfByVal<CCPassByVal<4, 4>>,
  677. CCIfPreallocated<CCPassByVal<4, 4>>,
  678. // The first 3 float or double arguments, if marked 'inreg' and if the call
  679. // is not a vararg call and if SSE2 is available, are passed in SSE registers.
  680. CCIfNotVarArg<CCIfInReg<CCIfType<[f32,f64],
  681. CCIfSubtarget<"hasSSE2()",
  682. CCAssignToReg<[XMM0,XMM1,XMM2]>>>>>,
  683. CCIfNotVarArg<CCIfInReg<CCIfType<[f16], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
  684. // The first 3 __m64 vector arguments are passed in mmx registers if the
  685. // call is not a vararg call.
  686. CCIfNotVarArg<CCIfType<[x86mmx],
  687. CCAssignToReg<[MM0, MM1, MM2]>>>,
  688. CCIfType<[f16], CCAssignToStack<4, 4>>,
  689. // Integer/Float values get stored in stack slots that are 4 bytes in
  690. // size and 4-byte aligned.
  691. CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
  692. // Doubles get 8-byte slots that are 4-byte aligned.
  693. CCIfType<[f64], CCAssignToStack<8, 4>>,
  694. // Long doubles get slots whose size and alignment depends on the subtarget.
  695. CCIfType<[f80], CCAssignToStack<0, 0>>,
  696. // Boolean vectors of AVX-512 are passed in SIMD registers.
  697. // The call from AVX to AVX-512 function should work,
  698. // since the boolean types in AVX/AVX2 are promoted by default.
  699. CCIfType<[v2i1], CCPromoteToType<v2i64>>,
  700. CCIfType<[v4i1], CCPromoteToType<v4i32>>,
  701. CCIfType<[v8i1], CCPromoteToType<v8i16>>,
  702. CCIfType<[v16i1], CCPromoteToType<v16i8>>,
  703. CCIfType<[v32i1], CCPromoteToType<v32i8>>,
  704. CCIfType<[v64i1], CCPromoteToType<v64i8>>,
  705. // __m64 vectors get 8-byte stack slots that are 4-byte aligned. They are
  706. // passed in the parameter area.
  707. CCIfType<[x86mmx], CCAssignToStack<8, 4>>,
  708. // Darwin passes vectors in a form that differs from the i386 psABI
  709. CCIfSubtarget<"isTargetDarwin()", CCDelegateTo<CC_X86_32_Vector_Darwin>>,
  710. // Otherwise, drop to 'normal' X86-32 CC
  711. CCDelegateTo<CC_X86_32_Vector_Standard>
  712. ]>;
  713. def CC_X86_32_C : CallingConv<[
  714. // Promote i1/i8/i16/v1i1 arguments to i32.
  715. CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,
  716. // The 'nest' parameter, if any, is passed in ECX.
  717. CCIfNest<CCAssignToReg<[ECX]>>,
  718. // On swifttailcc pass swiftself in ECX.
  719. CCIfCC<"CallingConv::SwiftTail",
  720. CCIfSwiftSelf<CCIfType<[i32], CCAssignToReg<[ECX]>>>>,
  721. // The first 3 integer arguments, if marked 'inreg' and if the call is not
  722. // a vararg call, are passed in integer registers.
  723. CCIfNotVarArg<CCIfInReg<CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>>>,
  724. // Otherwise, same as everything else.
  725. CCDelegateTo<CC_X86_32_Common>
  726. ]>;
  727. def CC_X86_32_MCU : CallingConv<[
  728. // Handles byval parameters. Note that, like FastCC, we can't rely on
  729. // the delegation to CC_X86_32_Common because that happens after code that
  730. // puts arguments in registers.
  731. CCIfByVal<CCPassByVal<4, 4>>,
  732. // Promote i1/i8/i16/v1i1 arguments to i32.
  733. CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,
  734. // If the call is not a vararg call, some arguments may be passed
  735. // in integer registers.
  736. CCIfNotVarArg<CCIfType<[i32], CCCustom<"CC_X86_32_MCUInReg">>>,
  737. // Otherwise, same as everything else.
  738. CCDelegateTo<CC_X86_32_Common>
  739. ]>;
  740. def CC_X86_32_FastCall : CallingConv<[
  741. // Promote i1 to i8.
  742. CCIfType<[i1], CCPromoteToType<i8>>,
  743. // The 'nest' parameter, if any, is passed in EAX.
  744. CCIfNest<CCAssignToReg<[EAX]>>,
  745. // The first 2 integer arguments are passed in ECX/EDX
  746. CCIfInReg<CCIfType<[ i8], CCAssignToReg<[ CL, DL]>>>,
  747. CCIfInReg<CCIfType<[i16], CCAssignToReg<[ CX, DX]>>>,
  748. CCIfInReg<CCIfType<[i32], CCAssignToReg<[ECX, EDX]>>>,
  749. // Otherwise, same as everything else.
  750. CCDelegateTo<CC_X86_32_Common>
  751. ]>;
  752. def CC_X86_Win32_VectorCall : CallingConv<[
  753. // Pass floating point in XMMs
  754. CCCustom<"CC_X86_32_VectorCall">,
  755. // Delegate to fastcall to handle integer types.
  756. CCDelegateTo<CC_X86_32_FastCall>
  757. ]>;
  758. def CC_X86_32_ThisCall_Common : CallingConv<[
  759. // The first integer argument is passed in ECX
  760. CCIfType<[i32], CCAssignToReg<[ECX]>>,
  761. // Otherwise, same as everything else.
  762. CCDelegateTo<CC_X86_32_Common>
  763. ]>;
  764. def CC_X86_32_ThisCall_Mingw : CallingConv<[
  765. // Promote i1/i8/i16/v1i1 arguments to i32.
  766. CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,
  767. CCDelegateTo<CC_X86_32_ThisCall_Common>
  768. ]>;
  769. def CC_X86_32_ThisCall_Win : CallingConv<[
  770. // Promote i1/i8/i16/v1i1 arguments to i32.
  771. CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,
  772. // Pass sret arguments indirectly through stack.
  773. CCIfSRet<CCAssignToStack<4, 4>>,
  774. CCDelegateTo<CC_X86_32_ThisCall_Common>
  775. ]>;
  776. def CC_X86_32_ThisCall : CallingConv<[
  777. CCIfSubtarget<"isTargetCygMing()", CCDelegateTo<CC_X86_32_ThisCall_Mingw>>,
  778. CCDelegateTo<CC_X86_32_ThisCall_Win>
  779. ]>;
  780. def CC_X86_32_FastCC : CallingConv<[
  781. // Handles byval parameters. Note that we can't rely on the delegation
  782. // to CC_X86_32_Common for this because that happens after code that
  783. // puts arguments in registers.
  784. CCIfByVal<CCPassByVal<4, 4>>,
  785. // Promote i1/i8/i16/v1i1 arguments to i32.
  786. CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,
  787. // The 'nest' parameter, if any, is passed in EAX.
  788. CCIfNest<CCAssignToReg<[EAX]>>,
  789. // The first 2 integer arguments are passed in ECX/EDX
  790. CCIfType<[i32], CCAssignToReg<[ECX, EDX]>>,
  791. // The first 3 float or double arguments, if the call is not a vararg
  792. // call and if SSE2 is available, are passed in SSE registers.
  793. CCIfNotVarArg<CCIfType<[f32,f64],
  794. CCIfSubtarget<"hasSSE2()",
  795. CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
  796. // Doubles get 8-byte slots that are 8-byte aligned.
  797. CCIfType<[f64], CCAssignToStack<8, 8>>,
  798. // Otherwise, same as everything else.
  799. CCDelegateTo<CC_X86_32_Common>
  800. ]>;
  801. def CC_X86_Win32_CFGuard_Check : CallingConv<[
  802. // The CFGuard check call takes exactly one integer argument
  803. // (i.e. the target function address), which is passed in ECX.
  804. CCIfType<[i32], CCAssignToReg<[ECX]>>
  805. ]>;
  806. def CC_X86_32_GHC : CallingConv<[
  807. // Promote i8/i16 arguments to i32.
  808. CCIfType<[i8, i16], CCPromoteToType<i32>>,
  809. // Pass in STG registers: Base, Sp, Hp, R1
  810. CCIfType<[i32], CCAssignToReg<[EBX, EBP, EDI, ESI]>>
  811. ]>;
  812. def CC_X86_32_HiPE : CallingConv<[
  813. // Promote i8/i16 arguments to i32.
  814. CCIfType<[i8, i16], CCPromoteToType<i32>>,
  815. // Pass in VM's registers: HP, P, ARG0, ARG1, ARG2
  816. CCIfType<[i32], CCAssignToReg<[ESI, EBP, EAX, EDX, ECX]>>,
  817. // Integer/Float values get stored in stack slots that are 4 bytes in
  818. // size and 4-byte aligned.
  819. CCIfType<[i32, f32], CCAssignToStack<4, 4>>
  820. ]>;
  821. // X86-64 Intel OpenCL built-ins calling convention.
  822. def CC_Intel_OCL_BI : CallingConv<[
  823. CCIfType<[i32], CCIfSubtarget<"isTargetWin64()", CCAssignToReg<[ECX, EDX, R8D, R9D]>>>,
  824. CCIfType<[i64], CCIfSubtarget<"isTargetWin64()", CCAssignToReg<[RCX, RDX, R8, R9 ]>>>,
  825. CCIfType<[i32], CCIfSubtarget<"is64Bit()", CCAssignToReg<[EDI, ESI, EDX, ECX]>>>,
  826. CCIfType<[i64], CCIfSubtarget<"is64Bit()", CCAssignToReg<[RDI, RSI, RDX, RCX]>>>,
  827. CCIfType<[i32], CCAssignToStack<4, 4>>,
  828. // The SSE vector arguments are passed in XMM registers.
  829. CCIfType<[f32, f64, v4i32, v2i64, v4f32, v2f64],
  830. CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
  831. // The 256-bit vector arguments are passed in YMM registers.
  832. CCIfType<[v8f32, v4f64, v8i32, v4i64],
  833. CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>,
  834. // The 512-bit vector arguments are passed in ZMM registers.
  835. CCIfType<[v16f32, v8f64, v16i32, v8i64],
  836. CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3]>>,
  837. // Pass masks in mask registers
  838. CCIfType<[v16i1, v8i1], CCAssignToReg<[K1]>>,
  839. CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
  840. CCIfSubtarget<"is64Bit()", CCDelegateTo<CC_X86_64_C>>,
  841. CCDelegateTo<CC_X86_32_C>
  842. ]>;
  843. //===----------------------------------------------------------------------===//
  844. // X86 Root Argument Calling Conventions
  845. //===----------------------------------------------------------------------===//
  846. // This is the root argument convention for the X86-32 backend.
  847. def CC_X86_32 : CallingConv<[
  848. // X86_INTR calling convention is valid in MCU target and should override the
  849. // MCU calling convention. Thus, this should be checked before isTargetMCU().
  850. CCIfCC<"CallingConv::X86_INTR", CCCustom<"CC_X86_Intr">>,
  851. CCIfSubtarget<"isTargetMCU()", CCDelegateTo<CC_X86_32_MCU>>,
  852. CCIfCC<"CallingConv::X86_FastCall", CCDelegateTo<CC_X86_32_FastCall>>,
  853. CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<CC_X86_Win32_VectorCall>>,
  854. CCIfCC<"CallingConv::X86_ThisCall", CCDelegateTo<CC_X86_32_ThisCall>>,
  855. CCIfCC<"CallingConv::CFGuard_Check", CCDelegateTo<CC_X86_Win32_CFGuard_Check>>,
  856. CCIfCC<"CallingConv::Fast", CCDelegateTo<CC_X86_32_FastCC>>,
  857. CCIfCC<"CallingConv::Tail", CCDelegateTo<CC_X86_32_FastCC>>,
  858. CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_32_GHC>>,
  859. CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_32_HiPE>>,
  860. CCIfCC<"CallingConv::X86_RegCall", CCDelegateTo<CC_X86_32_RegCall>>,
  861. // Otherwise, drop to normal X86-32 CC
  862. CCDelegateTo<CC_X86_32_C>
  863. ]>;
  864. // This is the root argument convention for the X86-64 backend.
  865. def CC_X86_64 : CallingConv<[
  866. CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_64_GHC>>,
  867. CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_64_HiPE>>,
  868. CCIfCC<"CallingConv::WebKit_JS", CCDelegateTo<CC_X86_64_WebKit_JS>>,
  869. CCIfCC<"CallingConv::AnyReg", CCDelegateTo<CC_X86_64_AnyReg>>,
  870. CCIfCC<"CallingConv::Win64", CCDelegateTo<CC_X86_Win64_C>>,
  871. CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<CC_X86_64_C>>,
  872. CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<CC_X86_Win64_VectorCall>>,
  873. CCIfCC<"CallingConv::HHVM", CCDelegateTo<CC_X86_64_HHVM>>,
  874. CCIfCC<"CallingConv::HHVM_C", CCDelegateTo<CC_X86_64_HHVM_C>>,
  875. CCIfCC<"CallingConv::X86_RegCall",
  876. CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_RegCall>>>,
  877. CCIfCC<"CallingConv::X86_RegCall", CCDelegateTo<CC_X86_SysV64_RegCall>>,
  878. CCIfCC<"CallingConv::X86_INTR", CCCustom<"CC_X86_Intr">>,
  879. // Mingw64 and native Win64 use Win64 CC
  880. CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
  881. // Otherwise, drop to normal X86-64 CC
  882. CCDelegateTo<CC_X86_64_C>
  883. ]>;
  884. // This is the argument convention used for the entire X86 backend.
  885. let Entry = 1 in
  886. def CC_X86 : CallingConv<[
  887. CCIfCC<"CallingConv::Intel_OCL_BI", CCDelegateTo<CC_Intel_OCL_BI>>,
  888. CCIfSubtarget<"is64Bit()", CCDelegateTo<CC_X86_64>>,
  889. CCDelegateTo<CC_X86_32>
  890. ]>;
  891. //===----------------------------------------------------------------------===//
  892. // Callee-saved Registers.
  893. //===----------------------------------------------------------------------===//
  894. def CSR_NoRegs : CalleeSavedRegs<(add)>;
  895. def CSR_32 : CalleeSavedRegs<(add ESI, EDI, EBX, EBP)>;
  896. def CSR_64 : CalleeSavedRegs<(add RBX, R12, R13, R14, R15, RBP)>;
  897. def CSR_64_SwiftError : CalleeSavedRegs<(sub CSR_64, R12)>;
  898. def CSR_64_SwiftTail : CalleeSavedRegs<(sub CSR_64, R13, R14)>;
  899. def CSR_32EHRet : CalleeSavedRegs<(add EAX, EDX, CSR_32)>;
  900. def CSR_64EHRet : CalleeSavedRegs<(add RAX, RDX, CSR_64)>;
  901. def CSR_Win64_NoSSE : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12, R13, R14, R15)>;
  902. def CSR_Win64 : CalleeSavedRegs<(add CSR_Win64_NoSSE,
  903. (sequence "XMM%u", 6, 15))>;
  904. def CSR_Win64_SwiftError : CalleeSavedRegs<(sub CSR_Win64, R12)>;
  905. def CSR_Win64_SwiftTail : CalleeSavedRegs<(sub CSR_Win64, R13, R14)>;
  906. // The function used by Darwin to obtain the address of a thread-local variable
  907. // uses rdi to pass a single parameter and rax for the return value. All other
  908. // GPRs are preserved.
  909. def CSR_64_TLS_Darwin : CalleeSavedRegs<(add CSR_64, RCX, RDX, RSI,
  910. R8, R9, R10, R11)>;
  911. // CSRs that are handled by prologue, epilogue.
  912. def CSR_64_CXX_TLS_Darwin_PE : CalleeSavedRegs<(add RBP)>;
  913. // CSRs that are handled explicitly via copies.
  914. def CSR_64_CXX_TLS_Darwin_ViaCopy : CalleeSavedRegs<(sub CSR_64_TLS_Darwin, RBP)>;
  915. // All GPRs - except r11
  916. def CSR_64_RT_MostRegs : CalleeSavedRegs<(add CSR_64, RAX, RCX, RDX, RSI, RDI,
  917. R8, R9, R10)>;
  918. // All registers - except r11
  919. def CSR_64_RT_AllRegs : CalleeSavedRegs<(add CSR_64_RT_MostRegs,
  920. (sequence "XMM%u", 0, 15))>;
  921. def CSR_64_RT_AllRegs_AVX : CalleeSavedRegs<(add CSR_64_RT_MostRegs,
  922. (sequence "YMM%u", 0, 15))>;
  923. def CSR_64_MostRegs : CalleeSavedRegs<(add RBX, RCX, RDX, RSI, RDI, R8, R9, R10,
  924. R11, R12, R13, R14, R15, RBP,
  925. (sequence "XMM%u", 0, 15))>;
  926. def CSR_32_AllRegs : CalleeSavedRegs<(add EAX, EBX, ECX, EDX, EBP, ESI,
  927. EDI)>;
  928. def CSR_32_AllRegs_SSE : CalleeSavedRegs<(add CSR_32_AllRegs,
  929. (sequence "XMM%u", 0, 7))>;
  930. def CSR_32_AllRegs_AVX : CalleeSavedRegs<(add CSR_32_AllRegs,
  931. (sequence "YMM%u", 0, 7))>;
  932. def CSR_32_AllRegs_AVX512 : CalleeSavedRegs<(add CSR_32_AllRegs,
  933. (sequence "ZMM%u", 0, 7),
  934. (sequence "K%u", 0, 7))>;
  935. def CSR_64_AllRegs : CalleeSavedRegs<(add CSR_64_MostRegs, RAX)>;
  936. def CSR_64_AllRegs_NoSSE : CalleeSavedRegs<(add RAX, RBX, RCX, RDX, RSI, RDI, R8, R9,
  937. R10, R11, R12, R13, R14, R15, RBP)>;
  938. def CSR_64_AllRegs_AVX : CalleeSavedRegs<(sub (add CSR_64_MostRegs, RAX,
  939. (sequence "YMM%u", 0, 15)),
  940. (sequence "XMM%u", 0, 15))>;
  941. def CSR_64_AllRegs_AVX512 : CalleeSavedRegs<(sub (add CSR_64_MostRegs, RAX,
  942. (sequence "ZMM%u", 0, 31),
  943. (sequence "K%u", 0, 7)),
  944. (sequence "XMM%u", 0, 15))>;
  945. // Standard C + YMM6-15
  946. def CSR_Win64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12,
  947. R13, R14, R15,
  948. (sequence "YMM%u", 6, 15))>;
  949. def CSR_Win64_Intel_OCL_BI_AVX512 : CalleeSavedRegs<(add RBX, RBP, RDI, RSI,
  950. R12, R13, R14, R15,
  951. (sequence "ZMM%u", 6, 21),
  952. K4, K5, K6, K7)>;
  953. //Standard C + XMM 8-15
  954. def CSR_64_Intel_OCL_BI : CalleeSavedRegs<(add CSR_64,
  955. (sequence "XMM%u", 8, 15))>;
  956. //Standard C + YMM 8-15
  957. def CSR_64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add CSR_64,
  958. (sequence "YMM%u", 8, 15))>;
  959. def CSR_64_Intel_OCL_BI_AVX512 : CalleeSavedRegs<(add RBX, RSI, R14, R15,
  960. (sequence "ZMM%u", 16, 31),
  961. K4, K5, K6, K7)>;
  962. // Only R12 is preserved for PHP calls in HHVM.
  963. def CSR_64_HHVM : CalleeSavedRegs<(add R12)>;
  964. // Register calling convention preserves few GPR and XMM8-15
  965. def CSR_32_RegCall_NoSSE : CalleeSavedRegs<(add ESI, EDI, EBX, EBP)>;
  966. def CSR_32_RegCall : CalleeSavedRegs<(add CSR_32_RegCall_NoSSE,
  967. (sequence "XMM%u", 4, 7))>;
  968. def CSR_Win32_CFGuard_Check_NoSSE : CalleeSavedRegs<(add CSR_32_RegCall_NoSSE, ECX)>;
  969. def CSR_Win32_CFGuard_Check : CalleeSavedRegs<(add CSR_32_RegCall, ECX)>;
  970. def CSR_Win64_RegCall_NoSSE : CalleeSavedRegs<(add RBX, RBP,
  971. (sequence "R%u", 10, 15))>;
  972. def CSR_Win64_RegCall : CalleeSavedRegs<(add CSR_Win64_RegCall_NoSSE,
  973. (sequence "XMM%u", 8, 15))>;
  974. def CSR_SysV64_RegCall_NoSSE : CalleeSavedRegs<(add RBX, RBP,
  975. (sequence "R%u", 12, 15))>;
  976. def CSR_SysV64_RegCall : CalleeSavedRegs<(add CSR_SysV64_RegCall_NoSSE,
  977. (sequence "XMM%u", 8, 15))>;