//===-- PPCFrameLowering.cpp - PPC Frame Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the PPC implementation of the TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//
#include "PPCFrameLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPCInstrBuilder.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define DEBUG_TYPE "framelowering"

STATISTIC(NumPESpillVSR, "Number of spills to vector in prologue");
STATISTIC(NumPEReloadVSR, "Number of reloads from vector in epilogue");
STATISTIC(NumPrologProbed, "Number of prologues probed");

static cl::opt<bool>
    EnablePEVectorSpills("ppc-enable-pe-vector-spills",
                         cl::desc("Enable spills in prologue to vector registers."),
                         cl::init(false), cl::Hidden);
static unsigned computeReturnSaveOffset(const PPCSubtarget &STI) {
  if (STI.isAIXABI())
    return STI.isPPC64() ? 16 : 8;
  // SVR4 ABI:
  return STI.isPPC64() ? 16 : 4;
}

static unsigned computeTOCSaveOffset(const PPCSubtarget &STI) {
  if (STI.isAIXABI())
    return STI.isPPC64() ? 40 : 20;
  return STI.isELFv2ABI() ? 24 : 40;
}

static unsigned computeFramePointerSaveOffset(const PPCSubtarget &STI) {
  // First slot in the general register save area.
  return STI.isPPC64() ? -8U : -4U;
}

static unsigned computeLinkageSize(const PPCSubtarget &STI) {
  if (STI.isAIXABI() || STI.isPPC64())
    return (STI.isELFv2ABI() ? 4 : 6) * (STI.isPPC64() ? 8 : 4);

  // 32-bit SVR4 ABI:
  return 8;
}
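
// For reference, the linkage-area sizes this computes are: ELFv2 64-bit:
// 4 * 8 = 32 bytes; ELFv1 and AIX 64-bit: 6 * 8 = 48 bytes; 32-bit AIX:
// 6 * 4 = 24 bytes; 32-bit SVR4: 8 bytes (back chain + LR save word).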

static unsigned computeBasePointerSaveOffset(const PPCSubtarget &STI) {
  // Third slot in the general purpose register save area.
  if (STI.is32BitELFABI() && STI.getTargetMachine().isPositionIndependent())
    return -12U;

  // Second slot in the general purpose register save area.
  return STI.isPPC64() ? -16U : -8U;
}

static unsigned computeCRSaveOffset(const PPCSubtarget &STI) {
  return (STI.isAIXABI() && !STI.isPPC64()) ? 4 : 8;
}
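
// Note: the CR save word lives in the caller's frame header: at 4(SP) on
// 32-bit AIX (where the LR save word is at 8(SP)) and at 8(SP) on the 64-bit
// ABIs (where LR is saved at 16(SP)); compare computeReturnSaveOffset above.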

PPCFrameLowering::PPCFrameLowering(const PPCSubtarget &STI)
    : TargetFrameLowering(TargetFrameLowering::StackGrowsDown,
                          STI.getPlatformStackAlignment(), 0),
      Subtarget(STI), ReturnSaveOffset(computeReturnSaveOffset(Subtarget)),
      TOCSaveOffset(computeTOCSaveOffset(Subtarget)),
      FramePointerSaveOffset(computeFramePointerSaveOffset(Subtarget)),
      LinkageSize(computeLinkageSize(Subtarget)),
      BasePointerSaveOffset(computeBasePointerSaveOffset(Subtarget)),
      CRSaveOffset(computeCRSaveOffset(Subtarget)) {}

// With the SVR4 ABI, callee-saved registers have fixed offsets on the stack.
const PPCFrameLowering::SpillSlot *PPCFrameLowering::getCalleeSavedSpillSlots(
    unsigned &NumEntries) const {
// Floating-point register save area offsets.
#define CALLEE_SAVED_FPRS \
  {PPC::F31, -8},         \
  {PPC::F30, -16},        \
  {PPC::F29, -24},        \
  {PPC::F28, -32},        \
  {PPC::F27, -40},        \
  {PPC::F26, -48},        \
  {PPC::F25, -56},        \
  {PPC::F24, -64},        \
  {PPC::F23, -72},        \
  {PPC::F22, -80},        \
  {PPC::F21, -88},        \
  {PPC::F20, -96},        \
  {PPC::F19, -104},       \
  {PPC::F18, -112},       \
  {PPC::F17, -120},       \
  {PPC::F16, -128},       \
  {PPC::F15, -136},       \
  {PPC::F14, -144}

// 32-bit general purpose register save area offsets shared by ELF and
// AIX. AIX has an extra CSR with r13.
#define CALLEE_SAVED_GPRS32 \
  {PPC::R31, -4},           \
  {PPC::R30, -8},           \
  {PPC::R29, -12},          \
  {PPC::R28, -16},          \
  {PPC::R27, -20},          \
  {PPC::R26, -24},          \
  {PPC::R25, -28},          \
  {PPC::R24, -32},          \
  {PPC::R23, -36},          \
  {PPC::R22, -40},          \
  {PPC::R21, -44},          \
  {PPC::R20, -48},          \
  {PPC::R19, -52},          \
  {PPC::R18, -56},          \
  {PPC::R17, -60},          \
  {PPC::R16, -64},          \
  {PPC::R15, -68},          \
  {PPC::R14, -72}

// 64-bit general purpose register save area offsets.
#define CALLEE_SAVED_GPRS64 \
  {PPC::X31, -8},           \
  {PPC::X30, -16},          \
  {PPC::X29, -24},          \
  {PPC::X28, -32},          \
  {PPC::X27, -40},          \
  {PPC::X26, -48},          \
  {PPC::X25, -56},          \
  {PPC::X24, -64},          \
  {PPC::X23, -72},          \
  {PPC::X22, -80},          \
  {PPC::X21, -88},          \
  {PPC::X20, -96},          \
  {PPC::X19, -104},         \
  {PPC::X18, -112},         \
  {PPC::X17, -120},         \
  {PPC::X16, -128},         \
  {PPC::X15, -136},         \
  {PPC::X14, -144}

// Vector register save area offsets.
#define CALLEE_SAVED_VRS \
  {PPC::V31, -16},       \
  {PPC::V30, -32},       \
  {PPC::V29, -48},       \
  {PPC::V28, -64},       \
  {PPC::V27, -80},       \
  {PPC::V26, -96},       \
  {PPC::V25, -112},      \
  {PPC::V24, -128},      \
  {PPC::V23, -144},      \
  {PPC::V22, -160},      \
  {PPC::V21, -176},      \
  {PPC::V20, -192}

  // Note that the offsets here overlap, but this is fixed up in
  // processFunctionBeforeFrameFinalized.
  static const SpillSlot ELFOffsets32[] = {
      CALLEE_SAVED_FPRS,
      CALLEE_SAVED_GPRS32,

      // CR save area offset. We map each of the nonvolatile CR fields
      // to the slot for CR2, which is the first of the nonvolatile CR
      // fields to be assigned, so that we only allocate one save slot.
      // See PPCRegisterInfo::hasReservedSpillSlot() for more information.
      {PPC::CR2, -4},

      // VRSAVE save area offset.
      {PPC::VRSAVE, -4},

      CALLEE_SAVED_VRS,

      // SPE register save area (overlaps Vector save area).
      {PPC::S31, -8},
      {PPC::S30, -16},
      {PPC::S29, -24},
      {PPC::S28, -32},
      {PPC::S27, -40},
      {PPC::S26, -48},
      {PPC::S25, -56},
      {PPC::S24, -64},
      {PPC::S23, -72},
      {PPC::S22, -80},
      {PPC::S21, -88},
      {PPC::S20, -96},
      {PPC::S19, -104},
      {PPC::S18, -112},
      {PPC::S17, -120},
      {PPC::S16, -128},
      {PPC::S15, -136},
      {PPC::S14, -144}};

  static const SpillSlot ELFOffsets64[] = {
      CALLEE_SAVED_FPRS,
      CALLEE_SAVED_GPRS64,

      // VRSAVE save area offset.
      {PPC::VRSAVE, -4},
      CALLEE_SAVED_VRS};

  static const SpillSlot AIXOffsets32[] = {CALLEE_SAVED_FPRS,
                                           CALLEE_SAVED_GPRS32,
                                           // Add AIX's extra CSR.
                                           {PPC::R13, -76},
                                           CALLEE_SAVED_VRS};

  static const SpillSlot AIXOffsets64[] = {
      CALLEE_SAVED_FPRS, CALLEE_SAVED_GPRS64, CALLEE_SAVED_VRS};

  if (Subtarget.is64BitELFABI()) {
    NumEntries = std::size(ELFOffsets64);
    return ELFOffsets64;
  }

  if (Subtarget.is32BitELFABI()) {
    NumEntries = std::size(ELFOffsets32);
    return ELFOffsets32;
  }

  assert(Subtarget.isAIXABI() && "Unexpected ABI.");

  if (Subtarget.isPPC64()) {
    NumEntries = std::size(AIXOffsets64);
    return AIXOffsets64;
  }

  NumEntries = std::size(AIXOffsets32);
  return AIXOffsets32;
}

static bool spillsCR(const MachineFunction &MF) {
  const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  return FuncInfo->isCRSpilled();
}

static bool hasSpills(const MachineFunction &MF) {
  const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  return FuncInfo->hasSpills();
}

static bool hasNonRISpills(const MachineFunction &MF) {
  const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  return FuncInfo->hasNonRISpills();
}

/// MustSaveLR - Return true if this function requires that we save the LR
/// register onto the stack in the prolog and restore it in the epilog of the
/// function.
static bool MustSaveLR(const MachineFunction &MF, unsigned LR) {
  const PPCFunctionInfo *MFI = MF.getInfo<PPCFunctionInfo>();

  // We need a save/restore of LR if there is any def of LR (which is
  // defined by calls, including the PIC setup sequence), or if there is
  // some use of the LR stack slot (e.g. for builtin_return_address).
  // (LR comes in 32 and 64 bit versions.)
  MachineRegisterInfo::def_iterator RI = MF.getRegInfo().def_begin(LR);
  return RI != MF.getRegInfo().def_end() || MFI->isLRStoreRequired();
}

/// determineFrameLayoutAndUpdate - Determine the size of the frame and maximum
/// call frame size. Update the MachineFunction object with the stack size.
uint64_t
PPCFrameLowering::determineFrameLayoutAndUpdate(MachineFunction &MF,
                                                bool UseEstimate) const {
  unsigned NewMaxCallFrameSize = 0;
  uint64_t FrameSize = determineFrameLayout(MF, UseEstimate,
                                            &NewMaxCallFrameSize);
  MF.getFrameInfo().setStackSize(FrameSize);
  MF.getFrameInfo().setMaxCallFrameSize(NewMaxCallFrameSize);
  return FrameSize;
}

/// determineFrameLayout - Determine the size of the frame and maximum call
/// frame size.
uint64_t
PPCFrameLowering::determineFrameLayout(const MachineFunction &MF,
                                       bool UseEstimate,
                                       unsigned *NewMaxCallFrameSize) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t FrameSize =
      UseEstimate ? MFI.estimateStackSize(MF) : MFI.getStackSize();

  // Get stack alignments. The frame must be aligned to the greatest of these:
  Align TargetAlign = getStackAlign(); // alignment required per the ABI
  Align MaxAlign = MFI.getMaxAlign();  // alignment required by data in frame
  Align Alignment = std::max(TargetAlign, MaxAlign);

  const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();

  unsigned LR = RegInfo->getRARegister();
  bool DisableRedZone = MF.getFunction().hasFnAttribute(Attribute::NoRedZone);
  bool CanUseRedZone = !MFI.hasVarSizedObjects() &&  // No dynamic alloca.
                       !MFI.adjustsStack() &&        // No calls.
                       !MustSaveLR(MF, LR) &&        // No need to save LR.
                       !FI->mustSaveTOC() &&         // No need to save TOC.
                       !RegInfo->hasBasePointer(MF); // No special alignment.

  // Note: for PPC32 SVR4ABI, we can still generate stackless
  // code if all local vars are reg-allocated.
  bool FitsInRedZone = FrameSize <= Subtarget.getRedZoneSize();

  // Check whether we can skip adjusting the stack pointer (by using red zone).
  if (!DisableRedZone && CanUseRedZone && FitsInRedZone) {
    // No need for frame
    return 0;
  }

  // Get the maximum call frame size of all the calls.
  unsigned maxCallFrameSize = MFI.getMaxCallFrameSize();

  // Maximum call frame needs to be at least big enough for linkage area.
  unsigned minCallFrameSize = getLinkageSize();
  maxCallFrameSize = std::max(maxCallFrameSize, minCallFrameSize);

  // If we have dynamic alloca then maxCallFrameSize needs to be aligned so
  // that allocations will be aligned.
  if (MFI.hasVarSizedObjects())
    maxCallFrameSize = alignTo(maxCallFrameSize, Alignment);

  // Update the new max call frame size if the caller passes in a valid pointer.
  if (NewMaxCallFrameSize)
    *NewMaxCallFrameSize = maxCallFrameSize;

  // Include call frame size in total.
  FrameSize += maxCallFrameSize;

  // Make sure the frame is aligned.
  FrameSize = alignTo(FrameSize, Alignment);

  return FrameSize;
}
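
// Worked example (illustrative, not from the source): a 64-bit ELFv2 function
// with 40 bytes of locals that makes calls but passes all arguments in
// registers gets maxCallFrameSize = max(0, LinkageSize = 32) = 32, so
// FrameSize = alignTo(40 + 32, 16) = 80 bytes.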

// hasFP - Return true if the specified function actually has a dedicated frame
// pointer register.
bool PPCFrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  // FIXME: This is pretty much broken by design: hasFP() might be called really
  // early, before the stack layout was calculated and thus hasFP() might return
  // true or false here depending on the time of call.
  return MFI.getStackSize() && needsFP(MF);
}

// needsFP - Return true if the specified function should have a dedicated frame
// pointer register. This is true if the function has variable sized allocas or
// if frame pointer elimination is disabled.
bool PPCFrameLowering::needsFP(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Naked functions have no stack frame pushed, so we don't have a frame
  // pointer.
  if (MF.getFunction().hasFnAttribute(Attribute::Naked))
    return false;

  return MF.getTarget().Options.DisableFramePointerElim(MF) ||
         MFI.hasVarSizedObjects() || MFI.hasStackMap() || MFI.hasPatchPoint() ||
         MF.exposesReturnsTwice() ||
         (MF.getTarget().Options.GuaranteedTailCallOpt &&
          MF.getInfo<PPCFunctionInfo>()->hasFastCall());
}

void PPCFrameLowering::replaceFPWithRealFP(MachineFunction &MF) const {
  bool is31 = needsFP(MF);
  unsigned FPReg = is31 ? PPC::R31 : PPC::R1;
  unsigned FP8Reg = is31 ? PPC::X31 : PPC::X1;

  const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  bool HasBP = RegInfo->hasBasePointer(MF);
  unsigned BPReg = HasBP ? (unsigned) RegInfo->getBaseRegister(MF) : FPReg;
  unsigned BP8Reg = HasBP ? (unsigned) PPC::X30 : FP8Reg;

  for (MachineBasicBlock &MBB : MF)
    for (MachineBasicBlock::iterator MBBI = MBB.end(); MBBI != MBB.begin();) {
      --MBBI;
      for (unsigned I = 0, E = MBBI->getNumOperands(); I != E; ++I) {
        MachineOperand &MO = MBBI->getOperand(I);
        if (!MO.isReg())
          continue;
        switch (MO.getReg()) {
        case PPC::FP:
          MO.setReg(FPReg);
          break;
        case PPC::FP8:
          MO.setReg(FP8Reg);
          break;
        case PPC::BP:
          MO.setReg(BPReg);
          break;
        case PPC::BP8:
          MO.setReg(BP8Reg);
          break;
        }
      }
    }
}
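
// For reference: PPC::FP/FP8 and PPC::BP/BP8 are placeholder registers used
// before the frame layout is decided; the loop above rewrites them to real
// registers, e.g. an FP operand becomes R31 (X31 for 64-bit) when a frame
// pointer is needed and plain R1 (X1) otherwise.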

/* This function will do the following:
   - If MBB is an entry or exit block, set SR1 and SR2 to R0 and R12
     respectively (defaults recommended by the ABI) and return true
   - If MBB is not an entry block, initialize the register scavenger and look
     for available registers.
   - If the defaults (R0/R12) are available, return true
   - If TwoUniqueRegsRequired is set to true, it looks for two unique
     registers. Otherwise, look for a single available register.
   - If the required registers are found, set SR1 and SR2 and return true.
   - If the required registers are not found, set SR2 or both SR1 and SR2 to
     PPC::NoRegister and return false.

   Note that if both SR1 and SR2 are valid parameters and TwoUniqueRegsRequired
   is not set, this function will attempt to find two different registers, but
   still return true if only one register is available (and set SR1 == SR2).
*/
bool
PPCFrameLowering::findScratchRegister(MachineBasicBlock *MBB,
                                      bool UseAtEnd,
                                      bool TwoUniqueRegsRequired,
                                      Register *SR1,
                                      Register *SR2) const {
  RegScavenger RS;
  Register R0 = Subtarget.isPPC64() ? PPC::X0 : PPC::R0;
  Register R12 = Subtarget.isPPC64() ? PPC::X12 : PPC::R12;

  // Set the defaults for the two scratch registers.
  if (SR1)
    *SR1 = R0;

  if (SR2) {
    assert(SR1 && "Asking for the second scratch register but not the first?");
    *SR2 = R12;
  }

  // If MBB is an entry or exit block, use R0 and R12 as the scratch registers.
  if ((UseAtEnd && MBB->isReturnBlock()) ||
      (!UseAtEnd && (&MBB->getParent()->front() == MBB)))
    return true;

  RS.enterBasicBlock(*MBB);

  if (UseAtEnd && !MBB->empty()) {
    // The scratch register will be used at the end of the block, so must
    // consider all registers used within the block.
    MachineBasicBlock::iterator MBBI = MBB->getFirstTerminator();
    // If no terminator, back iterator up to previous instruction.
    if (MBBI == MBB->end())
      MBBI = std::prev(MBBI);

    if (MBBI != MBB->begin())
      RS.forward(MBBI);
  }

  // If the two registers are available, we're all good.
  // Note that we only return here if both R0 and R12 are available because
  // although the function may not require two unique registers, it may benefit
  // from having two so we should try to provide them.
  if (!RS.isRegUsed(R0) && !RS.isRegUsed(R12))
    return true;

  // Get the list of callee-saved registers for the target.
  const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(MBB->getParent());

  // Get all the available registers in the block.
  BitVector BV = RS.getRegsAvailable(Subtarget.isPPC64() ? &PPC::G8RCRegClass :
                                                           &PPC::GPRCRegClass);

  // We shouldn't use callee-saved registers as scratch registers as they may be
  // available when looking for a candidate block for shrink wrapping but not
  // available when the actual prologue/epilogue is being emitted because they
  // were added as live-in to the prologue block by PrologueEpilogueInserter.
  for (int i = 0; CSRegs[i]; ++i)
    BV.reset(CSRegs[i]);

  // Set the first scratch register to the first available one.
  if (SR1) {
    int FirstScratchReg = BV.find_first();
    *SR1 = FirstScratchReg == -1 ? (unsigned)PPC::NoRegister : FirstScratchReg;
  }

  // If there is another one available, set the second scratch register to that.
  // Otherwise, set it to either PPC::NoRegister if this function requires two
  // or to whatever SR1 is set to if this function doesn't require two.
  if (SR2) {
    int SecondScratchReg = BV.find_next(*SR1);
    if (SecondScratchReg != -1)
      *SR2 = SecondScratchReg;
    else
      *SR2 = TwoUniqueRegsRequired ? Register() : *SR1;
  }

  // Now that we've done our best to provide both registers, double check
  // whether we were unable to provide enough.
  if (BV.count() < (TwoUniqueRegsRequired ? 2U : 1U))
    return false;

  return true;
}
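
// Illustrative use (hypothetical call site): querying the entry block for two
// scratch registers yields the ABI defaults without any scavenging:
//   Register SR1, SR2;
//   bool OK = findScratchRegister(&MF.front(), /*UseAtEnd=*/false,
//                                 /*TwoUniqueRegsRequired=*/true, &SR1, &SR2);
//   // OK == true, SR1 == R0/X0, SR2 == R12/X12.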

// We need a scratch register for spilling LR and for spilling CR. By default,
// we use two scratch registers to hide latency. However, if only one scratch
// register is available, we can adjust for that by not overlapping the spill
// code. However, if we need to realign the stack (i.e. have a base pointer)
// and the stack frame is large, we need two scratch registers.
// An inline stack probe also requires two scratch registers: one for the old
// SP and one for large frames and large probe sizes.
bool
PPCFrameLowering::twoUniqueScratchRegsRequired(MachineBasicBlock *MBB) const {
  const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  MachineFunction &MF = *(MBB->getParent());
  bool HasBP = RegInfo->hasBasePointer(MF);
  unsigned FrameSize = determineFrameLayout(MF);
  int NegFrameSize = -FrameSize;
  bool IsLargeFrame = !isInt<16>(NegFrameSize);
  MachineFrameInfo &MFI = MF.getFrameInfo();
  Align MaxAlign = MFI.getMaxAlign();
  bool HasRedZone = Subtarget.isPPC64() || !Subtarget.isSVR4ABI();
  const PPCTargetLowering &TLI = *Subtarget.getTargetLowering();

  return ((IsLargeFrame || !HasRedZone) && HasBP && MaxAlign > 1) ||
         TLI.hasInlineStackProbe(MF);
}

bool PPCFrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const {
  MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);

  return findScratchRegister(TmpMBB, false,
                             twoUniqueScratchRegsRequired(TmpMBB));
}

bool PPCFrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
  MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);

  return findScratchRegister(TmpMBB, true);
}

bool PPCFrameLowering::stackUpdateCanBeMoved(MachineFunction &MF) const {
  const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();

  // Abort if there is no register info or function info.
  if (!RegInfo || !FI)
    return false;

  // Only move the stack update on ELFv2 ABI and PPC64.
  if (!Subtarget.isELFv2ABI() || !Subtarget.isPPC64())
    return false;

  // Check the frame size first and return false if it does not fit the
  // requirements.
  // We need a non-zero frame size as well as a frame that will fit in the red
  // zone. This is because by moving the stack pointer update we are now storing
  // to the red zone until the stack pointer is updated. If we get an interrupt
  // inside the prologue but before the stack update we now have a number of
  // stores to the red zone and those stores must all fit.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  unsigned FrameSize = MFI.getStackSize();
  if (!FrameSize || FrameSize > Subtarget.getRedZoneSize())
    return false;

  // Frame pointers and base pointers complicate matters so don't do anything
  // if we have them. For example having a frame pointer will sometimes require
  // a copy of r1 into r31 and that makes keeping track of updates to r1 more
  // difficult. A similar situation exists with setjmp.
  if (hasFP(MF) || RegInfo->hasBasePointer(MF) || MF.exposesReturnsTwice())
    return false;

  // Calls to fast_cc functions use different rules for passing parameters on
  // the stack from the ABI and using PIC base in the function imposes
  // similar restrictions to using the base pointer. It is not generally safe
  // to move the stack pointer update in these situations.
  if (FI->hasFastCall() || FI->usesPICBase())
    return false;

  // Finally we can move the stack update if we do not require register
  // scavenging. Register scavenging can introduce more spills and so
  // may make the frame size larger than we have computed.
  return !RegInfo->requiresFrameIndexScavenging(MF);
}
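
// For intuition (illustrative assembly, not emitted verbatim): with a 64-byte
// frame, moving the stack update turns
//   mflr 0; std 0, 16(1); stdu 1, -64(1); std 30, 48(1)
// into
//   mflr 0; std 0, 16(1); std 30, -16(1); stdu 1, -64(1)
// so the callee-save store targets the red zone and no longer depends on the
// updated r1.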

void PPCFrameLowering::emitPrologue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const PPCInstrInfo &TII = *Subtarget.getInstrInfo();
  const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  const PPCTargetLowering &TLI = *Subtarget.getTargetLowering();

  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  DebugLoc dl;
  // AIX assembler does not support cfi directives.
  const bool needsCFI = MF.needsFrameMoves() && !Subtarget.isAIXABI();

  const bool HasFastMFLR = Subtarget.hasFastMFLR();

  // Get processor type.
  bool isPPC64 = Subtarget.isPPC64();
  // Get the ABI.
  bool isSVR4ABI = Subtarget.isSVR4ABI();
  bool isELFv2ABI = Subtarget.isELFv2ABI();
  assert((isSVR4ABI || Subtarget.isAIXABI()) && "Unsupported PPC ABI.");

  // Work out frame sizes.
  uint64_t FrameSize = determineFrameLayoutAndUpdate(MF);
  int64_t NegFrameSize = -FrameSize;
  if (!isPPC64 && (!isInt<32>(FrameSize) || !isInt<32>(NegFrameSize)))
    llvm_unreachable("Unhandled stack size!");

  if (MFI.isFrameAddressTaken())
    replaceFPWithRealFP(MF);

  // Check if the link register (LR) must be saved.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  bool MustSaveLR = FI->mustSaveLR();
  bool MustSaveTOC = FI->mustSaveTOC();
  const SmallVectorImpl<Register> &MustSaveCRs = FI->getMustSaveCRs();
  bool MustSaveCR = !MustSaveCRs.empty();
  // Do we have a frame pointer and/or base pointer for this function?
  bool HasFP = hasFP(MF);
  bool HasBP = RegInfo->hasBasePointer(MF);
  bool HasRedZone = isPPC64 || !isSVR4ABI;
  bool HasROPProtect = Subtarget.hasROPProtect();
  bool HasPrivileged = Subtarget.hasPrivileged();

  Register SPReg = isPPC64 ? PPC::X1 : PPC::R1;
  Register BPReg = RegInfo->getBaseRegister(MF);
  Register FPReg = isPPC64 ? PPC::X31 : PPC::R31;
  Register LRReg = isPPC64 ? PPC::LR8 : PPC::LR;
  Register TOCReg = isPPC64 ? PPC::X2 : PPC::R2;
  Register ScratchReg;
  Register TempReg = isPPC64 ? PPC::X12 : PPC::R12; // another scratch reg
  //  ...(R12/X12 is volatile in both Darwin & SVR4, & can't be a function arg.)
  const MCInstrDesc &MFLRInst = TII.get(isPPC64 ? PPC::MFLR8 : PPC::MFLR);
  const MCInstrDesc &StoreInst = TII.get(isPPC64 ? PPC::STD : PPC::STW);
  const MCInstrDesc &StoreUpdtInst = TII.get(isPPC64 ? PPC::STDU : PPC::STWU);
  const MCInstrDesc &StoreUpdtIdxInst =
      TII.get(isPPC64 ? PPC::STDUX : PPC::STWUX);
  const MCInstrDesc &OrInst = TII.get(isPPC64 ? PPC::OR8 : PPC::OR);
  const MCInstrDesc &SubtractCarryingInst =
      TII.get(isPPC64 ? PPC::SUBFC8 : PPC::SUBFC);
  const MCInstrDesc &SubtractImmCarryingInst =
      TII.get(isPPC64 ? PPC::SUBFIC8 : PPC::SUBFIC);
  const MCInstrDesc &MoveFromCondRegInst =
      TII.get(isPPC64 ? PPC::MFCR8 : PPC::MFCR);
  const MCInstrDesc &StoreWordInst = TII.get(isPPC64 ? PPC::STW8 : PPC::STW);
  const MCInstrDesc &HashST =
      TII.get(isPPC64 ? (HasPrivileged ? PPC::HASHSTP8 : PPC::HASHST8)
                      : (HasPrivileged ? PPC::HASHSTP : PPC::HASHST));

  // Regarding this assert: Even though LR is saved in the caller's frame (i.e.,
  // LROffset is positive), that slot is callee-owned. Because PPC32 SVR4 has no
  // Red Zone, an asynchronous event (a form of "callee") could claim a frame &
  // overwrite it, so PPC32 SVR4 must claim at least a minimal frame to save LR.
  assert((isPPC64 || !isSVR4ABI || !(!FrameSize && (MustSaveLR || HasFP))) &&
         "FrameSize must be >0 to save/restore the FP or LR for 32-bit SVR4.");

  // Using the same bool variable as below to suppress compiler warnings.
  bool SingleScratchReg = findScratchRegister(
      &MBB, false, twoUniqueScratchRegsRequired(&MBB), &ScratchReg, &TempReg);
  assert(SingleScratchReg &&
         "Required number of registers not available in this block");
  SingleScratchReg = ScratchReg == TempReg;

  int64_t LROffset = getReturnSaveOffset();

  int64_t FPOffset = 0;
  if (HasFP) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
    int FPIndex = FI->getFramePointerSaveIndex();
    assert(FPIndex && "No Frame Pointer Save Slot!");
    FPOffset = MFI.getObjectOffset(FPIndex);
  }

  int64_t BPOffset = 0;
  if (HasBP) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
    int BPIndex = FI->getBasePointerSaveIndex();
    assert(BPIndex && "No Base Pointer Save Slot!");
    BPOffset = MFI.getObjectOffset(BPIndex);
  }

  int64_t PBPOffset = 0;
  if (FI->usesPICBase()) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
    int PBPIndex = FI->getPICBasePointerSaveIndex();
    assert(PBPIndex && "No PIC Base Pointer Save Slot!");
    PBPOffset = MFI.getObjectOffset(PBPIndex);
  }

  // Get stack alignments.
  Align MaxAlign = MFI.getMaxAlign();
  if (HasBP && MaxAlign > 1)
    assert(Log2(MaxAlign) < 16 && "Invalid alignment!");

  // Frames of 32KB & larger require special handling because they cannot be
  // indexed into with a simple STDU/STWU/STD/STW immediate offset operand.
  bool isLargeFrame = !isInt<16>(NegFrameSize);

  // Check if we can move the stack update instruction (stdu) down the prologue
  // past the callee saves. Hopefully this will avoid the situation where the
  // saves are waiting for the update on the store with update to complete.
  MachineBasicBlock::iterator StackUpdateLoc = MBBI;
  bool MovingStackUpdateDown = false;

  // Check if we can move the stack update.
  if (stackUpdateCanBeMoved(MF)) {
    const std::vector<CalleeSavedInfo> &Info = MFI.getCalleeSavedInfo();
    for (CalleeSavedInfo CSI : Info) {
      // If the callee saved register is spilled to a register instead of the
      // stack then the spill no longer uses the stack pointer.
      // This can lead to two consequences:
      // 1) We no longer need to update the stack because the function does not
      //    spill any callee saved registers to stack.
      // 2) We have a situation where we still have to update the stack pointer
      //    even though some registers are spilled to other registers. In
      //    this case the current code moves the stack update to an incorrect
      //    position.
      // In either case we should abort moving the stack update operation.
      if (CSI.isSpilledToReg()) {
        StackUpdateLoc = MBBI;
        MovingStackUpdateDown = false;
        break;
      }

      int FrIdx = CSI.getFrameIdx();
      // If the frame index is not negative the callee saved info belongs to a
      // stack object that is not a fixed stack object. We ignore non-fixed
      // stack objects because we won't move the stack update pointer past them.
      if (FrIdx >= 0)
        continue;

      if (MFI.isFixedObjectIndex(FrIdx) && MFI.getObjectOffset(FrIdx) < 0) {
        StackUpdateLoc++;
        MovingStackUpdateDown = true;
      } else {
        // We need all of the Frame Indices to meet these conditions.
        // If they do not, abort the whole operation.
        StackUpdateLoc = MBBI;
        MovingStackUpdateDown = false;
        break;
      }
    }

    // If the operation was not aborted then update the object offset.
    if (MovingStackUpdateDown) {
      for (CalleeSavedInfo CSI : Info) {
        int FrIdx = CSI.getFrameIdx();
        if (FrIdx < 0)
          MFI.setObjectOffset(FrIdx, MFI.getObjectOffset(FrIdx) + NegFrameSize);
      }
    }
  }

  // Where in the prologue we move the CR fields depends on how many scratch
  // registers we have, and if we need to save the link register or not. This
  // lambda is to avoid duplicating the logic in 2 places.
  auto BuildMoveFromCR = [&]() {
    if (isELFv2ABI && MustSaveCRs.size() == 1) {
      // In the ELFv2 ABI, we are not required to save all CR fields.
      // If only one CR field is clobbered, it is more efficient to use
      // mfocrf to selectively save just that field, because mfocrf has short
      // latency compared to mfcr.
      assert(isPPC64 && "V2 ABI is 64-bit only.");
      MachineInstrBuilder MIB =
          BuildMI(MBB, MBBI, dl, TII.get(PPC::MFOCRF8), TempReg);
      MIB.addReg(MustSaveCRs[0], RegState::Kill);
    } else {
      MachineInstrBuilder MIB =
          BuildMI(MBB, MBBI, dl, MoveFromCondRegInst, TempReg);
      for (unsigned CRfield : MustSaveCRs)
        MIB.addReg(CRfield, RegState::ImplicitKill);
    }
  };
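
  // For intuition (illustrative assembly): with exactly one clobbered CR
  // field on ELFv2 this emits something like "mfocrf r12, cr2", while the
  // general path emits "mfcr r12" with the saved fields marked implicitly
  // killed.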

  // If we need to spill the CR and the LR but we don't have two separate
  // registers available, we must spill them one at a time.
  if (MustSaveCR && SingleScratchReg && MustSaveLR) {
    BuildMoveFromCR();
    BuildMI(MBB, MBBI, dl, StoreWordInst)
        .addReg(TempReg, getKillRegState(true))
        .addImm(CRSaveOffset)
        .addReg(SPReg);
  }

  if (MustSaveLR)
    BuildMI(MBB, MBBI, dl, MFLRInst, ScratchReg);

  if (MustSaveCR && !(SingleScratchReg && MustSaveLR))
    BuildMoveFromCR();

  if (HasRedZone) {
    if (HasFP)
      BuildMI(MBB, MBBI, dl, StoreInst)
          .addReg(FPReg)
          .addImm(FPOffset)
          .addReg(SPReg);
    if (FI->usesPICBase())
      BuildMI(MBB, MBBI, dl, StoreInst)
          .addReg(PPC::R30)
          .addImm(PBPOffset)
          .addReg(SPReg);
    if (HasBP)
      BuildMI(MBB, MBBI, dl, StoreInst)
          .addReg(BPReg)
          .addImm(BPOffset)
          .addReg(SPReg);
  }

  // Generate the instruction to store the LR. In the case where ROP protection
  // is required the register holding the LR should not be killed as it will be
  // used by the hash store instruction.
  auto SaveLR = [&](int64_t Offset) {
    assert(MustSaveLR && "LR is not required to be saved!");
    BuildMI(MBB, StackUpdateLoc, dl, StoreInst)
        .addReg(ScratchReg, getKillRegState(!HasROPProtect))
        .addImm(Offset)
        .addReg(SPReg);

    // Add the ROP protection Hash Store instruction.
    // NOTE: This is technically a violation of the ABI. The hash can be saved
    // up to 512 bytes into the Protected Zone. This can be outside of the
    // initial 288 byte volatile program storage region in the Protected Zone.
    // However, this restriction will be removed in an upcoming revision of the
    // ABI.
    if (HasROPProtect) {
      const int SaveIndex = FI->getROPProtectionHashSaveIndex();
      const int64_t ImmOffset = MFI.getObjectOffset(SaveIndex);
      assert((ImmOffset <= -8 && ImmOffset >= -512) &&
             "ROP hash save offset out of range.");
      assert(((ImmOffset & 0x7) == 0) &&
             "ROP hash save offset must be 8 byte aligned.");
      BuildMI(MBB, StackUpdateLoc, dl, HashST)
          .addReg(ScratchReg, getKillRegState(true))
          .addImm(ImmOffset)
          .addReg(SPReg);
    }
  };
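
  // Illustrative 64-bit sequence produced by SaveLR(16) with ROP protection
  // enabled (the hash offset is an example only):
  //   std r0, 16(r1)      ; save LR (r0 stays live for the hash)
  //   hashst r0, -8(r1)   ; store the ROP-protection hash of LR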

  if (MustSaveLR && HasFastMFLR)
    SaveLR(LROffset);

  if (MustSaveCR &&
      !(SingleScratchReg && MustSaveLR)) {
    assert(HasRedZone && "A red zone is always available on PPC64");
    BuildMI(MBB, MBBI, dl, StoreWordInst)
        .addReg(TempReg, getKillRegState(true))
        .addImm(CRSaveOffset)
        .addReg(SPReg);
  }

  // Skip the rest if this is a leaf function & all spills fit in the Red Zone.
  if (!FrameSize) {
    if (MustSaveLR && !HasFastMFLR)
      SaveLR(LROffset);
    return;
  }

  // Adjust stack pointer: r1 += NegFrameSize.
  // If there is a preferred stack alignment, align R1 now.
  if (HasBP && HasRedZone) {
    // Save a copy of r1 as the base pointer.
    BuildMI(MBB, MBBI, dl, OrInst, BPReg)
        .addReg(SPReg)
        .addReg(SPReg);
  }

  // Have we generated a STUX instruction to claim stack frame? If so,
  // the negated frame size will be placed in ScratchReg.
  bool HasSTUX =
      (TLI.hasInlineStackProbe(MF) && FrameSize > TLI.getStackProbeSize(MF)) ||
      (HasBP && MaxAlign > 1) || isLargeFrame;

  // If we use STUX to update the stack pointer, we need the two scratch
  // registers TempReg and ScratchReg; we have to save LR here, which is stored
  // in ScratchReg.
  // If the offset cannot be encoded into the store instruction, we also have
  // to save LR here.
  if (MustSaveLR && !HasFastMFLR &&
      (HasSTUX || !isInt<16>(FrameSize + LROffset)))
    SaveLR(LROffset);

  // If FrameSize <= TLI.getStackProbeSize(MF), then, as the POWER ABI requires
  // that the backchain pointer is always stored at SP, we will get a free probe
  // due to an essential STU(X) instruction.
  if (TLI.hasInlineStackProbe(MF) && FrameSize > TLI.getStackProbeSize(MF)) {
    // To be consistent with other targets, a pseudo instruction is emitted and
    // will be later expanded in `inlineStackProbe`.
    BuildMI(MBB, MBBI, dl,
            TII.get(isPPC64 ? PPC::PROBED_STACKALLOC_64
                            : PPC::PROBED_STACKALLOC_32))
        .addDef(TempReg)
        .addDef(ScratchReg) // ScratchReg stores the old sp.
        .addImm(NegFrameSize);
    // FIXME: HasSTUX is only read if HasRedZone is not set; in that case, we
    // update ScratchReg to meet the assumption that ScratchReg contains
    // the NegFrameSize. This solution is rather tricky.
    if (!HasRedZone) {
      BuildMI(MBB, MBBI, dl, TII.get(PPC::SUBF), ScratchReg)
          .addReg(ScratchReg)
          .addReg(SPReg);
    }
  } else {
    // This condition must be kept in sync with canUseAsPrologue.
    if (HasBP && MaxAlign > 1) {
      if (isPPC64)
        BuildMI(MBB, MBBI, dl, TII.get(PPC::RLDICL), ScratchReg)
            .addReg(SPReg)
            .addImm(0)
            .addImm(64 - Log2(MaxAlign));
      else // PPC32...
        BuildMI(MBB, MBBI, dl, TII.get(PPC::RLWINM), ScratchReg)
            .addReg(SPReg)
            .addImm(0)
            .addImm(32 - Log2(MaxAlign))
            .addImm(31);
      if (!isLargeFrame) {
        BuildMI(MBB, MBBI, dl, SubtractImmCarryingInst, ScratchReg)
            .addReg(ScratchReg, RegState::Kill)
            .addImm(NegFrameSize);
      } else {
        assert(!SingleScratchReg && "Only a single scratch reg available");
        TII.materializeImmPostRA(MBB, MBBI, dl, TempReg, NegFrameSize);
        BuildMI(MBB, MBBI, dl, SubtractCarryingInst, ScratchReg)
            .addReg(ScratchReg, RegState::Kill)
            .addReg(TempReg, RegState::Kill);
      }

      BuildMI(MBB, MBBI, dl, StoreUpdtIdxInst, SPReg)
          .addReg(SPReg, RegState::Kill)
          .addReg(SPReg)
          .addReg(ScratchReg);
    } else if (!isLargeFrame) {
      BuildMI(MBB, StackUpdateLoc, dl, StoreUpdtInst, SPReg)
          .addReg(SPReg)
          .addImm(NegFrameSize)
          .addReg(SPReg);
    } else {
      TII.materializeImmPostRA(MBB, MBBI, dl, ScratchReg, NegFrameSize);
      BuildMI(MBB, MBBI, dl, StoreUpdtIdxInst, SPReg)
          .addReg(SPReg, RegState::Kill)
          .addReg(SPReg)
          .addReg(ScratchReg);
    }
  }
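
  // The three stack-update shapes above roughly correspond to (illustrative
  // 32-bit assembly):
  //   small frame:     stwu r1, -FS(r1)
  //   realigned frame: rlwinm r0, r1, 0, ...  ; misalignment of SP
  //                    subfic r0, r0, -FS     ; aligned adjustment
  //                    stwux r1, r1, r0
  //   large frame:     lis/ori r0, ...        ; materialize -FS
  //                    stwux r1, r1, r0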

  // Save the TOC register after the stack pointer update if a prologue TOC
  // save is required for the function.
  if (MustSaveTOC) {
    assert(isELFv2ABI && "TOC saves in the prologue only supported on ELFv2");
    BuildMI(MBB, StackUpdateLoc, dl, TII.get(PPC::STD))
        .addReg(TOCReg, getKillRegState(true))
        .addImm(TOCSaveOffset)
        .addReg(SPReg);
  }

  if (!HasRedZone) {
    assert(!isPPC64 && "A red zone is always available on PPC64");
    if (HasSTUX) {
      // The negated frame size is in ScratchReg, and the SPReg has been
      // decremented by the frame size: SPReg = old SPReg + ScratchReg.
      // Since FPOffset, PBPOffset, etc. are relative to the beginning of
      // the stack frame (i.e. the old SP), ideally, we would put the old
      // SP into a register and use it as the base for the stores. The
      // problem is that the only available register may be ScratchReg,
      // which could be R0, and R0 cannot be used as a base address.

      // First, set ScratchReg to the old SP. This may need to be modified
      // later.
      BuildMI(MBB, MBBI, dl, TII.get(PPC::SUBF), ScratchReg)
          .addReg(ScratchReg, RegState::Kill)
          .addReg(SPReg);

      if (ScratchReg == PPC::R0) {
        // R0 cannot be used as a base register, but it can be used as an
        // index in a store-indexed.
        int LastOffset = 0;
        if (HasFP) {
          // R0 += (FPOffset-LastOffset).
          // Need addic, since addi treats R0 as 0.
          BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDIC), ScratchReg)
              .addReg(ScratchReg)
              .addImm(FPOffset - LastOffset);
          LastOffset = FPOffset;
          // Store FP into *R0.
          BuildMI(MBB, MBBI, dl, TII.get(PPC::STWX))
              .addReg(FPReg, RegState::Kill) // Save FP.
              .addReg(PPC::ZERO)
              .addReg(ScratchReg); // This will be the index (R0 is ok here).
        }
        if (FI->usesPICBase()) {
          // R0 += (PBPOffset-LastOffset).
          BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDIC), ScratchReg)
              .addReg(ScratchReg)
              .addImm(PBPOffset - LastOffset);
          LastOffset = PBPOffset;
          BuildMI(MBB, MBBI, dl, TII.get(PPC::STWX))
              .addReg(PPC::R30, RegState::Kill) // Save PIC base pointer.
              .addReg(PPC::ZERO)
              .addReg(ScratchReg); // This will be the index (R0 is ok here).
        }
        if (HasBP) {
          // R0 += (BPOffset-LastOffset).
          BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDIC), ScratchReg)
              .addReg(ScratchReg)
              .addImm(BPOffset - LastOffset);
          LastOffset = BPOffset;
          BuildMI(MBB, MBBI, dl, TII.get(PPC::STWX))
              .addReg(BPReg, RegState::Kill) // Save BP.
              .addReg(PPC::ZERO)
              .addReg(ScratchReg); // This will be the index (R0 is ok here).
          // BP = R0 - LastOffset.
          BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDIC), BPReg)
              .addReg(ScratchReg, RegState::Kill)
              .addImm(-LastOffset);
        }
      } else {
        // ScratchReg is not R0, so use it as the base register. It is
        // already set to the old SP, so we can use the offsets directly.

        // Now that the stack frame has been allocated, save all the necessary
        // registers using ScratchReg as the base address.
        if (HasFP)
          BuildMI(MBB, MBBI, dl, StoreInst)
              .addReg(FPReg)
              .addImm(FPOffset)
              .addReg(ScratchReg);
        if (FI->usesPICBase())
          BuildMI(MBB, MBBI, dl, StoreInst)
              .addReg(PPC::R30)
              .addImm(PBPOffset)
              .addReg(ScratchReg);
        if (HasBP) {
          BuildMI(MBB, MBBI, dl, StoreInst)
              .addReg(BPReg)
              .addImm(BPOffset)
              .addReg(ScratchReg);
          BuildMI(MBB, MBBI, dl, OrInst, BPReg)
              .addReg(ScratchReg, RegState::Kill)
              .addReg(ScratchReg);
        }
      }
    } else {
      // The frame size is a known 16-bit constant (fitting in the immediate
      // field of STWU). To be here we have to be compiling for PPC32.
      // Since the SPReg has been decreased by FrameSize, add it back to each
      // offset.
      if (HasFP)
        BuildMI(MBB, MBBI, dl, StoreInst)
            .addReg(FPReg)
            .addImm(FrameSize + FPOffset)
            .addReg(SPReg);
      if (FI->usesPICBase())
        BuildMI(MBB, MBBI, dl, StoreInst)
            .addReg(PPC::R30)
            .addImm(FrameSize + PBPOffset)
            .addReg(SPReg);
      if (HasBP) {
        BuildMI(MBB, MBBI, dl, StoreInst)
            .addReg(BPReg)
            .addImm(FrameSize + BPOffset)
            .addReg(SPReg);
        BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDI), BPReg)
            .addReg(SPReg)
            .addImm(FrameSize);
      }
    }
  }

  // Save the LR now.
  if (!HasSTUX && MustSaveLR && !HasFastMFLR && isInt<16>(FrameSize + LROffset))
    SaveLR(LROffset + FrameSize);

  // Add Call Frame Information for the instructions we generated above.
  if (needsCFI) {
    unsigned CFIIndex;

    if (HasBP) {
      // Define CFA in terms of BP. Do this in preference to using FP/SP,
      // because if the stack needed aligning then CFA won't be at a fixed
      // offset from FP/SP.
      unsigned Reg = MRI->getDwarfRegNum(BPReg, true);
      CFIIndex = MF.addFrameInst(
          MCCFIInstruction::createDefCfaRegister(nullptr, Reg));
    } else {
      // Adjust the definition of CFA to account for the change in SP.
      assert(NegFrameSize);
      CFIIndex = MF.addFrameInst(
          MCCFIInstruction::cfiDefCfaOffset(nullptr, -NegFrameSize));
    }
    BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);

    if (HasFP) {
      // Describe where FP was saved, at a fixed offset from CFA.
      unsigned Reg = MRI->getDwarfRegNum(FPReg, true);
      CFIIndex = MF.addFrameInst(
          MCCFIInstruction::createOffset(nullptr, Reg, FPOffset));
      BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }

    if (FI->usesPICBase()) {
      // Describe where the PIC base pointer (R30) was saved, at a fixed
      // offset from CFA.
      unsigned Reg = MRI->getDwarfRegNum(PPC::R30, true);
      CFIIndex = MF.addFrameInst(
          MCCFIInstruction::createOffset(nullptr, Reg, PBPOffset));
      BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }

    if (HasBP) {
      // Describe where BP was saved, at a fixed offset from CFA.
      unsigned Reg = MRI->getDwarfRegNum(BPReg, true);
      CFIIndex = MF.addFrameInst(
          MCCFIInstruction::createOffset(nullptr, Reg, BPOffset));
      BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }

    if (MustSaveLR) {
      // Describe where LR was saved, at a fixed offset from CFA.
      unsigned Reg = MRI->getDwarfRegNum(LRReg, true);
      CFIIndex = MF.addFrameInst(
          MCCFIInstruction::createOffset(nullptr, Reg, LROffset));
      BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }
  }

  // If there is a frame pointer, copy R1 into R31.
  if (HasFP) {
    BuildMI(MBB, MBBI, dl, OrInst, FPReg)
        .addReg(SPReg)
        .addReg(SPReg);

    if (!HasBP && needsCFI) {
      // Change the definition of CFA from SP+offset to FP+offset, because SP
      // will change at every alloca.
      unsigned Reg = MRI->getDwarfRegNum(FPReg, true);
      unsigned CFIIndex = MF.addFrameInst(
          MCCFIInstruction::createDefCfaRegister(nullptr, Reg));
      BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }
  }

  if (needsCFI) {
    // Describe where callee saved registers were saved, at fixed offsets from
    // CFA.
    const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
    for (const CalleeSavedInfo &I : CSI) {
      Register Reg = I.getReg();
      if (Reg == PPC::LR || Reg == PPC::LR8 || Reg == PPC::RM) continue;

      // This is a bit of a hack: CR2LT, CR2GT, CR2EQ and CR2UN are just
      // subregisters of CR2. We just need to emit a move of CR2.
      if (PPC::CRBITRCRegClass.contains(Reg))
        continue;

      if ((Reg == PPC::X2 || Reg == PPC::R2) && MustSaveTOC)
        continue;

      // For SVR4, don't emit a move for the CR spill slot if we haven't
      // spilled CRs.
      if (isSVR4ABI && (PPC::CR2 <= Reg && Reg <= PPC::CR4)
          && !MustSaveCR)
        continue;

      // For 64-bit SVR4 when we have spilled CRs, the spill location
      // is SP+8, not a frame-relative slot.
      if (isSVR4ABI && isPPC64 && (PPC::CR2 <= Reg && Reg <= PPC::CR4)) {
        // In the ELFv1 ABI, only CR2 is noted in CFI and stands in for
        // the whole CR word. In the ELFv2 ABI, every CR that was
        // actually saved gets its own CFI record.
        Register CRReg = isELFv2ABI ? Reg : PPC::CR2;
        unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
            nullptr, MRI->getDwarfRegNum(CRReg, true), CRSaveOffset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
        continue;
      }

      if (I.isSpilledToReg()) {
        unsigned SpilledReg = I.getDstReg();
        unsigned CFIRegister = MF.addFrameInst(MCCFIInstruction::createRegister(
            nullptr, MRI->getDwarfRegNum(Reg, true),
            MRI->getDwarfRegNum(SpilledReg, true)));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIRegister);
      } else {
        int64_t Offset = MFI.getObjectOffset(I.getFrameIdx());
        // We have changed the object offset above but we do not want to change
        // the actual offsets in the CFI instruction so we have to undo the
        // offset change here.
        if (MovingStackUpdateDown)
          Offset -= NegFrameSize;

        unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
            nullptr, MRI->getDwarfRegNum(Reg, true), Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      }
    }
  }
}
void PPCFrameLowering::inlineStackProbe(MachineFunction &MF,
                                        MachineBasicBlock &PrologMBB) const {
  bool isPPC64 = Subtarget.isPPC64();
  const PPCTargetLowering &TLI = *Subtarget.getTargetLowering();
  const PPCInstrInfo &TII = *Subtarget.getInstrInfo();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  // The AIX assembler does not support CFI directives.
  const bool needsCFI = MF.needsFrameMoves() && !Subtarget.isAIXABI();
  auto StackAllocMIPos = llvm::find_if(PrologMBB, [](MachineInstr &MI) {
    int Opc = MI.getOpcode();
    return Opc == PPC::PROBED_STACKALLOC_64 || Opc == PPC::PROBED_STACKALLOC_32;
  });
  if (StackAllocMIPos == PrologMBB.end())
    return;
  const BasicBlock *ProbedBB = PrologMBB.getBasicBlock();
  MachineBasicBlock *CurrentMBB = &PrologMBB;
  DebugLoc DL = PrologMBB.findDebugLoc(StackAllocMIPos);
  MachineInstr &MI = *StackAllocMIPos;
  int64_t NegFrameSize = MI.getOperand(2).getImm();
  unsigned ProbeSize = TLI.getStackProbeSize(MF);
  int64_t NegProbeSize = -(int64_t)ProbeSize;
  assert(isInt<32>(NegProbeSize) && "Unhandled probe size");
  int64_t NumBlocks = NegFrameSize / NegProbeSize;
  int64_t NegResidualSize = NegFrameSize % NegProbeSize;
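  // For illustration (values are hypothetical, not from the original source):
  // with NegFrameSize = -10000 and ProbeSize = 4096, NegProbeSize = -4096,
  // NumBlocks = 2 and NegResidualSize = -1808, i.e. two full probes are
  // followed by one residual probe.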
  Register SPReg = isPPC64 ? PPC::X1 : PPC::R1;
  Register ScratchReg = MI.getOperand(0).getReg();
  Register FPReg = MI.getOperand(1).getReg();
  const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  bool HasBP = RegInfo->hasBasePointer(MF);
  Register BPReg = RegInfo->getBaseRegister(MF);
  Align MaxAlign = MFI.getMaxAlign();
  bool HasRedZone = Subtarget.isPPC64() || !Subtarget.isSVR4ABI();
  const MCInstrDesc &CopyInst = TII.get(isPPC64 ? PPC::OR8 : PPC::OR);
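  // Editorial note: "or rD, rS, rS" is the canonical register-copy idiom on
  // PowerPC, which is why CopyInst is an OR and its uses below pass the same
  // source register twice.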
  // Subroutines to generate .cfi_* directives.
  auto buildDefCFAReg = [&](MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI, Register Reg) {
    unsigned RegNum = MRI->getDwarfRegNum(Reg, true);
    unsigned CFIIndex = MF.addFrameInst(
        MCCFIInstruction::createDefCfaRegister(nullptr, RegNum));
    BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);
  };
  auto buildDefCFA = [&](MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MBBI, Register Reg,
                         int Offset) {
    unsigned RegNum = MRI->getDwarfRegNum(Reg, true);
    unsigned CFIIndex = MBB.getParent()->addFrameInst(
        MCCFIInstruction::cfiDefCfa(nullptr, RegNum, Offset));
    BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);
  };
  // Subroutine to determine if we can use the Imm as part of d-form.
  auto CanUseDForm = [](int64_t Imm) { return isInt<16>(Imm) && Imm % 4 == 0; };
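  // For illustration (values are hypothetical): Imm = -4096 fits in a signed
  // 16-bit field and is a multiple of 4, so the d-form st(w|d)u can encode it
  // directly; Imm = -40000 does not fit in 16 bits and must first be
  // materialized into a register for the x-form st(w|d)ux.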
  // Subroutine to materialize the Imm into TempReg.
  auto MaterializeImm = [&](MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI, int64_t Imm,
                            Register &TempReg) {
    assert(isInt<32>(Imm) && "Unhandled imm");
    if (isInt<16>(Imm))
      BuildMI(MBB, MBBI, DL, TII.get(isPPC64 ? PPC::LI8 : PPC::LI), TempReg)
          .addImm(Imm);
    else {
      BuildMI(MBB, MBBI, DL, TII.get(isPPC64 ? PPC::LIS8 : PPC::LIS), TempReg)
          .addImm(Imm >> 16);
      BuildMI(MBB, MBBI, DL, TII.get(isPPC64 ? PPC::ORI8 : PPC::ORI), TempReg)
          .addReg(TempReg)
          .addImm(Imm & 0xFFFF);
    }
  };
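  // For illustration (a hypothetical value): Imm = -100000 is 0xFFFE7960 as
  // a 32-bit value, so LIS loads the high half (Imm >> 16 = -2) and ORI
  // merges in the low half (Imm & 0xFFFF = 0x7960).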
  // Subroutine to store the frame pointer and decrease the stack pointer by
  // the probe size.
  auto allocateAndProbe = [&](MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MBBI, int64_t NegSize,
                              Register NegSizeReg, bool UseDForm,
                              Register StoreReg) {
    if (UseDForm)
      BuildMI(MBB, MBBI, DL, TII.get(isPPC64 ? PPC::STDU : PPC::STWU), SPReg)
          .addReg(StoreReg)
          .addImm(NegSize)
          .addReg(SPReg);
    else
      BuildMI(MBB, MBBI, DL, TII.get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
          .addReg(StoreReg)
          .addReg(SPReg)
          .addReg(NegSizeReg);
  };
  // Used to probe the stack when realignment is required.
  // Note that, per the ABI, *sp must always equal the value of the back-chain
  // pointer, so only st(w|d)u(x) can be used to update sp. The pseudo code is:
  //   final_sp = (sp & align) + negframesize;
  //   neg_gap = final_sp - sp;
  //   while (neg_gap < negprobesize) {
  //     stdu fp, negprobesize(sp);
  //     neg_gap -= negprobesize;
  //   }
  //   stdux fp, sp, neg_gap
  //
  // When HasBP && HasRedzone, the back-chain pointer is already saved in
  // BPReg before the probe code, so we don't need to save it again. This
  // gives us one additional register that can be used to materialize the
  // probe size if we need the x-form. Otherwise, we cannot materialize the
  // probe size, so we can only use the d-form for now.
  //
  // The allocations are:
  // if (HasBP && HasRedzone) {
  //   r0: materialize the probe size if needed so that we can use the x-form.
  //   r12: `neg_gap`
  // } else {
  //   r0: back-chain pointer
  //   r12: `neg_gap`.
  // }
  auto probeRealignedStack = [&](MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MBBI,
                                 Register ScratchReg, Register TempReg) {
    assert(HasBP && "The function is supposed to have base pointer when its "
                    "stack is realigned.");
    assert(isPowerOf2_64(ProbeSize) && "Probe size should be power of 2");

    // FIXME: We can eliminate this limitation if we get more information
    // about which parts of the red zone are already used. Used parts can be
    // treated as probed, but there might be `holes' in the probed region,
    // which could complicate the implementation.
    assert(ProbeSize >= Subtarget.getRedZoneSize() &&
           "Probe size should be larger or equal to the size of red-zone so "
           "that red-zone is not clobbered by probing.");

    Register &FinalStackPtr = TempReg;
    // FIXME: We only support a NegProbeSize materializable by d-form for
    // now. When HasBP && HasRedzone, we could use the x-form if we had an
    // additional idle register.
    NegProbeSize = std::max(NegProbeSize, -((int64_t)1 << 15));
    assert(isInt<16>(NegProbeSize) &&
           "NegProbeSize should be materializable by DForm");
    Register CRReg = PPC::CR0;
    // The layout of the emitted assembly is roughly:
    // bb.0:
    //   ...
    //   sub $scratchreg, $finalsp, r1
    //   cmpdi $scratchreg, <negprobesize>
    //   bge bb.2
    // bb.1:
    //   stdu <backchain>, <negprobesize>(r1)
    //   sub $scratchreg, $scratchreg, negprobesize
    //   cmpdi $scratchreg, <negprobesize>
    //   blt bb.1
    // bb.2:
    //   stdux <backchain>, r1, $scratchreg
    MachineFunction::iterator MBBInsertPoint = std::next(MBB.getIterator());
    MachineBasicBlock *ProbeLoopBodyMBB = MF.CreateMachineBasicBlock(ProbedBB);
    MF.insert(MBBInsertPoint, ProbeLoopBodyMBB);
    MachineBasicBlock *ProbeExitMBB = MF.CreateMachineBasicBlock(ProbedBB);
    MF.insert(MBBInsertPoint, ProbeExitMBB);
    // bb.2
    {
      Register BackChainPointer = HasRedZone ? BPReg : TempReg;
      allocateAndProbe(*ProbeExitMBB, ProbeExitMBB->end(), 0, ScratchReg, false,
                       BackChainPointer);
      if (HasRedZone)
        // PROBED_STACKALLOC_64 assumes Operand(1) holds the old SP; copy
        // BPReg to TempReg to satisfy it.
        BuildMI(*ProbeExitMBB, ProbeExitMBB->end(), DL, CopyInst, TempReg)
            .addReg(BPReg)
            .addReg(BPReg);
      ProbeExitMBB->splice(ProbeExitMBB->end(), &MBB, MBBI, MBB.end());
      ProbeExitMBB->transferSuccessorsAndUpdatePHIs(&MBB);
    }
    // bb.0
    {
      BuildMI(&MBB, DL, TII.get(isPPC64 ? PPC::SUBF8 : PPC::SUBF), ScratchReg)
          .addReg(SPReg)
          .addReg(FinalStackPtr);
      if (!HasRedZone)
        BuildMI(&MBB, DL, CopyInst, TempReg).addReg(SPReg).addReg(SPReg);
      BuildMI(&MBB, DL, TII.get(isPPC64 ? PPC::CMPDI : PPC::CMPWI), CRReg)
          .addReg(ScratchReg)
          .addImm(NegProbeSize);
      BuildMI(&MBB, DL, TII.get(PPC::BCC))
          .addImm(PPC::PRED_GE)
          .addReg(CRReg)
          .addMBB(ProbeExitMBB);
      MBB.addSuccessor(ProbeLoopBodyMBB);
      MBB.addSuccessor(ProbeExitMBB);
    }
    // bb.1
    {
      Register BackChainPointer = HasRedZone ? BPReg : TempReg;
      allocateAndProbe(*ProbeLoopBodyMBB, ProbeLoopBodyMBB->end(), NegProbeSize,
                       0, true /*UseDForm*/, BackChainPointer);
      BuildMI(ProbeLoopBodyMBB, DL, TII.get(isPPC64 ? PPC::ADDI8 : PPC::ADDI),
              ScratchReg)
          .addReg(ScratchReg)
          .addImm(-NegProbeSize);
      BuildMI(ProbeLoopBodyMBB, DL, TII.get(isPPC64 ? PPC::CMPDI : PPC::CMPWI),
              CRReg)
          .addReg(ScratchReg)
          .addImm(NegProbeSize);
      BuildMI(ProbeLoopBodyMBB, DL, TII.get(PPC::BCC))
          .addImm(PPC::PRED_LT)
          .addReg(CRReg)
          .addMBB(ProbeLoopBodyMBB);
      ProbeLoopBodyMBB->addSuccessor(ProbeExitMBB);
      ProbeLoopBodyMBB->addSuccessor(ProbeLoopBodyMBB);
    }
    // Update liveins.
    recomputeLiveIns(*ProbeLoopBodyMBB);
    recomputeLiveIns(*ProbeExitMBB);
    return ProbeExitMBB;
  };
  // For the case HasBP && MaxAlign > 1, we have to realign the SP by
  // performing SP = SP - SP % MaxAlign, which makes the probe behave like a
  // dynamic probe, since the offset subtracted from SP is determined by SP's
  // runtime value.
  if (HasBP && MaxAlign > 1) {
    // Calculate the final stack pointer.
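    // Editorial note: the rotate-and-mask below computes
    // ScratchReg = SP % MaxAlign (e.g. with MaxAlign = 32 it keeps the low 5
    // bits of SP); FPReg then becomes the aligned-down SP, and adding
    // NegFrameSize yields the final stack pointer.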
    if (isPPC64)
      BuildMI(*CurrentMBB, {MI}, DL, TII.get(PPC::RLDICL), ScratchReg)
          .addReg(SPReg)
          .addImm(0)
          .addImm(64 - Log2(MaxAlign));
    else
      BuildMI(*CurrentMBB, {MI}, DL, TII.get(PPC::RLWINM), ScratchReg)
          .addReg(SPReg)
          .addImm(0)
          .addImm(32 - Log2(MaxAlign))
          .addImm(31);
    BuildMI(*CurrentMBB, {MI}, DL, TII.get(isPPC64 ? PPC::SUBF8 : PPC::SUBF),
            FPReg)
        .addReg(ScratchReg)
        .addReg(SPReg);
    MaterializeImm(*CurrentMBB, {MI}, NegFrameSize, ScratchReg);
    BuildMI(*CurrentMBB, {MI}, DL, TII.get(isPPC64 ? PPC::ADD8 : PPC::ADD4),
            FPReg)
        .addReg(ScratchReg)
        .addReg(FPReg);
    CurrentMBB = probeRealignedStack(*CurrentMBB, {MI}, ScratchReg, FPReg);
    if (needsCFI)
      buildDefCFAReg(*CurrentMBB, {MI}, FPReg);
  } else {
    // Initialize the current frame pointer.
    BuildMI(*CurrentMBB, {MI}, DL, CopyInst, FPReg).addReg(SPReg).addReg(SPReg);
    // Use FPReg to calculate CFA.
    if (needsCFI)
      buildDefCFA(*CurrentMBB, {MI}, FPReg, 0);
    // Probe the residual part.
    if (NegResidualSize) {
      bool ResidualUseDForm = CanUseDForm(NegResidualSize);
      if (!ResidualUseDForm)
        MaterializeImm(*CurrentMBB, {MI}, NegResidualSize, ScratchReg);
      allocateAndProbe(*CurrentMBB, {MI}, NegResidualSize, ScratchReg,
                       ResidualUseDForm, FPReg);
    }
    bool UseDForm = CanUseDForm(NegProbeSize);
    // If the number of blocks is small, just probe them directly.
    if (NumBlocks < 3) {
      if (!UseDForm)
        MaterializeImm(*CurrentMBB, {MI}, NegProbeSize, ScratchReg);
      for (int i = 0; i < NumBlocks; ++i)
        allocateAndProbe(*CurrentMBB, {MI}, NegProbeSize, ScratchReg, UseDForm,
                         FPReg);
      if (needsCFI) {
        // Restore using SPReg to calculate CFA.
        buildDefCFAReg(*CurrentMBB, {MI}, SPReg);
      }
    } else {
      // Since CTR is a volatile register and the current shrink-wrap
      // implementation won't choose an MBB in a loop as the PrologMBB, it's
      // safe to synthesize a CTR loop to probe.
      // Calculate the trip count and store it in the CTR register.
      MaterializeImm(*CurrentMBB, {MI}, NumBlocks, ScratchReg);
      BuildMI(*CurrentMBB, {MI}, DL, TII.get(isPPC64 ? PPC::MTCTR8 : PPC::MTCTR))
          .addReg(ScratchReg, RegState::Kill);
      if (!UseDForm)
        MaterializeImm(*CurrentMBB, {MI}, NegProbeSize, ScratchReg);
      // Create the MBBs of the loop.
      MachineFunction::iterator MBBInsertPoint =
          std::next(CurrentMBB->getIterator());
      MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(ProbedBB);
      MF.insert(MBBInsertPoint, LoopMBB);
      MachineBasicBlock *ExitMBB = MF.CreateMachineBasicBlock(ProbedBB);
      MF.insert(MBBInsertPoint, ExitMBB);
      // Synthesize the loop body.
      allocateAndProbe(*LoopMBB, LoopMBB->end(), NegProbeSize, ScratchReg,
                       UseDForm, FPReg);
      BuildMI(LoopMBB, DL, TII.get(isPPC64 ? PPC::BDNZ8 : PPC::BDNZ))
          .addMBB(LoopMBB);
      LoopMBB->addSuccessor(ExitMBB);
      LoopMBB->addSuccessor(LoopMBB);
      // Synthesize the exit MBB.
      ExitMBB->splice(ExitMBB->end(), CurrentMBB,
                      std::next(MachineBasicBlock::iterator(MI)),
                      CurrentMBB->end());
      ExitMBB->transferSuccessorsAndUpdatePHIs(CurrentMBB);
      CurrentMBB->addSuccessor(LoopMBB);
      if (needsCFI) {
        // Restore using SPReg to calculate CFA.
        buildDefCFAReg(*ExitMBB, ExitMBB->begin(), SPReg);
      }
      // Update liveins.
      recomputeLiveIns(*LoopMBB);
      recomputeLiveIns(*ExitMBB);
    }
  }
  ++NumPrologProbed;
  MI.eraseFromParent();
}
void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  DebugLoc dl;

  if (MBBI != MBB.end())
    dl = MBBI->getDebugLoc();

  const PPCInstrInfo &TII = *Subtarget.getInstrInfo();
  const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();

  // Get alignment info so we know how to restore the SP.
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Get the number of bytes allocated from the FrameInfo.
  int64_t FrameSize = MFI.getStackSize();

  // Get the processor type.
  bool isPPC64 = Subtarget.isPPC64();

  // Check if the link register (LR) has been saved.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  bool MustSaveLR = FI->mustSaveLR();
  const SmallVectorImpl<Register> &MustSaveCRs = FI->getMustSaveCRs();
  bool MustSaveCR = !MustSaveCRs.empty();

  // Do we have a frame pointer and/or base pointer for this function?
  bool HasFP = hasFP(MF);
  bool HasBP = RegInfo->hasBasePointer(MF);
  bool HasRedZone = Subtarget.isPPC64() || !Subtarget.isSVR4ABI();
  bool HasROPProtect = Subtarget.hasROPProtect();
  bool HasPrivileged = Subtarget.hasPrivileged();

  Register SPReg = isPPC64 ? PPC::X1 : PPC::R1;
  Register BPReg = RegInfo->getBaseRegister(MF);
  Register FPReg = isPPC64 ? PPC::X31 : PPC::R31;
  Register ScratchReg;
  Register TempReg = isPPC64 ? PPC::X12 : PPC::R12; // another scratch reg
  const MCInstrDesc &MTLRInst = TII.get(isPPC64 ? PPC::MTLR8 : PPC::MTLR);
  const MCInstrDesc &LoadInst = TII.get(isPPC64 ? PPC::LD : PPC::LWZ);
  const MCInstrDesc &LoadImmShiftedInst =
      TII.get(isPPC64 ? PPC::LIS8 : PPC::LIS);
  const MCInstrDesc &OrInst = TII.get(isPPC64 ? PPC::OR8 : PPC::OR);
  const MCInstrDesc &OrImmInst = TII.get(isPPC64 ? PPC::ORI8 : PPC::ORI);
  const MCInstrDesc &AddImmInst = TII.get(isPPC64 ? PPC::ADDI8 : PPC::ADDI);
  const MCInstrDesc &AddInst = TII.get(isPPC64 ? PPC::ADD8 : PPC::ADD4);
  const MCInstrDesc &LoadWordInst = TII.get(isPPC64 ? PPC::LWZ8 : PPC::LWZ);
  const MCInstrDesc &MoveToCRInst =
      TII.get(isPPC64 ? PPC::MTOCRF8 : PPC::MTOCRF);
  const MCInstrDesc &HashChk =
      TII.get(isPPC64 ? (HasPrivileged ? PPC::HASHCHKP8 : PPC::HASHCHK8)
                      : (HasPrivileged ? PPC::HASHCHKP : PPC::HASHCHK));
  int64_t LROffset = getReturnSaveOffset();
  int64_t FPOffset = 0;
  // Using the same bool variable as below to suppress compiler warnings.
  bool SingleScratchReg = findScratchRegister(&MBB, true, false, &ScratchReg,
                                              &TempReg);
  assert(SingleScratchReg &&
         "Could not find an available scratch register");
  SingleScratchReg = ScratchReg == TempReg;

  if (HasFP) {
    int FPIndex = FI->getFramePointerSaveIndex();
    assert(FPIndex && "No Frame Pointer Save Slot!");
    FPOffset = MFI.getObjectOffset(FPIndex);
  }

  int64_t BPOffset = 0;
  if (HasBP) {
    int BPIndex = FI->getBasePointerSaveIndex();
    assert(BPIndex && "No Base Pointer Save Slot!");
    BPOffset = MFI.getObjectOffset(BPIndex);
  }

  int64_t PBPOffset = 0;
  if (FI->usesPICBase()) {
    int PBPIndex = FI->getPICBasePointerSaveIndex();
    assert(PBPIndex && "No PIC Base Pointer Save Slot!");
    PBPOffset = MFI.getObjectOffset(PBPIndex);
  }
  bool IsReturnBlock = (MBBI != MBB.end() && MBBI->isReturn());

  if (IsReturnBlock) {
    unsigned RetOpcode = MBBI->getOpcode();
    bool UsesTCRet = RetOpcode == PPC::TCRETURNri ||
                     RetOpcode == PPC::TCRETURNdi ||
                     RetOpcode == PPC::TCRETURNai ||
                     RetOpcode == PPC::TCRETURNri8 ||
                     RetOpcode == PPC::TCRETURNdi8 ||
                     RetOpcode == PPC::TCRETURNai8;

    if (UsesTCRet) {
      int MaxTCRetDelta = FI->getTailCallSPDelta();
      MachineOperand &StackAdjust = MBBI->getOperand(1);
      assert(StackAdjust.isImm() && "Expecting immediate value.");
      // Adjust the stack pointer.
      int StackAdj = StackAdjust.getImm();
      int Delta = StackAdj - MaxTCRetDelta;
      assert((Delta >= 0) && "Delta must be non-negative");
      if (MaxTCRetDelta > 0)
        FrameSize += (StackAdj + Delta);
      else
        FrameSize += StackAdj;
    }
  }
  // Frames of 32KB & larger require special handling because they cannot be
  // indexed into with a simple LD/LWZ immediate offset operand.
  bool isLargeFrame = !isInt<16>(FrameSize);
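  // For illustration (a hypothetical value): FrameSize = 40000 exceeds the
  // signed 16-bit displacement range of LD/LWZ, so it must be treated as a
  // large frame.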
  // On targets without a red zone, the SP needs to be restored last, so that
  // all live contents of the stack frame are upwards of the SP. This means
  // that we cannot restore SP just now, since there may be more registers
  // to restore from the stack frame (e.g. R31). If the frame size is not
  // a simple immediate value, we will need a spare register to hold the
  // restored SP. If the frame size is known and small, we can simply adjust
  // the offsets of the registers to be restored, and still use SP to restore
  // them. In such a case, the final update of SP will be to add the frame
  // size to it.
  // To simplify the code, set RBReg to the base register used to restore
  // values from the stack, and set SPAdd to the value that needs to be added
  // to the SP at the end. The default values are as if a red zone were
  // present.
  unsigned RBReg = SPReg;
  uint64_t SPAdd = 0;
  // Check if we can move the stack update instruction up the epilogue
  // past the callee-saved register restores. This allows the move-to-LR
  // instruction to be executed before those restores, which means the
  // restores can hide the latency of the MTLR instruction.
  MachineBasicBlock::iterator StackUpdateLoc = MBBI;
  if (stackUpdateCanBeMoved(MF)) {
    const std::vector<CalleeSavedInfo> &Info = MFI.getCalleeSavedInfo();
    for (CalleeSavedInfo CSI : Info) {
      // If the callee-saved register is spilled to another register, abort
      // the stack update movement.
      if (CSI.isSpilledToReg()) {
        StackUpdateLoc = MBBI;
        break;
      }
      int FrIdx = CSI.getFrameIdx();
      // If the frame index is not negative, the callee saved info belongs to
      // a stack object that is not a fixed stack object. We ignore non-fixed
      // stack objects because we won't move the update of the stack pointer
      // past them.
      if (FrIdx >= 0)
        continue;

      if (MFI.isFixedObjectIndex(FrIdx) && MFI.getObjectOffset(FrIdx) < 0)
        StackUpdateLoc--;
      else {
        // Abort the operation as we can't update all CSR restores.
        StackUpdateLoc = MBBI;
        break;
      }
    }
  }
  if (FrameSize) {
    // In the prologue, the loaded (or persistent) stack pointer value is
    // offset by the STDU/STDUX/STWU/STWUX instruction. For targets with red
    // zone, add this offset back now.

    // If the function has a base pointer, the stack pointer has been copied
    // to it so we can restore it by copying in the other direction.
    if (HasRedZone && HasBP) {
      BuildMI(MBB, MBBI, dl, OrInst, RBReg)
          .addReg(BPReg)
          .addReg(BPReg);
    }
    // If this function contained a fastcc call and GuaranteedTailCallOpt is
    // enabled (=> hasFastCall()==true) the fastcc call might contain a tail
    // call which invalidates the stack pointer value in SP(0). So we use the
    // value of R31 in this case. A similar situation exists with setjmp.
    else if (FI->hasFastCall() || MF.exposesReturnsTwice()) {
      assert(HasFP && "Expecting a valid frame pointer.");
      if (!HasRedZone)
        RBReg = FPReg;
      if (!isLargeFrame) {
        BuildMI(MBB, MBBI, dl, AddImmInst, RBReg)
            .addReg(FPReg)
            .addImm(FrameSize);
      } else {
        TII.materializeImmPostRA(MBB, MBBI, dl, ScratchReg, FrameSize);
        BuildMI(MBB, MBBI, dl, AddInst)
            .addReg(RBReg)
            .addReg(FPReg)
            .addReg(ScratchReg);
      }
    } else if (!isLargeFrame && !HasBP && !MFI.hasVarSizedObjects()) {
      if (HasRedZone) {
        BuildMI(MBB, StackUpdateLoc, dl, AddImmInst, SPReg)
            .addReg(SPReg)
            .addImm(FrameSize);
      } else {
        // Make sure that adding FrameSize will not overflow the max offset
        // size.
        assert(FPOffset <= 0 && BPOffset <= 0 && PBPOffset <= 0 &&
               "Local offsets should be negative");
        SPAdd = FrameSize;
        FPOffset += FrameSize;
        BPOffset += FrameSize;
        PBPOffset += FrameSize;
      }
    } else {
      // We don't want to use ScratchReg as a base register, because it
      // could happen to be R0. Use FP instead, but make sure to preserve it.
      if (!HasRedZone) {
        // If FP is not saved, copy it to ScratchReg.
        if (!HasFP)
          BuildMI(MBB, MBBI, dl, OrInst, ScratchReg)
              .addReg(FPReg)
              .addReg(FPReg);
        RBReg = FPReg;
      }
      BuildMI(MBB, StackUpdateLoc, dl, LoadInst, RBReg)
          .addImm(0)
          .addReg(SPReg);
    }
  }
  assert(RBReg != ScratchReg && "Should have avoided ScratchReg");
  // If there is no red zone, ScratchReg may be needed for holding a useful
  // value (although not the base register). Make sure it is not overwritten
  // too early.

  // If we need to restore both the LR and the CR and we only have one
  // available scratch register, we must do them one at a time.
  if (MustSaveCR && SingleScratchReg && MustSaveLR) {
    // Here TempReg == ScratchReg, and in the absence of red zone ScratchReg
    // is live here.
    assert(HasRedZone && "Expecting red zone");
    BuildMI(MBB, MBBI, dl, LoadWordInst, TempReg)
        .addImm(CRSaveOffset)
        .addReg(SPReg);
    for (unsigned i = 0, e = MustSaveCRs.size(); i != e; ++i)
      BuildMI(MBB, MBBI, dl, MoveToCRInst, MustSaveCRs[i])
          .addReg(TempReg, getKillRegState(i == e - 1));
  }

  // Delay restoring of the LR if ScratchReg is needed. This is ok, since
  // LR is stored in the caller's stack frame. ScratchReg will be needed
  // if RBReg is anything other than SP. We shouldn't use ScratchReg as
  // a base register anyway, because it may happen to be R0.
  bool LoadedLR = false;
  if (MustSaveLR && RBReg == SPReg && isInt<16>(LROffset + SPAdd)) {
    BuildMI(MBB, StackUpdateLoc, dl, LoadInst, ScratchReg)
        .addImm(LROffset + SPAdd)
        .addReg(RBReg);
    LoadedLR = true;
  }

  if (MustSaveCR && !(SingleScratchReg && MustSaveLR)) {
    assert(RBReg == SPReg && "Should be using SP as a base register");
    BuildMI(MBB, MBBI, dl, LoadWordInst, TempReg)
        .addImm(CRSaveOffset)
        .addReg(RBReg);
  }
  if (HasFP) {
    // If there is a red zone, restore FP directly, since SP has already been
    // restored. Otherwise, restore the value of FP into ScratchReg.
    if (HasRedZone || RBReg == SPReg)
      BuildMI(MBB, MBBI, dl, LoadInst, FPReg)
          .addImm(FPOffset)
          .addReg(SPReg);
    else
      BuildMI(MBB, MBBI, dl, LoadInst, ScratchReg)
          .addImm(FPOffset)
          .addReg(RBReg);
  }

  if (FI->usesPICBase())
    BuildMI(MBB, MBBI, dl, LoadInst, PPC::R30)
        .addImm(PBPOffset)
        .addReg(RBReg);

  if (HasBP)
    BuildMI(MBB, MBBI, dl, LoadInst, BPReg)
        .addImm(BPOffset)
        .addReg(RBReg);

  // There is nothing more to be loaded from the stack, so now we can
  // restore SP: SP = RBReg + SPAdd.
  if (RBReg != SPReg || SPAdd != 0) {
    assert(!HasRedZone && "This should not happen with red zone");
    // If SPAdd is 0, generate a copy.
    if (SPAdd == 0)
      BuildMI(MBB, MBBI, dl, OrInst, SPReg)
          .addReg(RBReg)
          .addReg(RBReg);
    else
      BuildMI(MBB, MBBI, dl, AddImmInst, SPReg)
          .addReg(RBReg)
          .addImm(SPAdd);

    assert(RBReg != ScratchReg && "Should be using FP or SP as base register");
    if (RBReg == FPReg)
      BuildMI(MBB, MBBI, dl, OrInst, FPReg)
          .addReg(ScratchReg)
          .addReg(ScratchReg);

    // Now load the LR from the caller's stack frame.
    if (MustSaveLR && !LoadedLR)
      BuildMI(MBB, MBBI, dl, LoadInst, ScratchReg)
          .addImm(LROffset)
          .addReg(SPReg);
  }

  if (MustSaveCR &&
      !(SingleScratchReg && MustSaveLR))
    for (unsigned i = 0, e = MustSaveCRs.size(); i != e; ++i)
      BuildMI(MBB, MBBI, dl, MoveToCRInst, MustSaveCRs[i])
          .addReg(TempReg, getKillRegState(i == e - 1));

  if (MustSaveLR) {
    // If ROP protection is required, an extra instruction is added to compute
    // a hash and then compare it to the hash stored in the prologue.
    if (HasROPProtect) {
      const int SaveIndex = FI->getROPProtectionHashSaveIndex();
      const int64_t ImmOffset = MFI.getObjectOffset(SaveIndex);
      assert((ImmOffset <= -8 && ImmOffset >= -512) &&
             "ROP hash check location offset out of range.");
      assert(((ImmOffset & 0x7) == 0) &&
             "ROP hash check location offset must be 8 byte aligned.");
      BuildMI(MBB, StackUpdateLoc, dl, HashChk)
          .addReg(ScratchReg)
          .addImm(ImmOffset)
          .addReg(SPReg);
    }
    BuildMI(MBB, StackUpdateLoc, dl, MTLRInst).addReg(ScratchReg);
  }
  // Callee pop calling convention. Pop parameter/linkage area. Used for tail
  // call optimization.
  if (IsReturnBlock) {
    unsigned RetOpcode = MBBI->getOpcode();
    if (MF.getTarget().Options.GuaranteedTailCallOpt &&
        (RetOpcode == PPC::BLR || RetOpcode == PPC::BLR8) &&
        MF.getFunction().getCallingConv() == CallingConv::Fast) {
      PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
      unsigned CallerAllocatedAmt = FI->getMinReservedArea();

      if (CallerAllocatedAmt && isInt<16>(CallerAllocatedAmt)) {
        BuildMI(MBB, MBBI, dl, AddImmInst, SPReg)
            .addReg(SPReg)
            .addImm(CallerAllocatedAmt);
      } else {
        BuildMI(MBB, MBBI, dl, LoadImmShiftedInst, ScratchReg)
            .addImm(CallerAllocatedAmt >> 16);
        BuildMI(MBB, MBBI, dl, OrImmInst, ScratchReg)
            .addReg(ScratchReg, RegState::Kill)
            .addImm(CallerAllocatedAmt & 0xFFFF);
        BuildMI(MBB, MBBI, dl, AddInst)
            .addReg(SPReg)
            .addReg(FPReg)
            .addReg(ScratchReg);
      }
    } else {
      createTailCallBranchInstr(MBB);
    }
  }
}
void PPCFrameLowering::createTailCallBranchInstr(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();

  // If we got this far, a first terminator should exist.
  assert(MBBI != MBB.end() && "Failed to find the first terminator.");

  DebugLoc dl = MBBI->getDebugLoc();
  const PPCInstrInfo &TII = *Subtarget.getInstrInfo();

  // Create a branch instruction for the pseudo tail call return instruction.
  // The TCRETURNdi variants are direct calls. Valid targets for those are
  // MO_GlobalAddress operands as well as MO_ExternalSymbol with PC-Rel,
  // since we can tail call external functions with PC-Rel (i.e. we don't
  // need to worry about different TOC pointers). Some of the external
  // functions will be MO_GlobalAddress while others, like memcpy for
  // example, are going to be MO_ExternalSymbol.
  unsigned RetOpcode = MBBI->getOpcode();
  if (RetOpcode == PPC::TCRETURNdi) {
    MBBI = MBB.getLastNonDebugInstr();
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    if (JumpTarget.isGlobal())
      BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILB))
          .addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
    else if (JumpTarget.isSymbol())
      BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILB))
          .addExternalSymbol(JumpTarget.getSymbolName());
    else
      llvm_unreachable("Expecting Global or External Symbol");
  } else if (RetOpcode == PPC::TCRETURNri) {
    MBBI = MBB.getLastNonDebugInstr();
    assert(MBBI->getOperand(0).isReg() && "Expecting register operand.");
    BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBCTR));
  } else if (RetOpcode == PPC::TCRETURNai) {
    MBBI = MBB.getLastNonDebugInstr();
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBA)).addImm(JumpTarget.getImm());
  } else if (RetOpcode == PPC::TCRETURNdi8) {
    MBBI = MBB.getLastNonDebugInstr();
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    if (JumpTarget.isGlobal())
      BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILB8))
          .addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
    else if (JumpTarget.isSymbol())
      BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILB8))
          .addExternalSymbol(JumpTarget.getSymbolName());
    else
      llvm_unreachable("Expecting Global or External Symbol");
  } else if (RetOpcode == PPC::TCRETURNri8) {
    MBBI = MBB.getLastNonDebugInstr();
    assert(MBBI->getOperand(0).isReg() && "Expecting register operand.");
    BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBCTR8));
  } else if (RetOpcode == PPC::TCRETURNai8) {
    MBBI = MBB.getLastNonDebugInstr();
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBA8)).addImm(JumpTarget.getImm());
  }
}
void PPCFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                            BitVector &SavedRegs,
                                            RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);

  const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();

  // Do not explicitly save the callee saved VSRp registers.
  // The individual VSR subregisters will be saved instead.
  SavedRegs.reset(PPC::VSRp26);
  SavedRegs.reset(PPC::VSRp27);
  SavedRegs.reset(PPC::VSRp28);
  SavedRegs.reset(PPC::VSRp29);
  SavedRegs.reset(PPC::VSRp30);
  SavedRegs.reset(PPC::VSRp31);

  // Save and clear the LR state.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  unsigned LR = RegInfo->getRARegister();
  FI->setMustSaveLR(MustSaveLR(MF, LR));
  SavedRegs.reset(LR);

  // Save R31 if necessary.
  int FPSI = FI->getFramePointerSaveIndex();
  const bool isPPC64 = Subtarget.isPPC64();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // If the frame pointer save index hasn't been defined yet.
  if (!FPSI && needsFP(MF)) {
    // Find out the fixed offset of the frame pointer save area.
    int FPOffset = getFramePointerSaveOffset();
    // Allocate the frame index for the frame pointer save area.
    FPSI = MFI.CreateFixedObject(isPPC64 ? 8 : 4, FPOffset, true);
    // Save the result.
    FI->setFramePointerSaveIndex(FPSI);
  }

  int BPSI = FI->getBasePointerSaveIndex();
  if (!BPSI && RegInfo->hasBasePointer(MF)) {
    int BPOffset = getBasePointerSaveOffset();
    // Allocate the frame index for the base pointer save area.
    BPSI = MFI.CreateFixedObject(isPPC64 ? 8 : 4, BPOffset, true);
    // Save the result.
    FI->setBasePointerSaveIndex(BPSI);
  }

  // Reserve stack space for the PIC Base register (R30).
  // Only used in SVR4 32-bit.
  if (FI->usesPICBase()) {
    int PBPSI = MFI.CreateFixedObject(4, -8, true);
    FI->setPICBasePointerSaveIndex(PBPSI);
  }

  // Make sure we don't explicitly spill r31, because, for example, we have
  // some inline asm which explicitly clobbers it, when we otherwise have a
  // frame pointer and are using r31's spill slot for the prologue/epilogue
  // code. Same goes for the base pointer and the PIC base register.
  if (needsFP(MF))
    SavedRegs.reset(isPPC64 ? PPC::X31 : PPC::R31);
  if (RegInfo->hasBasePointer(MF))
    SavedRegs.reset(RegInfo->getBaseRegister(MF));
  if (FI->usesPICBase())
    SavedRegs.reset(PPC::R30);

  // Reserve stack space to move the linkage area to in case of a tail call.
  int TCSPDelta = 0;
  if (MF.getTarget().Options.GuaranteedTailCallOpt &&
      (TCSPDelta = FI->getTailCallSPDelta()) < 0) {
    MFI.CreateFixedObject(-1 * TCSPDelta, TCSPDelta, true);
  }

  // Allocate the nonvolatile CR spill slot iff the function uses CR 2, 3, or
  // 4. For 64-bit SVR4, and all flavors of AIX, we create a FixedStack
  // object at the offset of the CR-save slot in the linkage area. The actual
  // save and restore of the condition register will be created as part of
  // the prologue and epilogue insertion, but the FixedStack object is needed
  // to keep the CalleeSavedInfo valid.
  if ((SavedRegs.test(PPC::CR2) || SavedRegs.test(PPC::CR3) ||
       SavedRegs.test(PPC::CR4))) {
    const uint64_t SpillSize = 4; // Condition register is always 4 bytes.
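    // Editorial note on the offsets below (an inference, not stated in the
    // original source): the 64-bit ABIs and 32-bit AIX reserve a CR save
    // word in the caller's linkage area (at SP+8 and SP+4 respectively),
    // while 32-bit SVR4 has no such slot and carves one out below the back
    // chain, hence the -4.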
    const int64_t SpillOffset =
        Subtarget.isPPC64() ? 8 : Subtarget.isAIXABI() ? 4 : -4;
    int FrameIdx =
        MFI.CreateFixedObject(SpillSize, SpillOffset,
                              /* IsImmutable */ true, /* IsAliased */ false);
    FI->setCRSpillFrameIndex(FrameIdx);
  }
}
void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF,
                                                           RegScavenger *RS) const {
  // Get callee saved register information.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  // If the function is shrink-wrapped and has a tail call, the tail call
  // might not be in the new RestoreBlock, so the real branch instruction
  // won't be generated by emitEpilogue(), because shrink-wrap has chosen a
  // new RestoreBlock. We handle this case here.
  if (MFI.getSavePoint() && MFI.hasTailCall()) {
    MachineBasicBlock *RestoreBlock = MFI.getRestorePoint();
    for (MachineBasicBlock &MBB : MF) {
      if (MBB.isReturnBlock() && (&MBB) != RestoreBlock)
        createTailCallBranchInstr(MBB);
    }
  }

  // Early exit if no callee saved registers are modified!
  if (CSI.empty() && !needsFP(MF)) {
    addScavengingSpillSlot(MF, RS);
    return;
  }

  unsigned MinGPR = PPC::R31;
  unsigned MinG8R = PPC::X31;
  unsigned MinFPR = PPC::F31;
  unsigned MinVR = Subtarget.hasSPE() ? PPC::S31 : PPC::V31;

  bool HasGPSaveArea = false;
  bool HasG8SaveArea = false;
  bool HasFPSaveArea = false;
  bool HasVRSaveArea = false;

  SmallVector<CalleeSavedInfo, 18> GPRegs;
  SmallVector<CalleeSavedInfo, 18> G8Regs;
  SmallVector<CalleeSavedInfo, 18> FPRegs;
  SmallVector<CalleeSavedInfo, 18> VRegs;

  for (const CalleeSavedInfo &I : CSI) {
    Register Reg = I.getReg();
    assert((!MF.getInfo<PPCFunctionInfo>()->mustSaveTOC() ||
            (Reg != PPC::X2 && Reg != PPC::R2)) &&
           "Not expecting to try to spill R2 in a function that must save TOC");
    if (PPC::GPRCRegClass.contains(Reg)) {
      HasGPSaveArea = true;
      GPRegs.push_back(I);
      if (Reg < MinGPR) {
        MinGPR = Reg;
      }
    } else if (PPC::G8RCRegClass.contains(Reg)) {
      HasG8SaveArea = true;
      G8Regs.push_back(I);
      if (Reg < MinG8R) {
        MinG8R = Reg;
      }
    } else if (PPC::F8RCRegClass.contains(Reg)) {
      HasFPSaveArea = true;
      FPRegs.push_back(I);
      if (Reg < MinFPR) {
        MinFPR = Reg;
      }
    } else if (PPC::CRBITRCRegClass.contains(Reg) ||
               PPC::CRRCRegClass.contains(Reg)) {
      ; // do nothing, as we already know whether CRs are spilled
    } else if (PPC::VRRCRegClass.contains(Reg) ||
               PPC::SPERCRegClass.contains(Reg)) {
      // Altivec and SPE are mutually exclusive, but have the same stack
      // alignment requirements, so overload the save area for both cases.
      HasVRSaveArea = true;
      VRegs.push_back(I);
      if (Reg < MinVR) {
        MinVR = Reg;
      }
    } else {
      llvm_unreachable("Unknown RegisterClass!");
    }
  }
  PPCFunctionInfo *PFI = MF.getInfo<PPCFunctionInfo>();
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();

  int64_t LowerBound = 0;

  // Take into account stack space reserved for tail calls.
  int TCSPDelta = 0;
  if (MF.getTarget().Options.GuaranteedTailCallOpt &&
      (TCSPDelta = PFI->getTailCallSPDelta()) < 0) {
    LowerBound = TCSPDelta;
  }

  // The floating-point register save area is right below the back chain word
  // of the previous stack frame.
  if (HasFPSaveArea) {
    for (unsigned i = 0, e = FPRegs.size(); i != e; ++i) {
      int FI = FPRegs[i].getFrameIdx();
      MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));
    }
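    // For illustration (a hypothetical case): if the lowest saved FPR is
    // F14, the adjustment below reserves (31 - 14 + 1) * 8 = 144 bytes,
    // enough for F14 through F31.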
    LowerBound -= (31 - TRI->getEncodingValue(MinFPR) + 1) * 8;
  }
  // Check whether the frame pointer register is allocated. If so, make sure
  // it is spilled to the correct offset.
  if (needsFP(MF)) {
    int FI = PFI->getFramePointerSaveIndex();
    assert(FI && "No Frame Pointer Save Slot!");
    MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));
    // FP is R31/X31, so no need to update MinGPR/MinG8R.
    HasGPSaveArea = true;
  }

  if (PFI->usesPICBase()) {
    int FI = PFI->getPICBasePointerSaveIndex();
    assert(FI && "No PIC Base Pointer Save Slot!");
    MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));

    MinGPR = std::min<unsigned>(MinGPR, PPC::R30);
    HasGPSaveArea = true;
  }

  const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  if (RegInfo->hasBasePointer(MF)) {
    int FI = PFI->getBasePointerSaveIndex();
    assert(FI && "No Base Pointer Save Slot!");
    MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));

    Register BP = RegInfo->getBaseRegister(MF);
    if (PPC::G8RCRegClass.contains(BP)) {
      MinG8R = std::min<unsigned>(MinG8R, BP);
      HasG8SaveArea = true;
    } else if (PPC::GPRCRegClass.contains(BP)) {
      MinGPR = std::min<unsigned>(MinGPR, BP);
      HasGPSaveArea = true;
    }
  }
  // The general register save area starts right below the floating-point
  // register save area.
  if (HasGPSaveArea || HasG8SaveArea) {
    // Move the 32-bit general register save area spill slots down, taking
    // into account the size of the floating-point register save area.
    for (unsigned i = 0, e = GPRegs.size(); i != e; ++i) {
      if (!GPRegs[i].isSpilledToReg()) {
        int FI = GPRegs[i].getFrameIdx();
        MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));
      }
    }

    // Likewise for the 64-bit general register save area spill slots.
    for (unsigned i = 0, e = G8Regs.size(); i != e; ++i) {
      if (!G8Regs[i].isSpilledToReg()) {
        int FI = G8Regs[i].getFrameIdx();
        MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));
      }
    }

    unsigned MinReg =
        std::min<unsigned>(TRI->getEncodingValue(MinGPR),
                           TRI->getEncodingValue(MinG8R));
    const unsigned GPRegSize = Subtarget.isPPC64() ? 8 : 4;
    LowerBound -= (31 - MinReg + 1) * GPRegSize;
  }
  // For 32-bit only, the CR save area is below the general register
  // save area. For 64-bit SVR4, the CR save area is addressed relative
  // to the stack pointer and hence does not need an adjustment here.
  // Only CR2 (the first nonvolatile spilled) has an associated frame
  // index so that we have a single uniform save area.
  if (spillsCR(MF) && Subtarget.is32BitELFABI()) {
    // Adjust the frame index of the CR spill slot.
    for (const auto &CSInfo : CSI) {
      if (CSInfo.getReg() == PPC::CR2) {
        int FI = CSInfo.getFrameIdx();
        MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));
        break;
      }
    }

    LowerBound -= 4; // The CR save area is always 4 bytes long.
  }
  // Both Altivec and SPE have the same alignment and padding requirements
  // within the stack frame.
  if (HasVRSaveArea) {
    // Insert alignment padding; we need 16-byte alignment. Note: for a
    // positive number the alignment formula is y = (x + (n-1)) & ~(n-1), but
    // since we are using a negative number here (the stack grows downward),
    // we should use y = x & ~(n-1), where x is the size before aligning, n
    // is the alignment size (n = 16 here) and y is the size after aligning.
    assert(LowerBound <= 0 && "Expect LowerBound to have a non-positive value!");
    LowerBound &= ~(15);
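    // For illustration (a hypothetical value): LowerBound = -72 becomes
    // -72 & ~15 = -80, i.e. it is rounded away from zero to the next 16-byte
    // boundary.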
    for (unsigned i = 0, e = VRegs.size(); i != e; ++i) {
      int FI = VRegs[i].getFrameIdx();
      MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));
    }
  }

  addScavengingSpillSlot(MF, RS);
}
void
PPCFrameLowering::addScavengingSpillSlot(MachineFunction &MF,
                                         RegScavenger *RS) const {
  // Reserve a slot closest to SP or frame pointer if we have a dynalloc or
  // a large stack, which will require scavenging a register to materialize a
  // large offset.

  // We need to have a scavenger spill slot for spills if the frame size is
  // large. In case there is no free register for large-offset addressing,
  // this slot is used for the necessary emergency spill. Also, we need the
  // slot for dynamic stack allocations.

  // The scavenger might be invoked if the frame offset does not fit into
  // the 16-bit immediate. We don't know the complete frame size here
  // because we've not yet computed callee-saved register spills or the
  // needed alignment padding.
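  // For illustration (a hypothetical value): with StackSize = 70000, a
  // frame-index displacement no longer fits the signed 16-bit field of a
  // d-form load/store, so the scavenger may need this emergency slot to free
  // a register for address materialization.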
  unsigned StackSize = determineFrameLayout(MF, true);
  MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MFI.hasVarSizedObjects() || spillsCR(MF) || hasNonRISpills(MF) ||
      (hasSpills(MF) && !isInt<16>(StackSize))) {
    const TargetRegisterClass &GPRC = PPC::GPRCRegClass;
    const TargetRegisterClass &G8RC = PPC::G8RCRegClass;
    const TargetRegisterClass &RC = Subtarget.isPPC64() ? G8RC : GPRC;
    const TargetRegisterInfo &TRI = *Subtarget.getRegisterInfo();
    unsigned Size = TRI.getSpillSize(RC);
    Align Alignment = TRI.getSpillAlign(RC);
    RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Alignment, false));

    // Might we have over-aligned allocas?
    bool HasAlVars =
        MFI.hasVarSizedObjects() && MFI.getMaxAlign() > getStackAlign();

    // These kinds of spills might need two registers.
    if (spillsCR(MF) || HasAlVars)
      RS->addScavengingFrameIndex(
          MFI.CreateStackObject(Size, Alignment, false));
  }
}
// This function checks if a callee saved GPR can be spilled to a volatile
// vector register. This occurs for leaf functions when the option
// ppc-enable-pe-vector-spills is enabled. If there are any remaining registers
// which were not spilled to vectors, return false so the target independent
// code can handle them by assigning a FrameIdx to a stack slot.
bool PPCFrameLowering::assignCalleeSavedSpillSlots(
    MachineFunction &MF, const TargetRegisterInfo *TRI,
    std::vector<CalleeSavedInfo> &CSI) const {
  if (CSI.empty())
    return true; // Early exit if no callee saved registers are modified!

  // Early exit if we cannot spill GPRs to volatile vector registers.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  if (!EnablePEVectorSpills || MFI.hasCalls() || !Subtarget.hasP9Vector())
    return false;

  // Build a BitVector of VSRs that can be used for spilling GPRs.
  BitVector BVAllocatable = TRI->getAllocatableSet(MF);
  BitVector BVCalleeSaved(TRI->getNumRegs());
  const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&MF);
  for (unsigned i = 0; CSRegs[i]; ++i)
    BVCalleeSaved.set(CSRegs[i]);

  for (unsigned Reg : BVAllocatable.set_bits()) {
    // Set to 0 if the register is not a volatile VSX register, or if it is
    // used in the function.
    if (BVCalleeSaved[Reg] || !PPC::VSRCRegClass.contains(Reg) ||
        MF.getRegInfo().isPhysRegUsed(Reg))
      BVAllocatable.reset(Reg);
  }

  bool AllSpilledToReg = true;
  unsigned LastVSRUsedForSpill = 0;
  for (auto &CS : CSI) {
    if (BVAllocatable.none())
      return false;

    Register Reg = CS.getReg();

    if (!PPC::G8RCRegClass.contains(Reg)) {
      AllSpilledToReg = false;
      continue;
    }

    // For P9, we can reuse LastVSRUsedForSpill to spill two GPRs
    // into one VSR using the mtvsrdd instruction.
    if (LastVSRUsedForSpill != 0) {
      CS.setDstReg(LastVSRUsedForSpill);
      BVAllocatable.reset(LastVSRUsedForSpill);
      LastVSRUsedForSpill = 0;
      continue;
    }
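    // Editorial note: BitVector::find_first() returns -1 when no bit is set;
    // assigned to an unsigned it wraps to a huge value, so the size
    // comparison below doubles as the "found a free VSR" test.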
    unsigned VolatileVFReg = BVAllocatable.find_first();
    if (VolatileVFReg < BVAllocatable.size()) {
      CS.setDstReg(VolatileVFReg);
      LastVSRUsedForSpill = VolatileVFReg;
    } else {
      AllSpilledToReg = false;
    }
  }
  return AllSpilledToReg;
}
bool PPCFrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  const PPCInstrInfo &TII = *Subtarget.getInstrInfo();
  PPCFunctionInfo *FI = MF->getInfo<PPCFunctionInfo>();
  bool MustSaveTOC = FI->mustSaveTOC();
  DebugLoc DL;
  bool CRSpilled = false;
  MachineInstrBuilder CRMIB;
  BitVector Spilled(TRI->getNumRegs());

  VSRContainingGPRs.clear();

  // Map each VSR to the GPRs to be spilled into it. A single VSR can hold
  // one or two GPRs, so we need a table to record the information for the
  // later save/restore.
  for (const CalleeSavedInfo &Info : CSI) {
    if (Info.isSpilledToReg()) {
      auto &SpilledVSR =
          VSRContainingGPRs.FindAndConstruct(Info.getDstReg()).second;
      assert(SpilledVSR.second == 0 &&
             "Can't spill more than two GPRs into VSR!");
      if (SpilledVSR.first == 0)
        SpilledVSR.first = Info.getReg();
      else
        SpilledVSR.second = Info.getReg();
    }
  }
  for (const CalleeSavedInfo &I : CSI) {
    Register Reg = I.getReg();

    // CR2 through CR4 are the nonvolatile CR fields.
    bool IsCRField = PPC::CR2 <= Reg && Reg <= PPC::CR4;

    // Add the callee-saved register as live-in; it's killed at the spill.
    // Do not do this for callee-saved registers that are live-in to the
    // function because they will already be marked live-in and this will be
    // adding it for a second time. It is an error to add the same register
    // to the set more than once.
    const MachineRegisterInfo &MRI = MF->getRegInfo();
    bool IsLiveIn = MRI.isLiveIn(Reg);
    if (!IsLiveIn)
      MBB.addLiveIn(Reg);

    if (CRSpilled && IsCRField) {
      CRMIB.addReg(Reg, RegState::ImplicitKill);
      continue;
    }

    // The actual spill will happen in the prologue.
    if ((Reg == PPC::X2 || Reg == PPC::R2) && MustSaveTOC)
      continue;

    // Insert the spill to the stack frame.
    if (IsCRField) {
      PPCFunctionInfo *FuncInfo = MF->getInfo<PPCFunctionInfo>();
      if (!Subtarget.is32BitELFABI()) {
        // The actual spill will happen at the start of the prologue.
        FuncInfo->addMustSaveCR(Reg);
      } else {
        CRSpilled = true;
        FuncInfo->setSpillsCR();

        // 32-bit: FP-relative. Note that we made sure CR2-CR4 all have
        // the same frame index in PPCRegisterInfo::hasReservedSpillSlot.
        CRMIB = BuildMI(*MF, DL, TII.get(PPC::MFCR), PPC::R12)
                    .addReg(Reg, RegState::ImplicitKill);

        MBB.insert(MI, CRMIB);
        MBB.insert(MI, addFrameReference(BuildMI(*MF, DL, TII.get(PPC::STW))
                                             .addReg(PPC::R12,
                                                     getKillRegState(true)),
                                         I.getFrameIdx()));
      }
    } else {
      if (I.isSpilledToReg()) {
        unsigned Dst = I.getDstReg();

        if (Spilled[Dst])
          continue;

        if (VSRContainingGPRs[Dst].second != 0) {
          assert(Subtarget.hasP9Vector() &&
                 "mtvsrdd is unavailable on pre-P9 targets.");
          NumPESpillVSR += 2;
          BuildMI(MBB, MI, DL, TII.get(PPC::MTVSRDD), Dst)
              .addReg(VSRContainingGPRs[Dst].first, getKillRegState(true))
              .addReg(VSRContainingGPRs[Dst].second, getKillRegState(true));
        } else if (VSRContainingGPRs[Dst].second == 0) {
          assert(Subtarget.hasP8Vector() &&
                 "Can't move GPR to VSR on pre-P8 targets.");
          ++NumPESpillVSR;
          BuildMI(MBB, MI, DL, TII.get(PPC::MTVSRD),
                  TRI->getSubReg(Dst, PPC::sub_64))
              .addReg(VSRContainingGPRs[Dst].first, getKillRegState(true));
        } else {
          llvm_unreachable("More than two GPRs spilled to a VSR!");
        }
        Spilled.set(Dst);
      } else {
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);

        // Use !IsLiveIn for the kill flag.
        // We do not want to kill registers that are live in this function
        // before their use because they will become undefined registers.
        // Functions without NoUnwind need to preserve the order of elements
        // in saved vector registers.
        if (Subtarget.needsSwapsForVSXMemOps() &&
            !MF->getFunction().hasFnAttribute(Attribute::NoUnwind))
          TII.storeRegToStackSlotNoUpd(MBB, MI, Reg, !IsLiveIn,
                                       I.getFrameIdx(), RC, TRI);
        else
          TII.storeRegToStackSlot(MBB, MI, Reg, !IsLiveIn, I.getFrameIdx(), RC,
                                  TRI, Register());
      }
    }
  }
  return true;
}
static void restoreCRs(bool is31, bool CR2Spilled, bool CR3Spilled,
                       bool CR4Spilled, MachineBasicBlock &MBB,
                       MachineBasicBlock::iterator MI,
                       ArrayRef<CalleeSavedInfo> CSI, unsigned CSIIndex) {
  MachineFunction *MF = MBB.getParent();
  const PPCInstrInfo &TII = *MF->getSubtarget<PPCSubtarget>().getInstrInfo();
  DebugLoc DL;
  unsigned MoveReg = PPC::R12;

  // 32-bit: FP-relative
  MBB.insert(MI,
             addFrameReference(BuildMI(*MF, DL, TII.get(PPC::LWZ), MoveReg),
                               CSI[CSIIndex].getFrameIdx()));

  unsigned RestoreOp = PPC::MTOCRF;
  if (CR2Spilled)
    MBB.insert(MI,
               BuildMI(*MF, DL, TII.get(RestoreOp), PPC::CR2)
                   .addReg(MoveReg,
                           getKillRegState(!CR3Spilled && !CR4Spilled)));

  if (CR3Spilled)
    MBB.insert(MI, BuildMI(*MF, DL, TII.get(RestoreOp), PPC::CR3)
                       .addReg(MoveReg, getKillRegState(!CR4Spilled)));

  if (CR4Spilled)
    MBB.insert(MI, BuildMI(*MF, DL, TII.get(RestoreOp), PPC::CR4)
                       .addReg(MoveReg, getKillRegState(true)));
}
MachineBasicBlock::iterator PPCFrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  if (MF.getTarget().Options.GuaranteedTailCallOpt &&
      I->getOpcode() == PPC::ADJCALLSTACKUP) {
    // Add (actually subtract) back the amount the callee popped on return.
    if (int CalleeAmt = I->getOperand(1).getImm()) {
      bool is64Bit = Subtarget.isPPC64();
      CalleeAmt *= -1;
      unsigned StackReg = is64Bit ? PPC::X1 : PPC::R1;
      unsigned TmpReg = is64Bit ? PPC::X0 : PPC::R0;
      unsigned ADDIInstr = is64Bit ? PPC::ADDI8 : PPC::ADDI;
      unsigned ADDInstr = is64Bit ? PPC::ADD8 : PPC::ADD4;
      unsigned LISInstr = is64Bit ? PPC::LIS8 : PPC::LIS;
      unsigned ORIInstr = is64Bit ? PPC::ORI8 : PPC::ORI;
      const DebugLoc &dl = I->getDebugLoc();
  2271. if (isInt<16>(CalleeAmt)) {
  2272. BuildMI(MBB, I, dl, TII.get(ADDIInstr), StackReg)
  2273. .addReg(StackReg, RegState::Kill)
  2274. .addImm(CalleeAmt);
  2275. } else {
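        // Too large for a single addi: materialize CalleeAmt in TmpReg with
        // lis/ori, then add it to the stack pointer. Illustrative expansion
        // (hypothetical value) for CalleeAmt == -65600 (0xFFFEFFC0) in
        // 32-bit mode:
        //   lis r0, -2          # r0 = 0xFFFE0000  (CalleeAmt >> 16)
        //   ori r0, r0, 65472   # r0 |= 0xFFC0     (CalleeAmt & 0xFFFF)
        //   add r1, r1, r0      # r1 += CalleeAmt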
        MachineBasicBlock::iterator MBBI = I;
        BuildMI(MBB, MBBI, dl, TII.get(LISInstr), TmpReg)
            .addImm(CalleeAmt >> 16);
        BuildMI(MBB, MBBI, dl, TII.get(ORIInstr), TmpReg)
            .addReg(TmpReg, RegState::Kill)
            .addImm(CalleeAmt & 0xFFFF);
        BuildMI(MBB, MBBI, dl, TII.get(ADDInstr), StackReg)
            .addReg(StackReg, RegState::Kill)
            .addReg(TmpReg);
      }
    }
  }
  // Simply discard ADJCALLSTACKDOWN, ADJCALLSTACKUP instructions.
  return MBB.erase(I);
}

static bool isCalleeSavedCR(unsigned Reg) {
  return Reg == PPC::CR2 || Reg == PPC::CR3 || Reg == PPC::CR4;
}
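
// Emit the restores for all callee-saved registers before MI. On 32-bit ELF,
// CR2-CR4 share one spill slot and are restored together via restoreCRs();
// GPRs that were spilled to a VSR are moved back with mfvsrd/mfvsrld; every
// other register is simply reloaded from its frame index.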
bool PPCFrameLowering::restoreCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  const PPCInstrInfo &TII = *Subtarget.getInstrInfo();
  PPCFunctionInfo *FI = MF->getInfo<PPCFunctionInfo>();
  bool MustSaveTOC = FI->mustSaveTOC();
  bool CR2Spilled = false;
  bool CR3Spilled = false;
  bool CR4Spilled = false;
  unsigned CSIIndex = 0;
  BitVector Restored(TRI->getNumRegs());

  // Initialize insertion-point logic; we will be restoring in reverse
  // order of spill. BeforeI remembers the instruction just before the code
  // we emit, so I can be reset to place each later restore earlier.
  MachineBasicBlock::iterator I = MI, BeforeI = I;
  bool AtStart = I == MBB.begin();

  if (!AtStart)
    --BeforeI;

  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    Register Reg = CSI[i].getReg();

    if ((Reg == PPC::X2 || Reg == PPC::R2) && MustSaveTOC)
      continue;

    // Restore of callee-saved condition register fields is handled during
    // epilogue insertion.
    if (isCalleeSavedCR(Reg) && !Subtarget.is32BitELFABI())
      continue;

    if (Reg == PPC::CR2) {
      CR2Spilled = true;
      // The spill slot is associated only with CR2, which is the
      // first nonvolatile CR field spilled. Save its index here.
      CSIIndex = i;
      continue;
    } else if (Reg == PPC::CR3) {
      CR3Spilled = true;
      continue;
    } else if (Reg == PPC::CR4) {
      CR4Spilled = true;
      continue;
    } else {
      // On 32-bit ELF, when we first encounter a non-CR register after
      // seeing at least one CR register, restore all spilled CRs together.
      if (CR2Spilled || CR3Spilled || CR4Spilled) {
        bool is31 = needsFP(*MF);
        restoreCRs(is31, CR2Spilled, CR3Spilled, CR4Spilled, MBB, I, CSI,
                   CSIIndex);
        CR2Spilled = CR3Spilled = CR4Spilled = false;
      }

      if (CSI[i].isSpilledToReg()) {
        DebugLoc DL;
        unsigned Dst = CSI[i].getDstReg();

        if (Restored[Dst])
          continue;
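
        // Unpack the GPR(s) from the spill VSR, mirroring the mtvsrdd/mtvsrd
        // packing done at spill time: mfvsrld (P9) reads the low doubleword,
        // mfvsrd reads doubleword 0.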
        if (VSRContainingGPRs[Dst].second != 0) {
          assert(Subtarget.hasP9Vector() &&
                 "mfvsrld is unavailable on pre-P9 targets.");
          NumPEReloadVSR += 2;
          BuildMI(MBB, I, DL, TII.get(PPC::MFVSRLD),
                  VSRContainingGPRs[Dst].second)
              .addReg(Dst);
          BuildMI(MBB, I, DL, TII.get(PPC::MFVSRD),
                  VSRContainingGPRs[Dst].first)
              .addReg(TRI->getSubReg(Dst, PPC::sub_64), getKillRegState(true));
        } else if (VSRContainingGPRs[Dst].second == 0) {
          assert(Subtarget.hasP8Vector() &&
                 "Can't move VSR to GPR on pre-P8 targets.");
          ++NumPEReloadVSR;
          BuildMI(MBB, I, DL, TII.get(PPC::MFVSRD),
                  VSRContainingGPRs[Dst].first)
              .addReg(TRI->getSubReg(Dst, PPC::sub_64), getKillRegState(true));
        } else {
          llvm_unreachable("More than two GPRs spilled to a VSR!");
        }
        Restored.set(Dst);
      } else {
        // Default behavior for non-CR restores.
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);

        // Functions without NoUnwind need to preserve the order of elements
        // in saved vector registers.
        if (Subtarget.needsSwapsForVSXMemOps() &&
            !MF->getFunction().hasFnAttribute(Attribute::NoUnwind))
          TII.loadRegFromStackSlotNoUpd(MBB, I, Reg, CSI[i].getFrameIdx(), RC,
                                        TRI);
        else
          TII.loadRegFromStackSlot(MBB, I, Reg, CSI[i].getFrameIdx(), RC, TRI,
                                   Register());

        assert(I != MBB.begin() &&
               "loadRegFromStackSlot didn't insert any code!");
      }
    }

    // Insert in reverse order: reset I to just after BeforeI so the next
    // restore is emitted before the code we just inserted.
    if (AtStart)
      I = MBB.begin();
    else {
      I = BeforeI;
      ++I;
    }
  }

  // If we haven't yet restored the CRs, do so now.
  if (CR2Spilled || CR3Spilled || CR4Spilled) {
    assert(Subtarget.is32BitELFABI() &&
           "Only set CR[2|3|4]Spilled on 32-bit SVR4.");
    bool is31 = needsFP(*MF);
    restoreCRs(is31, CR2Spilled, CR3Spilled, CR4Spilled, MBB, I, CSI, CSIIndex);
  }
  return true;
}

uint64_t PPCFrameLowering::getTOCSaveOffset() const { return TOCSaveOffset; }

uint64_t PPCFrameLowering::getFramePointerSaveOffset() const {
  return FramePointerSaveOffset;
}

uint64_t PPCFrameLowering::getBasePointerSaveOffset() const {
  return BasePointerSaveOffset;
}

bool PPCFrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
  if (MF.getInfo<PPCFunctionInfo>()->shrinkWrapDisabled())
    return false;
  return !MF.getSubtarget<PPCSubtarget>().is32BitELFABI();
}