PrologEpilogInserter.cpp
  1. //===- PrologEpilogInserter.cpp - Insert Prolog/Epilog code in function ---===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This pass is responsible for finalizing the function's frame layout, saving
  10. // callee saved registers, and for emitting prolog & epilog code for the
  11. // function.
  12. //
  13. // This pass must be run after register allocation. After this pass is
  14. // executed, it is illegal to construct MO_FrameIndex operands.
  15. //
  16. //===----------------------------------------------------------------------===//
  17. #include "llvm/ADT/ArrayRef.h"
  18. #include "llvm/ADT/BitVector.h"
  19. #include "llvm/ADT/DepthFirstIterator.h"
  20. #include "llvm/ADT/STLExtras.h"
  21. #include "llvm/ADT/SetVector.h"
  22. #include "llvm/ADT/SmallPtrSet.h"
  23. #include "llvm/ADT/SmallSet.h"
  24. #include "llvm/ADT/SmallVector.h"
  25. #include "llvm/ADT/Statistic.h"
  26. #include "llvm/Analysis/OptimizationRemarkEmitter.h"
  27. #include "llvm/CodeGen/MachineBasicBlock.h"
  28. #include "llvm/CodeGen/MachineDominators.h"
  29. #include "llvm/CodeGen/MachineFrameInfo.h"
  30. #include "llvm/CodeGen/MachineFunction.h"
  31. #include "llvm/CodeGen/MachineFunctionPass.h"
  32. #include "llvm/CodeGen/MachineInstr.h"
  33. #include "llvm/CodeGen/MachineInstrBuilder.h"
  34. #include "llvm/CodeGen/MachineLoopInfo.h"
  35. #include "llvm/CodeGen/MachineModuleInfo.h"
  36. #include "llvm/CodeGen/MachineOperand.h"
  37. #include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
  38. #include "llvm/CodeGen/MachineRegisterInfo.h"
  39. #include "llvm/CodeGen/RegisterScavenging.h"
  40. #include "llvm/CodeGen/TargetFrameLowering.h"
  41. #include "llvm/CodeGen/TargetInstrInfo.h"
  42. #include "llvm/CodeGen/TargetOpcodes.h"
  43. #include "llvm/CodeGen/TargetRegisterInfo.h"
  44. #include "llvm/CodeGen/TargetSubtargetInfo.h"
  45. #include "llvm/CodeGen/WinEHFuncInfo.h"
  46. #include "llvm/IR/Attributes.h"
  47. #include "llvm/IR/CallingConv.h"
  48. #include "llvm/IR/DebugInfoMetadata.h"
  49. #include "llvm/IR/DiagnosticInfo.h"
  50. #include "llvm/IR/Function.h"
  51. #include "llvm/IR/InlineAsm.h"
  52. #include "llvm/IR/LLVMContext.h"
  53. #include "llvm/InitializePasses.h"
  54. #include "llvm/MC/MCRegisterInfo.h"
  55. #include "llvm/Pass.h"
  56. #include "llvm/Support/CodeGen.h"
  57. #include "llvm/Support/CommandLine.h"
  58. #include "llvm/Support/Debug.h"
  59. #include "llvm/Support/ErrorHandling.h"
  60. #include "llvm/Support/MathExtras.h"
  61. #include "llvm/Support/raw_ostream.h"
  62. #include "llvm/Target/TargetMachine.h"
  63. #include "llvm/Target/TargetOptions.h"
  64. #include <algorithm>
  65. #include <cassert>
  66. #include <cstdint>
  67. #include <functional>
  68. #include <limits>
  69. #include <utility>
  70. #include <vector>
  71. using namespace llvm;
  72. #define DEBUG_TYPE "prologepilog"
  73. using MBBVector = SmallVector<MachineBasicBlock *, 4>;
  74. STATISTIC(NumLeafFuncWithSpills, "Number of leaf functions with CSRs");
  75. STATISTIC(NumFuncSeen, "Number of functions seen in PEI");
  76. namespace {
  77. class PEI : public MachineFunctionPass {
  78. public:
  79. static char ID;
  80. PEI() : MachineFunctionPass(ID) {
  81. initializePEIPass(*PassRegistry::getPassRegistry());
  82. }
  83. void getAnalysisUsage(AnalysisUsage &AU) const override;
  84. /// runOnMachineFunction - Insert prolog/epilog code and replace abstract
  85. /// frame indexes with appropriate references.
  86. bool runOnMachineFunction(MachineFunction &MF) override;
  87. private:
  88. RegScavenger *RS;
  89. // MinCSFrameIndex, MaxCSFrameIndex - Keeps the range of callee saved
  90. // stack frame indexes.
  91. unsigned MinCSFrameIndex = std::numeric_limits<unsigned>::max();
  92. unsigned MaxCSFrameIndex = 0;
  93. // Save and Restore blocks of the current function. Typically there is a
  94. // single save block, unless Windows EH funclets are involved.
  95. MBBVector SaveBlocks;
  96. MBBVector RestoreBlocks;
  97. // Flag to control whether to use the register scavenger to resolve
  98. // frame index materialization registers. Set according to
  99. // TRI->requiresFrameIndexScavenging() for the current function.
  100. bool FrameIndexVirtualScavenging;
  101. // Flag to control whether the scavenger should be passed even though
  102. // FrameIndexVirtualScavenging is used.
  103. bool FrameIndexEliminationScavenging;
  104. // Emit remarks.
  105. MachineOptimizationRemarkEmitter *ORE = nullptr;
  106. void calculateCallFrameInfo(MachineFunction &MF);
  107. void calculateSaveRestoreBlocks(MachineFunction &MF);
  108. void spillCalleeSavedRegs(MachineFunction &MF);
  109. void calculateFrameObjectOffsets(MachineFunction &MF);
  110. void replaceFrameIndices(MachineFunction &MF);
  111. void replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &MF,
  112. int &SPAdj);
  113. void insertPrologEpilogCode(MachineFunction &MF);
  114. };
  115. } // end anonymous namespace
  116. char PEI::ID = 0;
  117. char &llvm::PrologEpilogCodeInserterID = PEI::ID;
  118. INITIALIZE_PASS_BEGIN(PEI, DEBUG_TYPE, "Prologue/Epilogue Insertion", false,
  119. false)
  120. INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
  121. INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
  122. INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass)
  123. INITIALIZE_PASS_END(PEI, DEBUG_TYPE,
  124. "Prologue/Epilogue Insertion & Frame Finalization", false,
  125. false)
  126. MachineFunctionPass *llvm::createPrologEpilogInserterPass() {
  127. return new PEI();
  128. }
  129. STATISTIC(NumBytesStackSpace,
  130. "Number of bytes used for stack in all functions");
  131. void PEI::getAnalysisUsage(AnalysisUsage &AU) const {
  132. AU.setPreservesCFG();
  133. AU.addPreserved<MachineLoopInfo>();
  134. AU.addPreserved<MachineDominatorTree>();
  135. AU.addRequired<MachineOptimizationRemarkEmitterPass>();
  136. MachineFunctionPass::getAnalysisUsage(AU);
  137. }
  138. /// StackObjSet - A set of stack object indexes
  139. using StackObjSet = SmallSetVector<int, 8>;
  140. using SavedDbgValuesMap =
  141. SmallDenseMap<MachineBasicBlock *, SmallVector<MachineInstr *, 4>, 4>;
  142. /// Stash DBG_VALUEs that describe parameters and which are placed at the start
  143. /// of the block. Later on, after the prologue code has been emitted, the
  144. /// stashed DBG_VALUEs will be reinserted at the start of the block.
  145. static void stashEntryDbgValues(MachineBasicBlock &MBB,
  146. SavedDbgValuesMap &EntryDbgValues) {
  147. SmallVector<const MachineInstr *, 4> FrameIndexValues;
  148. for (auto &MI : MBB) {
  149. if (!MI.isDebugInstr())
  150. break;
  151. if (!MI.isDebugValue() || !MI.getDebugVariable()->isParameter())
  152. continue;
  153. if (any_of(MI.debug_operands(),
  154. [](const MachineOperand &MO) { return MO.isFI(); })) {
  155. // We can only emit valid locations for frame indices after the frame
  156. // setup, so do not stash them away.
  157. FrameIndexValues.push_back(&MI);
  158. continue;
  159. }
  160. const DILocalVariable *Var = MI.getDebugVariable();
  161. const DIExpression *Expr = MI.getDebugExpression();
  162. auto Overlaps = [Var, Expr](const MachineInstr *DV) {
  163. return Var == DV->getDebugVariable() &&
  164. Expr->fragmentsOverlap(DV->getDebugExpression());
  165. };
  166. // See if the debug value overlaps with any preceding debug value that will
  167. // not be stashed. If that is the case, then we can't stash this value, as
  168. // we would then reorder the values at reinsertion.
  169. if (llvm::none_of(FrameIndexValues, Overlaps))
  170. EntryDbgValues[&MBB].push_back(&MI);
  171. }
  172. // Remove stashed debug values from the block.
  173. if (EntryDbgValues.count(&MBB))
  174. for (auto *MI : EntryDbgValues[&MBB])
  175. MI->removeFromParent();
  176. }
  177. /// runOnMachineFunction - Insert prolog/epilog code and replace abstract
  178. /// frame indexes with appropriate references.
  179. bool PEI::runOnMachineFunction(MachineFunction &MF) {
  180. NumFuncSeen++;
  181. const Function &F = MF.getFunction();
  182. const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  183. const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  184. RS = TRI->requiresRegisterScavenging(MF) ? new RegScavenger() : nullptr;
  185. FrameIndexVirtualScavenging = TRI->requiresFrameIndexScavenging(MF);
  186. ORE = &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE();
  187. // Calculate the MaxCallFrameSize and AdjustsStack variables for the
  188. // function's frame information. Also eliminates call frame pseudo
  189. // instructions.
  190. calculateCallFrameInfo(MF);
  191. // Determine placement of CSR spill/restore code and prolog/epilog code:
  192. // place all spills in the entry block, all restores in return blocks.
  193. calculateSaveRestoreBlocks(MF);
  194. // Stash away DBG_VALUEs that should not be moved by insertion of prolog code.
  195. SavedDbgValuesMap EntryDbgValues;
  196. for (MachineBasicBlock *SaveBlock : SaveBlocks)
  197. stashEntryDbgValues(*SaveBlock, EntryDbgValues);
  198. // Handle CSR spilling and restoring, for targets that need it.
  199. if (MF.getTarget().usesPhysRegsForValues())
  200. spillCalleeSavedRegs(MF);
  201. // Allow the target machine to make final modifications to the function
  202. // before the frame layout is finalized.
  203. TFI->processFunctionBeforeFrameFinalized(MF, RS);
  204. // Calculate actual frame offsets for all abstract stack objects...
  205. calculateFrameObjectOffsets(MF);
  206. // Add prolog and epilog code to the function. This function is required
  207. // to align the stack frame as necessary for any stack variables or
  208. // called functions. Because of this, calculateCalleeSavedRegisters()
  209. // must be called before this function in order to set the AdjustsStack
  210. // and MaxCallFrameSize variables.
  211. if (!F.hasFnAttribute(Attribute::Naked))
  212. insertPrologEpilogCode(MF);
  213. // Reinsert stashed debug values at the start of the entry blocks.
  214. for (auto &I : EntryDbgValues)
  215. I.first->insert(I.first->begin(), I.second.begin(), I.second.end());
  216. // Allow the target machine to make final modifications to the function
  217. // before the frame layout is finalized.
  218. TFI->processFunctionBeforeFrameIndicesReplaced(MF, RS);
  219. // Replace all MO_FrameIndex operands with physical register references
  220. // and actual offsets.
  221. //
  222. replaceFrameIndices(MF);
  223. // If register scavenging is needed, as we've enabled doing it as a
  224. // post-pass, scavenge the virtual registers that frame index elimination
  225. // inserted.
  226. if (TRI->requiresRegisterScavenging(MF) && FrameIndexVirtualScavenging)
  227. scavengeFrameVirtualRegs(MF, *RS);
  228. // Warn on the stack size when it exceeds the given limit.
  229. MachineFrameInfo &MFI = MF.getFrameInfo();
  230. uint64_t StackSize = MFI.getStackSize();
  231. unsigned Threshold = UINT_MAX;
  232. if (MF.getFunction().hasFnAttribute("warn-stack-size")) {
  233. bool Failed = MF.getFunction()
  234. .getFnAttribute("warn-stack-size")
  235. .getValueAsString()
  236. .getAsInteger(10, Threshold);
  237. // Verifier should have caught this.
  238. assert(!Failed && "Invalid warn-stack-size fn attr value");
  239. (void)Failed;
  240. }
  241. if (StackSize > Threshold) {
  242. DiagnosticInfoStackSize DiagStackSize(F, StackSize, Threshold, DS_Warning);
  243. F.getContext().diagnose(DiagStackSize);
  244. }
  245. ORE->emit([&]() {
  246. return MachineOptimizationRemarkAnalysis(DEBUG_TYPE, "StackSize",
  247. MF.getFunction().getSubprogram(),
  248. &MF.front())
  249. << ore::NV("NumStackBytes", StackSize) << " stack bytes in function";
  250. });
  251. delete RS;
  252. SaveBlocks.clear();
  253. RestoreBlocks.clear();
  254. MFI.setSavePoint(nullptr);
  255. MFI.setRestorePoint(nullptr);
  256. return true;
  257. }
  258. /// Calculate the MaxCallFrameSize and AdjustsStack
  259. /// variables for the function's frame information and eliminate call frame
  260. /// pseudo instructions.
  261. void PEI::calculateCallFrameInfo(MachineFunction &MF) {
  262. const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  263. const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  264. MachineFrameInfo &MFI = MF.getFrameInfo();
  265. unsigned MaxCallFrameSize = 0;
  266. bool AdjustsStack = MFI.adjustsStack();
  267. // Get the function call frame set-up and tear-down instruction opcode
  268. unsigned FrameSetupOpcode = TII.getCallFrameSetupOpcode();
  269. unsigned FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();
  270. // Early exit for targets which have no call frame setup/destroy pseudo
  271. // instructions.
  272. if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
  273. return;
  274. std::vector<MachineBasicBlock::iterator> FrameSDOps;
  275. for (MachineBasicBlock &BB : MF)
  276. for (MachineBasicBlock::iterator I = BB.begin(); I != BB.end(); ++I)
  277. if (TII.isFrameInstr(*I)) {
  278. unsigned Size = TII.getFrameSize(*I);
  279. if (Size > MaxCallFrameSize) MaxCallFrameSize = Size;
  280. AdjustsStack = true;
  281. FrameSDOps.push_back(I);
  282. } else if (I->isInlineAsm()) {
  283. // Some inline asm's need a stack frame, as indicated by operand 1.
  284. unsigned ExtraInfo = I->getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
  285. if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
  286. AdjustsStack = true;
  287. }
  288. assert(!MFI.isMaxCallFrameSizeComputed() ||
  289. (MFI.getMaxCallFrameSize() == MaxCallFrameSize &&
  290. MFI.adjustsStack() == AdjustsStack));
  291. MFI.setAdjustsStack(AdjustsStack);
  292. MFI.setMaxCallFrameSize(MaxCallFrameSize);
  293. for (MachineBasicBlock::iterator I : FrameSDOps) {
  294. // If call frames are not being included as part of the stack frame, and
  295. // the target doesn't indicate otherwise, remove the call frame pseudos
  296. // here. The sub/add sp instruction pairs are still inserted, but we don't
  297. // need to track the SP adjustment for frame index elimination.
  298. if (TFI->canSimplifyCallFramePseudos(MF))
  299. TFI->eliminateCallFramePseudoInstr(MF, *I->getParent(), I);
  300. }
  301. }
  302. /// Compute the sets of entry and return blocks for saving and restoring
  303. /// callee-saved registers, and placing prolog and epilog code.
  304. void PEI::calculateSaveRestoreBlocks(MachineFunction &MF) {
  305. const MachineFrameInfo &MFI = MF.getFrameInfo();
  306. // Even when we do not change any CSR, we still want to insert the
  307. // prologue and epilogue of the function.
  308. // So set the save points for those.
  309. // Use the points found by shrink-wrapping, if any.
  310. if (MFI.getSavePoint()) {
  311. SaveBlocks.push_back(MFI.getSavePoint());
  312. assert(MFI.getRestorePoint() && "Both restore and save must be set");
  313. MachineBasicBlock *RestoreBlock = MFI.getRestorePoint();
  314. // If RestoreBlock does not have any successor and is not a return block
  315. // then the end point is unreachable and we do not need to insert any
  316. // epilogue.
  317. if (!RestoreBlock->succ_empty() || RestoreBlock->isReturnBlock())
  318. RestoreBlocks.push_back(RestoreBlock);
  319. return;
  320. }
  321. // Save refs to entry and return blocks.
  322. SaveBlocks.push_back(&MF.front());
  323. for (MachineBasicBlock &MBB : MF) {
  324. if (MBB.isEHFuncletEntry())
  325. SaveBlocks.push_back(&MBB);
  326. if (MBB.isReturnBlock())
  327. RestoreBlocks.push_back(&MBB);
  328. }
  329. }
  330. static void assignCalleeSavedSpillSlots(MachineFunction &F,
  331. const BitVector &SavedRegs,
  332. unsigned &MinCSFrameIndex,
  333. unsigned &MaxCSFrameIndex) {
  334. if (SavedRegs.empty())
  335. return;
  336. const TargetRegisterInfo *RegInfo = F.getSubtarget().getRegisterInfo();
  337. const MCPhysReg *CSRegs = F.getRegInfo().getCalleeSavedRegs();
  338. BitVector CSMask(SavedRegs.size());
  339. for (unsigned i = 0; CSRegs[i]; ++i)
  340. CSMask.set(CSRegs[i]);
  341. std::vector<CalleeSavedInfo> CSI;
  342. for (unsigned i = 0; CSRegs[i]; ++i) {
  343. unsigned Reg = CSRegs[i];
  344. if (SavedRegs.test(Reg)) {
  345. bool SavedSuper = false;
  346. for (const MCPhysReg &SuperReg : RegInfo->superregs(Reg)) {
  347. // Some backends set all aliases for some registers as saved, such as
  348. // Mips's $fp, so they appear in SavedRegs but not CSRegs.
  349. if (SavedRegs.test(SuperReg) && CSMask.test(SuperReg)) {
  350. SavedSuper = true;
  351. break;
  352. }
  353. }
  354. if (!SavedSuper)
  355. CSI.push_back(CalleeSavedInfo(Reg));
  356. }
  357. }
  358. const TargetFrameLowering *TFI = F.getSubtarget().getFrameLowering();
  359. MachineFrameInfo &MFI = F.getFrameInfo();
  360. if (!TFI->assignCalleeSavedSpillSlots(F, RegInfo, CSI, MinCSFrameIndex,
  361. MaxCSFrameIndex)) {
  362. // If target doesn't implement this, use generic code.
  363. if (CSI.empty())
  364. return; // Early exit if no callee saved registers are modified!
  365. unsigned NumFixedSpillSlots;
  366. const TargetFrameLowering::SpillSlot *FixedSpillSlots =
  367. TFI->getCalleeSavedSpillSlots(NumFixedSpillSlots);
  368. // Now that we know which registers need to be saved and restored, allocate
  369. // stack slots for them.
  370. for (auto &CS : CSI) {
  371. // If the target has spilled this register to another register, we don't
  372. // need to allocate a stack slot.
  373. if (CS.isSpilledToReg())
  374. continue;
  375. unsigned Reg = CS.getReg();
  376. const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);
  377. int FrameIdx;
  378. if (RegInfo->hasReservedSpillSlot(F, Reg, FrameIdx)) {
  379. CS.setFrameIdx(FrameIdx);
  380. continue;
  381. }
  382. // Check to see if this physreg must be spilled to a particular stack slot
  383. // on this target.
  384. const TargetFrameLowering::SpillSlot *FixedSlot = FixedSpillSlots;
  385. while (FixedSlot != FixedSpillSlots + NumFixedSpillSlots &&
  386. FixedSlot->Reg != Reg)
  387. ++FixedSlot;
  388. unsigned Size = RegInfo->getSpillSize(*RC);
  389. if (FixedSlot == FixedSpillSlots + NumFixedSpillSlots) {
  390. // Nope, just spill it anywhere convenient.
  391. Align Alignment = RegInfo->getSpillAlign(*RC);
  392. // We may not be able to satisfy the desired alignment specification of
  393. // the TargetRegisterClass if the stack alignment is smaller. Use the
  394. // min.
  395. Alignment = std::min(Alignment, TFI->getStackAlign());
  396. FrameIdx = MFI.CreateStackObject(Size, Alignment, true);
  397. if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
  398. if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
  399. } else {
  400. // Spill it to the stack where we must.
  401. FrameIdx = MFI.CreateFixedSpillStackObject(Size, FixedSlot->Offset);
  402. }
  403. CS.setFrameIdx(FrameIdx);
  404. }
  405. }
  406. MFI.setCalleeSavedInfo(CSI);
  407. }
  408. /// Helper function to update the liveness information for the callee-saved
  409. /// registers.
  410. static void updateLiveness(MachineFunction &MF) {
  411. MachineFrameInfo &MFI = MF.getFrameInfo();
  412. // Visited will contain all the basic blocks that are in the region
  413. // where the callee saved registers are alive:
  414. // - Anything that is not Save or Restore -> LiveThrough.
  415. // - Save -> LiveIn.
  416. // - Restore -> LiveOut.
  417. // The live-out is not attached to the block, so no need to keep
  418. // Restore in this set.
  419. SmallPtrSet<MachineBasicBlock *, 8> Visited;
  420. SmallVector<MachineBasicBlock *, 8> WorkList;
  421. MachineBasicBlock *Entry = &MF.front();
  422. MachineBasicBlock *Save = MFI.getSavePoint();
  423. if (!Save)
  424. Save = Entry;
  425. if (Entry != Save) {
  426. WorkList.push_back(Entry);
  427. Visited.insert(Entry);
  428. }
  429. Visited.insert(Save);
  430. MachineBasicBlock *Restore = MFI.getRestorePoint();
  431. if (Restore)
  432. // By construction Restore cannot be visited, otherwise it
  433. // means there exists a path to Restore that does not go
  434. // through Save.
  435. WorkList.push_back(Restore);
  436. while (!WorkList.empty()) {
  437. const MachineBasicBlock *CurBB = WorkList.pop_back_val();
  438. // By construction, the region that is after the save point is
  439. // dominated by the Save and post-dominated by the Restore.
  440. if (CurBB == Save && Save != Restore)
  441. continue;
  442. // Enqueue all the successors not already visited.
  443. // Those are by construction either before Save or after Restore.
  444. for (MachineBasicBlock *SuccBB : CurBB->successors())
  445. if (Visited.insert(SuccBB).second)
  446. WorkList.push_back(SuccBB);
  447. }
  448. const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  449. MachineRegisterInfo &MRI = MF.getRegInfo();
  450. for (const CalleeSavedInfo &I : CSI) {
  451. for (MachineBasicBlock *MBB : Visited) {
  452. MCPhysReg Reg = I.getReg();
  453. // Add the callee-saved register as live-in.
  454. // It's killed at the spill.
  455. if (!MRI.isReserved(Reg) && !MBB->isLiveIn(Reg))
  456. MBB->addLiveIn(Reg);
  457. }
  458. // If callee-saved register is spilled to another register rather than
  459. // spilling to stack, the destination register has to be marked as live for
  460. // each MBB between the prologue and epilogue so that it is not clobbered
  461. // before it is reloaded in the epilogue. The Visited set contains all
  462. // blocks outside of the region delimited by prologue/epilogue.
  463. if (I.isSpilledToReg()) {
  464. for (MachineBasicBlock &MBB : MF) {
  465. if (Visited.count(&MBB))
  466. continue;
  467. MCPhysReg DstReg = I.getDstReg();
  468. if (!MBB.isLiveIn(DstReg))
  469. MBB.addLiveIn(DstReg);
  470. }
  471. }
  472. }
  473. }
  474. /// Insert spill code for the callee-saved registers used in the function.
  475. static void insertCSRSaves(MachineBasicBlock &SaveBlock,
  476. ArrayRef<CalleeSavedInfo> CSI) {
  477. MachineFunction &MF = *SaveBlock.getParent();
  478. const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  479. const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  480. const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  481. MachineBasicBlock::iterator I = SaveBlock.begin();
  482. if (!TFI->spillCalleeSavedRegisters(SaveBlock, I, CSI, TRI)) {
  483. for (const CalleeSavedInfo &CS : CSI) {
  484. // Insert the spill to the stack frame.
  485. unsigned Reg = CS.getReg();
  486. if (CS.isSpilledToReg()) {
  487. BuildMI(SaveBlock, I, DebugLoc(),
  488. TII.get(TargetOpcode::COPY), CS.getDstReg())
  489. .addReg(Reg, getKillRegState(true));
  490. } else {
  491. const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
  492. TII.storeRegToStackSlot(SaveBlock, I, Reg, true, CS.getFrameIdx(), RC,
  493. TRI);
  494. }
  495. }
  496. }
  497. }
  498. /// Insert restore code for the callee-saved registers used in the function.
  499. static void insertCSRRestores(MachineBasicBlock &RestoreBlock,
  500. std::vector<CalleeSavedInfo> &CSI) {
  501. MachineFunction &MF = *RestoreBlock.getParent();
  502. const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  503. const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  504. const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  505. // Restore all registers immediately before the return and any
  506. // terminators that precede it.
  507. MachineBasicBlock::iterator I = RestoreBlock.getFirstTerminator();
  508. if (!TFI->restoreCalleeSavedRegisters(RestoreBlock, I, CSI, TRI)) {
  509. for (const CalleeSavedInfo &CI : reverse(CSI)) {
  510. unsigned Reg = CI.getReg();
  511. if (CI.isSpilledToReg()) {
  512. BuildMI(RestoreBlock, I, DebugLoc(), TII.get(TargetOpcode::COPY), Reg)
  513. .addReg(CI.getDstReg(), getKillRegState(true));
  514. } else {
  515. const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
  516. TII.loadRegFromStackSlot(RestoreBlock, I, Reg, CI.getFrameIdx(), RC, TRI);
  517. assert(I != RestoreBlock.begin() &&
  518. "loadRegFromStackSlot didn't insert any code!");
  519. // Insert in reverse order. loadRegFromStackSlot can insert
  520. // multiple instructions.
  521. }
  522. }
  523. }
  524. }
  525. void PEI::spillCalleeSavedRegs(MachineFunction &MF) {
  526. // We can't list this requirement in getRequiredProperties because some
  527. // targets (WebAssembly) use virtual registers past this point, and the pass
  528. // pipeline is set up without giving the passes a chance to look at the
  529. // TargetMachine.
  530. // FIXME: Find a way to express this in getRequiredProperties.
  531. assert(MF.getProperties().hasProperty(
  532. MachineFunctionProperties::Property::NoVRegs));
  533. const Function &F = MF.getFunction();
  534. const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  535. MachineFrameInfo &MFI = MF.getFrameInfo();
  536. MinCSFrameIndex = std::numeric_limits<unsigned>::max();
  537. MaxCSFrameIndex = 0;
  538. // Determine which of the registers in the callee save list should be saved.
  539. BitVector SavedRegs;
  540. TFI->determineCalleeSaves(MF, SavedRegs, RS);
  541. // Assign stack slots for any callee-saved registers that must be spilled.
  542. assignCalleeSavedSpillSlots(MF, SavedRegs, MinCSFrameIndex, MaxCSFrameIndex);
  543. // Add the code to save and restore the callee saved registers.
  544. if (!F.hasFnAttribute(Attribute::Naked)) {
  545. MFI.setCalleeSavedInfoValid(true);
  546. std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  547. if (!CSI.empty()) {
  548. if (!MFI.hasCalls())
  549. NumLeafFuncWithSpills++;
  550. for (MachineBasicBlock *SaveBlock : SaveBlocks)
  551. insertCSRSaves(*SaveBlock, CSI);
  552. // Update the live-in information of all the blocks up to the save point.
  553. updateLiveness(MF);
  554. for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
  555. insertCSRRestores(*RestoreBlock, CSI);
  556. }
  557. }
  558. }
  559. /// AdjustStackOffset - Helper function used to adjust the stack frame offset.
  560. static inline void AdjustStackOffset(MachineFrameInfo &MFI, int FrameIdx,
  561. bool StackGrowsDown, int64_t &Offset,
  562. Align &MaxAlign, unsigned Skew) {
  563. // If the stack grows down, add the object size to find the lowest address.
  564. if (StackGrowsDown)
  565. Offset += MFI.getObjectSize(FrameIdx);
  566. Align Alignment = MFI.getObjectAlign(FrameIdx);
  567. // If the alignment of this object is greater than that of the stack, then
  568. // increase the stack alignment to match.
  569. MaxAlign = std::max(MaxAlign, Alignment);
  570. // Adjust to alignment boundary.
  571. Offset = alignTo(Offset, Alignment, Skew);
  572. if (StackGrowsDown) {
  573. LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset
  574. << "]\n");
  575. MFI.setObjectOffset(FrameIdx, -Offset); // Set the computed offset
  576. } else {
  577. LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << Offset
  578. << "]\n");
  579. MFI.setObjectOffset(FrameIdx, Offset);
  580. Offset += MFI.getObjectSize(FrameIdx);
  581. }
  582. }
  583. /// Compute which bytes of fixed and callee-save stack area are unused and keep
  584. /// track of them in StackBytesFree.
  585. static inline void
  586. computeFreeStackSlots(MachineFrameInfo &MFI, bool StackGrowsDown,
  587. unsigned MinCSFrameIndex, unsigned MaxCSFrameIndex,
  588. int64_t FixedCSEnd, BitVector &StackBytesFree) {
  589. // Avoid undefined int64_t -> int conversion below in extreme case.
  590. if (FixedCSEnd > std::numeric_limits<int>::max())
  591. return;
  592. StackBytesFree.resize(FixedCSEnd, true);
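  // Every byte below FixedCSEnd starts out marked as free; the loop below
  // clears the ranges already covered by fixed and callee-save objects.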
  593. SmallVector<int, 16> AllocatedFrameSlots;
  594. // Add fixed objects.
  595. for (int i = MFI.getObjectIndexBegin(); i != 0; ++i)
  596. // StackSlot scavenging is only implemented for the default stack.
  597. if (MFI.getStackID(i) == TargetStackID::Default)
  598. AllocatedFrameSlots.push_back(i);
  599. // Add callee-save objects if there are any.
  600. if (MinCSFrameIndex <= MaxCSFrameIndex) {
  601. for (int i = MinCSFrameIndex; i <= (int)MaxCSFrameIndex; ++i)
  602. if (MFI.getStackID(i) == TargetStackID::Default)
  603. AllocatedFrameSlots.push_back(i);
  604. }
  605. for (int i : AllocatedFrameSlots) {
  606. // These are converted from int64_t, but they should always fit in int
  607. // because of the FixedCSEnd check above.
  608. int ObjOffset = MFI.getObjectOffset(i);
  609. int ObjSize = MFI.getObjectSize(i);
  610. int ObjStart, ObjEnd;
  611. if (StackGrowsDown) {
  612. // ObjOffset is negative when StackGrowsDown is true.
  613. ObjStart = -ObjOffset - ObjSize;
  614. ObjEnd = -ObjOffset;
  615. } else {
  616. ObjStart = ObjOffset;
  617. ObjEnd = ObjOffset + ObjSize;
  618. }
  619. // Ignore fixed holes that are in the previous stack frame.
  620. if (ObjEnd > 0)
  621. StackBytesFree.reset(ObjStart, ObjEnd);
  622. }
  623. }
  624. /// Assign frame object to an unused portion of the stack in the fixed stack
  625. /// object range. Return true if the allocation was successful.
  626. static inline bool scavengeStackSlot(MachineFrameInfo &MFI, int FrameIdx,
  627. bool StackGrowsDown, Align MaxAlign,
  628. BitVector &StackBytesFree) {
  629. if (MFI.isVariableSizedObjectIndex(FrameIdx))
  630. return false;
  631. if (StackBytesFree.none()) {
  632. // clear it to speed up later scavengeStackSlot calls to
  633. // StackBytesFree.none()
  634. StackBytesFree.clear();
  635. return false;
  636. }
  637. Align ObjAlign = MFI.getObjectAlign(FrameIdx);
  638. if (ObjAlign > MaxAlign)
  639. return false;
  640. int64_t ObjSize = MFI.getObjectSize(FrameIdx);
  641. int FreeStart;
  642. for (FreeStart = StackBytesFree.find_first(); FreeStart != -1;
  643. FreeStart = StackBytesFree.find_next(FreeStart)) {
  644. // Check that free space has suitable alignment.
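  // When the stack grows down, the object will be placed at offset
  // -(FreeStart + ObjSize), so the alignment check applies to
  // FreeStart + ObjSize rather than to FreeStart itself.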
  645. unsigned ObjStart = StackGrowsDown ? FreeStart + ObjSize : FreeStart;
  646. if (alignTo(ObjStart, ObjAlign) != ObjStart)
  647. continue;
  648. if (FreeStart + ObjSize > StackBytesFree.size())
  649. return false;
  650. bool AllBytesFree = true;
  651. for (unsigned Byte = 0; Byte < ObjSize; ++Byte)
  652. if (!StackBytesFree.test(FreeStart + Byte)) {
  653. AllBytesFree = false;
  654. break;
  655. }
  656. if (AllBytesFree)
  657. break;
  658. }
  659. if (FreeStart == -1)
  660. return false;
  661. if (StackGrowsDown) {
  662. int ObjStart = -(FreeStart + ObjSize);
  663. LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP["
  664. << ObjStart << "]\n");
  665. MFI.setObjectOffset(FrameIdx, ObjStart);
  666. } else {
  667. LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP["
  668. << FreeStart << "]\n");
  669. MFI.setObjectOffset(FrameIdx, FreeStart);
  670. }
  671. StackBytesFree.reset(FreeStart, FreeStart + ObjSize);
  672. return true;
  673. }
  674. /// AssignProtectedObjSet - Helper function to assign large stack objects (i.e.,
  675. /// those required to be close to the Stack Protector) to stack offsets.
  676. static void AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
  677. SmallSet<int, 16> &ProtectedObjs,
  678. MachineFrameInfo &MFI, bool StackGrowsDown,
  679. int64_t &Offset, Align &MaxAlign,
  680. unsigned Skew) {
  681. for (int i : UnassignedObjs) {
  682. AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign, Skew);
  683. ProtectedObjs.insert(i);
  684. }
  685. }
  686. /// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
  687. /// abstract stack objects.
  688. void PEI::calculateFrameObjectOffsets(MachineFunction &MF) {
  689. const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
  690. bool StackGrowsDown =
  691. TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
  692. // Loop over all of the stack objects, assigning sequential addresses...
  693. MachineFrameInfo &MFI = MF.getFrameInfo();
  694. // Start at the beginning of the local area.
  695. // The Offset is the distance from the stack top in the direction
  696. // of stack growth -- so it's always nonnegative.
  697. int LocalAreaOffset = TFI.getOffsetOfLocalArea();
  698. if (StackGrowsDown)
  699. LocalAreaOffset = -LocalAreaOffset;
  700. assert(LocalAreaOffset >= 0
  701. && "Local area offset should be in direction of stack growth");
  702. int64_t Offset = LocalAreaOffset;
  703. // Skew to be applied to alignment.
  704. unsigned Skew = TFI.getStackAlignmentSkew(MF);
  705. #ifdef EXPENSIVE_CHECKS
  706. for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i)
  707. if (!MFI.isDeadObjectIndex(i) &&
  708. MFI.getStackID(i) == TargetStackID::Default)
  709. assert(MFI.getObjectAlign(i) <= MFI.getMaxAlign() &&
  710. "MaxAlignment is invalid");
  711. #endif
  712. // If there are fixed sized objects that are preallocated in the local area,
  713. // non-fixed objects can't be allocated right at the start of local area.
  714. // Adjust 'Offset' to point to the end of last fixed sized preallocated
  715. // object.
  716. for (int i = MFI.getObjectIndexBegin(); i != 0; ++i) {
  717. if (MFI.getStackID(i) !=
  718. TargetStackID::Default) // Only allocate objects on the default stack.
  719. continue;
  720. int64_t FixedOff;
  721. if (StackGrowsDown) {
  722. // The maximum distance from the stack pointer is at lower address of
  723. // the object -- which is given by offset. For down growing stack
  724. // the offset is negative, so we negate the offset to get the distance.
  725. FixedOff = -MFI.getObjectOffset(i);
  726. } else {
  727. // The maximum distance from the stack pointer is at the upper
  728. // address of the object.
  729. FixedOff = MFI.getObjectOffset(i) + MFI.getObjectSize(i);
  730. }
  731. if (FixedOff > Offset) Offset = FixedOff;
  732. }
  733. // First assign frame offsets to stack objects that are used to spill
  734. // callee saved registers.
  735. if (StackGrowsDown && MaxCSFrameIndex >= MinCSFrameIndex) {
  736. for (unsigned i = MinCSFrameIndex; i <= MaxCSFrameIndex; ++i) {
  737. if (MFI.getStackID(i) !=
  738. TargetStackID::Default) // Only allocate objects on the default stack.
  739. continue;
  740. // If the stack grows down, we need to add the size to find the lowest
  741. // address of the object.
  742. Offset += MFI.getObjectSize(i);
  743. // Adjust to alignment boundary
  744. Offset = alignTo(Offset, MFI.getObjectAlign(i), Skew);
  745. LLVM_DEBUG(dbgs() << "alloc FI(" << i << ") at SP[" << -Offset << "]\n");
  746. MFI.setObjectOffset(i, -Offset); // Set the computed offset
  747. }
  748. } else if (MaxCSFrameIndex >= MinCSFrameIndex) {
  749. // Be careful about underflow in comparisons against MinCSFrameIndex.
  750. for (unsigned i = MaxCSFrameIndex; i != MinCSFrameIndex - 1; --i) {
  751. if (MFI.getStackID(i) !=
  752. TargetStackID::Default) // Only allocate objects on the default stack.
  753. continue;
  754. if (MFI.isDeadObjectIndex(i))
  755. continue;
  756. // Adjust to alignment boundary
  757. Offset = alignTo(Offset, MFI.getObjectAlign(i), Skew);
  758. LLVM_DEBUG(dbgs() << "alloc FI(" << i << ") at SP[" << Offset << "]\n");
  759. MFI.setObjectOffset(i, Offset);
  760. Offset += MFI.getObjectSize(i);
  761. }
  762. }
  763. // FixedCSEnd is the stack offset to the end of the fixed and callee-save
  764. // stack area.
  765. int64_t FixedCSEnd = Offset;
  766. Align MaxAlign = MFI.getMaxAlign();
  767. // Make sure the special register scavenging spill slot is closest to the
  768. // incoming stack pointer if a frame pointer is required and is closer
  769. // to the incoming rather than the final stack pointer.
  770. const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
  771. bool EarlyScavengingSlots = TFI.allocateScavengingFrameIndexesNearIncomingSP(MF);
  772. if (RS && EarlyScavengingSlots) {
  773. SmallVector<int, 2> SFIs;
  774. RS->getScavengingFrameIndices(SFIs);
  775. for (int SFI : SFIs)
  776. AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign, Skew);
  777. }
  778. // FIXME: Once this is working, then enable flag will change to a target
  779. // check for whether the frame is large enough to want to use virtual
  780. // frame index registers. Functions which don't want/need this optimization
  781. // will continue to use the existing code path.
  782. if (MFI.getUseLocalStackAllocationBlock()) {
  783. Align Alignment = MFI.getLocalFrameMaxAlign();
  784. // Adjust to alignment boundary.
  785. Offset = alignTo(Offset, Alignment, Skew);
  786. LLVM_DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");
  787. // Resolve offsets for objects in the local block.
  788. for (unsigned i = 0, e = MFI.getLocalFrameObjectCount(); i != e; ++i) {
  789. std::pair<int, int64_t> Entry = MFI.getLocalFrameObjectMap(i);
  790. int64_t FIOffset = (StackGrowsDown ? -Offset : Offset) + Entry.second;
  791. LLVM_DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP[" << FIOffset
  792. << "]\n");
  793. MFI.setObjectOffset(Entry.first, FIOffset);
  794. }
  795. // Allocate the local block
  796. Offset += MFI.getLocalFrameSize();
  797. MaxAlign = std::max(Alignment, MaxAlign);
  798. }
  799. // Retrieve the Exception Handler registration node.
  800. int EHRegNodeFrameIndex = std::numeric_limits<int>::max();
  801. if (const WinEHFuncInfo *FuncInfo = MF.getWinEHFuncInfo())
  802. EHRegNodeFrameIndex = FuncInfo->EHRegNodeFrameIndex;
  803. // Make sure that the stack protector comes before the local variables on the
  804. // stack.
  805. SmallSet<int, 16> ProtectedObjs;
  806. if (MFI.hasStackProtectorIndex()) {
  807. int StackProtectorFI = MFI.getStackProtectorIndex();
  808. StackObjSet LargeArrayObjs;
  809. StackObjSet SmallArrayObjs;
  810. StackObjSet AddrOfObjs;
  811. // If we need a stack protector, we need to make sure that
  812. // LocalStackSlotPass didn't already allocate a slot for it.
  813. // If we are told to use the LocalStackAllocationBlock, the stack protector
  814. // is expected to be already pre-allocated.
  815. if (MFI.getStackID(StackProtectorFI) != TargetStackID::Default) {
  816. // If the stack protector isn't on the default stack then it's up to the
  817. // target to set the stack offset.
  818. assert(MFI.getObjectOffset(StackProtectorFI) != 0 &&
  819. "Offset of stack protector on non-default stack expected to be "
  820. "already set.");
  821. assert(!MFI.isObjectPreAllocated(MFI.getStackProtectorIndex()) &&
  822. "Stack protector on non-default stack expected to not be "
  823. "pre-allocated by LocalStackSlotPass.");
  824. } else if (!MFI.getUseLocalStackAllocationBlock()) {
  825. AdjustStackOffset(MFI, StackProtectorFI, StackGrowsDown, Offset, MaxAlign,
  826. Skew);
  827. } else if (!MFI.isObjectPreAllocated(MFI.getStackProtectorIndex())) {
  828. llvm_unreachable(
  829. "Stack protector not pre-allocated by LocalStackSlotPass.");
  830. }
  831. // Assign large stack objects first.
  832. for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
  833. if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
  834. continue;
  835. if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
  836. continue;
  837. if (RS && RS->isScavengingFrameIndex((int)i))
  838. continue;
  839. if (MFI.isDeadObjectIndex(i))
  840. continue;
  841. if (StackProtectorFI == (int)i || EHRegNodeFrameIndex == (int)i)
  842. continue;
  843. if (MFI.getStackID(i) !=
  844. TargetStackID::Default) // Only allocate objects on the default stack.
  845. continue;
  846. switch (MFI.getObjectSSPLayout(i)) {
  847. case MachineFrameInfo::SSPLK_None:
  848. continue;
  849. case MachineFrameInfo::SSPLK_SmallArray:
  850. SmallArrayObjs.insert(i);
  851. continue;
  852. case MachineFrameInfo::SSPLK_AddrOf:
  853. AddrOfObjs.insert(i);
  854. continue;
  855. case MachineFrameInfo::SSPLK_LargeArray:
  856. LargeArrayObjs.insert(i);
  857. continue;
  858. }
  859. llvm_unreachable("Unexpected SSPLayoutKind.");
  860. }
  861. // We expect **all** the protected stack objects to be pre-allocated by
  862. // LocalStackSlotPass. If it turns out that PEI still has to allocate some
  863. // of them, we may end up messing up the expected order of the objects.
  864. if (MFI.getUseLocalStackAllocationBlock() &&
  865. !(LargeArrayObjs.empty() && SmallArrayObjs.empty() &&
  866. AddrOfObjs.empty()))
  867. llvm_unreachable("Found protected stack objects not pre-allocated by "
  868. "LocalStackSlotPass.");
  869. AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
  870. Offset, MaxAlign, Skew);
  871. AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
  872. Offset, MaxAlign, Skew);
  873. AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
  874. Offset, MaxAlign, Skew);
  875. }
  876. SmallVector<int, 8> ObjectsToAllocate;
  877. // Then prepare to assign frame offsets to stack objects that are not used to
  878. // spill callee saved registers.
  879. for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
  880. if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
  881. continue;
  882. if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
  883. continue;
  884. if (RS && RS->isScavengingFrameIndex((int)i))
  885. continue;
  886. if (MFI.isDeadObjectIndex(i))
  887. continue;
  888. if (MFI.getStackProtectorIndex() == (int)i || EHRegNodeFrameIndex == (int)i)
  889. continue;
  890. if (ProtectedObjs.count(i))
  891. continue;
  892. if (MFI.getStackID(i) !=
  893. TargetStackID::Default) // Only allocate objects on the default stack.
  894. continue;
  895. // Add the objects that we need to allocate to our working set.
  896. ObjectsToAllocate.push_back(i);
  897. }
  898. // Allocate the EH registration node first if one is present.
  899. if (EHRegNodeFrameIndex != std::numeric_limits<int>::max())
  900. AdjustStackOffset(MFI, EHRegNodeFrameIndex, StackGrowsDown, Offset,
  901. MaxAlign, Skew);
  902. // Give the targets a chance to order the objects the way they like it.
  903. if (MF.getTarget().getOptLevel() != CodeGenOpt::None &&
  904. MF.getTarget().Options.StackSymbolOrdering)
  905. TFI.orderFrameObjects(MF, ObjectsToAllocate);
  906. // Keep track of which bytes in the fixed and callee-save range are used so we
  907. // can use the holes when allocating later stack objects. Only do this if
  908. // stack protector isn't being used and the target requests it and we're
  909. // optimizing.
  910. BitVector StackBytesFree;
  911. if (!ObjectsToAllocate.empty() &&
  912. MF.getTarget().getOptLevel() != CodeGenOpt::None &&
  913. MFI.getStackProtectorIndex() < 0 && TFI.enableStackSlotScavenging(MF))
  914. computeFreeStackSlots(MFI, StackGrowsDown, MinCSFrameIndex, MaxCSFrameIndex,
  915. FixedCSEnd, StackBytesFree);
  916. // Now walk the objects and actually assign base offsets to them.
  917. for (auto &Object : ObjectsToAllocate)
  918. if (!scavengeStackSlot(MFI, Object, StackGrowsDown, MaxAlign,
  919. StackBytesFree))
  920. AdjustStackOffset(MFI, Object, StackGrowsDown, Offset, MaxAlign, Skew);
  921. // Make sure the special register scavenging spill slot is closest to the
  922. // stack pointer.
  923. if (RS && !EarlyScavengingSlots) {
  924. SmallVector<int, 2> SFIs;
  925. RS->getScavengingFrameIndices(SFIs);
  926. for (int SFI : SFIs)
  927. AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign, Skew);
  928. }
  929. if (!TFI.targetHandlesStackFrameRounding()) {
  930. // If we have reserved argument space for call sites in the function
  931. // immediately on entry to the current function, count it as part of the
  932. // overall stack size.
  933. if (MFI.adjustsStack() && TFI.hasReservedCallFrame(MF))
  934. Offset += MFI.getMaxCallFrameSize();
  935. // Round up the size to a multiple of the alignment. If the function has
  936. // any calls or alloca's, align to the target's StackAlignment value to
  937. // ensure that the callee's frame or the alloca data is suitably aligned;
  938. // otherwise, for leaf functions, align to the TransientStackAlignment
  939. // value.
  940. Align StackAlign;
  941. if (MFI.adjustsStack() || MFI.hasVarSizedObjects() ||
  942. (RegInfo->hasStackRealignment(MF) && MFI.getObjectIndexEnd() != 0))
  943. StackAlign = TFI.getStackAlign();
  944. else
  945. StackAlign = TFI.getTransientStackAlign();
  946. // If the frame pointer is eliminated, all frame offsets will be relative to
  947. // SP not FP. Align to MaxAlign so this works.
  948. StackAlign = std::max(StackAlign, MaxAlign);
  949. int64_t OffsetBeforeAlignment = Offset;
  950. Offset = alignTo(Offset, StackAlign, Skew);
  951. // If we have increased the offset to fulfill the alignment constraints,
  952. // then the scavenging spill slots may become harder to reach from the
  953. // stack pointer; float them so they stay close.
  954. if (StackGrowsDown && OffsetBeforeAlignment != Offset && RS &&
  955. !EarlyScavengingSlots) {
  956. SmallVector<int, 2> SFIs;
  957. RS->getScavengingFrameIndices(SFIs);
  958. LLVM_DEBUG(if (!SFIs.empty()) llvm::dbgs()
  959. << "Adjusting emergency spill slots!\n";);
  960. int64_t Delta = Offset - OffsetBeforeAlignment;
  961. for (int SFI : SFIs) {
  962. LLVM_DEBUG(llvm::dbgs()
  963. << "Adjusting offset of emergency spill slot #" << SFI
  964. << " from " << MFI.getObjectOffset(SFI););
  965. MFI.setObjectOffset(SFI, MFI.getObjectOffset(SFI) - Delta);
  966. LLVM_DEBUG(llvm::dbgs() << " to " << MFI.getObjectOffset(SFI) << "\n";);
  967. }
  968. }
  969. }
  970. // Update frame info to pretend that this is part of the stack...
  971. int64_t StackSize = Offset - LocalAreaOffset;
  972. MFI.setStackSize(StackSize);
  973. NumBytesStackSpace += StackSize;
  974. }
  975. /// insertPrologEpilogCode - Scan the function for modified callee saved
  976. /// registers, insert spill code for these callee saved registers, then add
  977. /// prolog and epilog code to the function.
  978. void PEI::insertPrologEpilogCode(MachineFunction &MF) {
  979. const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
  980. // Add prologue to the function...
  981. for (MachineBasicBlock *SaveBlock : SaveBlocks)
  982. TFI.emitPrologue(MF, *SaveBlock);
  983. // Add epilogue to restore the callee-save registers in each exiting block.
  984. for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
  985. TFI.emitEpilogue(MF, *RestoreBlock);
  986. for (MachineBasicBlock *SaveBlock : SaveBlocks)
  987. TFI.inlineStackProbe(MF, *SaveBlock);
  988. // Emit additional code that is required to support segmented stacks, if
  989. // we've been asked for it. This, when linked with a runtime with support
  990. // for segmented stacks (libgcc is one), will result in allocating stack
  991. // space in small chunks instead of one large contiguous block.
  992. if (MF.shouldSplitStack()) {
  993. for (MachineBasicBlock *SaveBlock : SaveBlocks)
  994. TFI.adjustForSegmentedStacks(MF, *SaveBlock);
  995. // Record that there are split-stack functions, so we will emit a
  996. // special section to tell the linker.
  997. MF.getMMI().setHasSplitStack(true);
  998. } else
  999. MF.getMMI().setHasNosplitStack(true);
  1000. // Emit additional code that is required to explicitly handle the stack in
  1001. // HiPE native code (if needed) when loaded in the Erlang/OTP runtime. The
  1002. // approach is rather similar to that of Segmented Stacks, but it uses a
  1003. // different conditional check and another BIF for allocating more stack
  1004. // space.
  1005. if (MF.getFunction().getCallingConv() == CallingConv::HiPE)
  1006. for (MachineBasicBlock *SaveBlock : SaveBlocks)
  1007. TFI.adjustForHiPEPrologue(MF, *SaveBlock);
  1008. }
  1009. /// replaceFrameIndices - Replace all MO_FrameIndex operands with physical
  1010. /// register references and actual offsets.
  1011. void PEI::replaceFrameIndices(MachineFunction &MF) {
  1012. const auto &ST = MF.getSubtarget();
  1013. const TargetFrameLowering &TFI = *ST.getFrameLowering();
  1014. if (!TFI.needsFrameIndexResolution(MF))
  1015. return;
  1016. const TargetRegisterInfo *TRI = ST.getRegisterInfo();
  1017. // Allow the target to determine this after knowing the frame size.
  1018. FrameIndexEliminationScavenging = (RS && !FrameIndexVirtualScavenging) ||
  1019. TRI->requiresFrameIndexReplacementScavenging(MF);
  1020. // Store SPAdj at exit of a basic block.
  1021. SmallVector<int, 8> SPState;
  1022. SPState.resize(MF.getNumBlockIDs());
  1023. df_iterator_default_set<MachineBasicBlock*> Reachable;
  1024. // Iterate over the reachable blocks in DFS order.
  1025. for (auto DFI = df_ext_begin(&MF, Reachable), DFE = df_ext_end(&MF, Reachable);
  1026. DFI != DFE; ++DFI) {
  1027. int SPAdj = 0;
  1028. // Check the exit state of the DFS stack predecessor.
  1029. if (DFI.getPathLength() >= 2) {
  1030. MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
  1031. assert(Reachable.count(StackPred) &&
  1032. "DFS stack predecessor is already visited.\n");
  1033. SPAdj = SPState[StackPred->getNumber()];
  1034. }
  1035. MachineBasicBlock *BB = *DFI;
  1036. replaceFrameIndices(BB, MF, SPAdj);
  1037. SPState[BB->getNumber()] = SPAdj;
  1038. }
  1039. // Handle the unreachable blocks.
  1040. for (auto &BB : MF) {
  1041. if (Reachable.count(&BB))
  1042. // Already handled in DFS traversal.
  1043. continue;
  1044. int SPAdj = 0;
  1045. replaceFrameIndices(&BB, MF, SPAdj);
  1046. }
  1047. }
  1048. void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &MF,
  1049. int &SPAdj) {
  1050. assert(MF.getSubtarget().getRegisterInfo() &&
  1051. "getRegisterInfo() must be implemented!");
  1052. const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  1053. const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  1054. const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  1055. if (RS && FrameIndexEliminationScavenging)
  1056. RS->enterBasicBlock(*BB);
  1057. bool InsideCallSequence = false;
  1058. for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
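  // Note: I is advanced manually because call frame pseudo elimination and
  // eliminateFrameIndex below may erase the current instruction or insert
  // new instructions around it.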
  1059. if (TII.isFrameInstr(*I)) {
  1060. InsideCallSequence = TII.isFrameSetup(*I);
  1061. SPAdj += TII.getSPAdjust(*I);
  1062. I = TFI->eliminateCallFramePseudoInstr(MF, *BB, I);
  1063. continue;
  1064. }
  1065. MachineInstr &MI = *I;
  1066. bool DoIncr = true;
  1067. bool DidFinishLoop = true;
  1068. for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
  1069. if (!MI.getOperand(i).isFI())
  1070. continue;
  1071. // Frame indices in debug values are encoded in a target independent
  1072. // way with simply the frame index and offset rather than any
  1073. // target-specific addressing mode.
  1074. if (MI.isDebugValue()) {
  1075. MachineOperand &Op = MI.getOperand(i);
  1076. assert(
  1077. MI.isDebugOperand(&Op) &&
  1078. "Frame indices can only appear as a debug operand in a DBG_VALUE*"
  1079. " machine instruction");
  1080. Register Reg;
  1081. unsigned FrameIdx = Op.getIndex();
  1082. unsigned Size = MF.getFrameInfo().getObjectSize(FrameIdx);
  1083. StackOffset Offset =
  1084. TFI->getFrameIndexReference(MF, FrameIdx, Reg);
  1085. Op.ChangeToRegister(Reg, false /*isDef*/);
  1086. const DIExpression *DIExpr = MI.getDebugExpression();
  1087. // If we have a direct DBG_VALUE, and its location expression isn't
  1088. // currently complex, then adding an offset will morph it into a
  1089. // complex location that is interpreted as being a memory address.
  1090. // This changes a pointer-valued variable to dereference that pointer,
  1091. // which is incorrect. Fix by adding DW_OP_stack_value.
  1092. if (MI.isNonListDebugValue()) {
  1093. unsigned PrependFlags = DIExpression::ApplyOffset;
  1094. if (!MI.isIndirectDebugValue() && !DIExpr->isComplex())
  1095. PrependFlags |= DIExpression::StackValue;
  1096. // If we have a DBG_VALUE that is indirect and has an implicit location
  1097. // expression, we need to insert a deref before prepending a memory
  1098. // location expression. After doing this we also change the DBG_VALUE
  1099. // to be direct.
  1100. if (MI.isIndirectDebugValue() && DIExpr->isImplicit()) {
  1101. SmallVector<uint64_t, 2> Ops = {dwarf::DW_OP_deref_size, Size};
  1102. bool WithStackValue = true;
  1103. DIExpr = DIExpression::prependOpcodes(DIExpr, Ops, WithStackValue);
  1104. // Make the DBG_VALUE direct.
  1105. MI.getDebugOffset().ChangeToRegister(0, false);
  1106. }
  1107. DIExpr = TRI.prependOffsetExpression(DIExpr, PrependFlags, Offset);
  1108. } else {
  1109. // The debug operand at DebugOpIndex was a frame index at offset
  1110. // `Offset`; now that the operand has been replaced with the frame
  1111. // register, we must express the location as `register x, plus Offset`.
  1112. unsigned DebugOpIndex = MI.getDebugOperandIndex(&Op);
  1113. SmallVector<uint64_t, 3> Ops;
  1114. TRI.getOffsetOpcodes(Offset, Ops);
  1115. DIExpr = DIExpression::appendOpsToArg(DIExpr, Ops, DebugOpIndex);
  1116. }
  1117. MI.getDebugExpressionOp().setMetadata(DIExpr);
  1118. continue;
  1119. } else if (MI.isDebugPHI()) {
  1120. // Allow stack ref to continue onwards.
  1121. continue;
  1122. }
  1123. // TODO: This code should be commoned with the code for
  1124. // PATCHPOINT. There's no good reason for the difference in
  1125. // implementation other than historical accident. The only
  1126. // remaining difference is the unconditional use of the stack
  1127. // pointer as the base register.
  1128. if (MI.getOpcode() == TargetOpcode::STATEPOINT) {
  1129. assert((!MI.isDebugValue() || i == 0) &&
  1130. "Frame indicies can only appear as the first operand of a "
  1131. "DBG_VALUE machine instruction");
  1132. Register Reg;
  1133. MachineOperand &Offset = MI.getOperand(i + 1);
  1134. StackOffset refOffset = TFI->getFrameIndexReferencePreferSP(
  1135. MF, MI.getOperand(i).getIndex(), Reg, /*IgnoreSPUpdates*/ false);
  1136. assert(!refOffset.getScalable() &&
  1137. "Frame offsets with a scalable component are not supported");
  1138. Offset.setImm(Offset.getImm() + refOffset.getFixed() + SPAdj);
  1139. MI.getOperand(i).ChangeToRegister(Reg, false /*isDef*/);
  1140. continue;
  1141. }
  1142. // Some instructions (e.g. inline asm instructions) can have
  1143. // multiple frame indices and/or cause eliminateFrameIndex
  1144. // to insert more than one instruction. We need the register
  1145. // scavenger to go through all of these instructions so that
  1146. // it can update its register information. We keep the
  1147. // iterator at the point before insertion so that we can
  1148. // revisit them in full.
  1149. bool AtBeginning = (I == BB->begin());
  1150. if (!AtBeginning) --I;
  1151. // If this instruction has a FrameIndex operand, we need to
  1152. // use that target machine register info object to eliminate
  1153. // it.
  1154. TRI.eliminateFrameIndex(MI, SPAdj, i,
  1155. FrameIndexEliminationScavenging ? RS : nullptr);
  1156. // Reset the iterator if we were at the beginning of the BB.
  1157. if (AtBeginning) {
  1158. I = BB->begin();
  1159. DoIncr = false;
  1160. }
  1161. DidFinishLoop = false;
  1162. break;
  1163. }
  1164. // If we are looking at a call sequence, we need to keep track of
  1165. // the SP adjustment made by each instruction in the sequence.
  1166. // This includes both the frame setup/destroy pseudos (handled above),
  1167. // as well as other instructions that have side effects w.r.t the SP.
  1168. // Note that this must come after eliminateFrameIndex, because
  1169. // if I itself referred to a frame index, we shouldn't count its own
  1170. // adjustment.
  1171. if (DidFinishLoop && InsideCallSequence)
  1172. SPAdj += TII.getSPAdjust(MI);
  1173. if (DoIncr && I != BB->end()) ++I;
  1174. // Update register states.
  1175. if (RS && FrameIndexEliminationScavenging && DidFinishLoop)
  1176. RS->forward(MI);
  1177. }
  1178. }