MachineLICM.cpp

  1. //===- MachineLICM.cpp - Machine Loop Invariant Code Motion Pass ----------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This pass performs loop invariant code motion on machine instructions. We
  10. // attempt to remove as much code from the body of a loop as possible.
  11. //
  12. // This pass is not intended to be a replacement or a complete alternative
  13. // for the LLVM-IR-level LICM pass. It is only designed to hoist simple
  14. // constructs that are not exposed before lowering and instruction selection.
  15. //
  16. //===----------------------------------------------------------------------===//
  17. #include "llvm/ADT/BitVector.h"
  18. #include "llvm/ADT/DenseMap.h"
  19. #include "llvm/ADT/STLExtras.h"
  20. #include "llvm/ADT/SmallSet.h"
  21. #include "llvm/ADT/SmallVector.h"
  22. #include "llvm/ADT/Statistic.h"
  23. #include "llvm/Analysis/AliasAnalysis.h"
  24. #include "llvm/CodeGen/MachineBasicBlock.h"
  25. #include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
  26. #include "llvm/CodeGen/MachineDominators.h"
  27. #include "llvm/CodeGen/MachineFrameInfo.h"
  28. #include "llvm/CodeGen/MachineFunction.h"
  29. #include "llvm/CodeGen/MachineFunctionPass.h"
  30. #include "llvm/CodeGen/MachineInstr.h"
  31. #include "llvm/CodeGen/MachineLoopInfo.h"
  32. #include "llvm/CodeGen/MachineMemOperand.h"
  33. #include "llvm/CodeGen/MachineOperand.h"
  34. #include "llvm/CodeGen/MachineRegisterInfo.h"
  35. #include "llvm/CodeGen/PseudoSourceValue.h"
  36. #include "llvm/CodeGen/TargetInstrInfo.h"
  37. #include "llvm/CodeGen/TargetLowering.h"
  38. #include "llvm/CodeGen/TargetRegisterInfo.h"
  39. #include "llvm/CodeGen/TargetSchedule.h"
  40. #include "llvm/CodeGen/TargetSubtargetInfo.h"
  41. #include "llvm/IR/DebugLoc.h"
  42. #include "llvm/InitializePasses.h"
  43. #include "llvm/MC/MCInstrDesc.h"
  44. #include "llvm/MC/MCRegister.h"
  45. #include "llvm/MC/MCRegisterInfo.h"
  46. #include "llvm/Pass.h"
  47. #include "llvm/Support/Casting.h"
  48. #include "llvm/Support/CommandLine.h"
  49. #include "llvm/Support/Debug.h"
  50. #include "llvm/Support/raw_ostream.h"
  51. #include <algorithm>
  52. #include <cassert>
  53. #include <limits>
  54. #include <vector>
  55. using namespace llvm;
  56. #define DEBUG_TYPE "machinelicm"
  57. static cl::opt<bool>
  58. AvoidSpeculation("avoid-speculation",
  59. cl::desc("MachineLICM should avoid speculation"),
  60. cl::init(true), cl::Hidden);
  61. static cl::opt<bool>
  62. HoistCheapInsts("hoist-cheap-insts",
  63. cl::desc("MachineLICM should hoist even cheap instructions"),
  64. cl::init(false), cl::Hidden);
  65. static cl::opt<bool>
  66. HoistConstStores("hoist-const-stores",
  67. cl::desc("Hoist invariant stores"),
  68. cl::init(true), cl::Hidden);
  69. // The default threshold of 100 (i.e. if target block is 100 times hotter)
  70. // is based on empirical data on a single target and is subject to tuning.
  71. static cl::opt<unsigned>
  72. BlockFrequencyRatioThreshold("block-freq-ratio-threshold",
  73. cl::desc("Do not hoist instructions if target"
  74. "block is N times hotter than the source."),
  75. cl::init(100), cl::Hidden);
  76. enum class UseBFI { None, PGO, All };
  77. static cl::opt<UseBFI>
  78. DisableHoistingToHotterBlocks("disable-hoisting-to-hotter-blocks",
  79. cl::desc("Disable hoisting instructions to"
  80. " hotter blocks"),
  81. cl::init(UseBFI::PGO), cl::Hidden,
  82. cl::values(clEnumValN(UseBFI::None, "none",
  83. "disable the feature"),
  84. clEnumValN(UseBFI::PGO, "pgo",
  85. "enable the feature when using profile data"),
  86. clEnumValN(UseBFI::All, "all",
  87. "enable the feature with/wo profile data")));
  88. STATISTIC(NumHoisted,
  89. "Number of machine instructions hoisted out of loops");
  90. STATISTIC(NumLowRP,
  91. "Number of instructions hoisted in low reg pressure situation");
  92. STATISTIC(NumHighLatency,
  93. "Number of high latency instructions hoisted");
  94. STATISTIC(NumCSEed,
  95. "Number of hoisted machine instructions CSEed");
  96. STATISTIC(NumPostRAHoisted,
  97. "Number of machine instructions hoisted out of loops post regalloc");
  98. STATISTIC(NumStoreConst,
  99. "Number of stores of const phys reg hoisted out of loops");
  100. STATISTIC(NumNotHoistedDueToHotness,
  101. "Number of instructions not hoisted due to block frequency");
  102. namespace {
  103. class MachineLICMBase : public MachineFunctionPass {
  104. const TargetInstrInfo *TII;
  105. const TargetLoweringBase *TLI;
  106. const TargetRegisterInfo *TRI;
  107. const MachineFrameInfo *MFI;
  108. MachineRegisterInfo *MRI;
  109. TargetSchedModel SchedModel;
  110. bool PreRegAlloc;
  111. bool HasProfileData;
  112. // Various analyses that we use...
  113. AliasAnalysis *AA; // Alias analysis info.
  114. MachineBlockFrequencyInfo *MBFI; // Machine block frequency info
  115. MachineLoopInfo *MLI; // Current MachineLoopInfo
  116. MachineDominatorTree *DT; // Machine dominator tree for the cur loop
  117. // State that is updated as we process loops
  118. bool Changed; // True if a loop is changed.
  119. bool FirstInLoop; // True if it's the first LICM in the loop.
  120. MachineLoop *CurLoop; // The current loop we are working on.
  121. MachineBasicBlock *CurPreheader; // The preheader for CurLoop.
  122. // Exit blocks for CurLoop.
  123. SmallVector<MachineBasicBlock *, 8> ExitBlocks;
  124. bool isExitBlock(const MachineBasicBlock *MBB) const {
  125. return is_contained(ExitBlocks, MBB);
  126. }
  127. // Track 'estimated' register pressure.
  128. SmallSet<Register, 32> RegSeen;
  129. SmallVector<unsigned, 8> RegPressure;
  130. // Register pressure "limit" per register pressure set. If the pressure
  131. // is higher than the limit, then it's considered high.
  132. SmallVector<unsigned, 8> RegLimit;
  133. // Register pressure on path leading from loop preheader to current BB.
  134. SmallVector<SmallVector<unsigned, 8>, 16> BackTrace;
  135. // For each opcode, keep a list of potential CSE instructions.
  136. DenseMap<unsigned, std::vector<MachineInstr *>> CSEMap;
  137. enum {
  138. SpeculateFalse = 0,
  139. SpeculateTrue = 1,
  140. SpeculateUnknown = 2
  141. };
  142. // If an MBB does not dominate all loop exiting blocks, then it may not be
  143. // safe to hoist loads from this block.
  144. // Tri-state: 0 - false, 1 - true, 2 - unknown
  145. unsigned SpeculationState;
  146. public:
  147. MachineLICMBase(char &PassID, bool PreRegAlloc)
  148. : MachineFunctionPass(PassID), PreRegAlloc(PreRegAlloc) {}
  149. bool runOnMachineFunction(MachineFunction &MF) override;
  150. void getAnalysisUsage(AnalysisUsage &AU) const override {
  151. AU.addRequired<MachineLoopInfo>();
  152. if (DisableHoistingToHotterBlocks != UseBFI::None)
  153. AU.addRequired<MachineBlockFrequencyInfo>();
  154. AU.addRequired<MachineDominatorTree>();
  155. AU.addRequired<AAResultsWrapperPass>();
  156. AU.addPreserved<MachineLoopInfo>();
  157. MachineFunctionPass::getAnalysisUsage(AU);
  158. }
  159. void releaseMemory() override {
  160. RegSeen.clear();
  161. RegPressure.clear();
  162. RegLimit.clear();
  163. BackTrace.clear();
  164. CSEMap.clear();
  165. }
  166. private:
  167. /// Keep track of information about hoisting candidates.
  168. struct CandidateInfo {
  169. MachineInstr *MI;
  170. unsigned Def;
  171. int FI;
  172. CandidateInfo(MachineInstr *mi, unsigned def, int fi)
  173. : MI(mi), Def(def), FI(fi) {}
  174. };
  175. void HoistRegionPostRA();
  176. void HoistPostRA(MachineInstr *MI, unsigned Def);
  177. void ProcessMI(MachineInstr *MI, BitVector &PhysRegDefs,
  178. BitVector &PhysRegClobbers, SmallSet<int, 32> &StoredFIs,
  179. SmallVectorImpl<CandidateInfo> &Candidates);
  180. void AddToLiveIns(MCRegister Reg);
  181. bool IsLICMCandidate(MachineInstr &I);
  182. bool IsLoopInvariantInst(MachineInstr &I);
  183. bool HasLoopPHIUse(const MachineInstr *MI) const;
  184. bool HasHighOperandLatency(MachineInstr &MI, unsigned DefIdx,
  185. Register Reg) const;
  186. bool IsCheapInstruction(MachineInstr &MI) const;
  187. bool CanCauseHighRegPressure(const DenseMap<unsigned, int> &Cost,
  188. bool Cheap);
  189. void UpdateBackTraceRegPressure(const MachineInstr *MI);
  190. bool IsProfitableToHoist(MachineInstr &MI);
  191. bool IsGuaranteedToExecute(MachineBasicBlock *BB);
  192. bool isTriviallyReMaterializable(const MachineInstr &MI) const;
  193. void EnterScope(MachineBasicBlock *MBB);
  194. void ExitScope(MachineBasicBlock *MBB);
  195. void ExitScopeIfDone(
  196. MachineDomTreeNode *Node,
  197. DenseMap<MachineDomTreeNode *, unsigned> &OpenChildren,
  198. const DenseMap<MachineDomTreeNode *, MachineDomTreeNode *> &ParentMap);
  199. void HoistOutOfLoop(MachineDomTreeNode *HeaderN);
  200. void InitRegPressure(MachineBasicBlock *BB);
  201. DenseMap<unsigned, int> calcRegisterCost(const MachineInstr *MI,
  202. bool ConsiderSeen,
  203. bool ConsiderUnseenAsDef);
  204. void UpdateRegPressure(const MachineInstr *MI,
  205. bool ConsiderUnseenAsDef = false);
  206. MachineInstr *ExtractHoistableLoad(MachineInstr *MI);
  207. MachineInstr *LookForDuplicate(const MachineInstr *MI,
  208. std::vector<MachineInstr *> &PrevMIs);
  209. bool
  210. EliminateCSE(MachineInstr *MI,
  211. DenseMap<unsigned, std::vector<MachineInstr *>>::iterator &CI);
  212. bool MayCSE(MachineInstr *MI);
  213. bool Hoist(MachineInstr *MI, MachineBasicBlock *Preheader);
  214. void InitCSEMap(MachineBasicBlock *BB);
  215. bool isTgtHotterThanSrc(MachineBasicBlock *SrcBlock,
  216. MachineBasicBlock *TgtBlock);
  217. MachineBasicBlock *getCurPreheader();
  218. };
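// The two concrete passes below differ only in the PreRegAlloc flag passed to
// the base class: MachineLICM is the post-regalloc variant and EarlyMachineLICM
// the pre-regalloc (SSA) variant. Note that runOnMachineFunction re-derives
// PreRegAlloc from MRI->isSSA().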
  219. class MachineLICM : public MachineLICMBase {
  220. public:
  221. static char ID;
  222. MachineLICM() : MachineLICMBase(ID, false) {
  223. initializeMachineLICMPass(*PassRegistry::getPassRegistry());
  224. }
  225. };
  226. class EarlyMachineLICM : public MachineLICMBase {
  227. public:
  228. static char ID;
  229. EarlyMachineLICM() : MachineLICMBase(ID, true) {
  230. initializeEarlyMachineLICMPass(*PassRegistry::getPassRegistry());
  231. }
  232. };
  233. } // end anonymous namespace
  234. char MachineLICM::ID;
  235. char EarlyMachineLICM::ID;
  236. char &llvm::MachineLICMID = MachineLICM::ID;
  237. char &llvm::EarlyMachineLICMID = EarlyMachineLICM::ID;
  238. INITIALIZE_PASS_BEGIN(MachineLICM, DEBUG_TYPE,
  239. "Machine Loop Invariant Code Motion", false, false)
  240. INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
  241. INITIALIZE_PASS_DEPENDENCY(MachineBlockFrequencyInfo)
  242. INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
  243. INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
  244. INITIALIZE_PASS_END(MachineLICM, DEBUG_TYPE,
  245. "Machine Loop Invariant Code Motion", false, false)
  246. INITIALIZE_PASS_BEGIN(EarlyMachineLICM, "early-machinelicm",
  247. "Early Machine Loop Invariant Code Motion", false, false)
  248. INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
  249. INITIALIZE_PASS_DEPENDENCY(MachineBlockFrequencyInfo)
  250. INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
  251. INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
  252. INITIALIZE_PASS_END(EarlyMachineLICM, "early-machinelicm",
  253. "Early Machine Loop Invariant Code Motion", false, false)
  254. /// Test if the given loop is the outer-most loop that has a unique predecessor.
  255. static bool LoopIsOuterMostWithPredecessor(MachineLoop *CurLoop) {
  256. // Check whether this loop even has a unique predecessor.
  257. if (!CurLoop->getLoopPredecessor())
  258. return false;
  259. // Ok, now check to see if any of its outer loops do.
  260. for (MachineLoop *L = CurLoop->getParentLoop(); L; L = L->getParentLoop())
  261. if (L->getLoopPredecessor())
  262. return false;
  263. // None of them did, so this is the outermost with a unique predecessor.
  264. return true;
  265. }
  266. bool MachineLICMBase::runOnMachineFunction(MachineFunction &MF) {
  267. if (skipFunction(MF.getFunction()))
  268. return false;
  269. Changed = FirstInLoop = false;
  270. const TargetSubtargetInfo &ST = MF.getSubtarget();
  271. TII = ST.getInstrInfo();
  272. TLI = ST.getTargetLowering();
  273. TRI = ST.getRegisterInfo();
  274. MFI = &MF.getFrameInfo();
  275. MRI = &MF.getRegInfo();
  276. SchedModel.init(&ST);
  277. PreRegAlloc = MRI->isSSA();
  278. HasProfileData = MF.getFunction().hasProfileData();
  279. if (PreRegAlloc)
  280. LLVM_DEBUG(dbgs() << "******** Pre-regalloc Machine LICM: ");
  281. else
  282. LLVM_DEBUG(dbgs() << "******** Post-regalloc Machine LICM: ");
  283. LLVM_DEBUG(dbgs() << MF.getName() << " ********\n");
  284. if (PreRegAlloc) {
  285. // Estimate register pressure during pre-regalloc pass.
  286. unsigned NumRPS = TRI->getNumRegPressureSets();
  287. RegPressure.resize(NumRPS);
  288. std::fill(RegPressure.begin(), RegPressure.end(), 0);
  289. RegLimit.resize(NumRPS);
  290. for (unsigned i = 0, e = NumRPS; i != e; ++i)
  291. RegLimit[i] = TRI->getRegPressureSetLimit(MF, i);
  292. }
  293. // Get our Loop information...
  294. if (DisableHoistingToHotterBlocks != UseBFI::None)
  295. MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
  296. MLI = &getAnalysis<MachineLoopInfo>();
  297. DT = &getAnalysis<MachineDominatorTree>();
  298. AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  299. SmallVector<MachineLoop *, 8> Worklist(MLI->begin(), MLI->end());
  300. while (!Worklist.empty()) {
  301. CurLoop = Worklist.pop_back_val();
  302. CurPreheader = nullptr;
  303. ExitBlocks.clear();
  304. // If this is done before regalloc, only visit outer-most preheader-sporting
  305. // loops.
  306. if (PreRegAlloc && !LoopIsOuterMostWithPredecessor(CurLoop)) {
  307. Worklist.append(CurLoop->begin(), CurLoop->end());
  308. continue;
  309. }
  310. CurLoop->getExitBlocks(ExitBlocks);
  311. if (!PreRegAlloc)
  312. HoistRegionPostRA();
  313. else {
  314. // CSEMap is initialized for loop header when the first instruction is
  315. // being hoisted.
  316. MachineDomTreeNode *N = DT->getNode(CurLoop->getHeader());
  317. FirstInLoop = true;
  318. HoistOutOfLoop(N);
  319. CSEMap.clear();
  320. }
  321. }
  322. return Changed;
  323. }
  324. /// Return true if instruction stores to the specified frame.
  325. static bool InstructionStoresToFI(const MachineInstr *MI, int FI) {
  326. // Check mayStore before memory operands so that instructions with no memory
  327. // operands (e.g. DBG_VALUEs) return false rather than being treated as stores.
  328. if (!MI->mayStore())
  329. return false;
  330. // If we lost memory operands, conservatively assume that the instruction
  331. // writes to all slots.
  332. if (MI->memoperands_empty())
  333. return true;
  334. for (const MachineMemOperand *MemOp : MI->memoperands()) {
  335. if (!MemOp->isStore() || !MemOp->getPseudoValue())
  336. continue;
  337. if (const FixedStackPseudoSourceValue *Value =
  338. dyn_cast<FixedStackPseudoSourceValue>(MemOp->getPseudoValue())) {
  339. if (Value->getFrameIndex() == FI)
  340. return true;
  341. }
  342. }
  343. return false;
  344. }
  345. /// Examine the instruction as a potential LICM candidate. Also
  346. /// gather register def and frame object update information.
  347. void MachineLICMBase::ProcessMI(MachineInstr *MI,
  348. BitVector &PhysRegDefs,
  349. BitVector &PhysRegClobbers,
  350. SmallSet<int, 32> &StoredFIs,
  351. SmallVectorImpl<CandidateInfo> &Candidates) {
  352. bool RuledOut = false;
  353. bool HasNonInvariantUse = false;
  354. unsigned Def = 0;
  355. for (const MachineOperand &MO : MI->operands()) {
  356. if (MO.isFI()) {
  357. // Remember if the instruction stores to the frame index.
  358. int FI = MO.getIndex();
  359. if (!StoredFIs.count(FI) &&
  360. MFI->isSpillSlotObjectIndex(FI) &&
  361. InstructionStoresToFI(MI, FI))
  362. StoredFIs.insert(FI);
  363. HasNonInvariantUse = true;
  364. continue;
  365. }
  366. // We can't hoist an instruction defining a physreg that is clobbered in
  367. // the loop.
  368. if (MO.isRegMask()) {
  369. PhysRegClobbers.setBitsNotInMask(MO.getRegMask());
  370. continue;
  371. }
  372. if (!MO.isReg())
  373. continue;
  374. Register Reg = MO.getReg();
  375. if (!Reg)
  376. continue;
  377. assert(Reg.isPhysical() && "Not expecting virtual register!");
  378. if (!MO.isDef()) {
  379. if (Reg && (PhysRegDefs.test(Reg) || PhysRegClobbers.test(Reg)))
  380. // If it's using a non-loop-invariant register, then it's obviously not
  381. // safe to hoist.
  382. HasNonInvariantUse = true;
  383. continue;
  384. }
  385. if (MO.isImplicit()) {
  386. for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
  387. PhysRegClobbers.set(*AI);
  388. if (!MO.isDead())
  389. // Non-dead implicit def? This cannot be hoisted.
  390. RuledOut = true;
  391. // No need to check if a dead implicit def is also defined by
  392. // another instruction.
  393. continue;
  394. }
  395. // FIXME: For now, avoid instructions with multiple defs, unless
  396. // it's a dead implicit def.
  397. if (Def)
  398. RuledOut = true;
  399. else
  400. Def = Reg;
  401. // If we have already seen another instruction that defines the same
  402. // register, then this is not safe. Two defs is indicated by setting a
  403. // PhysRegClobbers bit.
  404. for (MCRegAliasIterator AS(Reg, TRI, true); AS.isValid(); ++AS) {
  405. if (PhysRegDefs.test(*AS))
  406. PhysRegClobbers.set(*AS);
  407. }
  408. // Need a second loop because MCRegAliasIterator can visit the same
  409. // register twice.
  410. for (MCRegAliasIterator AS(Reg, TRI, true); AS.isValid(); ++AS)
  411. PhysRegDefs.set(*AS);
  412. if (PhysRegClobbers.test(Reg))
  413. // The register defined by MI is also defined by another instruction in
  414. // the loop, so MI cannot be a LICM candidate.
  415. RuledOut = true;
  416. }
  417. // Only consider reloads for now and remats which do not have register
  418. // operands. FIXME: Consider unfolding load folding instructions.
  419. if (Def && !RuledOut) {
  420. int FI = std::numeric_limits<int>::min();
  421. if ((!HasNonInvariantUse && IsLICMCandidate(*MI)) ||
  422. (TII->isLoadFromStackSlot(*MI, FI) && MFI->isSpillSlotObjectIndex(FI)))
  423. Candidates.push_back(CandidateInfo(MI, Def, FI));
  424. }
  425. }
  426. /// Walk the specified region of the CFG and hoist loop invariants out to the
  427. /// preheader.
  428. void MachineLICMBase::HoistRegionPostRA() {
  429. MachineBasicBlock *Preheader = getCurPreheader();
  430. if (!Preheader)
  431. return;
  432. unsigned NumRegs = TRI->getNumRegs();
  433. BitVector PhysRegDefs(NumRegs); // Regs defined once in the loop.
  434. BitVector PhysRegClobbers(NumRegs); // Regs defined more than once.
  435. SmallVector<CandidateInfo, 32> Candidates;
  436. SmallSet<int, 32> StoredFIs;
  437. // Walk the entire region, count number of defs for each register, and
  438. // collect potential LICM candidates.
  439. for (MachineBasicBlock *BB : CurLoop->getBlocks()) {
  440. // If the header of the loop containing this basic block is a landing pad,
  441. // then don't try to hoist instructions out of this loop.
  442. const MachineLoop *ML = MLI->getLoopFor(BB);
  443. if (ML && ML->getHeader()->isEHPad()) continue;
  444. // Conservatively treat live-ins as external defs.
  445. // FIXME: That means a reload that is reused in successor block(s) will not
  446. // be LICM'ed.
  447. for (const auto &LI : BB->liveins()) {
  448. for (MCRegAliasIterator AI(LI.PhysReg, TRI, true); AI.isValid(); ++AI)
  449. PhysRegDefs.set(*AI);
  450. }
  451. SpeculationState = SpeculateUnknown;
  452. for (MachineInstr &MI : *BB)
  453. ProcessMI(&MI, PhysRegDefs, PhysRegClobbers, StoredFIs, Candidates);
  454. }
  455. // Gather the registers read / clobbered by the terminator.
  456. BitVector TermRegs(NumRegs);
  457. MachineBasicBlock::iterator TI = Preheader->getFirstTerminator();
  458. if (TI != Preheader->end()) {
  459. for (const MachineOperand &MO : TI->operands()) {
  460. if (!MO.isReg())
  461. continue;
  462. Register Reg = MO.getReg();
  463. if (!Reg)
  464. continue;
  465. for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
  466. TermRegs.set(*AI);
  467. }
  468. }
  469. // Now evaluate whether the potential candidates qualify.
  470. // 1. Check if the candidate defined register is defined by another
  471. // instruction in the loop.
  472. // 2. If the candidate is a load from stack slot (always true for now),
  473. // check if the slot is stored anywhere in the loop.
  474. // 3. Make sure the candidate's def does not clobber registers read by
  475. // the terminator, and that the def is not itself clobbered by the
  476. // terminator.
  477. for (CandidateInfo &Candidate : Candidates) {
  478. if (Candidate.FI != std::numeric_limits<int>::min() &&
  479. StoredFIs.count(Candidate.FI))
  480. continue;
  481. unsigned Def = Candidate.Def;
  482. if (!PhysRegClobbers.test(Def) && !TermRegs.test(Def)) {
  483. bool Safe = true;
  484. MachineInstr *MI = Candidate.MI;
  485. for (const MachineOperand &MO : MI->operands()) {
  486. if (!MO.isReg() || MO.isDef() || !MO.getReg())
  487. continue;
  488. Register Reg = MO.getReg();
  489. if (PhysRegDefs.test(Reg) ||
  490. PhysRegClobbers.test(Reg)) {
  491. // If it's using a non-loop-invariant register, then it's obviously
  492. // not safe to hoist.
  493. Safe = false;
  494. break;
  495. }
  496. }
  497. if (Safe)
  498. HoistPostRA(MI, Candidate.Def);
  499. }
  500. }
  501. }
  502. /// Add register 'Reg' to the livein sets of BBs in the current loop, and make
  503. /// sure it is not killed by any instructions in the loop.
  504. void MachineLICMBase::AddToLiveIns(MCRegister Reg) {
  505. for (MachineBasicBlock *BB : CurLoop->getBlocks()) {
  506. if (!BB->isLiveIn(Reg))
  507. BB->addLiveIn(Reg);
  508. for (MachineInstr &MI : *BB) {
  509. for (MachineOperand &MO : MI.operands()) {
  510. if (!MO.isReg() || !MO.getReg() || MO.isDef()) continue;
  511. if (MO.getReg() == Reg || TRI->isSuperRegister(Reg, MO.getReg()))
  512. MO.setIsKill(false);
  513. }
  514. }
  515. }
  516. }
  517. /// When an instruction is found to use only loop invariant operands that are
  518. /// safe to hoist, this function is called to do the dirty work.
  519. void MachineLICMBase::HoistPostRA(MachineInstr *MI, unsigned Def) {
  520. MachineBasicBlock *Preheader = getCurPreheader();
  521. // Now move the instructions to the predecessor, inserting it before any
  522. // terminator instructions.
  523. LLVM_DEBUG(dbgs() << "Hoisting to " << printMBBReference(*Preheader)
  524. << " from " << printMBBReference(*MI->getParent()) << ": "
  525. << *MI);
  526. // Splice the instruction to the preheader.
  527. MachineBasicBlock *MBB = MI->getParent();
  528. Preheader->splice(Preheader->getFirstTerminator(), MBB, MI);
  529. // Since we are moving the instruction out of its basic block, we do not
  530. // retain its debug location. Doing so would degrade the debugging
  531. // experience and adversely affect the accuracy of profiling information.
  532. assert(!MI->isDebugInstr() && "Should not hoist debug inst");
  533. MI->setDebugLoc(DebugLoc());
  534. // Add register to livein list to all the BBs in the current loop since a
  535. // loop invariant must be kept live throughout the whole loop. This is
  536. // important to ensure later passes do not scavenge the def register.
  537. AddToLiveIns(Def);
  538. ++NumPostRAHoisted;
  539. Changed = true;
  540. }
  541. /// Check if this MBB is guaranteed to execute. If not, then a load from this
  542. /// MBB may not be safe to hoist.
  543. bool MachineLICMBase::IsGuaranteedToExecute(MachineBasicBlock *BB) {
  544. if (SpeculationState != SpeculateUnknown)
  545. return SpeculationState == SpeculateFalse;
  546. if (BB != CurLoop->getHeader()) {
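// The loop header executes on every iteration once the loop is entered; any
// other block is only guaranteed to execute if it dominates all exiting blocks.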
  547. // Check loop exiting blocks.
  548. SmallVector<MachineBasicBlock*, 8> CurrentLoopExitingBlocks;
  549. CurLoop->getExitingBlocks(CurrentLoopExitingBlocks);
  550. for (MachineBasicBlock *CurrentLoopExitingBlock : CurrentLoopExitingBlocks)
  551. if (!DT->dominates(BB, CurrentLoopExitingBlock)) {
  552. SpeculationState = SpeculateTrue;
  553. return false;
  554. }
  555. }
  556. SpeculationState = SpeculateFalse;
  557. return true;
  558. }
  559. /// Check if \p MI is trivially rematerializable and has no virtual register
  560. /// uses. Even if an instruction is rematerializable, RA might not actually
  561. /// rematerialize it when it has virtual register uses; in that case we do not
  562. /// want to hoist it out of the loop hoping that RA will sink it back if needed.
  563. bool MachineLICMBase::isTriviallyReMaterializable(
  564. const MachineInstr &MI) const {
  565. if (!TII->isTriviallyReMaterializable(MI))
  566. return false;
  567. for (const MachineOperand &MO : MI.operands()) {
  568. if (MO.isReg() && MO.isUse() && MO.getReg().isVirtual())
  569. return false;
  570. }
  571. return true;
  572. }
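// EnterScope/ExitScope maintain BackTrace: entering a dominator-tree scope pushes
// a snapshot of the current register pressure estimate, and exiting pops it.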
  573. void MachineLICMBase::EnterScope(MachineBasicBlock *MBB) {
  574. LLVM_DEBUG(dbgs() << "Entering " << printMBBReference(*MBB) << '\n');
  575. // Remember livein register pressure.
  576. BackTrace.push_back(RegPressure);
  577. }
  578. void MachineLICMBase::ExitScope(MachineBasicBlock *MBB) {
  579. LLVM_DEBUG(dbgs() << "Exiting " << printMBBReference(*MBB) << '\n');
  580. BackTrace.pop_back();
  581. }
  582. /// Destroy scope for the MBB that corresponds to the given dominator tree node
  583. /// if it's a leaf or all of its children are done. Walk up the dominator tree to
  584. /// destroy ancestors which are now done.
  585. void MachineLICMBase::ExitScopeIfDone(MachineDomTreeNode *Node,
  586. DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren,
  587. const DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> &ParentMap) {
  588. if (OpenChildren[Node])
  589. return;
  590. for(;;) {
  591. ExitScope(Node->getBlock());
  592. // Now traverse upwards to pop ancestors whose children are all done.
  593. MachineDomTreeNode *Parent = ParentMap.lookup(Node);
  594. if (!Parent || --OpenChildren[Parent] != 0)
  595. break;
  596. Node = Parent;
  597. }
  598. }
  599. /// Walk the specified loop in the CFG (defined by all blocks dominated by the
  600. /// specified header block, and that are in the current loop) in depth first
  601. /// order w.r.t the DominatorTree. This allows us to visit definitions before
  602. /// uses, allowing us to hoist a loop body in one pass without iteration.
  603. void MachineLICMBase::HoistOutOfLoop(MachineDomTreeNode *HeaderN) {
  604. MachineBasicBlock *Preheader = getCurPreheader();
  605. if (!Preheader)
  606. return;
  607. SmallVector<MachineDomTreeNode*, 32> Scopes;
  608. SmallVector<MachineDomTreeNode*, 8> WorkList;
  609. DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> ParentMap;
  610. DenseMap<MachineDomTreeNode*, unsigned> OpenChildren;
  611. // Perform a DFS walk to determine the order of visit.
  612. WorkList.push_back(HeaderN);
  613. while (!WorkList.empty()) {
  614. MachineDomTreeNode *Node = WorkList.pop_back_val();
  615. assert(Node && "Null dominator tree node?");
  616. MachineBasicBlock *BB = Node->getBlock();
  617. // If the header of the loop containing this basic block is a landing pad,
  618. // then don't try to hoist instructions out of this loop.
  619. const MachineLoop *ML = MLI->getLoopFor(BB);
  620. if (ML && ML->getHeader()->isEHPad())
  621. continue;
  622. // If this subregion is not in the top level loop at all, exit.
  623. if (!CurLoop->contains(BB))
  624. continue;
  625. Scopes.push_back(Node);
  626. unsigned NumChildren = Node->getNumChildren();
  627. // Don't hoist things out of a large switch statement. This often causes
  628. // code to be hoisted that wasn't going to be executed, and increases
  629. // register pressure in a situation where it's likely to matter.
  630. if (BB->succ_size() >= 25)
  631. NumChildren = 0;
  632. OpenChildren[Node] = NumChildren;
  633. if (NumChildren) {
  634. // Add children in reverse order as then the next popped worklist node is
  635. // the first child of this node. This means we ultimately traverse the
  636. // DOM tree in exactly the same order as if we'd recursed.
  637. for (MachineDomTreeNode *Child : reverse(Node->children())) {
  638. ParentMap[Child] = Node;
  639. WorkList.push_back(Child);
  640. }
  641. }
  642. }
  643. if (Scopes.size() == 0)
  644. return;
  645. // Compute registers which are livein into the loop headers.
  646. RegSeen.clear();
  647. BackTrace.clear();
  648. InitRegPressure(Preheader);
  649. // Now perform LICM.
  650. for (MachineDomTreeNode *Node : Scopes) {
  651. MachineBasicBlock *MBB = Node->getBlock();
  652. EnterScope(MBB);
  653. // Process the block
  654. SpeculationState = SpeculateUnknown;
  655. for (MachineInstr &MI : llvm::make_early_inc_range(*MBB)) {
  656. if (!Hoist(&MI, Preheader))
  657. UpdateRegPressure(&MI);
  658. // If we have hoisted an instruction that may store, it can only be a
  659. // constant store.
  660. }
  661. // If it's a leaf node, it's done. Traverse upwards to pop ancestors.
  662. ExitScopeIfDone(Node, OpenChildren, ParentMap);
  663. }
  664. }
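// An operand is treated as a kill if it carries a kill flag or if it is the
// register's only non-debug use.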
  665. static bool isOperandKill(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  666. return MO.isKill() || MRI->hasOneNonDBGUse(MO.getReg());
  667. }
  668. /// Find all virtual register references that are liveout of the preheader to
  669. /// initialize the starting "register pressure". Note this does not count live
  670. /// through (livein but not used) registers.
  671. void MachineLICMBase::InitRegPressure(MachineBasicBlock *BB) {
  672. std::fill(RegPressure.begin(), RegPressure.end(), 0);
  673. // If the preheader has only a single predecessor and it ends with a
  674. // fallthrough or an unconditional branch, then scan its predecessor for live
  675. // defs as well. This happens whenever the preheader is created by splitting
  676. // the critical edge from the loop predecessor to the loop header.
  677. if (BB->pred_size() == 1) {
  678. MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  679. SmallVector<MachineOperand, 4> Cond;
  680. if (!TII->analyzeBranch(*BB, TBB, FBB, Cond, false) && Cond.empty())
  681. InitRegPressure(*BB->pred_begin());
  682. }
  683. for (const MachineInstr &MI : *BB)
  684. UpdateRegPressure(&MI, /*ConsiderUnseenAsDef=*/true);
  685. }
  686. /// Update estimate of register pressure after the specified instruction.
  687. void MachineLICMBase::UpdateRegPressure(const MachineInstr *MI,
  688. bool ConsiderUnseenAsDef) {
  689. auto Cost = calcRegisterCost(MI, /*ConsiderSeen=*/true, ConsiderUnseenAsDef);
  690. for (const auto &RPIdAndCost : Cost) {
  691. unsigned Class = RPIdAndCost.first;
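// A negative cost (from live ranges that end) must not underflow the unsigned
// pressure estimate, so clamp at zero.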
  692. if (static_cast<int>(RegPressure[Class]) < -RPIdAndCost.second)
  693. RegPressure[Class] = 0;
  694. else
  695. RegPressure[Class] += RPIdAndCost.second;
  696. }
  697. }
  698. /// Calculate the additional register pressure that the registers used in MI
  699. /// cause.
  700. ///
  701. /// If 'ConsiderSeen' is true, updates 'RegSeen' and uses the information to
  702. /// figure out which usages are live-ins.
  703. /// FIXME: Figure out a way to consider 'RegSeen' from all code paths.
  704. DenseMap<unsigned, int>
  705. MachineLICMBase::calcRegisterCost(const MachineInstr *MI, bool ConsiderSeen,
  706. bool ConsiderUnseenAsDef) {
  707. DenseMap<unsigned, int> Cost;
  708. if (MI->isImplicitDef())
  709. return Cost;
  710. for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
  711. const MachineOperand &MO = MI->getOperand(i);
  712. if (!MO.isReg() || MO.isImplicit())
  713. continue;
  714. Register Reg = MO.getReg();
  715. if (!Reg.isVirtual())
  716. continue;
  717. // FIXME: It seems bad to use RegSeen only for some of these calculations.
  718. bool isNew = ConsiderSeen ? RegSeen.insert(Reg).second : false;
  719. const TargetRegisterClass *RC = MRI->getRegClass(Reg);
  720. RegClassWeight W = TRI->getRegClassWeight(RC);
  721. int RCCost = 0;
  722. if (MO.isDef())
  723. RCCost = W.RegWeight;
  724. else {
  725. bool isKill = isOperandKill(MO, MRI);
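// A new, non-killed use is treated as a live-in when ConsiderUnseenAsDef is set
// (adds pressure); a kill of an already-seen register ends its live range here
// (subtracts pressure).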
  726. if (isNew && !isKill && ConsiderUnseenAsDef)
  727. // Haven't seen this, it must be a livein.
  728. RCCost = W.RegWeight;
  729. else if (!isNew && isKill)
  730. RCCost = -W.RegWeight;
  731. }
  732. if (RCCost == 0)
  733. continue;
  734. const int *PS = TRI->getRegClassPressureSets(RC);
  735. for (; *PS != -1; ++PS) {
  736. if (Cost.find(*PS) == Cost.end())
  737. Cost[*PS] = RCCost;
  738. else
  739. Cost[*PS] += RCCost;
  740. }
  741. }
  742. return Cost;
  743. }
  744. /// Return true if this machine instruction loads from global offset table or
  745. /// constant pool.
  746. static bool mayLoadFromGOTOrConstantPool(MachineInstr &MI) {
  747. assert(MI.mayLoad() && "Expected MI that loads!");
  748. // If we lost memory operands, conservatively assume that the instruction
  749. // reads from everything.
  750. if (MI.memoperands_empty())
  751. return true;
  752. for (MachineMemOperand *MemOp : MI.memoperands())
  753. if (const PseudoSourceValue *PSV = MemOp->getPseudoValue())
  754. if (PSV->isGOT() || PSV->isConstantPool())
  755. return true;
  756. return false;
  757. }
  758. // This function iterates through all the operands of the input store MI and
  759. // checks that each register operand satisfies isCallerPreservedPhysReg.
  760. // This means the value being stored and the address where it is being stored
  761. // are constant throughout the body of the function (not including prologue and
  762. // epilogue). When called with an MI that isn't a store, it returns false.
  763. // A future improvement could be to check whether the store registers are
  764. // constant throughout the loop rather than throughout the function.
  765. static bool isInvariantStore(const MachineInstr &MI,
  766. const TargetRegisterInfo *TRI,
  767. const MachineRegisterInfo *MRI) {
  768. bool FoundCallerPresReg = false;
  769. if (!MI.mayStore() || MI.hasUnmodeledSideEffects() ||
  770. (MI.getNumOperands() == 0))
  771. return false;
  772. // Check that all register operands are caller-preserved physical registers.
  773. for (const MachineOperand &MO : MI.operands()) {
  774. if (MO.isReg()) {
  775. Register Reg = MO.getReg();
  776. // If operand is a virtual register, check if it comes from a copy of a
  777. // physical register.
  778. if (Reg.isVirtual())
  779. Reg = TRI->lookThruCopyLike(MO.getReg(), MRI);
  780. if (Reg.isVirtual())
  781. return false;
  782. if (!TRI->isCallerPreservedPhysReg(Reg.asMCReg(), *MI.getMF()))
  783. return false;
  784. else
  785. FoundCallerPresReg = true;
  786. } else if (!MO.isImm()) {
  787. return false;
  788. }
  789. }
  790. return FoundCallerPresReg;
  791. }
  792. // Return true if the input MI is a copy instruction that feeds an invariant
  793. // store instruction. This means that the src of the copy has to satisfy
  794. // isCallerPreservedPhysReg and at least one of its users should satisfy
  795. // isInvariantStore.
  796. static bool isCopyFeedingInvariantStore(const MachineInstr &MI,
  797. const MachineRegisterInfo *MRI,
  798. const TargetRegisterInfo *TRI) {
  799. // FIXME: If targets would like to look through instructions that aren't
  800. // pure copies, this can be updated to a query.
  801. if (!MI.isCopy())
  802. return false;
  803. const MachineFunction *MF = MI.getMF();
  804. // Check that we are copying a constant physical register.
  805. Register CopySrcReg = MI.getOperand(1).getReg();
  806. if (CopySrcReg.isVirtual())
  807. return false;
  808. if (!TRI->isCallerPreservedPhysReg(CopySrcReg.asMCReg(), *MF))
  809. return false;
  810. Register CopyDstReg = MI.getOperand(0).getReg();
  811. // Check if any of the uses of the copy are invariant stores.
  812. assert(CopyDstReg.isVirtual() && "copy dst is not a virtual reg");
  813. for (MachineInstr &UseMI : MRI->use_instructions(CopyDstReg)) {
  814. if (UseMI.mayStore() && isInvariantStore(UseMI, TRI, MRI))
  815. return true;
  816. }
  817. return false;
  818. }
  819. /// Returns true if the instruction may be a suitable candidate for LICM.
  820. /// e.g. If the instruction is a call, then it's obviously not safe to hoist it.
  821. bool MachineLICMBase::IsLICMCandidate(MachineInstr &I) {
  822. // Check if it's safe to move the instruction.
  823. bool DontMoveAcrossStore = true;
  824. if ((!I.isSafeToMove(AA, DontMoveAcrossStore)) &&
  825. !(HoistConstStores && isInvariantStore(I, TRI, MRI))) {
  826. LLVM_DEBUG(dbgs() << "LICM: Instruction not safe to move.\n");
  827. return false;
  828. }
  829. // If it is a load then check if it is guaranteed to execute by making sure
  830. // that it dominates all exiting blocks. If it doesn't, then there is a path
  831. // out of the loop which does not execute this load, so we can't hoist it.
  832. // Loads from constant memory are safe to speculate, for example indexed load
  833. // from a jump table.
  834. // Stores and side effects are already checked by isSafeToMove.
  835. if (I.mayLoad() && !mayLoadFromGOTOrConstantPool(I) &&
  836. !IsGuaranteedToExecute(I.getParent())) {
  837. LLVM_DEBUG(dbgs() << "LICM: Load not guaranteed to execute.\n");
  838. return false;
  839. }
  840. // The convergent attribute is used on operations that involve inter-thread
  841. // communication and whose results are implicitly affected by the enclosing
  842. // control flow. It is not safe to hoist or sink such operations across
  843. // control flow.
  844. if (I.isConvergent())
  845. return false;
  846. if (!TII->shouldHoist(I, CurLoop))
  847. return false;
  848. return true;
  849. }
  850. /// Returns true if the instruction is loop invariant.
  851. bool MachineLICMBase::IsLoopInvariantInst(MachineInstr &I) {
  852. if (!IsLICMCandidate(I)) {
  853. LLVM_DEBUG(dbgs() << "LICM: Instruction not a LICM candidate\n");
  854. return false;
  855. }
  856. return CurLoop->isLoopInvariant(I);
  857. }
  858. /// Return true if the specified instruction is used by a phi node and hoisting
  859. /// it could cause a copy to be inserted.
  860. bool MachineLICMBase::HasLoopPHIUse(const MachineInstr *MI) const {
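// Worklist walk over the defs of MI (looking through in-loop copies) searching
// for PHI uses that would force a copy to be inserted.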
  861. SmallVector<const MachineInstr*, 8> Work(1, MI);
  862. do {
  863. MI = Work.pop_back_val();
  864. for (const MachineOperand &MO : MI->operands()) {
  865. if (!MO.isReg() || !MO.isDef())
  866. continue;
  867. Register Reg = MO.getReg();
  868. if (!Reg.isVirtual())
  869. continue;
  870. for (MachineInstr &UseMI : MRI->use_instructions(Reg)) {
  871. // A PHI may cause a copy to be inserted.
  872. if (UseMI.isPHI()) {
  873. // A PHI inside the loop causes a copy because the live range of Reg is
  874. // extended across the PHI.
  875. if (CurLoop->contains(&UseMI))
  876. return true;
  877. // A PHI in an exit block can cause a copy to be inserted if the PHI
  878. // has multiple predecessors in the loop with different values.
  879. // For now, approximate by rejecting all exit blocks.
  880. if (isExitBlock(UseMI.getParent()))
  881. return true;
  882. continue;
  883. }
  884. // Look past copies as well.
  885. if (UseMI.isCopy() && CurLoop->contains(&UseMI))
  886. Work.push_back(&UseMI);
  887. }
  888. }
  889. } while (!Work.empty());
  890. return false;
  891. }
  892. /// Compute the operand latency between a def of 'Reg' and a use in the current
  893. /// loop; return true if the target considers it high.
  894. bool MachineLICMBase::HasHighOperandLatency(MachineInstr &MI, unsigned DefIdx,
  895. Register Reg) const {
  896. if (MRI->use_nodbg_empty(Reg))
  897. return false;
  898. for (MachineInstr &UseMI : MRI->use_nodbg_instructions(Reg)) {
  899. if (UseMI.isCopyLike())
  900. continue;
  901. if (!CurLoop->contains(UseMI.getParent()))
  902. continue;
  903. for (unsigned i = 0, e = UseMI.getNumOperands(); i != e; ++i) {
  904. const MachineOperand &MO = UseMI.getOperand(i);
  905. if (!MO.isReg() || !MO.isUse())
  906. continue;
  907. Register MOReg = MO.getReg();
  908. if (MOReg != Reg)
  909. continue;
  910. if (TII->hasHighOperandLatency(SchedModel, MRI, MI, DefIdx, UseMI, i))
  911. return true;
  912. }
  913. // Only look at the first in-loop use.
  914. break;
  915. }
  916. return false;
  917. }
  918. /// Return true if the instruction is marked "cheap" or the operand latency
  919. /// between its def and a use is one or less.
  920. bool MachineLICMBase::IsCheapInstruction(MachineInstr &MI) const {
  921. if (TII->isAsCheapAsAMove(MI) || MI.isCopyLike())
  922. return true;
  923. bool isCheap = false;
  924. unsigned NumDefs = MI.getDesc().getNumDefs();
  925. for (unsigned i = 0, e = MI.getNumOperands(); NumDefs && i != e; ++i) {
  926. MachineOperand &DefMO = MI.getOperand(i);
  927. if (!DefMO.isReg() || !DefMO.isDef())
  928. continue;
  929. --NumDefs;
  930. Register Reg = DefMO.getReg();
  931. if (Reg.isPhysical())
  932. continue;
  933. if (!TII->hasLowDefLatency(SchedModel, MI, i))
  934. return false;
  935. isCheap = true;
  936. }
  937. return isCheap;
  938. }
  939. /// Visit BBs from header to current BB, check if hoisting an instruction of the
  940. /// given cost matrix can cause high register pressure.
  941. bool
  942. MachineLICMBase::CanCauseHighRegPressure(const DenseMap<unsigned, int>& Cost,
  943. bool CheapInstr) {
  944. for (const auto &RPIdAndCost : Cost) {
  945. if (RPIdAndCost.second <= 0)
  946. continue;
  947. unsigned Class = RPIdAndCost.first;
  948. int Limit = RegLimit[Class];
  949. // Don't hoist cheap instructions if they would increase register pressure,
  950. // even if we're under the limit.
  951. if (CheapInstr && !HoistCheapInsts)
  952. return true;
  953. for (const auto &RP : BackTrace)
  954. if (static_cast<int>(RP[Class]) + RPIdAndCost.second >= Limit)
  955. return true;
  956. }
  957. return false;
  958. }
  959. /// Traverse the back trace from header to the current block and update their
  960. /// register pressures to reflect the effect of hoisting MI from the current
  961. /// block to the preheader.
  962. void MachineLICMBase::UpdateBackTraceRegPressure(const MachineInstr *MI) {
  963. // First compute the 'cost' of the instruction, i.e. its contribution
  964. // to register pressure.
  965. auto Cost = calcRegisterCost(MI, /*ConsiderSeen=*/false,
  966. /*ConsiderUnseenAsDef=*/false);
  967. // Update register pressure of blocks from loop header to current block.
  968. for (auto &RP : BackTrace)
  969. for (const auto &RPIdAndCost : Cost)
  970. RP[RPIdAndCost.first] += RPIdAndCost.second;
  971. }
  972. /// Return true if it is potentially profitable to hoist the given loop
  973. /// invariant.
  974. bool MachineLICMBase::IsProfitableToHoist(MachineInstr &MI) {
  975. if (MI.isImplicitDef())
  976. return true;
  977. // Besides removing computation from the loop, hoisting an instruction has
  978. // these effects:
  979. //
  980. // - The value defined by the instruction becomes live across the entire
  981. // loop. This increases register pressure in the loop.
  982. //
  983. // - If the value is used by a PHI in the loop, a copy will be required for
  984. // lowering the PHI after extending the live range.
  985. //
  986. // - When hoisting the last use of a value in the loop, that value no longer
  987. // needs to be live in the loop. This lowers register pressure in the loop.
  988. if (HoistConstStores && isCopyFeedingInvariantStore(MI, MRI, TRI))
  989. return true;
  990. bool CheapInstr = IsCheapInstruction(MI);
  991. bool CreatesCopy = HasLoopPHIUse(&MI);
  992. // Don't hoist a cheap instruction if it would create a copy in the loop.
  993. if (CheapInstr && CreatesCopy) {
  994. LLVM_DEBUG(dbgs() << "Won't hoist cheap instr with loop PHI use: " << MI);
  995. return false;
  996. }
  997. // Rematerializable instructions should always be hoisted, since the
  998. // register allocator can just pull them down again when needed.
  999. if (isTriviallyReMaterializable(MI))
  1000. return true;
  1001. // FIXME: If there are long latency loop-invariant instructions inside the
  1002. // loop at this point, why didn't the optimizer's LICM hoist them?
  1003. for (unsigned i = 0, e = MI.getDesc().getNumOperands(); i != e; ++i) {
  1004. const MachineOperand &MO = MI.getOperand(i);
  1005. if (!MO.isReg() || MO.isImplicit())
  1006. continue;
  1007. Register Reg = MO.getReg();
  1008. if (!Reg.isVirtual())
  1009. continue;
  1010. if (MO.isDef() && HasHighOperandLatency(MI, i, Reg)) {
  1011. LLVM_DEBUG(dbgs() << "Hoist High Latency: " << MI);
  1012. ++NumHighLatency;
  1013. return true;
  1014. }
  1015. }
  1016. // Estimate register pressure to determine whether to LICM the instruction.
  1017. // In a low register pressure situation, we can be more aggressive about
  1018. // hoisting. Also, favor hoisting long-latency instructions even in a
  1019. // moderately high pressure situation.
  1020. // Cheap instructions will only be hoisted if they don't increase register
  1021. // pressure at all.
  1022. auto Cost = calcRegisterCost(&MI, /*ConsiderSeen=*/false,
  1023. /*ConsiderUnseenAsDef=*/false);
  1024. // Visit BBs from header to current BB, if hoisting this doesn't cause
  1025. // high register pressure, then it's safe to proceed.
  1026. if (!CanCauseHighRegPressure(Cost, CheapInstr)) {
  1027. LLVM_DEBUG(dbgs() << "Hoist non-reg-pressure: " << MI);
  1028. ++NumLowRP;
  1029. return true;
  1030. }
  1031. // Don't risk increasing register pressure if it would create copies.
  1032. if (CreatesCopy) {
  1033. LLVM_DEBUG(dbgs() << "Won't hoist instr with loop PHI use: " << MI);
  1034. return false;
  1035. }
  1036. // Do not "speculate" in high register pressure situation. If an
  1037. // instruction is not guaranteed to be executed in the loop, it's best to be
  1038. // conservative.
  1039. if (AvoidSpeculation &&
  1040. (!IsGuaranteedToExecute(MI.getParent()) && !MayCSE(&MI))) {
  1041. LLVM_DEBUG(dbgs() << "Won't speculate: " << MI);
  1042. return false;
  1043. }
  1044. // High register pressure situation, only hoist if the instruction is going
  1045. // to be remat'ed.
  1046. if (!isTriviallyReMaterializable(MI) &&
  1047. !MI.isDereferenceableInvariantLoad()) {
  1048. LLVM_DEBUG(dbgs() << "Can't remat / high reg-pressure: " << MI);
  1049. return false;
  1050. }
  1051. return true;
  1052. }
  1053. /// Unfold a load from the given MachineInstr if the load itself could be
  1054. /// hoisted. Return the unfolded and hoistable load, or null if the load
  1055. /// couldn't be unfolded or if it wouldn't be hoistable.
  1056. MachineInstr *MachineLICMBase::ExtractHoistableLoad(MachineInstr *MI) {
  1057. // Don't unfold simple loads.
  1058. if (MI->canFoldAsLoad())
  1059. return nullptr;
  1060. // If not, we may be able to unfold a load and hoist that.
  1061. // First test whether the instruction is loading from an amenable
  1062. // memory location.
  1063. if (!MI->isDereferenceableInvariantLoad())
  1064. return nullptr;
  1065. // Next determine the register class for a temporary register.
  1066. unsigned LoadRegIndex;
  1067. unsigned NewOpc =
  1068. TII->getOpcodeAfterMemoryUnfold(MI->getOpcode(),
  1069. /*UnfoldLoad=*/true,
  1070. /*UnfoldStore=*/false,
  1071. &LoadRegIndex);
  1072. if (NewOpc == 0) return nullptr;
  1073. const MCInstrDesc &MID = TII->get(NewOpc);
  1074. MachineFunction &MF = *MI->getMF();
  1075. const TargetRegisterClass *RC = TII->getRegClass(MID, LoadRegIndex, TRI, MF);
  1076. // Ok, we're unfolding. Create a temporary register and do the unfold.
  1077. Register Reg = MRI->createVirtualRegister(RC);
  1078. SmallVector<MachineInstr *, 2> NewMIs;
  1079. bool Success = TII->unfoldMemoryOperand(MF, *MI, Reg,
  1080. /*UnfoldLoad=*/true,
  1081. /*UnfoldStore=*/false, NewMIs);
  1082. (void)Success;
  1083. assert(Success &&
  1084. "unfoldMemoryOperand failed when getOpcodeAfterMemoryUnfold "
  1085. "succeeded!");
  1086. assert(NewMIs.size() == 2 &&
  1087. "Unfolded a load into multiple instructions!");
  1088. MachineBasicBlock *MBB = MI->getParent();
  1089. MachineBasicBlock::iterator Pos = MI;
  1090. MBB->insert(Pos, NewMIs[0]);
  1091. MBB->insert(Pos, NewMIs[1]);
  1092. // If unfolding produced a load that wasn't loop-invariant or profitable to
  1093. // hoist, discard the new instructions and bail.
  1094. if (!IsLoopInvariantInst(*NewMIs[0]) || !IsProfitableToHoist(*NewMIs[0])) {
  1095. NewMIs[0]->eraseFromParent();
  1096. NewMIs[1]->eraseFromParent();
  1097. return nullptr;
  1098. }
  1099. // Update register pressure for the unfolded instruction.
  1100. UpdateRegPressure(NewMIs[1]);
  1101. // Otherwise we successfully unfolded a load that we can hoist.
  1102. // Update the call site info.
  1103. if (MI->shouldUpdateCallSiteInfo())
  1104. MF.eraseCallSiteInfo(MI);
  1105. MI->eraseFromParent();
  1106. return NewMIs[0];
  1107. }
  1108. /// Initialize the CSE map with instructions that are in the current loop
  1109. /// preheader that may become duplicates of instructions that are hoisted
  1110. /// out of the loop.
  1111. void MachineLICMBase::InitCSEMap(MachineBasicBlock *BB) {
  1112. for (MachineInstr &MI : *BB)
  1113. CSEMap[MI.getOpcode()].push_back(&MI);
  1114. }
  1115. /// Find an instruction among PrevMIs that is a duplicate of MI.
  1116. /// Return this instruction if it's found.
  1117. MachineInstr *
  1118. MachineLICMBase::LookForDuplicate(const MachineInstr *MI,
  1119. std::vector<MachineInstr *> &PrevMIs) {
  1120. for (MachineInstr *PrevMI : PrevMIs)
  1121. if (TII->produceSameValue(*MI, *PrevMI, (PreRegAlloc ? MRI : nullptr)))
  1122. return PrevMI;
  1123. return nullptr;
  1124. }
  1125. /// Given a LICM'ed instruction, look for an instruction in the preheader that
  1126. /// computes the same value. If one is found, replace all uses of the registers
  1127. /// defined by MI with the corresponding definitions of the existing instruction
  1128. /// rather than hoisting MI to the preheader.
  1129. bool MachineLICMBase::EliminateCSE(
  1130. MachineInstr *MI,
  1131. DenseMap<unsigned, std::vector<MachineInstr *>>::iterator &CI) {
  1132. // Do not CSE implicit_def so ProcessImplicitDefs can properly propagate
  1133. // the undef property onto uses.
  1134. if (CI == CSEMap.end() || MI->isImplicitDef())
  1135. return false;
  1136. if (MachineInstr *Dup = LookForDuplicate(MI, CI->second)) {
  1137. LLVM_DEBUG(dbgs() << "CSEing " << *MI << " with " << *Dup);
  1138. // Replace virtual registers defined by MI by their counterparts defined
  1139. // by Dup.
  1140. SmallVector<unsigned, 2> Defs;
  1141. for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
  1142. const MachineOperand &MO = MI->getOperand(i);
  1143. // Physical registers may not differ here.
  1144. assert((!MO.isReg() || MO.getReg() == 0 || !MO.getReg().isPhysical() ||
  1145. MO.getReg() == Dup->getOperand(i).getReg()) &&
  1146. "Instructions with different phys regs are not identical!");
  1147. if (MO.isReg() && MO.isDef() && !MO.getReg().isPhysical())
  1148. Defs.push_back(i);
  1149. }
  1150. SmallVector<const TargetRegisterClass*, 2> OrigRCs;
  1151. for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
  1152. unsigned Idx = Defs[i];
  1153. Register Reg = MI->getOperand(Idx).getReg();
  1154. Register DupReg = Dup->getOperand(Idx).getReg();
  1155. OrigRCs.push_back(MRI->getRegClass(DupReg));
  1156. if (!MRI->constrainRegClass(DupReg, MRI->getRegClass(Reg))) {
  1157. // Restore the old register classes if there is more than one def.
  1158. for (unsigned j = 0; j != i; ++j)
  1159. MRI->setRegClass(Dup->getOperand(Defs[j]).getReg(), OrigRCs[j]);
  1160. return false;
  1161. }
  1162. }
  1163. for (unsigned Idx : Defs) {
  1164. Register Reg = MI->getOperand(Idx).getReg();
  1165. Register DupReg = Dup->getOperand(Idx).getReg();
  1166. MRI->replaceRegWith(Reg, DupReg);
  1167. MRI->clearKillFlags(DupReg);
  1168. // Clear Dup dead flag if any, we reuse it for Reg.
  1169. if (!MRI->use_nodbg_empty(DupReg))
  1170. Dup->getOperand(Idx).setIsDead(false);
  1171. }
  1172. MI->eraseFromParent();
  1173. ++NumCSEed;
  1174. return true;
  1175. }
  1176. return false;
  1177. }
  1178. /// Return true if the given instruction will be CSE'd if it's hoisted out of
  1179. /// the loop.
  1180. bool MachineLICMBase::MayCSE(MachineInstr *MI) {
  1181. unsigned Opcode = MI->getOpcode();
  1182. DenseMap<unsigned, std::vector<MachineInstr *>>::iterator CI =
  1183. CSEMap.find(Opcode);
  1184. // Do not CSE implicit_def so ProcessImplicitDefs can properly propagate
  1185. // the undef property onto uses.
  1186. if (CI == CSEMap.end() || MI->isImplicitDef())
  1187. return false;
  1188. return LookForDuplicate(MI, CI->second) != nullptr;
  1189. }
  1190. /// When an instruction is found to use only loop invariant operands
  1191. /// that are safe to hoist, this function is called to do the dirty work.
  1192. /// It returns true if the instruction is hoisted.
  1193. bool MachineLICMBase::Hoist(MachineInstr *MI, MachineBasicBlock *Preheader) {
  1194. MachineBasicBlock *SrcBlock = MI->getParent();
  1195. // Disable the instruction hoisting due to block hotness
  1196. if ((DisableHoistingToHotterBlocks == UseBFI::All ||
  1197. (DisableHoistingToHotterBlocks == UseBFI::PGO && HasProfileData)) &&
  1198. isTgtHotterThanSrc(SrcBlock, Preheader)) {
  1199. ++NumNotHoistedDueToHotness;
  1200. return false;
  1201. }
  1202. // First check whether we should hoist this instruction.
  1203. if (!IsLoopInvariantInst(*MI) || !IsProfitableToHoist(*MI)) {
  1204. // If not, try unfolding a hoistable load.
  1205. MI = ExtractHoistableLoad(MI);
  1206. if (!MI) return false;
  1207. }
  1208. // If we have hoisted an instruction that may store, it can only be a constant
  1209. // store.
  1210. if (MI->mayStore())
  1211. NumStoreConst++;
  1212. // Now move the instructions to the predecessor, inserting it before any
  1213. // terminator instructions.
  1214. LLVM_DEBUG({
  1215. dbgs() << "Hoisting " << *MI;
  1216. if (MI->getParent()->getBasicBlock())
  1217. dbgs() << " from " << printMBBReference(*MI->getParent());
  1218. if (Preheader->getBasicBlock())
  1219. dbgs() << " to " << printMBBReference(*Preheader);
  1220. dbgs() << "\n";
  1221. });
  1222. // If this is the first instruction being hoisted to the preheader,
  1223. // initialize the CSE map with potential common expressions.
  1224. if (FirstInLoop) {
  1225. InitCSEMap(Preheader);
  1226. FirstInLoop = false;
  1227. }
  1228. // Look for opportunity to CSE the hoisted instruction.
  1229. unsigned Opcode = MI->getOpcode();
  1230. DenseMap<unsigned, std::vector<MachineInstr *>>::iterator CI =
  1231. CSEMap.find(Opcode);
  1232. if (!EliminateCSE(MI, CI)) {
  1233. // Otherwise, splice the instruction to the preheader.
  1234. Preheader->splice(Preheader->getFirstTerminator(),MI->getParent(),MI);
  1235. // Since we are moving the instruction out of its basic block, we do not
  1236. // retain its debug location. Doing so would degrade the debugging
  1237. // experience and adversely affect the accuracy of profiling information.
  1238. assert(!MI->isDebugInstr() && "Should not hoist debug inst");
  1239. MI->setDebugLoc(DebugLoc());
  1240. // Update register pressure for BBs from header to this block.
  1241. UpdateBackTraceRegPressure(MI);
  1242. // Clear the kill flags of any register this instruction defines,
  1243. // since they may need to be live throughout the entire loop
  1244. // rather than just live for part of it.
  1245. for (MachineOperand &MO : MI->operands())
  1246. if (MO.isReg() && MO.isDef() && !MO.isDead())
  1247. MRI->clearKillFlags(MO.getReg());
  1248. // Add to the CSE map.
  1249. if (CI != CSEMap.end())
  1250. CI->second.push_back(MI);
  1251. else
  1252. CSEMap[Opcode].push_back(MI);
  1253. }
  1254. ++NumHoisted;
  1255. Changed = true;
  1256. return true;
  1257. }
  1258. /// Get the preheader for the current loop, splitting a critical edge if needed.
  1259. MachineBasicBlock *MachineLICMBase::getCurPreheader() {
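// CurPreheader == reinterpret_cast<MachineBasicBlock *>(-1) is a sentinel meaning
// we already tried and failed to find or create a preheader for this loop.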
  1260. // Determine the block to which to hoist instructions. If we can't find a
  1261. // suitable loop predecessor, we can't do any hoisting.
  1262. // If we've tried to get a preheader and failed, don't try again.
  1263. if (CurPreheader == reinterpret_cast<MachineBasicBlock *>(-1))
  1264. return nullptr;
  1265. if (!CurPreheader) {
  1266. CurPreheader = CurLoop->getLoopPreheader();
  1267. if (!CurPreheader) {
  1268. MachineBasicBlock *Pred = CurLoop->getLoopPredecessor();
  1269. if (!Pred) {
  1270. CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
  1271. return nullptr;
  1272. }
  1273. CurPreheader = Pred->SplitCriticalEdge(CurLoop->getHeader(), *this);
  1274. if (!CurPreheader) {
  1275. CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
  1276. return nullptr;
  1277. }
  1278. }
  1279. }
  1280. return CurPreheader;
  1281. }
  1282. /// Is the target basic block at least "BlockFrequencyRatioThreshold"
  1283. /// times hotter than the source basic block?
  1284. bool MachineLICMBase::isTgtHotterThanSrc(MachineBasicBlock *SrcBlock,
  1285. MachineBasicBlock *TgtBlock) {
  1286. // Parse source and target basic block frequency from MBFI
  1287. uint64_t SrcBF = MBFI->getBlockFreq(SrcBlock).getFrequency();
  1288. uint64_t DstBF = MBFI->getBlockFreq(TgtBlock).getFrequency();
  1289. // Disable the hoisting if source block frequency is zero
  1290. if (!SrcBF)
  1291. return true;
  1292. double Ratio = (double)DstBF / SrcBF;
  1293. // Compare the block frequency ratio with the threshold
  1294. return Ratio > BlockFrequencyRatioThreshold;
  1295. }