LoopPeel.cpp 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919
  1. //===- LoopPeel.cpp -------------------------------------------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // Loop Peeling Utilities.
  10. //===----------------------------------------------------------------------===//
  11. #include "llvm/Transforms/Utils/LoopPeel.h"
  12. #include "llvm/ADT/DenseMap.h"
  13. #include "llvm/ADT/Optional.h"
  14. #include "llvm/ADT/SmallVector.h"
  15. #include "llvm/ADT/Statistic.h"
  16. #include "llvm/Analysis/Loads.h"
  17. #include "llvm/Analysis/LoopInfo.h"
  18. #include "llvm/Analysis/LoopIterator.h"
  19. #include "llvm/Analysis/ScalarEvolution.h"
  20. #include "llvm/Analysis/ScalarEvolutionExpressions.h"
  21. #include "llvm/Analysis/TargetTransformInfo.h"
  22. #include "llvm/IR/BasicBlock.h"
  23. #include "llvm/IR/Dominators.h"
  24. #include "llvm/IR/Function.h"
  25. #include "llvm/IR/InstrTypes.h"
  26. #include "llvm/IR/Instruction.h"
  27. #include "llvm/IR/Instructions.h"
  28. #include "llvm/IR/LLVMContext.h"
  29. #include "llvm/IR/MDBuilder.h"
  30. #include "llvm/IR/Metadata.h"
  31. #include "llvm/IR/PatternMatch.h"
  32. #include "llvm/Support/Casting.h"
  33. #include "llvm/Support/CommandLine.h"
  34. #include "llvm/Support/Debug.h"
  35. #include "llvm/Support/raw_ostream.h"
  36. #include "llvm/Transforms/Utils/BasicBlockUtils.h"
  37. #include "llvm/Transforms/Utils/Cloning.h"
  38. #include "llvm/Transforms/Utils/LoopSimplify.h"
  39. #include "llvm/Transforms/Utils/LoopUtils.h"
  40. #include "llvm/Transforms/Utils/UnrollLoop.h"
  41. #include "llvm/Transforms/Utils/ValueMapper.h"
  42. #include <algorithm>
  43. #include <cassert>
  44. #include <cstdint>
  45. #include <limits>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "loop-peel"

STATISTIC(NumPeeled, "Number of loops peeled");

// Testing knob: force the peel count to a specific value.
static cl::opt<unsigned> UnrollPeelCount(
    "unroll-peel-count", cl::Hidden,
    cl::desc("Set the unroll peeling count, for testing purposes"));

// Master switch for profile-driven peeling of low-trip-count loops.
static cl::opt<bool>
    UnrollAllowPeeling("unroll-allow-peeling", cl::init(true), cl::Hidden,
                       cl::desc("Allows loops to be peeled when the dynamic "
                                "trip count is known to be low."));

// By default only innermost loops are peeled; this relaxes that constraint.
static cl::opt<bool>
    UnrollAllowLoopNestsPeeling("unroll-allow-loop-nests-peeling",
                                cl::init(false), cl::Hidden,
                                cl::desc("Allows loop nests to be peeled."));

// Upper bound on the total number of iterations ever peeled from one loop
// (tracked across runs via the llvm.loop.peeled.count metadata below).
static cl::opt<unsigned> UnrollPeelMaxCount(
    "unroll-peel-max-count", cl::init(7), cl::Hidden,
    cl::desc("Max average trip count which will cause loop peeling."));

static cl::opt<unsigned> UnrollForcePeelCount(
    "unroll-force-peel-count", cl::init(0), cl::Hidden,
    cl::desc("Force a peel count regardless of profiling information."));

// Loop metadata key recording how many iterations were already peeled off,
// so repeated pipeline runs respect UnrollPeelMaxCount cumulatively.
static const char *PeeledCountMetaData = "llvm.loop.peeled.count";
  68. // Check whether we are capable of peeling this loop.
  69. bool llvm::canPeel(Loop *L) {
  70. // Make sure the loop is in simplified form
  71. if (!L->isLoopSimplifyForm())
  72. return false;
  73. // Don't try to peel loops where the latch is not the exiting block.
  74. // This can be an indication of two different things:
  75. // 1) The loop is not rotated.
  76. // 2) The loop contains irreducible control flow that involves the latch.
  77. const BasicBlock *Latch = L->getLoopLatch();
  78. if (!L->isLoopExiting(Latch))
  79. return false;
  80. // Peeling is only supported if the latch is a branch.
  81. if (!isa<BranchInst>(Latch->getTerminator()))
  82. return false;
  83. SmallVector<BasicBlock *, 4> Exits;
  84. L->getUniqueNonLatchExitBlocks(Exits);
  85. // The latch must either be the only exiting block or all non-latch exit
  86. // blocks have either a deopt or unreachable terminator or compose a chain of
  87. // blocks where the last one is either deopt or unreachable terminated. Both
  88. // deopt and unreachable terminators are a strong indication they are not
  89. // taken. Note that this is a profitability check, not a legality check. Also
  90. // note that LoopPeeling currently can only update the branch weights of latch
  91. // blocks and branch weights to blocks with deopt or unreachable do not need
  92. // updating.
  93. return llvm::all_of(Exits, IsBlockFollowedByDeoptOrUnreachable);
  94. }
// This function calculates the number of iterations after which the given Phi
// becomes an invariant. The pre-calculated values are memoized in the map. The
// function (shortcut is I) is calculated according to the following definition:
// Given %x = phi <Inputs from above the loop>, ..., [%y, %back.edge].
//   If %y is a loop invariant, then I(%x) = 1.
//   If %y is a Phi from the loop header, I(%x) = I(%y) + 1.
//   Otherwise, I(%x) is infinite.
// TODO: Actually if %y is an expression that depends only on Phi %z and some
//       loop invariants, we can estimate I(%x) = I(%z) + 1. The example
//       looks like:
//         %x = phi(0, %a),  <-- becomes invariant starting from 3rd iteration.
//         %y = phi(0, 5),
//         %a = %y + 1.
static Optional<unsigned> calculateIterationsToInvariance(
    PHINode *Phi, Loop *L, BasicBlock *BackEdge,
    SmallDenseMap<PHINode *, Optional<unsigned> > &IterationsToInvariance) {
  assert(Phi->getParent() == L->getHeader() &&
         "Non-loop Phi should not be checked for turning into invariant.");
  assert(BackEdge == L->getLoopLatch() && "Wrong latch?");

  // If we already know the answer, take it from the map.
  auto I = IterationsToInvariance.find(Phi);
  if (I != IterationsToInvariance.end())
    return I->second;

  // Otherwise we need to analyze the input from the back edge.
  Value *Input = Phi->getIncomingValueForBlock(BackEdge);
  // Place infinity to map to avoid infinite recursion for cycled Phis. Such
  // cycles can never stop on an invariant. This sentinel must be inserted
  // BEFORE the recursive call below.
  IterationsToInvariance[Phi] = None;
  Optional<unsigned> ToInvariance = None;

  if (L->isLoopInvariant(Input))
    ToInvariance = 1u;
  else if (PHINode *IncPhi = dyn_cast<PHINode>(Input)) {
    // Only consider Phis in header block; other Phis cannot form the simple
    // "shift register" chain this analysis models. (The map already holds
    // None for this Phi, so the early return leaves a correct cached value.)
    if (IncPhi->getParent() != L->getHeader())
      return None;
    // If the input becomes an invariant after X iterations, then our Phi
    // becomes an invariant after X + 1 iterations.
    auto InputToInvariance = calculateIterationsToInvariance(
        IncPhi, L, BackEdge, IterationsToInvariance);
    if (InputToInvariance)
      ToInvariance = *InputToInvariance + 1u;
  }

  // If we found that this Phi lies in an invariant chain, update the map.
  if (ToInvariance)
    IterationsToInvariance[Phi] = ToInvariance;
  return ToInvariance;
}
// Try to find any invariant memory reads that will become dereferenceable in
// the remainder loop after peeling. The load must also be used (transitively)
// by an exit condition. Returns the number of iterations to peel off (at the
// moment either 0 or 1).
static unsigned peelToTurnInvariantLoadsDerefencebale(Loop &L,
                                                      DominatorTree &DT) {
  // Skip loops with a single exiting block, because there should be no benefit
  // for the heuristic below.
  if (L.getExitingBlock())
    return 0;

  // All non-latch exit blocks must have an UnreachableInst terminator.
  // Otherwise the heuristic below may not be profitable.
  SmallVector<BasicBlock *, 4> Exits;
  L.getUniqueNonLatchExitBlocks(Exits);
  if (any_of(Exits, [](const BasicBlock *BB) {
        return !isa<UnreachableInst>(BB->getTerminator());
      }))
    return 0;

  // Now look for invariant loads that dominate the latch and are not known to
  // be dereferenceable. If there are such loads and no writes, they will become
  // dereferenceable in the loop if the first iteration is peeled off. Also
  // collect the set of instructions controlled by such loads. Only peel if an
  // exit condition uses (transitively) such a load.
  BasicBlock *Header = L.getHeader();
  BasicBlock *Latch = L.getLoopLatch();
  SmallPtrSet<Value *, 8> LoadUsers;
  const DataLayout &DL = L.getHeader()->getModule()->getDataLayout();
  for (BasicBlock *BB : L.blocks()) {
    for (Instruction &I : *BB) {
      // Any write in the loop invalidates the "load is invariant" reasoning.
      if (I.mayWriteToMemory())
        return 0;

      // Propagate membership transitively: users of a tracked value are
      // tracked too. NOTE(review): this relies on users appearing after the
      // tracked instruction in the traversal order of L.blocks() — users in
      // blocks visited earlier would be missed; presumably acceptable for
      // this heuristic.
      auto Iter = LoadUsers.find(&I);
      if (Iter != LoadUsers.end()) {
        for (Value *U : I.users())
          LoadUsers.insert(U);
      }
      // Do not look for reads in the header; they can already be hoisted
      // without peeling.
      if (BB == Header)
        continue;
      if (auto *LI = dyn_cast<LoadInst>(&I)) {
        Value *Ptr = LI->getPointerOperand();
        // Candidate: executed every iteration (dominates the latch), from a
        // loop-invariant pointer, but not provably dereferenceable yet.
        if (DT.dominates(BB, Latch) && L.isLoopInvariant(Ptr) &&
            !isDereferenceablePointer(Ptr, LI->getType(), DL, LI, &DT))
          for (Value *U : I.users())
            LoadUsers.insert(U);
      }
    }
  }
  // Peel exactly one iteration if some exit condition depends on a candidate.
  SmallVector<BasicBlock *> ExitingBlocks;
  L.getExitingBlocks(ExitingBlocks);
  if (any_of(ExitingBlocks, [&LoadUsers](BasicBlock *Exiting) {
        return LoadUsers.contains(Exiting->getTerminator());
      }))
    return 1;
  return 0;
}
// Return the number of iterations to peel off that make conditions in the
// body true/false. For example, if we peel 2 iterations off the loop below,
// the condition i < 2 can be evaluated at compile time.
//  for (i = 0; i < n; i++)
//    if (i < 2)
//      ..
//    else
//      ..
//   }
static unsigned countToEliminateCompares(Loop &L, unsigned MaxPeelCount,
                                         ScalarEvolution &SE) {
  assert(L.isLoopSimplifyForm() && "Loop needs to be in loop simplify form");
  unsigned DesiredPeelCount = 0;

  for (auto *BB : L.blocks()) {
    auto *BI = dyn_cast<BranchInst>(BB->getTerminator());
    if (!BI || BI->isUnconditional())
      continue;

    // Ignore loop exit condition.
    if (L.getLoopLatch() == BB)
      continue;

    Value *Condition = BI->getCondition();
    Value *LeftVal, *RightVal;
    CmpInst::Predicate Pred;
    if (!match(Condition, m_ICmp(Pred, m_Value(LeftVal), m_Value(RightVal))))
      continue;

    const SCEV *LeftSCEV = SE.getSCEV(LeftVal);
    const SCEV *RightSCEV = SE.getSCEV(RightVal);

    // Do not consider predicates that are known to be true or false
    // independently of the loop iteration.
    if (SE.evaluatePredicate(Pred, LeftSCEV, RightSCEV))
      continue;

    // Check if we have a condition with one AddRec and one non AddRec
    // expression. Normalize LeftSCEV to be the AddRec.
    if (!isa<SCEVAddRecExpr>(LeftSCEV)) {
      if (isa<SCEVAddRecExpr>(RightSCEV)) {
        std::swap(LeftSCEV, RightSCEV);
        Pred = ICmpInst::getSwappedPredicate(Pred);
      } else
        continue;
    }

    const SCEVAddRecExpr *LeftAR = cast<SCEVAddRecExpr>(LeftSCEV);

    // Avoid huge SCEV computations in the loop below, make sure we only
    // consider AddRecs of the loop we are trying to peel.
    if (!LeftAR->isAffine() || LeftAR->getLoop() != &L)
      continue;

    // The reasoning below requires either a monotonic predicate over the
    // AddRec, or (for equality predicates) an AddRec that cannot self-wrap.
    if (!(ICmpInst::isEquality(Pred) && LeftAR->hasNoSelfWrap()) &&
        !SE.getMonotonicPredicateType(LeftAR, Pred))
      continue;

    // Check if extending the current DesiredPeelCount lets us evaluate Pred
    // or !Pred in the loop body statically.
    unsigned NewPeelCount = DesiredPeelCount;

    const SCEV *IterVal = LeftAR->evaluateAtIteration(
        SE.getConstant(LeftSCEV->getType(), NewPeelCount), SE);

    // If the original condition is not known, get the negated predicate
    // (which holds on the else branch) and check if it is known. This allows
    // us to peel off iterations that make the original condition false.
    if (!SE.isKnownPredicate(Pred, IterVal, RightSCEV))
      Pred = ICmpInst::getInversePredicate(Pred);

    const SCEV *Step = LeftAR->getStepRecurrence(SE);
    const SCEV *NextIterVal = SE.getAddExpr(IterVal, Step);
    // Advance the simulated induction value by one iteration. Mutates
    // IterVal/NextIterVal/NewPeelCount captured by reference.
    auto PeelOneMoreIteration = [&IterVal, &NextIterVal, &SE, Step,
                                 &NewPeelCount]() {
      IterVal = NextIterVal;
      NextIterVal = SE.getAddExpr(IterVal, Step);
      NewPeelCount++;
    };

    auto CanPeelOneMoreIteration = [&NewPeelCount, &MaxPeelCount]() {
      return NewPeelCount < MaxPeelCount;
    };

    // Keep peeling while Pred is known true for the peeled iterations.
    while (CanPeelOneMoreIteration() &&
           SE.isKnownPredicate(Pred, IterVal, RightSCEV))
      PeelOneMoreIteration();

    // With *that* peel count, does the predicate !Pred become known in the
    // first iteration of the loop body after peeling?
    if (!SE.isKnownPredicate(ICmpInst::getInversePredicate(Pred), IterVal,
                             RightSCEV))
      continue; // If not, give up.

    // However, for equality comparisons, that isn't always sufficient to
    // eliminate the comparison in loop body, we may need to peel one more
    // iteration. See if that makes !Pred become unknown again.
    if (ICmpInst::isEquality(Pred) &&
        !SE.isKnownPredicate(ICmpInst::getInversePredicate(Pred), NextIterVal,
                             RightSCEV) &&
        !SE.isKnownPredicate(Pred, IterVal, RightSCEV) &&
        SE.isKnownPredicate(Pred, NextIterVal, RightSCEV)) {
      if (!CanPeelOneMoreIteration())
        continue; // Need to peel one more iteration, but can't. Give up.

      PeelOneMoreIteration(); // Great!
    }

    DesiredPeelCount = std::max(DesiredPeelCount, NewPeelCount);
  }

  return DesiredPeelCount;
}
  292. /// This "heuristic" exactly matches implicit behavior which used to exist
  293. /// inside getLoopEstimatedTripCount. It was added here to keep an
  294. /// improvement inside that API from causing peeling to become more agressive.
  295. /// This should probably be removed.
  296. static bool violatesLegacyMultiExitLoopCheck(Loop *L) {
  297. BasicBlock *Latch = L->getLoopLatch();
  298. if (!Latch)
  299. return true;
  300. BranchInst *LatchBR = dyn_cast<BranchInst>(Latch->getTerminator());
  301. if (!LatchBR || LatchBR->getNumSuccessors() != 2 || !L->isLoopExiting(Latch))
  302. return true;
  303. assert((LatchBR->getSuccessor(0) == L->getHeader() ||
  304. LatchBR->getSuccessor(1) == L->getHeader()) &&
  305. "At least one edge out of the latch must go to the header");
  306. SmallVector<BasicBlock *, 4> ExitBlocks;
  307. L->getUniqueNonLatchExitBlocks(ExitBlocks);
  308. return any_of(ExitBlocks, [](const BasicBlock *EB) {
  309. return !EB->getTerminatingDeoptimizeCall();
  310. });
  311. }
// Return the number of iterations we want to peel off (written to
// PP.PeelCount; PP.PeelProfiledIterations records whether the count came
// from a forced/profile source or from the invariant-Phi heuristics).
void llvm::computePeelCount(Loop *L, unsigned LoopSize,
                            TargetTransformInfo::PeelingPreferences &PP,
                            unsigned TripCount, DominatorTree &DT,
                            ScalarEvolution &SE, unsigned Threshold) {
  assert(LoopSize > 0 && "Zero loop size is not allowed!");

  // Save the PP.PeelCount value set by the target in
  // TTI.getPeelingPreferences or by the flag -unroll-peel-count.
  unsigned TargetPeelCount = PP.PeelCount;
  PP.PeelCount = 0;
  if (!canPeel(L))
    return;

  // Only try to peel innermost loops by default.
  // The constraint can be relaxed by the target in TTI.getPeelingPreferences
  // or by the flag -unroll-allow-loop-nests-peeling.
  if (!PP.AllowLoopNestsPeeling && !L->isInnermost())
    return;

  // If the user provided a peel count, use that.
  bool UserPeelCount = UnrollForcePeelCount.getNumOccurrences() > 0;
  if (UserPeelCount) {
    LLVM_DEBUG(dbgs() << "Force-peeling first " << UnrollForcePeelCount
                      << " iterations.\n");
    PP.PeelCount = UnrollForcePeelCount;
    PP.PeelProfiledIterations = true;
    return;
  }

  // Skip peeling if it's disabled.
  if (!PP.AllowPeeling)
    return;

  // Respect iterations peeled in previous pipeline runs, recorded in loop
  // metadata, so the cumulative total stays under UnrollPeelMaxCount.
  unsigned AlreadyPeeled = 0;
  if (auto Peeled = getOptionalIntLoopAttribute(L, PeeledCountMetaData))
    AlreadyPeeled = *Peeled;
  // Stop if we already peeled off the maximum number of iterations.
  if (AlreadyPeeled >= UnrollPeelMaxCount)
    return;

  // Here we try to get rid of Phis which become invariants after 1, 2, ..., N
  // iterations of the loop. For this we compute the number for iterations after
  // which every Phi is guaranteed to become an invariant, and try to peel the
  // maximum number of iterations among these values, thus turning all those
  // Phis into invariants.
  // First, check that we can peel at least one iteration.
  if (2 * LoopSize <= Threshold && UnrollPeelMaxCount > 0) {
    // Store the pre-calculated values here.
    SmallDenseMap<PHINode *, Optional<unsigned> > IterationsToInvariance;
    // Now go through all Phis to calculate the number of iterations they
    // need to become invariants.
    // Start the max computation with the PP.PeelCount value set by the target
    // in TTI.getPeelingPreferences or by the flag -unroll-peel-count.
    unsigned DesiredPeelCount = TargetPeelCount;
    BasicBlock *BackEdge = L->getLoopLatch();
    assert(BackEdge && "Loop is not in simplified form?");
    for (auto BI = L->getHeader()->begin(); isa<PHINode>(&*BI); ++BI) {
      PHINode *Phi = cast<PHINode>(&*BI);
      auto ToInvariance = calculateIterationsToInvariance(
          Phi, L, BackEdge, IterationsToInvariance);
      if (ToInvariance)
        DesiredPeelCount = std::max(DesiredPeelCount, *ToInvariance);
    }

    // Pay respect to limitations implied by loop size and the max peel count.
    unsigned MaxPeelCount = UnrollPeelMaxCount;
    MaxPeelCount = std::min(MaxPeelCount, Threshold / LoopSize - 1);

    DesiredPeelCount = std::max(DesiredPeelCount,
                                countToEliminateCompares(*L, MaxPeelCount, SE));

    // Fall back to the invariant-load heuristic only if nothing else asked
    // for peeling.
    if (DesiredPeelCount == 0)
      DesiredPeelCount = peelToTurnInvariantLoadsDerefencebale(*L, DT);

    if (DesiredPeelCount > 0) {
      DesiredPeelCount = std::min(DesiredPeelCount, MaxPeelCount);
      // Consider max peel count limitation.
      assert(DesiredPeelCount > 0 && "Wrong loop size estimation?");
      if (DesiredPeelCount + AlreadyPeeled <= UnrollPeelMaxCount) {
        LLVM_DEBUG(dbgs() << "Peel " << DesiredPeelCount
                          << " iteration(s) to turn"
                          << " some Phis into invariants.\n");
        PP.PeelCount = DesiredPeelCount;
        PP.PeelProfiledIterations = false;
        return;
      }
    }
  }

  // Bail if we know the statically calculated trip count.
  // In this case we rather prefer partial unrolling.
  if (TripCount)
    return;

  // Do not apply profile base peeling if it is disabled.
  if (!PP.PeelProfiledIterations)
    return;

  // If we don't know the trip count, but have reason to believe the average
  // trip count is low, peeling should be beneficial, since we will usually
  // hit the peeled section.
  // We only do this in the presence of profile information, since otherwise
  // our estimates of the trip count are not reliable enough.
  if (L->getHeader()->getParent()->hasProfileData()) {
    if (violatesLegacyMultiExitLoopCheck(L))
      return;
    Optional<unsigned> PeelCount = getLoopEstimatedTripCount(L);
    if (!PeelCount)
      return;

    LLVM_DEBUG(dbgs() << "Profile-based estimated trip count is " << *PeelCount
                      << "\n");

    if (*PeelCount) {
      // Accept the profile-estimated count only if both the cumulative peel
      // limit and the code-size budget allow it.
      if ((*PeelCount + AlreadyPeeled <= UnrollPeelMaxCount) &&
          (LoopSize * (*PeelCount + 1) <= Threshold)) {
        LLVM_DEBUG(dbgs() << "Peeling first " << *PeelCount
                          << " iterations.\n");
        PP.PeelCount = *PeelCount;
        return;
      }
      LLVM_DEBUG(dbgs() << "Requested peel count: " << *PeelCount << "\n");
      LLVM_DEBUG(dbgs() << "Already peel count: " << AlreadyPeeled << "\n");
      LLVM_DEBUG(dbgs() << "Max peel count: " << UnrollPeelMaxCount << "\n");
      LLVM_DEBUG(dbgs() << "Peel cost: " << LoopSize * (*PeelCount + 1)
                        << "\n");
      LLVM_DEBUG(dbgs() << "Max peel cost: " << Threshold << "\n");
    }
  }
}
  428. /// Update the branch weights of the latch of a peeled-off loop
  429. /// iteration.
  430. /// This sets the branch weights for the latch of the recently peeled off loop
  431. /// iteration correctly.
  432. /// Let F is a weight of the edge from latch to header.
  433. /// Let E is a weight of the edge from latch to exit.
  434. /// F/(F+E) is a probability to go to loop and E/(F+E) is a probability to
  435. /// go to exit.
  436. /// Then, Estimated TripCount = F / E.
  437. /// For I-th (counting from 0) peeled off iteration we set the the weights for
  438. /// the peeled latch as (TC - I, 1). It gives us reasonable distribution,
  439. /// The probability to go to exit 1/(TC-I) increases. At the same time
  440. /// the estimated trip count of remaining loop reduces by I.
  441. /// To avoid dealing with division rounding we can just multiple both part
  442. /// of weights to E and use weight as (F - I * E, E).
  443. ///
  444. /// \param Header The copy of the header block that belongs to next iteration.
  445. /// \param LatchBR The copy of the latch branch that belongs to this iteration.
  446. /// \param[in,out] FallThroughWeight The weight of the edge from latch to
  447. /// header before peeling (in) and after peeled off one iteration (out).
  448. static void updateBranchWeights(BasicBlock *Header, BranchInst *LatchBR,
  449. uint64_t ExitWeight,
  450. uint64_t &FallThroughWeight) {
  451. // FallThroughWeight is 0 means that there is no branch weights on original
  452. // latch block or estimated trip count is zero.
  453. if (!FallThroughWeight)
  454. return;
  455. unsigned HeaderIdx = (LatchBR->getSuccessor(0) == Header ? 0 : 1);
  456. MDBuilder MDB(LatchBR->getContext());
  457. MDNode *WeightNode =
  458. HeaderIdx ? MDB.createBranchWeights(ExitWeight, FallThroughWeight)
  459. : MDB.createBranchWeights(FallThroughWeight, ExitWeight);
  460. LatchBR->setMetadata(LLVMContext::MD_prof, WeightNode);
  461. FallThroughWeight =
  462. FallThroughWeight > ExitWeight ? FallThroughWeight - ExitWeight : 1;
  463. }
  464. /// Initialize the weights.
  465. ///
  466. /// \param Header The header block.
  467. /// \param LatchBR The latch branch.
  468. /// \param[out] ExitWeight The weight of the edge from Latch to Exit.
  469. /// \param[out] FallThroughWeight The weight of the edge from Latch to Header.
  470. static void initBranchWeights(BasicBlock *Header, BranchInst *LatchBR,
  471. uint64_t &ExitWeight,
  472. uint64_t &FallThroughWeight) {
  473. uint64_t TrueWeight, FalseWeight;
  474. if (!LatchBR->extractProfMetadata(TrueWeight, FalseWeight))
  475. return;
  476. unsigned HeaderIdx = LatchBR->getSuccessor(0) == Header ? 0 : 1;
  477. ExitWeight = HeaderIdx ? TrueWeight : FalseWeight;
  478. FallThroughWeight = HeaderIdx ? FalseWeight : TrueWeight;
  479. }
  480. /// Update the weights of original Latch block after peeling off all iterations.
  481. ///
  482. /// \param Header The header block.
  483. /// \param LatchBR The latch branch.
  484. /// \param ExitWeight The weight of the edge from Latch to Exit.
  485. /// \param FallThroughWeight The weight of the edge from Latch to Header.
  486. static void fixupBranchWeights(BasicBlock *Header, BranchInst *LatchBR,
  487. uint64_t ExitWeight,
  488. uint64_t FallThroughWeight) {
  489. // FallThroughWeight is 0 means that there is no branch weights on original
  490. // latch block or estimated trip count is zero.
  491. if (!FallThroughWeight)
  492. return;
  493. // Sets the branch weights on the loop exit.
  494. MDBuilder MDB(LatchBR->getContext());
  495. unsigned HeaderIdx = LatchBR->getSuccessor(0) == Header ? 0 : 1;
  496. MDNode *WeightNode =
  497. HeaderIdx ? MDB.createBranchWeights(ExitWeight, FallThroughWeight)
  498. : MDB.createBranchWeights(FallThroughWeight, ExitWeight);
  499. LatchBR->setMetadata(LLVMContext::MD_prof, WeightNode);
  500. }
/// Clones the body of the loop L, putting it between \p InsertTop and \p
/// InsertBot.
/// \param IterNumber The serial number of the iteration currently being
/// peeled off.
/// \param ExitEdges The exit edges of the original loop.
/// \param[out] NewBlocks A list of the blocks in the newly created clone
/// \param[out] VMap The value map between the loop and the new clone.
/// \param LoopBlocks A helper for DFS-traversal of the loop.
/// \param LVMap A value-map that maps instructions from the original loop to
/// instructions in the last peeled-off iteration.
static void cloneLoopBlocks(
    Loop *L, unsigned IterNumber, BasicBlock *InsertTop, BasicBlock *InsertBot,
    SmallVectorImpl<std::pair<BasicBlock *, BasicBlock *>> &ExitEdges,
    SmallVectorImpl<BasicBlock *> &NewBlocks, LoopBlocksDFS &LoopBlocks,
    ValueToValueMapTy &VMap, ValueToValueMapTy &LVMap, DominatorTree *DT,
    LoopInfo *LI, ArrayRef<MDNode *> LoopLocalNoAliasDeclScopes) {
  BasicBlock *Header = L->getHeader();
  BasicBlock *Latch = L->getLoopLatch();
  BasicBlock *PreHeader = L->getLoopPreheader();

  Function *F = Header->getParent();
  LoopBlocksDFS::RPOIterator BlockBegin = LoopBlocks.beginRPO();
  LoopBlocksDFS::RPOIterator BlockEnd = LoopBlocks.endRPO();
  Loop *ParentLoop = L->getParentLoop();

  // For each block in the original loop, create a new copy,
  // and update the value map with the newly created values.
  for (LoopBlocksDFS::RPOIterator BB = BlockBegin; BB != BlockEnd; ++BB) {
    BasicBlock *NewBB = CloneBasicBlock(*BB, VMap, ".peel", F);
    NewBlocks.push_back(NewBB);

    // If an original block is an immediate child of the loop L, its copy
    // is a child of a ParentLoop after peeling. If a block is a child of
    // a nested loop, it is handled in the cloneLoop() call below.
    if (ParentLoop && LI->getLoopFor(*BB) == L)
      ParentLoop->addBasicBlockToLoop(NewBB, *LI);

    VMap[*BB] = NewBB;

    // If dominator tree is available, insert nodes to represent cloned blocks.
    if (DT) {
      if (Header == *BB)
        DT->addNewBlock(NewBB, InsertTop);
      else {
        DomTreeNode *IDom = DT->getNode(*BB)->getIDom();
        // VMap must contain entry for IDom, as the iteration order is RPO.
        DT->addNewBlock(NewBB, cast<BasicBlock>(VMap[IDom->getBlock()]));
      }
    }
  }

  {
    // Identify what other metadata depends on the cloned version. After
    // cloning, replace the metadata with the corrected version for both
    // memory instructions and noalias intrinsics.
    std::string Ext = (Twine("Peel") + Twine(IterNumber)).str();
    cloneAndAdaptNoAliasScopes(LoopLocalNoAliasDeclScopes, NewBlocks,
                               Header->getContext(), Ext);
  }

  // Recursively create the new Loop objects for nested loops, if any,
  // to preserve LoopInfo.
  for (Loop *ChildLoop : *L) {
    cloneLoop(ChildLoop, ParentLoop, VMap, LI, nullptr);
  }

  // Hook-up the control flow for the newly inserted blocks.
  // The new header is hooked up directly to the "top", which is either
  // the original loop preheader (for the first iteration) or the previous
  // iteration's exiting block (for every other iteration)
  InsertTop->getTerminator()->setSuccessor(0, cast<BasicBlock>(VMap[Header]));

  // Similarly, for the latch:
  // The original exiting edge is still hooked up to the loop exit.
  // The backedge now goes to the "bottom", which is either the loop's real
  // header (for the last peeled iteration) or the copied header of the next
  // iteration (for every other iteration)
  BasicBlock *NewLatch = cast<BasicBlock>(VMap[Latch]);
  BranchInst *LatchBR = cast<BranchInst>(NewLatch->getTerminator());
  for (unsigned idx = 0, e = LatchBR->getNumSuccessors(); idx < e; ++idx)
    if (LatchBR->getSuccessor(idx) == Header) {
      LatchBR->setSuccessor(idx, InsertBot);
      break;
    }
  if (DT)
    DT->changeImmediateDominator(InsertBot, NewLatch);

  // The new copy of the loop body starts with a bunch of PHI nodes
  // that pick an incoming value from either the preheader, or the previous
  // loop iteration. Since this copy is no longer part of the loop, we
  // resolve this statically:
  // For the first iteration, we use the value from the preheader directly.
  // For any other iteration, we replace the phi with the value generated by
  // the immediately preceding clone of the loop body (which represents
  // the previous iteration), looked up through LVMap.
  for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
    PHINode *NewPHI = cast<PHINode>(VMap[&*I]);
    if (IterNumber == 0) {
      VMap[&*I] = NewPHI->getIncomingValueForBlock(PreHeader);
    } else {
      Value *LatchVal = NewPHI->getIncomingValueForBlock(Latch);
      Instruction *LatchInst = dyn_cast<Instruction>(LatchVal);
      if (LatchInst && L->contains(LatchInst))
        VMap[&*I] = LVMap[LatchInst];
      else
        VMap[&*I] = LatchVal;
    }
    // The cloned Phi is now fully resolved; remove it from the cloned header.
    cast<BasicBlock>(VMap[Header])->getInstList().erase(NewPHI);
  }

  // Fix up the outgoing values - we need to add a value for the iteration
  // we've just created. Note that this must happen *after* the incoming
  // values are adjusted, since the value going out of the latch may also be
  // a value coming into the header.
  for (auto Edge : ExitEdges)
    for (PHINode &PHI : Edge.second->phis()) {
      Value *LatchVal = PHI.getIncomingValueForBlock(Edge.first);
      Instruction *LatchInst = dyn_cast<Instruction>(LatchVal);
      if (LatchInst && L->contains(LatchInst))
        LatchVal = VMap[LatchVal];
      PHI.addIncoming(LatchVal, cast<BasicBlock>(VMap[Edge.first]));
    }

  // LastValueMap is updated with the values for the current loop
  // which are used the next time this function is called.
  for (auto KV : VMap)
    LVMap[KV.first] = KV.second;
}
  617. TargetTransformInfo::PeelingPreferences llvm::gatherPeelingPreferences(
  618. Loop *L, ScalarEvolution &SE, const TargetTransformInfo &TTI,
  619. Optional<bool> UserAllowPeeling,
  620. Optional<bool> UserAllowProfileBasedPeeling, bool UnrollingSpecficValues) {
  621. TargetTransformInfo::PeelingPreferences PP;
  622. // Set the default values.
  623. PP.PeelCount = 0;
  624. PP.AllowPeeling = true;
  625. PP.AllowLoopNestsPeeling = false;
  626. PP.PeelProfiledIterations = true;
  627. // Get the target specifc values.
  628. TTI.getPeelingPreferences(L, SE, PP);
  629. // User specified values using cl::opt.
  630. if (UnrollingSpecficValues) {
  631. if (UnrollPeelCount.getNumOccurrences() > 0)
  632. PP.PeelCount = UnrollPeelCount;
  633. if (UnrollAllowPeeling.getNumOccurrences() > 0)
  634. PP.AllowPeeling = UnrollAllowPeeling;
  635. if (UnrollAllowLoopNestsPeeling.getNumOccurrences() > 0)
  636. PP.AllowLoopNestsPeeling = UnrollAllowLoopNestsPeeling;
  637. }
  638. // User specifed values provided by argument.
  639. if (UserAllowPeeling.hasValue())
  640. PP.AllowPeeling = *UserAllowPeeling;
  641. if (UserAllowProfileBasedPeeling.hasValue())
  642. PP.PeelProfiledIterations = *UserAllowProfileBasedPeeling;
  643. return PP;
  644. }
  645. /// Peel off the first \p PeelCount iterations of loop \p L.
  646. ///
  647. /// Note that this does not peel them off as a single straight-line block.
  648. /// Rather, each iteration is peeled off separately, and needs to check the
  649. /// exit condition.
  650. /// For loops that dynamically execute \p PeelCount iterations or less
  651. /// this provides a benefit, since the peeled off iterations, which account
  652. /// for the bulk of dynamic execution, can be further simplified by scalar
  653. /// optimizations.
bool llvm::peelLoop(Loop *L, unsigned PeelCount, LoopInfo *LI,
                    ScalarEvolution *SE, DominatorTree &DT, AssumptionCache *AC,
                    bool PreserveLCSSA) {
  assert(PeelCount > 0 && "Attempt to peel out zero iterations?");
  assert(canPeel(L) && "Attempt to peel a loop which is not peelable?");

  // Precompute a DFS ordering of the loop body; cloneLoopBlocks uses it to
  // clone the blocks of each peeled iteration in a consistent order.
  LoopBlocksDFS LoopBlocks(L);
  LoopBlocks.perform(LI);

  // canPeel() guarantees a preheader and a latch exist, so these are non-null.
  BasicBlock *Header = L->getHeader();
  BasicBlock *PreHeader = L->getLoopPreheader();
  BasicBlock *Latch = L->getLoopLatch();
  SmallVector<std::pair<BasicBlock *, BasicBlock *>, 4> ExitEdges;
  L->getExitEdges(ExitEdges);

  // Remember dominators of blocks we might reach through exits to change them
  // later. Immediate dominator of such block might change, because we add more
  // routes which can lead to the exit: we can reach it from the peeled
  // iterations too.
  DenseMap<BasicBlock *, BasicBlock *> NonLoopBlocksIDom;
  for (auto *BB : L->blocks()) {
    auto *BBDomNode = DT.getNode(BB);
    // Collect only the dominator-tree children that live outside the loop;
    // blocks inside the loop keep their idoms (the loop body itself is not
    // restructured, only cloned in front of it).
    SmallVector<BasicBlock *, 16> ChildrenToUpdate;
    for (auto *ChildDomNode : BBDomNode->children()) {
      auto *ChildBB = ChildDomNode->getBlock();
      if (!L->contains(ChildBB))
        ChildrenToUpdate.push_back(ChildBB);
    }
    // The new idom of the block will be the nearest common dominator
    // of all copies of the previous idom. This is equivalent to the
    // nearest common dominator of the previous idom and the first latch,
    // which dominates all copies of the previous idom.
    BasicBlock *NewIDom = DT.findNearestCommonDominator(BB, Latch);
    for (auto *ChildBB : ChildrenToUpdate)
      NonLoopBlocksIDom[ChildBB] = NewIDom;
  }

  Function *F = Header->getParent();

  // Set up all the necessary basic blocks. It is convenient to split the
  // preheader into 3 parts - two blocks to anchor the peeled copy of the loop
  // body, and a new preheader for the "real" loop.

  // Peeling the first iteration transforms.
  //
  // PreHeader:
  // ...
  // Header:
  //   LoopBody
  //   If (cond) goto Header
  // Exit:
  //
  // into
  //
  // InsertTop:
  //   LoopBody
  //   If (!cond) goto Exit
  // InsertBot:
  // NewPreHeader:
  // ...
  // Header:
  //  LoopBody
  //  If (cond) goto Header
  // Exit:
  //
  // Each following iteration will split the current bottom anchor in two,
  // and put the new copy of the loop body between these two blocks. That is,
  // after peeling another iteration from the example above, we'll split
  // InsertBot, and get:
  //
  // InsertTop:
  //   LoopBody
  //   If (!cond) goto Exit
  // InsertBot:
  //   LoopBody
  //   If (!cond) goto Exit
  // InsertBot.next:
  // NewPreHeader:
  // ...
  // Header:
  //  LoopBody
  //  If (cond) goto Header
  // Exit:
  BasicBlock *InsertTop = SplitEdge(PreHeader, Header, &DT, LI);
  BasicBlock *InsertBot =
      SplitBlock(InsertTop, InsertTop->getTerminator(), &DT, LI);
  BasicBlock *NewPreHeader =
      SplitBlock(InsertBot, InsertBot->getTerminator(), &DT, LI);

  InsertTop->setName(Header->getName() + ".peel.begin");
  InsertBot->setName(Header->getName() + ".peel.next");
  NewPreHeader->setName(PreHeader->getName() + ".peel.newph");

  // LVMap maps each original loop value to its clone from the most recently
  // peeled-off iteration; cloneLoopBlocks refreshes it on every iteration.
  ValueToValueMapTy LVMap;

  // If we have branch weight information, we'll want to update it for the
  // newly created branches.
  BranchInst *LatchBR =
      cast<BranchInst>(cast<BasicBlock>(Latch)->getTerminator());
  uint64_t ExitWeight = 0, FallThroughWeight = 0;
  initBranchWeights(Header, LatchBR, ExitWeight, FallThroughWeight);

  // Identify what noalias metadata is inside the loop: if it is inside the
  // loop, the associated metadata must be cloned for each iteration.
  SmallVector<MDNode *, 6> LoopLocalNoAliasDeclScopes;
  identifyNoAliasScopesToClone(L->getBlocks(), LoopLocalNoAliasDeclScopes);

  // For each peeled-off iteration, make a copy of the loop.
  for (unsigned Iter = 0; Iter < PeelCount; ++Iter) {
    SmallVector<BasicBlock *, 8> NewBlocks;
    ValueToValueMapTy VMap;

    cloneLoopBlocks(L, Iter, InsertTop, InsertBot, ExitEdges, NewBlocks,
                    LoopBlocks, VMap, LVMap, &DT, LI,
                    LoopLocalNoAliasDeclScopes);

    // Remap to use values from the current iteration instead of the
    // previous one.
    remapInstructionsInBlocks(NewBlocks, VMap);

    // Update IDoms of the blocks reachable through exits.
    // Only needed once: after the first iteration is peeled, the exit blocks
    // are already dominated by the peeled copies (via the idoms computed in
    // NonLoopBlocksIDom above), and later iterations do not change that.
    if (Iter == 0)
      for (auto BBIDom : NonLoopBlocksIDom)
        DT.changeImmediateDominator(BBIDom.first,
                                    cast<BasicBlock>(LVMap[BBIDom.second]));
#ifdef EXPENSIVE_CHECKS
    assert(DT.verify(DominatorTree::VerificationLevel::Fast));
#endif

    auto *LatchBRCopy = cast<BranchInst>(VMap[LatchBR]);
    updateBranchWeights(InsertBot, LatchBRCopy, ExitWeight, FallThroughWeight);
    // Remove Loop metadata from the latch branch instruction
    // because it is not the Loop's latch branch anymore.
    LatchBRCopy->setMetadata(LLVMContext::MD_loop, nullptr);

    // Advance the anchors: the bottom of this iteration becomes the top of
    // the next one, and a fresh bottom anchor is split off.
    InsertTop = InsertBot;
    InsertBot = SplitBlock(InsertBot, InsertBot->getTerminator(), &DT, LI);
    InsertBot->setName(Header->getName() + ".peel.next");

    // Move the cloned blocks into place so the function's block list reads
    // top-to-bottom in execution order.
    F->getBasicBlockList().splice(InsertTop->getIterator(),
                                  F->getBasicBlockList(),
                                  NewBlocks[0]->getIterator(), F->end());
  }

  // Now adjust the phi nodes in the loop header to get their initial values
  // from the last peeled-off iteration instead of the preheader.
  for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
    PHINode *PHI = cast<PHINode>(I);
    Value *NewVal = PHI->getIncomingValueForBlock(Latch);
    Instruction *LatchInst = dyn_cast<Instruction>(NewVal);
    if (LatchInst && L->contains(LatchInst))
      // The latch value was defined inside the loop, so the entry value must
      // be its clone from the last peeled iteration.
      NewVal = LVMap[LatchInst];

    PHI->setIncomingValueForBlock(NewPreHeader, NewVal);
  }

  fixupBranchWeights(Header, LatchBR, ExitWeight, FallThroughWeight);

  // Update Metadata for count of peeled off iterations.
  // Accumulate on top of any count recorded by earlier peeling of this loop.
  unsigned AlreadyPeeled = 0;
  if (auto Peeled = getOptionalIntLoopAttribute(L, PeeledCountMetaData))
    AlreadyPeeled = *Peeled;
  addStringMetadataToLoop(L, PeeledCountMetaData, AlreadyPeeled + PeelCount);

  if (Loop *ParentLoop = L->getParentLoop())
    L = ParentLoop;

  // We modified the loop, update SE.
  SE->forgetTopmostLoop(L);

  // Finally DomTree must be correct.
  assert(DT.verify(DominatorTree::VerificationLevel::Fast));

  // FIXME: Incrementally update loop-simplify
  simplifyLoop(L, &DT, LI, SE, AC, nullptr, PreserveLCSSA);

  NumPeeled++;

  return true;
}