LoopUnrollAndJam.cpp 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006
  1. //===-- LoopUnrollAndJam.cpp - Loop unrolling utilities -------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file implements loop unroll and jam as a routine, much like
  10. // LoopUnroll.cpp implements loop unroll.
  11. //
  12. //===----------------------------------------------------------------------===//
  13. #include "llvm/ADT/ArrayRef.h"
  14. #include "llvm/ADT/DenseMap.h"
  15. #include "llvm/ADT/Optional.h"
  16. #include "llvm/ADT/STLExtras.h"
  17. #include "llvm/ADT/Sequence.h"
  18. #include "llvm/ADT/SmallPtrSet.h"
  19. #include "llvm/ADT/SmallVector.h"
  20. #include "llvm/ADT/Statistic.h"
  21. #include "llvm/ADT/StringRef.h"
  22. #include "llvm/ADT/Twine.h"
  23. #include "llvm/ADT/iterator_range.h"
  24. #include "llvm/Analysis/AssumptionCache.h"
  25. #include "llvm/Analysis/DependenceAnalysis.h"
  26. #include "llvm/Analysis/DomTreeUpdater.h"
  27. #include "llvm/Analysis/LoopInfo.h"
  28. #include "llvm/Analysis/LoopIterator.h"
  29. #include "llvm/Analysis/MustExecute.h"
  30. #include "llvm/Analysis/OptimizationRemarkEmitter.h"
  31. #include "llvm/Analysis/ScalarEvolution.h"
  32. #include "llvm/IR/BasicBlock.h"
  33. #include "llvm/IR/DebugInfoMetadata.h"
  34. #include "llvm/IR/DebugLoc.h"
  35. #include "llvm/IR/DiagnosticInfo.h"
  36. #include "llvm/IR/Dominators.h"
  37. #include "llvm/IR/Function.h"
  38. #include "llvm/IR/Instruction.h"
  39. #include "llvm/IR/Instructions.h"
  40. #include "llvm/IR/IntrinsicInst.h"
  41. #include "llvm/IR/Use.h"
  42. #include "llvm/IR/User.h"
  43. #include "llvm/IR/Value.h"
  44. #include "llvm/IR/ValueHandle.h"
  45. #include "llvm/IR/ValueMap.h"
  46. #include "llvm/Support/Casting.h"
  47. #include "llvm/Support/Debug.h"
  48. #include "llvm/Support/ErrorHandling.h"
  49. #include "llvm/Support/GenericDomTree.h"
  50. #include "llvm/Support/raw_ostream.h"
  51. #include "llvm/Transforms/Utils/BasicBlockUtils.h"
  52. #include "llvm/Transforms/Utils/Cloning.h"
  53. #include "llvm/Transforms/Utils/LoopUtils.h"
  54. #include "llvm/Transforms/Utils/UnrollLoop.h"
  55. #include "llvm/Transforms/Utils/ValueMapper.h"
  56. #include <assert.h>
  57. #include <memory>
  58. #include <type_traits>
  59. #include <vector>
  60. using namespace llvm;
  61. #define DEBUG_TYPE "loop-unroll-and-jam"
  62. STATISTIC(NumUnrolledAndJammed, "Number of loops unroll and jammed");
  63. STATISTIC(NumCompletelyUnrolledAndJammed, "Number of loops unroll and jammed");
  64. typedef SmallPtrSet<BasicBlock *, 4> BasicBlockSet;
  65. // Partition blocks in an outer/inner loop pair into blocks before and after
  66. // the loop
  67. static bool partitionLoopBlocks(Loop &L, BasicBlockSet &ForeBlocks,
  68. BasicBlockSet &AftBlocks, DominatorTree &DT) {
  69. Loop *SubLoop = L.getSubLoops()[0];
  70. BasicBlock *SubLoopLatch = SubLoop->getLoopLatch();
  71. for (BasicBlock *BB : L.blocks()) {
  72. if (!SubLoop->contains(BB)) {
  73. if (DT.dominates(SubLoopLatch, BB))
  74. AftBlocks.insert(BB);
  75. else
  76. ForeBlocks.insert(BB);
  77. }
  78. }
  79. // Check that all blocks in ForeBlocks together dominate the subloop
  80. // TODO: This might ideally be done better with a dominator/postdominators.
  81. BasicBlock *SubLoopPreHeader = SubLoop->getLoopPreheader();
  82. for (BasicBlock *BB : ForeBlocks) {
  83. if (BB == SubLoopPreHeader)
  84. continue;
  85. Instruction *TI = BB->getTerminator();
  86. for (BasicBlock *Succ : successors(TI))
  87. if (!ForeBlocks.count(Succ))
  88. return false;
  89. }
  90. return true;
  91. }
  92. /// Partition blocks in a loop nest into blocks before and after each inner
  93. /// loop.
  94. static bool partitionOuterLoopBlocks(
  95. Loop &Root, Loop &JamLoop, BasicBlockSet &JamLoopBlocks,
  96. DenseMap<Loop *, BasicBlockSet> &ForeBlocksMap,
  97. DenseMap<Loop *, BasicBlockSet> &AftBlocksMap, DominatorTree &DT) {
  98. JamLoopBlocks.insert(JamLoop.block_begin(), JamLoop.block_end());
  99. for (Loop *L : Root.getLoopsInPreorder()) {
  100. if (L == &JamLoop)
  101. break;
  102. if (!partitionLoopBlocks(*L, ForeBlocksMap[L], AftBlocksMap[L], DT))
  103. return false;
  104. }
  105. return true;
  106. }
  107. // TODO Remove when UnrollAndJamLoop changed to support unroll and jamming more
  108. // than 2 levels loop.
  109. static bool partitionOuterLoopBlocks(Loop *L, Loop *SubLoop,
  110. BasicBlockSet &ForeBlocks,
  111. BasicBlockSet &SubLoopBlocks,
  112. BasicBlockSet &AftBlocks,
  113. DominatorTree *DT) {
  114. SubLoopBlocks.insert(SubLoop->block_begin(), SubLoop->block_end());
  115. return partitionLoopBlocks(*L, ForeBlocks, AftBlocks, *DT);
  116. }
  117. // Looks at the phi nodes in Header for values coming from Latch. For these
  118. // instructions and all their operands calls Visit on them, keeping going for
  119. // all the operands in AftBlocks. Returns false if Visit returns false,
  120. // otherwise returns true. This is used to process the instructions in the
  121. // Aft blocks that need to be moved before the subloop. It is used in two
  122. // places. One to check that the required set of instructions can be moved
  123. // before the loop. Then to collect the instructions to actually move in
  124. // moveHeaderPhiOperandsToForeBlocks.
  125. template <typename T>
  126. static bool processHeaderPhiOperands(BasicBlock *Header, BasicBlock *Latch,
  127. BasicBlockSet &AftBlocks, T Visit) {
  128. SmallVector<Instruction *, 8> Worklist;
  129. SmallPtrSet<Instruction *, 8> VisitedInstr;
  130. for (auto &Phi : Header->phis()) {
  131. Value *V = Phi.getIncomingValueForBlock(Latch);
  132. if (Instruction *I = dyn_cast<Instruction>(V))
  133. Worklist.push_back(I);
  134. }
  135. while (!Worklist.empty()) {
  136. Instruction *I = Worklist.pop_back_val();
  137. if (!Visit(I))
  138. return false;
  139. VisitedInstr.insert(I);
  140. if (AftBlocks.count(I->getParent()))
  141. for (auto &U : I->operands())
  142. if (Instruction *II = dyn_cast<Instruction>(U))
  143. if (!VisitedInstr.count(II))
  144. Worklist.push_back(II);
  145. }
  146. return true;
  147. }
  148. // Move the phi operands of Header from Latch out of AftBlocks to InsertLoc.
  149. static void moveHeaderPhiOperandsToForeBlocks(BasicBlock *Header,
  150. BasicBlock *Latch,
  151. Instruction *InsertLoc,
  152. BasicBlockSet &AftBlocks) {
  153. // We need to ensure we move the instructions in the correct order,
  154. // starting with the earliest required instruction and moving forward.
  155. std::vector<Instruction *> Visited;
  156. processHeaderPhiOperands(Header, Latch, AftBlocks,
  157. [&Visited, &AftBlocks](Instruction *I) {
  158. if (AftBlocks.count(I->getParent()))
  159. Visited.push_back(I);
  160. return true;
  161. });
  162. // Move all instructions in program order to before the InsertLoc
  163. BasicBlock *InsertLocBB = InsertLoc->getParent();
  164. for (Instruction *I : reverse(Visited)) {
  165. if (I->getParent() != InsertLocBB)
  166. I->moveBefore(InsertLoc);
  167. }
  168. }
  169. /*
  170. This method performs Unroll and Jam. For a simple loop like:
  171. for (i = ..)
  172. Fore(i)
  173. for (j = ..)
  174. SubLoop(i, j)
  175. Aft(i)
  176. Instead of doing normal inner or outer unrolling, we do:
  177. for (i = .., i+=2)
  178. Fore(i)
  179. Fore(i+1)
  180. for (j = ..)
  181. SubLoop(i, j)
  182. SubLoop(i+1, j)
  183. Aft(i)
  184. Aft(i+1)
So the outer loop is essentially unrolled and then the inner loops are fused
  186. ("jammed") together into a single loop. This can increase speed when there
  187. are loads in SubLoop that are invariant to i, as they become shared between
  188. the now jammed inner loops.
We do this by splitting the blocks in the loop into Fore, Subloop and Aft.
  190. Fore blocks are those before the inner loop, Aft are those after. Normal
  191. Unroll code is used to copy each of these sets of blocks and the results are
  192. combined together into the final form above.
  193. isSafeToUnrollAndJam should be used prior to calling this to make sure the
unrolling will be valid. Checking profitability is also advisable.
  195. If EpilogueLoop is non-null, it receives the epilogue loop (if it was
  196. necessary to create one and not fully unrolled).
  197. */
// Performs the actual unroll-and-jam transformation described in the comment
// above: clones the Fore/SubLoop/Aft block sets Count times, staples the
// clones together so the inner loops are jammed into one, then repairs phis,
// the dominator tree and LoopInfo. isSafeToUnrollAndJam must have been
// checked by the caller.
LoopUnrollResult
llvm::UnrollAndJamLoop(Loop *L, unsigned Count, unsigned TripCount,
                       unsigned TripMultiple, bool UnrollRemainder,
                       LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
                       AssumptionCache *AC, const TargetTransformInfo *TTI,
                       OptimizationRemarkEmitter *ORE, Loop **EpilogueLoop) {
  // When we enter here we should have already checked that it is safe
  BasicBlock *Header = L->getHeader();
  assert(Header && "No header.");
  assert(L->getSubLoops().size() == 1);
  Loop *SubLoop = *L->begin();

  // Don't enter the unroll code if there is nothing to do.
  if (TripCount == 0 && Count < 2) {
    LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; almost nothing to do\n");
    return LoopUnrollResult::Unmodified;
  }

  assert(Count > 0);
  assert(TripMultiple > 0);
  assert(TripCount == 0 || TripCount % TripMultiple == 0);

  // Are we eliminating the loop control altogether?
  bool CompletelyUnroll = (Count == TripCount);

  // We use the runtime remainder in cases where we don't know trip multiple
  if (TripMultiple % Count != 0) {
    if (!UnrollRuntimeLoopRemainder(L, Count, /*AllowExpensiveTripCount*/ false,
                                    /*UseEpilogRemainder*/ true,
                                    UnrollRemainder, /*ForgetAllSCEV*/ false,
                                    LI, SE, DT, AC, TTI, true, EpilogueLoop)) {
      LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; remainder loop could not be "
                           "generated when assuming runtime trip count\n");
      return LoopUnrollResult::Unmodified;
    }
  }

  // Notify ScalarEvolution that the loop will be substantially changed,
  // if not outright eliminated.
  if (SE) {
    SE->forgetLoop(L);
    SE->forgetLoop(SubLoop);
  }

  using namespace ore;
  // Report the unrolling decision.
  if (CompletelyUnroll) {
    LLVM_DEBUG(dbgs() << "COMPLETELY UNROLL AND JAMMING loop %"
                      << Header->getName() << " with trip count " << TripCount
                      << "!\n");
    ORE->emit(OptimizationRemark(DEBUG_TYPE, "FullyUnrolled", L->getStartLoc(),
                                 L->getHeader())
              << "completely unroll and jammed loop with "
              << NV("UnrollCount", TripCount) << " iterations");
  } else {
    auto DiagBuilder = [&]() {
      OptimizationRemark Diag(DEBUG_TYPE, "PartialUnrolled", L->getStartLoc(),
                              L->getHeader());
      return Diag << "unroll and jammed loop by a factor of "
                  << NV("UnrollCount", Count);
    };

    LLVM_DEBUG(dbgs() << "UNROLL AND JAMMING loop %" << Header->getName()
                      << " by " << Count);
    if (TripMultiple != 1) {
      LLVM_DEBUG(dbgs() << " with " << TripMultiple << " trips per branch");
      ORE->emit([&]() {
        return DiagBuilder() << " with " << NV("TripMultiple", TripMultiple)
                             << " trips per branch";
      });
    } else {
      LLVM_DEBUG(dbgs() << " with run-time trip count");
      ORE->emit([&]() { return DiagBuilder() << " with run-time trip count"; });
    }
    LLVM_DEBUG(dbgs() << "!\n");
  }

  BasicBlock *Preheader = L->getLoopPreheader();
  BasicBlock *LatchBlock = L->getLoopLatch();
  assert(Preheader && "No preheader");
  assert(LatchBlock && "No latch block");
  BranchInst *BI = dyn_cast<BranchInst>(LatchBlock->getTerminator());
  assert(BI && !BI->isUnconditional());
  // Work out which successor of each latch branch continues the loop, so the
  // exit edge can be identified for both the outer and inner loops.
  bool ContinueOnTrue = L->contains(BI->getSuccessor(0));
  BasicBlock *LoopExit = BI->getSuccessor(ContinueOnTrue);
  bool SubLoopContinueOnTrue = SubLoop->contains(
      SubLoop->getLoopLatch()->getTerminator()->getSuccessor(0));

  // Partition blocks in an outer/inner loop pair into blocks before and after
  // the loop
  BasicBlockSet SubLoopBlocks;
  BasicBlockSet ForeBlocks;
  BasicBlockSet AftBlocks;
  partitionOuterLoopBlocks(L, SubLoop, ForeBlocks, SubLoopBlocks, AftBlocks,
                           DT);

  // We keep track of the entering/first and exiting/last block of each of
  // Fore/SubLoop/Aft in each iteration. This helps make the stapling up of
  // blocks easier.
  std::vector<BasicBlock *> ForeBlocksFirst;
  std::vector<BasicBlock *> ForeBlocksLast;
  std::vector<BasicBlock *> SubLoopBlocksFirst;
  std::vector<BasicBlock *> SubLoopBlocksLast;
  std::vector<BasicBlock *> AftBlocksFirst;
  std::vector<BasicBlock *> AftBlocksLast;
  ForeBlocksFirst.push_back(Header);
  ForeBlocksLast.push_back(SubLoop->getLoopPreheader());
  SubLoopBlocksFirst.push_back(SubLoop->getHeader());
  SubLoopBlocksLast.push_back(SubLoop->getExitingBlock());
  AftBlocksFirst.push_back(SubLoop->getExitBlock());
  AftBlocksLast.push_back(L->getExitingBlock());
  // Maps Blocks[0] -> Blocks[It]
  ValueToValueMapTy LastValueMap;

  // Move any instructions from fore phi operands from AftBlocks into Fore.
  moveHeaderPhiOperandsToForeBlocks(
      Header, LatchBlock, ForeBlocksLast[0]->getTerminator(), AftBlocks);

  // The current on-the-fly SSA update requires blocks to be processed in
  // reverse postorder so that LastValueMap contains the correct value at each
  // exit.
  LoopBlocksDFS DFS(L);
  DFS.perform(LI);
  // Stash the DFS iterators before adding blocks to the loop.
  LoopBlocksDFS::RPOIterator BlockBegin = DFS.beginRPO();
  LoopBlocksDFS::RPOIterator BlockEnd = DFS.endRPO();

  // When a FSDiscriminator is enabled, we don't need to add the multiply
  // factors to the discriminators.
  if (Header->getParent()->isDebugInfoForProfiling() && !EnableFSDiscriminator)
    for (BasicBlock *BB : L->getBlocks())
      for (Instruction &I : *BB)
        if (!isa<DbgInfoIntrinsic>(&I))
          if (const DILocation *DIL = I.getDebugLoc()) {
            auto NewDIL = DIL->cloneByMultiplyingDuplicationFactor(Count);
            if (NewDIL)
              I.setDebugLoc(NewDIL.getValue());
            else
              LLVM_DEBUG(dbgs()
                         << "Failed to create new discriminator: "
                         << DIL->getFilename() << " Line: " << DIL->getLine());
          }

  // Copy all blocks. Iteration 0 is the original code; clones for iterations
  // 1..Count-1 are appended and sorted into the Fore/Sub/Aft bookkeeping.
  for (unsigned It = 1; It != Count; ++It) {
    SmallVector<BasicBlock *, 8> NewBlocks;
    // Maps Blocks[It] -> Blocks[It-1]
    DenseMap<Value *, Value *> PrevItValueMap;
    SmallDenseMap<const Loop *, Loop *, 4> NewLoops;
    NewLoops[L] = L;
    NewLoops[SubLoop] = SubLoop;

    for (LoopBlocksDFS::RPOIterator BB = BlockBegin; BB != BlockEnd; ++BB) {
      ValueToValueMapTy VMap;
      BasicBlock *New = CloneBasicBlock(*BB, VMap, "." + Twine(It));
      Header->getParent()->getBasicBlockList().push_back(New);

      // Tell LI about New.
      addClonedBlockToLoopInfo(*BB, New, LI, NewLoops);

      // Record the clone in the first/last bookkeeping for its set.
      if (ForeBlocks.count(*BB)) {
        if (*BB == ForeBlocksFirst[0])
          ForeBlocksFirst.push_back(New);
        if (*BB == ForeBlocksLast[0])
          ForeBlocksLast.push_back(New);
      } else if (SubLoopBlocks.count(*BB)) {
        if (*BB == SubLoopBlocksFirst[0])
          SubLoopBlocksFirst.push_back(New);
        if (*BB == SubLoopBlocksLast[0])
          SubLoopBlocksLast.push_back(New);
      } else if (AftBlocks.count(*BB)) {
        if (*BB == AftBlocksFirst[0])
          AftBlocksFirst.push_back(New);
        if (*BB == AftBlocksLast[0])
          AftBlocksLast.push_back(New);
      } else {
        llvm_unreachable("BB being cloned should be in Fore/Sub/Aft");
      }

      // Update our running maps of newest clones
      PrevItValueMap[New] = (It == 1 ? *BB : LastValueMap[*BB]);
      LastValueMap[*BB] = New;
      for (ValueToValueMapTy::iterator VI = VMap.begin(), VE = VMap.end();
           VI != VE; ++VI) {
        PrevItValueMap[VI->second] =
            const_cast<Value *>(It == 1 ? VI->first : LastValueMap[VI->first]);
        LastValueMap[VI->first] = VI->second;
      }

      NewBlocks.push_back(New);

      // Update DomTree: the entry block of each set is dominated by the last
      // block of the previous iteration's corresponding set; interior blocks
      // mirror the original set's internal dominator structure.
      if (*BB == ForeBlocksFirst[0])
        DT->addNewBlock(New, ForeBlocksLast[It - 1]);
      else if (*BB == SubLoopBlocksFirst[0])
        DT->addNewBlock(New, SubLoopBlocksLast[It - 1]);
      else if (*BB == AftBlocksFirst[0])
        DT->addNewBlock(New, AftBlocksLast[It - 1]);
      else {
        // Each set of blocks (Fore/Sub/Aft) will have the same internal domtree
        // structure.
        auto BBDomNode = DT->getNode(*BB);
        auto BBIDom = BBDomNode->getIDom();
        BasicBlock *OriginalBBIDom = BBIDom->getBlock();
        assert(OriginalBBIDom);
        assert(LastValueMap[cast<Value>(OriginalBBIDom)]);
        DT->addNewBlock(
            New, cast<BasicBlock>(LastValueMap[cast<Value>(OriginalBBIDom)]));
      }
    }

    // Remap all instructions in the most recent iteration
    remapInstructionsInBlocks(NewBlocks, LastValueMap);
    for (BasicBlock *NewBlock : NewBlocks) {
      for (Instruction &I : *NewBlock) {
        // Cloned llvm.assume calls must be registered with the cache.
        if (auto *II = dyn_cast<AssumeInst>(&I))
          AC->registerAssumption(II);
      }
    }

    // Alter the ForeBlocks phi's, pointing them at the latest version of the
    // value from the previous iteration's phis
    for (PHINode &Phi : ForeBlocksFirst[It]->phis()) {
      Value *OldValue = Phi.getIncomingValueForBlock(AftBlocksLast[It]);
      assert(OldValue && "should have incoming edge from Aft[It]");
      Value *NewValue = OldValue;
      if (Value *PrevValue = PrevItValueMap[OldValue])
        NewValue = PrevValue;

      assert(Phi.getNumOperands() == 2);
      Phi.setIncomingBlock(0, ForeBlocksLast[It - 1]);
      Phi.setIncomingValue(0, NewValue);
      Phi.removeIncomingValue(1);
    }
  }

  // Now that all the basic blocks for the unrolled iterations are in place,
  // finish up connecting the blocks and phi nodes. At this point LastValueMap
  // is the last unrolled iterations values.

  // Update Phis in BB from OldBB to point to NewBB and use the latest value
  // from LastValueMap
  auto updatePHIBlocksAndValues = [](BasicBlock *BB, BasicBlock *OldBB,
                                     BasicBlock *NewBB,
                                     ValueToValueMapTy &LastValueMap) {
    for (PHINode &Phi : BB->phis()) {
      for (unsigned b = 0; b < Phi.getNumIncomingValues(); ++b) {
        if (Phi.getIncomingBlock(b) == OldBB) {
          Value *OldValue = Phi.getIncomingValue(b);
          if (Value *LastValue = LastValueMap[OldValue])
            Phi.setIncomingValue(b, LastValue);
          Phi.setIncomingBlock(b, NewBB);
          break;
        }
      }
    }
  };
  // Move all the phis from Src into Dest
  auto movePHIs = [](BasicBlock *Src, BasicBlock *Dest) {
    Instruction *insertPoint = Dest->getFirstNonPHI();
    while (PHINode *Phi = dyn_cast<PHINode>(Src->begin()))
      Phi->moveBefore(insertPoint);
  };

  // Update the PHI values outside the loop to point to the last block
  updatePHIBlocksAndValues(LoopExit, AftBlocksLast[0], AftBlocksLast.back(),
                           LastValueMap);

  // Update ForeBlocks successors and phi nodes
  BranchInst *ForeTerm =
      cast<BranchInst>(ForeBlocksLast.back()->getTerminator());
  assert(ForeTerm->getNumSuccessors() == 1 && "Expecting one successor");
  ForeTerm->setSuccessor(0, SubLoopBlocksFirst[0]);

  if (CompletelyUnroll) {
    // The loop control is gone; header phis collapse to their preheader value.
    while (PHINode *Phi = dyn_cast<PHINode>(ForeBlocksFirst[0]->begin())) {
      Phi->replaceAllUsesWith(Phi->getIncomingValueForBlock(Preheader));
      Phi->getParent()->getInstList().erase(Phi);
    }
  } else {
    // Update the PHI values to point to the last aft block
    updatePHIBlocksAndValues(ForeBlocksFirst[0], AftBlocksLast[0],
                             AftBlocksLast.back(), LastValueMap);
  }

  for (unsigned It = 1; It != Count; It++) {
    // Remap ForeBlock successors from previous iteration to this
    BranchInst *ForeTerm =
        cast<BranchInst>(ForeBlocksLast[It - 1]->getTerminator());
    assert(ForeTerm->getNumSuccessors() == 1 && "Expecting one successor");
    ForeTerm->setSuccessor(0, ForeBlocksFirst[It]);
  }

  // Subloop successors and phis
  BranchInst *SubTerm =
      cast<BranchInst>(SubLoopBlocksLast.back()->getTerminator());
  SubTerm->setSuccessor(!SubLoopContinueOnTrue, SubLoopBlocksFirst[0]);
  SubTerm->setSuccessor(SubLoopContinueOnTrue, AftBlocksFirst[0]);
  SubLoopBlocksFirst[0]->replacePhiUsesWith(ForeBlocksLast[0],
                                            ForeBlocksLast.back());
  SubLoopBlocksFirst[0]->replacePhiUsesWith(SubLoopBlocksLast[0],
                                            SubLoopBlocksLast.back());

  for (unsigned It = 1; It != Count; It++) {
    // Replace the conditional branch of the previous iteration subloop with an
    // unconditional one to this one
    BranchInst *SubTerm =
        cast<BranchInst>(SubLoopBlocksLast[It - 1]->getTerminator());
    BranchInst::Create(SubLoopBlocksFirst[It], SubTerm);
    SubTerm->eraseFromParent();

    SubLoopBlocksFirst[It]->replacePhiUsesWith(ForeBlocksLast[It],
                                               ForeBlocksLast.back());
    SubLoopBlocksFirst[It]->replacePhiUsesWith(SubLoopBlocksLast[It],
                                               SubLoopBlocksLast.back());
    // Jam: all inner-loop phis are hoisted into the single fused loop header.
    movePHIs(SubLoopBlocksFirst[It], SubLoopBlocksFirst[0]);
  }

  // Aft blocks successors and phis
  BranchInst *AftTerm = cast<BranchInst>(AftBlocksLast.back()->getTerminator());
  if (CompletelyUnroll) {
    BranchInst::Create(LoopExit, AftTerm);
    AftTerm->eraseFromParent();
  } else {
    AftTerm->setSuccessor(!ContinueOnTrue, ForeBlocksFirst[0]);
    assert(AftTerm->getSuccessor(ContinueOnTrue) == LoopExit &&
           "Expecting the ContinueOnTrue successor of AftTerm to be LoopExit");
  }
  AftBlocksFirst[0]->replacePhiUsesWith(SubLoopBlocksLast[0],
                                        SubLoopBlocksLast.back());

  for (unsigned It = 1; It != Count; It++) {
    // Replace the conditional branch of the previous iteration subloop with an
    // unconditional one to this one
    BranchInst *AftTerm =
        cast<BranchInst>(AftBlocksLast[It - 1]->getTerminator());
    BranchInst::Create(AftBlocksFirst[It], AftTerm);
    AftTerm->eraseFromParent();

    AftBlocksFirst[It]->replacePhiUsesWith(SubLoopBlocksLast[It],
                                           SubLoopBlocksLast.back());
    movePHIs(AftBlocksFirst[It], AftBlocksFirst[0]);
  }

  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
  // Dominator Tree. Remove the old links between Fore, Sub and Aft, adding the
  // new ones required.
  if (Count != 1) {
    SmallVector<DominatorTree::UpdateType, 4> DTUpdates;
    DTUpdates.emplace_back(DominatorTree::UpdateKind::Delete, ForeBlocksLast[0],
                           SubLoopBlocksFirst[0]);
    DTUpdates.emplace_back(DominatorTree::UpdateKind::Delete,
                           SubLoopBlocksLast[0], AftBlocksFirst[0]);

    DTUpdates.emplace_back(DominatorTree::UpdateKind::Insert,
                           ForeBlocksLast.back(), SubLoopBlocksFirst[0]);
    DTUpdates.emplace_back(DominatorTree::UpdateKind::Insert,
                           SubLoopBlocksLast.back(), AftBlocksFirst[0]);
    DTU.applyUpdatesPermissive(DTUpdates);
  }

  // Merge adjacent basic blocks, if possible.
  SmallPtrSet<BasicBlock *, 16> MergeBlocks;
  MergeBlocks.insert(ForeBlocksLast.begin(), ForeBlocksLast.end());
  MergeBlocks.insert(SubLoopBlocksLast.begin(), SubLoopBlocksLast.end());
  MergeBlocks.insert(AftBlocksLast.begin(), AftBlocksLast.end());

  MergeBlockSuccessorsIntoGivenBlocks(MergeBlocks, L, &DTU, LI);

  // Apply updates to the DomTree.
  DT = &DTU.getDomTree();

  // At this point, the code is well formed. We now do a quick sweep over the
  // inserted code, doing constant propagation and dead code elimination as we
  // go.
  simplifyLoopAfterUnroll(SubLoop, true, LI, SE, DT, AC, TTI);
  simplifyLoopAfterUnroll(L, !CompletelyUnroll && Count > 1, LI, SE, DT, AC,
                          TTI);

  NumCompletelyUnrolledAndJammed += CompletelyUnroll;
  ++NumUnrolledAndJammed;

  // Update LoopInfo if the loop is completely removed.
  if (CompletelyUnroll)
    LI->erase(L);

#ifndef NDEBUG
  // We shouldn't have done anything to break loop simplify form or LCSSA.
  Loop *OutestLoop = SubLoop->getParentLoop()
                         ? SubLoop->getParentLoop()->getParentLoop()
                               ? SubLoop->getParentLoop()->getParentLoop()
                               : SubLoop->getParentLoop()
                         : SubLoop;
  assert(DT->verify());
  LI->verify(*DT);
  assert(OutestLoop->isRecursivelyLCSSAForm(*DT, *LI));
  if (!CompletelyUnroll)
    assert(L->isLoopSimplifyForm());
  assert(SubLoop->isLoopSimplifyForm());
  SE->verify();
#endif

  return CompletelyUnroll ? LoopUnrollResult::FullyUnrolled
                          : LoopUnrollResult::PartiallyUnrolled;
}
  558. static bool getLoadsAndStores(BasicBlockSet &Blocks,
  559. SmallVector<Instruction *, 4> &MemInstr) {
  560. // Scan the BBs and collect legal loads and stores.
  561. // Returns false if non-simple loads/stores are found.
  562. for (BasicBlock *BB : Blocks) {
  563. for (Instruction &I : *BB) {
  564. if (auto *Ld = dyn_cast<LoadInst>(&I)) {
  565. if (!Ld->isSimple())
  566. return false;
  567. MemInstr.push_back(&I);
  568. } else if (auto *St = dyn_cast<StoreInst>(&I)) {
  569. if (!St->isSimple())
  570. return false;
  571. MemInstr.push_back(&I);
  572. } else if (I.mayReadOrWriteMemory()) {
  573. return false;
  574. }
  575. }
  576. }
  577. return true;
  578. }
  579. static bool preservesForwardDependence(Instruction *Src, Instruction *Dst,
  580. unsigned UnrollLevel, unsigned JamLevel,
  581. bool Sequentialized, Dependence *D) {
  582. // UnrollLevel might carry the dependency Src --> Dst
  583. // Does a different loop after unrolling?
  584. for (unsigned CurLoopDepth = UnrollLevel + 1; CurLoopDepth <= JamLevel;
  585. ++CurLoopDepth) {
  586. auto JammedDir = D->getDirection(CurLoopDepth);
  587. if (JammedDir == Dependence::DVEntry::LT)
  588. return true;
  589. if (JammedDir & Dependence::DVEntry::GT)
  590. return false;
  591. }
  592. return true;
  593. }
  594. static bool preservesBackwardDependence(Instruction *Src, Instruction *Dst,
  595. unsigned UnrollLevel, unsigned JamLevel,
  596. bool Sequentialized, Dependence *D) {
  597. // UnrollLevel might carry the dependency Dst --> Src
  598. for (unsigned CurLoopDepth = UnrollLevel + 1; CurLoopDepth <= JamLevel;
  599. ++CurLoopDepth) {
  600. auto JammedDir = D->getDirection(CurLoopDepth);
  601. if (JammedDir == Dependence::DVEntry::GT)
  602. return true;
  603. if (JammedDir & Dependence::DVEntry::LT)
  604. return false;
  605. }
  606. // Backward dependencies are only preserved if not interleaved.
  607. return Sequentialized;
  608. }
  609. // Check whether it is semantically safe Src and Dst considering any potential
  610. // dependency between them.
  611. //
  612. // @param UnrollLevel The level of the loop being unrolled
  613. // @param JamLevel The level of the loop being jammed; if Src and Dst are on
  614. // different levels, the outermost common loop counts as jammed level
  615. //
  616. // @return true if is safe and false if there is a dependency violation.
  617. static bool checkDependency(Instruction *Src, Instruction *Dst,
  618. unsigned UnrollLevel, unsigned JamLevel,
  619. bool Sequentialized, DependenceInfo &DI) {
  620. assert(UnrollLevel <= JamLevel &&
  621. "Expecting JamLevel to be at least UnrollLevel");
  622. if (Src == Dst)
  623. return true;
  624. // Ignore Input dependencies.
  625. if (isa<LoadInst>(Src) && isa<LoadInst>(Dst))
  626. return true;
  627. // Check whether unroll-and-jam may violate a dependency.
  628. // By construction, every dependency will be lexicographically non-negative
  629. // (if it was, it would violate the current execution order), such as
  630. // (0,0,>,*,*)
  631. // Unroll-and-jam changes the GT execution of two executions to the same
  632. // iteration of the chosen unroll level. That is, a GT dependence becomes a GE
  633. // dependence (or EQ, if we fully unrolled the loop) at the loop's position:
  634. // (0,0,>=,*,*)
  635. // Now, the dependency is not necessarily non-negative anymore, i.e.
  636. // unroll-and-jam may violate correctness.
  637. std::unique_ptr<Dependence> D = DI.depends(Src, Dst, true);
  638. if (!D)
  639. return true;
  640. assert(D->isOrdered() && "Expected an output, flow or anti dep.");
  641. if (D->isConfused()) {
  642. LLVM_DEBUG(dbgs() << " Confused dependency between:\n"
  643. << " " << *Src << "\n"
  644. << " " << *Dst << "\n");
  645. return false;
  646. }
  647. // If outer levels (levels enclosing the loop being unroll-and-jammed) have a
  648. // non-equal direction, then the locations accessed in the inner levels cannot
  649. // overlap in memory. We assumes the indexes never overlap into neighboring
  650. // dimensions.
  651. for (unsigned CurLoopDepth = 1; CurLoopDepth < UnrollLevel; ++CurLoopDepth)
  652. if (!(D->getDirection(CurLoopDepth) & Dependence::DVEntry::EQ))
  653. return true;
  654. auto UnrollDirection = D->getDirection(UnrollLevel);
  655. // If the distance carried by the unrolled loop is 0, then after unrolling
  656. // that distance will become non-zero resulting in non-overlapping accesses in
  657. // the inner loops.
  658. if (UnrollDirection == Dependence::DVEntry::EQ)
  659. return true;
  660. if (UnrollDirection & Dependence::DVEntry::LT &&
  661. !preservesForwardDependence(Src, Dst, UnrollLevel, JamLevel,
  662. Sequentialized, D.get()))
  663. return false;
  664. if (UnrollDirection & Dependence::DVEntry::GT &&
  665. !preservesBackwardDependence(Src, Dst, UnrollLevel, JamLevel,
  666. Sequentialized, D.get()))
  667. return false;
  668. return true;
  669. }
  670. static bool
  671. checkDependencies(Loop &Root, const BasicBlockSet &SubLoopBlocks,
  672. const DenseMap<Loop *, BasicBlockSet> &ForeBlocksMap,
  673. const DenseMap<Loop *, BasicBlockSet> &AftBlocksMap,
  674. DependenceInfo &DI, LoopInfo &LI) {
  675. SmallVector<BasicBlockSet, 8> AllBlocks;
  676. for (Loop *L : Root.getLoopsInPreorder())
  677. if (ForeBlocksMap.find(L) != ForeBlocksMap.end())
  678. AllBlocks.push_back(ForeBlocksMap.lookup(L));
  679. AllBlocks.push_back(SubLoopBlocks);
  680. for (Loop *L : Root.getLoopsInPreorder())
  681. if (AftBlocksMap.find(L) != AftBlocksMap.end())
  682. AllBlocks.push_back(AftBlocksMap.lookup(L));
  683. unsigned LoopDepth = Root.getLoopDepth();
  684. SmallVector<Instruction *, 4> EarlierLoadsAndStores;
  685. SmallVector<Instruction *, 4> CurrentLoadsAndStores;
  686. for (BasicBlockSet &Blocks : AllBlocks) {
  687. CurrentLoadsAndStores.clear();
  688. if (!getLoadsAndStores(Blocks, CurrentLoadsAndStores))
  689. return false;
  690. Loop *CurLoop = LI.getLoopFor((*Blocks.begin())->front().getParent());
  691. unsigned CurLoopDepth = CurLoop->getLoopDepth();
  692. for (auto *Earlier : EarlierLoadsAndStores) {
  693. Loop *EarlierLoop = LI.getLoopFor(Earlier->getParent());
  694. unsigned EarlierDepth = EarlierLoop->getLoopDepth();
  695. unsigned CommonLoopDepth = std::min(EarlierDepth, CurLoopDepth);
  696. for (auto *Later : CurrentLoadsAndStores) {
  697. if (!checkDependency(Earlier, Later, LoopDepth, CommonLoopDepth, false,
  698. DI))
  699. return false;
  700. }
  701. }
  702. size_t NumInsts = CurrentLoadsAndStores.size();
  703. for (size_t I = 0; I < NumInsts; ++I) {
  704. for (size_t J = I; J < NumInsts; ++J) {
  705. if (!checkDependency(CurrentLoadsAndStores[I], CurrentLoadsAndStores[J],
  706. LoopDepth, CurLoopDepth, true, DI))
  707. return false;
  708. }
  709. }
  710. EarlierLoadsAndStores.append(CurrentLoadsAndStores.begin(),
  711. CurrentLoadsAndStores.end());
  712. }
  713. return true;
  714. }
  715. static bool isEligibleLoopForm(const Loop &Root) {
  716. // Root must have a child.
  717. if (Root.getSubLoops().size() != 1)
  718. return false;
  719. const Loop *L = &Root;
  720. do {
  721. // All loops in Root need to be in simplify and rotated form.
  722. if (!L->isLoopSimplifyForm())
  723. return false;
  724. if (!L->isRotatedForm())
  725. return false;
  726. if (L->getHeader()->hasAddressTaken()) {
  727. LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; Address taken\n");
  728. return false;
  729. }
  730. unsigned SubLoopsSize = L->getSubLoops().size();
  731. if (SubLoopsSize == 0)
  732. return true;
  733. // Only one child is allowed.
  734. if (SubLoopsSize != 1)
  735. return false;
  736. // Only loops with a single exit block can be unrolled and jammed.
  737. // The function getExitBlock() is used for this check, rather than
  738. // getUniqueExitBlock() to ensure loops with mulitple exit edges are
  739. // disallowed.
  740. if (!L->getExitBlock()) {
  741. LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; only loops with single exit "
  742. "blocks can be unrolled and jammed.\n");
  743. return false;
  744. }
  745. // Only loops with a single exiting block can be unrolled and jammed.
  746. if (!L->getExitingBlock()) {
  747. LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; only loops with single "
  748. "exiting blocks can be unrolled and jammed.\n");
  749. return false;
  750. }
  751. L = L->getSubLoops()[0];
  752. } while (L);
  753. return true;
  754. }
  755. static Loop *getInnerMostLoop(Loop *L) {
  756. while (!L->getSubLoops().empty())
  757. L = L->getSubLoops()[0];
  758. return L;
  759. }
  760. bool llvm::isSafeToUnrollAndJam(Loop *L, ScalarEvolution &SE, DominatorTree &DT,
  761. DependenceInfo &DI, LoopInfo &LI) {
  762. if (!isEligibleLoopForm(*L)) {
  763. LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; Ineligible loop form\n");
  764. return false;
  765. }
  766. /* We currently handle outer loops like this:
  767. |
  768. ForeFirst <------\ }
  769. Blocks | } ForeBlocks of L
  770. ForeLast | }
  771. | |
  772. ... |
  773. | |
  774. ForeFirst <----\ | }
  775. Blocks | | } ForeBlocks of a inner loop of L
  776. ForeLast | | }
  777. | | |
  778. JamLoopFirst <\ | | }
  779. Blocks | | | } JamLoopBlocks of the innermost loop
  780. JamLoopLast -/ | | }
  781. | | |
  782. AftFirst | | }
  783. Blocks | | } AftBlocks of a inner loop of L
  784. AftLast ------/ | }
  785. | |
  786. ... |
  787. | |
  788. AftFirst | }
  789. Blocks | } AftBlocks of L
  790. AftLast --------/ }
  791. |
  792. There are (theoretically) any number of blocks in ForeBlocks, SubLoopBlocks
  793. and AftBlocks, providing that there is one edge from Fores to SubLoops,
  794. one edge from SubLoops to Afts and a single outer loop exit (from Afts).
  795. In practice we currently limit Aft blocks to a single block, and limit
  796. things further in the profitablility checks of the unroll and jam pass.
  797. Because of the way we rearrange basic blocks, we also require that
  798. the Fore blocks of L on all unrolled iterations are safe to move before the
  799. blocks of the direct child of L of all iterations. So we require that the
  800. phi node looping operands of ForeHeader can be moved to at least the end of
  801. ForeEnd, so that we can arrange cloned Fore Blocks before the subloop and
  802. match up Phi's correctly.
  803. i.e. The old order of blocks used to be
  804. (F1)1 (F2)1 J1_1 J1_2 (A2)1 (A1)1 (F1)2 (F2)2 J2_1 J2_2 (A2)2 (A1)2.
  805. It needs to be safe to transform this to
  806. (F1)1 (F1)2 (F2)1 (F2)2 J1_1 J1_2 J2_1 J2_2 (A2)1 (A2)2 (A1)1 (A1)2.
  807. There are then a number of checks along the lines of no calls, no
  808. exceptions, inner loop IV is consistent, etc. Note that for loops requiring
  809. runtime unrolling, UnrollRuntimeLoopRemainder can also fail in
  810. UnrollAndJamLoop if the trip count cannot be easily calculated.
  811. */
  812. // Split blocks into Fore/SubLoop/Aft based on dominators
  813. Loop *JamLoop = getInnerMostLoop(L);
  814. BasicBlockSet SubLoopBlocks;
  815. DenseMap<Loop *, BasicBlockSet> ForeBlocksMap;
  816. DenseMap<Loop *, BasicBlockSet> AftBlocksMap;
  817. if (!partitionOuterLoopBlocks(*L, *JamLoop, SubLoopBlocks, ForeBlocksMap,
  818. AftBlocksMap, DT)) {
  819. LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; Incompatible loop layout\n");
  820. return false;
  821. }
  822. // Aft blocks may need to move instructions to fore blocks, which becomes more
  823. // difficult if there are multiple (potentially conditionally executed)
  824. // blocks. For now we just exclude loops with multiple aft blocks.
  825. if (AftBlocksMap[L].size() != 1) {
  826. LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; Can't currently handle "
  827. "multiple blocks after the loop\n");
  828. return false;
  829. }
  830. // Check inner loop backedge count is consistent on all iterations of the
  831. // outer loop
  832. if (any_of(L->getLoopsInPreorder(), [&SE](Loop *SubLoop) {
  833. return !hasIterationCountInvariantInParent(SubLoop, SE);
  834. })) {
  835. LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; Inner loop iteration count is "
  836. "not consistent on each iteration\n");
  837. return false;
  838. }
  839. // Check the loop safety info for exceptions.
  840. SimpleLoopSafetyInfo LSI;
  841. LSI.computeLoopSafetyInfo(L);
  842. if (LSI.anyBlockMayThrow()) {
  843. LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; Something may throw\n");
  844. return false;
  845. }
  846. // We've ruled out the easy stuff and now need to check that there are no
  847. // interdependencies which may prevent us from moving the:
  848. // ForeBlocks before Subloop and AftBlocks.
  849. // Subloop before AftBlocks.
  850. // ForeBlock phi operands before the subloop
  851. // Make sure we can move all instructions we need to before the subloop
  852. BasicBlock *Header = L->getHeader();
  853. BasicBlock *Latch = L->getLoopLatch();
  854. BasicBlockSet AftBlocks = AftBlocksMap[L];
  855. Loop *SubLoop = L->getSubLoops()[0];
  856. if (!processHeaderPhiOperands(
  857. Header, Latch, AftBlocks, [&AftBlocks, &SubLoop](Instruction *I) {
  858. if (SubLoop->contains(I->getParent()))
  859. return false;
  860. if (AftBlocks.count(I->getParent())) {
  861. // If we hit a phi node in afts we know we are done (probably
  862. // LCSSA)
  863. if (isa<PHINode>(I))
  864. return false;
  865. // Can't move instructions with side effects or memory
  866. // reads/writes
  867. if (I->mayHaveSideEffects() || I->mayReadOrWriteMemory())
  868. return false;
  869. }
  870. // Keep going
  871. return true;
  872. })) {
  873. LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; can't move required "
  874. "instructions after subloop to before it\n");
  875. return false;
  876. }
  877. // Check for memory dependencies which prohibit the unrolling we are doing.
  878. // Because of the way we are unrolling Fore/Sub/Aft blocks, we need to check
  879. // there are no dependencies between Fore-Sub, Fore-Aft, Sub-Aft and Sub-Sub.
  880. if (!checkDependencies(*L, SubLoopBlocks, ForeBlocksMap, AftBlocksMap, DI,
  881. LI)) {
  882. LLVM_DEBUG(dbgs() << "Won't unroll-and-jam; failed dependency check\n");
  883. return false;
  884. }
  885. return true;
  886. }