//===- LoopDistribute.cpp - Loop Distribution Pass ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Loop Distribution Pass. Its main focus is to
// distribute loops that cannot be vectorized due to dependence cycles. It
// tries to isolate the offending dependences into a new loop allowing
// vectorization of the remaining parts.
//
// For dependence analysis, the pass uses the LoopVectorizer's
// LoopAccessAnalysis. Because this analysis presumes no change in the order of
// memory operations, special care is taken to preserve the lexical order of
// these operations.
//
// Similarly to the Vectorizer, the pass also supports loop versioning to
// run-time disambiguate potentially overlapping arrays.
//
//===----------------------------------------------------------------------===//
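//
// As an illustration (an addition to the original header, showing the typical
// shape of the transformation): a loop with a mixed body such as
//
//   for (unsigned I = 0; I < N; ++I) {
//     A[I + 1] = A[I] * B[I]; // dependence cycle (A[I+1] feeds A[I])
//     C[I] = D[I] * E[I];     // independent, vectorizable
//   }
//
// is distributed into two loops, one per partition, so the second loop can be
// vectorized even though the first must remain sequential:
//
//   for (unsigned I = 0; I < N; ++I) // sequential partition
//     A[I + 1] = A[I] * B[I];
//   for (unsigned I = 0; I < N; ++I) // vectorizable partition
//     C[I] = D[I] * E[I];
//
//===----------------------------------------------------------------------===//
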
#include "llvm/Transforms/Scalar/LoopDistribute.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <cassert>
#include <functional>
#include <list>
#include <tuple>
#include <utility>

using namespace llvm;

#define LDIST_NAME "loop-distribute"
#define DEBUG_TYPE LDIST_NAME

/// @{
/// Metadata attribute names
static const char *const LLVMLoopDistributeFollowupAll =
    "llvm.loop.distribute.followup_all";
static const char *const LLVMLoopDistributeFollowupCoincident =
    "llvm.loop.distribute.followup_coincident";
static const char *const LLVMLoopDistributeFollowupSequential =
    "llvm.loop.distribute.followup_sequential";
static const char *const LLVMLoopDistributeFollowupFallback =
    "llvm.loop.distribute.followup_fallback";
/// @}
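
// For context (illustrative IR, an addition to the original file; the exact
// nesting follows the transform-metadata convention and may differ by
// release): followup attributes let a loop's metadata specify which
// attributes each loop resulting from distribution should carry, e.g.
// requesting vectorization of the coincident (cycle-free) loop:
//
//   br i1 %cond, label %header, label %exit, !llvm.loop !0
//   !0 = distinct !{!0, !1, !2}
//   !1 = !{!"llvm.loop.distribute.enable", i1 true}
//   !2 = !{!"llvm.loop.distribute.followup_coincident", !3}
//   !3 = !{!"llvm.loop.vectorize.enable", i1 true}
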
static cl::opt<bool>
    LDistVerify("loop-distribute-verify", cl::Hidden,
                cl::desc("Turn on DominatorTree and LoopInfo verification "
                         "after Loop Distribution"),
                cl::init(false));

static cl::opt<bool> DistributeNonIfConvertible(
    "loop-distribute-non-if-convertible", cl::Hidden,
    cl::desc("Whether to distribute into a loop that may not be "
             "if-convertible by the loop vectorizer"),
    cl::init(false));

static cl::opt<unsigned> DistributeSCEVCheckThreshold(
    "loop-distribute-scev-check-threshold", cl::init(8), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed for Loop "
             "Distribution"));

static cl::opt<unsigned> PragmaDistributeSCEVCheckThreshold(
    "loop-distribute-scev-check-threshold-with-pragma", cl::init(128),
    cl::Hidden,
    cl::desc(
        "The maximum number of SCEV checks allowed for Loop "
        "Distribution for loop marked with #pragma loop distribute(enable)"));

static cl::opt<bool> EnableLoopDistribute(
    "enable-loop-distribute", cl::Hidden,
    cl::desc("Enable the new, experimental LoopDistribution Pass"),
    cl::init(false));

STATISTIC(NumLoopsDistributed, "Number of loops distributed");

namespace {

/// Maintains the set of instructions of the loop for a partition before
/// cloning. After cloning, it hosts the new loop.
class InstPartition {
  using InstructionSet = SmallPtrSet<Instruction *, 8>;

public:
  InstPartition(Instruction *I, Loop *L, bool DepCycle = false)
      : DepCycle(DepCycle), OrigLoop(L) {
    Set.insert(I);
  }

  /// Returns whether this partition contains a dependence cycle.
  bool hasDepCycle() const { return DepCycle; }

  /// Adds an instruction to this partition.
  void add(Instruction *I) { Set.insert(I); }

  /// Collection accessors.
  InstructionSet::iterator begin() { return Set.begin(); }
  InstructionSet::iterator end() { return Set.end(); }
  InstructionSet::const_iterator begin() const { return Set.begin(); }
  InstructionSet::const_iterator end() const { return Set.end(); }
  bool empty() const { return Set.empty(); }

  /// Moves this partition into \p Other. This partition becomes empty
  /// after this.
  void moveTo(InstPartition &Other) {
    Other.Set.insert(Set.begin(), Set.end());
    Set.clear();
    Other.DepCycle |= DepCycle;
  }

  /// Populates the partition with the transitive closure of all the
  /// instructions that the seeded instructions depend on.
  void populateUsedSet() {
    // FIXME: We currently don't use control-dependence but simply include all
    // blocks (possibly empty at the end) and let simplifycfg mostly clean this
    // up.
    for (auto *B : OrigLoop->getBlocks())
      Set.insert(B->getTerminator());

    // Follow the use-def chains to form a transitive closure of all the
    // instructions that the originally seeded instructions depend on.
    SmallVector<Instruction *, 8> Worklist(Set.begin(), Set.end());
    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();
      // Insert instructions from the loop that we depend on.
      for (Value *V : I->operand_values()) {
        auto *I = dyn_cast<Instruction>(V);
        if (I && OrigLoop->contains(I->getParent()) && Set.insert(I).second)
          Worklist.push_back(I);
      }
    }
  }

  /// Clones the original loop.
  ///
  /// Updates LoopInfo and DominatorTree using the information that block \p
  /// LoopDomBB dominates the loop.
  Loop *cloneLoopWithPreheader(BasicBlock *InsertBefore, BasicBlock *LoopDomBB,
                               unsigned Index, LoopInfo *LI,
                               DominatorTree *DT) {
    ClonedLoop = ::cloneLoopWithPreheader(InsertBefore, LoopDomBB, OrigLoop,
                                          VMap, Twine(".ldist") + Twine(Index),
                                          LI, DT, ClonedLoopBlocks);
    return ClonedLoop;
  }

  /// The cloned loop. If this partition is mapped to the original loop,
  /// this is null.
  const Loop *getClonedLoop() const { return ClonedLoop; }

  /// Returns the loop where this partition ends up after distribution.
  /// If this partition is mapped to the original loop then use the block from
  /// the loop.
  Loop *getDistributedLoop() const {
    return ClonedLoop ? ClonedLoop : OrigLoop;
  }

  /// The VMap that is populated by cloning and then used by
  /// remapInstructions to remap the cloned instructions.
  ValueToValueMapTy &getVMap() { return VMap; }

  /// Remaps the cloned instructions using VMap.
  void remapInstructions() {
    remapInstructionsInBlocks(ClonedLoopBlocks, VMap);
  }

  /// Based on the set of instructions selected for this partition,
  /// removes the unnecessary ones.
  void removeUnusedInsts() {
    SmallVector<Instruction *, 8> Unused;

    for (auto *Block : OrigLoop->getBlocks())
      for (auto &Inst : *Block)
        if (!Set.count(&Inst)) {
          Instruction *NewInst = &Inst;
          if (!VMap.empty())
            NewInst = cast<Instruction>(VMap[NewInst]);

          assert(!isa<BranchInst>(NewInst) &&
                 "Branches are marked used early on");
          Unused.push_back(NewInst);
        }

    // Delete the instructions backwards, as it has a reduced likelihood of
    // having to update as many def-use and use-def chains.
    for (auto *Inst : reverse(Unused)) {
      if (!Inst->use_empty())
        Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
      Inst->eraseFromParent();
    }
  }

  void print() const {
    if (DepCycle)
      dbgs() << "  (cycle)\n";
    for (auto *I : Set)
      // Prefix with the block name.
      dbgs() << "  " << I->getParent()->getName() << ":" << *I << "\n";
  }

  void printBlocks() const {
    for (auto *BB : getDistributedLoop()->getBlocks())
      dbgs() << *BB;
  }

private:
  /// Instructions from OrigLoop selected for this partition.
  InstructionSet Set;

  /// Whether this partition contains a dependence cycle.
  bool DepCycle;

  /// The original loop.
  Loop *OrigLoop;

  /// The cloned loop. If this partition is mapped to the original loop,
  /// this is null.
  Loop *ClonedLoop = nullptr;

  /// The blocks of ClonedLoop including the preheader. If this
  /// partition is mapped to the original loop, this is empty.
  SmallVector<BasicBlock *, 8> ClonedLoopBlocks;

  /// This gets populated once the set of instructions has been
  /// finalized. If this partition is mapped to the original loop, it is
  /// not set.
  ValueToValueMapTy VMap;
};

/// Holds the set of Partitions. It populates them, merges them and then
/// clones the loops.
class InstPartitionContainer {
  using InstToPartitionIdT = DenseMap<Instruction *, int>;

public:
  InstPartitionContainer(Loop *L, LoopInfo *LI, DominatorTree *DT)
      : L(L), LI(LI), DT(DT) {}

  /// Returns the number of partitions.
  unsigned getSize() const { return PartitionContainer.size(); }

  /// Adds \p Inst into the current partition if that is marked to
  /// contain cycles. Otherwise start a new partition for it.
  void addToCyclicPartition(Instruction *Inst) {
    // If the current partition is non-cyclic, start a new one.
    if (PartitionContainer.empty() || !PartitionContainer.back().hasDepCycle())
      PartitionContainer.emplace_back(Inst, L, /*DepCycle=*/true);
    else
      PartitionContainer.back().add(Inst);
  }

  /// Adds \p Inst into a partition that is not marked to contain
  /// dependence cycles.
  ///
  // Initially we isolate memory instructions into as many partitions as
  // possible, then later we may merge them back together.
  void addToNewNonCyclicPartition(Instruction *Inst) {
    PartitionContainer.emplace_back(Inst, L);
  }

  /// Merges adjacent non-cyclic partitions.
  ///
  /// The idea is that we currently only want to isolate the non-vectorizable
  /// partition. We could later allow more distribution among these partitions
  /// too.
  void mergeAdjacentNonCyclic() {
    mergeAdjacentPartitionsIf(
        [](const InstPartition *P) { return !P->hasDepCycle(); });
  }

  /// If a partition contains only conditional stores, we won't vectorize
  /// it. Try to merge it with a previous cyclic partition.
  void mergeNonIfConvertible() {
    mergeAdjacentPartitionsIf([&](const InstPartition *Partition) {
      if (Partition->hasDepCycle())
        return true;

      // Now, check if all stores are conditional in this partition.
      bool seenStore = false;

      for (auto *Inst : *Partition)
        if (isa<StoreInst>(Inst)) {
          seenStore = true;
          if (!LoopAccessInfo::blockNeedsPredication(Inst->getParent(), L, DT))
            return false;
        }
      return seenStore;
    });
  }

  /// Merges the partitions according to various heuristics.
  void mergeBeforePopulating() {
    mergeAdjacentNonCyclic();
    if (!DistributeNonIfConvertible)
      mergeNonIfConvertible();
  }

  /// Merges partitions in order to ensure that no loads are duplicated.
  ///
  /// We can't duplicate loads because that could potentially reorder them.
  /// LoopAccessAnalysis provides dependency information with the context that
  /// the order of memory operations is preserved.
  ///
  /// Returns true if any partitions were merged.
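  ///
  /// (Illustration, an addition to the original comment: if a load seeded
  /// into partition P0 is later also pulled into partition P2 via its use-def
  /// chains, then P1 and P2 are merged back into P0; cloning the load into
  /// P2's loop could otherwise move its second execution past stores
  /// performed by the loops for P0 and P1.)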
  bool mergeToAvoidDuplicatedLoads() {
    using LoadToPartitionT = DenseMap<Instruction *, InstPartition *>;
    using ToBeMergedT = EquivalenceClasses<InstPartition *>;

    LoadToPartitionT LoadToPartition;
    ToBeMergedT ToBeMerged;

    // Step through the partitions and create equivalence between partitions
    // that contain the same load. Also put partitions in between them in the
    // same equivalence class to avoid reordering of memory operations.
    for (PartitionContainerT::iterator I = PartitionContainer.begin(),
                                       E = PartitionContainer.end();
         I != E; ++I) {
      auto *PartI = &*I;

      // If a load occurs in two partitions PartI and PartJ, merge all
      // partitions (PartI, PartJ] into PartI.
      for (Instruction *Inst : *PartI)
        if (isa<LoadInst>(Inst)) {
          bool NewElt;
          LoadToPartitionT::iterator LoadToPart;

          std::tie(LoadToPart, NewElt) =
              LoadToPartition.insert(std::make_pair(Inst, PartI));
          if (!NewElt) {
            LLVM_DEBUG(dbgs()
                       << "Merging partitions due to this load in multiple "
                       << "partitions: " << PartI << ", " << LoadToPart->second
                       << "\n"
                       << *Inst << "\n");

            auto PartJ = I;
            do {
              --PartJ;
              ToBeMerged.unionSets(PartI, &*PartJ);
            } while (&*PartJ != LoadToPart->second);
          }
        }
    }
    if (ToBeMerged.empty())
      return false;

    // Merge the members of an equivalence class into its class leader. This
    // makes the members empty.
    for (ToBeMergedT::iterator I = ToBeMerged.begin(), E = ToBeMerged.end();
         I != E; ++I) {
      if (!I->isLeader())
        continue;

      auto PartI = I->getData();
      for (auto PartJ : make_range(std::next(ToBeMerged.member_begin(I)),
                                   ToBeMerged.member_end())) {
        PartJ->moveTo(*PartI);
      }
    }

    // Remove the empty partitions.
    PartitionContainer.remove_if(
        [](const InstPartition &P) { return P.empty(); });

    return true;
  }

  /// Sets up the mapping between instructions to partitions. If the
  /// instruction is duplicated across multiple partitions, set the entry
  /// to -1.
  void setupPartitionIdOnInstructions() {
    int PartitionID = 0;
    for (const auto &Partition : PartitionContainer) {
      for (Instruction *Inst : Partition) {
        bool NewElt;
        InstToPartitionIdT::iterator Iter;

        std::tie(Iter, NewElt) =
            InstToPartitionId.insert(std::make_pair(Inst, PartitionID));
        if (!NewElt)
          Iter->second = -1;
      }
      ++PartitionID;
    }
  }

  /// Populates the partitions with everything that the seeding
  /// instructions require.
  void populateUsedSet() {
    for (auto &P : PartitionContainer)
      P.populateUsedSet();
  }

  /// This performs the main chunk of the work of cloning the loops for
  /// the partitions.
  void cloneLoops() {
    BasicBlock *OrigPH = L->getLoopPreheader();
    // At this point the predecessor of the preheader is either the memcheck
    // block or the top part of the original preheader.
    BasicBlock *Pred = OrigPH->getSinglePredecessor();
    assert(Pred && "Preheader does not have a single predecessor");
    BasicBlock *ExitBlock = L->getExitBlock();
    assert(ExitBlock && "No single exit block");
    Loop *NewLoop;

    assert(!PartitionContainer.empty() && "at least two partitions expected");
    // We're cloning the preheader along with the loop so we already made sure
    // it was empty.
    assert(&*OrigPH->begin() == OrigPH->getTerminator() &&
           "preheader not empty");

    // Preserve the original loop ID for use after the transformation.
    MDNode *OrigLoopID = L->getLoopID();

    // Create a loop for each partition except the last. Clone the original
    // loop before PH along with adding a preheader for the cloned loop. Then
    // update PH to point to the newly added preheader.
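    //
    // (Illustration, an addition to the original comment: with two partitions
    // P0 and P1, where P1 keeps the original loop, the resulting chain is
    // roughly Pred -> P0.preheader -> P0.loop -> OrigPH -> P1.loop -> Exit;
    // each cloned loop's exit is remapped to the next loop's preheader.)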
    BasicBlock *TopPH = OrigPH;
    unsigned Index = getSize() - 1;
    for (auto I = std::next(PartitionContainer.rbegin()),
              E = PartitionContainer.rend();
         I != E; ++I, --Index, TopPH = NewLoop->getLoopPreheader()) {
      auto *Part = &*I;

      NewLoop = Part->cloneLoopWithPreheader(TopPH, Pred, Index, LI, DT);

      Part->getVMap()[ExitBlock] = TopPH;
      Part->remapInstructions();
      setNewLoopID(OrigLoopID, Part);
    }
    Pred->getTerminator()->replaceUsesOfWith(OrigPH, TopPH);

    // Also set a new loop ID for the last loop.
    setNewLoopID(OrigLoopID, &PartitionContainer.back());

    // Now go in forward order and update the immediate dominator for the
    // preheaders with the exiting block of the previous loop. Dominance
    // within the loop is updated in cloneLoopWithPreheader.
    for (auto Curr = PartitionContainer.cbegin(),
              Next = std::next(PartitionContainer.cbegin()),
              E = PartitionContainer.cend();
         Next != E; ++Curr, ++Next)
      DT->changeImmediateDominator(
          Next->getDistributedLoop()->getLoopPreheader(),
          Curr->getDistributedLoop()->getExitingBlock());
  }

  /// Removes the dead instructions from the cloned loops.
  void removeUnusedInsts() {
    for (auto &Partition : PartitionContainer)
      Partition.removeUnusedInsts();
  }

  /// For each memory pointer, this computes the partition id that the
  /// pointer is used in.
  ///
  /// This returns an array of int where the I-th entry corresponds to the
  /// I-th entry in LAI.getRuntimePointerCheck(). If the pointer is used in
  /// multiple partitions its entry is set to -1.
  SmallVector<int, 8>
  computePartitionSetForPointers(const LoopAccessInfo &LAI) {
    const RuntimePointerChecking *RtPtrCheck = LAI.getRuntimePointerChecking();

    unsigned N = RtPtrCheck->Pointers.size();
    SmallVector<int, 8> PtrToPartitions(N);
    for (unsigned I = 0; I < N; ++I) {
      Value *Ptr = RtPtrCheck->Pointers[I].PointerValue;
      auto Instructions =
          LAI.getInstructionsForAccess(Ptr, RtPtrCheck->Pointers[I].IsWritePtr);

      int &Partition = PtrToPartitions[I];
      // First set it to uninitialized.
      Partition = -2;
      for (Instruction *Inst : Instructions) {
        // Note that this could be -1 if Inst is duplicated across multiple
        // partitions.
        int ThisPartition = this->InstToPartitionId[Inst];
        if (Partition == -2)
          Partition = ThisPartition;
        // -1 means belonging to multiple partitions.
        else if (Partition == -1)
          break;
        else if (Partition != (int)ThisPartition)
          Partition = -1;
      }
      assert(Partition != -2 && "Pointer not belonging to any partition");
    }

    return PtrToPartitions;
  }

  void print(raw_ostream &OS) const {
    unsigned Index = 0;
    for (const auto &P : PartitionContainer) {
      OS << "Partition " << Index++ << " (" << &P << "):\n";
      P.print();
    }
  }

  void dump() const { print(dbgs()); }

#ifndef NDEBUG
  friend raw_ostream &operator<<(raw_ostream &OS,
                                 const InstPartitionContainer &Partitions) {
    Partitions.print(OS);
    return OS;
  }
#endif

  void printBlocks() const {
    unsigned Index = 0;
    for (const auto &P : PartitionContainer) {
      dbgs() << "\nPartition " << Index++ << " (" << &P << "):\n";
      P.printBlocks();
    }
  }

private:
  using PartitionContainerT = std::list<InstPartition>;

  /// List of partitions.
  PartitionContainerT PartitionContainer;

  /// Mapping from Instruction to partition Id. If the instruction
  /// belongs to multiple partitions the entry contains -1.
  InstToPartitionIdT InstToPartitionId;

  Loop *L;
  LoopInfo *LI;
  DominatorTree *DT;

  /// The control structure to merge adjacent partitions if both satisfy
  /// the \p Predicate.
  template <class UnaryPredicate>
  void mergeAdjacentPartitionsIf(UnaryPredicate Predicate) {
    InstPartition *PrevMatch = nullptr;
    for (auto I = PartitionContainer.begin(); I != PartitionContainer.end();) {
      auto DoesMatch = Predicate(&*I);
      if (PrevMatch == nullptr && DoesMatch) {
        PrevMatch = &*I;
        ++I;
      } else if (PrevMatch != nullptr && DoesMatch) {
        I->moveTo(*PrevMatch);
        I = PartitionContainer.erase(I);
      } else {
        PrevMatch = nullptr;
        ++I;
      }
    }
  }

  /// Assign new LoopIDs for the partition's cloned loop.
  void setNewLoopID(MDNode *OrigLoopID, InstPartition *Part) {
    Optional<MDNode *> PartitionID = makeFollowupLoopID(
        OrigLoopID,
        {LLVMLoopDistributeFollowupAll,
         Part->hasDepCycle() ? LLVMLoopDistributeFollowupSequential
                             : LLVMLoopDistributeFollowupCoincident});
    if (PartitionID.hasValue()) {
      Loop *NewLoop = Part->getDistributedLoop();
      NewLoop->setLoopID(PartitionID.getValue());
    }
  }
};

/// For each memory instruction, this class maintains the difference between
/// the number of unsafe dependences that start out from this instruction and
/// the number that end here.
///
/// By traversing the memory instructions in program order and accumulating
/// this number, we know whether any unsafe dependence crosses over a program
/// point.
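///
/// (Worked example, an addition to the original comment: a single backward
/// dependence from a later store back to an earlier load contributes +1 at
/// the load and -1 at the store, so the running sum is positive for every
/// instruction lexically between the two, marking that whole span as part of
/// one cyclic region.)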
class MemoryInstructionDependences {
  using Dependence = MemoryDepChecker::Dependence;

public:
  struct Entry {
    Instruction *Inst;
    unsigned NumUnsafeDependencesStartOrEnd = 0;

    Entry(Instruction *Inst) : Inst(Inst) {}
  };

  using AccessesType = SmallVector<Entry, 8>;

  AccessesType::const_iterator begin() const { return Accesses.begin(); }
  AccessesType::const_iterator end() const { return Accesses.end(); }

  MemoryInstructionDependences(
      const SmallVectorImpl<Instruction *> &Instructions,
      const SmallVectorImpl<Dependence> &Dependences) {
    Accesses.append(Instructions.begin(), Instructions.end());

    LLVM_DEBUG(dbgs() << "Backward dependences:\n");
    for (auto &Dep : Dependences)
      if (Dep.isPossiblyBackward()) {
        // Note that the designations source and destination follow the program
        // order, i.e. source is always first. (The direction is given by the
        // DepType.)
        ++Accesses[Dep.Source].NumUnsafeDependencesStartOrEnd;
        --Accesses[Dep.Destination].NumUnsafeDependencesStartOrEnd;

        LLVM_DEBUG(Dep.print(dbgs(), 2, Instructions));
      }
  }

private:
  AccessesType Accesses;
};

/// The actual class performing the per-loop work.
class LoopDistributeForLoop {
public:
  LoopDistributeForLoop(Loop *L, Function *F, LoopInfo *LI, DominatorTree *DT,
                        ScalarEvolution *SE, OptimizationRemarkEmitter *ORE)
      : L(L), F(F), LI(LI), DT(DT), SE(SE), ORE(ORE) {
    setForced();
  }

  /// Try to distribute an inner-most loop.
  bool processLoop(std::function<const LoopAccessInfo &(Loop &)> &GetLAA) {
    assert(L->isInnermost() && "Only process inner loops.");

    LLVM_DEBUG(dbgs() << "\nLDist: In \""
                      << L->getHeader()->getParent()->getName()
                      << "\" checking " << *L << "\n");

    // Having a single exit block implies there's also one exiting block.
    if (!L->getExitBlock())
      return fail("MultipleExitBlocks", "multiple exit blocks");
    if (!L->isLoopSimplifyForm())
      return fail("NotLoopSimplifyForm",
                  "loop is not in loop-simplify form");
    if (!L->isRotatedForm())
      return fail("NotBottomTested", "loop is not bottom tested");

    BasicBlock *PH = L->getLoopPreheader();

    LAI = &GetLAA(*L);

    // Currently, we only distribute to isolate the part of the loop with
    // dependence cycles to enable partial vectorization.
    if (LAI->canVectorizeMemory())
      return fail("MemOpsCanBeVectorized",
                  "memory operations are safe for vectorization");

    auto *Dependences = LAI->getDepChecker().getDependences();
    if (!Dependences || Dependences->empty())
      return fail("NoUnsafeDeps", "no unsafe dependences to isolate");

    InstPartitionContainer Partitions(L, LI, DT);

    // First, go through each memory operation and assign them to consecutive
    // partitions (the order of partitions follows program order). Put those
    // with unsafe dependences into a "cyclic" partition; otherwise put each
    // store in its own "non-cyclic" partition (we'll merge these later).
    //
    // Note that a memory operation (e.g. Load2 below) at a program point that
    // has an unsafe dependence (Store3->Load1) spanning over it must be
    // included in the same cyclic partition as the dependent operations. This
    // is to preserve the original program order after distribution. E.g.:
    //
    //           NumUnsafeDependencesStartOrEnd  NumUnsafeDependencesActive
    //  Load1   -.                 1                       0->1
    //  Load2    | /Unsafe/        0                        1
    //  Store3  -'                -1                       1->0
    //  Load4                      0                        0
    //
    // NumUnsafeDependencesActive > 0 indicates this situation and in this
    // case we just keep assigning to the same cyclic partition until
    // NumUnsafeDependencesActive reaches 0.
    const MemoryDepChecker &DepChecker = LAI->getDepChecker();
    MemoryInstructionDependences MID(DepChecker.getMemoryInstructions(),
                                     *Dependences);

    int NumUnsafeDependencesActive = 0;
    for (auto &InstDep : MID) {
      Instruction *I = InstDep.Inst;
      // We update NumUnsafeDependencesActive post-instruction, catch the
      // start of a dependence directly via NumUnsafeDependencesStartOrEnd.
      if (NumUnsafeDependencesActive ||
          InstDep.NumUnsafeDependencesStartOrEnd > 0)
        Partitions.addToCyclicPartition(I);
      else
        Partitions.addToNewNonCyclicPartition(I);
      NumUnsafeDependencesActive += InstDep.NumUnsafeDependencesStartOrEnd;
      assert(NumUnsafeDependencesActive >= 0 &&
             "Negative number of dependences active");
    }

    // Add partitions for values used outside. These partitions can be out of
    // order from the original program order. This is OK because if the
    // partition uses a load we will merge this partition with the original
    // partition of the load that we set up in the previous loop (see
    // mergeToAvoidDuplicatedLoads).
    auto DefsUsedOutside = findDefsUsedOutsideOfLoop(L);
    for (auto *Inst : DefsUsedOutside)
      Partitions.addToNewNonCyclicPartition(Inst);

    LLVM_DEBUG(dbgs() << "Seeded partitions:\n" << Partitions);
    if (Partitions.getSize() < 2)
      return fail("CantIsolateUnsafeDeps",
                  "cannot isolate unsafe dependencies");

    // Run the merge heuristics: Merge non-cyclic adjacent partitions since we
    // should be able to vectorize these together.
    Partitions.mergeBeforePopulating();
    LLVM_DEBUG(dbgs() << "\nMerged partitions:\n" << Partitions);
    if (Partitions.getSize() < 2)
      return fail("CantIsolateUnsafeDeps",
                  "cannot isolate unsafe dependencies");

    // Now, populate the partitions with non-memory operations.
    Partitions.populateUsedSet();
    LLVM_DEBUG(dbgs() << "\nPopulated partitions:\n" << Partitions);

    // In order to preserve original lexical order for loads, keep them in the
    // partition that we set up in the MemoryInstructionDependences loop.
    if (Partitions.mergeToAvoidDuplicatedLoads()) {
      LLVM_DEBUG(dbgs() << "\nPartitions merged to ensure unique loads:\n"
                        << Partitions);
      if (Partitions.getSize() < 2)
        return fail("CantIsolateUnsafeDeps",
                    "cannot isolate unsafe dependencies");
    }

    // Don't distribute the loop if we need too many SCEV run-time checks, or
    // any at all if it's illegal to insert them.
    const SCEVUnionPredicate &Pred = LAI->getPSE().getUnionPredicate();
    if (LAI->hasConvergentOp() && !Pred.isAlwaysTrue()) {
      return fail("RuntimeCheckWithConvergent",
                  "may not insert runtime check with convergent operation");
    }

    if (Pred.getComplexity() > (IsForced.getValueOr(false)
                                    ? PragmaDistributeSCEVCheckThreshold
                                    : DistributeSCEVCheckThreshold))
      return fail("TooManySCEVRuntimeChecks",
                  "too many SCEV run-time checks needed.\n");

    if (!IsForced.getValueOr(false) && hasDisableAllTransformsHint(L))
      return fail("HeuristicDisabled", "distribution heuristic disabled");

    LLVM_DEBUG(dbgs() << "\nDistributing loop: " << *L << "\n");
    // We're done forming the partitions; set up the reverse mapping from
    // instructions to partitions.
    Partitions.setupPartitionIdOnInstructions();

    // If we need run-time checks, version the loop now.
    auto PtrToPartition = Partitions.computePartitionSetForPointers(*LAI);
    const auto *RtPtrChecking = LAI->getRuntimePointerChecking();
    const auto &AllChecks = RtPtrChecking->getChecks();
    auto Checks = includeOnlyCrossPartitionChecks(AllChecks, PtrToPartition,
                                                  RtPtrChecking);

    if (LAI->hasConvergentOp() && !Checks.empty()) {
      return fail("RuntimeCheckWithConvergent",
                  "may not insert runtime check with convergent operation");
    }

    // To keep things simple, have an empty preheader before we version or
    // clone the loop. (Also split if this has no predecessor, i.e. entry,
    // because we rely on PH having a predecessor.)
    if (!PH->getSinglePredecessor() || &*PH->begin() != PH->getTerminator())
      SplitBlock(PH, PH->getTerminator(), DT, LI);

    if (!Pred.isAlwaysTrue() || !Checks.empty()) {
      assert(!LAI->hasConvergentOp() && "inserting illegal loop versioning");

      MDNode *OrigLoopID = L->getLoopID();

      LLVM_DEBUG(dbgs() << "\nPointers:\n");
      LLVM_DEBUG(LAI->getRuntimePointerChecking()->printChecks(dbgs(), Checks));
      LoopVersioning LVer(*LAI, Checks, L, LI, DT, SE);
      LVer.versionLoop(DefsUsedOutside);
      LVer.annotateLoopWithNoAlias();

      // The unversioned loop will not be changed, so we inherit all attributes
      // from the original loop, but remove the loop distribution metadata to
      // avoid distributing it again.
      MDNode *UnversionedLoopID =
          makeFollowupLoopID(OrigLoopID,
                             {LLVMLoopDistributeFollowupAll,
                              LLVMLoopDistributeFollowupFallback},
                             "llvm.loop.distribute.", true)
              .getValue();
      LVer.getNonVersionedLoop()->setLoopID(UnversionedLoopID);
    }

    // Create identical copies of the original loop for each partition and hook
    // them up sequentially.
    Partitions.cloneLoops();

    // Now, remove the instructions from each loop that don't belong to that
    // partition.
    Partitions.removeUnusedInsts();
    LLVM_DEBUG(dbgs() << "\nAfter removing unused Instrs:\n");
    LLVM_DEBUG(Partitions.printBlocks());

    if (LDistVerify) {
      LI->verify(*DT);
      assert(DT->verify(DominatorTree::VerificationLevel::Fast));
    }

    ++NumLoopsDistributed;
    // Report the success.
    ORE->emit([&]() {
      return OptimizationRemark(LDIST_NAME, "Distribute", L->getStartLoc(),
                                L->getHeader())
             << "distributed loop";
    });
    return true;
  }

  /// Provides diagnostics, then \returns false.
  bool fail(StringRef RemarkName, StringRef Message) {
    LLVMContext &Ctx = F->getContext();
    bool Forced = isForced().getValueOr(false);

    LLVM_DEBUG(dbgs() << "Skipping; " << Message << "\n");

    // With Rpass-missed, report that distribution failed.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LDIST_NAME, "NotDistributed",
                                      L->getStartLoc(), L->getHeader())
             << "loop not distributed: use -Rpass-analysis=loop-distribute "
                "for more info";
    });

    // With Rpass-analysis, report why. This is on by default if distribution
    // was requested explicitly.
    ORE->emit(OptimizationRemarkAnalysis(
                  Forced ? OptimizationRemarkAnalysis::AlwaysPrint : LDIST_NAME,
                  RemarkName, L->getStartLoc(), L->getHeader())
              << "loop not distributed: " << Message);

    // Also issue a warning if distribution was requested explicitly but it
    // failed.
    if (Forced)
      Ctx.diagnose(DiagnosticInfoOptimizationFailure(
          *F, L->getStartLoc(), "loop not distributed: failed "
                                "explicitly specified loop distribution"));

    return false;
  }

  /// Returns whether distribution was forced to be enabled or disabled for
  /// the loop.
  ///
  /// If the optional has a value, it indicates whether distribution was forced
  /// to be enabled (true) or disabled (false). If the optional has no value
  /// distribution was not forced either way.
  const Optional<bool> &isForced() const { return IsForced; }

private:
  /// Filter out checks between pointers from the same partition.
  ///
  /// \p PtrToPartition contains the partition number for pointers. Partition
  /// number -1 means that the pointer is used in multiple partitions. In this
  /// case we can't safely omit the check.
  SmallVector<RuntimePointerCheck, 4> includeOnlyCrossPartitionChecks(
      const SmallVectorImpl<RuntimePointerCheck> &AllChecks,
      const SmallVectorImpl<int> &PtrToPartition,
      const RuntimePointerChecking *RtPtrChecking) {
    SmallVector<RuntimePointerCheck, 4> Checks;

    copy_if(AllChecks, std::back_inserter(Checks),
            [&](const RuntimePointerCheck &Check) {
              for (unsigned PtrIdx1 : Check.first->Members)
                for (unsigned PtrIdx2 : Check.second->Members)
                  // Only include this check if there is a pair of pointers
                  // that require checking and the pointers fall into
                  // separate partitions.
                  //
                  // (Note that we already know at this point that the two
                  // pointer groups need checking but it doesn't follow
                  // that each pair of pointers within the two groups need
                  // checking as well.
                  //
                  // In other words we don't want to include a check just
                  // because there is a pair of pointers between the two
                  // pointer groups that require checks and a different
                  // pair whose pointers fall into different partitions.)
                  if (RtPtrChecking->needsChecking(PtrIdx1, PtrIdx2) &&
                      !RuntimePointerChecking::arePointersInSamePartition(
                          PtrToPartition, PtrIdx1, PtrIdx2))
                    return true;
              return false;
            });

    return Checks;
  }

  /// Check whether the loop metadata is forcing distribution to be
  /// enabled/disabled.
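  ///
  /// (For example, an addition to the original comment: Clang lowers
  /// "#pragma clang loop distribute(enable)" to "llvm.loop.distribute.enable"
  /// metadata with an i1 true operand, which is what this reads.)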
  void setForced() {
    Optional<const MDOperand *> Value =
        findStringMetadataForLoop(L, "llvm.loop.distribute.enable");
    if (!Value)
      return;

    const MDOperand *Op = *Value;
    assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
    IsForced = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
  }

  Loop *L;
  Function *F;

  // Analyses used.
  LoopInfo *LI;
  const LoopAccessInfo *LAI = nullptr;
  DominatorTree *DT;
  ScalarEvolution *SE;
  OptimizationRemarkEmitter *ORE;

  /// Indicates whether distribution is forced to be enabled/disabled for
  /// the loop.
  ///
  /// If the optional has a value, it indicates whether distribution was forced
  /// to be enabled (true) or disabled (false). If the optional has no value
  /// distribution was not forced either way.
  Optional<bool> IsForced;
};

} // end anonymous namespace

/// Shared implementation between new and old PMs.
static bool runImpl(Function &F, LoopInfo *LI, DominatorTree *DT,
                    ScalarEvolution *SE, OptimizationRemarkEmitter *ORE,
                    std::function<const LoopAccessInfo &(Loop &)> &GetLAA) {
  // Build up a worklist of inner-loops to distribute. This is necessary as the
  // act of distributing a loop creates new loops and can invalidate iterators
  // across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *TopLevelLoop : *LI)
    for (Loop *L : depth_first(TopLevelLoop))
      // We only handle inner-most loops.
      if (L->isInnermost())
        Worklist.push_back(L);

  // Now walk the identified inner loops.
  bool Changed = false;
  for (Loop *L : Worklist) {
    LoopDistributeForLoop LDL(L, &F, LI, DT, SE, ORE);

    // If distribution was forced for the specific loop to be
    // enabled/disabled, follow that. Otherwise use the global flag.
    if (LDL.isForced().getValueOr(EnableLoopDistribute))
      Changed |= LDL.processLoop(GetLAA);
  }

  // Report whether any loop in the function was changed.
  return Changed;
}

namespace {

/// The pass class.
class LoopDistributeLegacy : public FunctionPass {
public:
  static char ID;

  LoopDistributeLegacy() : FunctionPass(ID) {
    // The default is set by the caller.
    initializeLoopDistributeLegacyPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
    std::function<const LoopAccessInfo &(Loop &)> GetLAA =
        [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };

    return runImpl(F, LI, DT, SE, ORE, GetLAA);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<LoopAccessLegacyAnalysis>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
  }
};

} // end anonymous namespace

PreservedAnalyses LoopDistributePass::run(Function &F,
                                          FunctionAnalysisManager &AM) {
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  // We don't directly need these analyses but they're required for loop
  // analyses so provide them below.
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,      SE,
                                      TLI, TTI, nullptr, nullptr, nullptr};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };

  bool Changed = runImpl(F, &LI, &DT, &SE, &ORE, GetLAA);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<LoopAnalysis>();
  PA.preserve<DominatorTreeAnalysis>();
  return PA;
}

char LoopDistributeLegacy::ID;

static const char ldist_name[] = "Loop Distribution";

INITIALIZE_PASS_BEGIN(LoopDistributeLegacy, LDIST_NAME, ldist_name, false,
                      false)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(LoopDistributeLegacy, LDIST_NAME, ldist_name, false, false)

FunctionPass *llvm::createLoopDistributePass() {
  return new LoopDistributeLegacy();
}
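
// Usage sketch (an addition, not part of the original file; flag spellings
// follow the registrations above):
//
//   opt -passes=loop-distribute -enable-loop-distribute -S in.ll   (new PM)
//   opt -loop-distribute -enable-loop-distribute -S in.ll       (legacy PM)
//
// Loops carrying "llvm.loop.distribute.enable" metadata are processed even
// without -enable-loop-distribute, per the isForced() check in runImpl.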