LoopCacheAnalysis.cpp 23 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661
  1. //===- LoopCacheAnalysis.cpp - Loop Cache Analysis -------------------------==//
  2. //
  3. // The LLVM Compiler Infrastructure
  4. //
  5. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  6. // See https://llvm.org/LICENSE.txt for license information.
  7. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  8. //
  9. //===----------------------------------------------------------------------===//
  10. ///
  11. /// \file
  12. /// This file defines the implementation for the loop cache analysis.
  13. /// The implementation is largely based on the following paper:
  14. ///
  15. /// Compiler Optimizations for Improving Data Locality
  16. /// By: Steve Carr, Katherine S. McKinley, Chau-Wen Tseng
  17. /// http://www.cs.utexas.edu/users/mckinley/papers/asplos-1994.pdf
  18. ///
  19. /// The general approach taken to estimate the number of cache lines used by the
  20. /// memory references in an inner loop is:
  21. /// 1. Partition memory references that exhibit temporal or spacial reuse
  22. /// into reference groups.
/// 2. For each loop L in a loop nest LN:
  24. /// a. Compute the cost of the reference group
  25. /// b. Compute the loop cost by summing up the reference groups costs
  26. //===----------------------------------------------------------------------===//
  27. #include "llvm/Analysis/LoopCacheAnalysis.h"
  28. #include "llvm/ADT/BreadthFirstIterator.h"
  29. #include "llvm/ADT/Sequence.h"
  30. #include "llvm/ADT/SmallVector.h"
  31. #include "llvm/Analysis/AliasAnalysis.h"
  32. #include "llvm/Analysis/Delinearization.h"
  33. #include "llvm/Analysis/DependenceAnalysis.h"
  34. #include "llvm/Analysis/LoopInfo.h"
  35. #include "llvm/Analysis/ScalarEvolutionExpressions.h"
  36. #include "llvm/Analysis/TargetTransformInfo.h"
  37. #include "llvm/Support/CommandLine.h"
  38. #include "llvm/Support/Debug.h"
  39. using namespace llvm;
  40. #define DEBUG_TYPE "loop-cache-cost"
  41. static cl::opt<unsigned> DefaultTripCount(
  42. "default-trip-count", cl::init(100), cl::Hidden,
  43. cl::desc("Use this to specify the default trip count of a loop"));
  44. // In this analysis two array references are considered to exhibit temporal
  45. // reuse if they access either the same memory location, or a memory location
  46. // with distance smaller than a configurable threshold.
  47. static cl::opt<unsigned> TemporalReuseThreshold(
  48. "temporal-reuse-threshold", cl::init(2), cl::Hidden,
  49. cl::desc("Use this to specify the max. distance between array elements "
  50. "accessed in a loop so that the elements are classified to have "
  51. "temporal reuse"));
  52. /// Retrieve the innermost loop in the given loop nest \p Loops. It returns a
  53. /// nullptr if any loops in the loop vector supplied has more than one sibling.
  54. /// The loop vector is expected to contain loops collected in breadth-first
  55. /// order.
  56. static Loop *getInnerMostLoop(const LoopVectorTy &Loops) {
  57. assert(!Loops.empty() && "Expecting a non-empy loop vector");
  58. Loop *LastLoop = Loops.back();
  59. Loop *ParentLoop = LastLoop->getParentLoop();
  60. if (ParentLoop == nullptr) {
  61. assert(Loops.size() == 1 && "Expecting a single loop");
  62. return LastLoop;
  63. }
  64. return (llvm::is_sorted(Loops,
  65. [](const Loop *L1, const Loop *L2) {
  66. return L1->getLoopDepth() < L2->getLoopDepth();
  67. }))
  68. ? LastLoop
  69. : nullptr;
  70. }
  71. static bool isOneDimensionalArray(const SCEV &AccessFn, const SCEV &ElemSize,
  72. const Loop &L, ScalarEvolution &SE) {
  73. const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(&AccessFn);
  74. if (!AR || !AR->isAffine())
  75. return false;
  76. assert(AR->getLoop() && "AR should have a loop");
  77. // Check that start and increment are not add recurrences.
  78. const SCEV *Start = AR->getStart();
  79. const SCEV *Step = AR->getStepRecurrence(SE);
  80. if (isa<SCEVAddRecExpr>(Start) || isa<SCEVAddRecExpr>(Step))
  81. return false;
  82. // Check that start and increment are both invariant in the loop.
  83. if (!SE.isLoopInvariant(Start, &L) || !SE.isLoopInvariant(Step, &L))
  84. return false;
  85. const SCEV *StepRec = AR->getStepRecurrence(SE);
  86. if (StepRec && SE.isKnownNegative(StepRec))
  87. StepRec = SE.getNegativeSCEV(StepRec);
  88. return StepRec == &ElemSize;
  89. }
  90. /// Compute the trip count for the given loop \p L. Return the SCEV expression
  91. /// for the trip count or nullptr if it cannot be computed.
  92. static const SCEV *computeTripCount(const Loop &L, ScalarEvolution &SE) {
  93. const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(&L);
  94. if (isa<SCEVCouldNotCompute>(BackedgeTakenCount) ||
  95. !isa<SCEVConstant>(BackedgeTakenCount))
  96. return nullptr;
  97. return SE.getTripCountFromExitCount(BackedgeTakenCount);
  98. }
  99. //===----------------------------------------------------------------------===//
  100. // IndexedReference implementation
  101. //
  102. raw_ostream &llvm::operator<<(raw_ostream &OS, const IndexedReference &R) {
  103. if (!R.IsValid) {
  104. OS << R.StoreOrLoadInst;
  105. OS << ", IsValid=false.";
  106. return OS;
  107. }
  108. OS << *R.BasePointer;
  109. for (const SCEV *Subscript : R.Subscripts)
  110. OS << "[" << *Subscript << "]";
  111. OS << ", Sizes: ";
  112. for (const SCEV *Size : R.Sizes)
  113. OS << "[" << *Size << "]";
  114. return OS;
  115. }
  116. IndexedReference::IndexedReference(Instruction &StoreOrLoadInst,
  117. const LoopInfo &LI, ScalarEvolution &SE)
  118. : StoreOrLoadInst(StoreOrLoadInst), SE(SE) {
  119. assert((isa<StoreInst>(StoreOrLoadInst) || isa<LoadInst>(StoreOrLoadInst)) &&
  120. "Expecting a load or store instruction");
  121. IsValid = delinearize(LI);
  122. if (IsValid)
  123. LLVM_DEBUG(dbgs().indent(2) << "Succesfully delinearized: " << *this
  124. << "\n");
  125. }
  126. Optional<bool> IndexedReference::hasSpacialReuse(const IndexedReference &Other,
  127. unsigned CLS,
  128. AAResults &AA) const {
  129. assert(IsValid && "Expecting a valid reference");
  130. if (BasePointer != Other.getBasePointer() && !isAliased(Other, AA)) {
  131. LLVM_DEBUG(dbgs().indent(2)
  132. << "No spacial reuse: different base pointers\n");
  133. return false;
  134. }
  135. unsigned NumSubscripts = getNumSubscripts();
  136. if (NumSubscripts != Other.getNumSubscripts()) {
  137. LLVM_DEBUG(dbgs().indent(2)
  138. << "No spacial reuse: different number of subscripts\n");
  139. return false;
  140. }
  141. // all subscripts must be equal, except the leftmost one (the last one).
  142. for (auto SubNum : seq<unsigned>(0, NumSubscripts - 1)) {
  143. if (getSubscript(SubNum) != Other.getSubscript(SubNum)) {
  144. LLVM_DEBUG(dbgs().indent(2) << "No spacial reuse, different subscripts: "
  145. << "\n\t" << *getSubscript(SubNum) << "\n\t"
  146. << *Other.getSubscript(SubNum) << "\n");
  147. return false;
  148. }
  149. }
  150. // the difference between the last subscripts must be less than the cache line
  151. // size.
  152. const SCEV *LastSubscript = getLastSubscript();
  153. const SCEV *OtherLastSubscript = Other.getLastSubscript();
  154. const SCEVConstant *Diff = dyn_cast<SCEVConstant>(
  155. SE.getMinusSCEV(LastSubscript, OtherLastSubscript));
  156. if (Diff == nullptr) {
  157. LLVM_DEBUG(dbgs().indent(2)
  158. << "No spacial reuse, difference between subscript:\n\t"
  159. << *LastSubscript << "\n\t" << OtherLastSubscript
  160. << "\nis not constant.\n");
  161. return None;
  162. }
  163. bool InSameCacheLine = (Diff->getValue()->getSExtValue() < CLS);
  164. LLVM_DEBUG({
  165. if (InSameCacheLine)
  166. dbgs().indent(2) << "Found spacial reuse.\n";
  167. else
  168. dbgs().indent(2) << "No spacial reuse.\n";
  169. });
  170. return InSameCacheLine;
  171. }
/// Determine whether this reference and \p Other exhibit temporal reuse in
/// loop \p L, i.e. they access the same memory location (or locations within
/// \p MaxDistance iterations) according to dependence analysis. Returns None
/// when a dependence distance is unknown.
Optional<bool> IndexedReference::hasTemporalReuse(const IndexedReference &Other,
                                                  unsigned MaxDistance,
                                                  const Loop &L,
                                                  DependenceInfo &DI,
                                                  AAResults &AA) const {
  assert(IsValid && "Expecting a valid reference");

  // References to different (non must-aliasing) arrays cannot reuse each
  // other's cached data.
  if (BasePointer != Other.getBasePointer() && !isAliased(Other, AA)) {
    LLVM_DEBUG(dbgs().indent(2)
               << "No temporal reuse: different base pointer\n");
    return false;
  }

  // Query dependence analysis (with possible loop-independent dependences
  // included); no dependence at all means no temporal reuse.
  std::unique_ptr<Dependence> D =
      DI.depends(&StoreOrLoadInst, &Other.StoreOrLoadInst, true);

  if (D == nullptr) {
    LLVM_DEBUG(dbgs().indent(2) << "No temporal reuse: no dependence\n");
    return false;
  }

  // A loop-independent dependence means both references touch the same
  // location within the same iteration.
  if (D->isLoopIndependent()) {
    LLVM_DEBUG(dbgs().indent(2) << "Found temporal reuse\n");
    return true;
  }

  // Check the dependence distance at every loop level. There is temporal reuse
  // if the distance at the given loop's depth is small (|d| <= MaxDistance) and
  // it is zero at every other loop level.
  // NOTE(review): the check below uses CI.getSExtValue() > MaxDistance, so a
  // negative distance always passes even when |d| > MaxDistance — confirm
  // whether the absolute value was intended, per the comment above.
  int LoopDepth = L.getLoopDepth();
  int Levels = D->getLevels();
  for (int Level = 1; Level <= Levels; ++Level) {
    const SCEV *Distance = D->getDistance(Level);
    const SCEVConstant *SCEVConst = dyn_cast_or_null<SCEVConstant>(Distance);

    // An unknown (non-constant) distance cannot be classified either way.
    if (SCEVConst == nullptr) {
      LLVM_DEBUG(dbgs().indent(2) << "No temporal reuse: distance unknown\n");
      return None;
    }

    const ConstantInt &CI = *SCEVConst->getValue();
    if (Level != LoopDepth && !CI.isZero()) {
      LLVM_DEBUG(dbgs().indent(2)
                 << "No temporal reuse: distance is not zero at depth=" << Level
                 << "\n");
      return false;
    } else if (Level == LoopDepth && CI.getSExtValue() > MaxDistance) {
      LLVM_DEBUG(
          dbgs().indent(2)
          << "No temporal reuse: distance is greater than MaxDistance at depth="
          << Level << "\n");
      return false;
    }
  }

  LLVM_DEBUG(dbgs().indent(2) << "Found temporal reuse\n");
  return true;
}
/// Estimate the number of cache lines this reference uses when \p L is
/// considered the innermost loop: 1 if loop invariant, (TripCount*Stride)/CLS
/// for a consecutive access, and TripCount otherwise.
CacheCostTy IndexedReference::computeRefCost(const Loop &L,
                                             unsigned CLS) const {
  assert(IsValid && "Expecting a valid reference");
  LLVM_DEBUG({
    dbgs().indent(2) << "Computing cache cost for:\n";
    dbgs().indent(4) << *this << "\n";
  });

  // If the indexed reference is loop invariant the cost is one.
  if (isLoopInvariant(L)) {
    LLVM_DEBUG(dbgs().indent(4) << "Reference is loop invariant: RefCost=1\n");
    return 1;
  }

  // Fall back to the configurable DefaultTripCount when the trip count is not
  // a compile-time constant.
  const SCEV *TripCount = computeTripCount(L, SE);
  if (!TripCount) {
    LLVM_DEBUG(dbgs() << "Trip count of loop " << L.getName()
                      << " could not be computed, using DefaultTripCount\n");
    const SCEV *ElemSize = Sizes.back();
    TripCount = SE.getConstant(ElemSize->getType(), DefaultTripCount);
  }
  LLVM_DEBUG(dbgs() << "TripCount=" << *TripCount << "\n");

  // If the indexed reference is 'consecutive' the cost is
  // (TripCount*Stride)/CLS, otherwise the cost is TripCount.
  const SCEV *RefCost = TripCount;

  if (isConsecutive(L, CLS)) {
    // The byte stride is the innermost coefficient scaled by the element size.
    const SCEV *Coeff = getLastCoefficient();
    const SCEV *ElemSize = Sizes.back();
    const SCEV *Stride = SE.getMulExpr(Coeff, ElemSize);
    // Widen stride and trip count to a common type so the multiply/divide
    // below are well formed.
    Type *WiderType = SE.getWiderType(Stride->getType(), TripCount->getType());
    const SCEV *CacheLineSize = SE.getConstant(WiderType, CLS);
    // A reverse traversal has a negative stride; use its absolute value.
    if (SE.isKnownNegative(Stride))
      Stride = SE.getNegativeSCEV(Stride);
    Stride = SE.getNoopOrAnyExtend(Stride, WiderType);
    TripCount = SE.getNoopOrAnyExtend(TripCount, WiderType);
    const SCEV *Numerator = SE.getMulExpr(Stride, TripCount);
    RefCost = SE.getUDivExpr(Numerator, CacheLineSize);
    LLVM_DEBUG(dbgs().indent(4)
               << "Access is consecutive: RefCost=(TripCount*Stride)/CLS="
               << *RefCost << "\n");
  } else
    LLVM_DEBUG(dbgs().indent(4)
               << "Access is not consecutive: RefCost=TripCount=" << *RefCost
               << "\n");

  // Attempt to fold RefCost into a constant.
  if (auto ConstantCost = dyn_cast<SCEVConstant>(RefCost))
    return ConstantCost->getValue()->getSExtValue();

  LLVM_DEBUG(dbgs().indent(4)
             << "RefCost is not a constant! Setting to RefCost=InvalidCost "
                "(invalid value).\n");

  return CacheCost::InvalidCost;
}
/// Delinearize the access function of the underlying load/store into
/// subscripts and dimension sizes, falling back to treating it as a one
/// dimensional access when full delinearization fails. Returns true when the
/// resulting subscripts are all simple add recurrences.
bool IndexedReference::delinearize(const LoopInfo &LI) {
  assert(Subscripts.empty() && "Subscripts should be empty");
  assert(Sizes.empty() && "Sizes should be empty");
  assert(!IsValid && "Should be called once from the constructor");
  LLVM_DEBUG(dbgs() << "Delinearizing: " << StoreOrLoadInst << "\n");

  const SCEV *ElemSize = SE.getElementSize(&StoreOrLoadInst);
  const BasicBlock *BB = StoreOrLoadInst.getParent();

  if (Loop *L = LI.getLoopFor(BB)) {
    // Evaluate the pointer operand in the scope of its enclosing loop.
    const SCEV *AccessFn =
        SE.getSCEVAtScope(getPointerOperand(&StoreOrLoadInst), L);

    BasePointer = dyn_cast<SCEVUnknown>(SE.getPointerBase(AccessFn));
    if (BasePointer == nullptr) {
      LLVM_DEBUG(
          dbgs().indent(2)
          << "ERROR: failed to delinearize, can't identify base pointer\n");
      return false;
    }

    // Strip the base pointer so only the byte-offset expression remains.
    AccessFn = SE.getMinusSCEV(AccessFn, BasePointer);

    LLVM_DEBUG(dbgs().indent(2) << "In Loop '" << L->getName()
                                << "', AccessFn: " << *AccessFn << "\n");

    llvm::delinearize(SE, AccessFn, Subscripts, Sizes,
                      SE.getElementSize(&StoreOrLoadInst));

    if (Subscripts.empty() || Sizes.empty() ||
        Subscripts.size() != Sizes.size()) {
      // Attempt to determine whether we have a single dimensional array access.
      // before giving up.
      if (!isOneDimensionalArray(*AccessFn, *ElemSize, *L, SE)) {
        LLVM_DEBUG(dbgs().indent(2)
                   << "ERROR: failed to delinearize reference\n");
        Subscripts.clear();
        Sizes.clear();
        return false;
      }

      // The array may be accessed in reverse, for example:
      //    for (i = N; i > 0; i--)
      //      A[i] = 0;
      // In this case, reconstruct the access function using the absolute value
      // of the step recurrence.
      const SCEVAddRecExpr *AccessFnAR = dyn_cast<SCEVAddRecExpr>(AccessFn);
      const SCEV *StepRec =
          AccessFnAR ? AccessFnAR->getStepRecurrence(SE) : nullptr;

      if (StepRec && SE.isKnownNegative(StepRec))
        AccessFn = SE.getAddRecExpr(AccessFnAR->getStart(),
                                    SE.getNegativeSCEV(StepRec),
                                    AccessFnAR->getLoop(),
                                    AccessFnAR->getNoWrapFlags());

      // Divide the byte offset by the element size to obtain the single
      // subscript of the one dimensional access.
      const SCEV *Div = SE.getUDivExactExpr(AccessFn, ElemSize);
      Subscripts.push_back(Div);
      Sizes.push_back(ElemSize);
    }

    // Every subscript must be an affine add recurrence with loop-invariant
    // start and step for the reference to be analyzable.
    return all_of(Subscripts, [&](const SCEV *Subscript) {
      return isSimpleAddRecurrence(*Subscript, *L);
    });
  }

  return false;
}
  327. bool IndexedReference::isLoopInvariant(const Loop &L) const {
  328. Value *Addr = getPointerOperand(&StoreOrLoadInst);
  329. assert(Addr != nullptr && "Expecting either a load or a store instruction");
  330. assert(SE.isSCEVable(Addr->getType()) && "Addr should be SCEVable");
  331. if (SE.isLoopInvariant(SE.getSCEV(Addr), &L))
  332. return true;
  333. // The indexed reference is loop invariant if none of the coefficients use
  334. // the loop induction variable.
  335. bool allCoeffForLoopAreZero = all_of(Subscripts, [&](const SCEV *Subscript) {
  336. return isCoeffForLoopZeroOrInvariant(*Subscript, L);
  337. });
  338. return allCoeffForLoopAreZero;
  339. }
  340. bool IndexedReference::isConsecutive(const Loop &L, unsigned CLS) const {
  341. // The indexed reference is 'consecutive' if the only coefficient that uses
  342. // the loop induction variable is the last one...
  343. const SCEV *LastSubscript = Subscripts.back();
  344. for (const SCEV *Subscript : Subscripts) {
  345. if (Subscript == LastSubscript)
  346. continue;
  347. if (!isCoeffForLoopZeroOrInvariant(*Subscript, L))
  348. return false;
  349. }
  350. // ...and the access stride is less than the cache line size.
  351. const SCEV *Coeff = getLastCoefficient();
  352. const SCEV *ElemSize = Sizes.back();
  353. const SCEV *Stride = SE.getMulExpr(Coeff, ElemSize);
  354. const SCEV *CacheLineSize = SE.getConstant(Stride->getType(), CLS);
  355. Stride = SE.isKnownNegative(Stride) ? SE.getNegativeSCEV(Stride) : Stride;
  356. return SE.isKnownPredicate(ICmpInst::ICMP_ULT, Stride, CacheLineSize);
  357. }
  358. const SCEV *IndexedReference::getLastCoefficient() const {
  359. const SCEV *LastSubscript = getLastSubscript();
  360. auto *AR = cast<SCEVAddRecExpr>(LastSubscript);
  361. return AR->getStepRecurrence(SE);
  362. }
  363. bool IndexedReference::isCoeffForLoopZeroOrInvariant(const SCEV &Subscript,
  364. const Loop &L) const {
  365. const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(&Subscript);
  366. return (AR != nullptr) ? AR->getLoop() != &L
  367. : SE.isLoopInvariant(&Subscript, &L);
  368. }
  369. bool IndexedReference::isSimpleAddRecurrence(const SCEV &Subscript,
  370. const Loop &L) const {
  371. if (!isa<SCEVAddRecExpr>(Subscript))
  372. return false;
  373. const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(&Subscript);
  374. assert(AR->getLoop() && "AR should have a loop");
  375. if (!AR->isAffine())
  376. return false;
  377. const SCEV *Start = AR->getStart();
  378. const SCEV *Step = AR->getStepRecurrence(SE);
  379. if (!SE.isLoopInvariant(Start, &L) || !SE.isLoopInvariant(Step, &L))
  380. return false;
  381. return true;
  382. }
  383. bool IndexedReference::isAliased(const IndexedReference &Other,
  384. AAResults &AA) const {
  385. const auto &Loc1 = MemoryLocation::get(&StoreOrLoadInst);
  386. const auto &Loc2 = MemoryLocation::get(&Other.StoreOrLoadInst);
  387. return AA.isMustAlias(Loc1, Loc2);
  388. }
  389. //===----------------------------------------------------------------------===//
  390. // CacheCost implementation
  391. //
  392. raw_ostream &llvm::operator<<(raw_ostream &OS, const CacheCost &CC) {
  393. for (const auto &LC : CC.LoopCosts) {
  394. const Loop *L = LC.first;
  395. OS << "Loop '" << L->getName() << "' has cost = " << LC.second << "\n";
  396. }
  397. return OS;
  398. }
/// Construct the analysis for the loop nest \p Loops, defaulting the temporal
/// reuse threshold \p TRT to the TemporalReuseThreshold command-line option
/// when none is supplied, then compute the cache footprint of the nest.
CacheCost::CacheCost(const LoopVectorTy &Loops, const LoopInfo &LI,
                     ScalarEvolution &SE, TargetTransformInfo &TTI,
                     AAResults &AA, DependenceInfo &DI, Optional<unsigned> TRT)
    : Loops(Loops),
      TRT((TRT == None) ? Optional<unsigned>(TemporalReuseThreshold) : TRT),
      LI(LI), SE(SE), TTI(TTI), AA(AA), DI(DI) {
  assert(!Loops.empty() && "Expecting a non-empty loop vector.");

  // Record a constant trip count per loop; getSmallConstantTripCount returns
  // 0 when it cannot compute one, in which case fall back to DefaultTripCount.
  for (const Loop *L : Loops) {
    unsigned TripCount = SE.getSmallConstantTripCount(L);
    TripCount = (TripCount == 0) ? DefaultTripCount : TripCount;
    TripCounts.push_back({L, TripCount});
  }

  calculateCacheFootprint();
}
  413. std::unique_ptr<CacheCost>
  414. CacheCost::getCacheCost(Loop &Root, LoopStandardAnalysisResults &AR,
  415. DependenceInfo &DI, Optional<unsigned> TRT) {
  416. if (!Root.isOutermost()) {
  417. LLVM_DEBUG(dbgs() << "Expecting the outermost loop in a loop nest\n");
  418. return nullptr;
  419. }
  420. LoopVectorTy Loops;
  421. append_range(Loops, breadth_first(&Root));
  422. if (!getInnerMostLoop(Loops)) {
  423. LLVM_DEBUG(dbgs() << "Cannot compute cache cost of loop nest with more "
  424. "than one innermost loop\n");
  425. return nullptr;
  426. }
  427. return std::make_unique<CacheCost>(Loops, AR.LI, AR.SE, AR.TTI, AR.AA, DI, TRT);
  428. }
  429. void CacheCost::calculateCacheFootprint() {
  430. LLVM_DEBUG(dbgs() << "POPULATING REFERENCE GROUPS\n");
  431. ReferenceGroupsTy RefGroups;
  432. if (!populateReferenceGroups(RefGroups))
  433. return;
  434. LLVM_DEBUG(dbgs() << "COMPUTING LOOP CACHE COSTS\n");
  435. for (const Loop *L : Loops) {
  436. assert(llvm::none_of(
  437. LoopCosts,
  438. [L](const LoopCacheCostTy &LCC) { return LCC.first == L; }) &&
  439. "Should not add duplicate element");
  440. CacheCostTy LoopCost = computeLoopCacheCost(*L, RefGroups);
  441. LoopCosts.push_back(std::make_pair(L, LoopCost));
  442. }
  443. sortLoopCosts();
  444. RefGroups.clear();
  445. }
/// Partition the load/store instructions of the innermost loop into reference
/// groups: a reference joins the first group whose representative (the
/// group's first member) it shares temporal or spacial reuse with, otherwise
/// it starts a new group. Returns false when no valid references are found.
bool CacheCost::populateReferenceGroups(ReferenceGroupsTy &RefGroups) const {
  assert(RefGroups.empty() && "Reference groups should be empty");

  unsigned CLS = TTI.getCacheLineSize();
  Loop *InnerMostLoop = getInnerMostLoop(Loops);
  assert(InnerMostLoop != nullptr && "Expecting a valid innermost loop");

  for (BasicBlock *BB : InnerMostLoop->getBlocks()) {
    for (Instruction &I : *BB) {
      if (!isa<StoreInst>(I) && !isa<LoadInst>(I))
        continue;

      std::unique_ptr<IndexedReference> R(new IndexedReference(I, LI, SE));
      // Skip references that could not be delinearized.
      if (!R->isValid())
        continue;

      bool Added = false;
      for (ReferenceGroupTy &RefGroup : RefGroups) {
        // Compare only against the representative (first member) of each
        // existing group.
        const IndexedReference &Representative = *RefGroup.front().get();
        LLVM_DEBUG({
          dbgs() << "References:\n";
          dbgs().indent(2) << *R << "\n";
          dbgs().indent(2) << Representative << "\n";
        });

        // FIXME: Both positive and negative access functions will be placed
        // into the same reference group, resulting in a bi-directional array
        // access such as:
        //    for (i = N; i > 0; i--)
        //      A[i] = A[N - i];
        // having the same cost calculation as a single dimention access pattern
        //    for (i = 0; i < N; i++)
        //      A[i] = A[i];
        // when in actuality, depending on the array size, the first example
        // should have a cost closer to 2x the second due to the two cache
        // access per iteration from opposite ends of the array
        Optional<bool> HasTemporalReuse =
            R->hasTemporalReuse(Representative, *TRT, *InnerMostLoop, DI, AA);
        Optional<bool> HasSpacialReuse =
            R->hasSpacialReuse(Representative, CLS, AA);

        // Reuse with the representative (either kind, when determinable)
        // places the reference in this group.
        if ((HasTemporalReuse.hasValue() && *HasTemporalReuse) ||
            (HasSpacialReuse.hasValue() && *HasSpacialReuse)) {
          RefGroup.push_back(std::move(R));
          Added = true;
          break;
        }
      }

      // No reuse with any existing group: start a new one.
      if (!Added) {
        ReferenceGroupTy RG;
        RG.push_back(std::move(R));
        RefGroups.push_back(std::move(RG));
      }
    }
  }

  if (RefGroups.empty())
    return false;

  LLVM_DEBUG({
    dbgs() << "\nIDENTIFIED REFERENCE GROUPS:\n";
    int n = 1;
    for (const ReferenceGroupTy &RG : RefGroups) {
      dbgs().indent(2) << "RefGroup " << n << ":\n";
      for (const auto &IR : RG)
        dbgs().indent(4) << *IR << "\n";
      n++;
    }
    dbgs() << "\n";
  });

  return true;
}
  510. CacheCostTy
  511. CacheCost::computeLoopCacheCost(const Loop &L,
  512. const ReferenceGroupsTy &RefGroups) const {
  513. if (!L.isLoopSimplifyForm())
  514. return InvalidCost;
  515. LLVM_DEBUG(dbgs() << "Considering loop '" << L.getName()
  516. << "' as innermost loop.\n");
  517. // Compute the product of the trip counts of each other loop in the nest.
  518. CacheCostTy TripCountsProduct = 1;
  519. for (const auto &TC : TripCounts) {
  520. if (TC.first == &L)
  521. continue;
  522. TripCountsProduct *= TC.second;
  523. }
  524. CacheCostTy LoopCost = 0;
  525. for (const ReferenceGroupTy &RG : RefGroups) {
  526. CacheCostTy RefGroupCost = computeRefGroupCacheCost(RG, L);
  527. LoopCost += RefGroupCost * TripCountsProduct;
  528. }
  529. LLVM_DEBUG(dbgs().indent(2) << "Loop '" << L.getName()
  530. << "' has cost=" << LoopCost << "\n");
  531. return LoopCost;
  532. }
  533. CacheCostTy CacheCost::computeRefGroupCacheCost(const ReferenceGroupTy &RG,
  534. const Loop &L) const {
  535. assert(!RG.empty() && "Reference group should have at least one member.");
  536. const IndexedReference *Representative = RG.front().get();
  537. return Representative->computeRefCost(L, TTI.getCacheLineSize());
  538. }
  539. //===----------------------------------------------------------------------===//
  540. // LoopCachePrinterPass implementation
  541. //
  542. PreservedAnalyses LoopCachePrinterPass::run(Loop &L, LoopAnalysisManager &AM,
  543. LoopStandardAnalysisResults &AR,
  544. LPMUpdater &U) {
  545. Function *F = L.getHeader()->getParent();
  546. DependenceInfo DI(F, &AR.AA, &AR.SE, &AR.LI);
  547. if (auto CC = CacheCost::getCacheCost(L, AR, DI))
  548. OS << *CC;
  549. return PreservedAnalyses::all();
  550. }