//===- LoopCacheAnalysis.cpp - Loop Cache Analysis ------------------------==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the implementation for the loop cache analysis.
/// The implementation is largely based on the following paper:
///
///       Compiler Optimizations for Improving Data Locality
///       By: Steve Carr, Katherine S. McKinley, Chau-Wen Tseng
///       http://www.cs.utexas.edu/users/mckinley/papers/asplos-1994.pdf
///
/// The general approach taken to estimate the number of cache lines used by
/// the memory references in an inner loop is:
///    1. Partition memory references that exhibit temporal or spatial reuse
///       into reference groups.
///    2. For each loop L in the loop nest LN:
///       a. Compute the cost of each reference group.
///       b. Compute the loop cost by summing up the reference group costs.
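///
/// For example (an illustrative sketch, not taken from the paper): in
///    for (i = 0; i < N; ++i)
///      for (j = 0; j < M; ++j)
///        A[i][j] += B[j][i];
/// the accesses to A form one reference group and the accesses to B another.
/// With the j-loop innermost, A[i][j] walks memory consecutively (several
/// iterations share a cache line), while B[j][i] jumps a whole row each
/// iteration (roughly one cache line per access), so the nest in this order
/// is assigned a higher cache cost than the interchanged order would be.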
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LoopCacheAnalysis.h"
#include "llvm/ADT/BreadthFirstIterator.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Delinearization.h"
#include "llvm/Analysis/DependenceAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "loop-cache-cost"

static cl::opt<unsigned> DefaultTripCount(
    "default-trip-count", cl::init(100), cl::Hidden,
    cl::desc("Use this to specify the default trip count of a loop"));

// In this analysis two array references are considered to exhibit temporal
// reuse if they access either the same memory location, or a memory location
// with distance smaller than a configurable threshold.
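// For example, with the default threshold of 2, A[i] and A[i + 2] accessed
// in the same loop are classified as having temporal reuse, whereas A[i]
// and A[i + 3] are not.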
static cl::opt<unsigned> TemporalReuseThreshold(
    "temporal-reuse-threshold", cl::init(2), cl::Hidden,
    cl::desc("Use this to specify the max. distance between array elements "
             "accessed in a loop so that the elements are classified to have "
             "temporal reuse"));

/// Retrieve the innermost loop in the given loop nest \p Loops. Return
/// nullptr if any loop in the supplied loop vector has more than one sibling.
/// The loop vector is expected to contain loops collected in breadth-first
/// order.
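/// For example (illustrative): for the nest I { J { K } }, breadth-first
/// traversal yields [I, J, K] with strictly increasing loop depths, and K is
/// returned as the innermost loop.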
static Loop *getInnerMostLoop(const LoopVectorTy &Loops) {
  assert(!Loops.empty() && "Expecting a non-empty loop vector");

  Loop *LastLoop = Loops.back();
  Loop *ParentLoop = LastLoop->getParentLoop();

  if (ParentLoop == nullptr) {
    assert(Loops.size() == 1 && "Expecting a single loop");
    return LastLoop;
  }

  return (llvm::is_sorted(Loops,
                          [](const Loop *L1, const Loop *L2) {
                            return L1->getLoopDepth() < L2->getLoopDepth();
                          }))
             ? LastLoop
             : nullptr;
}

/// Return true if \p AccessFn is an affine add recurrence in loop \p L whose
/// start and step are loop invariant and whose (absolute) step equals the
/// element size \p ElemSize, i.e. a one-dimensional array access.
static bool isOneDimensionalArray(const SCEV &AccessFn, const SCEV &ElemSize,
                                  const Loop &L, ScalarEvolution &SE) {
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(&AccessFn);
  if (!AR || !AR->isAffine())
    return false;

  assert(AR->getLoop() && "AR should have a loop");

  // Check that start and increment are not add recurrences.
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(SE);
  if (isa<SCEVAddRecExpr>(Start) || isa<SCEVAddRecExpr>(Step))
    return false;

  // Check that start and increment are both invariant in the loop.
  if (!SE.isLoopInvariant(Start, &L) || !SE.isLoopInvariant(Step, &L))
    return false;

  // The array may be accessed in reverse order, so compare the absolute
  // value of the step against the element size.
  const SCEV *StepRec = AR->getStepRecurrence(SE);
  if (StepRec && SE.isKnownNegative(StepRec))
    StepRec = SE.getNegativeSCEV(StepRec);

  return StepRec == &ElemSize;
}

/// Compute the trip count for the given loop \p L or assume a default value
/// if it is not a compile time constant. Return the SCEV expression for the
/// trip count.
static const SCEV *computeTripCount(const Loop &L, const SCEV &ElemSize,
                                    ScalarEvolution &SE) {
  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(&L);
  const SCEV *TripCount = (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
                           isa<SCEVConstant>(BackedgeTakenCount))
                              ? SE.getTripCountFromExitCount(BackedgeTakenCount)
                              : nullptr;

  if (!TripCount) {
    LLVM_DEBUG(dbgs() << "Trip count of loop " << L.getName()
                      << " could not be computed, using DefaultTripCount\n");
    TripCount = SE.getConstant(ElemSize.getType(), DefaultTripCount);
  }

  return TripCount;
}

//===----------------------------------------------------------------------===//
// IndexedReference implementation
//
raw_ostream &llvm::operator<<(raw_ostream &OS, const IndexedReference &R) {
  if (!R.IsValid) {
    OS << R.StoreOrLoadInst;
    OS << ", IsValid=false.";
    return OS;
  }

  OS << *R.BasePointer;
  for (const SCEV *Subscript : R.Subscripts)
    OS << "[" << *Subscript << "]";

  OS << ", Sizes: ";
  for (const SCEV *Size : R.Sizes)
    OS << "[" << *Size << "]";

  return OS;
}

IndexedReference::IndexedReference(Instruction &StoreOrLoadInst,
                                   const LoopInfo &LI, ScalarEvolution &SE)
    : StoreOrLoadInst(StoreOrLoadInst), SE(SE) {
  assert((isa<StoreInst>(StoreOrLoadInst) || isa<LoadInst>(StoreOrLoadInst)) &&
         "Expecting a load or store instruction");

  IsValid = delinearize(LI);
  if (IsValid)
    LLVM_DEBUG(dbgs().indent(2) << "Successfully delinearized: " << *this
                                << "\n");
}

std::optional<bool>
IndexedReference::hasSpacialReuse(const IndexedReference &Other, unsigned CLS,
                                  AAResults &AA) const {
  assert(IsValid && "Expecting a valid reference");

  if (BasePointer != Other.getBasePointer() && !isAliased(Other, AA)) {
    LLVM_DEBUG(dbgs().indent(2)
               << "No spatial reuse: different base pointers\n");
    return false;
  }

  unsigned NumSubscripts = getNumSubscripts();
  if (NumSubscripts != Other.getNumSubscripts()) {
    LLVM_DEBUG(dbgs().indent(2)
               << "No spatial reuse: different number of subscripts\n");
    return false;
  }

  // All subscripts must be equal, except the last one (the subscript for the
  // innermost dimension).
  for (auto SubNum : seq<unsigned>(0, NumSubscripts - 1)) {
    if (getSubscript(SubNum) != Other.getSubscript(SubNum)) {
      LLVM_DEBUG(dbgs().indent(2) << "No spatial reuse, different subscripts: "
                                  << "\n\t" << *getSubscript(SubNum) << "\n\t"
                                  << *Other.getSubscript(SubNum) << "\n");
      return false;
    }
  }

  // The difference between the last subscripts must be less than the cache
  // line size.
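  // For example (illustrative): A[i][j] and A[i][j + 1] agree on every
  // subscript except the last, and the last subscripts differ by a constant
  // 1 < CLS, so the two references are considered to touch the same cache
  // line.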
  const SCEV *LastSubscript = getLastSubscript();
  const SCEV *OtherLastSubscript = Other.getLastSubscript();
  const SCEVConstant *Diff = dyn_cast<SCEVConstant>(
      SE.getMinusSCEV(LastSubscript, OtherLastSubscript));

  if (Diff == nullptr) {
    LLVM_DEBUG(dbgs().indent(2)
               << "No spatial reuse, difference between subscript:\n\t"
               << *LastSubscript << "\n\t" << *OtherLastSubscript
               << "\nis not constant.\n");
    return std::nullopt;
  }

  bool InSameCacheLine = (Diff->getValue()->getSExtValue() < CLS);

  LLVM_DEBUG({
    if (InSameCacheLine)
      dbgs().indent(2) << "Found spatial reuse.\n";
    else
      dbgs().indent(2) << "No spatial reuse.\n";
  });

  return InSameCacheLine;
}

std::optional<bool>
IndexedReference::hasTemporalReuse(const IndexedReference &Other,
                                   unsigned MaxDistance, const Loop &L,
                                   DependenceInfo &DI, AAResults &AA) const {
  assert(IsValid && "Expecting a valid reference");

  if (BasePointer != Other.getBasePointer() && !isAliased(Other, AA)) {
    LLVM_DEBUG(dbgs().indent(2)
               << "No temporal reuse: different base pointer\n");
    return false;
  }

  std::unique_ptr<Dependence> D =
      DI.depends(&StoreOrLoadInst, &Other.StoreOrLoadInst, true);

  if (D == nullptr) {
    LLVM_DEBUG(dbgs().indent(2) << "No temporal reuse: no dependence\n");
    return false;
  }

  if (D->isLoopIndependent()) {
    LLVM_DEBUG(dbgs().indent(2) << "Found temporal reuse\n");
    return true;
  }

  // Check the dependence distance at every loop level. There is temporal
  // reuse if the distance at the given loop's depth is small (|d| <=
  // MaxDistance) and it is zero at every other loop level.
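  // For example (illustrative): for A[i][j] and A[i][j - 2] in a two-deep
  // nest with the j-loop at depth 2, the dependence distance vector is
  // (0, 2); the distance is zero at depth 1 and 2 <= MaxDistance at depth 2
  // (with the default threshold), so the pair has temporal reuse in the
  // j-loop.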
  int LoopDepth = L.getLoopDepth();
  int Levels = D->getLevels();
  for (int Level = 1; Level <= Levels; ++Level) {
    const SCEV *Distance = D->getDistance(Level);
    const SCEVConstant *SCEVConst = dyn_cast_or_null<SCEVConstant>(Distance);

    if (SCEVConst == nullptr) {
      LLVM_DEBUG(dbgs().indent(2) << "No temporal reuse: distance unknown\n");
      return std::nullopt;
    }

    const ConstantInt &CI = *SCEVConst->getValue();
    if (Level != LoopDepth && !CI.isZero()) {
      LLVM_DEBUG(dbgs().indent(2)
                 << "No temporal reuse: distance is not zero at depth=" << Level
                 << "\n");
      return false;
    } else if (Level == LoopDepth && CI.getSExtValue() > MaxDistance) {
      LLVM_DEBUG(
          dbgs().indent(2)
          << "No temporal reuse: distance is greater than MaxDistance at depth="
          << Level << "\n");
      return false;
    }
  }

  LLVM_DEBUG(dbgs().indent(2) << "Found temporal reuse\n");
  return true;
}

CacheCostTy IndexedReference::computeRefCost(const Loop &L,
                                             unsigned CLS) const {
  assert(IsValid && "Expecting a valid reference");
  LLVM_DEBUG({
    dbgs().indent(2) << "Computing cache cost for:\n";
    dbgs().indent(4) << *this << "\n";
  });

  // If the indexed reference is loop invariant the cost is one.
  if (isLoopInvariant(L)) {
    LLVM_DEBUG(dbgs().indent(4) << "Reference is loop invariant: RefCost=1\n");
    return 1;
  }

  const SCEV *TripCount = computeTripCount(L, *Sizes.back(), SE);
  assert(TripCount && "Expecting valid TripCount");
  LLVM_DEBUG(dbgs() << "TripCount=" << *TripCount << "\n");

  const SCEV *RefCost = nullptr;
  const SCEV *Stride = nullptr;
  if (isConsecutive(L, Stride, CLS)) {
    // If the indexed reference is 'consecutive' the cost is
    // (TripCount*Stride)/CLS.
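    // For example (illustrative): with an 8-byte stride, a trip count of
    // 100, and a 64-byte cache line, RefCost = (100 * 8) / 64 = 12 cache
    // lines (integer division), rather than one line per iteration.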
    assert(Stride != nullptr &&
           "Stride should not be null for consecutive access!");
    Type *WiderType = SE.getWiderType(Stride->getType(), TripCount->getType());
    const SCEV *CacheLineSize = SE.getConstant(WiderType, CLS);
    Stride = SE.getNoopOrAnyExtend(Stride, WiderType);
    TripCount = SE.getNoopOrAnyExtend(TripCount, WiderType);
    const SCEV *Numerator = SE.getMulExpr(Stride, TripCount);
    RefCost = SE.getUDivExpr(Numerator, CacheLineSize);
    LLVM_DEBUG(dbgs().indent(4)
               << "Access is consecutive: RefCost=(TripCount*Stride)/CLS="
               << *RefCost << "\n");
  } else {
    // If the indexed reference is not 'consecutive' the cost is proportional
    // to the trip count and the depth of the dimension which the subject
    // loop subscript is accessing. We try to estimate this by multiplying
    // the cost by the trip counts of loops corresponding to the inner
    // dimensions. For example, given the indexed reference 'A[i][j][k]', and
    // assuming the i-loop is in the innermost position, the cost would be
    // equal to the iterations of the i-loop multiplied by iterations of the
    // j-loop.
    RefCost = TripCount;

    int Index = getSubscriptIndex(L);
    assert(Index >= 0 && "Could not locate a valid Index");

    for (unsigned I = Index + 1; I < getNumSubscripts() - 1; ++I) {
      const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(getSubscript(I));
      assert(AR && AR->getLoop() && "Expecting valid loop");
      const SCEV *TripCount =
          computeTripCount(*AR->getLoop(), *Sizes.back(), SE);
      Type *WiderType =
          SE.getWiderType(RefCost->getType(), TripCount->getType());
      RefCost = SE.getMulExpr(SE.getNoopOrAnyExtend(RefCost, WiderType),
                              SE.getNoopOrAnyExtend(TripCount, WiderType));
    }

    LLVM_DEBUG(dbgs().indent(4)
               << "Access is not consecutive: RefCost=" << *RefCost << "\n");
  }
  assert(RefCost && "Expecting a valid RefCost");

  // Attempt to fold RefCost into a constant.
  if (auto ConstantCost = dyn_cast<SCEVConstant>(RefCost))
    return ConstantCost->getValue()->getSExtValue();

  LLVM_DEBUG(dbgs().indent(4)
             << "RefCost is not a constant! Setting to RefCost=InvalidCost "
                "(invalid value).\n");

  return CacheCost::InvalidCost;
}

bool IndexedReference::tryDelinearizeFixedSize(
    const SCEV *AccessFn, SmallVectorImpl<const SCEV *> &Subscripts) {
  SmallVector<int, 4> ArraySizes;
  if (!tryDelinearizeFixedSizeImpl(&SE, &StoreOrLoadInst, AccessFn, Subscripts,
                                   ArraySizes))
    return false;

  // Populate Sizes with scev expressions to be used in calculations later.
  for (auto Idx : seq<unsigned>(1, Subscripts.size()))
    Sizes.push_back(
        SE.getConstant(Subscripts[Idx]->getType(), ArraySizes[Idx - 1]));

  LLVM_DEBUG({
    dbgs() << "Delinearized subscripts of fixed-size array\n"
           << "GEP:" << *getLoadStorePointerOperand(&StoreOrLoadInst)
           << "\n";
  });
  return true;
}

bool IndexedReference::delinearize(const LoopInfo &LI) {
  assert(Subscripts.empty() && "Subscripts should be empty");
  assert(Sizes.empty() && "Sizes should be empty");
  assert(!IsValid && "Should be called once from the constructor");
  LLVM_DEBUG(dbgs() << "Delinearizing: " << StoreOrLoadInst << "\n");

  const SCEV *ElemSize = SE.getElementSize(&StoreOrLoadInst);
  const BasicBlock *BB = StoreOrLoadInst.getParent();

  if (Loop *L = LI.getLoopFor(BB)) {
    const SCEV *AccessFn =
        SE.getSCEVAtScope(getPointerOperand(&StoreOrLoadInst), L);

    BasePointer = dyn_cast<SCEVUnknown>(SE.getPointerBase(AccessFn));
    if (BasePointer == nullptr) {
      LLVM_DEBUG(
          dbgs().indent(2)
          << "ERROR: failed to delinearize, can't identify base pointer\n");
      return false;
    }

    bool IsFixedSize = false;
    // Try to delinearize fixed-size arrays.
    if (tryDelinearizeFixedSize(AccessFn, Subscripts)) {
      IsFixedSize = true;
      // The last element of Sizes is the element size.
      Sizes.push_back(ElemSize);
      LLVM_DEBUG(dbgs().indent(2) << "In Loop '" << L->getName()
                                  << "', AccessFn: " << *AccessFn << "\n");
    }

    AccessFn = SE.getMinusSCEV(AccessFn, BasePointer);

    // Try to delinearize parametric-size arrays.
    if (!IsFixedSize) {
      LLVM_DEBUG(dbgs().indent(2) << "In Loop '" << L->getName()
                                  << "', AccessFn: " << *AccessFn << "\n");
      llvm::delinearize(SE, AccessFn, Subscripts, Sizes,
                        SE.getElementSize(&StoreOrLoadInst));
    }

    if (Subscripts.empty() || Sizes.empty() ||
        Subscripts.size() != Sizes.size()) {
      // Attempt to determine whether we have a single dimensional array
      // access before giving up.
      if (!isOneDimensionalArray(*AccessFn, *ElemSize, *L, SE)) {
        LLVM_DEBUG(dbgs().indent(2)
                   << "ERROR: failed to delinearize reference\n");
        Subscripts.clear();
        Sizes.clear();
        return false;
      }

      // The array may be accessed in reverse, for example:
      //   for (i = N; i > 0; i--)
      //     A[i] = 0;
      // In this case, reconstruct the access function using the absolute
      // value of the step recurrence.
      const SCEVAddRecExpr *AccessFnAR = dyn_cast<SCEVAddRecExpr>(AccessFn);
      const SCEV *StepRec =
          AccessFnAR ? AccessFnAR->getStepRecurrence(SE) : nullptr;

      if (StepRec && SE.isKnownNegative(StepRec))
        AccessFn = SE.getAddRecExpr(AccessFnAR->getStart(),
                                    SE.getNegativeSCEV(StepRec),
                                    AccessFnAR->getLoop(),
                                    AccessFnAR->getNoWrapFlags());
      const SCEV *Div = SE.getUDivExactExpr(AccessFn, ElemSize);
      Subscripts.push_back(Div);
      Sizes.push_back(ElemSize);
    }

    return all_of(Subscripts, [&](const SCEV *Subscript) {
      return isSimpleAddRecurrence(*Subscript, *L);
    });
  }

  return false;
}

bool IndexedReference::isLoopInvariant(const Loop &L) const {
  Value *Addr = getPointerOperand(&StoreOrLoadInst);
  assert(Addr != nullptr && "Expecting either a load or a store instruction");
  assert(SE.isSCEVable(Addr->getType()) && "Addr should be SCEVable");

  if (SE.isLoopInvariant(SE.getSCEV(Addr), &L))
    return true;

  // The indexed reference is loop invariant if none of the coefficients use
  // the loop induction variable.
  bool allCoeffForLoopAreZero = all_of(Subscripts, [&](const SCEV *Subscript) {
    return isCoeffForLoopZeroOrInvariant(*Subscript, L);
  });

  return allCoeffForLoopAreZero;
}

bool IndexedReference::isConsecutive(const Loop &L, const SCEV *&Stride,
                                     unsigned CLS) const {
  // The indexed reference is 'consecutive' if the only coefficient that uses
  // the loop induction variable is the last one...
  const SCEV *LastSubscript = Subscripts.back();
  for (const SCEV *Subscript : Subscripts) {
    if (Subscript == LastSubscript)
      continue;
    if (!isCoeffForLoopZeroOrInvariant(*Subscript, L))
      return false;
  }

  // ...and the access stride is less than the cache line size.
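  // For example (illustrative): for A[i][j] with \p L being the j-loop, only
  // the last subscript varies with j, and the stride is 1 * ElemSize bytes,
  // which is below any realistic cache line size, so the access is
  // consecutive.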
  const SCEV *Coeff = getLastCoefficient();
  const SCEV *ElemSize = Sizes.back();
  Type *WiderType = SE.getWiderType(Coeff->getType(), ElemSize->getType());
  // FIXME: This assumes that all values are signed integers, which may be
  // incorrect in unusual code, and incorrectly uses sext instead of zext:
  //   for (uint32_t i = 0; i < 512; ++i) {
  //     uint8_t trunc = i;
  //     A[trunc] = 42;
  //   }
  // This consecutively iterates twice over A. If `trunc` is sign-extended,
  // we would conclude that this may iterate backwards over the array.
  // However, LoopCacheAnalysis is heuristic anyway and transformations must
  // not result in wrong optimizations if the heuristic was incorrect.
  Stride = SE.getMulExpr(SE.getNoopOrSignExtend(Coeff, WiderType),
                         SE.getNoopOrSignExtend(ElemSize, WiderType));
  const SCEV *CacheLineSize = SE.getConstant(Stride->getType(), CLS);

  Stride = SE.isKnownNegative(Stride) ? SE.getNegativeSCEV(Stride) : Stride;
  return SE.isKnownPredicate(ICmpInst::ICMP_ULT, Stride, CacheLineSize);
}

int IndexedReference::getSubscriptIndex(const Loop &L) const {
  for (auto Idx : seq<int>(0, getNumSubscripts())) {
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(getSubscript(Idx));
    if (AR && AR->getLoop() == &L) {
      return Idx;
    }
  }
  return -1;
}

const SCEV *IndexedReference::getLastCoefficient() const {
  const SCEV *LastSubscript = getLastSubscript();
  auto *AR = cast<SCEVAddRecExpr>(LastSubscript);
  return AR->getStepRecurrence(SE);
}

bool IndexedReference::isCoeffForLoopZeroOrInvariant(const SCEV &Subscript,
                                                     const Loop &L) const {
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(&Subscript);
  return (AR != nullptr) ? AR->getLoop() != &L
                         : SE.isLoopInvariant(&Subscript, &L);
}

bool IndexedReference::isSimpleAddRecurrence(const SCEV &Subscript,
                                             const Loop &L) const {
  if (!isa<SCEVAddRecExpr>(Subscript))
    return false;

  const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(&Subscript);
  assert(AR->getLoop() && "AR should have a loop");

  if (!AR->isAffine())
    return false;

  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(SE);
  if (!SE.isLoopInvariant(Start, &L) || !SE.isLoopInvariant(Step, &L))
    return false;

  return true;
}

bool IndexedReference::isAliased(const IndexedReference &Other,
                                 AAResults &AA) const {
  const auto &Loc1 = MemoryLocation::get(&StoreOrLoadInst);
  const auto &Loc2 = MemoryLocation::get(&Other.StoreOrLoadInst);
  return AA.isMustAlias(Loc1, Loc2);
}

//===----------------------------------------------------------------------===//
// CacheCost implementation
//
raw_ostream &llvm::operator<<(raw_ostream &OS, const CacheCost &CC) {
  for (const auto &LC : CC.LoopCosts) {
    const Loop *L = LC.first;
    OS << "Loop '" << L->getName() << "' has cost = " << LC.second << "\n";
  }
  return OS;
}

CacheCost::CacheCost(const LoopVectorTy &Loops, const LoopInfo &LI,
                     ScalarEvolution &SE, TargetTransformInfo &TTI,
                     AAResults &AA, DependenceInfo &DI,
                     std::optional<unsigned> TRT)
    : Loops(Loops), TRT(TRT.value_or(TemporalReuseThreshold)), LI(LI), SE(SE),
      TTI(TTI), AA(AA), DI(DI) {
  assert(!Loops.empty() && "Expecting a non-empty loop vector.");

  for (const Loop *L : Loops) {
    unsigned TripCount = SE.getSmallConstantTripCount(L);
    TripCount = (TripCount == 0) ? DefaultTripCount : TripCount;
    TripCounts.push_back({L, TripCount});
  }

  calculateCacheFootprint();
}

std::unique_ptr<CacheCost>
CacheCost::getCacheCost(Loop &Root, LoopStandardAnalysisResults &AR,
                        DependenceInfo &DI, std::optional<unsigned> TRT) {
  if (!Root.isOutermost()) {
    LLVM_DEBUG(dbgs() << "Expecting the outermost loop in a loop nest\n");
    return nullptr;
  }

  LoopVectorTy Loops;
  append_range(Loops, breadth_first(&Root));

  if (!getInnerMostLoop(Loops)) {
    LLVM_DEBUG(dbgs() << "Cannot compute cache cost of loop nest with more "
                         "than one innermost loop\n");
    return nullptr;
  }

  return std::make_unique<CacheCost>(Loops, AR.LI, AR.SE, AR.TTI, AR.AA, DI,
                                     TRT);
}

void CacheCost::calculateCacheFootprint() {
  LLVM_DEBUG(dbgs() << "POPULATING REFERENCE GROUPS\n");
  ReferenceGroupsTy RefGroups;
  if (!populateReferenceGroups(RefGroups))
    return;

  LLVM_DEBUG(dbgs() << "COMPUTING LOOP CACHE COSTS\n");
  for (const Loop *L : Loops) {
    assert(llvm::none_of(
               LoopCosts,
               [L](const LoopCacheCostTy &LCC) { return LCC.first == L; }) &&
           "Should not add duplicate element");
    CacheCostTy LoopCost = computeLoopCacheCost(*L, RefGroups);
    LoopCosts.push_back(std::make_pair(L, LoopCost));
  }

  sortLoopCosts();
  RefGroups.clear();
}

bool CacheCost::populateReferenceGroups(ReferenceGroupsTy &RefGroups) const {
  assert(RefGroups.empty() && "Reference groups should be empty");

  unsigned CLS = TTI.getCacheLineSize();
  Loop *InnerMostLoop = getInnerMostLoop(Loops);
  assert(InnerMostLoop != nullptr && "Expecting a valid innermost loop");

  for (BasicBlock *BB : InnerMostLoop->getBlocks()) {
    for (Instruction &I : *BB) {
      if (!isa<StoreInst>(I) && !isa<LoadInst>(I))
        continue;

      std::unique_ptr<IndexedReference> R(new IndexedReference(I, LI, SE));
      if (!R->isValid())
        continue;

      bool Added = false;
      for (ReferenceGroupTy &RefGroup : RefGroups) {
        const IndexedReference &Representative = *RefGroup.front();
        LLVM_DEBUG({
          dbgs() << "References:\n";
          dbgs().indent(2) << *R << "\n";
          dbgs().indent(2) << Representative << "\n";
        });

        // FIXME: Both positive and negative access functions will be placed
        // into the same reference group, resulting in a bi-directional array
        // access such as:
        //   for (i = N; i > 0; i--)
        //     A[i] = A[N - i];
        // having the same cost calculation as a single-direction access
        // pattern
        //   for (i = 0; i < N; i++)
        //     A[i] = A[i];
        // when in actuality, depending on the array size, the first example
        // should have a cost closer to 2x the second due to the two cache
        // accesses per iteration from opposite ends of the array.
        std::optional<bool> HasTemporalReuse =
            R->hasTemporalReuse(Representative, *TRT, *InnerMostLoop, DI, AA);
        std::optional<bool> HasSpacialReuse =
            R->hasSpacialReuse(Representative, CLS, AA);

        if ((HasTemporalReuse && *HasTemporalReuse) ||
            (HasSpacialReuse && *HasSpacialReuse)) {
          RefGroup.push_back(std::move(R));
          Added = true;
          break;
        }
      }

      if (!Added) {
        ReferenceGroupTy RG;
        RG.push_back(std::move(R));
        RefGroups.push_back(std::move(RG));
      }
    }
  }

  if (RefGroups.empty())
    return false;

  LLVM_DEBUG({
    dbgs() << "\nIDENTIFIED REFERENCE GROUPS:\n";
    int n = 1;
    for (const ReferenceGroupTy &RG : RefGroups) {
      dbgs().indent(2) << "RefGroup " << n << ":\n";
      for (const auto &IR : RG)
        dbgs().indent(4) << *IR << "\n";
      n++;
    }
    dbgs() << "\n";
  });

  return true;
}

CacheCostTy
CacheCost::computeLoopCacheCost(const Loop &L,
                                const ReferenceGroupsTy &RefGroups) const {
  if (!L.isLoopSimplifyForm())
    return InvalidCost;

  LLVM_DEBUG(dbgs() << "Considering loop '" << L.getName()
                    << "' as innermost loop.\n");

  // Compute the product of the trip counts of each other loop in the nest.
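  // For example (illustrative): in a nest i(100) { j(50) { k(25) } }, when
  // considering the j-loop as innermost the product is 100 * 25 = 2500, the
  // number of times the reference-group costs for the j-loop are incurred.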
  CacheCostTy TripCountsProduct = 1;
  for (const auto &TC : TripCounts) {
    if (TC.first == &L)
      continue;
    TripCountsProduct *= TC.second;
  }

  CacheCostTy LoopCost = 0;
  for (const ReferenceGroupTy &RG : RefGroups) {
    CacheCostTy RefGroupCost = computeRefGroupCacheCost(RG, L);
    LoopCost += RefGroupCost * TripCountsProduct;
  }

  LLVM_DEBUG(dbgs().indent(2) << "Loop '" << L.getName()
                              << "' has cost=" << LoopCost << "\n");

  return LoopCost;
}

CacheCostTy CacheCost::computeRefGroupCacheCost(const ReferenceGroupTy &RG,
                                                const Loop &L) const {
  assert(!RG.empty() && "Reference group should have at least one member.");

  const IndexedReference *Representative = RG.front().get();
  return Representative->computeRefCost(L, TTI.getCacheLineSize());
}

//===----------------------------------------------------------------------===//
// LoopCachePrinterPass implementation
//
PreservedAnalyses LoopCachePrinterPass::run(Loop &L, LoopAnalysisManager &AM,
                                            LoopStandardAnalysisResults &AR,
                                            LPMUpdater &U) {
  Function *F = L.getHeader()->getParent();
  DependenceInfo DI(F, &AR.AA, &AR.SE, &AR.LI);

  if (auto CC = CacheCost::getCacheCost(L, AR, DI))
    OS << *CC;

  return PreservedAnalyses::all();
}