// SimplifyIndVar.cpp
  1. //===-- SimplifyIndVar.cpp - Induction variable simplification ------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file implements induction variable simplification. It does
  10. // not define any actual pass or policy, but provides a single function to
  11. // simplify a loop's induction variables based on ScalarEvolution.
  12. //
  13. //===----------------------------------------------------------------------===//
  14. #include "llvm/Transforms/Utils/SimplifyIndVar.h"
  15. #include "llvm/ADT/STLExtras.h"
  16. #include "llvm/ADT/SmallVector.h"
  17. #include "llvm/ADT/Statistic.h"
  18. #include "llvm/Analysis/LoopInfo.h"
  19. #include "llvm/IR/DataLayout.h"
  20. #include "llvm/IR/Dominators.h"
  21. #include "llvm/IR/IRBuilder.h"
  22. #include "llvm/IR/Instructions.h"
  23. #include "llvm/IR/IntrinsicInst.h"
  24. #include "llvm/IR/PatternMatch.h"
  25. #include "llvm/Support/Debug.h"
  26. #include "llvm/Support/raw_ostream.h"
  27. #include "llvm/Transforms/Utils/Local.h"
  28. #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
using namespace llvm;

#define DEBUG_TYPE "indvars"

// Counters reported under -stats for the simplifications performed below.
STATISTIC(NumElimIdentity, "Number of IV identities eliminated");
STATISTIC(NumElimOperand, "Number of IV operands folded into a use");
STATISTIC(NumFoldedUser, "Number of IV users folded into a constant");
STATISTIC(NumElimRem, "Number of IV remainder operations eliminated");
STATISTIC(
    NumSimplifiedSDiv,
    "Number of IV signed division operations converted to unsigned division");
STATISTIC(
    NumSimplifiedSRem,
    "Number of IV signed remainder operations converted to unsigned remainder");
STATISTIC(NumElimCmp, "Number of IV comparisons eliminated");
namespace {
/// This is a utility for simplifying induction variables
/// based on ScalarEvolution. It is the primary instrument of the
/// IndvarSimplify pass, but it may also be directly invoked to cleanup after
/// other loop passes that preserve SCEV.
class SimplifyIndvar {
  Loop *L;
  LoopInfo *LI;
  ScalarEvolution *SE;
  DominatorTree *DT;
  const TargetTransformInfo *TTI;
  SCEVExpander &Rewriter;
  // Instructions made dead by the simplifications; owned by the caller, which
  // is responsible for actually deleting them.
  SmallVectorImpl<WeakTrackingVH> &DeadInsts;
  // Set as soon as any simplification succeeds; queried via hasChanged().
  bool Changed;

public:
  SimplifyIndvar(Loop *Loop, ScalarEvolution *SE, DominatorTree *DT,
                 LoopInfo *LI, const TargetTransformInfo *TTI,
                 SCEVExpander &Rewriter,
                 SmallVectorImpl<WeakTrackingVH> &Dead)
      : L(Loop), LI(LI), SE(SE), DT(DT), TTI(TTI), Rewriter(Rewriter),
        DeadInsts(Dead), Changed(false) {
    assert(LI && "IV simplification requires LoopInfo");
  }

  /// Whether any simplification has been performed since construction.
  bool hasChanged() const { return Changed; }

  /// Iteratively perform simplification on a worklist of users of the
  /// specified induction variable. This is the top-level driver that applies
  /// all simplifications to users of an IV.
  void simplifyUsers(PHINode *CurrIV, IVVisitor *V = nullptr);

  Value *foldIVUser(Instruction *UseInst, Instruction *IVOperand);

  bool eliminateIdentitySCEV(Instruction *UseInst, Instruction *IVOperand);
  bool replaceIVUserWithLoopInvariant(Instruction *UseInst);

  bool eliminateOverflowIntrinsic(WithOverflowInst *WO);
  bool eliminateSaturatingIntrinsic(SaturatingInst *SI);
  bool eliminateTrunc(TruncInst *TI);
  bool eliminateIVUser(Instruction *UseInst, Instruction *IVOperand);
  bool makeIVComparisonInvariant(ICmpInst *ICmp, Value *IVOperand);
  void eliminateIVComparison(ICmpInst *ICmp, Value *IVOperand);
  void simplifyIVRemainder(BinaryOperator *Rem, Value *IVOperand,
                           bool IsSigned);
  void replaceRemWithNumerator(BinaryOperator *Rem);
  void replaceRemWithNumeratorOrZero(BinaryOperator *Rem);
  void replaceSRemWithURem(BinaryOperator *Rem);
  bool eliminateSDiv(BinaryOperator *SDiv);
  bool strengthenOverflowingOperation(BinaryOperator *OBO, Value *IVOperand);
  bool strengthenRightShift(BinaryOperator *BO, Value *IVOperand);
};
} // anonymous namespace
  89. /// Find a point in code which dominates all given instructions. We can safely
  90. /// assume that, whatever fact we can prove at the found point, this fact is
  91. /// also true for each of the given instructions.
  92. static Instruction *findCommonDominator(ArrayRef<Instruction *> Instructions,
  93. DominatorTree &DT) {
  94. Instruction *CommonDom = nullptr;
  95. for (auto *Insn : Instructions)
  96. if (!CommonDom || DT.dominates(Insn, CommonDom))
  97. CommonDom = Insn;
  98. else if (!DT.dominates(CommonDom, Insn))
  99. // If there is no dominance relation, use common dominator.
  100. CommonDom =
  101. DT.findNearestCommonDominator(CommonDom->getParent(),
  102. Insn->getParent())->getTerminator();
  103. assert(CommonDom && "Common dominator not found?");
  104. return CommonDom;
  105. }
/// Fold an IV operand into its use. This removes increments of an
/// aligned IV when used by an instruction that ignores the low bits.
///
/// IVOperand is guaranteed SCEVable, but UseInst may not be.
///
/// Return the operand of IVOperand for this induction variable if IVOperand can
/// be folded (in case more folding opportunities have been exposed).
/// Otherwise return null.
Value *SimplifyIndvar::foldIVUser(Instruction *UseInst, Instruction *IVOperand) {
  Value *IVSrc = nullptr;
  // The IV is only ever folded through operand 0 of the user.
  const unsigned OperIdx = 0;
  const SCEV *FoldedExpr = nullptr;
  bool MustDropExactFlag = false;
  switch (UseInst->getOpcode()) {
  default:
    return nullptr;
  case Instruction::UDiv:
  case Instruction::LShr:
    // We're only interested in the case where we know something about
    // the numerator and have a constant denominator.
    if (IVOperand != UseInst->getOperand(OperIdx) ||
        !isa<ConstantInt>(UseInst->getOperand(1)))
      return nullptr;

    // Attempt to fold a binary operator with constant operand.
    // e.g. ((I + 1) >> 2) => I >> 2
    if (!isa<BinaryOperator>(IVOperand)
        || !isa<ConstantInt>(IVOperand->getOperand(1)))
      return nullptr;

    IVSrc = IVOperand->getOperand(0);
    // IVSrc must be the (SCEVable) IV, since the other operand is const.
    assert(SE->isSCEVable(IVSrc->getType()) && "Expect SCEVable IV operand");

    ConstantInt *D = cast<ConstantInt>(UseInst->getOperand(1));
    if (UseInst->getOpcode() == Instruction::LShr) {
      // Get a constant for the divisor. See createSCEV.
      uint32_t BitWidth = cast<IntegerType>(UseInst->getType())->getBitWidth();
      // Shift amounts >= bit width would be poison; bail.
      if (D->getValue().uge(BitWidth))
        return nullptr;

      // Model lshr as udiv by 2^shift.
      D = ConstantInt::get(UseInst->getContext(),
                           APInt::getOneBitSet(BitWidth, D->getZExtValue()));
    }
    FoldedExpr = SE->getUDivExpr(SE->getSCEV(IVSrc), SE->getSCEV(D));
    // We might have 'exact' flag set at this point which will no longer be
    // correct after we make the replacement.
    if (UseInst->isExact() &&
        SE->getSCEV(IVSrc) != SE->getMulExpr(FoldedExpr, SE->getSCEV(D)))
      MustDropExactFlag = true;
  }
  // We have something that might fold its operand. Compare SCEVs.
  if (!SE->isSCEVable(UseInst->getType()))
    return nullptr;

  // Bypass the operand if SCEV can prove it has no effect.
  if (SE->getSCEV(UseInst) != FoldedExpr)
    return nullptr;

  LLVM_DEBUG(dbgs() << "INDVARS: Eliminated IV operand: " << *IVOperand
                    << " -> " << *UseInst << '\n');

  UseInst->setOperand(OperIdx, IVSrc);
  assert(SE->getSCEV(UseInst) == FoldedExpr && "bad SCEV with folded oper");

  if (MustDropExactFlag)
    UseInst->dropPoisonGeneratingFlags();

  ++NumElimOperand;
  Changed = true;
  // The old increment may now be dead; queue it for the caller to delete.
  if (IVOperand->use_empty())
    DeadInsts.emplace_back(IVOperand);
  return IVSrc;
}
/// Try to rewrite a comparison against a PHI induction variable into an
/// equivalent loop-invariant comparison, reusing only values that already
/// exist (no new instructions are emitted). On success the ICmp is mutated
/// in place and true is returned.
bool SimplifyIndvar::makeIVComparisonInvariant(ICmpInst *ICmp,
                                               Value *IVOperand) {
  unsigned IVOperIdx = 0;
  ICmpInst::Predicate Pred = ICmp->getPredicate();
  if (IVOperand != ICmp->getOperand(0)) {
    // Swapped
    assert(IVOperand == ICmp->getOperand(1) && "Can't find IVOperand");
    IVOperIdx = 1;
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  // Get the SCEVs for the ICmp operands (in the specific context of the
  // current loop)
  const Loop *ICmpLoop = LI->getLoopFor(ICmp->getParent());
  const SCEV *S = SE->getSCEVAtScope(ICmp->getOperand(IVOperIdx), ICmpLoop);
  const SCEV *X = SE->getSCEVAtScope(ICmp->getOperand(1 - IVOperIdx), ICmpLoop);

  // Only handle the case where the IV operand is a loop header PHI.
  auto *PN = dyn_cast<PHINode>(IVOperand);
  if (!PN)
    return false;
  auto LIP = SE->getLoopInvariantPredicate(Pred, S, X, L);
  if (!LIP)
    return false;
  ICmpInst::Predicate InvariantPredicate = LIP->Pred;
  const SCEV *InvariantLHS = LIP->LHS;
  const SCEV *InvariantRHS = LIP->RHS;

  // Rewrite the comparison to a loop invariant comparison if it can be done
  // cheaply, where cheaply means "we don't need to emit any new
  // instructions".
  SmallDenseMap<const SCEV*, Value*> CheapExpansions;
  CheapExpansions[S] = ICmp->getOperand(IVOperIdx);
  CheapExpansions[X] = ICmp->getOperand(1 - IVOperIdx);

  // TODO: Support multiple entry loops? (We currently bail out of these in
  // the IndVarSimplify pass)
  if (auto *BB = L->getLoopPredecessor()) {
    // The PHI's start value (incoming from outside the loop) is another
    // expansion we already have for free.
    const int Idx = PN->getBasicBlockIndex(BB);
    if (Idx >= 0) {
      Value *Incoming = PN->getIncomingValue(Idx);
      const SCEV *IncomingS = SE->getSCEV(Incoming);
      CheapExpansions[IncomingS] = Incoming;
    }
  }
  Value *NewLHS = CheapExpansions[InvariantLHS];
  Value *NewRHS = CheapExpansions[InvariantRHS];

  // SCEV constants can always be materialized without new instructions.
  if (!NewLHS)
    if (auto *ConstLHS = dyn_cast<SCEVConstant>(InvariantLHS))
      NewLHS = ConstLHS->getValue();
  if (!NewRHS)
    if (auto *ConstRHS = dyn_cast<SCEVConstant>(InvariantRHS))
      NewRHS = ConstRHS->getValue();

  if (!NewLHS || !NewRHS)
    // We could not find an existing value to replace either LHS or RHS.
    // Generating new instructions has subtler tradeoffs, so avoid doing that
    // for now.
    return false;

  LLVM_DEBUG(dbgs() << "INDVARS: Simplified comparison: " << *ICmp << '\n');
  ICmp->setPredicate(InvariantPredicate);
  ICmp->setOperand(0, NewLHS);
  ICmp->setOperand(1, NewRHS);
  return true;
}
/// SimplifyIVUsers helper for eliminating useless
/// comparisons against an induction variable.
///
/// Tries, in order: folding the compare to a constant at the users' common
/// dominator, rewriting it as a loop-invariant compare, and finally turning a
/// signed predicate into its unsigned form when both operands are known
/// non-negative.
void SimplifyIndvar::eliminateIVComparison(ICmpInst *ICmp, Value *IVOperand) {
  unsigned IVOperIdx = 0;
  ICmpInst::Predicate Pred = ICmp->getPredicate();
  ICmpInst::Predicate OriginalPred = Pred;
  if (IVOperand != ICmp->getOperand(0)) {
    // Swapped
    assert(IVOperand == ICmp->getOperand(1) && "Can't find IVOperand");
    IVOperIdx = 1;
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  // Get the SCEVs for the ICmp operands (in the specific context of the
  // current loop)
  const Loop *ICmpLoop = LI->getLoopFor(ICmp->getParent());
  const SCEV *S = SE->getSCEVAtScope(ICmp->getOperand(IVOperIdx), ICmpLoop);
  const SCEV *X = SE->getSCEVAtScope(ICmp->getOperand(1 - IVOperIdx), ICmpLoop);

  // If the condition is always true or always false in the given context,
  // replace it with a constant value.
  SmallVector<Instruction *, 4> Users;
  for (auto *U : ICmp->users())
    Users.push_back(cast<Instruction>(U));
  // NOTE(review): findCommonDominator asserts on an empty list, so this path
  // assumes the comparison has at least one user — confirm callers guarantee
  // that (a user-less compare would normally be dead already).
  const Instruction *CtxI = findCommonDominator(Users, *DT);
  if (auto Ev = SE->evaluatePredicateAt(Pred, S, X, CtxI)) {
    ICmp->replaceAllUsesWith(ConstantInt::getBool(ICmp->getContext(), *Ev));
    DeadInsts.emplace_back(ICmp);
    LLVM_DEBUG(dbgs() << "INDVARS: Eliminated comparison: " << *ICmp << '\n');
  } else if (makeIVComparisonInvariant(ICmp, IVOperand)) {
    // fallthrough to end of function
  } else if (ICmpInst::isSigned(OriginalPred) &&
             SE->isKnownNonNegative(S) && SE->isKnownNonNegative(X)) {
    // If we were unable to make anything above, all we can is to canonicalize
    // the comparison hoping that it will open the doors for other
    // optimizations. If we find out that we compare two non-negative values,
    // we turn the instruction's predicate to its unsigned version. Note that
    // we cannot rely on Pred here unless we check if we have swapped it.
    assert(ICmp->getPredicate() == OriginalPred && "Predicate changed?");
    LLVM_DEBUG(dbgs() << "INDVARS: Turn to unsigned comparison: " << *ICmp
                      << '\n');
    ICmp->setPredicate(ICmpInst::getUnsignedPredicate(OriginalPred));
  } else
    return;

  ++NumElimCmp;
  Changed = true;
}
  275. bool SimplifyIndvar::eliminateSDiv(BinaryOperator *SDiv) {
  276. // Get the SCEVs for the ICmp operands.
  277. auto *N = SE->getSCEV(SDiv->getOperand(0));
  278. auto *D = SE->getSCEV(SDiv->getOperand(1));
  279. // Simplify unnecessary loops away.
  280. const Loop *L = LI->getLoopFor(SDiv->getParent());
  281. N = SE->getSCEVAtScope(N, L);
  282. D = SE->getSCEVAtScope(D, L);
  283. // Replace sdiv by udiv if both of the operands are non-negative
  284. if (SE->isKnownNonNegative(N) && SE->isKnownNonNegative(D)) {
  285. auto *UDiv = BinaryOperator::Create(
  286. BinaryOperator::UDiv, SDiv->getOperand(0), SDiv->getOperand(1),
  287. SDiv->getName() + ".udiv", SDiv);
  288. UDiv->setIsExact(SDiv->isExact());
  289. SDiv->replaceAllUsesWith(UDiv);
  290. LLVM_DEBUG(dbgs() << "INDVARS: Simplified sdiv: " << *SDiv << '\n');
  291. ++NumSimplifiedSDiv;
  292. Changed = true;
  293. DeadInsts.push_back(SDiv);
  294. return true;
  295. }
  296. return false;
  297. }
  298. // i %s n -> i %u n if i >= 0 and n >= 0
  299. void SimplifyIndvar::replaceSRemWithURem(BinaryOperator *Rem) {
  300. auto *N = Rem->getOperand(0), *D = Rem->getOperand(1);
  301. auto *URem = BinaryOperator::Create(BinaryOperator::URem, N, D,
  302. Rem->getName() + ".urem", Rem);
  303. Rem->replaceAllUsesWith(URem);
  304. LLVM_DEBUG(dbgs() << "INDVARS: Simplified srem: " << *Rem << '\n');
  305. ++NumSimplifiedSRem;
  306. Changed = true;
  307. DeadInsts.emplace_back(Rem);
  308. }
  309. // i % n --> i if i is in [0,n).
  310. void SimplifyIndvar::replaceRemWithNumerator(BinaryOperator *Rem) {
  311. Rem->replaceAllUsesWith(Rem->getOperand(0));
  312. LLVM_DEBUG(dbgs() << "INDVARS: Simplified rem: " << *Rem << '\n');
  313. ++NumElimRem;
  314. Changed = true;
  315. DeadInsts.emplace_back(Rem);
  316. }
  317. // (i+1) % n --> (i+1)==n?0:(i+1) if i is in [0,n).
  318. void SimplifyIndvar::replaceRemWithNumeratorOrZero(BinaryOperator *Rem) {
  319. auto *T = Rem->getType();
  320. auto *N = Rem->getOperand(0), *D = Rem->getOperand(1);
  321. ICmpInst *ICmp = new ICmpInst(Rem, ICmpInst::ICMP_EQ, N, D);
  322. SelectInst *Sel =
  323. SelectInst::Create(ICmp, ConstantInt::get(T, 0), N, "iv.rem", Rem);
  324. Rem->replaceAllUsesWith(Sel);
  325. LLVM_DEBUG(dbgs() << "INDVARS: Simplified rem: " << *Rem << '\n');
  326. ++NumElimRem;
  327. Changed = true;
  328. DeadInsts.emplace_back(Rem);
  329. }
/// SimplifyIVUsers helper for eliminating useless remainder operations
/// operating on an induction variable or replacing srem by urem.
///
/// Dispatches to replaceRemWithNumerator / replaceRemWithNumeratorOrZero /
/// replaceSRemWithURem depending on what SCEV can prove about the operands.
void SimplifyIndvar::simplifyIVRemainder(BinaryOperator *Rem, Value *IVOperand,
                                         bool IsSigned) {
  auto *NValue = Rem->getOperand(0);
  auto *DValue = Rem->getOperand(1);
  // We're only interested in the case where we know something about
  // the numerator, unless it is a srem, because we want to replace srem by urem
  // in general.
  bool UsedAsNumerator = IVOperand == NValue;
  if (!UsedAsNumerator && !IsSigned)
    return;

  const SCEV *N = SE->getSCEV(NValue);

  // Simplify unnecessary loops away.
  const Loop *ICmpLoop = LI->getLoopFor(Rem->getParent());
  N = SE->getSCEVAtScope(N, ICmpLoop);

  bool IsNumeratorNonNegative = !IsSigned || SE->isKnownNonNegative(N);

  // Do not proceed if the Numerator may be negative
  if (!IsNumeratorNonNegative)
    return;

  const SCEV *D = SE->getSCEV(DValue);
  D = SE->getSCEVAtScope(D, ICmpLoop);

  if (UsedAsNumerator) {
    auto LT = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
    // N < D: the remainder is just N.
    if (SE->isKnownPredicate(LT, N, D)) {
      replaceRemWithNumerator(Rem);
      return;
    }

    // N - 1 < D: the remainder is N, except when N == D it wraps to zero.
    auto *T = Rem->getType();
    const auto *NLessOne = SE->getMinusSCEV(N, SE->getOne(T));
    if (SE->isKnownPredicate(LT, NLessOne, D)) {
      replaceRemWithNumeratorOrZero(Rem);
      return;
    }
  }

  // Try to replace SRem with URem, if both N and D are known non-negative.
  // Since we have already checked N, we only need to check D now.
  if (!IsSigned || !SE->isKnownNonNegative(D))
    return;

  replaceSRemWithURem(Rem);
}
/// Replace a *.with.overflow intrinsic by the plain binary operation when
/// SCEV proves the operation cannot overflow. The overflow bit is rewritten
/// to false and extractvalue users are cleaned up eagerly.
bool SimplifyIndvar::eliminateOverflowIntrinsic(WithOverflowInst *WO) {
  const SCEV *LHS = SE->getSCEV(WO->getLHS());
  const SCEV *RHS = SE->getSCEV(WO->getRHS());
  if (!SE->willNotOverflow(WO->getBinaryOp(), WO->isSigned(), LHS, RHS))
    return false;

  // Proved no overflow, nuke the overflow check and, if possible, the overflow
  // intrinsic as well.
  BinaryOperator *NewResult = BinaryOperator::Create(
      WO->getBinaryOp(), WO->getLHS(), WO->getRHS(), "", WO);
  if (WO->isSigned())
    NewResult->setHasNoSignedWrap(true);
  else
    NewResult->setHasNoUnsignedWrap(true);

  SmallVector<ExtractValueInst *, 4> ToDelete;
  for (auto *U : WO->users()) {
    if (auto *EVI = dyn_cast<ExtractValueInst>(U)) {
      if (EVI->getIndices()[0] == 1)
        // Index 1 is the overflow bit, which is now known false.
        EVI->replaceAllUsesWith(ConstantInt::getFalse(WO->getContext()));
      else {
        assert(EVI->getIndices()[0] == 0 && "Only two possibilities!");
        // Index 0 is the arithmetic result.
        EVI->replaceAllUsesWith(NewResult);
      }
      // Defer the erase: erasing while walking WO->users() would invalidate
      // the iteration.
      ToDelete.push_back(EVI);
    }
  }

  for (auto *EVI : ToDelete)
    EVI->eraseFromParent();

  if (WO->use_empty())
    WO->eraseFromParent();

  Changed = true;
  return true;
}
  403. bool SimplifyIndvar::eliminateSaturatingIntrinsic(SaturatingInst *SI) {
  404. const SCEV *LHS = SE->getSCEV(SI->getLHS());
  405. const SCEV *RHS = SE->getSCEV(SI->getRHS());
  406. if (!SE->willNotOverflow(SI->getBinaryOp(), SI->isSigned(), LHS, RHS))
  407. return false;
  408. BinaryOperator *BO = BinaryOperator::Create(
  409. SI->getBinaryOp(), SI->getLHS(), SI->getRHS(), SI->getName(), SI);
  410. if (SI->isSigned())
  411. BO->setHasNoSignedWrap();
  412. else
  413. BO->setHasNoUnsignedWrap();
  414. SI->replaceAllUsesWith(BO);
  415. DeadInsts.emplace_back(SI);
  416. Changed = true;
  417. return true;
  418. }
/// Try to eliminate a trunc of an IV whose only users are comparisons against
/// loop-invariant values, by widening those comparisons to the IV's type.
bool SimplifyIndvar::eliminateTrunc(TruncInst *TI) {
  // It is always legal to replace
  //   icmp <pred> i32 trunc(iv), n
  // with
  //   icmp <pred> i64 sext(trunc(iv)), sext(n), if pred is signed predicate.
  // Or with
  //   icmp <pred> i64 zext(trunc(iv)), zext(n), if pred is unsigned predicate.
  // Or with either of these if pred is an equality predicate.
  //
  // If we can prove that iv == sext(trunc(iv)) or iv == zext(trunc(iv)) for
  // every comparison which uses trunc, it means that we can replace each of
  // them with comparison of iv against sext/zext(n). We no longer need trunc
  // after that.
  //
  // TODO: Should we do this if we can widen *some* comparisons, but not all
  // of them? Sometimes it is enough to enable other optimizations, but the
  // trunc instruction will stay in the loop.
  Value *IV = TI->getOperand(0);
  Type *IVTy = IV->getType();
  const SCEV *IVSCEV = SE->getSCEV(IV);
  const SCEV *TISCEV = SE->getSCEV(TI);

  // Check if iv == zext(trunc(iv)) and if iv == sext(trunc(iv)). If so, we can
  // get rid of trunc
  bool DoesSExtCollapse = false;
  bool DoesZExtCollapse = false;
  if (IVSCEV == SE->getSignExtendExpr(TISCEV, IVTy))
    DoesSExtCollapse = true;
  if (IVSCEV == SE->getZeroExtendExpr(TISCEV, IVTy))
    DoesZExtCollapse = true;

  // If neither sext nor zext does collapse, it is not profitable to do any
  // transform. Bail.
  if (!DoesSExtCollapse && !DoesZExtCollapse)
    return false;

  // Collect users of the trunc that look like comparisons against invariants.
  // Bail if we find something different.
  SmallVector<ICmpInst *, 4> ICmpUsers;
  for (auto *U : TI->users()) {
    // We don't care about users in unreachable blocks.
    if (isa<Instruction>(U) &&
        !DT->isReachableFromEntry(cast<Instruction>(U)->getParent()))
      continue;
    ICmpInst *ICI = dyn_cast<ICmpInst>(U);
    if (!ICI) return false;
    assert(L->contains(ICI->getParent()) && "LCSSA form broken?");
    // One side must be the trunc, the other must be loop-invariant.
    if (!(ICI->getOperand(0) == TI && L->isLoopInvariant(ICI->getOperand(1))) &&
        !(ICI->getOperand(1) == TI && L->isLoopInvariant(ICI->getOperand(0))))
      return false;
    // If we cannot get rid of trunc, bail.
    if (ICI->isSigned() && !DoesSExtCollapse)
      return false;
    if (ICI->isUnsigned() && !DoesZExtCollapse)
      return false;
    // For equality, either signed or unsigned works.
    ICmpUsers.push_back(ICI);
  }

  // Decide whether a particular comparison can be widened with zext.
  auto CanUseZExt = [&](ICmpInst *ICI) {
    // Unsigned comparison can be widened as unsigned.
    if (ICI->isUnsigned())
      return true;
    // Is it profitable to do zext?
    if (!DoesZExtCollapse)
      return false;
    // For equality, we can safely zext both parts.
    if (ICI->isEquality())
      return true;
    // Otherwise we can only use zext when comparing two non-negative or two
    // negative values. But in practice, we will never pass DoesZExtCollapse
    // check for a negative value, because zext(trunc(x)) is non-negative. So
    // it only make sense to check for non-negativity here.
    const SCEV *SCEVOP1 = SE->getSCEV(ICI->getOperand(0));
    const SCEV *SCEVOP2 = SE->getSCEV(ICI->getOperand(1));
    return SE->isKnownNonNegative(SCEVOP1) && SE->isKnownNonNegative(SCEVOP2);
  };

  // Replace all comparisons against trunc with comparisons against IV.
  for (auto *ICI : ICmpUsers) {
    bool IsSwapped = L->isLoopInvariant(ICI->getOperand(0));
    auto *Op1 = IsSwapped ? ICI->getOperand(0) : ICI->getOperand(1);
    Instruction *Ext = nullptr;
    // For signed/unsigned predicate, replace the old comparison with comparison
    // of immediate IV against sext/zext of the invariant argument. If we can
    // use either sext or zext (i.e. we are dealing with equality predicate),
    // then prefer zext as a more canonical form.
    // TODO: If we see a signed comparison which can be turned into unsigned,
    // we can do it here for canonicalization purposes.
    ICmpInst::Predicate Pred = ICI->getPredicate();
    if (IsSwapped) Pred = ICmpInst::getSwappedPredicate(Pred);
    if (CanUseZExt(ICI)) {
      assert(DoesZExtCollapse && "Unprofitable zext?");
      Ext = new ZExtInst(Op1, IVTy, "zext", ICI);
      Pred = ICmpInst::getUnsignedPredicate(Pred);
    } else {
      assert(DoesSExtCollapse && "Unprofitable sext?");
      Ext = new SExtInst(Op1, IVTy, "sext", ICI);
      assert(Pred == ICmpInst::getSignedPredicate(Pred) && "Must be signed!");
    }
    // NOTE(review): this local 'Changed' shadows the member flag — presumably
    // deliberate, so that makeLoopInvariant's hoisting alone is not counted
    // as a simplification; confirm before renaming.
    bool Changed;
    L->makeLoopInvariant(Ext, Changed);
    (void)Changed;
    ICmpInst *NewICI = new ICmpInst(ICI, Pred, IV, Ext);
    ICI->replaceAllUsesWith(NewICI);
    DeadInsts.emplace_back(ICI);
  }

  // Trunc no longer needed.
  TI->replaceAllUsesWith(UndefValue::get(TI->getType()));
  DeadInsts.emplace_back(TI);
  return true;
}
  526. /// Eliminate an operation that consumes a simple IV and has no observable
  527. /// side-effect given the range of IV values. IVOperand is guaranteed SCEVable,
  528. /// but UseInst may not be.
  529. bool SimplifyIndvar::eliminateIVUser(Instruction *UseInst,
  530. Instruction *IVOperand) {
  531. if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
  532. eliminateIVComparison(ICmp, IVOperand);
  533. return true;
  534. }
  535. if (BinaryOperator *Bin = dyn_cast<BinaryOperator>(UseInst)) {
  536. bool IsSRem = Bin->getOpcode() == Instruction::SRem;
  537. if (IsSRem || Bin->getOpcode() == Instruction::URem) {
  538. simplifyIVRemainder(Bin, IVOperand, IsSRem);
  539. return true;
  540. }
  541. if (Bin->getOpcode() == Instruction::SDiv)
  542. return eliminateSDiv(Bin);
  543. }
  544. if (auto *WO = dyn_cast<WithOverflowInst>(UseInst))
  545. if (eliminateOverflowIntrinsic(WO))
  546. return true;
  547. if (auto *SI = dyn_cast<SaturatingInst>(UseInst))
  548. if (eliminateSaturatingIntrinsic(SI))
  549. return true;
  550. if (auto *TI = dyn_cast<TruncInst>(UseInst))
  551. if (eliminateTrunc(TI))
  552. return true;
  553. if (eliminateIdentitySCEV(UseInst, IVOperand))
  554. return true;
  555. return false;
  556. }
  557. static Instruction *GetLoopInvariantInsertPosition(Loop *L, Instruction *Hint) {
  558. if (auto *BB = L->getLoopPreheader())
  559. return BB->getTerminator();
  560. return Hint;
  561. }
  562. /// Replace the UseInst with a loop invariant expression if it is safe.
  563. bool SimplifyIndvar::replaceIVUserWithLoopInvariant(Instruction *I) {
  564. if (!SE->isSCEVable(I->getType()))
  565. return false;
  566. // Get the symbolic expression for this instruction.
  567. const SCEV *S = SE->getSCEV(I);
  568. if (!SE->isLoopInvariant(S, L))
  569. return false;
  570. // Do not generate something ridiculous even if S is loop invariant.
  571. if (Rewriter.isHighCostExpansion(S, L, SCEVCheapExpansionBudget, TTI, I))
  572. return false;
  573. auto *IP = GetLoopInvariantInsertPosition(L, I);
  574. if (!isSafeToExpandAt(S, IP, *SE)) {
  575. LLVM_DEBUG(dbgs() << "INDVARS: Can not replace IV user: " << *I
  576. << " with non-speculable loop invariant: " << *S << '\n');
  577. return false;
  578. }
  579. auto *Invariant = Rewriter.expandCodeFor(S, I->getType(), IP);
  580. I->replaceAllUsesWith(Invariant);
  581. LLVM_DEBUG(dbgs() << "INDVARS: Replace IV user: " << *I
  582. << " with loop invariant: " << *S << '\n');
  583. ++NumFoldedUser;
  584. Changed = true;
  585. DeadInsts.emplace_back(I);
  586. return true;
  587. }
  588. /// Eliminate any operation that SCEV can prove is an identity function.
  589. bool SimplifyIndvar::eliminateIdentitySCEV(Instruction *UseInst,
  590. Instruction *IVOperand) {
  591. if (!SE->isSCEVable(UseInst->getType()) ||
  592. (UseInst->getType() != IVOperand->getType()) ||
  593. (SE->getSCEV(UseInst) != SE->getSCEV(IVOperand)))
  594. return false;
  595. // getSCEV(X) == getSCEV(Y) does not guarantee that X and Y are related in the
  596. // dominator tree, even if X is an operand to Y. For instance, in
  597. //
  598. // %iv = phi i32 {0,+,1}
  599. // br %cond, label %left, label %merge
  600. //
  601. // left:
  602. // %X = add i32 %iv, 0
  603. // br label %merge
  604. //
  605. // merge:
  606. // %M = phi (%X, %iv)
  607. //
  608. // getSCEV(%M) == getSCEV(%X) == {0,+,1}, but %X does not dominate %M, and
  609. // %M.replaceAllUsesWith(%X) would be incorrect.
  610. if (isa<PHINode>(UseInst))
  611. // If UseInst is not a PHI node then we know that IVOperand dominates
  612. // UseInst directly from the legality of SSA.
  613. if (!DT || !DT->dominates(IVOperand, UseInst))
  614. return false;
  615. if (!LI->replacementPreservesLCSSAForm(UseInst, IVOperand))
  616. return false;
  617. LLVM_DEBUG(dbgs() << "INDVARS: Eliminated identity: " << *UseInst << '\n');
  618. UseInst->replaceAllUsesWith(IVOperand);
  619. ++NumElimIdentity;
  620. Changed = true;
  621. DeadInsts.emplace_back(UseInst);
  622. return true;
  623. }
  624. /// Annotate BO with nsw / nuw if it provably does not signed-overflow /
  625. /// unsigned-overflow. Returns true if anything changed, false otherwise.
  626. bool SimplifyIndvar::strengthenOverflowingOperation(BinaryOperator *BO,
  627. Value *IVOperand) {
  628. SCEV::NoWrapFlags Flags;
  629. bool Deduced;
  630. std::tie(Flags, Deduced) = SE->getStrengthenedNoWrapFlagsFromBinOp(
  631. cast<OverflowingBinaryOperator>(BO));
  632. if (!Deduced)
  633. return Deduced;
  634. BO->setHasNoUnsignedWrap(ScalarEvolution::maskFlags(Flags, SCEV::FlagNUW) ==
  635. SCEV::FlagNUW);
  636. BO->setHasNoSignedWrap(ScalarEvolution::maskFlags(Flags, SCEV::FlagNSW) ==
  637. SCEV::FlagNSW);
  638. // The getStrengthenedNoWrapFlagsFromBinOp() check inferred additional nowrap
  639. // flags on addrecs while performing zero/sign extensions. We could call
  640. // forgetValue() here to make sure those flags also propagate to any other
  641. // SCEV expressions based on the addrec. However, this can have pathological
  642. // compile-time impact, see https://bugs.llvm.org/show_bug.cgi?id=50384.
  643. return Deduced;
  644. }
  645. /// Annotate the Shr in (X << IVOperand) >> C as exact using the
  646. /// information from the IV's range. Returns true if anything changed, false
  647. /// otherwise.
  648. bool SimplifyIndvar::strengthenRightShift(BinaryOperator *BO,
  649. Value *IVOperand) {
  650. using namespace llvm::PatternMatch;
  651. if (BO->getOpcode() == Instruction::Shl) {
  652. bool Changed = false;
  653. ConstantRange IVRange = SE->getUnsignedRange(SE->getSCEV(IVOperand));
  654. for (auto *U : BO->users()) {
  655. const APInt *C;
  656. if (match(U,
  657. m_AShr(m_Shl(m_Value(), m_Specific(IVOperand)), m_APInt(C))) ||
  658. match(U,
  659. m_LShr(m_Shl(m_Value(), m_Specific(IVOperand)), m_APInt(C)))) {
  660. BinaryOperator *Shr = cast<BinaryOperator>(U);
  661. if (!Shr->isExact() && IVRange.getUnsignedMin().uge(*C)) {
  662. Shr->setIsExact(true);
  663. Changed = true;
  664. }
  665. }
  666. }
  667. return Changed;
  668. }
  669. return false;
  670. }
  671. /// Add all uses of Def to the current IV's worklist.
  672. static void pushIVUsers(
  673. Instruction *Def, Loop *L,
  674. SmallPtrSet<Instruction*,16> &Simplified,
  675. SmallVectorImpl< std::pair<Instruction*,Instruction*> > &SimpleIVUsers) {
  676. for (User *U : Def->users()) {
  677. Instruction *UI = cast<Instruction>(U);
  678. // Avoid infinite or exponential worklist processing.
  679. // Also ensure unique worklist users.
  680. // If Def is a LoopPhi, it may not be in the Simplified set, so check for
  681. // self edges first.
  682. if (UI == Def)
  683. continue;
  684. // Only change the current Loop, do not change the other parts (e.g. other
  685. // Loops).
  686. if (!L->contains(UI))
  687. continue;
  688. // Do not push the same instruction more than once.
  689. if (!Simplified.insert(UI).second)
  690. continue;
  691. SimpleIVUsers.push_back(std::make_pair(UI, Def));
  692. }
  693. }
  694. /// Return true if this instruction generates a simple SCEV
  695. /// expression in terms of that IV.
  696. ///
  697. /// This is similar to IVUsers' isInteresting() but processes each instruction
  698. /// non-recursively when the operand is already known to be a simpleIVUser.
  699. ///
  700. static bool isSimpleIVUser(Instruction *I, const Loop *L, ScalarEvolution *SE) {
  701. if (!SE->isSCEVable(I->getType()))
  702. return false;
  703. // Get the symbolic expression for this instruction.
  704. const SCEV *S = SE->getSCEV(I);
  705. // Only consider affine recurrences.
  706. const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S);
  707. if (AR && AR->getLoop() == L)
  708. return true;
  709. return false;
  710. }
/// Iteratively perform simplification on a worklist of users
/// of the specified induction variable. Each successive simplification may push
/// more users which may themselves be candidates for simplification.
///
/// This algorithm does not require IVUsers analysis. Instead, it simplifies
/// instructions in-place during analysis. Rather than rewriting induction
/// variables bottom-up from their users, it transforms a chain of IVUsers
/// top-down, updating the IR only when it encounters a clear optimization
/// opportunity.
///
/// Once DisableIVRewrite is default, LSR will be the only client of IVUsers.
///
void SimplifyIndvar::simplifyUsers(PHINode *CurrIV, IVVisitor *V) {
  if (!SE->isSCEVable(CurrIV->getType()))
    return;

  // Instructions processed by SimplifyIndvar for CurrIV.
  SmallPtrSet<Instruction*,16> Simplified;

  // Use-def pairs if IV users waiting to be processed for CurrIV.
  SmallVector<std::pair<Instruction*, Instruction*>, 8> SimpleIVUsers;

  // Push users of the current LoopPhi. In rare cases, pushIVUsers may be
  // called multiple times for the same LoopPhi. This is the proper thing to
  // do for loop header phis that use each other.
  pushIVUsers(CurrIV, L, Simplified, SimpleIVUsers);

  while (!SimpleIVUsers.empty()) {
    std::pair<Instruction*, Instruction*> UseOper =
      SimpleIVUsers.pop_back_val();
    Instruction *UseInst = UseOper.first;

    // If a user of the IndVar is trivially dead, we prefer just to mark it dead
    // rather than try to do some complex analysis or transformation (such as
    // widening) basing on it.
    // TODO: Propagate TLI and pass it here to handle more cases.
    if (isInstructionTriviallyDead(UseInst, /* TLI */ nullptr)) {
      DeadInsts.emplace_back(UseInst);
      continue;
    }

    // Bypass back edges to avoid extra work.
    if (UseInst == CurrIV) continue;

    // Try to replace UseInst with a loop invariant before any other
    // simplifications.
    if (replaceIVUserWithLoopInvariant(UseInst))
      continue;

    Instruction *IVOperand = UseOper.second;
    // Repeatedly fold operands of UseInst into IVOperand, chasing the folded
    // value for as long as it remains an instruction. The assert bounds the
    // chase by the number of instructions seen so far.
    for (unsigned N = 0; IVOperand; ++N) {
      assert(N <= Simplified.size() && "runaway iteration");
      Value *NewOper = foldIVUser(UseInst, IVOperand);
      if (!NewOper)
        break; // done folding
      IVOperand = dyn_cast<Instruction>(NewOper);
    }
    // Folding may have produced a non-instruction (e.g. a constant); nothing
    // further to simplify in that case.
    if (!IVOperand)
      continue;

    if (eliminateIVUser(UseInst, IVOperand)) {
      // The eliminated user's operand may now have new simplifiable users.
      pushIVUsers(IVOperand, L, Simplified, SimpleIVUsers);
      continue;
    }

    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(UseInst)) {
      if ((isa<OverflowingBinaryOperator>(BO) &&
           strengthenOverflowingOperation(BO, IVOperand)) ||
          (isa<ShlOperator>(BO) && strengthenRightShift(BO, IVOperand))) {
        // re-queue uses of the now modified binary operator and fall
        // through to the checks that remain.
        pushIVUsers(IVOperand, L, Simplified, SimpleIVUsers);
      }
    }

    // Let the client (e.g. the IV widener) inspect casts of the IV; casts are
    // not queued for further simplification here.
    CastInst *Cast = dyn_cast<CastInst>(UseInst);
    if (V && Cast) {
      V->visitCast(Cast);
      continue;
    }
    if (isSimpleIVUser(UseInst, L, SE)) {
      pushIVUsers(UseInst, L, Simplified, SimpleIVUsers);
    }
  }
}
  785. namespace llvm {
// Out-of-line virtual method definition to anchor IVVisitor's vtable to this
// translation unit.
void IVVisitor::anchor() { }
  787. /// Simplify instructions that use this induction variable
  788. /// by using ScalarEvolution to analyze the IV's recurrence.
  789. bool simplifyUsersOfIV(PHINode *CurrIV, ScalarEvolution *SE, DominatorTree *DT,
  790. LoopInfo *LI, const TargetTransformInfo *TTI,
  791. SmallVectorImpl<WeakTrackingVH> &Dead,
  792. SCEVExpander &Rewriter, IVVisitor *V) {
  793. SimplifyIndvar SIV(LI->getLoopFor(CurrIV->getParent()), SE, DT, LI, TTI,
  794. Rewriter, Dead);
  795. SIV.simplifyUsers(CurrIV, V);
  796. return SIV.hasChanged();
  797. }
  798. /// Simplify users of induction variables within this
  799. /// loop. This does not actually change or add IVs.
  800. bool simplifyLoopIVs(Loop *L, ScalarEvolution *SE, DominatorTree *DT,
  801. LoopInfo *LI, const TargetTransformInfo *TTI,
  802. SmallVectorImpl<WeakTrackingVH> &Dead) {
  803. SCEVExpander Rewriter(*SE, SE->getDataLayout(), "indvars");
  804. #ifndef NDEBUG
  805. Rewriter.setDebugType(DEBUG_TYPE);
  806. #endif
  807. bool Changed = false;
  808. for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
  809. Changed |=
  810. simplifyUsersOfIV(cast<PHINode>(I), SE, DT, LI, TTI, Dead, Rewriter);
  811. }
  812. return Changed;
  813. }
  814. } // namespace llvm
  815. namespace {
  816. //===----------------------------------------------------------------------===//
  817. // Widen Induction Variables - Extend the width of an IV to cover its
  818. // widest uses.
  819. //===----------------------------------------------------------------------===//
/// Widens a narrow induction variable (OrigPhi) to WideType, rewriting the
/// narrow IV's users to use the wide IV where legal so that the narrow IV and
/// its extensions can be removed.
class WidenIV {
  // Parameters
  PHINode *OrigPhi;         // The narrow IV phi being widened.
  Type *WideType;           // Target integer type for the wide IV.

  // Context
  LoopInfo *LI;
  Loop *L;                  // Loop containing OrigPhi (its header phi).
  ScalarEvolution *SE;
  DominatorTree *DT;

  // Does the module have any calls to the llvm.experimental.guard intrinsic
  // at all? If not we can avoid scanning instructions looking for guards.
  bool HasGuards;

  // When false, post-increment range refinement is disabled.
  bool UsePostIncrementRanges;

  // Statistics
  unsigned NumElimExt = 0;    // Extend instructions eliminated.
  unsigned NumWidened = 0;    // Narrow uses widened.

  // Result
  PHINode *WidePhi = nullptr;          // The wide IV phi created, if any.
  Instruction *WideInc = nullptr;      // Wide IV increment instruction.
  const SCEV *WideIncExpr = nullptr;   // SCEV form of WideInc.
  SmallVectorImpl<WeakTrackingVH> &DeadInsts;  // Sink for obsolete insts.

  // Narrow instructions already processed by this widening.
  SmallPtrSet<Instruction *,16> Widened;

  enum ExtendKind { ZeroExtended, SignExtended, Unknown };

  // A map tracking the kind of extension used to widen each narrow IV
  // and narrow IV user.
  // Key: pointer to a narrow IV or IV user.
  // Value: the kind of extension used to widen this Instruction.
  DenseMap<AssertingVH<Instruction>, ExtendKind> ExtendKindMap;

  using DefUserPair = std::pair<AssertingVH<Value>, AssertingVH<Instruction>>;

  // A map with control-dependent ranges for post increment IV uses. The key is
  // a pair of IV def and a use of this def denoting the context. The value is
  // a ConstantRange representing possible values of the def at the given
  // context.
  DenseMap<DefUserPair, ConstantRange> PostIncRangeInfos;

  // Look up the recorded context-dependent range for (Def, UseI), if any.
  Optional<ConstantRange> getPostIncRangeInfo(Value *Def,
                                              Instruction *UseI) {
    DefUserPair Key(Def, UseI);
    auto It = PostIncRangeInfos.find(Key);
    return It == PostIncRangeInfos.end()
               ? Optional<ConstantRange>(None)
               : Optional<ConstantRange>(It->second);
  }

  void calculatePostIncRanges(PHINode *OrigPhi);
  void calculatePostIncRange(Instruction *NarrowDef, Instruction *NarrowUser);

  // Record range R for (Def, UseI), intersecting with any existing entry so
  // the stored range only ever narrows.
  void updatePostIncRangeInfo(Value *Def, Instruction *UseI, ConstantRange R) {
    DefUserPair Key(Def, UseI);
    auto It = PostIncRangeInfos.find(Key);
    if (It == PostIncRangeInfos.end())
      PostIncRangeInfos.insert({Key, R});
    else
      It->second = R.intersectWith(It->second);
  }

public:
  /// Record a link in the Narrow IV def-use chain along with the WideIV that
  /// computes the same value as the Narrow IV def. This avoids caching Use*
  /// pointers.
  struct NarrowIVDefUse {
    Instruction *NarrowDef = nullptr;
    Instruction *NarrowUse = nullptr;
    Instruction *WideDef = nullptr;

    // True if the narrow def is never negative. Tracking this information lets
    // us use a sign extension instead of a zero extension or vice versa, when
    // profitable and legal.
    bool NeverNegative = false;

    NarrowIVDefUse(Instruction *ND, Instruction *NU, Instruction *WD,
                   bool NeverNegative)
        : NarrowDef(ND), NarrowUse(NU), WideDef(WD),
          NeverNegative(NeverNegative) {}
  };

  WidenIV(const WideIVInfo &WI, LoopInfo *LInfo, ScalarEvolution *SEv,
          DominatorTree *DTree, SmallVectorImpl<WeakTrackingVH> &DI,
          bool HasGuards, bool UsePostIncrementRanges = true);

  // Main entry point: create the wide IV and rewrite narrow uses. Returns the
  // wide phi, or null if widening was not performed.
  PHINode *createWideIV(SCEVExpander &Rewriter);

  unsigned getNumElimExt() { return NumElimExt; };
  unsigned getNumWidened() { return NumWidened; };

protected:
  Value *createExtendInst(Value *NarrowOper, Type *WideType, bool IsSigned,
                          Instruction *Use);

  Instruction *cloneIVUser(NarrowIVDefUse DU, const SCEVAddRecExpr *WideAR);
  Instruction *cloneArithmeticIVUser(NarrowIVDefUse DU,
                                     const SCEVAddRecExpr *WideAR);
  Instruction *cloneBitwiseIVUser(NarrowIVDefUse DU);

  ExtendKind getExtendKind(Instruction *I);

  // An AddRec describing a widened value, paired with the extension kind that
  // produced it.
  using WidenedRecTy = std::pair<const SCEVAddRecExpr *, ExtendKind>;

  WidenedRecTy getWideRecurrence(NarrowIVDefUse DU);
  WidenedRecTy getExtendedOperandRecurrence(NarrowIVDefUse DU);

  const SCEV *getSCEVByOpCode(const SCEV *LHS, const SCEV *RHS,
                              unsigned OpCode) const;

  Instruction *widenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter);

  bool widenLoopCompare(NarrowIVDefUse DU);
  bool widenWithVariantUse(NarrowIVDefUse DU);

  void pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef);

private:
  // Worklist of narrow def-use links still to be visited.
  SmallVector<NarrowIVDefUse, 8> NarrowIVUsers;
};
  915. } // namespace
/// Determine the insertion point for this user. By default, insert immediately
/// before the user. SCEVExpander or LICM will hoist loop invariants out of the
/// loop. For PHI nodes, there may be multiple uses, so compute the nearest
/// common dominator for the incoming blocks. A nullptr can be returned if no
/// viable location is found: it may happen if User is a PHI and Def only comes
/// to this PHI from unreachable blocks.
static Instruction *getInsertPointForUses(Instruction *User, Value *Def,
                                          DominatorTree *DT, LoopInfo *LI) {
  PHINode *PHI = dyn_cast<PHINode>(User);
  if (!PHI)
    return User;

  // For a phi, the value must be available at the end of every predecessor
  // block that passes Def in; take the nearest common dominator of all such
  // (reachable) predecessors.
  Instruction *InsertPt = nullptr;
  for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i) {
    if (PHI->getIncomingValue(i) != Def)
      continue;

    BasicBlock *InsertBB = PHI->getIncomingBlock(i);

    // Ignore incoming edges from unreachable code.
    if (!DT->isReachableFromEntry(InsertBB))
      continue;

    if (!InsertPt) {
      InsertPt = InsertBB->getTerminator();
      continue;
    }
    InsertBB = DT->findNearestCommonDominator(InsertPt->getParent(), InsertBB);
    InsertPt = InsertBB->getTerminator();
  }

  // If we have skipped all inputs, it means that Def only comes to Phi from
  // unreachable blocks.
  if (!InsertPt)
    return nullptr;

  auto *DefI = dyn_cast<Instruction>(Def);
  if (!DefI)
    return InsertPt;

  assert(DT->dominates(DefI, InsertPt) && "def does not dominate all uses");

  auto *L = LI->getLoopFor(DefI->getParent());
  assert(!L || L->contains(LI->getLoopFor(InsertPt->getParent())));

  // Walk up the dominator tree until we reach a block in the same loop as the
  // def; inserting there keeps the new code at the def's loop depth rather
  // than inside a deeper nested loop.
  for (auto *DTN = (*DT)[InsertPt->getParent()]; DTN; DTN = DTN->getIDom())
    if (LI->getLoopFor(DTN->getBlock()) == L)
      return DTN->getBlock()->getTerminator();

  llvm_unreachable("DefI dominates InsertPt!");
}
/// Construct a widening transform for the narrow IV described by \p WI.
/// The loop is derived from the narrow phi's parent block, which must be the
/// loop header.
WidenIV::WidenIV(const WideIVInfo &WI, LoopInfo *LInfo, ScalarEvolution *SEv,
                 DominatorTree *DTree, SmallVectorImpl<WeakTrackingVH> &DI,
                 bool HasGuards, bool UsePostIncrementRanges)
    : OrigPhi(WI.NarrowIV), WideType(WI.WidestNativeType), LI(LInfo),
      L(LI->getLoopFor(OrigPhi->getParent())), SE(SEv), DT(DTree),
      HasGuards(HasGuards), UsePostIncrementRanges(UsePostIncrementRanges),
      DeadInsts(DI) {
  assert(L->getHeader() == OrigPhi->getParent() && "Phi must be an IV");
  // Seed the extension-kind map: the narrow IV itself is widened with the
  // extension kind recorded by the caller's analysis.
  ExtendKindMap[OrigPhi] = WI.IsSigned ? SignExtended : ZeroExtended;
}
  966. Value *WidenIV::createExtendInst(Value *NarrowOper, Type *WideType,
  967. bool IsSigned, Instruction *Use) {
  968. // Set the debug location and conservative insertion point.
  969. IRBuilder<> Builder(Use);
  970. // Hoist the insertion point into loop preheaders as far as possible.
  971. for (const Loop *L = LI->getLoopFor(Use->getParent());
  972. L && L->getLoopPreheader() && L->isLoopInvariant(NarrowOper);
  973. L = L->getParentLoop())
  974. Builder.SetInsertPoint(L->getLoopPreheader()->getTerminator());
  975. return IsSigned ? Builder.CreateSExt(NarrowOper, WideType) :
  976. Builder.CreateZExt(NarrowOper, WideType);
  977. }
  978. /// Instantiate a wide operation to replace a narrow operation. This only needs
  979. /// to handle operations that can evaluation to SCEVAddRec. It can safely return
  980. /// 0 for any operation we decide not to clone.
  981. Instruction *WidenIV::cloneIVUser(WidenIV::NarrowIVDefUse DU,
  982. const SCEVAddRecExpr *WideAR) {
  983. unsigned Opcode = DU.NarrowUse->getOpcode();
  984. switch (Opcode) {
  985. default:
  986. return nullptr;
  987. case Instruction::Add:
  988. case Instruction::Mul:
  989. case Instruction::UDiv:
  990. case Instruction::Sub:
  991. return cloneArithmeticIVUser(DU, WideAR);
  992. case Instruction::And:
  993. case Instruction::Or:
  994. case Instruction::Xor:
  995. case Instruction::Shl:
  996. case Instruction::LShr:
  997. case Instruction::AShr:
  998. return cloneBitwiseIVUser(DU);
  999. }
  1000. }
  1001. Instruction *WidenIV::cloneBitwiseIVUser(WidenIV::NarrowIVDefUse DU) {
  1002. Instruction *NarrowUse = DU.NarrowUse;
  1003. Instruction *NarrowDef = DU.NarrowDef;
  1004. Instruction *WideDef = DU.WideDef;
  1005. LLVM_DEBUG(dbgs() << "Cloning bitwise IVUser: " << *NarrowUse << "\n");
  1006. // Replace NarrowDef operands with WideDef. Otherwise, we don't know anything
  1007. // about the narrow operand yet so must insert a [sz]ext. It is probably loop
  1008. // invariant and will be folded or hoisted. If it actually comes from a
  1009. // widened IV, it should be removed during a future call to widenIVUse.
  1010. bool IsSigned = getExtendKind(NarrowDef) == SignExtended;
  1011. Value *LHS = (NarrowUse->getOperand(0) == NarrowDef)
  1012. ? WideDef
  1013. : createExtendInst(NarrowUse->getOperand(0), WideType,
  1014. IsSigned, NarrowUse);
  1015. Value *RHS = (NarrowUse->getOperand(1) == NarrowDef)
  1016. ? WideDef
  1017. : createExtendInst(NarrowUse->getOperand(1), WideType,
  1018. IsSigned, NarrowUse);
  1019. auto *NarrowBO = cast<BinaryOperator>(NarrowUse);
  1020. auto *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS, RHS,
  1021. NarrowBO->getName());
  1022. IRBuilder<> Builder(NarrowUse);
  1023. Builder.Insert(WideBO);
  1024. WideBO->copyIRFlags(NarrowBO);
  1025. return WideBO;
  1026. }
/// Clone an arithmetic narrow IV user (add/sub/mul/udiv) as a wide operation,
/// choosing between sext and zext of the non-IV operand by checking which
/// choice reproduces WideAR under SCEV. Returns null if neither choice works.
Instruction *WidenIV::cloneArithmeticIVUser(WidenIV::NarrowIVDefUse DU,
                                            const SCEVAddRecExpr *WideAR) {
  Instruction *NarrowUse = DU.NarrowUse;
  Instruction *NarrowDef = DU.NarrowDef;
  Instruction *WideDef = DU.WideDef;

  LLVM_DEBUG(dbgs() << "Cloning arithmetic IVUser: " << *NarrowUse << "\n");

  // Which operand of the narrow use is the IV def.
  unsigned IVOpIdx = (NarrowUse->getOperand(0) == NarrowDef) ? 0 : 1;

  // We're trying to find X such that
  //
  //  Widen(NarrowDef `op` NonIVNarrowDef) == WideAR == WideDef `op.wide` X
  //
  // We guess two solutions to X, sext(NonIVNarrowDef) and zext(NonIVNarrowDef),
  // and check using SCEV if any of them are correct.

  // Returns true if extending NonIVNarrowDef according to `SignExt` is a
  // correct solution to X.
  auto GuessNonIVOperand = [&](bool SignExt) {
    const SCEV *WideLHS;
    const SCEV *WideRHS;

    auto GetExtend = [this, SignExt](const SCEV *S, Type *Ty) {
      if (SignExt)
        return SE->getSignExtendExpr(S, Ty);
      return SE->getZeroExtendExpr(S, Ty);
    };

    if (IVOpIdx == 0) {
      WideLHS = SE->getSCEV(WideDef);
      const SCEV *NarrowRHS = SE->getSCEV(NarrowUse->getOperand(1));
      WideRHS = GetExtend(NarrowRHS, WideType);
    } else {
      const SCEV *NarrowLHS = SE->getSCEV(NarrowUse->getOperand(0));
      WideLHS = GetExtend(NarrowLHS, WideType);
      WideRHS = SE->getSCEV(WideDef);
    }

    // WideUse is "WideDef `op.wide` X" as described in the comment.
    const SCEV *WideUse =
        getSCEVByOpCode(WideLHS, WideRHS, NarrowUse->getOpcode());

    return WideUse == WideAR;
  };

  // Try the IV's own extension kind first, then the opposite kind.
  bool SignExtend = getExtendKind(NarrowDef) == SignExtended;
  if (!GuessNonIVOperand(SignExtend)) {
    SignExtend = !SignExtend;
    if (!GuessNonIVOperand(SignExtend))
      return nullptr;
  }

  // Materialize operands: the IV operand becomes WideDef, the other operand
  // gets the extension kind the guess above validated.
  Value *LHS = (NarrowUse->getOperand(0) == NarrowDef)
                   ? WideDef
                   : createExtendInst(NarrowUse->getOperand(0), WideType,
                                      SignExtend, NarrowUse);
  Value *RHS = (NarrowUse->getOperand(1) == NarrowDef)
                   ? WideDef
                   : createExtendInst(NarrowUse->getOperand(1), WideType,
                                      SignExtend, NarrowUse);

  auto *NarrowBO = cast<BinaryOperator>(NarrowUse);
  auto *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS, RHS,
                                        NarrowBO->getName());
  IRBuilder<> Builder(NarrowUse);
  Builder.Insert(WideBO);
  WideBO->copyIRFlags(NarrowBO);
  return WideBO;
}
  1086. WidenIV::ExtendKind WidenIV::getExtendKind(Instruction *I) {
  1087. auto It = ExtendKindMap.find(I);
  1088. assert(It != ExtendKindMap.end() && "Instruction not yet extended!");
  1089. return It->second;
  1090. }
  1091. const SCEV *WidenIV::getSCEVByOpCode(const SCEV *LHS, const SCEV *RHS,
  1092. unsigned OpCode) const {
  1093. switch (OpCode) {
  1094. case Instruction::Add:
  1095. return SE->getAddExpr(LHS, RHS);
  1096. case Instruction::Sub:
  1097. return SE->getMinusSCEV(LHS, RHS);
  1098. case Instruction::Mul:
  1099. return SE->getMulExpr(LHS, RHS);
  1100. case Instruction::UDiv:
  1101. return SE->getUDivExpr(LHS, RHS);
  1102. default:
  1103. llvm_unreachable("Unsupported opcode.");
  1104. };
  1105. }
/// No-wrap operations can transfer sign extension of their result to their
/// operands. Generate the SCEV value for the widened operation without
/// actually modifying the IR yet. If the expression after extending the
/// operands is an AddRec for this loop, return the AddRec and the kind of
/// extension used.
WidenIV::WidenedRecTy
WidenIV::getExtendedOperandRecurrence(WidenIV::NarrowIVDefUse DU) {
  // Handle the common case of add<nsw/nuw>
  const unsigned OpCode = DU.NarrowUse->getOpcode();
  // Only Add/Sub/Mul instructions supported yet.
  if (OpCode != Instruction::Add && OpCode != Instruction::Sub &&
      OpCode != Instruction::Mul)
    return {nullptr, Unknown};

  // One operand (NarrowDef) has already been extended to WideDef. Now determine
  // if extending the other will lead to a recurrence.
  const unsigned ExtendOperIdx =
      DU.NarrowUse->getOperand(0) == DU.NarrowDef ? 1 : 0;
  assert(DU.NarrowUse->getOperand(1-ExtendOperIdx) == DU.NarrowDef && "bad DU");

  // The non-IV operand may only be extended with the same kind used for the
  // IV, and only when the operation carries the matching no-wrap flag that
  // justifies distributing the extension over it.
  const SCEV *ExtendOperExpr = nullptr;
  const OverflowingBinaryOperator *OBO =
    cast<OverflowingBinaryOperator>(DU.NarrowUse);
  ExtendKind ExtKind = getExtendKind(DU.NarrowDef);
  if (ExtKind == SignExtended && OBO->hasNoSignedWrap())
    ExtendOperExpr = SE->getSignExtendExpr(
      SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType);
  else if(ExtKind == ZeroExtended && OBO->hasNoUnsignedWrap())
    ExtendOperExpr = SE->getZeroExtendExpr(
      SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType);
  else
    return {nullptr, Unknown};

  // When creating this SCEV expr, don't apply the current operations NSW or NUW
  // flags. This instruction may be guarded by control flow that the no-wrap
  // behavior depends on. Non-control-equivalent instructions can be mapped to
  // the same SCEV expression, and it would be incorrect to transfer NSW/NUW
  // semantics to those operations.
  const SCEV *lhs = SE->getSCEV(DU.WideDef);
  const SCEV *rhs = ExtendOperExpr;

  // Let's swap operands to the initial order for the case of non-commutative
  // operations, like SUB. See PR21014.
  if (ExtendOperIdx == 0)
    std::swap(lhs, rhs);
  const SCEVAddRecExpr *AddRec =
      dyn_cast<SCEVAddRecExpr>(getSCEVByOpCode(lhs, rhs, OpCode));

  // Only an affine recurrence on this loop is useful to the widener.
  if (!AddRec || AddRec->getLoop() != L)
    return {nullptr, Unknown};

  return {AddRec, ExtKind};
}
/// Is this instruction potentially interesting for further simplification after
/// widening it's type? In other words, can the extend be safely hoisted out of
/// the loop with SCEV reducing the value to a recurrence on the same loop. If
/// so, return the extended recurrence and the kind of extension used. Otherwise
/// return {nullptr, Unknown}.
WidenIV::WidenedRecTy WidenIV::getWideRecurrence(WidenIV::NarrowIVDefUse DU) {
  if (!DU.NarrowUse->getType()->isIntegerTy())
    return {nullptr, Unknown};

  const SCEV *NarrowExpr = SE->getSCEV(DU.NarrowUse);
  if (SE->getTypeSizeInBits(NarrowExpr->getType()) >=
      SE->getTypeSizeInBits(WideType)) {
    // NarrowUse implicitly widens its operand. e.g. a gep with a narrow
    // index. So don't follow this use.
    return {nullptr, Unknown};
  }

  const SCEV *WideExpr;
  ExtendKind ExtKind;
  if (DU.NeverNegative) {
    // A never-negative value extends identically either way, so try sext
    // first and fall back to zext if sext does not yield an AddRec.
    WideExpr = SE->getSignExtendExpr(NarrowExpr, WideType);
    if (isa<SCEVAddRecExpr>(WideExpr))
      ExtKind = SignExtended;
    else {
      WideExpr = SE->getZeroExtendExpr(NarrowExpr, WideType);
      ExtKind = ZeroExtended;
    }
  } else if (getExtendKind(DU.NarrowDef) == SignExtended) {
    // Otherwise the extension kind must match the one used for the IV def.
    WideExpr = SE->getSignExtendExpr(NarrowExpr, WideType);
    ExtKind = SignExtended;
  } else {
    WideExpr = SE->getZeroExtendExpr(NarrowExpr, WideType);
    ExtKind = ZeroExtended;
  }
  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(WideExpr);
  if (!AddRec || AddRec->getLoop() != L)
    return {nullptr, Unknown};
  return {AddRec, ExtKind};
}
  1190. /// This IV user cannot be widened. Replace this use of the original narrow IV
  1191. /// with a truncation of the new wide IV to isolate and eliminate the narrow IV.
  1192. static void truncateIVUse(WidenIV::NarrowIVDefUse DU, DominatorTree *DT,
  1193. LoopInfo *LI) {
  1194. auto *InsertPt = getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT, LI);
  1195. if (!InsertPt)
  1196. return;
  1197. LLVM_DEBUG(dbgs() << "INDVARS: Truncate IV " << *DU.WideDef << " for user "
  1198. << *DU.NarrowUse << "\n");
  1199. IRBuilder<> Builder(InsertPt);
  1200. Value *Trunc = Builder.CreateTrunc(DU.WideDef, DU.NarrowDef->getType());
  1201. DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, Trunc);
  1202. }
/// If the narrow use is a compare instruction, then widen the compare
//  (and possibly the other operand). The extend operation is hoisted into the
// loop preheader as far as possible.
bool WidenIV::widenLoopCompare(WidenIV::NarrowIVDefUse DU) {
  ICmpInst *Cmp = dyn_cast<ICmpInst>(DU.NarrowUse);
  if (!Cmp)
    return false;

  // We can legally widen the comparison in the following two cases:
  //
  //  - The signedness of the IV extension and comparison match
  //
  //  - The narrow IV is always positive (and thus its sign extension is equal
  //    to its zero extension).  For instance, let's say we're zero extending
  //    %narrow for the following use
  //
  //      icmp slt i32 %narrow, %val   ... (A)
  //
  //    and %narrow is always positive.  Then
  //
  //      (A) == icmp slt i32 sext(%narrow), sext(%val)
  //          == icmp slt i32 zext(%narrow), sext(%val)
  bool IsSigned = getExtendKind(DU.NarrowDef) == SignExtended;
  if (!(DU.NeverNegative || IsSigned == Cmp->isSigned()))
    return false;

  // The operand being compared against the narrow IV.
  Value *Op = Cmp->getOperand(Cmp->getOperand(0) == DU.NarrowDef ? 1 : 0);
  unsigned CastWidth = SE->getTypeSizeInBits(Op->getType());
  unsigned IVWidth = SE->getTypeSizeInBits(WideType);
  assert(CastWidth <= IVWidth && "Unexpected width while widening compare.");

  // Widen the compare instruction.
  auto *InsertPt = getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT, LI);
  if (!InsertPt)
    return false;
  IRBuilder<> Builder(InsertPt);
  DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, DU.WideDef);

  // Widen the other operand of the compare, if necessary. The extension kind
  // follows the comparison's signedness.
  if (CastWidth < IVWidth) {
    Value *ExtOp = createExtendInst(Op, WideType, Cmp->isSigned(), Cmp);
    DU.NarrowUse->replaceUsesOfWith(Op, ExtOp);
  }
  return true;
}
// The widenIVUse avoids generating trunc by evaluating the use as AddRec, this
// will not work when:
//    1) SCEV traces back to an instruction inside the loop that SCEV can not
// expand, eg. add %indvar, (load %addr)
//    2) SCEV finds a loop variant, eg. add %indvar, %loopvariant
// While SCEV fails to avoid trunc, we can still try to use instruction
// combining approach to prove trunc is not required. This can be further
// extended with other instruction combining checks, but for now we handle the
// following case (sub can be "add" and "mul", "nsw + sext" can be "nuw + zext")
//
// Src:
//   %c = sub nsw %b, %indvar
//   %d = sext %c to i64
// Dst:
//   %indvar.ext1 = sext %indvar to i64
//   %m = sext %b to i64
//   %d = sub nsw i64 %m, %indvar.ext1
// Therefore, as long as the result of add/sub/mul is extended to wide type, no
// trunc is required regardless of how %b is generated. This pattern is common
// when calculating address in 64 bit architecture
bool WidenIV::widenWithVariantUse(WidenIV::NarrowIVDefUse DU) {
  Instruction *NarrowUse = DU.NarrowUse;
  Instruction *NarrowDef = DU.NarrowDef;
  Instruction *WideDef = DU.WideDef;

  // Handle the common case of add<nsw/nuw>
  const unsigned OpCode = NarrowUse->getOpcode();
  // Only Add/Sub/Mul instructions are supported.
  if (OpCode != Instruction::Add && OpCode != Instruction::Sub &&
      OpCode != Instruction::Mul)
    return false;

  // The operand that is not defined by NarrowDef of DU. Let's call it the
  // other operand.
  assert((NarrowUse->getOperand(0) == NarrowDef ||
          NarrowUse->getOperand(1) == NarrowDef) &&
         "bad DU");

  const OverflowingBinaryOperator *OBO =
    cast<OverflowingBinaryOperator>(NarrowUse);
  ExtendKind ExtKind = getExtendKind(NarrowDef);
  // Widening without a trunc is only sound when the arithmetic provably does
  // not wrap in the direction matching the extension kind.
  bool CanSignExtend = ExtKind == SignExtended && OBO->hasNoSignedWrap();
  bool CanZeroExtend = ExtKind == ZeroExtended && OBO->hasNoUnsignedWrap();
  // Extension kind used for the non-IV operand; may be flipped below for the
  // "add of known-negative RHS is really sub nuw" special case.
  auto AnotherOpExtKind = ExtKind;

  // Check that all uses are either:
  // - narrow def (in case of we are widening the IV increment);
  // - single-input LCSSA Phis;
  // - comparison of the chosen type;
  // - extend of the chosen type (raison d'etre).
  SmallVector<Instruction *, 4> ExtUsers;
  SmallVector<PHINode *, 4> LCSSAPhiUsers;
  SmallVector<ICmpInst *, 4> ICmpUsers;
  for (Use &U : NarrowUse->uses()) {
    Instruction *User = cast<Instruction>(U.getUser());
    if (User == NarrowDef)
      continue;
    if (!L->contains(User)) {
      auto *LCSSAPhi = cast<PHINode>(User);
      // Make sure there is only 1 input, so that we don't have to split
      // critical edges.
      if (LCSSAPhi->getNumOperands() != 1)
        return false;
      LCSSAPhiUsers.push_back(LCSSAPhi);
      continue;
    }
    if (auto *ICmp = dyn_cast<ICmpInst>(User)) {
      auto Pred = ICmp->getPredicate();
      // We have 3 types of predicates: signed, unsigned and equality
      // predicates. For equality, it's legal to widen icmp for either sign and
      // zero extend. For sign extend, we can also do so for signed predicates,
      // likewise for zero extend we can widen icmp for unsigned predicates.
      if (ExtKind == ZeroExtended && ICmpInst::isSigned(Pred))
        return false;
      if (ExtKind == SignExtended && ICmpInst::isUnsigned(Pred))
        return false;
      ICmpUsers.push_back(ICmp);
      continue;
    }
    // Anything else must be an extend of the chosen kind to the wide type.
    if (ExtKind == SignExtended)
      User = dyn_cast<SExtInst>(User);
    else
      User = dyn_cast<ZExtInst>(User);
    if (!User || User->getType() != WideType)
      return false;
    ExtUsers.push_back(User);
  }
  // No ext users at all: the narrow op is dead once the IV is widened.
  if (ExtUsers.empty()) {
    DeadInsts.emplace_back(NarrowUse);
    return true;
  }

  // We'll prove some facts that should be true in the context of ext users. If
  // there is no users, we are done now. If there are some, pick their common
  // dominator as context.
  const Instruction *CtxI = findCommonDominator(ExtUsers, *DT);

  if (!CanSignExtend && !CanZeroExtend) {
    // Because InstCombine turns 'sub nuw' to 'add' losing the no-wrap flag, we
    // will most likely not see it. Let's try to prove it.
    if (OpCode != Instruction::Add)
      return false;
    if (ExtKind != ZeroExtended)
      return false;
    const SCEV *LHS = SE->getSCEV(OBO->getOperand(0));
    const SCEV *RHS = SE->getSCEV(OBO->getOperand(1));
    // TODO: Support case for NarrowDef = NarrowUse->getOperand(1).
    if (NarrowUse->getOperand(0) != NarrowDef)
      return false;
    if (!SE->isKnownNegative(RHS))
      return false;
    // LHS >= -RHS at the ext users' context means LHS + RHS never goes below
    // zero, i.e. the add is really a 'sub nuw'.
    bool ProvedSubNUW = SE->isKnownPredicateAt(ICmpInst::ICMP_UGE, LHS,
                                               SE->getNegativeSCEV(RHS), CtxI);
    if (!ProvedSubNUW)
      return false;
    // In fact, our 'add' is 'sub nuw'. We will need to widen the 2nd operand as
    // neg(zext(neg(op))), which is basically sext(op).
    AnotherOpExtKind = SignExtended;
  }

  // Verifying that Defining operand is an AddRec
  const SCEV *Op1 = SE->getSCEV(WideDef);
  const SCEVAddRecExpr *AddRecOp1 = dyn_cast<SCEVAddRecExpr>(Op1);
  if (!AddRecOp1 || AddRecOp1->getLoop() != L)
    return false;

  LLVM_DEBUG(dbgs() << "Cloning arithmetic IVUser: " << *NarrowUse << "\n");

  // Generating a widening use instruction. The IV operand is replaced by the
  // already-widened def; the other operand gets an explicit extend.
  Value *LHS = (NarrowUse->getOperand(0) == NarrowDef)
                   ? WideDef
                   : createExtendInst(NarrowUse->getOperand(0), WideType,
                                      AnotherOpExtKind, NarrowUse);
  Value *RHS = (NarrowUse->getOperand(1) == NarrowDef)
                   ? WideDef
                   : createExtendInst(NarrowUse->getOperand(1), WideType,
                                      AnotherOpExtKind, NarrowUse);

  auto *NarrowBO = cast<BinaryOperator>(NarrowUse);
  auto *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS, RHS,
                                        NarrowBO->getName());
  IRBuilder<> Builder(NarrowUse);
  Builder.Insert(WideBO);
  WideBO->copyIRFlags(NarrowBO);
  ExtendKindMap[NarrowUse] = ExtKind;

  // Ext users become uses of the wide op directly.
  for (Instruction *User : ExtUsers) {
    assert(User->getType() == WideType && "Checked before!");
    LLVM_DEBUG(dbgs() << "INDVARS: eliminating " << *User << " replaced by "
                      << *WideBO << "\n");
    ++NumElimExt;
    User->replaceAllUsesWith(WideBO);
    DeadInsts.emplace_back(User);
  }

  // For each LCSSA phi, build a wide phi in the exit block and truncate it
  // back to the original type for the phi's remaining users.
  for (PHINode *User : LCSSAPhiUsers) {
    assert(User->getNumOperands() == 1 && "Checked before!");
    Builder.SetInsertPoint(User);
    auto *WidePN =
        Builder.CreatePHI(WideBO->getType(), 1, User->getName() + ".wide");
    BasicBlock *LoopExitingBlock = User->getParent()->getSinglePredecessor();
    assert(LoopExitingBlock && L->contains(LoopExitingBlock) &&
           "Not a LCSSA Phi?");
    WidePN->addIncoming(WideBO, LoopExitingBlock);
    Builder.SetInsertPoint(&*User->getParent()->getFirstInsertionPt());
    auto *TruncPN = Builder.CreateTrunc(WidePN, User->getType());
    User->replaceAllUsesWith(TruncPN);
    DeadInsts.emplace_back(User);
  }

  // Compares are rebuilt with both operands extended to the wide type; the
  // predicate legality was checked when the users were collected.
  for (ICmpInst *User : ICmpUsers) {
    Builder.SetInsertPoint(User);
    auto ExtendedOp = [&](Value * V)->Value * {
      if (V == NarrowUse)
        return WideBO;
      if (ExtKind == ZeroExtended)
        return Builder.CreateZExt(V, WideBO->getType());
      else
        return Builder.CreateSExt(V, WideBO->getType());
    };
    auto Pred = User->getPredicate();
    auto *LHS = ExtendedOp(User->getOperand(0));
    auto *RHS = ExtendedOp(User->getOperand(1));
    auto *WideCmp =
        Builder.CreateICmp(Pred, LHS, RHS, User->getName() + ".wide");
    User->replaceAllUsesWith(WideCmp);
    DeadInsts.emplace_back(User);
  }

  return true;
}
/// Determine whether an individual user of the narrow IV can be widened. If so,
/// return the wide clone of the user.
///
/// Returning nullptr means the def-use chain is not followed past this use
/// (either the use was fully handled here or it was isolated with a trunc).
Instruction *WidenIV::widenIVUse(WidenIV::NarrowIVDefUse DU, SCEVExpander &Rewriter) {
  assert(ExtendKindMap.count(DU.NarrowDef) &&
         "Should already know the kind of extension used to widen NarrowDef");

  // Stop traversing the def-use chain at inner-loop phis or post-loop phis.
  if (PHINode *UsePhi = dyn_cast<PHINode>(DU.NarrowUse)) {
    if (LI->getLoopFor(UsePhi->getParent()) != L) {
      // For LCSSA phis, sink the truncate outside the loop.
      // After SimplifyCFG most loop exit targets have a single predecessor.
      // Otherwise fall back to a truncate within the loop.
      if (UsePhi->getNumOperands() != 1)
        truncateIVUse(DU, DT, LI);
      else {
        // Widening the PHI requires us to insert a trunc. The logical place
        // for this trunc is in the same BB as the PHI. This is not possible if
        // the BB is terminated by a catchswitch.
        if (isa<CatchSwitchInst>(UsePhi->getParent()->getTerminator()))
          return nullptr;

        PHINode *WidePhi =
          PHINode::Create(DU.WideDef->getType(), 1, UsePhi->getName() + ".wide",
                          UsePhi);
        WidePhi->addIncoming(DU.WideDef, UsePhi->getIncomingBlock(0));
        IRBuilder<> Builder(&*WidePhi->getParent()->getFirstInsertionPt());
        Value *Trunc = Builder.CreateTrunc(WidePhi, DU.NarrowDef->getType());
        UsePhi->replaceAllUsesWith(Trunc);
        DeadInsts.emplace_back(UsePhi);
        LLVM_DEBUG(dbgs() << "INDVARS: Widen lcssa phi " << *UsePhi << " to "
                          << *WidePhi << "\n");
      }
      return nullptr;
    }
  }

  // This narrow use can be widened by a sext if it's non-negative or its narrow
  // def was widened by a sext. Same for zext.
  auto canWidenBySExt = [&]() {
    return DU.NeverNegative || getExtendKind(DU.NarrowDef) == SignExtended;
  };
  auto canWidenByZExt = [&]() {
    return DU.NeverNegative || getExtendKind(DU.NarrowDef) == ZeroExtended;
  };

  // Our raison d'etre! Eliminate sign and zero extension.
  if ((isa<SExtInst>(DU.NarrowUse) && canWidenBySExt()) ||
      (isa<ZExtInst>(DU.NarrowUse) && canWidenByZExt())) {
    Value *NewDef = DU.WideDef;
    if (DU.NarrowUse->getType() != WideType) {
      unsigned CastWidth = SE->getTypeSizeInBits(DU.NarrowUse->getType());
      unsigned IVWidth = SE->getTypeSizeInBits(WideType);
      if (CastWidth < IVWidth) {
        // The cast isn't as wide as the IV, so insert a Trunc.
        IRBuilder<> Builder(DU.NarrowUse);
        NewDef = Builder.CreateTrunc(DU.WideDef, DU.NarrowUse->getType());
      }
      else {
        // A wider extend was hidden behind a narrower one. This may induce
        // another round of IV widening in which the intermediate IV becomes
        // dead. It should be very rare.
        LLVM_DEBUG(dbgs() << "INDVARS: New IV " << *WidePhi
                          << " not wide enough to subsume " << *DU.NarrowUse
                          << "\n");
        DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, DU.WideDef);
        NewDef = DU.NarrowUse;
      }
    }
    if (NewDef != DU.NarrowUse) {
      LLVM_DEBUG(dbgs() << "INDVARS: eliminating " << *DU.NarrowUse
                        << " replaced by " << *DU.WideDef << "\n");
      ++NumElimExt;
      DU.NarrowUse->replaceAllUsesWith(NewDef);
      DeadInsts.emplace_back(DU.NarrowUse);
    }
    // Now that the extend is gone, we want to expose its uses for potential
    // further simplification. We don't need to directly inform SimplifyIVUsers
    // of the new users, because their parent IV will be processed later as a
    // new loop phi. If we preserved IVUsers analysis, we would also want to
    // push the uses of WideDef here.

    // No further widening is needed. The deceased [sz]ext had done it for us.
    return nullptr;
  }

  // Does this user itself evaluate to a recurrence after widening?
  WidenedRecTy WideAddRec = getExtendedOperandRecurrence(DU);
  if (!WideAddRec.first)
    WideAddRec = getWideRecurrence(DU);

  assert((WideAddRec.first == nullptr) == (WideAddRec.second == Unknown));
  if (!WideAddRec.first) {
    // If use is a loop condition, try to promote the condition instead of
    // truncating the IV first.
    if (widenLoopCompare(DU))
      return nullptr;

    // We are here about to generate a truncate instruction that may hurt
    // performance because the scalar evolution expression computed earlier
    // in WideAddRec.first does not indicate a polynomial induction expression.
    // In that case, look at the operands of the use instruction to determine
    // if we can still widen the use instead of truncating its operand.
    if (widenWithVariantUse(DU))
      return nullptr;

    // This user does not evaluate to a recurrence after widening, so don't
    // follow it. Instead insert a Trunc to kill off the original use,
    // eventually isolating the original narrow IV so it can be removed.
    truncateIVUse(DU, DT, LI);
    return nullptr;
  }
  // Assume block terminators cannot evaluate to a recurrence. We can't insert
  // a Trunc after a terminator if there happens to be a critical edge.
  assert(DU.NarrowUse != DU.NarrowUse->getParent()->getTerminator() &&
         "SCEV is not expected to evaluate a block terminator");

  // Reuse the IV increment that SCEVExpander created as long as it dominates
  // NarrowUse.
  Instruction *WideUse = nullptr;
  if (WideAddRec.first == WideIncExpr &&
      Rewriter.hoistIVInc(WideInc, DU.NarrowUse))
    WideUse = WideInc;
  else {
    WideUse = cloneIVUser(DU, WideAddRec.first);
    if (!WideUse)
      return nullptr;
  }
  // Evaluation of WideAddRec ensured that the narrow expression could be
  // extended outside the loop without overflow. This suggests that the wide use
  // evaluates to the same expression as the extended narrow use, but doesn't
  // absolutely guarantee it. Hence the following failsafe check. In rare cases
  // where it fails, we simply throw away the newly created wide use.
  if (WideAddRec.first != SE->getSCEV(WideUse)) {
    LLVM_DEBUG(dbgs() << "Wide use expression mismatch: " << *WideUse << ": "
                      << *SE->getSCEV(WideUse) << " != " << *WideAddRec.first
                      << "\n");
    DeadInsts.emplace_back(WideUse);
    return nullptr;
  }

  // If we reached this point then we are going to replace
  // DU.NarrowUse with WideUse. Reattach DbgValue then.
  replaceAllDbgUsesWith(*DU.NarrowUse, *WideUse, *WideUse, *DT);

  ExtendKindMap[DU.NarrowUse] = WideAddRec.second;
  // Returning WideUse pushes it on the worklist.
  return WideUse;
}
  1557. /// Add eligible users of NarrowDef to NarrowIVUsers.
  1558. void WidenIV::pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef) {
  1559. const SCEV *NarrowSCEV = SE->getSCEV(NarrowDef);
  1560. bool NonNegativeDef =
  1561. SE->isKnownPredicate(ICmpInst::ICMP_SGE, NarrowSCEV,
  1562. SE->getZero(NarrowSCEV->getType()));
  1563. for (User *U : NarrowDef->users()) {
  1564. Instruction *NarrowUser = cast<Instruction>(U);
  1565. // Handle data flow merges and bizarre phi cycles.
  1566. if (!Widened.insert(NarrowUser).second)
  1567. continue;
  1568. bool NonNegativeUse = false;
  1569. if (!NonNegativeDef) {
  1570. // We might have a control-dependent range information for this context.
  1571. if (auto RangeInfo = getPostIncRangeInfo(NarrowDef, NarrowUser))
  1572. NonNegativeUse = RangeInfo->getSignedMin().isNonNegative();
  1573. }
  1574. NarrowIVUsers.emplace_back(NarrowDef, NarrowUser, WideDef,
  1575. NonNegativeDef || NonNegativeUse);
  1576. }
  1577. }
/// Process a single induction variable. First use the SCEVExpander to create a
/// wide induction variable that evaluates to the same recurrence as the
/// original narrow IV. Then use a worklist to forward traverse the narrow IV's
/// def-use chain. After widenIVUse has processed all interesting IV users, the
/// narrow IV will be isolated for removal by DeleteDeadPHIs.
///
/// It would be simpler to delete uses as they are processed, but we must avoid
/// invalidating SCEV expressions.
///
/// Returns the widened phi, or nullptr if OrigPhi could not be widened.
PHINode *WidenIV::createWideIV(SCEVExpander &Rewriter) {
  // Is this phi an induction variable?
  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(OrigPhi));
  if (!AddRec)
    return nullptr;

  // Widen the induction variable expression.
  const SCEV *WideIVExpr = getExtendKind(OrigPhi) == SignExtended
                               ? SE->getSignExtendExpr(AddRec, WideType)
                               : SE->getZeroExtendExpr(AddRec, WideType);

  assert(SE->getEffectiveSCEVType(WideIVExpr->getType()) == WideType &&
         "Expect the new IV expression to preserve its type");

  // Can the IV be extended outside the loop without overflow? If the extended
  // expression is no longer an AddRec of this loop, widening is not possible.
  AddRec = dyn_cast<SCEVAddRecExpr>(WideIVExpr);
  if (!AddRec || AddRec->getLoop() != L)
    return nullptr;

  // An AddRec must have loop-invariant operands. Since this AddRec is
  // materialized by a loop header phi, the expression cannot have any post-loop
  // operands, so they must dominate the loop header.
  assert(
      SE->properlyDominates(AddRec->getStart(), L->getHeader()) &&
      SE->properlyDominates(AddRec->getStepRecurrence(*SE), L->getHeader()) &&
      "Loop header phi recurrence inputs do not dominate the loop");

  // Iterate over IV uses (including transitive ones) looking for IV increments
  // of the form 'add nsw %iv, <const>'. For each increment and each use of
  // the increment calculate control-dependent range information basing on
  // dominating conditions inside of the loop (e.g. a range check inside of the
  // loop). Calculated ranges are stored in PostIncRangeInfos map.
  //
  // Control-dependent range information is later used to prove that a narrow
  // definition is not negative (see pushNarrowIVUsers). It's difficult to do
  // this on demand because when pushNarrowIVUsers needs this information some
  // of the dominating conditions might be already widened.
  if (UsePostIncrementRanges)
    calculatePostIncRanges(OrigPhi);

  // The rewriter provides a value for the desired IV expression. This may
  // either find an existing phi or materialize a new one. Either way, we
  // expect a well-formed cyclic phi-with-increments. i.e. any operand not part
  // of the phi-SCC dominates the loop entry.
  Instruction *InsertPt = &*L->getHeader()->getFirstInsertionPt();
  Value *ExpandInst = Rewriter.expandCodeFor(AddRec, WideType, InsertPt);
  // If the wide phi is not a phi node, for example a cast node, like bitcast,
  // inttoptr, ptrtoint, just skip for now.
  if (!(WidePhi = dyn_cast<PHINode>(ExpandInst))) {
    // If the cast node is an inserted instruction without any user, we should
    // remove it to make sure the pass doesn't touch the function as we can not
    // widen the phi.
    if (ExpandInst->hasNUses(0) &&
        Rewriter.isInsertedInstruction(cast<Instruction>(ExpandInst)))
      DeadInsts.emplace_back(ExpandInst);
    return nullptr;
  }

  // Remembering the WideIV increment generated by SCEVExpander allows
  // widenIVUse to reuse it when widening the narrow IV's increment. We don't
  // employ a general reuse mechanism because the call above is the only call to
  // SCEVExpander. Henceforth, we produce 1-to-1 narrow to wide uses.
  if (BasicBlock *LatchBlock = L->getLoopLatch()) {
    WideInc =
      cast<Instruction>(WidePhi->getIncomingValueForBlock(LatchBlock));
    WideIncExpr = SE->getSCEV(WideInc);
    // Propagate the debug location associated with the original loop increment
    // to the new (widened) increment.
    auto *OrigInc =
      cast<Instruction>(OrigPhi->getIncomingValueForBlock(LatchBlock));
    WideInc->setDebugLoc(OrigInc->getDebugLoc());
  }

  LLVM_DEBUG(dbgs() << "Wide IV: " << *WidePhi << "\n");
  ++NumWidened;

  // Traverse the def-use chain using a worklist starting at the original IV.
  assert(Widened.empty() && NarrowIVUsers.empty() && "expect initial state");

  Widened.insert(OrigPhi);
  pushNarrowIVUsers(OrigPhi, WidePhi);

  while (!NarrowIVUsers.empty()) {
    WidenIV::NarrowIVDefUse DU = NarrowIVUsers.pop_back_val();

    // Process a def-use edge. This may replace the use, so don't hold a
    // use_iterator across it.
    Instruction *WideUse = widenIVUse(DU, Rewriter);

    // Follow all def-use edges from the previous narrow use.
    if (WideUse)
      pushNarrowIVUsers(DU.NarrowUse, WideUse);

    // widenIVUse may have removed the def-use edge.
    if (DU.NarrowDef->use_empty())
      DeadInsts.emplace_back(DU.NarrowDef);
  }

  // Attach any debug information to the new PHI.
  replaceAllDbgUsesWith(*OrigPhi, *WidePhi, *WidePhi, *DT);

  return WidePhi;
}
/// Calculates control-dependent range for the given def at the given context
/// by looking at dominating conditions inside of the loop
///
/// Only handles defs of the form 'add nsw %x, <non-negative const>'. Ranges
/// found are recorded via updatePostIncRangeInfo and later consumed by
/// pushNarrowIVUsers.
void WidenIV::calculatePostIncRange(Instruction *NarrowDef,
                                    Instruction *NarrowUser) {
  using namespace llvm::PatternMatch;

  Value *NarrowDefLHS;
  const APInt *NarrowDefRHS;
  if (!match(NarrowDef, m_NSWAdd(m_Value(NarrowDefLHS),
                                 m_APInt(NarrowDefRHS))) ||
      !NarrowDefRHS->isNonNegative())
    return;

  // Given a condition known to hold (or its inverse, when TrueDest is false),
  // constrain NarrowDefLHS and shift the range by the nsw-added constant.
  auto UpdateRangeFromCondition = [&] (Value *Condition,
                                       bool TrueDest) {
    CmpInst::Predicate Pred;
    Value *CmpRHS;
    if (!match(Condition, m_ICmp(Pred, m_Specific(NarrowDefLHS),
                                 m_Value(CmpRHS))))
      return;

    CmpInst::Predicate P =
        TrueDest ? Pred : CmpInst::getInversePredicate(Pred);

    auto CmpRHSRange = SE->getSignedRange(SE->getSCEV(CmpRHS));
    auto CmpConstrainedLHSRange =
        ConstantRange::makeAllowedICmpRegion(P, CmpRHSRange);
    auto NarrowDefRange = CmpConstrainedLHSRange.addWithNoWrap(
        *NarrowDefRHS, OverflowingBinaryOperator::NoSignedWrap);

    updatePostIncRangeInfo(NarrowDef, NarrowUser, NarrowDefRange);
  };

  // Guard intrinsics preceding Ctx in its block also imply their condition.
  auto UpdateRangeFromGuards = [&](Instruction *Ctx) {
    if (!HasGuards)
      return;

    for (Instruction &I : make_range(Ctx->getIterator().getReverse(),
                                     Ctx->getParent()->rend())) {
      Value *C = nullptr;
      if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>(m_Value(C))))
        UpdateRangeFromCondition(C, /*TrueDest=*/true);
    }
  };

  UpdateRangeFromGuards(NarrowUser);

  BasicBlock *NarrowUserBB = NarrowUser->getParent();
  // If NarrowUserBB is statically unreachable asking dominator queries may
  // yield surprising results. (e.g. the block may not have a dom tree node)
  if (!DT->isReachableFromEntry(NarrowUserBB))
    return;

  // Walk up the dominator tree within the loop, harvesting branch conditions
  // and guards that are guaranteed to hold at NarrowUser.
  for (auto *DTB = (*DT)[NarrowUserBB]->getIDom();
       L->contains(DTB->getBlock());
       DTB = DTB->getIDom()) {
    auto *BB = DTB->getBlock();
    auto *TI = BB->getTerminator();
    UpdateRangeFromGuards(TI);

    auto *BI = dyn_cast<BranchInst>(TI);
    if (!BI || !BI->isConditional())
      continue;

    auto *TrueSuccessor = BI->getSuccessor(0);
    auto *FalseSuccessor = BI->getSuccessor(1);

    // A branch condition holds at NarrowUser only if the corresponding edge
    // (taken alone) dominates the user's block.
    auto DominatesNarrowUser = [this, NarrowUser] (BasicBlockEdge BBE) {
      return BBE.isSingleEdge() &&
             DT->dominates(BBE, NarrowUser->getParent());
    };

    if (DominatesNarrowUser(BasicBlockEdge(BB, TrueSuccessor)))
      UpdateRangeFromCondition(BI->getCondition(), /*TrueDest=*/true);

    if (DominatesNarrowUser(BasicBlockEdge(BB, FalseSuccessor)))
      UpdateRangeFromCondition(BI->getCondition(), /*TrueDest=*/false);
  }
}
  1737. /// Calculates PostIncRangeInfos map for the given IV
  1738. void WidenIV::calculatePostIncRanges(PHINode *OrigPhi) {
  1739. SmallPtrSet<Instruction *, 16> Visited;
  1740. SmallVector<Instruction *, 6> Worklist;
  1741. Worklist.push_back(OrigPhi);
  1742. Visited.insert(OrigPhi);
  1743. while (!Worklist.empty()) {
  1744. Instruction *NarrowDef = Worklist.pop_back_val();
  1745. for (Use &U : NarrowDef->uses()) {
  1746. auto *NarrowUser = cast<Instruction>(U.getUser());
  1747. // Don't go looking outside the current loop.
  1748. auto *NarrowUserLoop = (*LI)[NarrowUser->getParent()];
  1749. if (!NarrowUserLoop || !L->contains(NarrowUserLoop))
  1750. continue;
  1751. if (!Visited.insert(NarrowUser).second)
  1752. continue;
  1753. Worklist.push_back(NarrowUser);
  1754. calculatePostIncRange(NarrowDef, NarrowUser);
  1755. }
  1756. }
  1757. }
  1758. PHINode *llvm::createWideIV(const WideIVInfo &WI,
  1759. LoopInfo *LI, ScalarEvolution *SE, SCEVExpander &Rewriter,
  1760. DominatorTree *DT, SmallVectorImpl<WeakTrackingVH> &DeadInsts,
  1761. unsigned &NumElimExt, unsigned &NumWidened,
  1762. bool HasGuards, bool UsePostIncrementRanges) {
  1763. WidenIV Widener(WI, LI, SE, DT, DeadInsts, HasGuards, UsePostIncrementRanges);
  1764. PHINode *WidePHI = Widener.createWideIV(Rewriter);
  1765. NumElimExt = Widener.getNumElimExt();
  1766. NumWidened = Widener.getNumWidened();
  1767. return WidePHI;
  1768. }