SimplifyIndVar.cpp
  1. //===-- SimplifyIndVar.cpp - Induction variable simplification ------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file implements induction variable simplification. It does
  10. // not define any actual pass or policy, but provides a single function to
  11. // simplify a loop's induction variables based on ScalarEvolution.
  12. //
  13. //===----------------------------------------------------------------------===//
  14. #include "llvm/Transforms/Utils/SimplifyIndVar.h"
  15. #include "llvm/ADT/STLExtras.h"
  16. #include "llvm/ADT/SmallVector.h"
  17. #include "llvm/ADT/Statistic.h"
  18. #include "llvm/Analysis/LoopInfo.h"
  19. #include "llvm/IR/DataLayout.h"
  20. #include "llvm/IR/Dominators.h"
  21. #include "llvm/IR/IRBuilder.h"
  22. #include "llvm/IR/Instructions.h"
  23. #include "llvm/IR/IntrinsicInst.h"
  24. #include "llvm/IR/PatternMatch.h"
  25. #include "llvm/Support/Debug.h"
  26. #include "llvm/Support/raw_ostream.h"
  27. #include "llvm/Transforms/Utils/Local.h"
  28. #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
  29. using namespace llvm;
  30. #define DEBUG_TYPE "indvars"
// Pass statistics, reported under -stats. Each counter is bumped by the
// corresponding simplification routine below.
STATISTIC(NumElimIdentity, "Number of IV identities eliminated");
STATISTIC(NumElimOperand, "Number of IV operands folded into a use");
STATISTIC(NumFoldedUser, "Number of IV users folded into a constant");
STATISTIC(NumElimRem , "Number of IV remainder operations eliminated");
STATISTIC(
    NumSimplifiedSDiv,
    "Number of IV signed division operations converted to unsigned division");
STATISTIC(
    NumSimplifiedSRem,
    "Number of IV signed remainder operations converted to unsigned remainder");
STATISTIC(NumElimCmp , "Number of IV comparisons eliminated");
namespace {
  /// This is a utility for simplifying induction variables
  /// based on ScalarEvolution. It is the primary instrument of the
  /// IndvarSimplify pass, but it may also be directly invoked to cleanup after
  /// other loop passes that preserve SCEV.
  class SimplifyIndvar {
    Loop *L;                                    // Loop being simplified.
    LoopInfo *LI;                               // Required analysis (asserted non-null).
    ScalarEvolution *SE;                        // Source of truth for IV reasoning.
    DominatorTree *DT;
    const TargetTransformInfo *TTI;
    SCEVExpander &Rewriter;                     // Used to materialize SCEV expressions.
    // Instructions made dead by simplification; the caller deletes them.
    SmallVectorImpl<WeakTrackingVH> &DeadInsts;
    bool Changed;                               // Set once any rewrite happens.

  public:
    SimplifyIndvar(Loop *Loop, ScalarEvolution *SE, DominatorTree *DT,
                   LoopInfo *LI, const TargetTransformInfo *TTI,
                   SCEVExpander &Rewriter,
                   SmallVectorImpl<WeakTrackingVH> &Dead)
        : L(Loop), LI(LI), SE(SE), DT(DT), TTI(TTI), Rewriter(Rewriter),
          DeadInsts(Dead), Changed(false) {
      assert(LI && "IV simplification requires LoopInfo");
    }

    /// Whether any simplification performed so far modified the IR.
    bool hasChanged() const { return Changed; }

    /// Iteratively perform simplification on a worklist of users of the
    /// specified induction variable. This is the top-level driver that applies
    /// all simplifications to users of an IV.
    void simplifyUsers(PHINode *CurrIV, IVVisitor *V = nullptr);

    // Individual simplifications; see the definitions below for contracts.
    Value *foldIVUser(Instruction *UseInst, Instruction *IVOperand);
    bool eliminateIdentitySCEV(Instruction *UseInst, Instruction *IVOperand);
    bool replaceIVUserWithLoopInvariant(Instruction *UseInst);
    bool eliminateOverflowIntrinsic(WithOverflowInst *WO);
    bool eliminateSaturatingIntrinsic(SaturatingInst *SI);
    bool eliminateTrunc(TruncInst *TI);
    bool eliminateIVUser(Instruction *UseInst, Instruction *IVOperand);
    bool makeIVComparisonInvariant(ICmpInst *ICmp, Value *IVOperand);
    void eliminateIVComparison(ICmpInst *ICmp, Value *IVOperand);
    void simplifyIVRemainder(BinaryOperator *Rem, Value *IVOperand,
                             bool IsSigned);
    void replaceRemWithNumerator(BinaryOperator *Rem);
    void replaceRemWithNumeratorOrZero(BinaryOperator *Rem);
    void replaceSRemWithURem(BinaryOperator *Rem);
    bool eliminateSDiv(BinaryOperator *SDiv);
    bool strengthenOverflowingOperation(BinaryOperator *OBO, Value *IVOperand);
    bool strengthenRightShift(BinaryOperator *BO, Value *IVOperand);
  };
} // namespace
/// Fold an IV operand into its use. This removes increments of an
/// aligned IV when used by a instruction that ignores the low bits.
///
/// IVOperand is guaranteed SCEVable, but UseInst may not be.
///
/// Return the operand of IVOperand for this induction variable if IVOperand can
/// be folded (in case more folding opportunities have been exposed).
/// Otherwise return null.
Value *SimplifyIndvar::foldIVUser(Instruction *UseInst, Instruction *IVOperand) {
  Value *IVSrc = nullptr;
  // The IV is only folded out of the first operand slot (numerator/shiftee).
  const unsigned OperIdx = 0;
  const SCEV *FoldedExpr = nullptr;
  bool MustDropExactFlag = false;
  switch (UseInst->getOpcode()) {
  default:
    return nullptr;
  case Instruction::UDiv:
  case Instruction::LShr:
    // We're only interested in the case where we know something about
    // the numerator and have a constant denominator.
    if (IVOperand != UseInst->getOperand(OperIdx) ||
        !isa<ConstantInt>(UseInst->getOperand(1)))
      return nullptr;

    // Attempt to fold a binary operator with constant operand.
    // e.g. ((I + 1) >> 2) => I >> 2
    if (!isa<BinaryOperator>(IVOperand)
        || !isa<ConstantInt>(IVOperand->getOperand(1)))
      return nullptr;

    IVSrc = IVOperand->getOperand(0);
    // IVSrc must be the (SCEVable) IV, since the other operand is const.
    assert(SE->isSCEVable(IVSrc->getType()) && "Expect SCEVable IV operand");

    ConstantInt *D = cast<ConstantInt>(UseInst->getOperand(1));
    if (UseInst->getOpcode() == Instruction::LShr) {
      // Get a constant for the divisor. See createSCEV.
      // A shift by k bits is modeled as a udiv by 2^k.
      uint32_t BitWidth = cast<IntegerType>(UseInst->getType())->getBitWidth();
      if (D->getValue().uge(BitWidth))
        return nullptr;

      D = ConstantInt::get(UseInst->getContext(),
                           APInt::getOneBitSet(BitWidth, D->getZExtValue()));
    }
    FoldedExpr = SE->getUDivExpr(SE->getSCEV(IVSrc), SE->getSCEV(D));
    // We might have 'exact' flag set at this point which will no longer be
    // correct after we make the replacement.
    if (UseInst->isExact() &&
        SE->getSCEV(IVSrc) != SE->getMulExpr(FoldedExpr, SE->getSCEV(D)))
      MustDropExactFlag = true;
  }
  // We have something that might fold it's operand. Compare SCEVs.
  if (!SE->isSCEVable(UseInst->getType()))
    return nullptr;

  // Bypass the operand if SCEV can prove it has no effect.
  if (SE->getSCEV(UseInst) != FoldedExpr)
    return nullptr;

  LLVM_DEBUG(dbgs() << "INDVARS: Eliminated IV operand: " << *IVOperand
                    << " -> " << *UseInst << '\n');

  // Rewrite the use to skip over IVOperand, going straight to its source.
  UseInst->setOperand(OperIdx, IVSrc);
  assert(SE->getSCEV(UseInst) == FoldedExpr && "bad SCEV with folded oper");

  if (MustDropExactFlag)
    UseInst->dropPoisonGeneratingFlags();

  ++NumElimOperand;
  Changed = true;
  // The bypassed increment may now be dead; queue it for cleanup.
  if (IVOperand->use_empty())
    DeadInsts.emplace_back(IVOperand);
  return IVSrc;
}
/// Try to rewrite an IV comparison into an equivalent comparison of
/// loop-invariant operands, so later passes can hoist it. Only performs the
/// rewrite when both invariant operands are already available as IR values
/// (no new instructions are emitted). Returns true on success.
bool SimplifyIndvar::makeIVComparisonInvariant(ICmpInst *ICmp,
                                               Value *IVOperand) {
  unsigned IVOperIdx = 0;
  ICmpInst::Predicate Pred = ICmp->getPredicate();
  if (IVOperand != ICmp->getOperand(0)) {
    // Swapped
    assert(IVOperand == ICmp->getOperand(1) && "Can't find IVOperand");
    IVOperIdx = 1;
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  // Get the SCEVs for the ICmp operands (in the specific context of the
  // current loop)
  const Loop *ICmpLoop = LI->getLoopFor(ICmp->getParent());
  const SCEV *S = SE->getSCEVAtScope(ICmp->getOperand(IVOperIdx), ICmpLoop);
  const SCEV *X = SE->getSCEVAtScope(ICmp->getOperand(1 - IVOperIdx), ICmpLoop);

  // Only phi IVs are handled: the phi's incoming value from outside the loop
  // is used below as a free pre-existing expansion.
  auto *PN = dyn_cast<PHINode>(IVOperand);
  if (!PN)
    return false;
  // Ask SCEV for a predicate over loop-invariant operands that is equivalent
  // to "S Pred X" on every iteration.
  auto LIP = SE->getLoopInvariantPredicate(Pred, S, X, L);
  if (!LIP)
    return false;
  ICmpInst::Predicate InvariantPredicate = LIP->Pred;
  const SCEV *InvariantLHS = LIP->LHS;
  const SCEV *InvariantRHS = LIP->RHS;

  // Rewrite the comparison to a loop invariant comparison if it can be done
  // cheaply, where cheaply means "we don't need to emit any new
  // instructions".

  SmallDenseMap<const SCEV*, Value*> CheapExpansions;
  CheapExpansions[S] = ICmp->getOperand(IVOperIdx);
  CheapExpansions[X] = ICmp->getOperand(1 - IVOperIdx);

  // TODO: Support multiple entry loops? (We currently bail out of these in
  // the IndVarSimplify pass)
  if (auto *BB = L->getLoopPredecessor()) {
    const int Idx = PN->getBasicBlockIndex(BB);
    if (Idx >= 0) {
      // The phi's incoming value from the loop predecessor is loop-invariant
      // and already materialized, so it is a cheap expansion of its SCEV.
      Value *Incoming = PN->getIncomingValue(Idx);
      const SCEV *IncomingS = SE->getSCEV(Incoming);
      CheapExpansions[IncomingS] = Incoming;
    }
  }
  Value *NewLHS = CheapExpansions[InvariantLHS];
  Value *NewRHS = CheapExpansions[InvariantRHS];

  // Constants can always be materialized for free.
  if (!NewLHS)
    if (auto *ConstLHS = dyn_cast<SCEVConstant>(InvariantLHS))
      NewLHS = ConstLHS->getValue();
  if (!NewRHS)
    if (auto *ConstRHS = dyn_cast<SCEVConstant>(InvariantRHS))
      NewRHS = ConstRHS->getValue();

  if (!NewLHS || !NewRHS)
    // We could not find an existing value to replace either LHS or RHS.
    // Generating new instructions has subtler tradeoffs, so avoid doing that
    // for now.
    return false;

  LLVM_DEBUG(dbgs() << "INDVARS: Simplified comparison: " << *ICmp << '\n');
  ICmp->setPredicate(InvariantPredicate);
  ICmp->setOperand(0, NewLHS);
  ICmp->setOperand(1, NewRHS);
  return true;
}
/// SimplifyIVUsers helper for eliminating useless
/// comparisons against an induction variable.
///
/// Tries, in order: fold the compare to a constant true/false, rewrite it as
/// a loop-invariant compare, or canonicalize a signed compare of two known
/// non-negative values to its unsigned form. Bumps NumElimCmp and sets
/// Changed on any success.
void SimplifyIndvar::eliminateIVComparison(ICmpInst *ICmp, Value *IVOperand) {
  unsigned IVOperIdx = 0;
  ICmpInst::Predicate Pred = ICmp->getPredicate();
  // OriginalPred is the predicate as written in the IR; Pred may be swapped
  // below so that the IV is conceptually on the LHS.
  ICmpInst::Predicate OriginalPred = Pred;
  if (IVOperand != ICmp->getOperand(0)) {
    // Swapped
    assert(IVOperand == ICmp->getOperand(1) && "Can't find IVOperand");
    IVOperIdx = 1;
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  // Get the SCEVs for the ICmp operands (in the specific context of the
  // current loop)
  const Loop *ICmpLoop = LI->getLoopFor(ICmp->getParent());
  const SCEV *S = SE->getSCEVAtScope(ICmp->getOperand(IVOperIdx), ICmpLoop);
  const SCEV *X = SE->getSCEVAtScope(ICmp->getOperand(1 - IVOperIdx), ICmpLoop);

  // If the condition is always true or always false, replace it with
  // a constant value.
  if (SE->isKnownPredicate(Pred, S, X)) {
    ICmp->replaceAllUsesWith(ConstantInt::getTrue(ICmp->getContext()));
    DeadInsts.emplace_back(ICmp);
    LLVM_DEBUG(dbgs() << "INDVARS: Eliminated comparison: " << *ICmp << '\n');
  } else if (SE->isKnownPredicate(ICmpInst::getInversePredicate(Pred), S, X)) {
    ICmp->replaceAllUsesWith(ConstantInt::getFalse(ICmp->getContext()));
    DeadInsts.emplace_back(ICmp);
    LLVM_DEBUG(dbgs() << "INDVARS: Eliminated comparison: " << *ICmp << '\n');
  } else if (makeIVComparisonInvariant(ICmp, IVOperand)) {
    // fallthrough to end of function
  } else if (ICmpInst::isSigned(OriginalPred) &&
             SE->isKnownNonNegative(S) && SE->isKnownNonNegative(X)) {
    // If we were unable to make anything above, all we can is to canonicalize
    // the comparison hoping that it will open the doors for other
    // optimizations. If we find out that we compare two non-negative values,
    // we turn the instruction's predicate to its unsigned version. Note that
    // we cannot rely on Pred here unless we check if we have swapped it.
    assert(ICmp->getPredicate() == OriginalPred && "Predicate changed?");
    LLVM_DEBUG(dbgs() << "INDVARS: Turn to unsigned comparison: " << *ICmp
                      << '\n');
    ICmp->setPredicate(ICmpInst::getUnsignedPredicate(OriginalPred));
  } else
    return;

  ++NumElimCmp;
  Changed = true;
}
  258. bool SimplifyIndvar::eliminateSDiv(BinaryOperator *SDiv) {
  259. // Get the SCEVs for the ICmp operands.
  260. auto *N = SE->getSCEV(SDiv->getOperand(0));
  261. auto *D = SE->getSCEV(SDiv->getOperand(1));
  262. // Simplify unnecessary loops away.
  263. const Loop *L = LI->getLoopFor(SDiv->getParent());
  264. N = SE->getSCEVAtScope(N, L);
  265. D = SE->getSCEVAtScope(D, L);
  266. // Replace sdiv by udiv if both of the operands are non-negative
  267. if (SE->isKnownNonNegative(N) && SE->isKnownNonNegative(D)) {
  268. auto *UDiv = BinaryOperator::Create(
  269. BinaryOperator::UDiv, SDiv->getOperand(0), SDiv->getOperand(1),
  270. SDiv->getName() + ".udiv", SDiv);
  271. UDiv->setIsExact(SDiv->isExact());
  272. SDiv->replaceAllUsesWith(UDiv);
  273. LLVM_DEBUG(dbgs() << "INDVARS: Simplified sdiv: " << *SDiv << '\n');
  274. ++NumSimplifiedSDiv;
  275. Changed = true;
  276. DeadInsts.push_back(SDiv);
  277. return true;
  278. }
  279. return false;
  280. }
  281. // i %s n -> i %u n if i >= 0 and n >= 0
  282. void SimplifyIndvar::replaceSRemWithURem(BinaryOperator *Rem) {
  283. auto *N = Rem->getOperand(0), *D = Rem->getOperand(1);
  284. auto *URem = BinaryOperator::Create(BinaryOperator::URem, N, D,
  285. Rem->getName() + ".urem", Rem);
  286. Rem->replaceAllUsesWith(URem);
  287. LLVM_DEBUG(dbgs() << "INDVARS: Simplified srem: " << *Rem << '\n');
  288. ++NumSimplifiedSRem;
  289. Changed = true;
  290. DeadInsts.emplace_back(Rem);
  291. }
  292. // i % n --> i if i is in [0,n).
  293. void SimplifyIndvar::replaceRemWithNumerator(BinaryOperator *Rem) {
  294. Rem->replaceAllUsesWith(Rem->getOperand(0));
  295. LLVM_DEBUG(dbgs() << "INDVARS: Simplified rem: " << *Rem << '\n');
  296. ++NumElimRem;
  297. Changed = true;
  298. DeadInsts.emplace_back(Rem);
  299. }
  300. // (i+1) % n --> (i+1)==n?0:(i+1) if i is in [0,n).
  301. void SimplifyIndvar::replaceRemWithNumeratorOrZero(BinaryOperator *Rem) {
  302. auto *T = Rem->getType();
  303. auto *N = Rem->getOperand(0), *D = Rem->getOperand(1);
  304. ICmpInst *ICmp = new ICmpInst(Rem, ICmpInst::ICMP_EQ, N, D);
  305. SelectInst *Sel =
  306. SelectInst::Create(ICmp, ConstantInt::get(T, 0), N, "iv.rem", Rem);
  307. Rem->replaceAllUsesWith(Sel);
  308. LLVM_DEBUG(dbgs() << "INDVARS: Simplified rem: " << *Rem << '\n');
  309. ++NumElimRem;
  310. Changed = true;
  311. DeadInsts.emplace_back(Rem);
  312. }
/// SimplifyIVUsers helper for eliminating useless remainder operations
/// operating on an induction variable or replacing srem by urem.
///
/// Dispatches to replaceRemWithNumerator / replaceRemWithNumeratorOrZero /
/// replaceSRemWithURem depending on what SCEV can prove about the operands.
void SimplifyIndvar::simplifyIVRemainder(BinaryOperator *Rem, Value *IVOperand,
                                         bool IsSigned) {
  auto *NValue = Rem->getOperand(0);
  auto *DValue = Rem->getOperand(1);
  // We're only interested in the case where we know something about
  // the numerator, unless it is a srem, because we want to replace srem by urem
  // in general.
  bool UsedAsNumerator = IVOperand == NValue;
  if (!UsedAsNumerator && !IsSigned)
    return;

  const SCEV *N = SE->getSCEV(NValue);

  // Simplify unnecessary loops away.
  const Loop *ICmpLoop = LI->getLoopFor(Rem->getParent());
  N = SE->getSCEVAtScope(N, ICmpLoop);

  // All rewrites below require the numerator to be provably non-negative
  // (trivially so for urem).
  bool IsNumeratorNonNegative = !IsSigned || SE->isKnownNonNegative(N);

  // Do not proceed if the Numerator may be negative
  if (!IsNumeratorNonNegative)
    return;

  const SCEV *D = SE->getSCEV(DValue);
  D = SE->getSCEVAtScope(D, ICmpLoop);

  if (UsedAsNumerator) {
    auto LT = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
    // N < D: the remainder is just N.
    if (SE->isKnownPredicate(LT, N, D)) {
      replaceRemWithNumerator(Rem);
      return;
    }

    // N - 1 < D (i.e. N <= D): N can equal D at most, so the remainder is
    // either N or zero; fold to a compare+select.
    auto *T = Rem->getType();
    const auto *NLessOne = SE->getMinusSCEV(N, SE->getOne(T));
    if (SE->isKnownPredicate(LT, NLessOne, D)) {
      replaceRemWithNumeratorOrZero(Rem);
      return;
    }
  }

  // Try to replace SRem with URem, if both N and D are known non-negative.
  // Since we had already check N, we only need to check D now
  if (!IsSigned || !SE->isKnownNonNegative(D))
    return;

  replaceSRemWithURem(Rem);
}
  354. static bool willNotOverflow(ScalarEvolution *SE, Instruction::BinaryOps BinOp,
  355. bool Signed, const SCEV *LHS, const SCEV *RHS) {
  356. const SCEV *(ScalarEvolution::*Operation)(const SCEV *, const SCEV *,
  357. SCEV::NoWrapFlags, unsigned);
  358. switch (BinOp) {
  359. default:
  360. llvm_unreachable("Unsupported binary op");
  361. case Instruction::Add:
  362. Operation = &ScalarEvolution::getAddExpr;
  363. break;
  364. case Instruction::Sub:
  365. Operation = &ScalarEvolution::getMinusSCEV;
  366. break;
  367. case Instruction::Mul:
  368. Operation = &ScalarEvolution::getMulExpr;
  369. break;
  370. }
  371. const SCEV *(ScalarEvolution::*Extension)(const SCEV *, Type *, unsigned) =
  372. Signed ? &ScalarEvolution::getSignExtendExpr
  373. : &ScalarEvolution::getZeroExtendExpr;
  374. // Check ext(LHS op RHS) == ext(LHS) op ext(RHS)
  375. auto *NarrowTy = cast<IntegerType>(LHS->getType());
  376. auto *WideTy =
  377. IntegerType::get(NarrowTy->getContext(), NarrowTy->getBitWidth() * 2);
  378. const SCEV *A =
  379. (SE->*Extension)((SE->*Operation)(LHS, RHS, SCEV::FlagAnyWrap, 0),
  380. WideTy, 0);
  381. const SCEV *B =
  382. (SE->*Operation)((SE->*Extension)(LHS, WideTy, 0),
  383. (SE->*Extension)(RHS, WideTy, 0), SCEV::FlagAnyWrap, 0);
  384. return A == B;
  385. }
/// Replace a {sadd,uadd,ssub,usub,smul,umul}.with.overflow intrinsic with a
/// plain no-wrap binary operator when SCEV proves the operation cannot
/// overflow. The overflow bit becomes constant false. Returns true on success.
bool SimplifyIndvar::eliminateOverflowIntrinsic(WithOverflowInst *WO) {
  const SCEV *LHS = SE->getSCEV(WO->getLHS());
  const SCEV *RHS = SE->getSCEV(WO->getRHS());
  if (!willNotOverflow(SE, WO->getBinaryOp(), WO->isSigned(), LHS, RHS))
    return false;

  // Proved no overflow, nuke the overflow check and, if possible, the overflow
  // intrinsic as well.

  BinaryOperator *NewResult = BinaryOperator::Create(
      WO->getBinaryOp(), WO->getLHS(), WO->getRHS(), "", WO);

  // Tag the replacement with the no-wrap flag we just proved.
  if (WO->isSigned())
    NewResult->setHasNoSignedWrap(true);
  else
    NewResult->setHasNoUnsignedWrap(true);

  // Rewrite the extractvalue users: index 1 is the overflow bit (now always
  // false), index 0 is the arithmetic result. Deletion is deferred so the
  // user list is not mutated while being iterated.
  SmallVector<ExtractValueInst *, 4> ToDelete;

  for (auto *U : WO->users()) {
    if (auto *EVI = dyn_cast<ExtractValueInst>(U)) {
      if (EVI->getIndices()[0] == 1)
        EVI->replaceAllUsesWith(ConstantInt::getFalse(WO->getContext()));
      else {
        assert(EVI->getIndices()[0] == 0 && "Only two possibilities!");
        EVI->replaceAllUsesWith(NewResult);
      }
      ToDelete.push_back(EVI);
    }
  }

  for (auto *EVI : ToDelete)
    EVI->eraseFromParent();

  // The intrinsic may still have non-extractvalue users; only erase it when
  // it became completely dead.
  if (WO->use_empty())
    WO->eraseFromParent();

  Changed = true;
  return true;
}
  418. bool SimplifyIndvar::eliminateSaturatingIntrinsic(SaturatingInst *SI) {
  419. const SCEV *LHS = SE->getSCEV(SI->getLHS());
  420. const SCEV *RHS = SE->getSCEV(SI->getRHS());
  421. if (!willNotOverflow(SE, SI->getBinaryOp(), SI->isSigned(), LHS, RHS))
  422. return false;
  423. BinaryOperator *BO = BinaryOperator::Create(
  424. SI->getBinaryOp(), SI->getLHS(), SI->getRHS(), SI->getName(), SI);
  425. if (SI->isSigned())
  426. BO->setHasNoSignedWrap();
  427. else
  428. BO->setHasNoUnsignedWrap();
  429. SI->replaceAllUsesWith(BO);
  430. DeadInsts.emplace_back(SI);
  431. Changed = true;
  432. return true;
  433. }
/// Try to remove a trunc of the IV by widening every icmp user of the trunc
/// into a comparison on the untruncated IV. Returns true if the trunc (and
/// all its compares) were replaced.
bool SimplifyIndvar::eliminateTrunc(TruncInst *TI) {
  // It is always legal to replace
  //   icmp <pred> i32 trunc(iv), n
  // with
  //   icmp <pred> i64 sext(trunc(iv)), sext(n), if pred is signed predicate.
  // Or with
  //   icmp <pred> i64 zext(trunc(iv)), zext(n), if pred is unsigned predicate.
  // Or with either of these if pred is an equality predicate.
  //
  // If we can prove that iv == sext(trunc(iv)) or iv == zext(trunc(iv)) for
  // every comparison which uses trunc, it means that we can replace each of
  // them with comparison of iv against sext/zext(n). We no longer need trunc
  // after that.
  //
  // TODO: Should we do this if we can widen *some* comparisons, but not all
  // of them? Sometimes it is enough to enable other optimizations, but the
  // trunc instruction will stay in the loop.
  Value *IV = TI->getOperand(0);
  Type *IVTy = IV->getType();
  const SCEV *IVSCEV = SE->getSCEV(IV);
  const SCEV *TISCEV = SE->getSCEV(TI);

  // Check if iv == zext(trunc(iv)) and if iv == sext(trunc(iv)). If so, we can
  // get rid of trunc
  bool DoesSExtCollapse = false;
  bool DoesZExtCollapse = false;
  if (IVSCEV == SE->getSignExtendExpr(TISCEV, IVTy))
    DoesSExtCollapse = true;
  if (IVSCEV == SE->getZeroExtendExpr(TISCEV, IVTy))
    DoesZExtCollapse = true;

  // If neither sext nor zext does collapse, it is not profitable to do any
  // transform. Bail.
  if (!DoesSExtCollapse && !DoesZExtCollapse)
    return false;

  // Collect users of the trunc that look like comparisons against invariants.
  // Bail if we find something different.
  SmallVector<ICmpInst *, 4> ICmpUsers;
  for (auto *U : TI->users()) {
    // We don't care about users in unreachable blocks.
    if (isa<Instruction>(U) &&
        !DT->isReachableFromEntry(cast<Instruction>(U)->getParent()))
      continue;
    // Any non-icmp user makes the transform unprofitable (trunc would stay).
    ICmpInst *ICI = dyn_cast<ICmpInst>(U);
    if (!ICI) return false;
    assert(L->contains(ICI->getParent()) && "LCSSA form broken?");
    // One side must be the trunc, the other must be loop-invariant (so that
    // sext/zext of it can be hoisted out of the loop).
    if (!(ICI->getOperand(0) == TI && L->isLoopInvariant(ICI->getOperand(1))) &&
        !(ICI->getOperand(1) == TI && L->isLoopInvariant(ICI->getOperand(0))))
      return false;
    // If we cannot get rid of trunc, bail.
    if (ICI->isSigned() && !DoesSExtCollapse)
      return false;
    if (ICI->isUnsigned() && !DoesZExtCollapse)
      return false;
    // For equality, either signed or unsigned works.
    ICmpUsers.push_back(ICI);
  }

  // Decide whether zext is a valid (and profitable) widening for this icmp.
  auto CanUseZExt = [&](ICmpInst *ICI) {
    // Unsigned comparison can be widened as unsigned.
    if (ICI->isUnsigned())
      return true;
    // Is it profitable to do zext?
    if (!DoesZExtCollapse)
      return false;
    // For equality, we can safely zext both parts.
    if (ICI->isEquality())
      return true;
    // Otherwise we can only use zext when comparing two non-negative or two
    // negative values. But in practice, we will never pass DoesZExtCollapse
    // check for a negative value, because zext(trunc(x)) is non-negative. So
    // it only make sense to check for non-negativity here.
    const SCEV *SCEVOP1 = SE->getSCEV(ICI->getOperand(0));
    const SCEV *SCEVOP2 = SE->getSCEV(ICI->getOperand(1));
    return SE->isKnownNonNegative(SCEVOP1) && SE->isKnownNonNegative(SCEVOP2);
  };

  // Replace all comparisons against trunc with comparisons against IV.
  for (auto *ICI : ICmpUsers) {
    bool IsSwapped = L->isLoopInvariant(ICI->getOperand(0));
    auto *Op1 = IsSwapped ? ICI->getOperand(0) : ICI->getOperand(1);
    Instruction *Ext = nullptr;
    // For signed/unsigned predicate, replace the old comparison with comparison
    // of immediate IV against sext/zext of the invariant argument. If we can
    // use either sext or zext (i.e. we are dealing with equality predicate),
    // then prefer zext as a more canonical form.
    // TODO: If we see a signed comparison which can be turned into unsigned,
    // we can do it here for canonicalization purposes.
    ICmpInst::Predicate Pred = ICI->getPredicate();
    if (IsSwapped) Pred = ICmpInst::getSwappedPredicate(Pred);
    if (CanUseZExt(ICI)) {
      assert(DoesZExtCollapse && "Unprofitable zext?");
      Ext = new ZExtInst(Op1, IVTy, "zext", ICI);
      Pred = ICmpInst::getUnsignedPredicate(Pred);
    } else {
      assert(DoesSExtCollapse && "Unprofitable sext?");
      Ext = new SExtInst(Op1, IVTy, "sext", ICI);
      assert(Pred == ICmpInst::getSignedPredicate(Pred) && "Must be signed!");
    }
    // NOTE(review): this local `Changed` deliberately shadows the member
    // `Changed`; it only receives makeLoopInvariant's out-flag and is
    // discarded. Hoisting the extension out of the loop is best-effort.
    bool Changed;
    L->makeLoopInvariant(Ext, Changed);
    (void)Changed;
    ICmpInst *NewICI = new ICmpInst(ICI, Pred, IV, Ext);
    ICI->replaceAllUsesWith(NewICI);
    DeadInsts.emplace_back(ICI);
  }

  // Trunc no longer needed.
  TI->replaceAllUsesWith(UndefValue::get(TI->getType()));
  DeadInsts.emplace_back(TI);
  return true;
}
  541. /// Eliminate an operation that consumes a simple IV and has no observable
  542. /// side-effect given the range of IV values. IVOperand is guaranteed SCEVable,
  543. /// but UseInst may not be.
  544. bool SimplifyIndvar::eliminateIVUser(Instruction *UseInst,
  545. Instruction *IVOperand) {
  546. if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
  547. eliminateIVComparison(ICmp, IVOperand);
  548. return true;
  549. }
  550. if (BinaryOperator *Bin = dyn_cast<BinaryOperator>(UseInst)) {
  551. bool IsSRem = Bin->getOpcode() == Instruction::SRem;
  552. if (IsSRem || Bin->getOpcode() == Instruction::URem) {
  553. simplifyIVRemainder(Bin, IVOperand, IsSRem);
  554. return true;
  555. }
  556. if (Bin->getOpcode() == Instruction::SDiv)
  557. return eliminateSDiv(Bin);
  558. }
  559. if (auto *WO = dyn_cast<WithOverflowInst>(UseInst))
  560. if (eliminateOverflowIntrinsic(WO))
  561. return true;
  562. if (auto *SI = dyn_cast<SaturatingInst>(UseInst))
  563. if (eliminateSaturatingIntrinsic(SI))
  564. return true;
  565. if (auto *TI = dyn_cast<TruncInst>(UseInst))
  566. if (eliminateTrunc(TI))
  567. return true;
  568. if (eliminateIdentitySCEV(UseInst, IVOperand))
  569. return true;
  570. return false;
  571. }
  572. static Instruction *GetLoopInvariantInsertPosition(Loop *L, Instruction *Hint) {
  573. if (auto *BB = L->getLoopPreheader())
  574. return BB->getTerminator();
  575. return Hint;
  576. }
  577. /// Replace the UseInst with a loop invariant expression if it is safe.
  578. bool SimplifyIndvar::replaceIVUserWithLoopInvariant(Instruction *I) {
  579. if (!SE->isSCEVable(I->getType()))
  580. return false;
  581. // Get the symbolic expression for this instruction.
  582. const SCEV *S = SE->getSCEV(I);
  583. if (!SE->isLoopInvariant(S, L))
  584. return false;
  585. // Do not generate something ridiculous even if S is loop invariant.
  586. if (Rewriter.isHighCostExpansion(S, L, SCEVCheapExpansionBudget, TTI, I))
  587. return false;
  588. auto *IP = GetLoopInvariantInsertPosition(L, I);
  589. if (!isSafeToExpandAt(S, IP, *SE)) {
  590. LLVM_DEBUG(dbgs() << "INDVARS: Can not replace IV user: " << *I
  591. << " with non-speculable loop invariant: " << *S << '\n');
  592. return false;
  593. }
  594. auto *Invariant = Rewriter.expandCodeFor(S, I->getType(), IP);
  595. I->replaceAllUsesWith(Invariant);
  596. LLVM_DEBUG(dbgs() << "INDVARS: Replace IV user: " << *I
  597. << " with loop invariant: " << *S << '\n');
  598. ++NumFoldedUser;
  599. Changed = true;
  600. DeadInsts.emplace_back(I);
  601. return true;
  602. }
/// Eliminate any operation that SCEV can prove is an identity function.
/// Replaces UseInst with IVOperand when SCEV proves they always compute the
/// same value, subject to dominance and LCSSA legality checks.
bool SimplifyIndvar::eliminateIdentitySCEV(Instruction *UseInst,
                                           Instruction *IVOperand) {
  // Both values must be SCEVable, have the same IR type, and fold to the
  // identical SCEV expression.
  if (!SE->isSCEVable(UseInst->getType()) ||
      (UseInst->getType() != IVOperand->getType()) ||
      (SE->getSCEV(UseInst) != SE->getSCEV(IVOperand)))
    return false;

  // getSCEV(X) == getSCEV(Y) does not guarantee that X and Y are related in the
  // dominator tree, even if X is an operand to Y. For instance, in
  //
  //     %iv = phi i32 {0,+,1}
  //     br %cond, label %left, label %merge
  //
  //   left:
  //     %X = add i32 %iv, 0
  //     br label %merge
  //
  //   merge:
  //     %M = phi (%X, %iv)
  //
  // getSCEV(%M) == getSCEV(%X) == {0,+,1}, but %X does not dominate %M, and
  // %M.replaceAllUsesWith(%X) would be incorrect.

  if (isa<PHINode>(UseInst))
    // If UseInst is not a PHI node then we know that IVOperand dominates
    // UseInst directly from the legality of SSA.
    if (!DT || !DT->dominates(IVOperand, UseInst))
      return false;

  // The replacement must not break LCSSA form elsewhere in the function.
  if (!LI->replacementPreservesLCSSAForm(UseInst, IVOperand))
    return false;

  LLVM_DEBUG(dbgs() << "INDVARS: Eliminated identity: " << *UseInst << '\n');

  UseInst->replaceAllUsesWith(IVOperand);
  ++NumElimIdentity;
  Changed = true;
  DeadInsts.emplace_back(UseInst);
  return true;
}
  639. /// Annotate BO with nsw / nuw if it provably does not signed-overflow /
  640. /// unsigned-overflow. Returns true if anything changed, false otherwise.
  641. bool SimplifyIndvar::strengthenOverflowingOperation(BinaryOperator *BO,
  642. Value *IVOperand) {
  643. // Fastpath: we don't have any work to do if `BO` is `nuw` and `nsw`.
  644. if (BO->hasNoUnsignedWrap() && BO->hasNoSignedWrap())
  645. return false;
  646. if (BO->getOpcode() != Instruction::Add &&
  647. BO->getOpcode() != Instruction::Sub &&
  648. BO->getOpcode() != Instruction::Mul)
  649. return false;
  650. const SCEV *LHS = SE->getSCEV(BO->getOperand(0));
  651. const SCEV *RHS = SE->getSCEV(BO->getOperand(1));
  652. bool Changed = false;
  653. if (!BO->hasNoUnsignedWrap() &&
  654. willNotOverflow(SE, BO->getOpcode(), /* Signed */ false, LHS, RHS)) {
  655. BO->setHasNoUnsignedWrap();
  656. SE->forgetValue(BO);
  657. Changed = true;
  658. }
  659. if (!BO->hasNoSignedWrap() &&
  660. willNotOverflow(SE, BO->getOpcode(), /* Signed */ true, LHS, RHS)) {
  661. BO->setHasNoSignedWrap();
  662. SE->forgetValue(BO);
  663. Changed = true;
  664. }
  665. return Changed;
  666. }
  667. /// Annotate the Shr in (X << IVOperand) >> C as exact using the
  668. /// information from the IV's range. Returns true if anything changed, false
  669. /// otherwise.
  670. bool SimplifyIndvar::strengthenRightShift(BinaryOperator *BO,
  671. Value *IVOperand) {
  672. using namespace llvm::PatternMatch;
  673. if (BO->getOpcode() == Instruction::Shl) {
  674. bool Changed = false;
  675. ConstantRange IVRange = SE->getUnsignedRange(SE->getSCEV(IVOperand));
  676. for (auto *U : BO->users()) {
  677. const APInt *C;
  678. if (match(U,
  679. m_AShr(m_Shl(m_Value(), m_Specific(IVOperand)), m_APInt(C))) ||
  680. match(U,
  681. m_LShr(m_Shl(m_Value(), m_Specific(IVOperand)), m_APInt(C)))) {
  682. BinaryOperator *Shr = cast<BinaryOperator>(U);
  683. if (!Shr->isExact() && IVRange.getUnsignedMin().uge(*C)) {
  684. Shr->setIsExact(true);
  685. Changed = true;
  686. }
  687. }
  688. }
  689. return Changed;
  690. }
  691. return false;
  692. }
  693. /// Add all uses of Def to the current IV's worklist.
  694. static void pushIVUsers(
  695. Instruction *Def, Loop *L,
  696. SmallPtrSet<Instruction*,16> &Simplified,
  697. SmallVectorImpl< std::pair<Instruction*,Instruction*> > &SimpleIVUsers) {
  698. for (User *U : Def->users()) {
  699. Instruction *UI = cast<Instruction>(U);
  700. // Avoid infinite or exponential worklist processing.
  701. // Also ensure unique worklist users.
  702. // If Def is a LoopPhi, it may not be in the Simplified set, so check for
  703. // self edges first.
  704. if (UI == Def)
  705. continue;
  706. // Only change the current Loop, do not change the other parts (e.g. other
  707. // Loops).
  708. if (!L->contains(UI))
  709. continue;
  710. // Do not push the same instruction more than once.
  711. if (!Simplified.insert(UI).second)
  712. continue;
  713. SimpleIVUsers.push_back(std::make_pair(UI, Def));
  714. }
  715. }
  716. /// Return true if this instruction generates a simple SCEV
  717. /// expression in terms of that IV.
  718. ///
  719. /// This is similar to IVUsers' isInteresting() but processes each instruction
  720. /// non-recursively when the operand is already known to be a simpleIVUser.
  721. ///
  722. static bool isSimpleIVUser(Instruction *I, const Loop *L, ScalarEvolution *SE) {
  723. if (!SE->isSCEVable(I->getType()))
  724. return false;
  725. // Get the symbolic expression for this instruction.
  726. const SCEV *S = SE->getSCEV(I);
  727. // Only consider affine recurrences.
  728. const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S);
  729. if (AR && AR->getLoop() == L)
  730. return true;
  731. return false;
  732. }
/// Iteratively perform simplification on a worklist of users
/// of the specified induction variable. Each successive simplification may push
/// more users which may themselves be candidates for simplification.
///
/// This algorithm does not require IVUsers analysis. Instead, it simplifies
/// instructions in-place during analysis. Rather than rewriting induction
/// variables bottom-up from their users, it transforms a chain of IVUsers
/// top-down, updating the IR only when it encounters a clear optimization
/// opportunity.
///
/// Once DisableIVRewrite is default, LSR will be the only client of IVUsers.
///
void SimplifyIndvar::simplifyUsers(PHINode *CurrIV, IVVisitor *V) {
  if (!SE->isSCEVable(CurrIV->getType()))
    return;

  // Instructions processed by SimplifyIndvar for CurrIV.
  SmallPtrSet<Instruction *, 16> Simplified;

  // Use-def pairs if IV users waiting to be processed for CurrIV.
  SmallVector<std::pair<Instruction *, Instruction *>, 8> SimpleIVUsers;

  // Push users of the current LoopPhi. In rare cases, pushIVUsers may be
  // called multiple times for the same LoopPhi. This is the proper thing to
  // do for loop header phis that use each other.
  pushIVUsers(CurrIV, L, Simplified, SimpleIVUsers);

  while (!SimpleIVUsers.empty()) {
    std::pair<Instruction *, Instruction *> UseOper =
        SimpleIVUsers.pop_back_val();
    Instruction *UseInst = UseOper.first;

    // If a user of the IndVar is trivially dead, we prefer just to mark it dead
    // rather than try to do some complex analysis or transformation (such as
    // widening) basing on it.
    // TODO: Propagate TLI and pass it here to handle more cases.
    if (isInstructionTriviallyDead(UseInst, /* TLI */ nullptr)) {
      DeadInsts.emplace_back(UseInst);
      continue;
    }

    // Bypass back edges to avoid extra work.
    if (UseInst == CurrIV) continue;

    // Try to replace UseInst with a loop invariant before any other
    // simplifications.
    if (replaceIVUserWithLoopInvariant(UseInst))
      continue;

    // Repeatedly fold operations on the IV operand (e.g. stripping an add of
    // a constant) until no further folding is possible or the chain leaves
    // instruction territory.
    Instruction *IVOperand = UseOper.second;
    for (unsigned N = 0; IVOperand; ++N) {
      // Each fold consumes a distinct simplified instruction, so the fold
      // count is bounded by the size of the Simplified set.
      assert(N <= Simplified.size() && "runaway iteration");

      Value *NewOper = foldIVUser(UseInst, IVOperand);
      if (!NewOper)
        break; // done folding
      IVOperand = dyn_cast<Instruction>(NewOper);
    }
    if (!IVOperand)
      continue;

    if (eliminateIVUser(UseInst, IVOperand)) {
      pushIVUsers(IVOperand, L, Simplified, SimpleIVUsers);
      continue;
    }

    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(UseInst)) {
      if ((isa<OverflowingBinaryOperator>(BO) &&
           strengthenOverflowingOperation(BO, IVOperand)) ||
          (isa<ShlOperator>(BO) && strengthenRightShift(BO, IVOperand))) {
        // re-queue uses of the now modified binary operator and fall
        // through to the checks that remain.
        pushIVUsers(IVOperand, L, Simplified, SimpleIVUsers);
      }
    }

    // Hand casts to the client visitor (if any) instead of following their
    // users here.
    CastInst *Cast = dyn_cast<CastInst>(UseInst);
    if (V && Cast) {
      V->visitCast(Cast);
      continue;
    }
    // Only keep chasing users while the chain stays an affine recurrence on
    // this loop.
    if (isSimpleIVUser(UseInst, L, SE)) {
      pushIVUsers(UseInst, L, Simplified, SimpleIVUsers);
    }
  }
}
  807. namespace llvm {
  808. void IVVisitor::anchor() { }
  809. /// Simplify instructions that use this induction variable
  810. /// by using ScalarEvolution to analyze the IV's recurrence.
  811. bool simplifyUsersOfIV(PHINode *CurrIV, ScalarEvolution *SE, DominatorTree *DT,
  812. LoopInfo *LI, const TargetTransformInfo *TTI,
  813. SmallVectorImpl<WeakTrackingVH> &Dead,
  814. SCEVExpander &Rewriter, IVVisitor *V) {
  815. SimplifyIndvar SIV(LI->getLoopFor(CurrIV->getParent()), SE, DT, LI, TTI,
  816. Rewriter, Dead);
  817. SIV.simplifyUsers(CurrIV, V);
  818. return SIV.hasChanged();
  819. }
  820. /// Simplify users of induction variables within this
  821. /// loop. This does not actually change or add IVs.
  822. bool simplifyLoopIVs(Loop *L, ScalarEvolution *SE, DominatorTree *DT,
  823. LoopInfo *LI, const TargetTransformInfo *TTI,
  824. SmallVectorImpl<WeakTrackingVH> &Dead) {
  825. SCEVExpander Rewriter(*SE, SE->getDataLayout(), "indvars");
  826. #ifndef NDEBUG
  827. Rewriter.setDebugType(DEBUG_TYPE);
  828. #endif
  829. bool Changed = false;
  830. for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
  831. Changed |=
  832. simplifyUsersOfIV(cast<PHINode>(I), SE, DT, LI, TTI, Dead, Rewriter);
  833. }
  834. return Changed;
  835. }
  836. } // namespace llvm
//===----------------------------------------------------------------------===//
//  Widen Induction Variables - Extend the width of an IV to cover its
//  widest uses.
//===----------------------------------------------------------------------===//

/// Widens the narrow induction variable described by a WideIVInfo to
/// WideType, cloning or truncating its users as needed. Instances are
/// single-use: construct, then call createWideIV().
class WidenIV {
  // Parameters
  PHINode *OrigPhi;   // The narrow IV being widened.
  Type *WideType;     // The integer type to widen to.

  // Context
  LoopInfo *LI;
  Loop *L;            // Loop whose header contains OrigPhi.
  ScalarEvolution *SE;
  DominatorTree *DT;

  // Does the module have any calls to the llvm.experimental.guard intrinsic
  // at all? If not we can avoid scanning instructions looking for guards.
  bool HasGuards;

  // Whether control-dependent (post-increment) range info may be used.
  bool UsePostIncrementRanges;

  // Statistics
  unsigned NumElimExt = 0;   // Extends made redundant by widening.
  unsigned NumWidened = 0;   // Narrow uses replaced by wide ones.

  // Result
  PHINode *WidePhi = nullptr;
  Instruction *WideInc = nullptr;
  const SCEV *WideIncExpr = nullptr;
  SmallVectorImpl<WeakTrackingVH> &DeadInsts;

  // Narrow instructions already processed, to avoid revisiting.
  SmallPtrSet<Instruction *,16> Widened;

  enum ExtendKind { ZeroExtended, SignExtended, Unknown };

  // A map tracking the kind of extension used to widen each narrow IV
  // and narrow IV user.
  // Key: pointer to a narrow IV or IV user.
  // Value: the kind of extension used to widen this Instruction.
  DenseMap<AssertingVH<Instruction>, ExtendKind> ExtendKindMap;

  using DefUserPair = std::pair<AssertingVH<Value>, AssertingVH<Instruction>>;

  // A map with control-dependent ranges for post increment IV uses. The key is
  // a pair of IV def and a use of this def denoting the context. The value is
  // a ConstantRange representing possible values of the def at the given
  // context.
  DenseMap<DefUserPair, ConstantRange> PostIncRangeInfos;

  // Return the recorded range of Def at use site UseI, or None if no
  // control-dependent range has been computed for that pair.
  Optional<ConstantRange> getPostIncRangeInfo(Value *Def,
                                              Instruction *UseI) {
    DefUserPair Key(Def, UseI);
    auto It = PostIncRangeInfos.find(Key);
    return It == PostIncRangeInfos.end()
               ? Optional<ConstantRange>(None)
               : Optional<ConstantRange>(It->second);
  }

  void calculatePostIncRanges(PHINode *OrigPhi);
  void calculatePostIncRange(Instruction *NarrowDef, Instruction *NarrowUser);

  // Record range R for Def at UseI; if a range already exists, refine it by
  // intersection.
  void updatePostIncRangeInfo(Value *Def, Instruction *UseI, ConstantRange R) {
    DefUserPair Key(Def, UseI);
    auto It = PostIncRangeInfos.find(Key);
    if (It == PostIncRangeInfos.end())
      PostIncRangeInfos.insert({Key, R});
    else
      It->second = R.intersectWith(It->second);
  }

public:
  /// Record a link in the Narrow IV def-use chain along with the WideIV that
  /// computes the same value as the Narrow IV def. This avoids caching Use*
  /// pointers.
  struct NarrowIVDefUse {
    Instruction *NarrowDef = nullptr;
    Instruction *NarrowUse = nullptr;
    Instruction *WideDef = nullptr;

    // True if the narrow def is never negative. Tracking this information lets
    // us use a sign extension instead of a zero extension or vice versa, when
    // profitable and legal.
    bool NeverNegative = false;

    NarrowIVDefUse(Instruction *ND, Instruction *NU, Instruction *WD,
                   bool NeverNegative)
        : NarrowDef(ND), NarrowUse(NU), WideDef(WD),
          NeverNegative(NeverNegative) {}
  };

  WidenIV(const WideIVInfo &WI, LoopInfo *LInfo, ScalarEvolution *SEv,
          DominatorTree *DTree, SmallVectorImpl<WeakTrackingVH> &DI,
          bool HasGuards, bool UsePostIncrementRanges = true);

  // Perform the widening; returns the new wide phi, or null on failure.
  PHINode *createWideIV(SCEVExpander &Rewriter);

  unsigned getNumElimExt() { return NumElimExt; };
  unsigned getNumWidened() { return NumWidened; };

protected:
  Value *createExtendInst(Value *NarrowOper, Type *WideType, bool IsSigned,
                          Instruction *Use);

  Instruction *cloneIVUser(NarrowIVDefUse DU, const SCEVAddRecExpr *WideAR);
  Instruction *cloneArithmeticIVUser(NarrowIVDefUse DU,
                                     const SCEVAddRecExpr *WideAR);
  Instruction *cloneBitwiseIVUser(NarrowIVDefUse DU);

  ExtendKind getExtendKind(Instruction *I);

  using WidenedRecTy = std::pair<const SCEVAddRecExpr *, ExtendKind>;

  WidenedRecTy getWideRecurrence(NarrowIVDefUse DU);

  WidenedRecTy getExtendedOperandRecurrence(NarrowIVDefUse DU);

  const SCEV *getSCEVByOpCode(const SCEV *LHS, const SCEV *RHS,
                              unsigned OpCode) const;

  Instruction *widenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter);

  bool widenLoopCompare(NarrowIVDefUse DU);
  bool widenWithVariantUse(NarrowIVDefUse DU);

  void pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef);

private:
  SmallVector<NarrowIVDefUse, 8> NarrowIVUsers;
};
/// Determine the insertion point for this user. By default, insert immediately
/// before the user. SCEVExpander or LICM will hoist loop invariants out of the
/// loop. For PHI nodes, there may be multiple uses, so compute the nearest
/// common dominator for the incoming blocks. A nullptr can be returned if no
/// viable location is found: it may happen if User is a PHI and Def only comes
/// to this PHI from unreachable blocks.
static Instruction *getInsertPointForUses(Instruction *User, Value *Def,
                                          DominatorTree *DT, LoopInfo *LI) {
  PHINode *PHI = dyn_cast<PHINode>(User);
  if (!PHI)
    return User;

  // Fold all reachable incoming blocks that carry Def into a single block
  // that dominates all of them.
  Instruction *InsertPt = nullptr;
  for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i) {
    if (PHI->getIncomingValue(i) != Def)
      continue;
    BasicBlock *InsertBB = PHI->getIncomingBlock(i);

    if (!DT->isReachableFromEntry(InsertBB))
      continue;

    if (!InsertPt) {
      InsertPt = InsertBB->getTerminator();
      continue;
    }
    InsertBB = DT->findNearestCommonDominator(InsertPt->getParent(), InsertBB);
    InsertPt = InsertBB->getTerminator();
  }

  // If we have skipped all inputs, it means that Def only comes to Phi from
  // unreachable blocks.
  if (!InsertPt)
    return nullptr;

  auto *DefI = dyn_cast<Instruction>(Def);
  if (!DefI)
    return InsertPt;

  assert(DT->dominates(DefI, InsertPt) && "def does not dominate all uses");

  auto *L = LI->getLoopFor(DefI->getParent());
  assert(!L || L->contains(LI->getLoopFor(InsertPt->getParent())));

  // Walk up the dominator tree from the computed point until we reach a block
  // in the same loop as Def, so the insertion point does not sit in a deeper
  // loop than Def itself.
  for (auto *DTN = (*DT)[InsertPt->getParent()]; DTN; DTN = DTN->getIDom())
    if (LI->getLoopFor(DTN->getBlock()) == L)
      return DTN->getBlock()->getTerminator();

  llvm_unreachable("DefI dominates InsertPt!");
}
// Construct a widener for the narrow IV described by WI. The loop is derived
// from the phi's parent block, which the assert confirms is a loop header.
WidenIV::WidenIV(const WideIVInfo &WI, LoopInfo *LInfo, ScalarEvolution *SEv,
                 DominatorTree *DTree, SmallVectorImpl<WeakTrackingVH> &DI,
                 bool HasGuards, bool UsePostIncrementRanges)
    : OrigPhi(WI.NarrowIV), WideType(WI.WidestNativeType), LI(LInfo),
      L(LI->getLoopFor(OrigPhi->getParent())), SE(SEv), DT(DTree),
      HasGuards(HasGuards), UsePostIncrementRanges(UsePostIncrementRanges),
      DeadInsts(DI) {
  assert(L->getHeader() == OrigPhi->getParent() && "Phi must be an IV");
  // Seed the extension-kind map: the narrow IV itself is widened with the
  // extension kind recorded in WI.
  ExtendKindMap[OrigPhi] = WI.IsSigned ? SignExtended : ZeroExtended;
}
  986. Value *WidenIV::createExtendInst(Value *NarrowOper, Type *WideType,
  987. bool IsSigned, Instruction *Use) {
  988. // Set the debug location and conservative insertion point.
  989. IRBuilder<> Builder(Use);
  990. // Hoist the insertion point into loop preheaders as far as possible.
  991. for (const Loop *L = LI->getLoopFor(Use->getParent());
  992. L && L->getLoopPreheader() && L->isLoopInvariant(NarrowOper);
  993. L = L->getParentLoop())
  994. Builder.SetInsertPoint(L->getLoopPreheader()->getTerminator());
  995. return IsSigned ? Builder.CreateSExt(NarrowOper, WideType) :
  996. Builder.CreateZExt(NarrowOper, WideType);
  997. }
  998. /// Instantiate a wide operation to replace a narrow operation. This only needs
  999. /// to handle operations that can evaluation to SCEVAddRec. It can safely return
  1000. /// 0 for any operation we decide not to clone.
  1001. Instruction *WidenIV::cloneIVUser(WidenIV::NarrowIVDefUse DU,
  1002. const SCEVAddRecExpr *WideAR) {
  1003. unsigned Opcode = DU.NarrowUse->getOpcode();
  1004. switch (Opcode) {
  1005. default:
  1006. return nullptr;
  1007. case Instruction::Add:
  1008. case Instruction::Mul:
  1009. case Instruction::UDiv:
  1010. case Instruction::Sub:
  1011. return cloneArithmeticIVUser(DU, WideAR);
  1012. case Instruction::And:
  1013. case Instruction::Or:
  1014. case Instruction::Xor:
  1015. case Instruction::Shl:
  1016. case Instruction::LShr:
  1017. case Instruction::AShr:
  1018. return cloneBitwiseIVUser(DU);
  1019. }
  1020. }
  1021. Instruction *WidenIV::cloneBitwiseIVUser(WidenIV::NarrowIVDefUse DU) {
  1022. Instruction *NarrowUse = DU.NarrowUse;
  1023. Instruction *NarrowDef = DU.NarrowDef;
  1024. Instruction *WideDef = DU.WideDef;
  1025. LLVM_DEBUG(dbgs() << "Cloning bitwise IVUser: " << *NarrowUse << "\n");
  1026. // Replace NarrowDef operands with WideDef. Otherwise, we don't know anything
  1027. // about the narrow operand yet so must insert a [sz]ext. It is probably loop
  1028. // invariant and will be folded or hoisted. If it actually comes from a
  1029. // widened IV, it should be removed during a future call to widenIVUse.
  1030. bool IsSigned = getExtendKind(NarrowDef) == SignExtended;
  1031. Value *LHS = (NarrowUse->getOperand(0) == NarrowDef)
  1032. ? WideDef
  1033. : createExtendInst(NarrowUse->getOperand(0), WideType,
  1034. IsSigned, NarrowUse);
  1035. Value *RHS = (NarrowUse->getOperand(1) == NarrowDef)
  1036. ? WideDef
  1037. : createExtendInst(NarrowUse->getOperand(1), WideType,
  1038. IsSigned, NarrowUse);
  1039. auto *NarrowBO = cast<BinaryOperator>(NarrowUse);
  1040. auto *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS, RHS,
  1041. NarrowBO->getName());
  1042. IRBuilder<> Builder(NarrowUse);
  1043. Builder.Insert(WideBO);
  1044. WideBO->copyIRFlags(NarrowBO);
  1045. return WideBO;
  1046. }
/// Clone an arithmetic narrow user, using SCEV to decide whether the non-IV
/// operand must be sign- or zero-extended so that the wide operation still
/// computes WideAR. Returns nullptr if neither extension reproduces WideAR.
Instruction *WidenIV::cloneArithmeticIVUser(WidenIV::NarrowIVDefUse DU,
                                            const SCEVAddRecExpr *WideAR) {
  Instruction *NarrowUse = DU.NarrowUse;
  Instruction *NarrowDef = DU.NarrowDef;
  Instruction *WideDef = DU.WideDef;

  LLVM_DEBUG(dbgs() << "Cloning arithmetic IVUser: " << *NarrowUse << "\n");

  // Which operand of the narrow use is the IV def.
  unsigned IVOpIdx = (NarrowUse->getOperand(0) == NarrowDef) ? 0 : 1;

  // We're trying to find X such that
  //
  //  Widen(NarrowDef `op` NonIVNarrowDef) == WideAR == WideDef `op.wide` X
  //
  // We guess two solutions to X, sext(NonIVNarrowDef) and zext(NonIVNarrowDef),
  // and check using SCEV if any of them are correct.

  // Returns true if extending NonIVNarrowDef according to `SignExt` is a
  // correct solution to X.
  auto GuessNonIVOperand = [&](bool SignExt) {
    const SCEV *WideLHS;
    const SCEV *WideRHS;

    auto GetExtend = [this, SignExt](const SCEV *S, Type *Ty) {
      if (SignExt)
        return SE->getSignExtendExpr(S, Ty);
      return SE->getZeroExtendExpr(S, Ty);
    };

    if (IVOpIdx == 0) {
      WideLHS = SE->getSCEV(WideDef);
      const SCEV *NarrowRHS = SE->getSCEV(NarrowUse->getOperand(1));
      WideRHS = GetExtend(NarrowRHS, WideType);
    } else {
      const SCEV *NarrowLHS = SE->getSCEV(NarrowUse->getOperand(0));
      WideLHS = GetExtend(NarrowLHS, WideType);
      WideRHS = SE->getSCEV(WideDef);
    }

    // WideUse is "WideDef `op.wide` X" as described in the comment.
    const SCEV *WideUse =
        getSCEVByOpCode(WideLHS, WideRHS, NarrowUse->getOpcode());

    return WideUse == WideAR;
  };

  // Try the extension matching the IV's kind first, then the opposite one.
  bool SignExtend = getExtendKind(NarrowDef) == SignExtended;
  if (!GuessNonIVOperand(SignExtend)) {
    SignExtend = !SignExtend;
    if (!GuessNonIVOperand(SignExtend))
      return nullptr;
  }

  Value *LHS = (NarrowUse->getOperand(0) == NarrowDef)
                   ? WideDef
                   : createExtendInst(NarrowUse->getOperand(0), WideType,
                                      SignExtend, NarrowUse);
  Value *RHS = (NarrowUse->getOperand(1) == NarrowDef)
                   ? WideDef
                   : createExtendInst(NarrowUse->getOperand(1), WideType,
                                      SignExtend, NarrowUse);

  auto *NarrowBO = cast<BinaryOperator>(NarrowUse);
  auto *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS, RHS,
                                        NarrowBO->getName());

  IRBuilder<> Builder(NarrowUse);
  Builder.Insert(WideBO);
  WideBO->copyIRFlags(NarrowBO);
  return WideBO;
}
  1106. WidenIV::ExtendKind WidenIV::getExtendKind(Instruction *I) {
  1107. auto It = ExtendKindMap.find(I);
  1108. assert(It != ExtendKindMap.end() && "Instruction not yet extended!");
  1109. return It->second;
  1110. }
  1111. const SCEV *WidenIV::getSCEVByOpCode(const SCEV *LHS, const SCEV *RHS,
  1112. unsigned OpCode) const {
  1113. switch (OpCode) {
  1114. case Instruction::Add:
  1115. return SE->getAddExpr(LHS, RHS);
  1116. case Instruction::Sub:
  1117. return SE->getMinusSCEV(LHS, RHS);
  1118. case Instruction::Mul:
  1119. return SE->getMulExpr(LHS, RHS);
  1120. case Instruction::UDiv:
  1121. return SE->getUDivExpr(LHS, RHS);
  1122. default:
  1123. llvm_unreachable("Unsupported opcode.");
  1124. };
  1125. }
/// No-wrap operations can transfer sign extension of their result to their
/// operands. Generate the SCEV value for the widened operation without
/// actually modifying the IR yet. If the expression after extending the
/// operands is an AddRec for this loop, return the AddRec and the kind of
/// extension used.
WidenIV::WidenedRecTy
WidenIV::getExtendedOperandRecurrence(WidenIV::NarrowIVDefUse DU) {
  // Handle the common case of add<nsw/nuw>
  const unsigned OpCode = DU.NarrowUse->getOpcode();
  // Only Add/Sub/Mul instructions supported yet.
  if (OpCode != Instruction::Add && OpCode != Instruction::Sub &&
      OpCode != Instruction::Mul)
    return {nullptr, Unknown};

  // One operand (NarrowDef) has already been extended to WideDef. Now determine
  // if extending the other will lead to a recurrence.
  const unsigned ExtendOperIdx =
      DU.NarrowUse->getOperand(0) == DU.NarrowDef ? 1 : 0;
  assert(DU.NarrowUse->getOperand(1-ExtendOperIdx) == DU.NarrowDef && "bad DU");

  // The extension of the non-IV operand is only valid when the matching
  // no-wrap flag is present on the narrow operation.
  const SCEV *ExtendOperExpr = nullptr;
  const OverflowingBinaryOperator *OBO =
      cast<OverflowingBinaryOperator>(DU.NarrowUse);
  ExtendKind ExtKind = getExtendKind(DU.NarrowDef);
  if (ExtKind == SignExtended && OBO->hasNoSignedWrap())
    ExtendOperExpr = SE->getSignExtendExpr(
        SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType);
  else if (ExtKind == ZeroExtended && OBO->hasNoUnsignedWrap())
    ExtendOperExpr = SE->getZeroExtendExpr(
        SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType);
  else
    return {nullptr, Unknown};

  // When creating this SCEV expr, don't apply the current operations NSW or NUW
  // flags. This instruction may be guarded by control flow that the no-wrap
  // behavior depends on. Non-control-equivalent instructions can be mapped to
  // the same SCEV expression, and it would be incorrect to transfer NSW/NUW
  // semantics to those operations.
  const SCEV *lhs = SE->getSCEV(DU.WideDef);
  const SCEV *rhs = ExtendOperExpr;

  // Let's swap operands to the initial order for the case of non-commutative
  // operations, like SUB. See PR21014.
  if (ExtendOperIdx == 0)
    std::swap(lhs, rhs);
  const SCEVAddRecExpr *AddRec =
      dyn_cast<SCEVAddRecExpr>(getSCEVByOpCode(lhs, rhs, OpCode));

  if (!AddRec || AddRec->getLoop() != L)
    return {nullptr, Unknown};

  return {AddRec, ExtKind};
}
  1173. /// Is this instruction potentially interesting for further simplification after
  1174. /// widening it's type? In other words, can the extend be safely hoisted out of
  1175. /// the loop with SCEV reducing the value to a recurrence on the same loop. If
  1176. /// so, return the extended recurrence and the kind of extension used. Otherwise
  1177. /// return {nullptr, Unknown}.
  1178. WidenIV::WidenedRecTy WidenIV::getWideRecurrence(WidenIV::NarrowIVDefUse DU) {
  1179. if (!SE->isSCEVable(DU.NarrowUse->getType()))
  1180. return {nullptr, Unknown};
  1181. const SCEV *NarrowExpr = SE->getSCEV(DU.NarrowUse);
  1182. if (SE->getTypeSizeInBits(NarrowExpr->getType()) >=
  1183. SE->getTypeSizeInBits(WideType)) {
  1184. // NarrowUse implicitly widens its operand. e.g. a gep with a narrow
  1185. // index. So don't follow this use.
  1186. return {nullptr, Unknown};
  1187. }
  1188. const SCEV *WideExpr;
  1189. ExtendKind ExtKind;
  1190. if (DU.NeverNegative) {
  1191. WideExpr = SE->getSignExtendExpr(NarrowExpr, WideType);
  1192. if (isa<SCEVAddRecExpr>(WideExpr))
  1193. ExtKind = SignExtended;
  1194. else {
  1195. WideExpr = SE->getZeroExtendExpr(NarrowExpr, WideType);
  1196. ExtKind = ZeroExtended;
  1197. }
  1198. } else if (getExtendKind(DU.NarrowDef) == SignExtended) {
  1199. WideExpr = SE->getSignExtendExpr(NarrowExpr, WideType);
  1200. ExtKind = SignExtended;
  1201. } else {
  1202. WideExpr = SE->getZeroExtendExpr(NarrowExpr, WideType);
  1203. ExtKind = ZeroExtended;
  1204. }
  1205. const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(WideExpr);
  1206. if (!AddRec || AddRec->getLoop() != L)
  1207. return {nullptr, Unknown};
  1208. return {AddRec, ExtKind};
  1209. }
  1210. /// This IV user cannot be widened. Replace this use of the original narrow IV
  1211. /// with a truncation of the new wide IV to isolate and eliminate the narrow IV.
  1212. static void truncateIVUse(WidenIV::NarrowIVDefUse DU, DominatorTree *DT,
  1213. LoopInfo *LI) {
  1214. auto *InsertPt = getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT, LI);
  1215. if (!InsertPt)
  1216. return;
  1217. LLVM_DEBUG(dbgs() << "INDVARS: Truncate IV " << *DU.WideDef << " for user "
  1218. << *DU.NarrowUse << "\n");
  1219. IRBuilder<> Builder(InsertPt);
  1220. Value *Trunc = Builder.CreateTrunc(DU.WideDef, DU.NarrowDef->getType());
  1221. DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, Trunc);
  1222. }
/// If the narrow use is a compare instruction, then widen the compare
//  (and possibly the other operand).  The extend operation is hoisted into the
// loop preheader as far as possible.
bool WidenIV::widenLoopCompare(WidenIV::NarrowIVDefUse DU) {
  ICmpInst *Cmp = dyn_cast<ICmpInst>(DU.NarrowUse);
  if (!Cmp)
    return false;

  // We can legally widen the comparison in the following two cases:
  //
  //  - The signedness of the IV extension and comparison match
  //
  //  - The narrow IV is always positive (and thus its sign extension is equal
  //    to its zero extension).  For instance, let's say we're zero extending
  //    %narrow for the following use
  //
  //      icmp slt i32 %narrow, %val   ... (A)
  //
  //    and %narrow is always positive.  Then
  //
  //      (A) == icmp slt i32 sext(%narrow), sext(%val)
  //          == icmp slt i32 zext(%narrow), sext(%val)
  bool IsSigned = getExtendKind(DU.NarrowDef) == SignExtended;
  if (!(DU.NeverNegative || IsSigned == Cmp->isSigned()))
    return false;

  // The operand being compared against the narrow IV.
  Value *Op = Cmp->getOperand(Cmp->getOperand(0) == DU.NarrowDef ? 1 : 0);
  unsigned CastWidth = SE->getTypeSizeInBits(Op->getType());
  unsigned IVWidth = SE->getTypeSizeInBits(WideType);
  assert(CastWidth <= IVWidth && "Unexpected width while widening compare.");

  // Widen the compare instruction.
  auto *InsertPt = getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT, LI);
  if (!InsertPt)
    return false;
  IRBuilder<> Builder(InsertPt);
  DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, DU.WideDef);

  // Widen the other operand of the compare, if necessary. The extension kind
  // follows the comparison's own signedness.
  if (CastWidth < IVWidth) {
    Value *ExtOp = createExtendInst(Op, WideType, Cmp->isSigned(), Cmp);
    DU.NarrowUse->replaceUsesOfWith(Op, ExtOp);
  }
  return true;
}
// The widenIVUse avoids generating trunc by evaluating the use as AddRec, this
// will not work when:
//    1) SCEV traces back to an instruction inside the loop that SCEV can not
// expand, eg. add %indvar, (load %addr)
//    2) SCEV finds a loop variant, eg. add %indvar, %loopvariant
// While SCEV fails to avoid trunc, we can still try to use instruction
// combining approach to prove trunc is not required. This can be further
// extended with other instruction combining checks, but for now we handle the
// following case (sub can be "add" and "mul", "nsw + sext" can be "nuw + zext")
//
// Src:
//   %c = sub nsw %b, %indvar
//   %d = sext %c to i64
// Dst:
//   %indvar.ext1 = sext %indvar to i64
//   %m = sext %b to i64
//   %d = sub nsw i64 %m, %indvar.ext1
// Therefore, as long as the result of add/sub/mul is extended to wide type, no
// trunc is required regardless of how %b is generated. This pattern is common
// when calculating address in 64 bit architecture
bool WidenIV::widenWithVariantUse(WidenIV::NarrowIVDefUse DU) {
  Instruction *NarrowUse = DU.NarrowUse;
  Instruction *NarrowDef = DU.NarrowDef;
  Instruction *WideDef = DU.WideDef;

  // Handle the common case of add<nsw/nuw>
  const unsigned OpCode = NarrowUse->getOpcode();
  // Only Add/Sub/Mul instructions are supported.
  if (OpCode != Instruction::Add && OpCode != Instruction::Sub &&
      OpCode != Instruction::Mul)
    return false;

  // The operand that is not defined by NarrowDef of DU. Let's call it the
  // other operand.
  assert((NarrowUse->getOperand(0) == NarrowDef ||
          NarrowUse->getOperand(1) == NarrowDef) &&
         "bad DU");

  const OverflowingBinaryOperator *OBO =
    cast<OverflowingBinaryOperator>(NarrowUse);
  ExtendKind ExtKind = getExtendKind(NarrowDef);
  // The transform is sound out of the box only when the no-wrap flag on the
  // narrow op matches the kind of extension used to widen the IV def.
  bool CanSignExtend = ExtKind == SignExtended && OBO->hasNoSignedWrap();
  bool CanZeroExtend = ExtKind == ZeroExtended && OBO->hasNoUnsignedWrap();
  // Extension kind applied to the non-IV operand; may be flipped below when we
  // prove an 'add' of a negative constant is really a 'sub nuw'.
  auto AnotherOpExtKind = ExtKind;

  // Check that all uses are either:
  // - narrow def (in case of we are widening the IV increment);
  // - single-input LCSSA Phis;
  // - comparison of the chosen type;
  // - extend of the chosen type (raison d'etre).
  SmallVector<Instruction *, 4> ExtUsers;
  SmallVector<PHINode *, 4> LCSSAPhiUsers;
  SmallVector<ICmpInst *, 4> ICmpUsers;
  for (Use &U : NarrowUse->uses()) {
    Instruction *User = cast<Instruction>(U.getUser());
    if (User == NarrowDef)
      continue;
    if (!L->contains(User)) {
      auto *LCSSAPhi = cast<PHINode>(User);
      // Make sure there is only 1 input, so that we don't have to split
      // critical edges.
      if (LCSSAPhi->getNumOperands() != 1)
        return false;
      LCSSAPhiUsers.push_back(LCSSAPhi);
      continue;
    }
    if (auto *ICmp = dyn_cast<ICmpInst>(User)) {
      auto Pred = ICmp->getPredicate();
      // We have 3 types of predicates: signed, unsigned and equality
      // predicates. For equality, it's legal to widen icmp for either sign and
      // zero extend. For sign extend, we can also do so for signed predicates,
      // likewise for zero extend we can widen icmp for unsigned predicates.
      if (ExtKind == ZeroExtended && ICmpInst::isSigned(Pred))
        return false;
      if (ExtKind == SignExtended && ICmpInst::isUnsigned(Pred))
        return false;
      ICmpUsers.push_back(ICmp);
      continue;
    }
    // Any remaining in-loop user must be an extend of the matching kind to the
    // wide type; otherwise the transform cannot eliminate the truncation.
    if (ExtKind == SignExtended)
      User = dyn_cast<SExtInst>(User);
    else
      User = dyn_cast<ZExtInst>(User);
    if (!User || User->getType() != WideType)
      return false;
    ExtUsers.push_back(User);
  }
  // No extend users at all: the narrow op (other than feeding the IV increment)
  // is effectively dead for our purposes.
  if (ExtUsers.empty()) {
    DeadInsts.emplace_back(NarrowUse);
    return true;
  }

  // We'll prove some facts that should be true in the context of ext users. If
  // there is no users, we are done now. If there are some, pick their common
  // dominator as context.
  Instruction *Context = nullptr;
  for (auto *Ext : ExtUsers) {
    if (!Context || DT->dominates(Ext, Context))
      Context = Ext;
    else if (!DT->dominates(Context, Ext))
      // For users that don't have dominance relation, use common dominator.
      Context =
          DT->findNearestCommonDominator(Context->getParent(), Ext->getParent())
              ->getTerminator();
  }
  assert(Context && "Context not found?");

  if (!CanSignExtend && !CanZeroExtend) {
    // Because InstCombine turns 'sub nuw' to 'add' losing the no-wrap flag, we
    // will most likely not see it. Let's try to prove it.
    if (OpCode != Instruction::Add)
      return false;
    if (ExtKind != ZeroExtended)
      return false;
    const SCEV *LHS = SE->getSCEV(OBO->getOperand(0));
    const SCEV *RHS = SE->getSCEV(OBO->getOperand(1));
    // TODO: Support case for NarrowDef = NarrowUse->getOperand(1).
    if (NarrowUse->getOperand(0) != NarrowDef)
      return false;
    if (!SE->isKnownNegative(RHS))
      return false;
    // LHS >= -RHS at the context point implies the add cannot wrap unsigned
    // when viewed as a subtraction of the (positive) negated RHS.
    bool ProvedSubNUW = SE->isKnownPredicateAt(
        ICmpInst::ICMP_UGE, LHS, SE->getNegativeSCEV(RHS), Context);
    if (!ProvedSubNUW)
      return false;
    // In fact, our 'add' is 'sub nuw'. We will need to widen the 2nd operand as
    // neg(zext(neg(op))), which is basically sext(op).
    AnotherOpExtKind = SignExtended;
  }

  // Verifying that Defining operand is an AddRec
  const SCEV *Op1 = SE->getSCEV(WideDef);
  const SCEVAddRecExpr *AddRecOp1 = dyn_cast<SCEVAddRecExpr>(Op1);
  if (!AddRecOp1 || AddRecOp1->getLoop() != L)
    return false;

  LLVM_DEBUG(dbgs() << "Cloning arithmetic IVUser: " << *NarrowUse << "\n");

  // Generating a widening use instruction. The IV operand is replaced by the
  // already-widened def; the other operand gets an explicit extend.
  // NOTE(review): AnotherOpExtKind (an ExtendKind enum) is passed where
  // createExtendInst takes a bool IsSigned — relies on the enum's underlying
  // values converting correctly; confirm against the enum declaration.
  Value *LHS = (NarrowUse->getOperand(0) == NarrowDef)
                   ? WideDef
                   : createExtendInst(NarrowUse->getOperand(0), WideType,
                                      AnotherOpExtKind, NarrowUse);
  Value *RHS = (NarrowUse->getOperand(1) == NarrowDef)
                   ? WideDef
                   : createExtendInst(NarrowUse->getOperand(1), WideType,
                                      AnotherOpExtKind, NarrowUse);

  auto *NarrowBO = cast<BinaryOperator>(NarrowUse);
  auto *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS, RHS,
                                        NarrowBO->getName());
  IRBuilder<> Builder(NarrowUse);
  Builder.Insert(WideBO);
  WideBO->copyIRFlags(NarrowBO);
  ExtendKindMap[NarrowUse] = ExtKind;

  // The extend users become redundant: the wide binop already produces the
  // wide value they were computing.
  for (Instruction *User : ExtUsers) {
    assert(User->getType() == WideType && "Checked before!");
    LLVM_DEBUG(dbgs() << "INDVARS: eliminating " << *User << " replaced by "
                      << *WideBO << "\n");
    ++NumElimExt;
    User->replaceAllUsesWith(WideBO);
    DeadInsts.emplace_back(User);
  }

  // For LCSSA phis, mirror the widening across the loop exit: a wide phi fed
  // by the wide binop, truncated back to the narrow type for existing users.
  for (PHINode *User : LCSSAPhiUsers) {
    assert(User->getNumOperands() == 1 && "Checked before!");
    Builder.SetInsertPoint(User);
    auto *WidePN =
        Builder.CreatePHI(WideBO->getType(), 1, User->getName() + ".wide");
    BasicBlock *LoopExitingBlock = User->getParent()->getSinglePredecessor();
    assert(LoopExitingBlock && L->contains(LoopExitingBlock) &&
           "Not a LCSSA Phi?");
    WidePN->addIncoming(WideBO, LoopExitingBlock);
    Builder.SetInsertPoint(&*User->getParent()->getFirstInsertionPt());
    auto *TruncPN = Builder.CreateTrunc(WidePN, User->getType());
    User->replaceAllUsesWith(TruncPN);
    DeadInsts.emplace_back(User);
  }

  // Rebuild each compare at the wide type, extending any operand that is not
  // the narrow binop itself with the IV's extension kind (legal per the
  // predicate checks performed when the users were collected).
  for (ICmpInst *User : ICmpUsers) {
    Builder.SetInsertPoint(User);
    auto ExtendedOp = [&](Value * V)->Value * {
      if (V == NarrowUse)
        return WideBO;
      if (ExtKind == ZeroExtended)
        return Builder.CreateZExt(V, WideBO->getType());
      else
        return Builder.CreateSExt(V, WideBO->getType());
    };
    auto Pred = User->getPredicate();
    auto *LHS = ExtendedOp(User->getOperand(0));
    auto *RHS = ExtendedOp(User->getOperand(1));
    auto *WideCmp =
        Builder.CreateICmp(Pred, LHS, RHS, User->getName() + ".wide");
    User->replaceAllUsesWith(WideCmp);
    DeadInsts.emplace_back(User);
  }

  return true;
}
/// Determine whether an individual user of the narrow IV can be widened. If so,
/// return the wide clone of the user.
Instruction *WidenIV::widenIVUse(WidenIV::NarrowIVDefUse DU, SCEVExpander &Rewriter) {
  assert(ExtendKindMap.count(DU.NarrowDef) &&
         "Should already know the kind of extension used to widen NarrowDef");

  // Stop traversing the def-use chain at inner-loop phis or post-loop phis.
  if (PHINode *UsePhi = dyn_cast<PHINode>(DU.NarrowUse)) {
    if (LI->getLoopFor(UsePhi->getParent()) != L) {
      // For LCSSA phis, sink the truncate outside the loop.
      // After SimplifyCFG most loop exit targets have a single predecessor.
      // Otherwise fall back to a truncate within the loop.
      if (UsePhi->getNumOperands() != 1)
        truncateIVUse(DU, DT, LI);
      else {
        // Widening the PHI requires us to insert a trunc. The logical place
        // for this trunc is in the same BB as the PHI. This is not possible if
        // the BB is terminated by a catchswitch.
        if (isa<CatchSwitchInst>(UsePhi->getParent()->getTerminator()))
          return nullptr;

        // Build a wide single-input phi mirroring the LCSSA phi, then truncate
        // it back so existing narrow users keep their type.
        PHINode *WidePhi =
          PHINode::Create(DU.WideDef->getType(), 1, UsePhi->getName() + ".wide",
                          UsePhi);
        WidePhi->addIncoming(DU.WideDef, UsePhi->getIncomingBlock(0));
        IRBuilder<> Builder(&*WidePhi->getParent()->getFirstInsertionPt());
        Value *Trunc = Builder.CreateTrunc(WidePhi, DU.NarrowDef->getType());
        UsePhi->replaceAllUsesWith(Trunc);
        DeadInsts.emplace_back(UsePhi);
        LLVM_DEBUG(dbgs() << "INDVARS: Widen lcssa phi " << *UsePhi << " to "
                          << *WidePhi << "\n");
      }
      return nullptr;
    }
  }

  // This narrow use can be widened by a sext if it's non-negative or its narrow
  // def was widened by a sext. Same for zext.
  auto canWidenBySExt = [&]() {
    return DU.NeverNegative || getExtendKind(DU.NarrowDef) == SignExtended;
  };
  auto canWidenByZExt = [&]() {
    return DU.NeverNegative || getExtendKind(DU.NarrowDef) == ZeroExtended;
  };

  // Our raison d'etre! Eliminate sign and zero extension.
  if ((isa<SExtInst>(DU.NarrowUse) && canWidenBySExt()) ||
      (isa<ZExtInst>(DU.NarrowUse) && canWidenByZExt())) {
    Value *NewDef = DU.WideDef;
    if (DU.NarrowUse->getType() != WideType) {
      unsigned CastWidth = SE->getTypeSizeInBits(DU.NarrowUse->getType());
      unsigned IVWidth = SE->getTypeSizeInBits(WideType);
      if (CastWidth < IVWidth) {
        // The cast isn't as wide as the IV, so insert a Trunc.
        IRBuilder<> Builder(DU.NarrowUse);
        NewDef = Builder.CreateTrunc(DU.WideDef, DU.NarrowUse->getType());
      }
      else {
        // A wider extend was hidden behind a narrower one. This may induce
        // another round of IV widening in which the intermediate IV becomes
        // dead. It should be very rare.
        LLVM_DEBUG(dbgs() << "INDVARS: New IV " << *WidePhi
                          << " not wide enough to subsume " << *DU.NarrowUse
                          << "\n");
        DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, DU.WideDef);
        NewDef = DU.NarrowUse;
      }
    }
    if (NewDef != DU.NarrowUse) {
      LLVM_DEBUG(dbgs() << "INDVARS: eliminating " << *DU.NarrowUse
                        << " replaced by " << *DU.WideDef << "\n");
      ++NumElimExt;
      DU.NarrowUse->replaceAllUsesWith(NewDef);
      DeadInsts.emplace_back(DU.NarrowUse);
    }
    // Now that the extend is gone, we want to expose its uses for potential
    // further simplification. We don't need to directly inform SimplifyIVUsers
    // of the new users, because their parent IV will be processed later as a
    // new loop phi. If we preserved IVUsers analysis, we would also want to
    // push the uses of WideDef here.

    // No further widening is needed. The deceased [sz]ext had done it for us.
    return nullptr;
  }

  // Does this user itself evaluate to a recurrence after widening?
  WidenedRecTy WideAddRec = getExtendedOperandRecurrence(DU);
  if (!WideAddRec.first)
    WideAddRec = getWideRecurrence(DU);

  assert((WideAddRec.first == nullptr) == (WideAddRec.second == Unknown));
  if (!WideAddRec.first) {
    // If use is a loop condition, try to promote the condition instead of
    // truncating the IV first.
    if (widenLoopCompare(DU))
      return nullptr;

    // We are here about to generate a truncate instruction that may hurt
    // performance because the scalar evolution expression computed earlier
    // in WideAddRec.first does not indicate a polynomial induction expression.
    // In that case, look at the operands of the use instruction to determine
    // if we can still widen the use instead of truncating its operand.
    if (widenWithVariantUse(DU))
      return nullptr;

    // This user does not evaluate to a recurrence after widening, so don't
    // follow it. Instead insert a Trunc to kill off the original use,
    // eventually isolating the original narrow IV so it can be removed.
    truncateIVUse(DU, DT, LI);
    return nullptr;
  }
  // Assume block terminators cannot evaluate to a recurrence. We can't insert
  // a Trunc after a terminator if there happens to be a critical edge.
  assert(DU.NarrowUse != DU.NarrowUse->getParent()->getTerminator() &&
         "SCEV is not expected to evaluate a block terminator");

  // Reuse the IV increment that SCEVExpander created as long as it dominates
  // NarrowUse.
  Instruction *WideUse = nullptr;
  if (WideAddRec.first == WideIncExpr &&
      Rewriter.hoistIVInc(WideInc, DU.NarrowUse))
    WideUse = WideInc;
  else {
    WideUse = cloneIVUser(DU, WideAddRec.first);
    if (!WideUse)
      return nullptr;
  }
  // Evaluation of WideAddRec ensured that the narrow expression could be
  // extended outside the loop without overflow. This suggests that the wide use
  // evaluates to the same expression as the extended narrow use, but doesn't
  // absolutely guarantee it. Hence the following failsafe check. In rare cases
  // where it fails, we simply throw away the newly created wide use.
  if (WideAddRec.first != SE->getSCEV(WideUse)) {
    LLVM_DEBUG(dbgs() << "Wide use expression mismatch: " << *WideUse << ": "
                      << *SE->getSCEV(WideUse) << " != " << *WideAddRec.first
                      << "\n");
    DeadInsts.emplace_back(WideUse);
    return nullptr;
  }

  // if we reached this point then we are going to replace
  // DU.NarrowUse with WideUse. Reattach DbgValue then.
  replaceAllDbgUsesWith(*DU.NarrowUse, *WideUse, *WideUse, *DT);

  // Remember how this use was extended so transitive users widen consistently.
  ExtendKindMap[DU.NarrowUse] = WideAddRec.second;
  // Returning WideUse pushes it on the worklist.
  return WideUse;
}
  1587. /// Add eligible users of NarrowDef to NarrowIVUsers.
  1588. void WidenIV::pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef) {
  1589. const SCEV *NarrowSCEV = SE->getSCEV(NarrowDef);
  1590. bool NonNegativeDef =
  1591. SE->isKnownPredicate(ICmpInst::ICMP_SGE, NarrowSCEV,
  1592. SE->getZero(NarrowSCEV->getType()));
  1593. for (User *U : NarrowDef->users()) {
  1594. Instruction *NarrowUser = cast<Instruction>(U);
  1595. // Handle data flow merges and bizarre phi cycles.
  1596. if (!Widened.insert(NarrowUser).second)
  1597. continue;
  1598. bool NonNegativeUse = false;
  1599. if (!NonNegativeDef) {
  1600. // We might have a control-dependent range information for this context.
  1601. if (auto RangeInfo = getPostIncRangeInfo(NarrowDef, NarrowUser))
  1602. NonNegativeUse = RangeInfo->getSignedMin().isNonNegative();
  1603. }
  1604. NarrowIVUsers.emplace_back(NarrowDef, NarrowUser, WideDef,
  1605. NonNegativeDef || NonNegativeUse);
  1606. }
  1607. }
/// Process a single induction variable. First use the SCEVExpander to create a
/// wide induction variable that evaluates to the same recurrence as the
/// original narrow IV. Then use a worklist to forward traverse the narrow IV's
/// def-use chain. After widenIVUse has processed all interesting IV users, the
/// narrow IV will be isolated for removal by DeleteDeadPHIs.
///
/// It would be simpler to delete uses as they are processed, but we must avoid
/// invalidating SCEV expressions.
PHINode *WidenIV::createWideIV(SCEVExpander &Rewriter) {
  // Is this phi an induction variable?
  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(OrigPhi));
  if (!AddRec)
    return nullptr;

  // Widen the induction variable expression.
  const SCEV *WideIVExpr = getExtendKind(OrigPhi) == SignExtended
                               ? SE->getSignExtendExpr(AddRec, WideType)
                               : SE->getZeroExtendExpr(AddRec, WideType);

  assert(SE->getEffectiveSCEVType(WideIVExpr->getType()) == WideType &&
         "Expect the new IV expression to preserve its type");

  // Can the IV be extended outside the loop without overflow?
  // (If extension folded into the AddRec, it can.)
  AddRec = dyn_cast<SCEVAddRecExpr>(WideIVExpr);
  if (!AddRec || AddRec->getLoop() != L)
    return nullptr;

  // An AddRec must have loop-invariant operands. Since this AddRec is
  // materialized by a loop header phi, the expression cannot have any post-loop
  // operands, so they must dominate the loop header.
  assert(
      SE->properlyDominates(AddRec->getStart(), L->getHeader()) &&
      SE->properlyDominates(AddRec->getStepRecurrence(*SE), L->getHeader()) &&
      "Loop header phi recurrence inputs do not dominate the loop");

  // Iterate over IV uses (including transitive ones) looking for IV increments
  // of the form 'add nsw %iv, <const>'. For each increment and each use of
  // the increment calculate control-dependent range information basing on
  // dominating conditions inside of the loop (e.g. a range check inside of the
  // loop). Calculated ranges are stored in PostIncRangeInfos map.
  //
  // Control-dependent range information is later used to prove that a narrow
  // definition is not negative (see pushNarrowIVUsers). It's difficult to do
  // this on demand because when pushNarrowIVUsers needs this information some
  // of the dominating conditions might be already widened.
  if (UsePostIncrementRanges)
    calculatePostIncRanges(OrigPhi);

  // The rewriter provides a value for the desired IV expression. This may
  // either find an existing phi or materialize a new one. Either way, we
  // expect a well-formed cyclic phi-with-increments. i.e. any operand not part
  // of the phi-SCC dominates the loop entry.
  Instruction *InsertPt = &*L->getHeader()->getFirstInsertionPt();
  Value *ExpandInst = Rewriter.expandCodeFor(AddRec, WideType, InsertPt);
  // If the wide phi is not a phi node, for example a cast node, like bitcast,
  // inttoptr, ptrtoint, just skip for now.
  if (!(WidePhi = dyn_cast<PHINode>(ExpandInst))) {
    // if the cast node is an inserted instruction without any user, we should
    // remove it to make sure the pass doesn't touch the function as we can not
    // widen the phi.
    if (ExpandInst->hasNUses(0) &&
        Rewriter.isInsertedInstruction(cast<Instruction>(ExpandInst)))
      DeadInsts.emplace_back(ExpandInst);
    return nullptr;
  }

  // Remembering the WideIV increment generated by SCEVExpander allows
  // widenIVUse to reuse it when widening the narrow IV's increment. We don't
  // employ a general reuse mechanism because the call above is the only call to
  // SCEVExpander. Henceforth, we produce 1-to-1 narrow to wide uses.
  if (BasicBlock *LatchBlock = L->getLoopLatch()) {
    WideInc =
      cast<Instruction>(WidePhi->getIncomingValueForBlock(LatchBlock));
    WideIncExpr = SE->getSCEV(WideInc);
    // Propagate the debug location associated with the original loop increment
    // to the new (widened) increment.
    auto *OrigInc =
      cast<Instruction>(OrigPhi->getIncomingValueForBlock(LatchBlock));
    WideInc->setDebugLoc(OrigInc->getDebugLoc());
  }

  LLVM_DEBUG(dbgs() << "Wide IV: " << *WidePhi << "\n");
  ++NumWidened;

  // Traverse the def-use chain using a worklist starting at the original IV.
  assert(Widened.empty() && NarrowIVUsers.empty() && "expect initial state" );

  Widened.insert(OrigPhi);
  pushNarrowIVUsers(OrigPhi, WidePhi);

  while (!NarrowIVUsers.empty()) {
    WidenIV::NarrowIVDefUse DU = NarrowIVUsers.pop_back_val();

    // Process a def-use edge. This may replace the use, so don't hold a
    // use_iterator across it.
    Instruction *WideUse = widenIVUse(DU, Rewriter);

    // Follow all def-use edges from the previous narrow use.
    if (WideUse)
      pushNarrowIVUsers(DU.NarrowUse, WideUse);

    // widenIVUse may have removed the def-use edge.
    if (DU.NarrowDef->use_empty())
      DeadInsts.emplace_back(DU.NarrowDef);
  }

  // Attach any debug information to the new PHI.
  replaceAllDbgUsesWith(*OrigPhi, *WidePhi, *WidePhi, *DT);

  return WidePhi;
}
/// Calculates control-dependent range for the given def at the given context
/// by looking at dominating conditions inside of the loop
void WidenIV::calculatePostIncRange(Instruction *NarrowDef,
                                    Instruction *NarrowUser) {
  using namespace llvm::PatternMatch;

  Value *NarrowDefLHS;
  const APInt *NarrowDefRHS;
  // Only handle increments of the form 'add nsw %x, <non-negative const>'.
  if (!match(NarrowDef, m_NSWAdd(m_Value(NarrowDefLHS),
                                 m_APInt(NarrowDefRHS))) ||
      !NarrowDefRHS->isNonNegative())
    return;

  // Given a condition known to hold (on TrueDest) or known to fail, constrain
  // the range of NarrowDefLHS, then shift by the nsw-add constant to get a
  // range for NarrowDef itself at this use.
  auto UpdateRangeFromCondition = [&] (Value *Condition,
                                       bool TrueDest) {
    CmpInst::Predicate Pred;
    Value *CmpRHS;
    if (!match(Condition, m_ICmp(Pred, m_Specific(NarrowDefLHS),
                                 m_Value(CmpRHS))))
      return;

    // Invert the predicate when the condition is known false on this path.
    CmpInst::Predicate P =
        TrueDest ? Pred : CmpInst::getInversePredicate(Pred);

    auto CmpRHSRange = SE->getSignedRange(SE->getSCEV(CmpRHS));
    auto CmpConstrainedLHSRange =
            ConstantRange::makeAllowedICmpRegion(P, CmpRHSRange);
    auto NarrowDefRange = CmpConstrainedLHSRange.addWithNoWrap(
        *NarrowDefRHS, OverflowingBinaryOperator::NoSignedWrap);

    updatePostIncRangeInfo(NarrowDef, NarrowUser, NarrowDefRange);
  };

  // Guard intrinsics preceding Ctx in its block assert their condition, so
  // each one can be treated like a dominating true-branch.
  auto UpdateRangeFromGuards = [&](Instruction *Ctx) {
    if (!HasGuards)
      return;

    for (Instruction &I : make_range(Ctx->getIterator().getReverse(),
                                     Ctx->getParent()->rend())) {
      Value *C = nullptr;
      if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>(m_Value(C))))
        UpdateRangeFromCondition(C, /*TrueDest=*/true);
    }
  };

  UpdateRangeFromGuards(NarrowUser);

  BasicBlock *NarrowUserBB = NarrowUser->getParent();
  // If NarrowUserBB is statically unreachable asking dominator queries may
  // yield surprising results. (e.g. the block may not have a dom tree node)
  if (!DT->isReachableFromEntry(NarrowUserBB))
    return;

  // Walk up the dominator tree (staying inside the loop), harvesting range
  // facts from guards and from conditional branches whose taken edge
  // dominates the narrow user.
  for (auto *DTB = (*DT)[NarrowUserBB]->getIDom();
       L->contains(DTB->getBlock());
       DTB = DTB->getIDom()) {
    auto *BB = DTB->getBlock();
    auto *TI = BB->getTerminator();
    UpdateRangeFromGuards(TI);

    auto *BI = dyn_cast<BranchInst>(TI);
    if (!BI || !BI->isConditional())
      continue;

    auto *TrueSuccessor = BI->getSuccessor(0);
    auto *FalseSuccessor = BI->getSuccessor(1);

    // An edge must be a non-critical single edge to safely imply its
    // condition at the dominated block.
    auto DominatesNarrowUser = [this, NarrowUser] (BasicBlockEdge BBE) {
      return BBE.isSingleEdge() &&
             DT->dominates(BBE, NarrowUser->getParent());
    };

    if (DominatesNarrowUser(BasicBlockEdge(BB, TrueSuccessor)))
      UpdateRangeFromCondition(BI->getCondition(), /*TrueDest=*/true);

    if (DominatesNarrowUser(BasicBlockEdge(BB, FalseSuccessor)))
      UpdateRangeFromCondition(BI->getCondition(), /*TrueDest=*/false);
  }
}
  1767. /// Calculates PostIncRangeInfos map for the given IV
  1768. void WidenIV::calculatePostIncRanges(PHINode *OrigPhi) {
  1769. SmallPtrSet<Instruction *, 16> Visited;
  1770. SmallVector<Instruction *, 6> Worklist;
  1771. Worklist.push_back(OrigPhi);
  1772. Visited.insert(OrigPhi);
  1773. while (!Worklist.empty()) {
  1774. Instruction *NarrowDef = Worklist.pop_back_val();
  1775. for (Use &U : NarrowDef->uses()) {
  1776. auto *NarrowUser = cast<Instruction>(U.getUser());
  1777. // Don't go looking outside the current loop.
  1778. auto *NarrowUserLoop = (*LI)[NarrowUser->getParent()];
  1779. if (!NarrowUserLoop || !L->contains(NarrowUserLoop))
  1780. continue;
  1781. if (!Visited.insert(NarrowUser).second)
  1782. continue;
  1783. Worklist.push_back(NarrowUser);
  1784. calculatePostIncRange(NarrowDef, NarrowUser);
  1785. }
  1786. }
  1787. }
  1788. PHINode *llvm::createWideIV(const WideIVInfo &WI,
  1789. LoopInfo *LI, ScalarEvolution *SE, SCEVExpander &Rewriter,
  1790. DominatorTree *DT, SmallVectorImpl<WeakTrackingVH> &DeadInsts,
  1791. unsigned &NumElimExt, unsigned &NumWidened,
  1792. bool HasGuards, bool UsePostIncrementRanges) {
  1793. WidenIV Widener(WI, LI, SE, DT, DeadInsts, HasGuards, UsePostIncrementRanges);
  1794. PHINode *WidePHI = Widener.createWideIV(Rewriter);
  1795. NumElimExt = Widener.getNumElimExt();
  1796. NumWidened = Widener.getNumWidened();
  1797. return WidePHI;
  1798. }