SimplifyIndVar.cpp
  1. //===-- SimplifyIndVar.cpp - Induction variable simplification ------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file implements induction variable simplification. It does
  10. // not define any actual pass or policy, but provides a single function to
  11. // simplify a loop's induction variables based on ScalarEvolution.
  12. //
  13. //===----------------------------------------------------------------------===//
  14. #include "llvm/Transforms/Utils/SimplifyIndVar.h"
  15. #include "llvm/ADT/SmallVector.h"
  16. #include "llvm/ADT/Statistic.h"
  17. #include "llvm/Analysis/LoopInfo.h"
  18. #include "llvm/IR/Dominators.h"
  19. #include "llvm/IR/IRBuilder.h"
  20. #include "llvm/IR/Instructions.h"
  21. #include "llvm/IR/IntrinsicInst.h"
  22. #include "llvm/IR/PatternMatch.h"
  23. #include "llvm/Support/Debug.h"
  24. #include "llvm/Support/raw_ostream.h"
  25. #include "llvm/Transforms/Utils/Local.h"
  26. #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
using namespace llvm;

#define DEBUG_TYPE "indvars"

// Pass statistics: counters for each kind of IV simplification performed.
STATISTIC(NumElimIdentity, "Number of IV identities eliminated");
STATISTIC(NumElimOperand, "Number of IV operands folded into a use");
STATISTIC(NumFoldedUser, "Number of IV users folded into a constant");
STATISTIC(NumElimRem , "Number of IV remainder operations eliminated");
STATISTIC(
    NumSimplifiedSDiv,
    "Number of IV signed division operations converted to unsigned division");
STATISTIC(
    NumSimplifiedSRem,
    "Number of IV signed remainder operations converted to unsigned remainder");
STATISTIC(NumElimCmp , "Number of IV comparisons eliminated");
namespace {

/// This is a utility for simplifying induction variables
/// based on ScalarEvolution. It is the primary instrument of the
/// IndvarSimplify pass, but it may also be directly invoked to cleanup after
/// other loop passes that preserve SCEV.
class SimplifyIndvar {
  Loop *L;
  LoopInfo *LI;
  ScalarEvolution *SE;
  DominatorTree *DT;
  const TargetTransformInfo *TTI;
  // Shared expander used to materialize SCEV expressions as IR.
  SCEVExpander &Rewriter;
  // Out-parameter: instructions rendered dead by simplification are appended
  // here; the caller is responsible for actually deleting them.
  SmallVectorImpl<WeakTrackingVH> &DeadInsts;

  // Set whenever any simplification succeeds.
  bool Changed = false;

public:
  SimplifyIndvar(Loop *Loop, ScalarEvolution *SE, DominatorTree *DT,
                 LoopInfo *LI, const TargetTransformInfo *TTI,
                 SCEVExpander &Rewriter,
                 SmallVectorImpl<WeakTrackingVH> &Dead)
      : L(Loop), LI(LI), SE(SE), DT(DT), TTI(TTI), Rewriter(Rewriter),
        DeadInsts(Dead) {
    assert(LI && "IV simplification requires LoopInfo");
  }

  /// Whether any simplification has been performed so far.
  bool hasChanged() const { return Changed; }

  /// Iteratively perform simplification on a worklist of users of the
  /// specified induction variable. This is the top-level driver that applies
  /// all simplifications to users of an IV.
  void simplifyUsers(PHINode *CurrIV, IVVisitor *V = nullptr);

  /// Fold an IV operand into a use that ignores its low bits.
  Value *foldIVUser(Instruction *UseInst, Instruction *IVOperand);

  bool eliminateIdentitySCEV(Instruction *UseInst, Instruction *IVOperand);
  /// Replace UseInst with an equivalent loop-invariant expansion.
  bool replaceIVUserWithLoopInvariant(Instruction *UseInst);
  /// Eliminate a redundant integer-to-float cast of an IV.
  bool replaceFloatIVWithIntegerIV(Instruction *UseInst);
  bool eliminateOverflowIntrinsic(WithOverflowInst *WO);
  bool eliminateSaturatingIntrinsic(SaturatingInst *SI);
  bool eliminateTrunc(TruncInst *TI);
  bool eliminateIVUser(Instruction *UseInst, Instruction *IVOperand);
  bool makeIVComparisonInvariant(ICmpInst *ICmp, Instruction *IVOperand);
  void eliminateIVComparison(ICmpInst *ICmp, Instruction *IVOperand);
  void simplifyIVRemainder(BinaryOperator *Rem, Instruction *IVOperand,
                           bool IsSigned);
  void replaceRemWithNumerator(BinaryOperator *Rem);
  void replaceRemWithNumeratorOrZero(BinaryOperator *Rem);
  void replaceSRemWithURem(BinaryOperator *Rem);
  bool eliminateSDiv(BinaryOperator *SDiv);
  bool strengthenOverflowingOperation(BinaryOperator *OBO,
                                      Instruction *IVOperand);
  bool strengthenRightShift(BinaryOperator *BO, Instruction *IVOperand);
};

} // namespace
  89. /// Find a point in code which dominates all given instructions. We can safely
  90. /// assume that, whatever fact we can prove at the found point, this fact is
  91. /// also true for each of the given instructions.
  92. static Instruction *findCommonDominator(ArrayRef<Instruction *> Instructions,
  93. DominatorTree &DT) {
  94. Instruction *CommonDom = nullptr;
  95. for (auto *Insn : Instructions)
  96. CommonDom =
  97. CommonDom ? DT.findNearestCommonDominator(CommonDom, Insn) : Insn;
  98. assert(CommonDom && "Common dominator not found?");
  99. return CommonDom;
  100. }
/// Fold an IV operand into its use. This removes increments of an
/// aligned IV when used by a instruction that ignores the low bits.
///
/// IVOperand is guaranteed SCEVable, but UseInst may not be.
///
/// Return the operand of IVOperand for this induction variable if IVOperand can
/// be folded (in case more folding opportunities have been exposed).
/// Otherwise return null.
Value *SimplifyIndvar::foldIVUser(Instruction *UseInst, Instruction *IVOperand) {
  Value *IVSrc = nullptr;
  const unsigned OperIdx = 0;
  const SCEV *FoldedExpr = nullptr;
  bool MustDropExactFlag = false;
  switch (UseInst->getOpcode()) {
  default:
    return nullptr;
  case Instruction::UDiv:
  case Instruction::LShr:
    // We're only interested in the case where we know something about
    // the numerator and have a constant denominator.
    if (IVOperand != UseInst->getOperand(OperIdx) ||
        !isa<ConstantInt>(UseInst->getOperand(1)))
      return nullptr;

    // Attempt to fold a binary operator with constant operand.
    // e.g. ((I + 1) >> 2) => I >> 2
    if (!isa<BinaryOperator>(IVOperand)
        || !isa<ConstantInt>(IVOperand->getOperand(1)))
      return nullptr;

    // Candidate replacement: the IV feeding the constant-RHS binop.
    IVSrc = IVOperand->getOperand(0);
    // IVSrc must be the (SCEVable) IV, since the other operand is const.
    assert(SE->isSCEVable(IVSrc->getType()) && "Expect SCEVable IV operand");

    ConstantInt *D = cast<ConstantInt>(UseInst->getOperand(1));
    if (UseInst->getOpcode() == Instruction::LShr) {
      // Get a constant for the divisor. See createSCEV.
      // Model lshr as udiv by 2^shift; an over-wide shift cannot be modeled.
      uint32_t BitWidth = cast<IntegerType>(UseInst->getType())->getBitWidth();
      if (D->getValue().uge(BitWidth))
        return nullptr;

      D = ConstantInt::get(UseInst->getContext(),
                           APInt::getOneBitSet(BitWidth, D->getZExtValue()));
    }
    const auto *LHS = SE->getSCEV(IVSrc);
    const auto *RHS = SE->getSCEV(D);
    FoldedExpr = SE->getUDivExpr(LHS, RHS);
    // We might have 'exact' flag set at this point which will no longer be
    // correct after we make the replacement.
    if (UseInst->isExact() && LHS != SE->getMulExpr(FoldedExpr, RHS))
      MustDropExactFlag = true;
  }
  // We have something that might fold it's operand. Compare SCEVs.
  if (!SE->isSCEVable(UseInst->getType()))
    return nullptr;

  // Bypass the operand if SCEV can prove it has no effect.
  // The fold is only valid if the use's value is unchanged by skipping
  // IVOperand, i.e. its SCEV already equals the folded expression.
  if (SE->getSCEV(UseInst) != FoldedExpr)
    return nullptr;

  LLVM_DEBUG(dbgs() << "INDVARS: Eliminated IV operand: " << *IVOperand
                    << " -> " << *UseInst << '\n');

  UseInst->setOperand(OperIdx, IVSrc);
  assert(SE->getSCEV(UseInst) == FoldedExpr && "bad SCEV with folded oper");

  if (MustDropExactFlag)
    UseInst->dropPoisonGeneratingFlags();

  ++NumElimOperand;
  Changed = true;
  // If the binop lost its last use, queue it for deletion.
  if (IVOperand->use_empty())
    DeadInsts.emplace_back(IVOperand);
  return IVSrc;
}
/// Try to rewrite an IV comparison so that both operands are loop-invariant
/// expressions expanded in the preheader. Returns true on success.
bool SimplifyIndvar::makeIVComparisonInvariant(ICmpInst *ICmp,
                                               Instruction *IVOperand) {
  auto *Preheader = L->getLoopPreheader();
  if (!Preheader)
    return false;
  unsigned IVOperIdx = 0;
  ICmpInst::Predicate Pred = ICmp->getPredicate();
  if (IVOperand != ICmp->getOperand(0)) {
    // Swapped
    assert(IVOperand == ICmp->getOperand(1) && "Can't find IVOperand");
    IVOperIdx = 1;
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  // Get the SCEVs for the ICmp operands (in the specific context of the
  // current loop)
  const Loop *ICmpLoop = LI->getLoopFor(ICmp->getParent());
  const SCEV *S = SE->getSCEVAtScope(ICmp->getOperand(IVOperIdx), ICmpLoop);
  const SCEV *X = SE->getSCEVAtScope(ICmp->getOperand(1 - IVOperIdx), ICmpLoop);

  // Ask SCEV for an equivalent predicate whose operands are invariant in L.
  auto LIP = SE->getLoopInvariantPredicate(Pred, S, X, L, ICmp);
  if (!LIP)
    return false;
  ICmpInst::Predicate InvariantPredicate = LIP->Pred;
  const SCEV *InvariantLHS = LIP->LHS;
  const SCEV *InvariantRHS = LIP->RHS;

  // Do not generate something ridiculous.
  auto *PHTerm = Preheader->getTerminator();
  if (Rewriter.isHighCostExpansion({ InvariantLHS, InvariantRHS }, L,
                                   2 * SCEVCheapExpansionBudget, TTI, PHTerm))
    return false;
  // Expand both invariant operands right before the preheader terminator.
  auto *NewLHS =
      Rewriter.expandCodeFor(InvariantLHS, IVOperand->getType(), PHTerm);
  auto *NewRHS =
      Rewriter.expandCodeFor(InvariantRHS, IVOperand->getType(), PHTerm);
  LLVM_DEBUG(dbgs() << "INDVARS: Simplified comparison: " << *ICmp << '\n');
  ICmp->setPredicate(InvariantPredicate);
  ICmp->setOperand(0, NewLHS);
  ICmp->setOperand(1, NewRHS);
  return true;
}
/// SimplifyIVUsers helper for eliminating useless
/// comparisons against an induction variable.
void SimplifyIndvar::eliminateIVComparison(ICmpInst *ICmp,
                                           Instruction *IVOperand) {
  unsigned IVOperIdx = 0;
  ICmpInst::Predicate Pred = ICmp->getPredicate();
  // Remember the unswapped predicate: the canonicalization branch below must
  // work with the instruction's own predicate, not the swapped one.
  ICmpInst::Predicate OriginalPred = Pred;
  if (IVOperand != ICmp->getOperand(0)) {
    // Swapped
    assert(IVOperand == ICmp->getOperand(1) && "Can't find IVOperand");
    IVOperIdx = 1;
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  // Get the SCEVs for the ICmp operands (in the specific context of the
  // current loop)
  const Loop *ICmpLoop = LI->getLoopFor(ICmp->getParent());
  const SCEV *S = SE->getSCEVAtScope(ICmp->getOperand(IVOperIdx), ICmpLoop);
  const SCEV *X = SE->getSCEVAtScope(ICmp->getOperand(1 - IVOperIdx), ICmpLoop);

  // If the condition is always true or always false in the given context,
  // replace it with a constant value.
  // Evaluate at the common dominator of all users: a fact proven there holds
  // at every use of the compare.
  SmallVector<Instruction *, 4> Users;
  for (auto *U : ICmp->users())
    Users.push_back(cast<Instruction>(U));
  const Instruction *CtxI = findCommonDominator(Users, *DT);
  if (auto Ev = SE->evaluatePredicateAt(Pred, S, X, CtxI)) {
    SE->forgetValue(ICmp);
    ICmp->replaceAllUsesWith(ConstantInt::getBool(ICmp->getContext(), *Ev));
    DeadInsts.emplace_back(ICmp);
    LLVM_DEBUG(dbgs() << "INDVARS: Eliminated comparison: " << *ICmp << '\n');
  } else if (makeIVComparisonInvariant(ICmp, IVOperand)) {
    // fallthrough to end of function
  } else if (ICmpInst::isSigned(OriginalPred) &&
             SE->isKnownNonNegative(S) && SE->isKnownNonNegative(X)) {
    // If we were unable to make anything above, all we can is to canonicalize
    // the comparison hoping that it will open the doors for other
    // optimizations. If we find out that we compare two non-negative values,
    // we turn the instruction's predicate to its unsigned version. Note that
    // we cannot rely on Pred here unless we check if we have swapped it.
    assert(ICmp->getPredicate() == OriginalPred && "Predicate changed?");
    LLVM_DEBUG(dbgs() << "INDVARS: Turn to unsigned comparison: " << *ICmp
                      << '\n');
    ICmp->setPredicate(ICmpInst::getUnsignedPredicate(OriginalPred));
  } else
    return;

  ++NumElimCmp;
  Changed = true;
}
  253. bool SimplifyIndvar::eliminateSDiv(BinaryOperator *SDiv) {
  254. // Get the SCEVs for the ICmp operands.
  255. auto *N = SE->getSCEV(SDiv->getOperand(0));
  256. auto *D = SE->getSCEV(SDiv->getOperand(1));
  257. // Simplify unnecessary loops away.
  258. const Loop *L = LI->getLoopFor(SDiv->getParent());
  259. N = SE->getSCEVAtScope(N, L);
  260. D = SE->getSCEVAtScope(D, L);
  261. // Replace sdiv by udiv if both of the operands are non-negative
  262. if (SE->isKnownNonNegative(N) && SE->isKnownNonNegative(D)) {
  263. auto *UDiv = BinaryOperator::Create(
  264. BinaryOperator::UDiv, SDiv->getOperand(0), SDiv->getOperand(1),
  265. SDiv->getName() + ".udiv", SDiv);
  266. UDiv->setIsExact(SDiv->isExact());
  267. SDiv->replaceAllUsesWith(UDiv);
  268. LLVM_DEBUG(dbgs() << "INDVARS: Simplified sdiv: " << *SDiv << '\n');
  269. ++NumSimplifiedSDiv;
  270. Changed = true;
  271. DeadInsts.push_back(SDiv);
  272. return true;
  273. }
  274. return false;
  275. }
  276. // i %s n -> i %u n if i >= 0 and n >= 0
  277. void SimplifyIndvar::replaceSRemWithURem(BinaryOperator *Rem) {
  278. auto *N = Rem->getOperand(0), *D = Rem->getOperand(1);
  279. auto *URem = BinaryOperator::Create(BinaryOperator::URem, N, D,
  280. Rem->getName() + ".urem", Rem);
  281. Rem->replaceAllUsesWith(URem);
  282. LLVM_DEBUG(dbgs() << "INDVARS: Simplified srem: " << *Rem << '\n');
  283. ++NumSimplifiedSRem;
  284. Changed = true;
  285. DeadInsts.emplace_back(Rem);
  286. }
  287. // i % n --> i if i is in [0,n).
  288. void SimplifyIndvar::replaceRemWithNumerator(BinaryOperator *Rem) {
  289. Rem->replaceAllUsesWith(Rem->getOperand(0));
  290. LLVM_DEBUG(dbgs() << "INDVARS: Simplified rem: " << *Rem << '\n');
  291. ++NumElimRem;
  292. Changed = true;
  293. DeadInsts.emplace_back(Rem);
  294. }
  295. // (i+1) % n --> (i+1)==n?0:(i+1) if i is in [0,n).
  296. void SimplifyIndvar::replaceRemWithNumeratorOrZero(BinaryOperator *Rem) {
  297. auto *T = Rem->getType();
  298. auto *N = Rem->getOperand(0), *D = Rem->getOperand(1);
  299. ICmpInst *ICmp = new ICmpInst(Rem, ICmpInst::ICMP_EQ, N, D);
  300. SelectInst *Sel =
  301. SelectInst::Create(ICmp, ConstantInt::get(T, 0), N, "iv.rem", Rem);
  302. Rem->replaceAllUsesWith(Sel);
  303. LLVM_DEBUG(dbgs() << "INDVARS: Simplified rem: " << *Rem << '\n');
  304. ++NumElimRem;
  305. Changed = true;
  306. DeadInsts.emplace_back(Rem);
  307. }
/// SimplifyIVUsers helper for eliminating useless remainder operations
/// operating on an induction variable or replacing srem by urem.
void SimplifyIndvar::simplifyIVRemainder(BinaryOperator *Rem,
                                         Instruction *IVOperand,
                                         bool IsSigned) {
  auto *NValue = Rem->getOperand(0);
  auto *DValue = Rem->getOperand(1);
  // We're only interested in the case where we know something about
  // the numerator, unless it is a srem, because we want to replace srem by urem
  // in general.
  bool UsedAsNumerator = IVOperand == NValue;
  if (!UsedAsNumerator && !IsSigned)
    return;

  const SCEV *N = SE->getSCEV(NValue);

  // Simplify unnecessary loops away.
  const Loop *ICmpLoop = LI->getLoopFor(Rem->getParent());
  N = SE->getSCEVAtScope(N, ICmpLoop);

  // For urem, non-negativity is implied; for srem we must prove it.
  bool IsNumeratorNonNegative = !IsSigned || SE->isKnownNonNegative(N);

  // Do not proceed if the Numerator may be negative
  if (!IsNumeratorNonNegative)
    return;

  const SCEV *D = SE->getSCEV(DValue);
  D = SE->getSCEVAtScope(D, ICmpLoop);

  if (UsedAsNumerator) {
    auto LT = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
    // N < D: the remainder is just N.
    if (SE->isKnownPredicate(LT, N, D)) {
      replaceRemWithNumerator(Rem);
      return;
    }

    // N - 1 < D (i.e. N <= D): the remainder is N, or 0 exactly when N == D.
    auto *T = Rem->getType();
    const auto *NLessOne = SE->getMinusSCEV(N, SE->getOne(T));
    if (SE->isKnownPredicate(LT, NLessOne, D)) {
      replaceRemWithNumeratorOrZero(Rem);
      return;
    }
  }

  // Try to replace SRem with URem, if both N and D are known non-negative.
  // Since we had already check N, we only need to check D now
  if (!IsSigned || !SE->isKnownNonNegative(D))
    return;

  replaceSRemWithURem(Rem);
}
/// Replace a {s,u}{add,sub,mul}.with.overflow intrinsic with a plain binary
/// operator when SCEV proves the operation cannot overflow. The overflow bit
/// becomes the constant false. Returns true on success.
bool SimplifyIndvar::eliminateOverflowIntrinsic(WithOverflowInst *WO) {
  const SCEV *LHS = SE->getSCEV(WO->getLHS());
  const SCEV *RHS = SE->getSCEV(WO->getRHS());
  if (!SE->willNotOverflow(WO->getBinaryOp(), WO->isSigned(), LHS, RHS))
    return false;

  // Proved no overflow, nuke the overflow check and, if possible, the overflow
  // intrinsic as well.
  BinaryOperator *NewResult = BinaryOperator::Create(
      WO->getBinaryOp(), WO->getLHS(), WO->getRHS(), "", WO);

  // The no-wrap flag is justified by the overflow proof above.
  if (WO->isSigned())
    NewResult->setHasNoSignedWrap(true);
  else
    NewResult->setHasNoUnsignedWrap(true);

  // Collect the extractvalue users first; erasing while iterating the use
  // list would invalidate the iterator.
  SmallVector<ExtractValueInst *, 4> ToDelete;

  for (auto *U : WO->users()) {
    if (auto *EVI = dyn_cast<ExtractValueInst>(U)) {
      if (EVI->getIndices()[0] == 1)
        // Index 1 is the overflow bit, now known false.
        EVI->replaceAllUsesWith(ConstantInt::getFalse(WO->getContext()));
      else {
        assert(EVI->getIndices()[0] == 0 && "Only two possibilities!");
        EVI->replaceAllUsesWith(NewResult);
      }
      ToDelete.push_back(EVI);
    }
  }

  for (auto *EVI : ToDelete)
    EVI->eraseFromParent();

  if (WO->use_empty())
    WO->eraseFromParent();

  Changed = true;
  return true;
}
  382. bool SimplifyIndvar::eliminateSaturatingIntrinsic(SaturatingInst *SI) {
  383. const SCEV *LHS = SE->getSCEV(SI->getLHS());
  384. const SCEV *RHS = SE->getSCEV(SI->getRHS());
  385. if (!SE->willNotOverflow(SI->getBinaryOp(), SI->isSigned(), LHS, RHS))
  386. return false;
  387. BinaryOperator *BO = BinaryOperator::Create(
  388. SI->getBinaryOp(), SI->getLHS(), SI->getRHS(), SI->getName(), SI);
  389. if (SI->isSigned())
  390. BO->setHasNoSignedWrap();
  391. else
  392. BO->setHasNoUnsignedWrap();
  393. SI->replaceAllUsesWith(BO);
  394. DeadInsts.emplace_back(SI);
  395. Changed = true;
  396. return true;
  397. }
/// Remove a trunc of an IV by widening every comparison that uses it.
/// Returns true if the trunc (and all its compare users) were eliminated.
bool SimplifyIndvar::eliminateTrunc(TruncInst *TI) {
  // It is always legal to replace
  //   icmp <pred> i32 trunc(iv), n
  // with
  //   icmp <pred> i64 sext(trunc(iv)), sext(n), if pred is signed predicate.
  // Or with
  //   icmp <pred> i64 zext(trunc(iv)), zext(n), if pred is unsigned predicate.
  // Or with either of these if pred is an equality predicate.
  //
  // If we can prove that iv == sext(trunc(iv)) or iv == zext(trunc(iv)) for
  // every comparison which uses trunc, it means that we can replace each of
  // them with comparison of iv against sext/zext(n). We no longer need trunc
  // after that.
  //
  // TODO: Should we do this if we can widen *some* comparisons, but not all
  // of them? Sometimes it is enough to enable other optimizations, but the
  // trunc instruction will stay in the loop.
  Value *IV = TI->getOperand(0);
  Type *IVTy = IV->getType();
  const SCEV *IVSCEV = SE->getSCEV(IV);
  const SCEV *TISCEV = SE->getSCEV(TI);

  // Check if iv == zext(trunc(iv)) and if iv == sext(trunc(iv)). If so, we can
  // get rid of trunc
  bool DoesSExtCollapse = false;
  bool DoesZExtCollapse = false;
  if (IVSCEV == SE->getSignExtendExpr(TISCEV, IVTy))
    DoesSExtCollapse = true;
  if (IVSCEV == SE->getZeroExtendExpr(TISCEV, IVTy))
    DoesZExtCollapse = true;

  // If neither sext nor zext does collapse, it is not profitable to do any
  // transform. Bail.
  if (!DoesSExtCollapse && !DoesZExtCollapse)
    return false;

  // Collect users of the trunc that look like comparisons against invariants.
  // Bail if we find something different.
  SmallVector<ICmpInst *, 4> ICmpUsers;
  for (auto *U : TI->users()) {
    // We don't care about users in unreachable blocks.
    if (isa<Instruction>(U) &&
        !DT->isReachableFromEntry(cast<Instruction>(U)->getParent()))
      continue;
    ICmpInst *ICI = dyn_cast<ICmpInst>(U);
    if (!ICI) return false;
    assert(L->contains(ICI->getParent()) && "LCSSA form broken?");
    // One side must be the trunc, the other a loop invariant.
    if (!(ICI->getOperand(0) == TI && L->isLoopInvariant(ICI->getOperand(1))) &&
        !(ICI->getOperand(1) == TI && L->isLoopInvariant(ICI->getOperand(0))))
      return false;
    // If we cannot get rid of trunc, bail.
    if (ICI->isSigned() && !DoesSExtCollapse)
      return false;
    if (ICI->isUnsigned() && !DoesZExtCollapse)
      return false;
    // For equality, either signed or unsigned works.
    ICmpUsers.push_back(ICI);
  }

  auto CanUseZExt = [&](ICmpInst *ICI) {
    // Unsigned comparison can be widened as unsigned.
    if (ICI->isUnsigned())
      return true;
    // Is it profitable to do zext?
    if (!DoesZExtCollapse)
      return false;
    // For equality, we can safely zext both parts.
    if (ICI->isEquality())
      return true;
    // Otherwise we can only use zext when comparing two non-negative or two
    // negative values. But in practice, we will never pass DoesZExtCollapse
    // check for a negative value, because zext(trunc(x)) is non-negative. So
    // it only make sense to check for non-negativity here.
    const SCEV *SCEVOP1 = SE->getSCEV(ICI->getOperand(0));
    const SCEV *SCEVOP2 = SE->getSCEV(ICI->getOperand(1));
    return SE->isKnownNonNegative(SCEVOP1) && SE->isKnownNonNegative(SCEVOP2);
  };

  // Replace all comparisons against trunc with comparisons against IV.
  for (auto *ICI : ICmpUsers) {
    bool IsSwapped = L->isLoopInvariant(ICI->getOperand(0));
    auto *Op1 = IsSwapped ? ICI->getOperand(0) : ICI->getOperand(1);
    Instruction *Ext = nullptr;
    // For signed/unsigned predicate, replace the old comparison with comparison
    // of immediate IV against sext/zext of the invariant argument. If we can
    // use either sext or zext (i.e. we are dealing with equality predicate),
    // then prefer zext as a more canonical form.
    // TODO: If we see a signed comparison which can be turned into unsigned,
    // we can do it here for canonicalization purposes.
    ICmpInst::Predicate Pred = ICI->getPredicate();
    if (IsSwapped) Pred = ICmpInst::getSwappedPredicate(Pred);
    if (CanUseZExt(ICI)) {
      assert(DoesZExtCollapse && "Unprofitable zext?");
      Ext = new ZExtInst(Op1, IVTy, "zext", ICI);
      Pred = ICmpInst::getUnsignedPredicate(Pred);
    } else {
      assert(DoesSExtCollapse && "Unprofitable sext?");
      Ext = new SExtInst(Op1, IVTy, "sext", ICI);
      assert(Pred == ICmpInst::getSignedPredicate(Pred) && "Must be signed!");
    }
    bool Changed;
    // Hoist the extension of the invariant out of the loop when possible.
    L->makeLoopInvariant(Ext, Changed);
    (void)Changed;
    ICmpInst *NewICI = new ICmpInst(ICI, Pred, IV, Ext);
    ICI->replaceAllUsesWith(NewICI);
    DeadInsts.emplace_back(ICI);
  }

  // Trunc no longer needed.
  TI->replaceAllUsesWith(PoisonValue::get(TI->getType()));
  DeadInsts.emplace_back(TI);
  return true;
}
  505. /// Eliminate an operation that consumes a simple IV and has no observable
  506. /// side-effect given the range of IV values. IVOperand is guaranteed SCEVable,
  507. /// but UseInst may not be.
  508. bool SimplifyIndvar::eliminateIVUser(Instruction *UseInst,
  509. Instruction *IVOperand) {
  510. if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
  511. eliminateIVComparison(ICmp, IVOperand);
  512. return true;
  513. }
  514. if (BinaryOperator *Bin = dyn_cast<BinaryOperator>(UseInst)) {
  515. bool IsSRem = Bin->getOpcode() == Instruction::SRem;
  516. if (IsSRem || Bin->getOpcode() == Instruction::URem) {
  517. simplifyIVRemainder(Bin, IVOperand, IsSRem);
  518. return true;
  519. }
  520. if (Bin->getOpcode() == Instruction::SDiv)
  521. return eliminateSDiv(Bin);
  522. }
  523. if (auto *WO = dyn_cast<WithOverflowInst>(UseInst))
  524. if (eliminateOverflowIntrinsic(WO))
  525. return true;
  526. if (auto *SI = dyn_cast<SaturatingInst>(UseInst))
  527. if (eliminateSaturatingIntrinsic(SI))
  528. return true;
  529. if (auto *TI = dyn_cast<TruncInst>(UseInst))
  530. if (eliminateTrunc(TI))
  531. return true;
  532. if (eliminateIdentitySCEV(UseInst, IVOperand))
  533. return true;
  534. return false;
  535. }
  536. static Instruction *GetLoopInvariantInsertPosition(Loop *L, Instruction *Hint) {
  537. if (auto *BB = L->getLoopPreheader())
  538. return BB->getTerminator();
  539. return Hint;
  540. }
/// Replace the UseInst with a loop invariant expression if it is safe.
bool SimplifyIndvar::replaceIVUserWithLoopInvariant(Instruction *I) {
  if (!SE->isSCEVable(I->getType()))
    return false;

  // Get the symbolic expression for this instruction.
  const SCEV *S = SE->getSCEV(I);

  if (!SE->isLoopInvariant(S, L))
    return false;

  // Do not generate something ridiculous even if S is loop invariant.
  if (Rewriter.isHighCostExpansion(S, L, SCEVCheapExpansionBudget, TTI, I))
    return false;

  auto *IP = GetLoopInvariantInsertPosition(L, I);

  // Expanding S at IP must be safe to speculate: the expansion may execute on
  // paths where the original instruction would not have.
  if (!Rewriter.isSafeToExpandAt(S, IP)) {
    LLVM_DEBUG(dbgs() << "INDVARS: Can not replace IV user: " << *I
                      << " with non-speculable loop invariant: " << *S << '\n');
    return false;
  }

  auto *Invariant = Rewriter.expandCodeFor(S, I->getType(), IP);

  I->replaceAllUsesWith(Invariant);
  LLVM_DEBUG(dbgs() << "INDVARS: Replace IV user: " << *I
                    << " with loop invariant: " << *S << '\n');

  ++NumFoldedUser;
  Changed = true;
  // I is now dead; queue it for removal by the caller.
  DeadInsts.emplace_back(I);
  return true;
}
/// Eliminate redundant type cast between integer and float.
bool SimplifyIndvar::replaceFloatIVWithIntegerIV(Instruction *UseInst) {
  if (UseInst->getOpcode() != CastInst::SIToFP &&
      UseInst->getOpcode() != CastInst::UIToFP)
    return false;

  Instruction *IVOperand = cast<Instruction>(UseInst->getOperand(0));
  // Get the symbolic expression for this instruction.
  const SCEV *IV = SE->getSCEV(IVOperand);
  // Number of significant bits needed to represent every value the integer IV
  // can take, using the signedness that matches the int-to-float cast.
  unsigned MaskBits;
  if (UseInst->getOpcode() == CastInst::SIToFP)
    MaskBits = SE->getSignedRange(IV).getMinSignedBits();
  else
    MaskBits = SE->getUnsignedRange(IV).getActiveBits();
  unsigned DestNumSigBits = UseInst->getType()->getFPMantissaWidth();
  // If every IV value fits in the FP mantissa, the int->float->int round trip
  // is lossless, so each fptosi/fptoui user can read the integer IV directly.
  if (MaskBits <= DestNumSigBits) {
    for (User *U : UseInst->users()) {
      // Match for fptosi/fptoui of sitofp and with same type.
      auto *CI = dyn_cast<CastInst>(U);
      if (!CI)
        continue;

      CastInst::CastOps Opcode = CI->getOpcode();
      if (Opcode != CastInst::FPToSI && Opcode != CastInst::FPToUI)
        continue;

      Value *Conv = nullptr;
      if (IVOperand->getType() != CI->getType()) {
        // The integer IV and the user's result differ in width; bridge the
        // gap with a trunc/zext/sext as appropriate.
        IRBuilder<> Builder(CI);
        StringRef Name = IVOperand->getName();
        // To match InstCombine logic, we only need sext if both fptosi and
        // sitofp are used. If one of them is unsigned, then we can use zext.
        if (SE->getTypeSizeInBits(IVOperand->getType()) >
            SE->getTypeSizeInBits(CI->getType())) {
          Conv = Builder.CreateTrunc(IVOperand, CI->getType(), Name + ".trunc");
        } else if (Opcode == CastInst::FPToUI ||
                   UseInst->getOpcode() == CastInst::UIToFP) {
          Conv = Builder.CreateZExt(IVOperand, CI->getType(), Name + ".zext");
        } else {
          Conv = Builder.CreateSExt(IVOperand, CI->getType(), Name + ".sext");
        }
      } else
        Conv = IVOperand;

      CI->replaceAllUsesWith(Conv);
      DeadInsts.push_back(CI);
      LLVM_DEBUG(dbgs() << "INDVARS: Replace IV user: " << *CI
                        << " with: " << *Conv << '\n');

      ++NumFoldedUser;
      Changed = true;
    }
  }

  return Changed;
}
/// Eliminate any operation that SCEV can prove is an identity function.
bool SimplifyIndvar::eliminateIdentitySCEV(Instruction *UseInst,
                                           Instruction *IVOperand) {
  // UseInst is an identity of IVOperand only if both are SCEVable, have the
  // same type, and map to the same SCEV expression.
  if (!SE->isSCEVable(UseInst->getType()) ||
      (UseInst->getType() != IVOperand->getType()) ||
      (SE->getSCEV(UseInst) != SE->getSCEV(IVOperand)))
    return false;

  // getSCEV(X) == getSCEV(Y) does not guarantee that X and Y are related in the
  // dominator tree, even if X is an operand to Y. For instance, in
  //
  //     %iv = phi i32 {0,+,1}
  //     br %cond, label %left, label %merge
  //
  //   left:
  //     %X = add i32 %iv, 0
  //     br label %merge
  //
  //   merge:
  //     %M = phi (%X, %iv)
  //
  // getSCEV(%M) == getSCEV(%X) == {0,+,1}, but %X does not dominate %M, and
  // %M.replaceAllUsesWith(%X) would be incorrect.

  if (isa<PHINode>(UseInst))
    // If UseInst is not a PHI node then we know that IVOperand dominates
    // UseInst directly from the legality of SSA.
    if (!DT || !DT->dominates(IVOperand, UseInst))
      return false;

  // Replacing a phi may break LCSSA form; only proceed when it is preserved.
  if (!LI->replacementPreservesLCSSAForm(UseInst, IVOperand))
    return false;

  LLVM_DEBUG(dbgs() << "INDVARS: Eliminated identity: " << *UseInst << '\n');

  // Drop cached SCEV info before rewriting the IR that it was derived from.
  SE->forgetValue(UseInst);
  UseInst->replaceAllUsesWith(IVOperand);
  ++NumElimIdentity;
  Changed = true;
  DeadInsts.emplace_back(UseInst);
  return true;
}
  654. /// Annotate BO with nsw / nuw if it provably does not signed-overflow /
  655. /// unsigned-overflow. Returns true if anything changed, false otherwise.
  656. bool SimplifyIndvar::strengthenOverflowingOperation(BinaryOperator *BO,
  657. Instruction *IVOperand) {
  658. auto Flags = SE->getStrengthenedNoWrapFlagsFromBinOp(
  659. cast<OverflowingBinaryOperator>(BO));
  660. if (!Flags)
  661. return false;
  662. BO->setHasNoUnsignedWrap(ScalarEvolution::maskFlags(*Flags, SCEV::FlagNUW) ==
  663. SCEV::FlagNUW);
  664. BO->setHasNoSignedWrap(ScalarEvolution::maskFlags(*Flags, SCEV::FlagNSW) ==
  665. SCEV::FlagNSW);
  666. // The getStrengthenedNoWrapFlagsFromBinOp() check inferred additional nowrap
  667. // flags on addrecs while performing zero/sign extensions. We could call
  668. // forgetValue() here to make sure those flags also propagate to any other
  669. // SCEV expressions based on the addrec. However, this can have pathological
  670. // compile-time impact, see https://bugs.llvm.org/show_bug.cgi?id=50384.
  671. return true;
  672. }
/// Annotate the Shr in (X << IVOperand) >> C as exact using the
/// information from the IV's range. Returns true if anything changed, false
/// otherwise.
bool SimplifyIndvar::strengthenRightShift(BinaryOperator *BO,
                                          Instruction *IVOperand) {
  using namespace llvm::PatternMatch;

  if (BO->getOpcode() == Instruction::Shl) {
    bool Changed = false;
    // Unsigned range of the shift amount IVOperand.
    ConstantRange IVRange = SE->getUnsignedRange(SE->getSCEV(IVOperand));
    for (auto *U : BO->users()) {
      const APInt *C;
      if (match(U,
                m_AShr(m_Shl(m_Value(), m_Specific(IVOperand)), m_APInt(C))) ||
          match(U,
                m_LShr(m_Shl(m_Value(), m_Specific(IVOperand)), m_APInt(C)))) {
        BinaryOperator *Shr = cast<BinaryOperator>(U);
        // If the value was always shifted left by at least C bits, shifting
        // right by C cannot discard any set bits, so the shr is exact.
        if (!Shr->isExact() && IVRange.getUnsignedMin().uge(*C)) {
          Shr->setIsExact(true);
          Changed = true;
        }
      }
    }
    return Changed;
  }
  return false;
}
  699. /// Add all uses of Def to the current IV's worklist.
  700. static void pushIVUsers(
  701. Instruction *Def, Loop *L,
  702. SmallPtrSet<Instruction*,16> &Simplified,
  703. SmallVectorImpl< std::pair<Instruction*,Instruction*> > &SimpleIVUsers) {
  704. for (User *U : Def->users()) {
  705. Instruction *UI = cast<Instruction>(U);
  706. // Avoid infinite or exponential worklist processing.
  707. // Also ensure unique worklist users.
  708. // If Def is a LoopPhi, it may not be in the Simplified set, so check for
  709. // self edges first.
  710. if (UI == Def)
  711. continue;
  712. // Only change the current Loop, do not change the other parts (e.g. other
  713. // Loops).
  714. if (!L->contains(UI))
  715. continue;
  716. // Do not push the same instruction more than once.
  717. if (!Simplified.insert(UI).second)
  718. continue;
  719. SimpleIVUsers.push_back(std::make_pair(UI, Def));
  720. }
  721. }
  722. /// Return true if this instruction generates a simple SCEV
  723. /// expression in terms of that IV.
  724. ///
  725. /// This is similar to IVUsers' isInteresting() but processes each instruction
  726. /// non-recursively when the operand is already known to be a simpleIVUser.
  727. ///
  728. static bool isSimpleIVUser(Instruction *I, const Loop *L, ScalarEvolution *SE) {
  729. if (!SE->isSCEVable(I->getType()))
  730. return false;
  731. // Get the symbolic expression for this instruction.
  732. const SCEV *S = SE->getSCEV(I);
  733. // Only consider affine recurrences.
  734. const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S);
  735. if (AR && AR->getLoop() == L)
  736. return true;
  737. return false;
  738. }
/// Iteratively perform simplification on a worklist of users
/// of the specified induction variable. Each successive simplification may push
/// more users which may themselves be candidates for simplification.
///
/// This algorithm does not require IVUsers analysis. Instead, it simplifies
/// instructions in-place during analysis. Rather than rewriting induction
/// variables bottom-up from their users, it transforms a chain of IVUsers
/// top-down, updating the IR only when it encounters a clear optimization
/// opportunity.
///
/// Once DisableIVRewrite is default, LSR will be the only client of IVUsers.
///
void SimplifyIndvar::simplifyUsers(PHINode *CurrIV, IVVisitor *V) {
  if (!SE->isSCEVable(CurrIV->getType()))
    return;

  // Instructions processed by SimplifyIndvar for CurrIV.
  SmallPtrSet<Instruction*,16> Simplified;

  // Use-def pairs if IV users waiting to be processed for CurrIV.
  SmallVector<std::pair<Instruction*, Instruction*>, 8> SimpleIVUsers;

  // Push users of the current LoopPhi. In rare cases, pushIVUsers may be
  // called multiple times for the same LoopPhi. This is the proper thing to
  // do for loop header phis that use each other.
  pushIVUsers(CurrIV, L, Simplified, SimpleIVUsers);

  while (!SimpleIVUsers.empty()) {
    std::pair<Instruction*, Instruction*> UseOper =
      SimpleIVUsers.pop_back_val();
    Instruction *UseInst = UseOper.first;

    // If a user of the IndVar is trivially dead, we prefer just to mark it dead
    // rather than try to do some complex analysis or transformation (such as
    // widening) basing on it.
    // TODO: Propagate TLI and pass it here to handle more cases.
    if (isInstructionTriviallyDead(UseInst, /* TLI */ nullptr)) {
      DeadInsts.emplace_back(UseInst);
      continue;
    }

    // Bypass back edges to avoid extra work.
    if (UseInst == CurrIV) continue;

    // Try to replace UseInst with a loop invariant before any other
    // simplifications.
    if (replaceIVUserWithLoopInvariant(UseInst))
      continue;

    Instruction *IVOperand = UseOper.second;
    // Repeatedly fold simple binary ops on the IV operand into the operand
    // itself, chasing any new operand each fold produces. N merely guards
    // against runaway iteration.
    for (unsigned N = 0; IVOperand; ++N) {
      assert(N <= Simplified.size() && "runaway iteration");
      (void) N;

      Value *NewOper = foldIVUser(UseInst, IVOperand);
      if (!NewOper)
        break; // done folding
      IVOperand = dyn_cast<Instruction>(NewOper);
    }
    if (!IVOperand)
      continue;

    if (eliminateIVUser(UseInst, IVOperand)) {
      pushIVUsers(IVOperand, L, Simplified, SimpleIVUsers);
      continue;
    }

    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(UseInst)) {
      if ((isa<OverflowingBinaryOperator>(BO) &&
           strengthenOverflowingOperation(BO, IVOperand)) ||
          (isa<ShlOperator>(BO) && strengthenRightShift(BO, IVOperand))) {
        // re-queue uses of the now modified binary operator and fall
        // through to the checks that remain.
        pushIVUsers(IVOperand, L, Simplified, SimpleIVUsers);
      }
    }

    // Try to use integer induction for FPToSI of float induction directly.
    if (replaceFloatIVWithIntegerIV(UseInst)) {
      // Re-queue the potentially new direct uses of IVOperand.
      pushIVUsers(IVOperand, L, Simplified, SimpleIVUsers);
      continue;
    }

    // Hand casts to the visitor (e.g. for IV widening candidate collection).
    CastInst *Cast = dyn_cast<CastInst>(UseInst);
    if (V && Cast) {
      V->visitCast(Cast);
      continue;
    }
    if (isSimpleIVUser(UseInst, L, SE)) {
      pushIVUsers(UseInst, L, Simplified, SimpleIVUsers);
    }
  }
}
  820. namespace llvm {
  821. void IVVisitor::anchor() { }
  822. /// Simplify instructions that use this induction variable
  823. /// by using ScalarEvolution to analyze the IV's recurrence.
  824. bool simplifyUsersOfIV(PHINode *CurrIV, ScalarEvolution *SE, DominatorTree *DT,
  825. LoopInfo *LI, const TargetTransformInfo *TTI,
  826. SmallVectorImpl<WeakTrackingVH> &Dead,
  827. SCEVExpander &Rewriter, IVVisitor *V) {
  828. SimplifyIndvar SIV(LI->getLoopFor(CurrIV->getParent()), SE, DT, LI, TTI,
  829. Rewriter, Dead);
  830. SIV.simplifyUsers(CurrIV, V);
  831. return SIV.hasChanged();
  832. }
  833. /// Simplify users of induction variables within this
  834. /// loop. This does not actually change or add IVs.
  835. bool simplifyLoopIVs(Loop *L, ScalarEvolution *SE, DominatorTree *DT,
  836. LoopInfo *LI, const TargetTransformInfo *TTI,
  837. SmallVectorImpl<WeakTrackingVH> &Dead) {
  838. SCEVExpander Rewriter(*SE, SE->getDataLayout(), "indvars");
  839. #ifndef NDEBUG
  840. Rewriter.setDebugType(DEBUG_TYPE);
  841. #endif
  842. bool Changed = false;
  843. for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
  844. Changed |=
  845. simplifyUsersOfIV(cast<PHINode>(I), SE, DT, LI, TTI, Dead, Rewriter);
  846. }
  847. return Changed;
  848. }
  849. } // namespace llvm
namespace {

//===----------------------------------------------------------------------===//
//  Widen Induction Variables - Extend the width of an IV to cover its
//  widest uses.
//===----------------------------------------------------------------------===//

class WidenIV {
  // Parameters
  PHINode *OrigPhi;   // The narrow IV phi to be widened.
  Type *WideType;     // Integer type the IV is widened to.

  // Context
  LoopInfo        *LI;
  Loop            *L; // Loop containing OrigPhi (its header holds the phi).
  ScalarEvolution *SE;
  DominatorTree   *DT;

  // Does the module have any calls to the llvm.experimental.guard intrinsic
  // at all? If not we can avoid scanning instructions looking for guards.
  bool HasGuards;

  // When false, control-dependent post-increment ranges are not collected.
  bool UsePostIncrementRanges;

  // Statistics
  unsigned NumElimExt = 0;
  unsigned NumWidened = 0;

  // Result
  PHINode *WidePhi = nullptr;
  Instruction *WideInc = nullptr;
  const SCEV *WideIncExpr = nullptr;
  SmallVectorImpl<WeakTrackingVH> &DeadInsts;

  // Narrow instructions that have already been widened.
  SmallPtrSet<Instruction *,16> Widened;

  enum class ExtendKind { Zero, Sign, Unknown };

  // A map tracking the kind of extension used to widen each narrow IV
  // and narrow IV user.
  // Key: pointer to a narrow IV or IV user.
  // Value: the kind of extension used to widen this Instruction.
  DenseMap<AssertingVH<Instruction>, ExtendKind> ExtendKindMap;

  using DefUserPair = std::pair<AssertingVH<Value>, AssertingVH<Instruction>>;

  // A map with control-dependent ranges for post increment IV uses. The key is
  // a pair of IV def and a use of this def denoting the context. The value is
  // a ConstantRange representing possible values of the def at the given
  // context.
  DenseMap<DefUserPair, ConstantRange> PostIncRangeInfos;

  // Return the recorded post-increment range for (Def, UseI), if any.
  std::optional<ConstantRange> getPostIncRangeInfo(Value *Def,
                                                   Instruction *UseI) {
    DefUserPair Key(Def, UseI);
    auto It = PostIncRangeInfos.find(Key);
    return It == PostIncRangeInfos.end()
               ? std::optional<ConstantRange>(std::nullopt)
               : std::optional<ConstantRange>(It->second);
  }

  void calculatePostIncRanges(PHINode *OrigPhi);
  void calculatePostIncRange(Instruction *NarrowDef, Instruction *NarrowUser);

  // Record range R for (Def, UseI), intersecting with any existing entry.
  void updatePostIncRangeInfo(Value *Def, Instruction *UseI, ConstantRange R) {
    DefUserPair Key(Def, UseI);
    auto It = PostIncRangeInfos.find(Key);
    if (It == PostIncRangeInfos.end())
      PostIncRangeInfos.insert({Key, R});
    else
      It->second = R.intersectWith(It->second);
  }

public:
  /// Record a link in the Narrow IV def-use chain along with the WideIV that
  /// computes the same value as the Narrow IV def. This avoids caching Use*
  /// pointers.
  struct NarrowIVDefUse {
    Instruction *NarrowDef = nullptr;
    Instruction *NarrowUse = nullptr;
    Instruction *WideDef = nullptr;

    // True if the narrow def is never negative. Tracking this information lets
    // us use a sign extension instead of a zero extension or vice versa, when
    // profitable and legal.
    bool NeverNegative = false;

    NarrowIVDefUse(Instruction *ND, Instruction *NU, Instruction *WD,
                   bool NeverNegative)
        : NarrowDef(ND), NarrowUse(NU), WideDef(WD),
          NeverNegative(NeverNegative) {}
  };

  WidenIV(const WideIVInfo &WI, LoopInfo *LInfo, ScalarEvolution *SEv,
          DominatorTree *DTree, SmallVectorImpl<WeakTrackingVH> &DI,
          bool HasGuards, bool UsePostIncrementRanges = true);

  // Main entry point: build the wide IV and rewrite narrow uses; returns the
  // wide phi, or null if widening was not performed.
  PHINode *createWideIV(SCEVExpander &Rewriter);

  unsigned getNumElimExt() { return NumElimExt; };
  unsigned getNumWidened() { return NumWidened; };

protected:
  Value *createExtendInst(Value *NarrowOper, Type *WideType, bool IsSigned,
                          Instruction *Use);

  Instruction *cloneIVUser(NarrowIVDefUse DU, const SCEVAddRecExpr *WideAR);
  Instruction *cloneArithmeticIVUser(NarrowIVDefUse DU,
                                     const SCEVAddRecExpr *WideAR);
  Instruction *cloneBitwiseIVUser(NarrowIVDefUse DU);

  ExtendKind getExtendKind(Instruction *I);

  using WidenedRecTy = std::pair<const SCEVAddRecExpr *, ExtendKind>;

  WidenedRecTy getWideRecurrence(NarrowIVDefUse DU);

  WidenedRecTy getExtendedOperandRecurrence(NarrowIVDefUse DU);

  const SCEV *getSCEVByOpCode(const SCEV *LHS, const SCEV *RHS,
                              unsigned OpCode) const;

  Instruction *widenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter);

  bool widenLoopCompare(NarrowIVDefUse DU);
  bool widenWithVariantUse(NarrowIVDefUse DU);

  void pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef);

private:
  SmallVector<NarrowIVDefUse, 8> NarrowIVUsers;
};

} // namespace
/// Determine the insertion point for this user. By default, insert immediately
/// before the user. SCEVExpander or LICM will hoist loop invariants out of the
/// loop. For PHI nodes, there may be multiple uses, so compute the nearest
/// common dominator for the incoming blocks. A nullptr can be returned if no
/// viable location is found: it may happen if User is a PHI and Def only comes
/// to this PHI from unreachable blocks.
static Instruction *getInsertPointForUses(Instruction *User, Value *Def,
                                          DominatorTree *DT, LoopInfo *LI) {
  PHINode *PHI = dyn_cast<PHINode>(User);
  if (!PHI)
    return User;

  Instruction *InsertPt = nullptr;
  // Def must be available at the end of every reachable incoming block that
  // supplies it; accumulate the nearest common dominator of those block ends.
  for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i) {
    if (PHI->getIncomingValue(i) != Def)
      continue;

    BasicBlock *InsertBB = PHI->getIncomingBlock(i);

    if (!DT->isReachableFromEntry(InsertBB))
      continue;

    if (!InsertPt) {
      InsertPt = InsertBB->getTerminator();
      continue;
    }
    InsertBB = DT->findNearestCommonDominator(InsertPt->getParent(), InsertBB);
    InsertPt = InsertBB->getTerminator();
  }

  // If we have skipped all inputs, it means that Def only comes to Phi from
  // unreachable blocks.
  if (!InsertPt)
    return nullptr;

  auto *DefI = dyn_cast<Instruction>(Def);
  if (!DefI)
    return InsertPt;

  assert(DT->dominates(DefI, InsertPt) && "def does not dominate all uses");

  auto *L = LI->getLoopFor(DefI->getParent());
  assert(!L || L->contains(LI->getLoopFor(InsertPt->getParent())));

  // Hoist the insertion point out of any loop nested more deeply than Def's
  // own loop: walk up the dominator tree until we reach Def's loop level.
  for (auto *DTN = (*DT)[InsertPt->getParent()]; DTN; DTN = DTN->getIDom())
    if (LI->getLoopFor(DTN->getBlock()) == L)
      return DTN->getBlock()->getTerminator();

  llvm_unreachable("DefI dominates InsertPt!");
}
/// Construct a widener for the narrow IV described by WI. The loop is derived
/// from the phi's parent block, which must be a loop header.
WidenIV::WidenIV(const WideIVInfo &WI, LoopInfo *LInfo, ScalarEvolution *SEv,
                 DominatorTree *DTree, SmallVectorImpl<WeakTrackingVH> &DI,
                 bool HasGuards, bool UsePostIncrementRanges)
    : OrigPhi(WI.NarrowIV), WideType(WI.WidestNativeType), LI(LInfo),
      L(LI->getLoopFor(OrigPhi->getParent())), SE(SEv), DT(DTree),
      HasGuards(HasGuards), UsePostIncrementRanges(UsePostIncrementRanges),
      DeadInsts(DI) {
  assert(L->getHeader() == OrigPhi->getParent() && "Phi must be an IV");
  // Seed the extension-kind map: the root narrow IV uses the extension kind
  // recorded by the WideIVInfo analysis.
  ExtendKindMap[OrigPhi] = WI.IsSigned ? ExtendKind::Sign : ExtendKind::Zero;
}
  1001. Value *WidenIV::createExtendInst(Value *NarrowOper, Type *WideType,
  1002. bool IsSigned, Instruction *Use) {
  1003. // Set the debug location and conservative insertion point.
  1004. IRBuilder<> Builder(Use);
  1005. // Hoist the insertion point into loop preheaders as far as possible.
  1006. for (const Loop *L = LI->getLoopFor(Use->getParent());
  1007. L && L->getLoopPreheader() && L->isLoopInvariant(NarrowOper);
  1008. L = L->getParentLoop())
  1009. Builder.SetInsertPoint(L->getLoopPreheader()->getTerminator());
  1010. return IsSigned ? Builder.CreateSExt(NarrowOper, WideType) :
  1011. Builder.CreateZExt(NarrowOper, WideType);
  1012. }
  1013. /// Instantiate a wide operation to replace a narrow operation. This only needs
  1014. /// to handle operations that can evaluation to SCEVAddRec. It can safely return
  1015. /// 0 for any operation we decide not to clone.
  1016. Instruction *WidenIV::cloneIVUser(WidenIV::NarrowIVDefUse DU,
  1017. const SCEVAddRecExpr *WideAR) {
  1018. unsigned Opcode = DU.NarrowUse->getOpcode();
  1019. switch (Opcode) {
  1020. default:
  1021. return nullptr;
  1022. case Instruction::Add:
  1023. case Instruction::Mul:
  1024. case Instruction::UDiv:
  1025. case Instruction::Sub:
  1026. return cloneArithmeticIVUser(DU, WideAR);
  1027. case Instruction::And:
  1028. case Instruction::Or:
  1029. case Instruction::Xor:
  1030. case Instruction::Shl:
  1031. case Instruction::LShr:
  1032. case Instruction::AShr:
  1033. return cloneBitwiseIVUser(DU);
  1034. }
  1035. }
  1036. Instruction *WidenIV::cloneBitwiseIVUser(WidenIV::NarrowIVDefUse DU) {
  1037. Instruction *NarrowUse = DU.NarrowUse;
  1038. Instruction *NarrowDef = DU.NarrowDef;
  1039. Instruction *WideDef = DU.WideDef;
  1040. LLVM_DEBUG(dbgs() << "Cloning bitwise IVUser: " << *NarrowUse << "\n");
  1041. // Replace NarrowDef operands with WideDef. Otherwise, we don't know anything
  1042. // about the narrow operand yet so must insert a [sz]ext. It is probably loop
  1043. // invariant and will be folded or hoisted. If it actually comes from a
  1044. // widened IV, it should be removed during a future call to widenIVUse.
  1045. bool IsSigned = getExtendKind(NarrowDef) == ExtendKind::Sign;
  1046. Value *LHS = (NarrowUse->getOperand(0) == NarrowDef)
  1047. ? WideDef
  1048. : createExtendInst(NarrowUse->getOperand(0), WideType,
  1049. IsSigned, NarrowUse);
  1050. Value *RHS = (NarrowUse->getOperand(1) == NarrowDef)
  1051. ? WideDef
  1052. : createExtendInst(NarrowUse->getOperand(1), WideType,
  1053. IsSigned, NarrowUse);
  1054. auto *NarrowBO = cast<BinaryOperator>(NarrowUse);
  1055. auto *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS, RHS,
  1056. NarrowBO->getName());
  1057. IRBuilder<> Builder(NarrowUse);
  1058. Builder.Insert(WideBO);
  1059. WideBO->copyIRFlags(NarrowBO);
  1060. return WideBO;
  1061. }
/// Clone a narrow arithmetic operation as a wide one, choosing the extension
/// of the non-IV operand so that the wide result matches WideAR per SCEV.
Instruction *WidenIV::cloneArithmeticIVUser(WidenIV::NarrowIVDefUse DU,
                                            const SCEVAddRecExpr *WideAR) {
  Instruction *NarrowUse = DU.NarrowUse;
  Instruction *NarrowDef = DU.NarrowDef;
  Instruction *WideDef = DU.WideDef;

  LLVM_DEBUG(dbgs() << "Cloning arithmetic IVUser: " << *NarrowUse << "\n");

  unsigned IVOpIdx = (NarrowUse->getOperand(0) == NarrowDef) ? 0 : 1;

  // We're trying to find X such that
  //
  //  Widen(NarrowDef `op` NonIVNarrowDef) == WideAR == WideDef `op.wide` X
  //
  // We guess two solutions to X, sext(NonIVNarrowDef) and zext(NonIVNarrowDef),
  // and check using SCEV if any of them are correct.

  // Returns true if extending NonIVNarrowDef according to `SignExt` is a
  // correct solution to X.
  auto GuessNonIVOperand = [&](bool SignExt) {
    const SCEV *WideLHS;
    const SCEV *WideRHS;

    auto GetExtend = [this, SignExt](const SCEV *S, Type *Ty) {
      if (SignExt)
        return SE->getSignExtendExpr(S, Ty);
      return SE->getZeroExtendExpr(S, Ty);
    };

    if (IVOpIdx == 0) {
      WideLHS = SE->getSCEV(WideDef);
      const SCEV *NarrowRHS = SE->getSCEV(NarrowUse->getOperand(1));
      WideRHS = GetExtend(NarrowRHS, WideType);
    } else {
      const SCEV *NarrowLHS = SE->getSCEV(NarrowUse->getOperand(0));
      WideLHS = GetExtend(NarrowLHS, WideType);
      WideRHS = SE->getSCEV(WideDef);
    }

    // WideUse is "WideDef `op.wide` X" as described in the comment.
    const SCEV *WideUse =
        getSCEVByOpCode(WideLHS, WideRHS, NarrowUse->getOpcode());

    return WideUse == WideAR;
  };

  // Prefer the extension kind already chosen for the IV def; fall back to the
  // opposite kind before giving up.
  bool SignExtend = getExtendKind(NarrowDef) == ExtendKind::Sign;
  if (!GuessNonIVOperand(SignExtend)) {
    SignExtend = !SignExtend;
    if (!GuessNonIVOperand(SignExtend))
      return nullptr;
  }

  Value *LHS = (NarrowUse->getOperand(0) == NarrowDef)
                   ? WideDef
                   : createExtendInst(NarrowUse->getOperand(0), WideType,
                                      SignExtend, NarrowUse);
  Value *RHS = (NarrowUse->getOperand(1) == NarrowDef)
                   ? WideDef
                   : createExtendInst(NarrowUse->getOperand(1), WideType,
                                      SignExtend, NarrowUse);

  auto *NarrowBO = cast<BinaryOperator>(NarrowUse);
  auto *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS, RHS,
                                        NarrowBO->getName());

  IRBuilder<> Builder(NarrowUse);
  Builder.Insert(WideBO);
  WideBO->copyIRFlags(NarrowBO);
  return WideBO;
}
  1121. WidenIV::ExtendKind WidenIV::getExtendKind(Instruction *I) {
  1122. auto It = ExtendKindMap.find(I);
  1123. assert(It != ExtendKindMap.end() && "Instruction not yet extended!");
  1124. return It->second;
  1125. }
  1126. const SCEV *WidenIV::getSCEVByOpCode(const SCEV *LHS, const SCEV *RHS,
  1127. unsigned OpCode) const {
  1128. switch (OpCode) {
  1129. case Instruction::Add:
  1130. return SE->getAddExpr(LHS, RHS);
  1131. case Instruction::Sub:
  1132. return SE->getMinusSCEV(LHS, RHS);
  1133. case Instruction::Mul:
  1134. return SE->getMulExpr(LHS, RHS);
  1135. case Instruction::UDiv:
  1136. return SE->getUDivExpr(LHS, RHS);
  1137. default:
  1138. llvm_unreachable("Unsupported opcode.");
  1139. };
  1140. }
/// No-wrap operations can transfer sign extension of their result to their
/// operands. Generate the SCEV value for the widened operation without
/// actually modifying the IR yet. If the expression after extending the
/// operands is an AddRec for this loop, return the AddRec and the kind of
/// extension used.
WidenIV::WidenedRecTy
WidenIV::getExtendedOperandRecurrence(WidenIV::NarrowIVDefUse DU) {
  // Handle the common case of add<nsw/nuw>
  const unsigned OpCode = DU.NarrowUse->getOpcode();
  // Only Add/Sub/Mul instructions supported yet.
  if (OpCode != Instruction::Add && OpCode != Instruction::Sub &&
      OpCode != Instruction::Mul)
    return {nullptr, ExtendKind::Unknown};

  // One operand (NarrowDef) has already been extended to WideDef. Now determine
  // if extending the other will lead to a recurrence.
  const unsigned ExtendOperIdx =
      DU.NarrowUse->getOperand(0) == DU.NarrowDef ? 1 : 0;
  assert(DU.NarrowUse->getOperand(1-ExtendOperIdx) == DU.NarrowDef && "bad DU");

  // Extending the non-IV operand is only valid when the op carries the
  // matching no-wrap flag for the def's extension kind.
  const SCEV *ExtendOperExpr = nullptr;
  const OverflowingBinaryOperator *OBO =
    cast<OverflowingBinaryOperator>(DU.NarrowUse);
  ExtendKind ExtKind = getExtendKind(DU.NarrowDef);
  if (ExtKind == ExtendKind::Sign && OBO->hasNoSignedWrap())
    ExtendOperExpr = SE->getSignExtendExpr(
      SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType);
  else if (ExtKind == ExtendKind::Zero && OBO->hasNoUnsignedWrap())
    ExtendOperExpr = SE->getZeroExtendExpr(
      SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType);
  else
    return {nullptr, ExtendKind::Unknown};

  // When creating this SCEV expr, don't apply the current operations NSW or NUW
  // flags. This instruction may be guarded by control flow that the no-wrap
  // behavior depends on. Non-control-equivalent instructions can be mapped to
  // the same SCEV expression, and it would be incorrect to transfer NSW/NUW
  // semantics to those operations.
  const SCEV *lhs = SE->getSCEV(DU.WideDef);
  const SCEV *rhs = ExtendOperExpr;

  // Let's swap operands to the initial order for the case of non-commutative
  // operations, like SUB. See PR21014.
  if (ExtendOperIdx == 0)
    std::swap(lhs, rhs);
  const SCEVAddRecExpr *AddRec =
      dyn_cast<SCEVAddRecExpr>(getSCEVByOpCode(lhs, rhs, OpCode));

  if (!AddRec || AddRec->getLoop() != L)
    return {nullptr, ExtendKind::Unknown};

  return {AddRec, ExtKind};
}
/// Is this instruction potentially interesting for further simplification after
/// widening it's type? In other words, can the extend be safely hoisted out of
/// the loop with SCEV reducing the value to a recurrence on the same loop. If
/// so, return the extended recurrence and the kind of extension used. Otherwise
/// return {nullptr, ExtendKind::Unknown}.
WidenIV::WidenedRecTy WidenIV::getWideRecurrence(WidenIV::NarrowIVDefUse DU) {
  if (!DU.NarrowUse->getType()->isIntegerTy())
    return {nullptr, ExtendKind::Unknown};

  const SCEV *NarrowExpr = SE->getSCEV(DU.NarrowUse);
  if (SE->getTypeSizeInBits(NarrowExpr->getType()) >=
      SE->getTypeSizeInBits(WideType)) {
    // NarrowUse implicitly widens its operand. e.g. a gep with a narrow
    // index. So don't follow this use.
    return {nullptr, ExtendKind::Unknown};
  }

  const SCEV *WideExpr;
  ExtendKind ExtKind;
  if (DU.NeverNegative) {
    // A never-negative value sign- and zero-extends to the same result, so
    // try sign extension first and fall back to zero extension.
    WideExpr = SE->getSignExtendExpr(NarrowExpr, WideType);
    if (isa<SCEVAddRecExpr>(WideExpr))
      ExtKind = ExtendKind::Sign;
    else {
      WideExpr = SE->getZeroExtendExpr(NarrowExpr, WideType);
      ExtKind = ExtendKind::Zero;
    }
  } else if (getExtendKind(DU.NarrowDef) == ExtendKind::Sign) {
    WideExpr = SE->getSignExtendExpr(NarrowExpr, WideType);
    ExtKind = ExtendKind::Sign;
  } else {
    WideExpr = SE->getZeroExtendExpr(NarrowExpr, WideType);
    ExtKind = ExtendKind::Zero;
  }
  // Only an AddRec over this loop is a usable widened recurrence.
  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(WideExpr);
  if (!AddRec || AddRec->getLoop() != L)
    return {nullptr, ExtendKind::Unknown};
  return {AddRec, ExtKind};
}
  1225. /// This IV user cannot be widened. Replace this use of the original narrow IV
  1226. /// with a truncation of the new wide IV to isolate and eliminate the narrow IV.
  1227. static void truncateIVUse(WidenIV::NarrowIVDefUse DU, DominatorTree *DT,
  1228. LoopInfo *LI) {
  1229. auto *InsertPt = getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT, LI);
  1230. if (!InsertPt)
  1231. return;
  1232. LLVM_DEBUG(dbgs() << "INDVARS: Truncate IV " << *DU.WideDef << " for user "
  1233. << *DU.NarrowUse << "\n");
  1234. IRBuilder<> Builder(InsertPt);
  1235. Value *Trunc = Builder.CreateTrunc(DU.WideDef, DU.NarrowDef->getType());
  1236. DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, Trunc);
  1237. }
  1238. /// If the narrow use is a compare instruction, then widen the compare
  1239. // (and possibly the other operand). The extend operation is hoisted into the
  1240. // loop preheader as far as possible.
  1241. bool WidenIV::widenLoopCompare(WidenIV::NarrowIVDefUse DU) {
  1242. ICmpInst *Cmp = dyn_cast<ICmpInst>(DU.NarrowUse);
  1243. if (!Cmp)
  1244. return false;
  1245. // We can legally widen the comparison in the following two cases:
  1246. //
  1247. // - The signedness of the IV extension and comparison match
  1248. //
  1249. // - The narrow IV is always positive (and thus its sign extension is equal
  1250. // to its zero extension). For instance, let's say we're zero extending
  1251. // %narrow for the following use
  1252. //
  1253. // icmp slt i32 %narrow, %val ... (A)
  1254. //
  1255. // and %narrow is always positive. Then
  1256. //
  1257. // (A) == icmp slt i32 sext(%narrow), sext(%val)
  1258. // == icmp slt i32 zext(%narrow), sext(%val)
  1259. bool IsSigned = getExtendKind(DU.NarrowDef) == ExtendKind::Sign;
  1260. if (!(DU.NeverNegative || IsSigned == Cmp->isSigned()))
  1261. return false;
  1262. Value *Op = Cmp->getOperand(Cmp->getOperand(0) == DU.NarrowDef ? 1 : 0);
  1263. unsigned CastWidth = SE->getTypeSizeInBits(Op->getType());
  1264. unsigned IVWidth = SE->getTypeSizeInBits(WideType);
  1265. assert(CastWidth <= IVWidth && "Unexpected width while widening compare.");
  1266. // Widen the compare instruction.
  1267. auto *InsertPt = getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT, LI);
  1268. if (!InsertPt)
  1269. return false;
  1270. IRBuilder<> Builder(InsertPt);
  1271. DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, DU.WideDef);
  1272. // Widen the other operand of the compare, if necessary.
  1273. if (CastWidth < IVWidth) {
  1274. Value *ExtOp = createExtendInst(Op, WideType, Cmp->isSigned(), Cmp);
  1275. DU.NarrowUse->replaceUsesOfWith(Op, ExtOp);
  1276. }
  1277. return true;
  1278. }
// The widenIVUse avoids generating trunc by evaluating the use as AddRec, this
// will not work when:
//    1) SCEV traces back to an instruction inside the loop that SCEV can not
// expand, eg. add %indvar, (load %addr)
//    2) SCEV finds a loop variant, eg. add %indvar, %loopvariant
// While SCEV fails to avoid trunc, we can still try to use instruction
// combining approach to prove trunc is not required. This can be further
// extended with other instruction combining checks, but for now we handle the
// following case (sub can be "add" and "mul", "nsw + sext" can be "nuw + zext")
//
// Src:
//   %c = sub nsw %b, %indvar
//   %d = sext %c to i64
// Dst:
//   %indvar.ext1 = sext %indvar to i64
//   %m = sext %b to i64
//   %d = sub nsw i64 %m, %indvar.ext1
// Therefore, as long as the result of add/sub/mul is extended to wide type, no
// trunc is required regardless of how %b is generated. This pattern is common
// when calculating address in 64 bit architecture
bool WidenIV::widenWithVariantUse(WidenIV::NarrowIVDefUse DU) {
  Instruction *NarrowUse = DU.NarrowUse;
  Instruction *NarrowDef = DU.NarrowDef;
  Instruction *WideDef = DU.WideDef;

  // Handle the common case of add<nsw/nuw>
  const unsigned OpCode = NarrowUse->getOpcode();
  // Only Add/Sub/Mul instructions are supported.
  if (OpCode != Instruction::Add && OpCode != Instruction::Sub &&
      OpCode != Instruction::Mul)
    return false;

  // The operand that is not defined by NarrowDef of DU. Let's call it the
  // other operand.
  assert((NarrowUse->getOperand(0) == NarrowDef ||
          NarrowUse->getOperand(1) == NarrowDef) &&
         "bad DU");

  // The no-wrap flag on the narrow op determines whether the matching
  // extension kind can be pushed through it without changing the result.
  const OverflowingBinaryOperator *OBO =
      cast<OverflowingBinaryOperator>(NarrowUse);
  ExtendKind ExtKind = getExtendKind(NarrowDef);
  bool CanSignExtend = ExtKind == ExtendKind::Sign && OBO->hasNoSignedWrap();
  bool CanZeroExtend = ExtKind == ExtendKind::Zero && OBO->hasNoUnsignedWrap();
  auto AnotherOpExtKind = ExtKind;

  // Check that all uses are either:
  // - narrow def (in case of we are widening the IV increment);
  // - single-input LCSSA Phis;
  // - comparison of the chosen type;
  // - extend of the chosen type (raison d'etre).
  SmallVector<Instruction *, 4> ExtUsers;
  SmallVector<PHINode *, 4> LCSSAPhiUsers;
  SmallVector<ICmpInst *, 4> ICmpUsers;
  for (Use &U : NarrowUse->uses()) {
    Instruction *User = cast<Instruction>(U.getUser());
    if (User == NarrowDef)
      continue;
    if (!L->contains(User)) {
      auto *LCSSAPhi = cast<PHINode>(User);
      // Make sure there is only 1 input, so that we don't have to split
      // critical edges.
      if (LCSSAPhi->getNumOperands() != 1)
        return false;
      LCSSAPhiUsers.push_back(LCSSAPhi);
      continue;
    }
    if (auto *ICmp = dyn_cast<ICmpInst>(User)) {
      auto Pred = ICmp->getPredicate();
      // We have 3 types of predicates: signed, unsigned and equality
      // predicates. For equality, it's legal to widen icmp for either sign and
      // zero extend. For sign extend, we can also do so for signed predicates,
      // likewise for zero extend we can widen icmp for unsigned predicates.
      if (ExtKind == ExtendKind::Zero && ICmpInst::isSigned(Pred))
        return false;
      if (ExtKind == ExtendKind::Sign && ICmpInst::isUnsigned(Pred))
        return false;
      ICmpUsers.push_back(ICmp);
      continue;
    }
    // Any remaining in-loop user must be an extend of the chosen kind into
    // exactly the wide type; otherwise we cannot avoid the trunc.
    if (ExtKind == ExtendKind::Sign)
      User = dyn_cast<SExtInst>(User);
    else
      User = dyn_cast<ZExtInst>(User);
    if (!User || User->getType() != WideType)
      return false;
    ExtUsers.push_back(User);
  }
  // No extend users at all: the narrow op is only consumed by things we can
  // drop or rewrite, so it is simply dead after widening.
  if (ExtUsers.empty()) {
    DeadInsts.emplace_back(NarrowUse);
    return true;
  }

  // We'll prove some facts that should be true in the context of ext users. If
  // there is no users, we are done now. If there are some, pick their common
  // dominator as context.
  const Instruction *CtxI = findCommonDominator(ExtUsers, *DT);

  if (!CanSignExtend && !CanZeroExtend) {
    // Because InstCombine turns 'sub nuw' to 'add' losing the no-wrap flag, we
    // will most likely not see it. Let's try to prove it.
    if (OpCode != Instruction::Add)
      return false;
    if (ExtKind != ExtendKind::Zero)
      return false;
    const SCEV *LHS = SE->getSCEV(OBO->getOperand(0));
    const SCEV *RHS = SE->getSCEV(OBO->getOperand(1));
    // TODO: Support case for NarrowDef = NarrowUse->getOperand(1).
    if (NarrowUse->getOperand(0) != NarrowDef)
      return false;
    if (!SE->isKnownNegative(RHS))
      return false;
    bool ProvedSubNUW = SE->isKnownPredicateAt(ICmpInst::ICMP_UGE, LHS,
                                               SE->getNegativeSCEV(RHS), CtxI);
    if (!ProvedSubNUW)
      return false;
    // In fact, our 'add' is 'sub nuw'. We will need to widen the 2nd operand as
    // neg(zext(neg(op))), which is basically sext(op).
    AnotherOpExtKind = ExtendKind::Sign;
  }

  // Verifying that Defining operand is an AddRec
  const SCEV *Op1 = SE->getSCEV(WideDef);
  const SCEVAddRecExpr *AddRecOp1 = dyn_cast<SCEVAddRecExpr>(Op1);
  if (!AddRecOp1 || AddRecOp1->getLoop() != L)
    return false;

  LLVM_DEBUG(dbgs() << "Cloning arithmetic IVUser: " << *NarrowUse << "\n");

  // Generating a widening use instruction. The operand that is the narrow def
  // is replaced by the already-widened def; the other operand gets an explicit
  // extend of the kind determined above.
  Value *LHS =
      (NarrowUse->getOperand(0) == NarrowDef)
          ? WideDef
          : createExtendInst(NarrowUse->getOperand(0), WideType,
                             AnotherOpExtKind == ExtendKind::Sign, NarrowUse);
  Value *RHS =
      (NarrowUse->getOperand(1) == NarrowDef)
          ? WideDef
          : createExtendInst(NarrowUse->getOperand(1), WideType,
                             AnotherOpExtKind == ExtendKind::Sign, NarrowUse);

  auto *NarrowBO = cast<BinaryOperator>(NarrowUse);
  auto *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS, RHS,
                                        NarrowBO->getName());
  IRBuilder<> Builder(NarrowUse);
  Builder.Insert(WideBO);
  // Preserve nsw/nuw/exact etc. from the original narrow op.
  WideBO->copyIRFlags(NarrowBO);
  ExtendKindMap[NarrowUse] = ExtKind;

  // The extends of the narrow op are now redundant: the wide op already
  // produces the extended value.
  for (Instruction *User : ExtUsers) {
    assert(User->getType() == WideType && "Checked before!");
    LLVM_DEBUG(dbgs() << "INDVARS: eliminating " << *User << " replaced by "
                      << *WideBO << "\n");
    ++NumElimExt;
    User->replaceAllUsesWith(WideBO);
    DeadInsts.emplace_back(User);
  }

  // Rebuild each single-input LCSSA phi over the wide value, then truncate
  // back to the narrow type for the phi's existing users.
  for (PHINode *User : LCSSAPhiUsers) {
    assert(User->getNumOperands() == 1 && "Checked before!");
    Builder.SetInsertPoint(User);
    auto *WidePN =
        Builder.CreatePHI(WideBO->getType(), 1, User->getName() + ".wide");
    BasicBlock *LoopExitingBlock = User->getParent()->getSinglePredecessor();
    assert(LoopExitingBlock && L->contains(LoopExitingBlock) &&
           "Not a LCSSA Phi?");
    WidePN->addIncoming(WideBO, LoopExitingBlock);
    Builder.SetInsertPoint(&*User->getParent()->getFirstInsertionPt());
    auto *TruncPN = Builder.CreateTrunc(WidePN, User->getType());
    User->replaceAllUsesWith(TruncPN);
    DeadInsts.emplace_back(User);
  }

  // Rewrite each compare over wide operands, extending the non-widened
  // operand with the same kind as the narrow def (legality checked above).
  for (ICmpInst *User : ICmpUsers) {
    Builder.SetInsertPoint(User);
    auto ExtendedOp = [&](Value * V)->Value * {
      if (V == NarrowUse)
        return WideBO;
      if (ExtKind == ExtendKind::Zero)
        return Builder.CreateZExt(V, WideBO->getType());
      else
        return Builder.CreateSExt(V, WideBO->getType());
    };
    auto Pred = User->getPredicate();
    auto *LHS = ExtendedOp(User->getOperand(0));
    auto *RHS = ExtendedOp(User->getOperand(1));
    auto *WideCmp =
        Builder.CreateICmp(Pred, LHS, RHS, User->getName() + ".wide");
    User->replaceAllUsesWith(WideCmp);
    DeadInsts.emplace_back(User);
  }

  return true;
}
/// Determine whether an individual user of the narrow IV can be widened. If so,
/// return the wide clone of the user.
Instruction *WidenIV::widenIVUse(WidenIV::NarrowIVDefUse DU, SCEVExpander &Rewriter) {
  assert(ExtendKindMap.count(DU.NarrowDef) &&
         "Should already know the kind of extension used to widen NarrowDef");

  // Stop traversing the def-use chain at inner-loop phis or post-loop phis.
  if (PHINode *UsePhi = dyn_cast<PHINode>(DU.NarrowUse)) {
    if (LI->getLoopFor(UsePhi->getParent()) != L) {
      // For LCSSA phis, sink the truncate outside the loop.
      // After SimplifyCFG most loop exit targets have a single predecessor.
      // Otherwise fall back to a truncate within the loop.
      if (UsePhi->getNumOperands() != 1)
        truncateIVUse(DU, DT, LI);
      else {
        // Widening the PHI requires us to insert a trunc. The logical place
        // for this trunc is in the same BB as the PHI. This is not possible if
        // the BB is terminated by a catchswitch.
        if (isa<CatchSwitchInst>(UsePhi->getParent()->getTerminator()))
          return nullptr;

        // Build a wide single-input phi next to the narrow one, feed it the
        // wide def, and replace the narrow phi with a trunc of the wide phi.
        PHINode *WidePhi =
            PHINode::Create(DU.WideDef->getType(), 1, UsePhi->getName() + ".wide",
                            UsePhi);
        WidePhi->addIncoming(DU.WideDef, UsePhi->getIncomingBlock(0));
        IRBuilder<> Builder(&*WidePhi->getParent()->getFirstInsertionPt());
        Value *Trunc = Builder.CreateTrunc(WidePhi, DU.NarrowDef->getType());
        UsePhi->replaceAllUsesWith(Trunc);
        DeadInsts.emplace_back(UsePhi);
        LLVM_DEBUG(dbgs() << "INDVARS: Widen lcssa phi " << *UsePhi << " to "
                          << *WidePhi << "\n");
      }
      return nullptr;
    }
  }

  // This narrow use can be widened by a sext if it's non-negative or its
  // narrow def was widened by a sext. Same for zext.
  auto canWidenBySExt = [&]() {
    return DU.NeverNegative || getExtendKind(DU.NarrowDef) == ExtendKind::Sign;
  };
  auto canWidenByZExt = [&]() {
    return DU.NeverNegative || getExtendKind(DU.NarrowDef) == ExtendKind::Zero;
  };

  // Our raison d'etre! Eliminate sign and zero extension.
  if ((isa<SExtInst>(DU.NarrowUse) && canWidenBySExt()) ||
      (isa<ZExtInst>(DU.NarrowUse) && canWidenByZExt())) {
    Value *NewDef = DU.WideDef;
    if (DU.NarrowUse->getType() != WideType) {
      unsigned CastWidth = SE->getTypeSizeInBits(DU.NarrowUse->getType());
      unsigned IVWidth = SE->getTypeSizeInBits(WideType);
      if (CastWidth < IVWidth) {
        // The cast isn't as wide as the IV, so insert a Trunc.
        IRBuilder<> Builder(DU.NarrowUse);
        NewDef = Builder.CreateTrunc(DU.WideDef, DU.NarrowUse->getType());
      }
      else {
        // A wider extend was hidden behind a narrower one. This may induce
        // another round of IV widening in which the intermediate IV becomes
        // dead. It should be very rare.
        LLVM_DEBUG(dbgs() << "INDVARS: New IV " << *WidePhi
                          << " not wide enough to subsume " << *DU.NarrowUse
                          << "\n");
        DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, DU.WideDef);
        NewDef = DU.NarrowUse;
      }
    }
    if (NewDef != DU.NarrowUse) {
      LLVM_DEBUG(dbgs() << "INDVARS: eliminating " << *DU.NarrowUse
                        << " replaced by " << *DU.WideDef << "\n");
      ++NumElimExt;
      DU.NarrowUse->replaceAllUsesWith(NewDef);
      DeadInsts.emplace_back(DU.NarrowUse);
    }
    // Now that the extend is gone, we want to expose its uses for potential
    // further simplification. We don't need to directly inform SimplifyIVUsers
    // of the new users, because their parent IV will be processed later as a
    // new loop phi. If we preserved IVUsers analysis, we would also want to
    // push the uses of WideDef here.

    // No further widening is needed. The deceased [sz]ext had done it for us.
    return nullptr;
  }

  // Does this user itself evaluate to a recurrence after widening?
  WidenedRecTy WideAddRec = getExtendedOperandRecurrence(DU);
  if (!WideAddRec.first)
    WideAddRec = getWideRecurrence(DU);

  assert((WideAddRec.first == nullptr) ==
         (WideAddRec.second == ExtendKind::Unknown));
  if (!WideAddRec.first) {
    // If use is a loop condition, try to promote the condition instead of
    // truncating the IV first.
    if (widenLoopCompare(DU))
      return nullptr;

    // We are here about to generate a truncate instruction that may hurt
    // performance because the scalar evolution expression computed earlier
    // in WideAddRec.first does not indicate a polynomial induction expression.
    // In that case, look at the operands of the use instruction to determine
    // if we can still widen the use instead of truncating its operand.
    if (widenWithVariantUse(DU))
      return nullptr;

    // This user does not evaluate to a recurrence after widening, so don't
    // follow it. Instead insert a Trunc to kill off the original use,
    // eventually isolating the original narrow IV so it can be removed.
    truncateIVUse(DU, DT, LI);
    return nullptr;
  }

  // Reuse the IV increment that SCEVExpander created as long as it dominates
  // NarrowUse.
  Instruction *WideUse = nullptr;
  if (WideAddRec.first == WideIncExpr &&
      Rewriter.hoistIVInc(WideInc, DU.NarrowUse))
    WideUse = WideInc;
  else {
    WideUse = cloneIVUser(DU, WideAddRec.first);
    if (!WideUse)
      return nullptr;
  }
  // Evaluation of WideAddRec ensured that the narrow expression could be
  // extended outside the loop without overflow. This suggests that the wide use
  // evaluates to the same expression as the extended narrow use, but doesn't
  // absolutely guarantee it. Hence the following failsafe check. In rare cases
  // where it fails, we simply throw away the newly created wide use.
  if (WideAddRec.first != SE->getSCEV(WideUse)) {
    LLVM_DEBUG(dbgs() << "Wide use expression mismatch: " << *WideUse << ": "
                      << *SE->getSCEV(WideUse) << " != " << *WideAddRec.first
                      << "\n");
    DeadInsts.emplace_back(WideUse);
    return nullptr;
  }

  // if we reached this point then we are going to replace
  // DU.NarrowUse with WideUse. Reattach DbgValue then.
  replaceAllDbgUsesWith(*DU.NarrowUse, *WideUse, *WideUse, *DT);

  ExtendKindMap[DU.NarrowUse] = WideAddRec.second;
  // Returning WideUse pushes it on the worklist.
  return WideUse;
}
  1591. /// Add eligible users of NarrowDef to NarrowIVUsers.
  1592. void WidenIV::pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef) {
  1593. const SCEV *NarrowSCEV = SE->getSCEV(NarrowDef);
  1594. bool NonNegativeDef =
  1595. SE->isKnownPredicate(ICmpInst::ICMP_SGE, NarrowSCEV,
  1596. SE->getZero(NarrowSCEV->getType()));
  1597. for (User *U : NarrowDef->users()) {
  1598. Instruction *NarrowUser = cast<Instruction>(U);
  1599. // Handle data flow merges and bizarre phi cycles.
  1600. if (!Widened.insert(NarrowUser).second)
  1601. continue;
  1602. bool NonNegativeUse = false;
  1603. if (!NonNegativeDef) {
  1604. // We might have a control-dependent range information for this context.
  1605. if (auto RangeInfo = getPostIncRangeInfo(NarrowDef, NarrowUser))
  1606. NonNegativeUse = RangeInfo->getSignedMin().isNonNegative();
  1607. }
  1608. NarrowIVUsers.emplace_back(NarrowDef, NarrowUser, WideDef,
  1609. NonNegativeDef || NonNegativeUse);
  1610. }
  1611. }
  1612. /// Process a single induction variable. First use the SCEVExpander to create a
  1613. /// wide induction variable that evaluates to the same recurrence as the
  1614. /// original narrow IV. Then use a worklist to forward traverse the narrow IV's
  1615. /// def-use chain. After widenIVUse has processed all interesting IV users, the
  1616. /// narrow IV will be isolated for removal by DeleteDeadPHIs.
  1617. ///
  1618. /// It would be simpler to delete uses as they are processed, but we must avoid
  1619. /// invalidating SCEV expressions.
  1620. PHINode *WidenIV::createWideIV(SCEVExpander &Rewriter) {
  1621. // Is this phi an induction variable?
  1622. const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(OrigPhi));
  1623. if (!AddRec)
  1624. return nullptr;
  1625. // Widen the induction variable expression.
  1626. const SCEV *WideIVExpr = getExtendKind(OrigPhi) == ExtendKind::Sign
  1627. ? SE->getSignExtendExpr(AddRec, WideType)
  1628. : SE->getZeroExtendExpr(AddRec, WideType);
  1629. assert(SE->getEffectiveSCEVType(WideIVExpr->getType()) == WideType &&
  1630. "Expect the new IV expression to preserve its type");
  1631. // Can the IV be extended outside the loop without overflow?
  1632. AddRec = dyn_cast<SCEVAddRecExpr>(WideIVExpr);
  1633. if (!AddRec || AddRec->getLoop() != L)
  1634. return nullptr;
  1635. // An AddRec must have loop-invariant operands. Since this AddRec is
  1636. // materialized by a loop header phi, the expression cannot have any post-loop
  1637. // operands, so they must dominate the loop header.
  1638. assert(
  1639. SE->properlyDominates(AddRec->getStart(), L->getHeader()) &&
  1640. SE->properlyDominates(AddRec->getStepRecurrence(*SE), L->getHeader()) &&
  1641. "Loop header phi recurrence inputs do not dominate the loop");
  1642. // Iterate over IV uses (including transitive ones) looking for IV increments
  1643. // of the form 'add nsw %iv, <const>'. For each increment and each use of
  1644. // the increment calculate control-dependent range information basing on
  1645. // dominating conditions inside of the loop (e.g. a range check inside of the
  1646. // loop). Calculated ranges are stored in PostIncRangeInfos map.
  1647. //
  1648. // Control-dependent range information is later used to prove that a narrow
  1649. // definition is not negative (see pushNarrowIVUsers). It's difficult to do
  1650. // this on demand because when pushNarrowIVUsers needs this information some
  1651. // of the dominating conditions might be already widened.
  1652. if (UsePostIncrementRanges)
  1653. calculatePostIncRanges(OrigPhi);
  1654. // The rewriter provides a value for the desired IV expression. This may
  1655. // either find an existing phi or materialize a new one. Either way, we
  1656. // expect a well-formed cyclic phi-with-increments. i.e. any operand not part
  1657. // of the phi-SCC dominates the loop entry.
  1658. Instruction *InsertPt = &*L->getHeader()->getFirstInsertionPt();
  1659. Value *ExpandInst = Rewriter.expandCodeFor(AddRec, WideType, InsertPt);
  1660. // If the wide phi is not a phi node, for example a cast node, like bitcast,
  1661. // inttoptr, ptrtoint, just skip for now.
  1662. if (!(WidePhi = dyn_cast<PHINode>(ExpandInst))) {
  1663. // if the cast node is an inserted instruction without any user, we should
  1664. // remove it to make sure the pass don't touch the function as we can not
  1665. // wide the phi.
  1666. if (ExpandInst->hasNUses(0) &&
  1667. Rewriter.isInsertedInstruction(cast<Instruction>(ExpandInst)))
  1668. DeadInsts.emplace_back(ExpandInst);
  1669. return nullptr;
  1670. }
  1671. // Remembering the WideIV increment generated by SCEVExpander allows
  1672. // widenIVUse to reuse it when widening the narrow IV's increment. We don't
  1673. // employ a general reuse mechanism because the call above is the only call to
  1674. // SCEVExpander. Henceforth, we produce 1-to-1 narrow to wide uses.
  1675. if (BasicBlock *LatchBlock = L->getLoopLatch()) {
  1676. WideInc =
  1677. cast<Instruction>(WidePhi->getIncomingValueForBlock(LatchBlock));
  1678. WideIncExpr = SE->getSCEV(WideInc);
  1679. // Propagate the debug location associated with the original loop increment
  1680. // to the new (widened) increment.
  1681. auto *OrigInc =
  1682. cast<Instruction>(OrigPhi->getIncomingValueForBlock(LatchBlock));
  1683. WideInc->setDebugLoc(OrigInc->getDebugLoc());
  1684. }
  1685. LLVM_DEBUG(dbgs() << "Wide IV: " << *WidePhi << "\n");
  1686. ++NumWidened;
  1687. // Traverse the def-use chain using a worklist starting at the original IV.
  1688. assert(Widened.empty() && NarrowIVUsers.empty() && "expect initial state" );
  1689. Widened.insert(OrigPhi);
  1690. pushNarrowIVUsers(OrigPhi, WidePhi);
  1691. while (!NarrowIVUsers.empty()) {
  1692. WidenIV::NarrowIVDefUse DU = NarrowIVUsers.pop_back_val();
  1693. // Process a def-use edge. This may replace the use, so don't hold a
  1694. // use_iterator across it.
  1695. Instruction *WideUse = widenIVUse(DU, Rewriter);
  1696. // Follow all def-use edges from the previous narrow use.
  1697. if (WideUse)
  1698. pushNarrowIVUsers(DU.NarrowUse, WideUse);
  1699. // widenIVUse may have removed the def-use edge.
  1700. if (DU.NarrowDef->use_empty())
  1701. DeadInsts.emplace_back(DU.NarrowDef);
  1702. }
  1703. // Attach any debug information to the new PHI.
  1704. replaceAllDbgUsesWith(*OrigPhi, *WidePhi, *WidePhi, *DT);
  1705. return WidePhi;
  1706. }
/// Calculates control-dependent range for the given def at the given context
/// by looking at dominating conditions inside of the loop
void WidenIV::calculatePostIncRange(Instruction *NarrowDef,
                                    Instruction *NarrowUser) {
  using namespace llvm::PatternMatch;

  // Only handle increments of the form 'add nsw %lhs, <non-negative const>';
  // ranges are derived from conditions on %lhs and shifted by the constant.
  Value *NarrowDefLHS;
  const APInt *NarrowDefRHS;
  if (!match(NarrowDef, m_NSWAdd(m_Value(NarrowDefLHS),
                                 m_APInt(NarrowDefRHS))) ||
      !NarrowDefRHS->isNonNegative())
    return;

  // Given a condition on NarrowDefLHS known to hold (or not hold, per
  // TrueDest) at NarrowUser, derive a range for NarrowDef and record it.
  auto UpdateRangeFromCondition = [&] (Value *Condition,
                                       bool TrueDest) {
    CmpInst::Predicate Pred;
    Value *CmpRHS;
    if (!match(Condition, m_ICmp(Pred, m_Specific(NarrowDefLHS),
                                 m_Value(CmpRHS))))
      return;

    // On the false edge the inverse predicate holds.
    CmpInst::Predicate P =
            TrueDest ? Pred : CmpInst::getInversePredicate(Pred);

    auto CmpRHSRange = SE->getSignedRange(SE->getSCEV(CmpRHS));
    auto CmpConstrainedLHSRange =
            ConstantRange::makeAllowedICmpRegion(P, CmpRHSRange);
    // Shift the LHS range by the nsw-added constant to get NarrowDef's range.
    auto NarrowDefRange = CmpConstrainedLHSRange.addWithNoWrap(
        *NarrowDefRHS, OverflowingBinaryOperator::NoSignedWrap);

    updatePostIncRangeInfo(NarrowDef, NarrowUser, NarrowDefRange);
  };

  // Scan backwards from Ctx for guard intrinsics whose condition must hold.
  auto UpdateRangeFromGuards = [&](Instruction *Ctx) {
    if (!HasGuards)
      return;

    for (Instruction &I : make_range(Ctx->getIterator().getReverse(),
                                     Ctx->getParent()->rend())) {
      Value *C = nullptr;
      if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>(m_Value(C))))
        UpdateRangeFromCondition(C, /*TrueDest=*/true);
    }
  };

  UpdateRangeFromGuards(NarrowUser);

  BasicBlock *NarrowUserBB = NarrowUser->getParent();
  // If NarrowUserBB is statically unreachable asking dominator queries may
  // yield surprising results. (e.g. the block may not have a dom tree node)
  if (!DT->isReachableFromEntry(NarrowUserBB))
    return;

  // Walk up the idom chain while still inside the loop, harvesting conditions
  // from guards and conditional branches that dominate NarrowUser.
  for (auto *DTB = (*DT)[NarrowUserBB]->getIDom();
       L->contains(DTB->getBlock());
       DTB = DTB->getIDom()) {
    auto *BB = DTB->getBlock();
    auto *TI = BB->getTerminator();
    UpdateRangeFromGuards(TI);

    auto *BI = dyn_cast<BranchInst>(TI);
    if (!BI || !BI->isConditional())
      continue;

    auto *TrueSuccessor = BI->getSuccessor(0);
    auto *FalseSuccessor = BI->getSuccessor(1);

    // Only a dominating single edge lets us assume the branch outcome.
    auto DominatesNarrowUser = [this, NarrowUser] (BasicBlockEdge BBE) {
      return BBE.isSingleEdge() &&
             DT->dominates(BBE, NarrowUser->getParent());
    };

    if (DominatesNarrowUser(BasicBlockEdge(BB, TrueSuccessor)))
      UpdateRangeFromCondition(BI->getCondition(), /*TrueDest=*/true);

    if (DominatesNarrowUser(BasicBlockEdge(BB, FalseSuccessor)))
      UpdateRangeFromCondition(BI->getCondition(), /*TrueDest=*/false);
  }
}
  1771. /// Calculates PostIncRangeInfos map for the given IV
  1772. void WidenIV::calculatePostIncRanges(PHINode *OrigPhi) {
  1773. SmallPtrSet<Instruction *, 16> Visited;
  1774. SmallVector<Instruction *, 6> Worklist;
  1775. Worklist.push_back(OrigPhi);
  1776. Visited.insert(OrigPhi);
  1777. while (!Worklist.empty()) {
  1778. Instruction *NarrowDef = Worklist.pop_back_val();
  1779. for (Use &U : NarrowDef->uses()) {
  1780. auto *NarrowUser = cast<Instruction>(U.getUser());
  1781. // Don't go looking outside the current loop.
  1782. auto *NarrowUserLoop = (*LI)[NarrowUser->getParent()];
  1783. if (!NarrowUserLoop || !L->contains(NarrowUserLoop))
  1784. continue;
  1785. if (!Visited.insert(NarrowUser).second)
  1786. continue;
  1787. Worklist.push_back(NarrowUser);
  1788. calculatePostIncRange(NarrowDef, NarrowUser);
  1789. }
  1790. }
  1791. }
  1792. PHINode *llvm::createWideIV(const WideIVInfo &WI,
  1793. LoopInfo *LI, ScalarEvolution *SE, SCEVExpander &Rewriter,
  1794. DominatorTree *DT, SmallVectorImpl<WeakTrackingVH> &DeadInsts,
  1795. unsigned &NumElimExt, unsigned &NumWidened,
  1796. bool HasGuards, bool UsePostIncrementRanges) {
  1797. WidenIV Widener(WI, LI, SE, DT, DeadInsts, HasGuards, UsePostIncrementRanges);
  1798. PHINode *WidePHI = Widener.createWideIV(Rewriter);
  1799. NumElimExt = Widener.getNumElimExt();
  1800. NumWidened = Widener.getNumWidened();
  1801. return WidePHI;
  1802. }