//===- IndVarSimplify.cpp - Induction Variable Elimination ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into simpler forms suitable for subsequent
// analysis and transformation.
//
// If the trip count of a loop is computable, this pass also makes the following
// changes:
//   1. The exit condition for the loop is canonicalized to compare the
//      induction value against the exit value. This turns loops like:
//        'for (i = 7; i*i < 1000; ++i)' into 'for (i = 0; i != 25; ++i)'
//   2. Any use outside of the loop of an expression derived from the indvar
//      is changed to compute the derived value outside of the loop, eliminating
//      the dependence on the exit value of the induction variable. If the only
//      purpose of the loop is to compute the exit value of some derived
//      expression, this transformation will make the loop dead.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/IndVarSimplify.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SimplifyIndVar.h"
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "indvars"

STATISTIC(NumWidened, "Number of indvars widened");
STATISTIC(NumReplaced, "Number of exit values replaced");
STATISTIC(NumLFTR, "Number of loop exit tests replaced");
STATISTIC(NumElimExt, "Number of IV sign/zero extends eliminated");
STATISTIC(NumElimIV, "Number of congruent IVs eliminated");

// Trip count verification can be enabled by default under NDEBUG if we
// implement a strong expression equivalence checker in SCEV. Until then, we
// use the verify-indvars flag, which may assert in some cases.
static cl::opt<bool> VerifyIndvars(
    "verify-indvars", cl::Hidden,
    cl::desc("Verify the ScalarEvolution result after running indvars. Has no "
             "effect in release builds. (Note: this adds additional SCEV "
             "queries potentially changing the analysis result)"));

static cl::opt<ReplaceExitVal> ReplaceExitValue(
    "replexitval", cl::Hidden, cl::init(OnlyCheapRepl),
    cl::desc("Choose the strategy to replace exit value in IndVarSimplify"),
    cl::values(clEnumValN(NeverRepl, "never", "never replace exit value"),
               clEnumValN(OnlyCheapRepl, "cheap",
                          "only replace exit value when the cost is cheap"),
               clEnumValN(NoHardUse, "noharduse",
                          "only replace exit values when loop def likely dead"),
               clEnumValN(AlwaysRepl, "always",
                          "always replace exit value whenever possible")));

static cl::opt<bool> UsePostIncrementRanges(
    "indvars-post-increment-ranges", cl::Hidden,
    cl::desc("Use post increment control-dependent ranges in IndVarSimplify"),
    cl::init(true));

static cl::opt<bool>
    DisableLFTR("disable-lftr", cl::Hidden, cl::init(false),
                cl::desc("Disable Linear Function Test Replace optimization"));

static cl::opt<bool>
    LoopPredication("indvars-predicate-loops", cl::Hidden, cl::init(true),
                    cl::desc("Predicate conditions in read only loops"));

static cl::opt<bool>
    AllowIVWidening("indvars-widen-indvars", cl::Hidden, cl::init(true),
                    cl::desc("Allow widening of indvars to eliminate s/zext"));

namespace {

class IndVarSimplify {
  LoopInfo *LI;
  ScalarEvolution *SE;
  DominatorTree *DT;
  const DataLayout &DL;
  TargetLibraryInfo *TLI;
  const TargetTransformInfo *TTI;
  std::unique_ptr<MemorySSAUpdater> MSSAU;

  SmallVector<WeakTrackingVH, 16> DeadInsts;
  bool WidenIndVars;

  bool handleFloatingPointIV(Loop *L, PHINode *PH);
  bool rewriteNonIntegerIVs(Loop *L);

  bool simplifyAndExtend(Loop *L, SCEVExpander &Rewriter, LoopInfo *LI);

  /// Try to improve our exit conditions by converting condition from signed
  /// to unsigned or rotating computation out of the loop.
  /// (See inline comment about why this is duplicated from simplifyAndExtend)
  bool canonicalizeExitCondition(Loop *L);

  /// Try to eliminate loop exits based on analyzeable exit counts
  bool optimizeLoopExits(Loop *L, SCEVExpander &Rewriter);

  /// Try to form loop invariant tests for loop exits by changing how many
  /// iterations of the loop run when that is unobservable.
  bool predicateLoopExits(Loop *L, SCEVExpander &Rewriter);

  bool rewriteFirstIterationLoopExitValues(Loop *L);

  bool linearFunctionTestReplace(Loop *L, BasicBlock *ExitingBB,
                                 const SCEV *ExitCount, PHINode *IndVar,
                                 SCEVExpander &Rewriter);

  bool sinkUnusedInvariants(Loop *L);

public:
  IndVarSimplify(LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
                 const DataLayout &DL, TargetLibraryInfo *TLI,
                 TargetTransformInfo *TTI, MemorySSA *MSSA, bool WidenIndVars)
      : LI(LI), SE(SE), DT(DT), DL(DL), TLI(TLI), TTI(TTI),
        WidenIndVars(WidenIndVars) {
    if (MSSA)
      MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);
  }

  bool run(Loop *L);
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// rewriteNonIntegerIVs and helpers. Prefer integer IVs.
//===----------------------------------------------------------------------===//

/// Convert APF to an integer, if possible.
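/// For example, 3.0 converts exactly to 3, while 3.5 converts to 3 only
/// inexactly under round-toward-zero and is therefore rejected.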
static bool ConvertToSInt(const APFloat &APF, int64_t &IntVal) {
  bool isExact = false;
  // See if we can convert this to an int64_t
  uint64_t UIntVal;
  if (APF.convertToInteger(makeMutableArrayRef(UIntVal), 64, true,
                           APFloat::rmTowardZero, &isExact) != APFloat::opOK ||
      !isExact)
    return false;
  IntVal = UIntVal;
  return true;
}

/// If the loop has a floating-point induction variable, insert a
/// corresponding integer induction variable if possible.
/// For example,
///   for (double i = 0; i < 10000; ++i)
///     bar(i)
/// is converted into
///   for (int i = 0; i < 10000; ++i)
///     bar((double)i);
bool IndVarSimplify::handleFloatingPointIV(Loop *L, PHINode *PN) {
  unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0));
  unsigned BackEdge = IncomingEdge ^ 1;

  // Check incoming value.
  auto *InitValueVal = dyn_cast<ConstantFP>(PN->getIncomingValue(IncomingEdge));

  int64_t InitValue;
  if (!InitValueVal || !ConvertToSInt(InitValueVal->getValueAPF(), InitValue))
    return false;

  // Check IV increment. Reject this PN if increment operation is not
  // an add or increment value can not be represented by an integer.
  auto *Incr = dyn_cast<BinaryOperator>(PN->getIncomingValue(BackEdge));
  if (Incr == nullptr || Incr->getOpcode() != Instruction::FAdd) return false;

  // If this is not an add of the PHI with a constantfp, or if the constant fp
  // is not an integer, bail out.
  ConstantFP *IncValueVal = dyn_cast<ConstantFP>(Incr->getOperand(1));
  int64_t IncValue;
  if (IncValueVal == nullptr || Incr->getOperand(0) != PN ||
      !ConvertToSInt(IncValueVal->getValueAPF(), IncValue))
    return false;

  // Check Incr uses. One user is PN and the other user is an exit condition
  // used by the conditional terminator.
  Value::user_iterator IncrUse = Incr->user_begin();
  Instruction *U1 = cast<Instruction>(*IncrUse++);
  if (IncrUse == Incr->user_end()) return false;
  Instruction *U2 = cast<Instruction>(*IncrUse++);
  if (IncrUse != Incr->user_end()) return false;

  // Find exit condition, which is an fcmp. If it doesn't exist, or if it isn't
  // only used by a branch, we can't transform it.
  FCmpInst *Compare = dyn_cast<FCmpInst>(U1);
  if (!Compare)
    Compare = dyn_cast<FCmpInst>(U2);
  if (!Compare || !Compare->hasOneUse() ||
      !isa<BranchInst>(Compare->user_back()))
    return false;

  BranchInst *TheBr = cast<BranchInst>(Compare->user_back());

  // We need to verify that the branch actually controls the iteration count
  // of the loop. If not, the new IV can overflow and no one will notice.
  // The branch block must be in the loop and one of the successors must be out
  // of the loop.
  assert(TheBr->isConditional() && "Can't use fcmp if not conditional");
  if (!L->contains(TheBr->getParent()) ||
      (L->contains(TheBr->getSuccessor(0)) &&
       L->contains(TheBr->getSuccessor(1))))
    return false;

  // If it isn't a comparison with an integer-as-fp (the exit value), we can't
  // transform it.
  ConstantFP *ExitValueVal = dyn_cast<ConstantFP>(Compare->getOperand(1));
  int64_t ExitValue;
  if (ExitValueVal == nullptr ||
      !ConvertToSInt(ExitValueVal->getValueAPF(), ExitValue))
    return false;

  // Find new predicate for integer comparison.
  CmpInst::Predicate NewPred = CmpInst::BAD_ICMP_PREDICATE;
  switch (Compare->getPredicate()) {
  default: return false;  // Unknown comparison.
  case CmpInst::FCMP_OEQ:
  case CmpInst::FCMP_UEQ: NewPred = CmpInst::ICMP_EQ; break;
  case CmpInst::FCMP_ONE:
  case CmpInst::FCMP_UNE: NewPred = CmpInst::ICMP_NE; break;
  case CmpInst::FCMP_OGT:
  case CmpInst::FCMP_UGT: NewPred = CmpInst::ICMP_SGT; break;
  case CmpInst::FCMP_OGE:
  case CmpInst::FCMP_UGE: NewPred = CmpInst::ICMP_SGE; break;
  case CmpInst::FCMP_OLT:
  case CmpInst::FCMP_ULT: NewPred = CmpInst::ICMP_SLT; break;
  case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_ULE: NewPred = CmpInst::ICMP_SLE; break;
  }

  // We convert the floating point induction variable to a signed i32 value if
  // we can. This is only safe if the comparison will not overflow in a way
  // that won't be trapped by the integer equivalent operations. Check for this
  // now.
  // TODO: We could use i64 if it is native and the range requires it.

  // The start/stride/exit values must all fit in signed i32.
  if (!isInt<32>(InitValue) || !isInt<32>(IncValue) || !isInt<32>(ExitValue))
    return false;

  // If not actually striding (add x, 0.0), avoid touching the code.
  if (IncValue == 0)
    return false;

  // Positive and negative strides have different safety conditions.
  if (IncValue > 0) {
    // If we have a positive stride, we require the init to be less than the
    // exit value.
    if (InitValue >= ExitValue)
      return false;

    uint32_t Range = uint32_t(ExitValue - InitValue);
    // Check for infinite loop, either:
    // while (i <= Exit) or until (i > Exit)
    if (NewPred == CmpInst::ICMP_SLE || NewPred == CmpInst::ICMP_SGT) {
      if (++Range == 0) return false;  // Range overflows.
    }

    unsigned Leftover = Range % uint32_t(IncValue);

    // If this is an equality comparison, we require that the strided value
    // exactly land on the exit value, otherwise the IV condition will wrap
    // around and do things the fp IV wouldn't.
    if ((NewPred == CmpInst::ICMP_EQ || NewPred == CmpInst::ICMP_NE) &&
        Leftover != 0)
      return false;

    // If the stride would wrap around the i32 before exiting, we can't
    // transform the IV.
    if (Leftover != 0 && int32_t(ExitValue + IncValue) < ExitValue)
      return false;
  } else {
    // If we have a negative stride, we require the init to be greater than the
    // exit value.
    if (InitValue <= ExitValue)
      return false;

    uint32_t Range = uint32_t(InitValue - ExitValue);
    // Check for infinite loop, either:
    // while (i >= Exit) or until (i < Exit)
    if (NewPred == CmpInst::ICMP_SGE || NewPred == CmpInst::ICMP_SLT) {
      if (++Range == 0) return false;  // Range overflows.
    }

    unsigned Leftover = Range % uint32_t(-IncValue);

    // If this is an equality comparison, we require that the strided value
    // exactly land on the exit value, otherwise the IV condition will wrap
    // around and do things the fp IV wouldn't.
    if ((NewPred == CmpInst::ICMP_EQ || NewPred == CmpInst::ICMP_NE) &&
        Leftover != 0)
      return false;

    // If the stride would wrap around the i32 before exiting, we can't
    // transform the IV.
    if (Leftover != 0 && int32_t(ExitValue + IncValue) > ExitValue)
      return false;
  }

  IntegerType *Int32Ty = Type::getInt32Ty(PN->getContext());

  // Insert new integer induction variable.
  PHINode *NewPHI = PHINode::Create(Int32Ty, 2, PN->getName() + ".int", PN);
  NewPHI->addIncoming(ConstantInt::get(Int32Ty, InitValue),
                      PN->getIncomingBlock(IncomingEdge));

  Value *NewAdd =
      BinaryOperator::CreateAdd(NewPHI, ConstantInt::get(Int32Ty, IncValue),
                                Incr->getName() + ".int", Incr);
  NewPHI->addIncoming(NewAdd, PN->getIncomingBlock(BackEdge));

  ICmpInst *NewCompare = new ICmpInst(TheBr, NewPred, NewAdd,
                                      ConstantInt::get(Int32Ty, ExitValue),
                                      Compare->getName());

  // In the following deletions, PN may become dead and may be deleted.
  // Use a WeakTrackingVH to observe whether this happens.
  WeakTrackingVH WeakPH = PN;

  // Delete the old floating point exit comparison. The branch starts using the
  // new comparison.
  NewCompare->takeName(Compare);
  Compare->replaceAllUsesWith(NewCompare);
  RecursivelyDeleteTriviallyDeadInstructions(Compare, TLI, MSSAU.get());

  // Delete the old floating point increment.
  Incr->replaceAllUsesWith(UndefValue::get(Incr->getType()));
  RecursivelyDeleteTriviallyDeadInstructions(Incr, TLI, MSSAU.get());

  // If the FP induction variable still has uses, this is because something else
  // in the loop uses its value. In order to canonicalize the induction
  // variable, we chose to eliminate the IV and rewrite it in terms of an
  // int->fp cast.
  //
  // We give preference to sitofp over uitofp because it is faster on most
  // platforms.
  if (WeakPH) {
    Value *Conv = new SIToFPInst(NewPHI, PN->getType(), "indvar.conv",
                                 &*PN->getParent()->getFirstInsertionPt());
    PN->replaceAllUsesWith(Conv);
    RecursivelyDeleteTriviallyDeadInstructions(PN, TLI, MSSAU.get());
  }
  return true;
}

bool IndVarSimplify::rewriteNonIntegerIVs(Loop *L) {
  // First step. Check to see if there are any floating-point recurrences.
  // If there are, change them into integer recurrences, permitting analysis by
  // the SCEV routines.
  BasicBlock *Header = L->getHeader();

  SmallVector<WeakTrackingVH, 8> PHIs;
  for (PHINode &PN : Header->phis())
    PHIs.push_back(&PN);

  bool Changed = false;
  for (unsigned i = 0, e = PHIs.size(); i != e; ++i)
    if (PHINode *PN = dyn_cast_or_null<PHINode>(&*PHIs[i]))
      Changed |= handleFloatingPointIV(L, PN);

  // If the loop previously had a floating-point IV, ScalarEvolution
  // may not have been able to compute a trip count. Now that we've done some
  // re-writing, the trip count may be computable.
  if (Changed)
    SE->forgetLoop(L);
  return Changed;
}

//===---------------------------------------------------------------------===//
// rewriteFirstIterationLoopExitValues: Rewrite loop exit values if we know
// they will exit at the first iteration.
//===---------------------------------------------------------------------===//

/// Check to see if this loop has loop invariant conditions which lead to loop
/// exits. If so, we know that if the exit path is taken, it is at the first
/// loop iteration. This lets us predict exit values of PHI nodes that live in
/// the loop header.
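/// For example, with a loop-invariant early-exit condition:
///   for (i = start; ...; ++i) { if (inv_cond) goto exit; ... }
/// an LCSSA phi in 'exit' that receives the header phi 'i' can be rewritten
/// to i's preheader value ('start'), since that exit, if taken at all, is
/// taken on the first iteration.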
bool IndVarSimplify::rewriteFirstIterationLoopExitValues(Loop *L) {
  // Verify the input to the pass is already in LCSSA form.
  assert(L->isLCSSAForm(*DT));

  SmallVector<BasicBlock *, 8> ExitBlocks;
  L->getUniqueExitBlocks(ExitBlocks);

  bool MadeAnyChanges = false;
  for (auto *ExitBB : ExitBlocks) {
    // If there are no more PHI nodes in this exit block, then no more
    // values defined inside the loop are used on this path.
    for (PHINode &PN : ExitBB->phis()) {
      for (unsigned IncomingValIdx = 0, E = PN.getNumIncomingValues();
           IncomingValIdx != E; ++IncomingValIdx) {
        auto *IncomingBB = PN.getIncomingBlock(IncomingValIdx);

        // Can we prove that the exit must run on the first iteration if it
        // runs at all? (i.e. early exits are fine for our purposes, but
        // traces which lead to this exit being taken on the 2nd iteration
        // aren't.) Note that this is about whether the exit branch is
        // executed, not about whether it is taken.
        if (!L->getLoopLatch() ||
            !DT->dominates(IncomingBB, L->getLoopLatch()))
          continue;

        // Get condition that leads to the exit path.
        auto *TermInst = IncomingBB->getTerminator();

        Value *Cond = nullptr;
        if (auto *BI = dyn_cast<BranchInst>(TermInst)) {
          // Must be a conditional branch, otherwise the block
          // should not be in the loop.
          Cond = BI->getCondition();
        } else if (auto *SI = dyn_cast<SwitchInst>(TermInst))
          Cond = SI->getCondition();
        else
          continue;

        if (!L->isLoopInvariant(Cond))
          continue;

        auto *ExitVal = dyn_cast<PHINode>(PN.getIncomingValue(IncomingValIdx));

        // Only deal with PHIs in the loop header.
        if (!ExitVal || ExitVal->getParent() != L->getHeader())
          continue;

        // If ExitVal is a PHI on the loop header, then we know its
        // value along this exit because the exit can only be taken
        // on the first iteration.
        auto *LoopPreheader = L->getLoopPreheader();
        assert(LoopPreheader && "Invalid loop");
        int PreheaderIdx = ExitVal->getBasicBlockIndex(LoopPreheader);
        if (PreheaderIdx != -1) {
          assert(ExitVal->getParent() == L->getHeader() &&
                 "ExitVal must be in loop header");
          MadeAnyChanges = true;
          PN.setIncomingValue(IncomingValIdx,
                              ExitVal->getIncomingValue(PreheaderIdx));
          SE->forgetValue(&PN);
        }
      }
    }
  }
  return MadeAnyChanges;
}

//===----------------------------------------------------------------------===//
// IV Widening - Extend the width of an IV to cover its widest uses.
//===----------------------------------------------------------------------===//

/// Update information about the induction variable that is extended by this
/// sign or zero extend operation. This is used to determine the final width of
/// the IV before actually widening it.
static void visitIVCast(CastInst *Cast, WideIVInfo &WI,
                        ScalarEvolution *SE,
                        const TargetTransformInfo *TTI) {
  bool IsSigned = Cast->getOpcode() == Instruction::SExt;
  if (!IsSigned && Cast->getOpcode() != Instruction::ZExt)
    return;

  Type *Ty = Cast->getType();
  uint64_t Width = SE->getTypeSizeInBits(Ty);
  if (!Cast->getModule()->getDataLayout().isLegalInteger(Width))
    return;

  // Check that `Cast` actually extends the induction variable (we rely on this
  // later). This takes care of cases where `Cast` is extending a truncation of
  // the narrow induction variable, and thus can end up being narrower than the
  // "narrow" induction variable.
  uint64_t NarrowIVWidth = SE->getTypeSizeInBits(WI.NarrowIV->getType());
  if (NarrowIVWidth >= Width)
    return;

  // Cast is either an sext or zext up to this point.
  // We should not widen an indvar if arithmetics on the wider indvar are more
  // expensive than those on the narrower indvar. We check only the cost of ADD
  // because at least an ADD is required to increment the induction variable. We
  // could compute more comprehensively the cost of all instructions on the
  // induction variable when necessary.
  if (TTI &&
      TTI->getArithmeticInstrCost(Instruction::Add, Ty) >
          TTI->getArithmeticInstrCost(Instruction::Add,
                                      Cast->getOperand(0)->getType())) {
    return;
  }

  if (!WI.WidestNativeType ||
      Width > SE->getTypeSizeInBits(WI.WidestNativeType)) {
    WI.WidestNativeType = SE->getEffectiveSCEVType(Ty);
    WI.IsSigned = IsSigned;
    return;
  }

  // We extend the IV to satisfy the sign of its user(s), or 'signed'
  // if there are multiple users with both sign- and zero extensions,
  // in order not to introduce nondeterministic behaviour based on the
  // unspecified order of a PHI node's users-iterator.
  WI.IsSigned |= IsSigned;
}

//===----------------------------------------------------------------------===//
//  Live IV Reduction - Minimize IVs live across the loop.
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
//  Simplification of IV users based on SCEV evaluation.
//===----------------------------------------------------------------------===//

namespace {

class IndVarSimplifyVisitor : public IVVisitor {
  ScalarEvolution *SE;
  const TargetTransformInfo *TTI;
  PHINode *IVPhi;

public:
  WideIVInfo WI;

  IndVarSimplifyVisitor(PHINode *IV, ScalarEvolution *SCEV,
                        const TargetTransformInfo *TTI,
                        const DominatorTree *DTree)
      : SE(SCEV), TTI(TTI), IVPhi(IV) {
    DT = DTree;
    WI.NarrowIV = IVPhi;
  }

  // Implement the interface used by simplifyUsersOfIV.
  void visitCast(CastInst *Cast) override { visitIVCast(Cast, WI, SE, TTI); }
};

} // end anonymous namespace

/// Iteratively perform simplification on a worklist of IV users. Each
/// successive simplification may push more users which may themselves be
/// candidates for simplification.
///
/// Sign/Zero extend elimination is interleaved with IV simplification.
bool IndVarSimplify::simplifyAndExtend(Loop *L,
                                       SCEVExpander &Rewriter,
                                       LoopInfo *LI) {
  SmallVector<WideIVInfo, 8> WideIVs;

  auto *GuardDecl = L->getBlocks()[0]->getModule()->getFunction(
      Intrinsic::getName(Intrinsic::experimental_guard));
  bool HasGuards = GuardDecl && !GuardDecl->use_empty();

  SmallVector<PHINode *, 8> LoopPhis;
  for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
    LoopPhis.push_back(cast<PHINode>(I));
  }

  // Each round of simplification iterates through the SimplifyIVUsers worklist
  // for all current phis, then determines whether any IVs can be
  // widened. Widening adds new phis to LoopPhis, inducing another round of
  // simplification on the wide IVs.
  bool Changed = false;
  while (!LoopPhis.empty()) {
    // Evaluate as many IV expressions as possible before widening any IVs.
    // This forces SCEV to set no-wrap flags before evaluating sign/zero
    // extension. The first time SCEV attempts to normalize sign/zero extension,
    // the result becomes final. So for the most predictable results, we delay
    // evaluation of sign/zero extend evaluation until needed, and avoid running
    // other SCEV based analysis prior to simplifyAndExtend.
    do {
      PHINode *CurrIV = LoopPhis.pop_back_val();

      // Information about sign/zero extensions of CurrIV.
      IndVarSimplifyVisitor Visitor(CurrIV, SE, TTI, DT);

      Changed |= simplifyUsersOfIV(CurrIV, SE, DT, LI, TTI, DeadInsts, Rewriter,
                                   &Visitor);

      if (Visitor.WI.WidestNativeType) {
        WideIVs.push_back(Visitor.WI);
      }
    } while (!LoopPhis.empty());

    // Continue if we disallowed widening.
    if (!WidenIndVars)
      continue;

    for (; !WideIVs.empty(); WideIVs.pop_back()) {
      unsigned ElimExt;
      unsigned Widened;
      if (PHINode *WidePhi = createWideIV(WideIVs.back(), LI, SE, Rewriter,
                                          DT, DeadInsts, ElimExt, Widened,
                                          HasGuards, UsePostIncrementRanges)) {
        NumElimExt += ElimExt;
        NumWidened += Widened;
        Changed = true;
        LoopPhis.push_back(WidePhi);
      }
    }
  }
  return Changed;
}

//===----------------------------------------------------------------------===//
//  linearFunctionTestReplace and its kin. Rewrite the loop exit condition.
//===----------------------------------------------------------------------===//

/// Given a Value which is hoped to be part of an add recurrence in the given
/// loop, return the associated Phi node if so. Otherwise, return null. Note
/// that this is less general than SCEV's AddRec checking.
static PHINode *getLoopPhiForCounter(Value *IncV, Loop *L) {
  Instruction *IncI = dyn_cast<Instruction>(IncV);
  if (!IncI)
    return nullptr;

  switch (IncI->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
    break;
  case Instruction::GetElementPtr:
    // An IV counter must preserve its type.
    if (IncI->getNumOperands() == 2)
      break;
    LLVM_FALLTHROUGH;
  default:
    return nullptr;
  }

  PHINode *Phi = dyn_cast<PHINode>(IncI->getOperand(0));
  if (Phi && Phi->getParent() == L->getHeader()) {
    if (L->isLoopInvariant(IncI->getOperand(1)))
      return Phi;
    return nullptr;
  }
  if (IncI->getOpcode() == Instruction::GetElementPtr)
    return nullptr;

  // Allow add/sub to be commuted.
  Phi = dyn_cast<PHINode>(IncI->getOperand(1));
  if (Phi && Phi->getParent() == L->getHeader()) {
    if (L->isLoopInvariant(IncI->getOperand(0)))
      return Phi;
  }
  return nullptr;
}

/// Whether the current loop exit test is based on this value. Currently this
/// is limited to a direct use in the loop condition.
static bool isLoopExitTestBasedOn(Value *V, BasicBlock *ExitingBB) {
  BranchInst *BI = cast<BranchInst>(ExitingBB->getTerminator());
  ICmpInst *ICmp = dyn_cast<ICmpInst>(BI->getCondition());
  // TODO: Allow non-icmp loop test.
  if (!ICmp)
    return false;

  // TODO: Allow indirect use.
  return ICmp->getOperand(0) == V || ICmp->getOperand(1) == V;
}

/// linearFunctionTestReplace policy. Return true unless we can show that the
/// current exit test is already sufficiently canonical.
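/// For example, 'icmp slt i32 %iv, %n' is not canonical and warrants LFTR,
/// while 'icmp ne i32 %iv.next, %limit' on a simple counter against a
/// loop-invariant limit already is.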
static bool needsLFTR(Loop *L, BasicBlock *ExitingBB) {
  assert(L->getLoopLatch() && "Must be in simplified form");

  // Avoid converting a constant or loop invariant test back to a runtime
  // test. This is critical for when SCEV's cached ExitCount is less precise
  // than the current IR (such as after we've proven a particular exit is
  // actually dead and thus the BE count never reaches our ExitCount.)
  BranchInst *BI = cast<BranchInst>(ExitingBB->getTerminator());
  if (L->isLoopInvariant(BI->getCondition()))
    return false;

  // Do LFTR to simplify the exit condition to an ICMP.
  ICmpInst *Cond = dyn_cast<ICmpInst>(BI->getCondition());
  if (!Cond)
    return true;

  // Do LFTR to simplify the exit ICMP to EQ/NE.
  ICmpInst::Predicate Pred = Cond->getPredicate();
  if (Pred != ICmpInst::ICMP_NE && Pred != ICmpInst::ICMP_EQ)
    return true;

  // Look for a loop invariant RHS.
  Value *LHS = Cond->getOperand(0);
  Value *RHS = Cond->getOperand(1);
  if (!L->isLoopInvariant(RHS)) {
    if (!L->isLoopInvariant(LHS))
      return true;
    std::swap(LHS, RHS);
  }

  // Look for a simple IV counter LHS.
  PHINode *Phi = dyn_cast<PHINode>(LHS);
  if (!Phi)
    Phi = getLoopPhiForCounter(LHS, L);
  if (!Phi)
    return true;

  // Do LFTR if PHI node is defined in the loop, but is *not* a counter.
  int Idx = Phi->getBasicBlockIndex(L->getLoopLatch());
  if (Idx < 0)
    return true;

  // Do LFTR if the exit condition's IV is *not* a simple counter.
  Value *IncV = Phi->getIncomingValue(Idx);
  return Phi != getLoopPhiForCounter(IncV, L);
}

/// Return true if undefined behavior would provably be executed on the path to
/// OnPathTo if Root produced a poison result. Note that this doesn't say
/// anything about whether OnPathTo is actually executed or whether Root is
/// actually poison. This can be used to assess whether a new use of Root can
/// be added at a location which is control equivalent with OnPathTo (such as
/// immediately before it) without introducing UB which didn't previously
/// exist. Note that a false result conveys no information.
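/// For instance, if Root feeds the divisor of a 'udiv' or the pointer operand
/// of a store that dominates OnPathTo, poison in Root already guarantees UB,
/// so a new use of Root there cannot make the program any more undefined.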
static bool mustExecuteUBIfPoisonOnPathTo(Instruction *Root,
                                          Instruction *OnPathTo,
                                          DominatorTree *DT) {
  // Basic approach is to assume Root is poison, propagate poison forward
  // through all users we can easily track, and then check whether any of those
  // users are provable UB and must execute before our exiting block might
  // exit.

  // The set of all recursive users we've visited (which are assumed to all be
  // poison because of said visit).
  SmallSet<const Value *, 16> KnownPoison;
  SmallVector<const Instruction *, 16> Worklist;
  Worklist.push_back(Root);
  while (!Worklist.empty()) {
    const Instruction *I = Worklist.pop_back_val();

    // If we know this must trigger UB on a path leading to our target.
    if (mustTriggerUB(I, KnownPoison) && DT->dominates(I, OnPathTo))
      return true;

    // If we can't analyze propagation through this instruction, just skip it
    // and transitive users. Safe as false is a conservative result.
    if (!propagatesPoison(cast<Operator>(I)) && I != Root)
      continue;

    if (KnownPoison.insert(I).second)
      for (const User *User : I->users())
        Worklist.push_back(cast<Instruction>(User));
  }

  // Might be non-UB, or might have a path we couldn't prove must execute on
  // way to exiting bb.
  return false;
}

/// Recursive helper for hasConcreteDef(). Unfortunately, this currently boils
/// down to checking that all operands are constant and listing instructions
/// that may hide undef.
static bool hasConcreteDefImpl(Value *V, SmallPtrSetImpl<Value *> &Visited,
                               unsigned Depth) {
  if (isa<Constant>(V))
    return !isa<UndefValue>(V);

  if (Depth >= 6)
    return false;

  // Conservatively handle non-constant non-instructions. For example,
  // Arguments may be undef.
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // Load and return values may be undef.
  if (I->mayReadFromMemory() || isa<CallInst>(I) || isa<InvokeInst>(I))
    return false;

  // Optimistically handle other instructions.
  for (Value *Op : I->operands()) {
    if (!Visited.insert(Op).second)
      continue;
    if (!hasConcreteDefImpl(Op, Visited, Depth + 1))
      return false;
  }
  return true;
}

/// Return true if the given value is concrete. We must prove that undef can
/// never reach it.
///
/// TODO: If we decide that this is a good approach to checking for undef, we
/// may factor it into a common location.
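/// For example, 'add i32 1, 2' is concrete, while loads, calls, and function
/// arguments are conservatively treated as possibly-undef.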
static bool hasConcreteDef(Value *V) {
  SmallPtrSet<Value *, 8> Visited;
  Visited.insert(V);
  return hasConcreteDefImpl(V, Visited, 0);
}

/// Return true if this IV has any uses other than the (soon to be rewritten)
/// loop exit test.
static bool AlmostDeadIV(PHINode *Phi, BasicBlock *LatchBlock, Value *Cond) {
  int LatchIdx = Phi->getBasicBlockIndex(LatchBlock);
  Value *IncV = Phi->getIncomingValue(LatchIdx);

  for (User *U : Phi->users())
    if (U != Cond && U != IncV) return false;

  for (User *U : IncV->users())
    if (U != Cond && U != Phi) return false;
  return true;
}

/// Return true if the given phi is a "counter" in L. A counter is an
/// add recurrence (of integer or pointer type) with an arbitrary start, and a
/// step of 1. Note that L must have exactly one latch.
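/// For example, 'i = phi [%start, %preheader], [%i.next, %latch]' with
/// '%i.next = add %i, 1' is a counter; a phi stepping by 2 is not.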
static bool isLoopCounter(PHINode *Phi, Loop *L,
                          ScalarEvolution *SE) {
  assert(Phi->getParent() == L->getHeader());
  assert(L->getLoopLatch());

  if (!SE->isSCEVable(Phi->getType()))
    return false;

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Phi));
  if (!AR || AR->getLoop() != L || !AR->isAffine())
    return false;

  const SCEV *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE));
  if (!Step || !Step->isOne())
    return false;

  int LatchIdx = Phi->getBasicBlockIndex(L->getLoopLatch());
  Value *IncV = Phi->getIncomingValue(LatchIdx);
  return (getLoopPhiForCounter(IncV, L) == Phi &&
          isa<SCEVAddRecExpr>(SE->getSCEV(IncV)));
}

/// Search the loop header for a loop counter (an add rec w/ step of one)
/// suitable for use by LFTR. If multiple counters are available, select the
/// "best" one based on profitability heuristics.
///
/// BECount may be an i8* pointer type. The pointer difference is already
/// valid count without scaling the address stride, so it remains a pointer
/// expression as far as SCEV is concerned.
static PHINode *FindLoopCounter(Loop *L, BasicBlock *ExitingBB,
                                const SCEV *BECount,
                                ScalarEvolution *SE, DominatorTree *DT) {
  uint64_t BCWidth = SE->getTypeSizeInBits(BECount->getType());

  Value *Cond = cast<BranchInst>(ExitingBB->getTerminator())->getCondition();

  // Loop over all of the PHI nodes, looking for a simple counter.
  PHINode *BestPhi = nullptr;
  const SCEV *BestInit = nullptr;
  BasicBlock *LatchBlock = L->getLoopLatch();
  assert(LatchBlock && "Must be in simplified form");
  const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();

  for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
    PHINode *Phi = cast<PHINode>(I);
    if (!isLoopCounter(Phi, L, SE))
      continue;

    // Avoid comparing an integer IV against a pointer Limit.
    if (BECount->getType()->isPointerTy() && !Phi->getType()->isPointerTy())
      continue;

    const auto *AR = cast<SCEVAddRecExpr>(SE->getSCEV(Phi));

    // AR may be a pointer type, while BECount is an integer type.
    // AR may be wider than BECount. With eq/ne tests overflow is immaterial.
    // AR may not be a narrower type, or we may never exit.
    uint64_t PhiWidth = SE->getTypeSizeInBits(AR->getType());
    if (PhiWidth < BCWidth || !DL.isLegalInteger(PhiWidth))
      continue;

    // Avoid reusing a potentially undef value to compute other values that may
    // have originally had a concrete definition.
    if (!hasConcreteDef(Phi)) {
      // We explicitly allow unknown phis as long as they are already used by
      // the loop exit test. This is legal since performing LFTR could not
      // increase the number of undef users.
      Value *IncPhi = Phi->getIncomingValueForBlock(LatchBlock);
      if (!isLoopExitTestBasedOn(Phi, ExitingBB) &&
          !isLoopExitTestBasedOn(IncPhi, ExitingBB))
        continue;
    }

    // Avoid introducing undefined behavior due to poison which didn't exist in
    // the original program. (Annoyingly, the rules for poison and undef
    // propagation are distinct, so this does NOT cover the undef case above.)
    //
    // We have to ensure that we don't introduce UB by introducing a use on an
    // iteration where said IV produces poison. Our strategy here differs for
    // pointers and integer IVs. For integers, we strip and reinfer as needed,
    // see code in linearFunctionTestReplace. For pointers, we restrict
    // transforms as there is no good way to reinfer inbounds once lost.
    if (!Phi->getType()->isIntegerTy() &&
        !mustExecuteUBIfPoisonOnPathTo(Phi, ExitingBB->getTerminator(), DT))
      continue;

    const SCEV *Init = AR->getStart();

    if (BestPhi && !AlmostDeadIV(BestPhi, LatchBlock, Cond)) {
      // Don't force a live loop counter if another IV can be used.
      if (AlmostDeadIV(Phi, LatchBlock, Cond))
        continue;

      // Prefer to count-from-zero. This is a more "canonical" counter form. It
      // also prefers integer to pointer IVs.
      if (BestInit->isZero() != Init->isZero()) {
        if (BestInit->isZero())
          continue;
      }
      // If two IVs both count from zero or both count from nonzero then the
      // narrower is likely a dead phi that has been widened. Use the wider phi
      // to allow the other to be eliminated.
      else if (PhiWidth <= SE->getTypeSizeInBits(BestPhi->getType()))
        continue;
    }
    BestPhi = Phi;
    BestInit = Init;
  }
  return BestPhi;
}

/// Insert an IR expression which computes the value held by the IV IndVar
/// (which must be a loop counter w/ unit stride) after the backedge of loop L
/// is taken ExitCount times.
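/// For a counter starting at IVInit with unit stride, the pre-increment limit
/// is IVInit + ExitCount; the post-increment limit adds one more.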
  837. static Value *genLoopLimit(PHINode *IndVar, BasicBlock *ExitingBB,
  838. const SCEV *ExitCount, bool UsePostInc, Loop *L,
  839. SCEVExpander &Rewriter, ScalarEvolution *SE) {
  840. assert(isLoopCounter(IndVar, L, SE));
  841. const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(SE->getSCEV(IndVar));
  842. const SCEV *IVInit = AR->getStart();
  843. assert(AR->getStepRecurrence(*SE)->isOne() && "only handles unit stride");
  844. // IVInit may be a pointer while ExitCount is an integer when FindLoopCounter
  845. // finds a valid pointer IV. Sign extend ExitCount in order to materialize a
  846. // GEP. Avoid running SCEVExpander on a new pointer value, instead reusing
  847. // the existing GEPs whenever possible.
  848. if (IndVar->getType()->isPointerTy() &&
  849. !ExitCount->getType()->isPointerTy()) {
  850. // IVOffset will be the new GEP offset that is interpreted by GEP as a
  851. // signed value. ExitCount on the other hand represents the loop trip count,
  852. // which is an unsigned value. FindLoopCounter only allows induction
  853. // variables that have a positive unit stride of one. This means we don't
  854. // have to handle the case of negative offsets (yet) and just need to zero
  855. // extend ExitCount.
  856. Type *OfsTy = SE->getEffectiveSCEVType(IVInit->getType());
  857. const SCEV *IVOffset = SE->getTruncateOrZeroExtend(ExitCount, OfsTy);
  858. if (UsePostInc)
  859. IVOffset = SE->getAddExpr(IVOffset, SE->getOne(OfsTy));
  860. // Expand the code for the iteration count.
  861. assert(SE->isLoopInvariant(IVOffset, L) &&
  862. "Computed iteration count is not loop invariant!");
  863. const SCEV *IVLimit = SE->getAddExpr(IVInit, IVOffset);
  864. BranchInst *BI = cast<BranchInst>(ExitingBB->getTerminator());
  865. return Rewriter.expandCodeFor(IVLimit, IndVar->getType(), BI);
  866. } else {
  867. // In any other case, convert both IVInit and ExitCount to integers before
  868. // comparing. This may result in SCEV expansion of pointers, but in practice
  869. // SCEV will fold the pointer arithmetic away as such:
  870. // BECount = (IVEnd - IVInit - 1) => IVLimit = IVInit (postinc).
  871. //
  872. // Valid Cases: (1) both integers is most common; (2) both may be pointers
  873. // for simple memset-style loops.
  874. //
  875. // IVInit integer and ExitCount pointer would only occur if a canonical IV
  876. // were generated on top of case #2, which is not expected.
  877. // For unit stride, IVCount = Start + ExitCount with 2's complement
  878. // overflow.
  879. // For integer IVs, truncate the IV before computing IVInit + BECount,
  880. // unless we know apriori that the limit must be a constant when evaluated
  881. // in the bitwidth of the IV. We prefer (potentially) keeping a truncate
  882. // of the IV in the loop over a (potentially) expensive expansion of the
  883. // widened exit count add(zext(add)) expression.
  884. if (SE->getTypeSizeInBits(IVInit->getType())
  885. > SE->getTypeSizeInBits(ExitCount->getType())) {
  886. if (isa<SCEVConstant>(IVInit) && isa<SCEVConstant>(ExitCount))
  887. ExitCount = SE->getZeroExtendExpr(ExitCount, IVInit->getType());
  888. else
  889. IVInit = SE->getTruncateExpr(IVInit, ExitCount->getType());
  890. }
  891. const SCEV *IVLimit = SE->getAddExpr(IVInit, ExitCount);
  892. if (UsePostInc)
  893. IVLimit = SE->getAddExpr(IVLimit, SE->getOne(IVLimit->getType()));
  894. // Expand the code for the iteration count.
  895. assert(SE->isLoopInvariant(IVLimit, L) &&
  896. "Computed iteration count is not loop invariant!");
  897. // Ensure that we generate the same type as IndVar, or a smaller integer
  898. // type. In the presence of null pointer values, we have an integer type
  899. // SCEV expression (IVInit) for a pointer type IV value (IndVar).
  900. Type *LimitTy = ExitCount->getType()->isPointerTy() ?
  901. IndVar->getType() : ExitCount->getType();
  902. BranchInst *BI = cast<BranchInst>(ExitingBB->getTerminator());
  903. return Rewriter.expandCodeFor(IVLimit, LimitTy, BI);
  904. }
  905. }
  906. /// This method rewrites the exit condition of the loop to be a canonical !=
  907. /// comparison against the incremented loop induction variable. This pass is
  908. /// able to rewrite the exit tests of any loop where the SCEV analysis can
  909. /// determine a loop-invariant trip count of the loop, which is actually a much
  910. /// broader range than just linear tests.
  911. bool IndVarSimplify::
  912. linearFunctionTestReplace(Loop *L, BasicBlock *ExitingBB,
  913. const SCEV *ExitCount,
  914. PHINode *IndVar, SCEVExpander &Rewriter) {
  915. assert(L->getLoopLatch() && "Loop no longer in simplified form?");
  916. assert(isLoopCounter(IndVar, L, SE));
  917. Instruction * const IncVar =
  918. cast<Instruction>(IndVar->getIncomingValueForBlock(L->getLoopLatch()));
  919. // Initialize CmpIndVar to the preincremented IV.
  920. Value *CmpIndVar = IndVar;
  921. bool UsePostInc = false;
  922. // If the exiting block is the same as the backedge block, we prefer to
  923. // compare against the post-incremented value, otherwise we must compare
  924. // against the preincremented value.
  925. if (ExitingBB == L->getLoopLatch()) {
  926. // For pointer IVs, we chose to not strip inbounds which requires us not
  927. // to add a potentially UB introducing use. We need to either a) show
  928. // the loop test we're modifying is already in post-inc form, or b) show
  929. // that adding a use must not introduce UB.
  930. bool SafeToPostInc =
  931. IndVar->getType()->isIntegerTy() ||
  932. isLoopExitTestBasedOn(IncVar, ExitingBB) ||
  933. mustExecuteUBIfPoisonOnPathTo(IncVar, ExitingBB->getTerminator(), DT);
  934. if (SafeToPostInc) {
  935. UsePostInc = true;
  936. CmpIndVar = IncVar;
  937. }
  938. }
  939. // It may be necessary to drop nowrap flags on the incrementing instruction
  940. // if either LFTR moves from a pre-inc check to a post-inc check (in which
  941. // case the increment might have previously been poison on the last iteration
  942. // only) or if LFTR switches to a different IV that was previously dynamically
  943. // dead (and as such may be arbitrarily poison). We remove any nowrap flags
  944. // that SCEV didn't infer for the post-inc addrec (even if we use a pre-inc
  945. // check), because the pre-inc addrec flags may be adopted from the original
  946. // instruction, while SCEV has to explicitly prove the post-inc nowrap flags.
  947. // TODO: This handling is inaccurate for one case: If we switch to a
  948. // dynamically dead IV that wraps on the first loop iteration only, which is
  949. // not covered by the post-inc addrec. (If the new IV was not dynamically
  950. // dead, it could not be poison on the first iteration in the first place.)
  951. if (auto *BO = dyn_cast<BinaryOperator>(IncVar)) {
  952. const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(SE->getSCEV(IncVar));
  953. if (BO->hasNoUnsignedWrap())
  954. BO->setHasNoUnsignedWrap(AR->hasNoUnsignedWrap());
  955. if (BO->hasNoSignedWrap())
  956. BO->setHasNoSignedWrap(AR->hasNoSignedWrap());
  957. }
  958. Value *ExitCnt = genLoopLimit(
  959. IndVar, ExitingBB, ExitCount, UsePostInc, L, Rewriter, SE);
  960. assert(ExitCnt->getType()->isPointerTy() ==
  961. IndVar->getType()->isPointerTy() &&
  962. "genLoopLimit missed a cast");
  963. // Insert a new icmp_ne or icmp_eq instruction before the branch.
  964. BranchInst *BI = cast<BranchInst>(ExitingBB->getTerminator());
  965. ICmpInst::Predicate P;
  966. if (L->contains(BI->getSuccessor(0)))
  967. P = ICmpInst::ICMP_NE;
  968. else
  969. P = ICmpInst::ICMP_EQ;
  970. IRBuilder<> Builder(BI);
  971. // The new loop exit condition should reuse the debug location of the
  972. // original loop exit condition.
  973. if (auto *Cond = dyn_cast<Instruction>(BI->getCondition()))
  974. Builder.SetCurrentDebugLocation(Cond->getDebugLoc());

  // For integer IVs, if we evaluated the limit in the narrower bitwidth to
  // avoid the expensive expansion of the limit expression in the wider type,
  // emit a truncate to narrow the IV to the ExitCount type.  This is safe
  // since we know (from the exit count bitwidth) that we can't self-wrap in
  // the narrower type.
  unsigned CmpIndVarSize = SE->getTypeSizeInBits(CmpIndVar->getType());
  unsigned ExitCntSize = SE->getTypeSizeInBits(ExitCnt->getType());
  if (CmpIndVarSize > ExitCntSize) {
    assert(!CmpIndVar->getType()->isPointerTy() &&
           !ExitCnt->getType()->isPointerTy());

    // Before resorting to actually inserting the truncate, use the same
    // reasoning as from SimplifyIndvar::eliminateTrunc to see if we can
    // extend the other side of the comparison instead.  We still evaluate
    // the limit in the narrower bitwidth; we just prefer a zext/sext outside
    // the loop to a truncate within it.
    bool Extended = false;
    const SCEV *IV = SE->getSCEV(CmpIndVar);
    const SCEV *TruncatedIV = SE->getTruncateExpr(SE->getSCEV(CmpIndVar),
                                                  ExitCnt->getType());
    const SCEV *ZExtTrunc =
        SE->getZeroExtendExpr(TruncatedIV, CmpIndVar->getType());

    if (ZExtTrunc == IV) {
      Extended = true;
      ExitCnt = Builder.CreateZExt(ExitCnt, IndVar->getType(),
                                   "wide.trip.count");
    } else {
      const SCEV *SExtTrunc =
          SE->getSignExtendExpr(TruncatedIV, CmpIndVar->getType());
      if (SExtTrunc == IV) {
        Extended = true;
        ExitCnt = Builder.CreateSExt(ExitCnt, IndVar->getType(),
                                     "wide.trip.count");
      }
    }

    if (Extended) {
      bool Discard;
      L->makeLoopInvariant(ExitCnt, Discard);
    } else
      CmpIndVar = Builder.CreateTrunc(CmpIndVar, ExitCnt->getType(),
                                      "lftr.wideiv");
  }
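
  // For example (illustrative; the names follow the ones created above):
  // with an i64 IV and an i32 exit count, we prefer a single extend of the
  // limit that can be hoisted out of the loop:
  //   %wide.trip.count = zext i32 %exit.cnt to i64
  //   %exitcond = icmp ne i64 %iv.next, %wide.trip.count
  // over a truncate executed on every iteration:
  //   %lftr.wideiv = trunc i64 %iv.next to i32
  //   %exitcond = icmp ne i32 %lftr.wideiv, %exit.cnt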

  LLVM_DEBUG(dbgs() << "INDVARS: Rewriting loop exit condition to:\n"
                    << "      LHS:" << *CmpIndVar << '\n'
                    << "       op:\t" << (P == ICmpInst::ICMP_NE ? "!=" : "==")
                    << "\n"
                    << "      RHS:\t" << *ExitCnt << "\n"
                    << "ExitCount:\t" << *ExitCount << "\n"
                    << "  was: " << *BI->getCondition() << "\n");

  Value *Cond = Builder.CreateICmp(P, CmpIndVar, ExitCnt, "exitcond");
  Value *OrigCond = BI->getCondition();
  // It's tempting to use replaceAllUsesWith here to fully replace the old
  // comparison, but that's not immediately safe, since users of the old
  // comparison may not be dominated by the new comparison.  Instead, just
  // update the branch to use the new comparison; in the common case this
  // will make the old comparison dead.
  BI->setCondition(Cond);
  DeadInsts.emplace_back(OrigCond);

  ++NumLFTR;
  return true;
}

//===----------------------------------------------------------------------===//
//  sinkUnusedInvariants. A late subpass to clean up loop preheaders.
//===----------------------------------------------------------------------===//

/// If there's a single exit block, sink any loop-invariant values that
/// were defined in the preheader but not used inside the loop into the
/// exit block to reduce register pressure in the loop.
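///
/// For example (illustrative; names are hypothetical):
///   preheader:
///     %inv = mul i64 %a, %b        ; loop-invariant, unused inside the loop
///     br label %header
///   ...
///   exit:
///     %use = add i64 %inv, 1
/// Here %inv is moved out of the preheader and into %exit.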
bool IndVarSimplify::sinkUnusedInvariants(Loop *L) {
  BasicBlock *ExitBlock = L->getExitBlock();
  if (!ExitBlock) return false;

  BasicBlock *Preheader = L->getLoopPreheader();
  if (!Preheader) return false;

  bool MadeAnyChanges = false;
  BasicBlock::iterator InsertPt = ExitBlock->getFirstInsertionPt();
  BasicBlock::iterator I(Preheader->getTerminator());
  while (I != Preheader->begin()) {
    --I;
    // New instructions were inserted at the end of the preheader.
    if (isa<PHINode>(I))
      break;

    // Don't move instructions which might have side effects, since the side
    // effects need to complete before instructions inside the loop.  Also
    // don't move instructions which might read memory, since the loop may
    // modify memory.  Note that it's okay if the instruction might have
    // undefined behavior: LoopSimplify guarantees that the preheader
    // dominates the exit block.
    if (I->mayHaveSideEffects() || I->mayReadFromMemory())
      continue;

    // Skip debug info intrinsics.
    if (isa<DbgInfoIntrinsic>(I))
      continue;

    // Skip eh pad instructions.
    if (I->isEHPad())
      continue;

    // Don't sink allocas: we never want to sink static allocas out of the
    // entry block, and correctly sinking dynamic allocas requires
    // checks for stacksave/stackrestore intrinsics.
    // FIXME: Refactor this check somehow?
    if (isa<AllocaInst>(I))
      continue;

    // Determine if there is a use in or before the loop (direct or
    // otherwise).
    bool UsedInLoop = false;
    for (Use &U : I->uses()) {
      Instruction *User = cast<Instruction>(U.getUser());
      BasicBlock *UseBB = User->getParent();
      if (PHINode *P = dyn_cast<PHINode>(User)) {
        unsigned i =
            PHINode::getIncomingValueNumForOperand(U.getOperandNo());
        UseBB = P->getIncomingBlock(i);
      }
      if (UseBB == Preheader || L->contains(UseBB)) {
        UsedInLoop = true;
        break;
      }
    }

    // If there is, the def must remain in the preheader.
    if (UsedInLoop)
      continue;

    // Otherwise, sink it to the exit block.
    Instruction *ToMove = &*I;
    bool Done = false;

    if (I != Preheader->begin()) {
      // Skip debug info intrinsics.
      do {
        --I;
      } while (I->isDebugOrPseudoInst() && I != Preheader->begin());

      if (I->isDebugOrPseudoInst() && I == Preheader->begin())
        Done = true;
    } else {
      Done = true;
    }

    MadeAnyChanges = true;
    ToMove->moveBefore(*ExitBlock, InsertPt);
    if (Done) break;
    InsertPt = ToMove->getIterator();
  }

  return MadeAnyChanges;
}

static void replaceExitCond(BranchInst *BI, Value *NewCond,
                            SmallVectorImpl<WeakTrackingVH> &DeadInsts) {
  auto *OldCond = BI->getCondition();
  BI->setCondition(NewCond);
  if (OldCond->use_empty())
    DeadInsts.emplace_back(OldCond);
}
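
// Fold the exit branch in ExitingBB to a constant.  For example
// (illustrative): if the block ends in
//   br i1 %c, label %exit, label %body
// then proving the exit untaken (IsTaken == false) rewrites the branch to
//   br i1 false, label %exit, label %body
// leaving %c for later dead-code cleanup via DeadInsts.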
static void foldExit(const Loop *L, BasicBlock *ExitingBB, bool IsTaken,
                     SmallVectorImpl<WeakTrackingVH> &DeadInsts) {
  BranchInst *BI = cast<BranchInst>(ExitingBB->getTerminator());
  bool ExitIfTrue = !L->contains(*succ_begin(ExitingBB));
  auto *OldCond = BI->getCondition();
  auto *NewCond =
      ConstantInt::get(OldCond->getType(), IsTaken ? ExitIfTrue : !ExitIfTrue);
  replaceExitCond(BI, NewCond, DeadInsts);
}
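
// Replace each loop-header PHI with its incoming value from the preheader.
// For example (illustrative): once the backedge is known never taken,
//   %iv = phi i64 [ 0, %preheader ], [ %iv.next, %latch ]
// is replaced everywhere by 0 and queued for deletion.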
static void replaceLoopPHINodesWithPreheaderValues(
    Loop *L, SmallVectorImpl<WeakTrackingVH> &DeadInsts) {
  assert(L->isLoopSimplifyForm() && "Should only do it in simplify form!");
  auto *LoopPreheader = L->getLoopPreheader();
  auto *LoopHeader = L->getHeader();
  for (auto &PN : LoopHeader->phis()) {
    auto *PreheaderIncoming = PN.getIncomingValueForBlock(LoopPreheader);
    PN.replaceAllUsesWith(PreheaderIncoming);
    DeadInsts.emplace_back(&PN);
  }
}

static void replaceWithInvariantCond(
    const Loop *L, BasicBlock *ExitingBB, ICmpInst::Predicate InvariantPred,
    const SCEV *InvariantLHS, const SCEV *InvariantRHS, SCEVExpander &Rewriter,
    SmallVectorImpl<WeakTrackingVH> &DeadInsts) {
  BranchInst *BI = cast<BranchInst>(ExitingBB->getTerminator());
  Rewriter.setInsertPoint(BI);
  auto *LHSV = Rewriter.expandCodeFor(InvariantLHS);
  auto *RHSV = Rewriter.expandCodeFor(InvariantRHS);

  bool ExitIfTrue = !L->contains(*succ_begin(ExitingBB));
  if (ExitIfTrue)
    InvariantPred = ICmpInst::getInversePredicate(InvariantPred);
  IRBuilder<> Builder(BI);
  auto *NewCond = Builder.CreateICmp(InvariantPred, LHSV, RHSV,
                                     BI->getCondition()->getName());
  replaceExitCond(BI, NewCond, DeadInsts);
}

static bool optimizeLoopExitWithUnknownExitCount(
    const Loop *L, BranchInst *BI, BasicBlock *ExitingBB,
    const SCEV *MaxIter, bool Inverted, bool SkipLastIter,
    ScalarEvolution *SE, SCEVExpander &Rewriter,
    SmallVectorImpl<WeakTrackingVH> &DeadInsts) {
  ICmpInst::Predicate Pred;
  Value *LHS, *RHS;
  BasicBlock *TrueSucc, *FalseSucc;
  if (!match(BI, m_Br(m_ICmp(Pred, m_Value(LHS), m_Value(RHS)),
                      m_BasicBlock(TrueSucc), m_BasicBlock(FalseSucc))))
    return false;

  assert((L->contains(TrueSucc) != L->contains(FalseSucc)) &&
         "Not a loop exit!");

  // 'LHS pred RHS' should now mean that we stay in the loop.
  if (L->contains(FalseSucc))
    Pred = CmpInst::getInversePredicate(Pred);

  // If we are proving loop exit, invert the predicate.
  if (Inverted)
    Pred = CmpInst::getInversePredicate(Pred);

  const SCEV *LHSS = SE->getSCEVAtScope(LHS, L);
  const SCEV *RHSS = SE->getSCEVAtScope(RHS, L);
  // Can we prove it to be trivially true?
  if (SE->isKnownPredicateAt(Pred, LHSS, RHSS, BI)) {
    foldExit(L, ExitingBB, Inverted, DeadInsts);
    return true;
  }
  // Further logic works for the non-inverted condition only.
  if (Inverted)
    return false;

  auto *ARTy = LHSS->getType();
  auto *MaxIterTy = MaxIter->getType();
  // If possible, adjust types.
  if (SE->getTypeSizeInBits(ARTy) > SE->getTypeSizeInBits(MaxIterTy))
    MaxIter = SE->getZeroExtendExpr(MaxIter, ARTy);
  else if (SE->getTypeSizeInBits(ARTy) < SE->getTypeSizeInBits(MaxIterTy)) {
    const SCEV *MinusOne = SE->getMinusOne(ARTy);
    auto *MaxAllowedIter = SE->getZeroExtendExpr(MinusOne, MaxIterTy);
    if (SE->isKnownPredicateAt(ICmpInst::ICMP_ULE, MaxIter, MaxAllowedIter, BI))
      MaxIter = SE->getTruncateExpr(MaxIter, ARTy);
  }

  if (SkipLastIter) {
    const SCEV *One = SE->getOne(MaxIter->getType());
    MaxIter = SE->getMinusSCEV(MaxIter, One);
  }

  // Check if there is a loop-invariant predicate equivalent to our check.
  auto LIP = SE->getLoopInvariantExitCondDuringFirstIterations(Pred, LHSS, RHSS,
                                                               L, BI, MaxIter);
  if (!LIP)
    return false;

  // Can we prove it to be trivially true?
  if (SE->isKnownPredicateAt(LIP->Pred, LIP->LHS, LIP->RHS, BI))
    foldExit(L, ExitingBB, Inverted, DeadInsts);
  else
    replaceWithInvariantCond(L, ExitingBB, LIP->Pred, LIP->LHS, LIP->RHS,
                             Rewriter, DeadInsts);

  return true;
}

bool IndVarSimplify::canonicalizeExitCondition(Loop *L) {
  // Note: This duplicates a particular part of SimplifyIndVars' reasoning.
  // We have to duplicate it because, given icmp zext(small-iv), C, IVUsers
  // never reaches the icmp: the zext doesn't fold to an AddRec unless it
  // already has flags.  The alternative would be to extend the set of
  // "interesting" IV users to include the icmp, but doing that regresses
  // results in practice by querying SCEVs before the trip counts which rely
  // on them, causing SCEV to cache sub-optimal answers.  That caching
  // concern is why we only query SCEVs of the loop invariant RHS here.
  SmallVector<BasicBlock*, 16> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
  bool Changed = false;
  for (auto *ExitingBB : ExitingBlocks) {
    auto *BI = dyn_cast<BranchInst>(ExitingBB->getTerminator());
    if (!BI)
      continue;
    assert(BI->isConditional() && "exit branch must be conditional");

    auto *ICmp = dyn_cast<ICmpInst>(BI->getCondition());
    if (!ICmp || !ICmp->hasOneUse())
      continue;

    auto *LHS = ICmp->getOperand(0);
    auto *RHS = ICmp->getOperand(1);
    // For the range reasoning, avoid computing SCEVs in the loop to avoid
    // poisoning the cache with sub-optimal results.  For the must-execute
    // case, this is a necessary precondition for correctness.
    if (!L->isLoopInvariant(RHS)) {
      if (!L->isLoopInvariant(LHS))
        continue;
      // Same logic applies for the inverse case.
      std::swap(LHS, RHS);
    }

    // Match (icmp signed-cond zext, RHS)
    Value *LHSOp = nullptr;
    if (!match(LHS, m_ZExt(m_Value(LHSOp))) || !ICmp->isSigned())
      continue;

    const DataLayout &DL = ExitingBB->getModule()->getDataLayout();
    const unsigned InnerBitWidth = DL.getTypeSizeInBits(LHSOp->getType());
    const unsigned OuterBitWidth = DL.getTypeSizeInBits(RHS->getType());
    auto FullCR = ConstantRange::getFull(InnerBitWidth);
    FullCR = FullCR.zeroExtend(OuterBitWidth);
    auto RHSCR = SE->getUnsignedRange(SE->applyLoopGuards(SE->getSCEV(RHS), L));
    if (FullCR.contains(RHSCR)) {
      // We have now matched icmp signed-cond zext(X), zext(Y'), and can thus
      // replace the signed condition with the unsigned version.
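      //
      // For example (illustrative; the value names are hypothetical), with
      // %len known to lie in [0, 2^32):
      //   %zext = zext i32 %x to i64
      //   %cmp  = icmp slt i64 %zext, %len
      // becomes
      //   %cmp  = icmp ult i64 %zext, %len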
      ICmp->setPredicate(ICmp->getUnsignedPredicate());
      Changed = true;

      // Note: No SCEV invalidation needed.  We've changed the predicate, but
      // have not changed exit counts or the values produced by the compare.
      continue;
    }
  }

  // Now that we've canonicalized the condition to match the extend,
  // see if we can rotate the extend out of the loop.
  for (auto *ExitingBB : ExitingBlocks) {
    auto *BI = dyn_cast<BranchInst>(ExitingBB->getTerminator());
    if (!BI)
      continue;
    assert(BI->isConditional() && "exit branch must be conditional");

    auto *ICmp = dyn_cast<ICmpInst>(BI->getCondition());
    if (!ICmp || !ICmp->hasOneUse() || !ICmp->isUnsigned())
      continue;

    bool Swapped = false;
    auto *LHS = ICmp->getOperand(0);
    auto *RHS = ICmp->getOperand(1);
    if (L->isLoopInvariant(LHS) == L->isLoopInvariant(RHS))
      // Nothing to rotate.
      continue;
    if (L->isLoopInvariant(LHS)) {
      // Same logic applies for the inverse case until we actually pick
      // which operand of the compare to update.
      Swapped = true;
      std::swap(LHS, RHS);
    }
    assert(!L->isLoopInvariant(LHS) && L->isLoopInvariant(RHS));

    // Match (icmp unsigned-cond zext, RHS)
    // TODO: Extend to handle the corresponding sext/signed-cmp case
    // TODO: Extend to other invertible functions
    Value *LHSOp = nullptr;
    if (!match(LHS, m_ZExt(m_Value(LHSOp))))
      continue;

    // In general, we only rotate if we can do so without increasing the
    // number of instructions.  The exception is when we have a zext(add-rec):
    // we know we need to get rid of the zext for SCEV to be able to compute
    // a trip count for said loops, and we consider the new trip count
    // valuable enough to increase instruction count by one.
    if (!LHS->hasOneUse() && !isa<SCEVAddRecExpr>(SE->getSCEV(LHSOp)))
      continue;

    // Given an icmp unsigned-cond zext(Op) where zext(trunc(RHS)) == RHS,
    // replace it with an icmp of the form icmp unsigned-cond Op, trunc(RHS)
    // when the zext is loop varying and RHS is loop invariant.  This converts
    // loop varying work to loop-invariant work.
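    //
    // For example (illustrative; names are hypothetical):
    //   %zext = zext i32 %op to i64
    //   %cmp  = icmp ult i64 %zext, %rhs
    // becomes, with the truncate hoisted to the preheader:
    //   %rhs.trunc = trunc i64 %rhs to i32
    //   %cmp = icmp ult i32 %op, %rhs.trunc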
    auto doRotateTransform = [&]() {
      assert(ICmp->isUnsigned() && "must have proven unsigned already");

      auto *NewRHS =
          CastInst::Create(Instruction::Trunc, RHS, LHSOp->getType(), "",
                           L->getLoopPreheader()->getTerminator());
      ICmp->setOperand(Swapped ? 1 : 0, LHSOp);
      ICmp->setOperand(Swapped ? 0 : 1, NewRHS);
      if (LHS->use_empty())
        DeadInsts.push_back(LHS);
    };

    const DataLayout &DL = ExitingBB->getModule()->getDataLayout();
    const unsigned InnerBitWidth = DL.getTypeSizeInBits(LHSOp->getType());
    const unsigned OuterBitWidth = DL.getTypeSizeInBits(RHS->getType());
    auto FullCR = ConstantRange::getFull(InnerBitWidth);
    FullCR = FullCR.zeroExtend(OuterBitWidth);
    auto RHSCR = SE->getUnsignedRange(SE->applyLoopGuards(SE->getSCEV(RHS), L));
    if (FullCR.contains(RHSCR)) {
      doRotateTransform();
      Changed = true;
      // Note: We are leaving SCEV in an unfortunately imprecise state here,
      // as rotation tends to reveal information about trip counts not
      // previously visible.
      continue;
    }
  }

  return Changed;
}

bool IndVarSimplify::optimizeLoopExits(Loop *L, SCEVExpander &Rewriter) {
  SmallVector<BasicBlock*, 16> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // Remove all exits which aren't both rewriteable and guaranteed to execute
  // on every iteration.
  llvm::erase_if(ExitingBlocks, [&](BasicBlock *ExitingBB) {
    // If our exiting block exits multiple loops, we can only rewrite the
    // innermost one.  Otherwise we're changing how many times the innermost
    // loop runs before it exits.
    if (LI->getLoopFor(ExitingBB) != L)
      return true;

    // Can't rewrite non-branch yet.
    BranchInst *BI = dyn_cast<BranchInst>(ExitingBB->getTerminator());
    if (!BI)
      return true;

    // If already constant, nothing to do.
    if (isa<Constant>(BI->getCondition()))
      return true;

    // Likewise, the loop latch must be dominated by the exiting BB.
    if (!DT->dominates(ExitingBB, L->getLoopLatch()))
      return true;

    return false;
  });

  if (ExitingBlocks.empty())
    return false;

  // Get a symbolic upper bound on the loop backedge taken count.
  const SCEV *MaxExitCount = SE->getSymbolicMaxBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(MaxExitCount))
    return false;

  // Visit our exit blocks in order of dominance.  Since all exits must
  // dominate the latch, there is a total dominance order between them.
  llvm::sort(ExitingBlocks, [&](BasicBlock *A, BasicBlock *B) {
    // std::sort sorts in ascending order, so we want the inverse of
    // the normal dominance relation.
    if (A == B) return false;
    if (DT->properlyDominates(A, B))
      return true;
    assert(DT->properlyDominates(B, A) &&
           "expected total dominance order!");
    return false;
  });
#ifndef NDEBUG
  for (unsigned i = 1; i < ExitingBlocks.size(); i++)
    assert(DT->dominates(ExitingBlocks[i-1], ExitingBlocks[i]));
#endif

  bool Changed = false;
  bool SkipLastIter = false;
  SmallSet<const SCEV*, 8> DominatingExitCounts;
  for (BasicBlock *ExitingBB : ExitingBlocks) {
    const SCEV *ExitCount = SE->getExitCount(L, ExitingBB);
    if (isa<SCEVCouldNotCompute>(ExitCount)) {
      // Okay, we do not know the exit count here.  Can we at least prove
      // that it will remain the same within the iteration space?
      auto *BI = cast<BranchInst>(ExitingBB->getTerminator());
      auto OptimizeCond = [&](bool Inverted, bool SkipLastIter) {
        return optimizeLoopExitWithUnknownExitCount(
            L, BI, ExitingBB, MaxExitCount, Inverted, SkipLastIter, SE,
            Rewriter, DeadInsts);
      };

      // TODO: We might have proved that we can skip the last iteration for
      // this check.  In this case, we only want to check the condition on
      // the pre-last iteration (MaxExitCount - 1).  However, there is a
      // nasty corner case:
      //
      //   for (i = len; i != 0; i--) { ... check (i ult X) ... }
      //
      // If we could not prove that len != 0, then we also could not prove
      // that (len - 1) is not UINT_MAX.  If we simply query (len - 1), then
      // OptimizeCond will likely not prove anything for it, even if it
      // could prove the same fact for len.
      //
      // As a temporary solution, we query both the last and the pre-last
      // iterations in the hope that we will be able to prove triviality for
      // at least one of them.  We can stop querying MaxExitCount for this
      // case once SCEV understands that (MaxExitCount - 1) will not
      // overflow here.
      if (OptimizeCond(false, false) || OptimizeCond(true, false))
        Changed = true;
      else if (SkipLastIter)
        if (OptimizeCond(false, true) || OptimizeCond(true, true))
          Changed = true;
      continue;
    }

    if (MaxExitCount == ExitCount)
      // If the loop has more than 1 iteration, all further checks will be
      // executed 1 iteration less.
      SkipLastIter = true;

    // If we know we'd exit on the first iteration, rewrite the exit to
    // reflect this.  This does not imply the loop must exit through this
    // exit; there may be an earlier one taken on the first iteration.
    // We know that the backedge can't be taken, so we replace all
    // the header PHIs with values coming from the preheader.
    if (ExitCount->isZero()) {
      foldExit(L, ExitingBB, true, DeadInsts);
      replaceLoopPHINodesWithPreheaderValues(L, DeadInsts);
      Changed = true;
      continue;
    }
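
    // For example (illustrative): if SCEV proves this exit's count is zero,
    // a branch such as
    //   br i1 %c, label %body, label %exit
    // is rewritten to
    //   br i1 false, label %body, label %exit
    // and the header PHIs collapse to their preheader inputs, since the
    // backedge can no longer be taken.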

    assert(ExitCount->getType()->isIntegerTy() &&
           MaxExitCount->getType()->isIntegerTy() &&
           "Exit counts must be integers");

    Type *WiderType =
        SE->getWiderType(MaxExitCount->getType(), ExitCount->getType());
    ExitCount = SE->getNoopOrZeroExtend(ExitCount, WiderType);
    MaxExitCount = SE->getNoopOrZeroExtend(MaxExitCount, WiderType);
    assert(MaxExitCount->getType() == ExitCount->getType());

    // Can we prove that some other exit must be taken strictly before this
    // one?
    if (SE->isLoopEntryGuardedByCond(L, CmpInst::ICMP_ULT,
                                     MaxExitCount, ExitCount)) {
      foldExit(L, ExitingBB, false, DeadInsts);
      Changed = true;
      continue;
    }

    // As we run, keep track of which exit counts we've encountered.  If we
    // find a duplicate, we've found an exit which would have exited on the
    // exiting iteration, but (from the visit order) strictly follows another
    // which does the same and is thus dead.
    if (!DominatingExitCounts.insert(ExitCount).second) {
      foldExit(L, ExitingBB, false, DeadInsts);
      Changed = true;
      continue;
    }

    // TODO: There might be another opportunity to leverage SCEV's reasoning
    // here.  If we kept track of the min of dominating exits so far, we
    // could discharge exits with EC >= MDEC.  This is less powerful than the
    // existing transform (since later exits aren't considered), but
    // potentially more powerful for any case where SCEV can prove a >=u b,
    // but neither a == b nor a >u b.  Such a case is not currently known.
  }
  return Changed;
}

bool IndVarSimplify::predicateLoopExits(Loop *L, SCEVExpander &Rewriter) {
  SmallVector<BasicBlock*, 16> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // Finally, see if we can rewrite our exit conditions into a loop invariant
  // form.  If we have a read-only loop, and we can tell that we must exit
  // down a path which does not need any of the values computed within the
  // loop, we can rewrite the loop to exit on the first iteration.  Note that
  // this doesn't either a) tell us the loop exits on the first iteration
  // (unless *all* exits are predicatable) or b) tell us *which* exit might
  // be taken.  This transformation looks a lot like a restricted form of
  // dead loop elimination, but restricted to read-only loops and without
  // necessarily needing to kill the loop entirely.
  if (!LoopPredication)
    return false;

  // Note: ExactBTC is the exact backedge taken count *iff* the loop exits
  // through *explicit* control flow.  We have to eliminate the possibility
  // of implicit exits (see below) before we know it's truly exact.
  const SCEV *ExactBTC = SE->getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(ExactBTC) || !isSafeToExpand(ExactBTC, *SE))
    return false;

  assert(SE->isLoopInvariant(ExactBTC, L) && "BTC must be loop invariant");
  assert(ExactBTC->getType()->isIntegerTy() && "BTC must be integer");

  auto BadExit = [&](BasicBlock *ExitingBB) {
    // If our exiting block exits multiple loops, we can only rewrite the
    // innermost one.  Otherwise we're changing how many times the innermost
    // loop runs before it exits.
    if (LI->getLoopFor(ExitingBB) != L)
      return true;

    // Can't rewrite non-branch yet.
    BranchInst *BI = dyn_cast<BranchInst>(ExitingBB->getTerminator());
    if (!BI)
      return true;

    // If already constant, nothing to do.
    if (isa<Constant>(BI->getCondition()))
      return true;

    // If the exit block has phis, we need to be able to compute the values
    // within the loop which contains them.  This assumes trivially lcssa
    // phis have already been removed; TODO: generalize
    BasicBlock *ExitBlock =
        BI->getSuccessor(L->contains(BI->getSuccessor(0)) ? 1 : 0);
    if (!ExitBlock->phis().empty())
      return true;

    const SCEV *ExitCount = SE->getExitCount(L, ExitingBB);
    if (isa<SCEVCouldNotCompute>(ExitCount) || !isSafeToExpand(ExitCount, *SE))
      return true;

    assert(SE->isLoopInvariant(ExitCount, L) &&
           "Exit count must be loop invariant");
    assert(ExitCount->getType()->isIntegerTy() && "Exit count must be integer");
    return false;
  };

  // If we have any exits which can't be predicated themselves, then we can't
  // predicate any exit which isn't guaranteed to execute before it.  Consider
  // two exits (a) and (b) which would both exit on the same iteration.  If
  // we can predicate (b), but not (a), and (a) precedes (b) along some path,
  // then we could convert a loop from exiting through (a) to one exiting
  // through (b).  Note that this problem exists only for exits with the same
  // exit count, and we could be more aggressive when exit counts are known
  // unequal.
  llvm::sort(ExitingBlocks,
             [&](BasicBlock *A, BasicBlock *B) {
               // std::sort sorts in ascending order, so we want the inverse
               // of the normal dominance relation, plus a tie breaker for
               // blocks unordered by dominance.
               if (DT->properlyDominates(A, B)) return true;
               if (DT->properlyDominates(B, A)) return false;
               return A->getName() < B->getName();
             });

  // Check to see if our exit blocks are a total order (i.e. a linear chain
  // of exits before the backedge).  If they aren't, reasoning about
  // reachability is complicated and we choose not to for now.
  for (unsigned i = 1; i < ExitingBlocks.size(); i++)
    if (!DT->dominates(ExitingBlocks[i-1], ExitingBlocks[i]))
      return false;

  // Given our sorted total order, we know that exit[j] must be evaluated
  // after all exit[i] such that j > i.
  for (unsigned i = 0, e = ExitingBlocks.size(); i < e; i++)
    if (BadExit(ExitingBlocks[i])) {
      ExitingBlocks.resize(i);
      break;
    }

  if (ExitingBlocks.empty())
    return false;

  // We rely on not being able to reach an exiting block on a later iteration
  // than its statically computed exit count.  The implementation of
  // getExitCount currently has this invariant, but assert it here so that
  // breakage is obvious if this ever changes.
  assert(llvm::all_of(ExitingBlocks, [&](BasicBlock *ExitingBB) {
    return DT->dominates(ExitingBB, L->getLoopLatch());
  }));

  // At this point, ExitingBlocks consists of only those blocks which are
  // predicatable.  Given that, we know we have at least one exit we can
  // predicate if the loop doesn't have side effects and doesn't have any
  // implicit exits (because then our exact BTC isn't actually exact).
  // @Reviewers - As structured, this is O(I^2) for loop nests.  Any
  // suggestions on how to improve this?  I can obviously bail out for outer
  // loops, but that seems less than ideal.  MemorySSA can find memory
  // writes, is that enough for *all* side effects?
  for (BasicBlock *BB : L->blocks())
    for (auto &I : *BB)
      // TODO: isGuaranteedToTransfer
      if (I.mayHaveSideEffects())
        return false;

  bool Changed = false;
  // Finally, do the actual predication for all predicatable blocks.  A
  // couple of notes here:
  // 1) We don't bother to constant fold dominated exits with identical exit
  //    counts; that's simply a form of CSE/equality propagation and we leave
  //    it for dedicated passes.
  // 2) We insert the comparison at the branch.  Hoisting introduces
  //    additional legality constraints and we leave that to dedicated logic.
  //    We want to predicate even if we can't insert a loop invariant
  //    expression, as peeling or unrolling will likely reduce the cost of
  //    the otherwise loop varying check.
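  //
  // Sketch of the rewrite (illustrative; names are hypothetical): each
  // predicatable exit's condition is replaced by a loop-invariant comparison
  // of its exit count against the exact backedge-taken count, e.g.
  //   %ec   = <expansion of ExitCount for this exit>
  //   %btc  = <expansion of ExactBTC>
  //   %cond = icmp eq i64 %ec, %btc
  // If this exit is the one the loop would have left through, %cond holds on
  // every iteration, so the loop now exits on the first one; because the
  // loop is side-effect free and the exit block has no PHIs, the change is
  // unobservable.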
  Rewriter.setInsertPoint(L->getLoopPreheader()->getTerminator());
  IRBuilder<> B(L->getLoopPreheader()->getTerminator());
  Value *ExactBTCV = nullptr; // Lazily generated if needed.
  for (BasicBlock *ExitingBB : ExitingBlocks) {
    const SCEV *ExitCount = SE->getExitCount(L, ExitingBB);

    auto *BI = cast<BranchInst>(ExitingBB->getTerminator());
    Value *NewCond;
    if (ExitCount == ExactBTC) {
      NewCond = L->contains(BI->getSuccessor(0)) ?
        B.getFalse() : B.getTrue();
    } else {
      Value *ECV = Rewriter.expandCodeFor(ExitCount);
      if (!ExactBTCV)
        ExactBTCV = Rewriter.expandCodeFor(ExactBTC);
      Value *RHS = ExactBTCV;
      if (ECV->getType() != RHS->getType()) {
        Type *WiderTy = SE->getWiderType(ECV->getType(), RHS->getType());
        ECV = B.CreateZExt(ECV, WiderTy);
        RHS = B.CreateZExt(RHS, WiderTy);
      }
      auto Pred = L->contains(BI->getSuccessor(0)) ?
        ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ;
      NewCond = B.CreateICmp(Pred, ECV, RHS);
    }
    Value *OldCond = BI->getCondition();
    BI->setCondition(NewCond);
    if (OldCond->use_empty())
      DeadInsts.emplace_back(OldCond);
    Changed = true;
  }

  return Changed;
}

//===----------------------------------------------------------------------===//
//  IndVarSimplify driver. Manage several subpasses of IV simplification.
//===----------------------------------------------------------------------===//

bool IndVarSimplify::run(Loop *L) {
  // We need (and expect!) the incoming loop to be in LCSSA.
  assert(L->isRecursivelyLCSSAForm(*DT, *LI) &&
         "LCSSA required to run indvars!");

  // If LoopSimplify form is not available, stay out of trouble. Some notes:
  //   - LSR currently only supports LoopSimplify-form loops. Indvars'
  //     canonicalization can be a pessimization without LSR to "clean up"
  //     afterwards.
  //   - We depend on having a preheader; in particular,
  //     Loop::getCanonicalInductionVariable only supports loops with
  //     preheaders, and we're in trouble if we can't find the induction
  //     variable even when we've manually inserted one.
  //   - LFTR relies on having a single backedge.
  if (!L->isLoopSimplifyForm())
    return false;

#ifndef NDEBUG
  // Used below for a consistency check only.
  // Note: Since the result returned by ScalarEvolution may depend on the
  // order in which previous results are added to its cache, the call to
  // getBackedgeTakenCount() may change following SCEV queries.
  const SCEV *BackedgeTakenCount;
  if (VerifyIndvars)
    BackedgeTakenCount = SE->getBackedgeTakenCount(L);
#endif

  bool Changed = false;
  // If there are any floating-point recurrences, attempt to
  // transform them to use integer recurrences.
  Changed |= rewriteNonIntegerIVs(L);

  // Create a rewriter object which we'll use to transform the code with.
  SCEVExpander Rewriter(*SE, DL, "indvars");
#ifndef NDEBUG
  Rewriter.setDebugType(DEBUG_TYPE);
#endif

  // Eliminate redundant IV users.
  //
  // Simplification works best when run before other consumers of SCEV.  We
  // attempt to avoid evaluating SCEVs for sign/zero extend operations until
  // other expressions involving loop IVs have been evaluated.  This helps
  // SCEV set no-wrap flags before normalizing sign/zero extension.
  Rewriter.disableCanonicalMode();
  Changed |= simplifyAndExtend(L, Rewriter, LI);

  // Check to see if we can compute the final value of any expressions
  // that are recurrent in the loop, and substitute the exit values from the
  // loop into any instructions outside of the loop that use the final
  // values of the current expressions.
  if (ReplaceExitValue != NeverRepl) {
    if (int Rewrites = rewriteLoopExitValues(L, LI, TLI, SE, TTI, Rewriter, DT,
                                             ReplaceExitValue, DeadInsts)) {
      NumReplaced += Rewrites;
      Changed = true;
    }
  }

  // Eliminate redundant IV cycles.
  NumElimIV += Rewriter.replaceCongruentIVs(L, DT, DeadInsts, TTI);

  // Try to convert exit conditions to unsigned and rotate computation
  // out of the loop.  Note: Handles invalidation internally if needed.
  Changed |= canonicalizeExitCondition(L);

  // Try to eliminate loop exits based on analyzable exit counts.
  if (optimizeLoopExits(L, Rewriter)) {
    Changed = true;
    // Given we've changed exit counts, notify SCEV.
    // Some nested loops may share the same folded exit basic block,
    // thus we need to notify the topmost loop.
    SE->forgetTopmostLoop(L);
  }

  // Try to form loop invariant tests for loop exits by changing how many
  // iterations of the loop run when that is unobservable.
  if (predicateLoopExits(L, Rewriter)) {
    Changed = true;
    // Given we've changed exit counts, notify SCEV.
    SE->forgetLoop(L);
  }

  // If we have a trip count expression, rewrite the loop's exit condition
  // using it.
  if (!DisableLFTR) {
    BasicBlock *PreHeader = L->getLoopPreheader();

    SmallVector<BasicBlock*, 16> ExitingBlocks;
    L->getExitingBlocks(ExitingBlocks);
    for (BasicBlock *ExitingBB : ExitingBlocks) {
      // Can't rewrite non-branch yet.
      if (!isa<BranchInst>(ExitingBB->getTerminator()))
        continue;

      // If our exiting block exits multiple loops, we can only rewrite the
      // innermost one.  Otherwise we're changing how many times the
      // innermost loop runs before it exits.
      if (LI->getLoopFor(ExitingBB) != L)
        continue;

      if (!needsLFTR(L, ExitingBB))
        continue;

      const SCEV *ExitCount = SE->getExitCount(L, ExitingBB);
      if (isa<SCEVCouldNotCompute>(ExitCount))
        continue;

      // This was handled above, but as we form SCEVs, we can sometimes
      // refine existing ones; this allows exit counts to be folded to zero
      // which weren't when optimizeLoopExits saw them.  Arguably, we should
      // iterate until stable to handle cases like this better.
      if (ExitCount->isZero())
        continue;

      PHINode *IndVar = FindLoopCounter(L, ExitingBB, ExitCount, SE, DT);
      if (!IndVar)
        continue;

      // Avoid high cost expansions.  Note: This heuristic is questionable in
      // that our definition of "high cost" is not exactly principled.
      if (Rewriter.isHighCostExpansion(ExitCount, L, SCEVCheapExpansionBudget,
                                       TTI, PreHeader->getTerminator()))
        continue;

      // Check preconditions for proper SCEVExpander operation. SCEV does not
      // express SCEVExpander's dependencies, such as LoopSimplify. Instead
      // any pass that uses the SCEVExpander must do it. This does not work
      // well for loop passes because SCEVExpander makes assumptions about
      // all loops, while LoopPassManager only forces the current loop to be
      // simplified.
      //
      // FIXME: SCEV expansion has no way to bail out, so the caller must
      // explicitly check any assumptions made by SCEV. Brittle.
      const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(ExitCount);
      if (!AR || AR->getLoop()->getLoopPreheader())
        Changed |= linearFunctionTestReplace(L, ExitingBB,
                                             ExitCount, IndVar,
                                             Rewriter);
    }
  }

  // Clear the rewriter cache, because values that are in the rewriter's
  // cache can be deleted in the loop below, causing the AssertingVH in the
  // cache to trigger.
  Rewriter.clear();

  // Now that we're done iterating through lists, clean up any instructions
  // which are now dead.
  while (!DeadInsts.empty()) {
    Value *V = DeadInsts.pop_back_val();

    if (PHINode *PHI = dyn_cast_or_null<PHINode>(V))
      Changed |= RecursivelyDeleteDeadPHINode(PHI, TLI, MSSAU.get());
    else if (Instruction *Inst = dyn_cast_or_null<Instruction>(V))
      Changed |=
          RecursivelyDeleteTriviallyDeadInstructions(Inst, TLI, MSSAU.get());
  }

  // The Rewriter may not be used from this point on.

  // Loop-invariant instructions in the preheader that aren't used in the
  // loop may be sunk below the loop to reduce register pressure.
  Changed |= sinkUnusedInvariants(L);

  // rewriteFirstIterationLoopExitValues does not rely on the computation of
  // trip count and therefore can further simplify exit values in addition to
  // rewriteLoopExitValues.
  Changed |= rewriteFirstIterationLoopExitValues(L);

  // Clean up dead instructions.
  Changed |= DeleteDeadPHIs(L->getHeader(), TLI, MSSAU.get());

  // Check a post-condition.
  assert(L->isRecursivelyLCSSAForm(*DT, *LI) &&
         "Indvars did not preserve LCSSA!");

  // Verify that LFTR and any other changes have not interfered with SCEV's
  // ability to compute trip count.  We may have *changed* the exit count,
  // but only by reducing it.
#ifndef NDEBUG
  if (VerifyIndvars && !isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
    SE->forgetLoop(L);
    const SCEV *NewBECount = SE->getBackedgeTakenCount(L);
    if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) <
        SE->getTypeSizeInBits(NewBECount->getType()))
      NewBECount = SE->getTruncateOrNoop(NewBECount,
                                         BackedgeTakenCount->getType());
    else
      BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount,
                                                 NewBECount->getType());
    assert(!SE->isKnownPredicate(ICmpInst::ICMP_ULT, BackedgeTakenCount,
                                 NewBECount) && "indvars must preserve SCEV");
  }
  if (VerifyMemorySSA && MSSAU)
    MSSAU->getMemorySSA()->verifyMemorySSA();
#endif

  return Changed;
}

PreservedAnalyses IndVarSimplifyPass::run(Loop &L, LoopAnalysisManager &AM,
                                          LoopStandardAnalysisResults &AR,
                                          LPMUpdater &) {
  Function *F = L.getHeader()->getParent();
  const DataLayout &DL = F->getParent()->getDataLayout();

  IndVarSimplify IVS(&AR.LI, &AR.SE, &AR.DT, DL, &AR.TLI, &AR.TTI, AR.MSSA,
                     WidenIndVars && AllowIVWidening);
  if (!IVS.run(&L))
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();
  PA.preserveSet<CFGAnalyses>();
  if (AR.MSSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}

namespace {

struct IndVarSimplifyLegacyPass : public LoopPass {
  static char ID; // Pass identification, replacement for typeid

  IndVarSimplifyLegacyPass() : LoopPass(ID) {
    initializeIndVarSimplifyLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L))
      return false;

    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
    auto *TLI = TLIP ? &TLIP->getTLI(*L->getHeader()->getParent()) : nullptr;
    auto *TTIP = getAnalysisIfAvailable<TargetTransformInfoWrapperPass>();
    auto *TTI = TTIP ? &TTIP->getTTI(*L->getHeader()->getParent()) : nullptr;
    const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
    auto *MSSAAnalysis = getAnalysisIfAvailable<MemorySSAWrapperPass>();
    MemorySSA *MSSA = nullptr;
    if (MSSAAnalysis)
      MSSA = &MSSAAnalysis->getMSSA();

    IndVarSimplify IVS(LI, SE, DT, DL, TLI, TTI, MSSA, AllowIVWidening);
    return IVS.run(L);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addPreserved<MemorySSAWrapperPass>();
    getLoopAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char IndVarSimplifyLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(IndVarSimplifyLegacyPass, "indvars",
                      "Induction Variable Simplification", false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_END(IndVarSimplifyLegacyPass, "indvars",
                    "Induction Variable Simplification", false, false)

Pass *llvm::createIndVarSimplifyPass() {
  return new IndVarSimplifyLegacyPass();
}