StraightLineStrengthReduce.cpp 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779
  1. //===- StraightLineStrengthReduce.cpp - -----------------------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file implements straight-line strength reduction (SLSR). Unlike loop
  10. // strength reduction, this algorithm is designed to reduce arithmetic
  11. // redundancy in straight-line code instead of loops. It has proven to be
  12. // effective in simplifying arithmetic statements derived from an unrolled loop.
  13. // It can also simplify the logic of SeparateConstOffsetFromGEP.
  14. //
  15. // There are many optimizations we can perform in the domain of SLSR. This file
  16. // for now contains only an initial step. Specifically, we look for strength
  17. // reduction candidates in the following forms:
  18. //
  19. // Form 1: B + i * S
  20. // Form 2: (B + i) * S
  21. // Form 3: &B[i * S]
  22. //
  23. // where S is an integer variable, and i is a constant integer. If we found two
  24. // candidates S1 and S2 in the same form and S1 dominates S2, we may rewrite S2
  25. // in a simpler way with respect to S1. For example,
  26. //
  27. // S1: X = B + i * S
  28. // S2: Y = B + i' * S => X + (i' - i) * S
  29. //
  30. // S1: X = (B + i) * S
  31. // S2: Y = (B + i') * S => X + (i' - i) * S
  32. //
  33. // S1: X = &B[i * S]
  34. // S2: Y = &B[i' * S] => &X[(i' - i) * S]
  35. //
  36. // Note: (i' - i) * S is folded to the extent possible.
  37. //
  38. // This rewriting is in general a good idea. The code patterns we focus on
  39. // usually come from loop unrolling, so (i' - i) * S is likely the same
  40. // across iterations and can be reused. When that happens, the optimized form
  41. // takes only one add starting from the second iteration.
  42. //
  43. // When such rewriting is possible, we call S1 a "basis" of S2. When S2 has
  44. // multiple bases, we choose to rewrite S2 with respect to its "immediate"
  45. // basis, the basis that is the closest ancestor in the dominator tree.
  46. //
  47. // TODO:
  48. //
  49. // - Floating point arithmetics when fast math is enabled.
  50. //
  51. // - SLSR may decrease ILP at the architecture level. Targets that are very
  52. // sensitive to ILP may want to disable it. Having SLSR to consider ILP is
  53. // left as future work.
  54. //
  55. // - When (i' - i) is constant but i and i' are not, we could still perform
  56. // SLSR.
  57. #include "llvm/Transforms/Scalar/StraightLineStrengthReduce.h"
  58. #include "llvm/ADT/APInt.h"
  59. #include "llvm/ADT/DepthFirstIterator.h"
  60. #include "llvm/ADT/SmallVector.h"
  61. #include "llvm/Analysis/ScalarEvolution.h"
  62. #include "llvm/Analysis/TargetTransformInfo.h"
  63. #include "llvm/Analysis/ValueTracking.h"
  64. #include "llvm/IR/Constants.h"
  65. #include "llvm/IR/DataLayout.h"
  66. #include "llvm/IR/DerivedTypes.h"
  67. #include "llvm/IR/Dominators.h"
  68. #include "llvm/IR/GetElementPtrTypeIterator.h"
  69. #include "llvm/IR/IRBuilder.h"
  70. #include "llvm/IR/InstrTypes.h"
  71. #include "llvm/IR/Instruction.h"
  72. #include "llvm/IR/Instructions.h"
  73. #include "llvm/IR/Module.h"
  74. #include "llvm/IR/Operator.h"
  75. #include "llvm/IR/PatternMatch.h"
  76. #include "llvm/IR/Type.h"
  77. #include "llvm/IR/Value.h"
  78. #include "llvm/InitializePasses.h"
  79. #include "llvm/Pass.h"
  80. #include "llvm/Support/Casting.h"
  81. #include "llvm/Support/ErrorHandling.h"
  82. #include "llvm/Transforms/Scalar.h"
  83. #include "llvm/Transforms/Utils/Local.h"
  84. #include <cassert>
  85. #include <cstdint>
  86. #include <limits>
  87. #include <list>
  88. #include <vector>
  89. using namespace llvm;
  90. using namespace PatternMatch;
// Sentinel address space passed to isLegalAddressingMode when the real address
// space of the access is unknown, so targets answer conservatively.
static const unsigned UnknownAddressSpace =
    std::numeric_limits<unsigned>::max();
  93. namespace {
class StraightLineStrengthReduceLegacyPass : public FunctionPass {
  // Data layout of the current module, cached in doInitialization.
  const DataLayout *DL = nullptr;

public:
  static char ID;

  StraightLineStrengthReduceLegacyPass() : FunctionPass(ID) {
    initializeStraightLineStrengthReduceLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    // We do not modify the shape of the CFG.
    AU.setPreservesCFG();
  }

  bool doInitialization(Module &M) override {
    DL = &M.getDataLayout();
    return false;
  }

  bool runOnFunction(Function &F) override;
};
// The implementation class shared by the legacy and new pass managers.
class StraightLineStrengthReduce {
public:
  StraightLineStrengthReduce(const DataLayout *DL, DominatorTree *DT,
                             ScalarEvolution *SE, TargetTransformInfo *TTI)
      : DL(DL), DT(DT), SE(SE), TTI(TTI) {}

  // SLSR candidate. Such a candidate must be in one of the forms described in
  // the header comments.
  struct Candidate {
    enum Kind {
      Invalid, // reserved for the default constructor
      Add,     // B + i * S
      Mul,     // (B + i) * S
      GEP,     // &B[..][i * S][..]
    };

    Candidate() = default;
    Candidate(Kind CT, const SCEV *B, ConstantInt *Idx, Value *S,
              Instruction *I)
        : CandidateKind(CT), Base(B), Index(Idx), Stride(S), Ins(I) {}

    Kind CandidateKind = Invalid;

    const SCEV *Base = nullptr;

    // Note that Index and Stride of a GEP candidate do not necessarily have the
    // same integer type. In that case, during rewriting, Stride will be
    // sign-extended or truncated to Index's type.
    ConstantInt *Index = nullptr;

    Value *Stride = nullptr;

    // The instruction this candidate corresponds to. It helps us to rewrite a
    // candidate with respect to its immediate basis. Note that one instruction
    // can correspond to multiple candidates depending on how you associate the
    // expression. For instance,
    //
    //   (a + 1) * (b + 2)
    //
    // can be treated as
    //
    //   <Base: a, Index: 1, Stride: b + 2>
    //
    // or
    //
    //   <Base: b, Index: 2, Stride: a + 1>
    Instruction *Ins = nullptr;

    // Points to the immediate basis of this candidate, or nullptr if we cannot
    // find any basis for this candidate.
    Candidate *Basis = nullptr;
  };

  bool runOnFunction(Function &F);

private:
  // Returns true if Basis is a basis for C, i.e., Basis dominates C and they
  // share the same base and stride.
  bool isBasisFor(const Candidate &Basis, const Candidate &C);

  // Returns whether the candidate can be folded into an addressing mode.
  bool isFoldable(const Candidate &C, TargetTransformInfo *TTI,
                  const DataLayout *DL);

  // Returns true if C is already in a simplest form and not worth being
  // rewritten.
  bool isSimplestForm(const Candidate &C);

  // Checks whether I is in a candidate form. If so, adds all the matching forms
  // to Candidates, and tries to find the immediate basis for each of them.
  void allocateCandidatesAndFindBasis(Instruction *I);

  // Allocate candidates and find bases for Add instructions.
  void allocateCandidatesAndFindBasisForAdd(Instruction *I);

  // Given I = LHS + RHS, factors RHS into i * S and makes (LHS + i * S) a
  // candidate.
  void allocateCandidatesAndFindBasisForAdd(Value *LHS, Value *RHS,
                                            Instruction *I);

  // Allocate candidates and find bases for Mul instructions.
  void allocateCandidatesAndFindBasisForMul(Instruction *I);

  // Splits LHS into Base + Index and, if succeeds, calls
  // allocateCandidatesAndFindBasis.
  void allocateCandidatesAndFindBasisForMul(Value *LHS, Value *RHS,
                                            Instruction *I);

  // Allocate candidates and find bases for GetElementPtr instructions.
  void allocateCandidatesAndFindBasisForGEP(GetElementPtrInst *GEP);

  // A helper function that scales Idx with ElementSize before invoking
  // allocateCandidatesAndFindBasis.
  void allocateCandidatesAndFindBasisForGEP(const SCEV *B, ConstantInt *Idx,
                                            Value *S, uint64_t ElementSize,
                                            Instruction *I);

  // Adds the given form <CT, B, Idx, S> to Candidates, and finds its immediate
  // basis.
  void allocateCandidatesAndFindBasis(Candidate::Kind CT, const SCEV *B,
                                      ConstantInt *Idx, Value *S,
                                      Instruction *I);

  // Rewrites candidate C with respect to Basis.
  void rewriteCandidateWithBasis(const Candidate &C, const Candidate &Basis);

  // A helper function that factors ArrayIdx to a product of a stride and a
  // constant index, and invokes allocateCandidatesAndFindBasis with the
  // factorings.
  void factorArrayIndex(Value *ArrayIdx, const SCEV *Base, uint64_t ElementSize,
                        GetElementPtrInst *GEP);

  // Emit code that computes the "bump" from Basis to C. If the candidate is a
  // GEP and the bump is not divisible by the element size of the GEP, this
  // function sets the BumpWithUglyGEP flag to notify its caller to bump the
  // basis using an ugly GEP.
  static Value *emitBump(const Candidate &Basis, const Candidate &C,
                         IRBuilder<> &Builder, const DataLayout *DL,
                         bool &BumpWithUglyGEP);

  const DataLayout *DL = nullptr;
  DominatorTree *DT = nullptr;
  ScalarEvolution *SE;
  TargetTransformInfo *TTI = nullptr;

  // All candidates, in the order they were discovered. A std::list is used
  // because Candidate::Basis points into this container, and list insertion
  // never invalidates pointers to existing elements.
  std::list<Candidate> Candidates;

  // Temporarily holds all instructions that are unlinked (but not deleted) by
  // rewriteCandidateWithBasis. These instructions will be actually removed
  // after all rewriting finishes.
  std::vector<Instruction *> UnlinkedInstructions;
};
  221. } // end anonymous namespace
char StraightLineStrengthReduceLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(StraightLineStrengthReduceLegacyPass, "slsr",
                      "Straight line strength reduction", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(StraightLineStrengthReduceLegacyPass, "slsr",
                    "Straight line strength reduction", false, false)

// Factory used by the legacy pass manager to create this pass.
FunctionPass *llvm::createStraightLineStrengthReducePass() {
  return new StraightLineStrengthReduceLegacyPass();
}
  233. bool StraightLineStrengthReduce::isBasisFor(const Candidate &Basis,
  234. const Candidate &C) {
  235. return (Basis.Ins != C.Ins && // skip the same instruction
  236. // They must have the same type too. Basis.Base == C.Base doesn't
  237. // guarantee their types are the same (PR23975).
  238. Basis.Ins->getType() == C.Ins->getType() &&
  239. // Basis must dominate C in order to rewrite C with respect to Basis.
  240. DT->dominates(Basis.Ins->getParent(), C.Ins->getParent()) &&
  241. // They share the same base, stride, and candidate kind.
  242. Basis.Base == C.Base && Basis.Stride == C.Stride &&
  243. Basis.CandidateKind == C.CandidateKind);
  244. }
  245. static bool isGEPFoldable(GetElementPtrInst *GEP,
  246. const TargetTransformInfo *TTI) {
  247. SmallVector<const Value *, 4> Indices(GEP->indices());
  248. return TTI->getGEPCost(GEP->getSourceElementType(), GEP->getPointerOperand(),
  249. Indices) == TargetTransformInfo::TCC_Free;
  250. }
  251. // Returns whether (Base + Index * Stride) can be folded to an addressing mode.
  252. static bool isAddFoldable(const SCEV *Base, ConstantInt *Index, Value *Stride,
  253. TargetTransformInfo *TTI) {
  254. // Index->getSExtValue() may crash if Index is wider than 64-bit.
  255. return Index->getBitWidth() <= 64 &&
  256. TTI->isLegalAddressingMode(Base->getType(), nullptr, 0, true,
  257. Index->getSExtValue(), UnknownAddressSpace);
  258. }
  259. bool StraightLineStrengthReduce::isFoldable(const Candidate &C,
  260. TargetTransformInfo *TTI,
  261. const DataLayout *DL) {
  262. if (C.CandidateKind == Candidate::Add)
  263. return isAddFoldable(C.Base, C.Index, C.Stride, TTI);
  264. if (C.CandidateKind == Candidate::GEP)
  265. return isGEPFoldable(cast<GetElementPtrInst>(C.Ins), TTI);
  266. return false;
  267. }
  268. // Returns true if GEP has zero or one non-zero index.
  269. static bool hasOnlyOneNonZeroIndex(GetElementPtrInst *GEP) {
  270. unsigned NumNonZeroIndices = 0;
  271. for (Use &Idx : GEP->indices()) {
  272. ConstantInt *ConstIdx = dyn_cast<ConstantInt>(Idx);
  273. if (ConstIdx == nullptr || !ConstIdx->isZero())
  274. ++NumNonZeroIndices;
  275. }
  276. return NumNonZeroIndices <= 1;
  277. }
  278. bool StraightLineStrengthReduce::isSimplestForm(const Candidate &C) {
  279. if (C.CandidateKind == Candidate::Add) {
  280. // B + 1 * S or B + (-1) * S
  281. return C.Index->isOne() || C.Index->isMinusOne();
  282. }
  283. if (C.CandidateKind == Candidate::Mul) {
  284. // (B + 0) * S
  285. return C.Index->isZero();
  286. }
  287. if (C.CandidateKind == Candidate::GEP) {
  288. // (char*)B + S or (char*)B - S
  289. return ((C.Index->isOne() || C.Index->isMinusOne()) &&
  290. hasOnlyOneNonZeroIndex(cast<GetElementPtrInst>(C.Ins)));
  291. }
  292. return false;
  293. }
  294. // TODO: We currently implement an algorithm whose time complexity is linear in
  295. // the number of existing candidates. However, we could do better by using
  296. // ScopedHashTable. Specifically, while traversing the dominator tree, we could
  297. // maintain all the candidates that dominate the basic block being traversed in
  298. // a ScopedHashTable. This hash table is indexed by the base and the stride of
  299. // a candidate. Therefore, finding the immediate basis of a candidate boils down
  300. // to one hash-table look up.
  301. void StraightLineStrengthReduce::allocateCandidatesAndFindBasis(
  302. Candidate::Kind CT, const SCEV *B, ConstantInt *Idx, Value *S,
  303. Instruction *I) {
  304. Candidate C(CT, B, Idx, S, I);
  305. // SLSR can complicate an instruction in two cases:
  306. //
  307. // 1. If we can fold I into an addressing mode, computing I is likely free or
  308. // takes only one instruction.
  309. //
  310. // 2. I is already in a simplest form. For example, when
  311. // X = B + 8 * S
  312. // Y = B + S,
  313. // rewriting Y to X - 7 * S is probably a bad idea.
  314. //
  315. // In the above cases, we still add I to the candidate list so that I can be
  316. // the basis of other candidates, but we leave I's basis blank so that I
  317. // won't be rewritten.
  318. if (!isFoldable(C, TTI, DL) && !isSimplestForm(C)) {
  319. // Try to compute the immediate basis of C.
  320. unsigned NumIterations = 0;
  321. // Limit the scan radius to avoid running in quadratice time.
  322. static const unsigned MaxNumIterations = 50;
  323. for (auto Basis = Candidates.rbegin();
  324. Basis != Candidates.rend() && NumIterations < MaxNumIterations;
  325. ++Basis, ++NumIterations) {
  326. if (isBasisFor(*Basis, C)) {
  327. C.Basis = &(*Basis);
  328. break;
  329. }
  330. }
  331. }
  332. // Regardless of whether we find a basis for C, we need to push C to the
  333. // candidate list so that it can be the basis of other candidates.
  334. Candidates.push_back(C);
  335. }
  336. void StraightLineStrengthReduce::allocateCandidatesAndFindBasis(
  337. Instruction *I) {
  338. switch (I->getOpcode()) {
  339. case Instruction::Add:
  340. allocateCandidatesAndFindBasisForAdd(I);
  341. break;
  342. case Instruction::Mul:
  343. allocateCandidatesAndFindBasisForMul(I);
  344. break;
  345. case Instruction::GetElementPtr:
  346. allocateCandidatesAndFindBasisForGEP(cast<GetElementPtrInst>(I));
  347. break;
  348. }
  349. }
  350. void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForAdd(
  351. Instruction *I) {
  352. // Try matching B + i * S.
  353. if (!isa<IntegerType>(I->getType()))
  354. return;
  355. assert(I->getNumOperands() == 2 && "isn't I an add?");
  356. Value *LHS = I->getOperand(0), *RHS = I->getOperand(1);
  357. allocateCandidatesAndFindBasisForAdd(LHS, RHS, I);
  358. if (LHS != RHS)
  359. allocateCandidatesAndFindBasisForAdd(RHS, LHS, I);
  360. }
  361. void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForAdd(
  362. Value *LHS, Value *RHS, Instruction *I) {
  363. Value *S = nullptr;
  364. ConstantInt *Idx = nullptr;
  365. if (match(RHS, m_Mul(m_Value(S), m_ConstantInt(Idx)))) {
  366. // I = LHS + RHS = LHS + Idx * S
  367. allocateCandidatesAndFindBasis(Candidate::Add, SE->getSCEV(LHS), Idx, S, I);
  368. } else if (match(RHS, m_Shl(m_Value(S), m_ConstantInt(Idx)))) {
  369. // I = LHS + RHS = LHS + (S << Idx) = LHS + S * (1 << Idx)
  370. APInt One(Idx->getBitWidth(), 1);
  371. Idx = ConstantInt::get(Idx->getContext(), One << Idx->getValue());
  372. allocateCandidatesAndFindBasis(Candidate::Add, SE->getSCEV(LHS), Idx, S, I);
  373. } else {
  374. // At least, I = LHS + 1 * RHS
  375. ConstantInt *One = ConstantInt::get(cast<IntegerType>(I->getType()), 1);
  376. allocateCandidatesAndFindBasis(Candidate::Add, SE->getSCEV(LHS), One, RHS,
  377. I);
  378. }
  379. }
  380. // Returns true if A matches B + C where C is constant.
  381. static bool matchesAdd(Value *A, Value *&B, ConstantInt *&C) {
  382. return (match(A, m_Add(m_Value(B), m_ConstantInt(C))) ||
  383. match(A, m_Add(m_ConstantInt(C), m_Value(B))));
  384. }
  385. // Returns true if A matches B | C where C is constant.
  386. static bool matchesOr(Value *A, Value *&B, ConstantInt *&C) {
  387. return (match(A, m_Or(m_Value(B), m_ConstantInt(C))) ||
  388. match(A, m_Or(m_ConstantInt(C), m_Value(B))));
  389. }
  390. void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForMul(
  391. Value *LHS, Value *RHS, Instruction *I) {
  392. Value *B = nullptr;
  393. ConstantInt *Idx = nullptr;
  394. if (matchesAdd(LHS, B, Idx)) {
  395. // If LHS is in the form of "Base + Index", then I is in the form of
  396. // "(Base + Index) * RHS".
  397. allocateCandidatesAndFindBasis(Candidate::Mul, SE->getSCEV(B), Idx, RHS, I);
  398. } else if (matchesOr(LHS, B, Idx) && haveNoCommonBitsSet(B, Idx, *DL)) {
  399. // If LHS is in the form of "Base | Index" and Base and Index have no common
  400. // bits set, then
  401. // Base | Index = Base + Index
  402. // and I is thus in the form of "(Base + Index) * RHS".
  403. allocateCandidatesAndFindBasis(Candidate::Mul, SE->getSCEV(B), Idx, RHS, I);
  404. } else {
  405. // Otherwise, at least try the form (LHS + 0) * RHS.
  406. ConstantInt *Zero = ConstantInt::get(cast<IntegerType>(I->getType()), 0);
  407. allocateCandidatesAndFindBasis(Candidate::Mul, SE->getSCEV(LHS), Zero, RHS,
  408. I);
  409. }
  410. }
  411. void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForMul(
  412. Instruction *I) {
  413. // Try matching (B + i) * S.
  414. // TODO: we could extend SLSR to float and vector types.
  415. if (!isa<IntegerType>(I->getType()))
  416. return;
  417. assert(I->getNumOperands() == 2 && "isn't I a mul?");
  418. Value *LHS = I->getOperand(0), *RHS = I->getOperand(1);
  419. allocateCandidatesAndFindBasisForMul(LHS, RHS, I);
  420. if (LHS != RHS) {
  421. // Symmetrically, try to split RHS to Base + Index.
  422. allocateCandidatesAndFindBasisForMul(RHS, LHS, I);
  423. }
  424. }
  425. void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP(
  426. const SCEV *B, ConstantInt *Idx, Value *S, uint64_t ElementSize,
  427. Instruction *I) {
  428. // I = B + sext(Idx *nsw S) * ElementSize
  429. // = B + (sext(Idx) * sext(S)) * ElementSize
  430. // = B + (sext(Idx) * ElementSize) * sext(S)
  431. // Casting to IntegerType is safe because we skipped vector GEPs.
  432. IntegerType *IntPtrTy = cast<IntegerType>(DL->getIntPtrType(I->getType()));
  433. ConstantInt *ScaledIdx = ConstantInt::get(
  434. IntPtrTy, Idx->getSExtValue() * (int64_t)ElementSize, true);
  435. allocateCandidatesAndFindBasis(Candidate::GEP, B, ScaledIdx, S, I);
  436. }
// Factors ArrayIdx into a constant index times a stride and records each
// factoring as a GEP candidate over Base with the given ElementSize.
void StraightLineStrengthReduce::factorArrayIndex(Value *ArrayIdx,
                                                  const SCEV *Base,
                                                  uint64_t ElementSize,
                                                  GetElementPtrInst *GEP) {
  // At least, ArrayIdx = ArrayIdx *nsw 1.
  allocateCandidatesAndFindBasisForGEP(
      Base, ConstantInt::get(cast<IntegerType>(ArrayIdx->getType()), 1),
      ArrayIdx, ElementSize, GEP);

  Value *LHS = nullptr;
  ConstantInt *RHS = nullptr;
  // One alternative is matching the SCEV of ArrayIdx instead of ArrayIdx
  // itself. This would allow us to handle the shl case for free. However,
  // matching SCEVs has two issues:
  //
  // 1. this would complicate rewriting because the rewriting procedure
  //    would have to translate SCEVs back to IR instructions. This translation
  //    is difficult when LHS is further evaluated to a composite SCEV.
  //
  // 2. ScalarEvolution is designed to be control-flow oblivious. It tends
  //    to strip nsw/nuw flags which are critical for SLSR to trace into
  //    sext'ed multiplication.
  if (match(ArrayIdx, m_NSWMul(m_Value(LHS), m_ConstantInt(RHS)))) {
    // Only nsw multiplies are matched: SLSR is currently unsafe if i * S may
    // overflow.
    // GEP = Base + sext(LHS *nsw RHS) * ElementSize
    allocateCandidatesAndFindBasisForGEP(Base, RHS, LHS, ElementSize, GEP);
  } else if (match(ArrayIdx, m_NSWShl(m_Value(LHS), m_ConstantInt(RHS)))) {
    // GEP = Base + sext(LHS <<nsw RHS) * ElementSize
    //     = Base + sext(LHS *nsw (1 << RHS)) * ElementSize
    APInt One(RHS->getBitWidth(), 1);
    ConstantInt *PowerOf2 =
        ConstantInt::get(RHS->getContext(), One << RHS->getValue());
    allocateCandidatesAndFindBasisForGEP(Base, PowerOf2, LHS, ElementSize, GEP);
  }
}
// Considers each array-typed index of GEP in turn as the "i * S" part of a GEP
// candidate, with all other indices folded into the base.
void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP(
    GetElementPtrInst *GEP) {
  // TODO: handle vector GEPs
  if (GEP->getType()->isVectorTy())
    return;

  SmallVector<const SCEV *, 4> IndexExprs;
  for (Use &Idx : GEP->indices())
    IndexExprs.push_back(SE->getSCEV(Idx));

  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    // Struct member offsets are fixed constants; only array-style indices can
    // be strength-reduced.
    if (GTI.isStruct())
      continue;

    // Temporarily zero out the current index so getGEPExpr below yields the
    // base of this candidate: GEP's pointer plus the offsets of all the other
    // indices. The original expression is restored at the end of the loop.
    const SCEV *OrigIndexExpr = IndexExprs[I - 1];
    IndexExprs[I - 1] = SE->getZero(OrigIndexExpr->getType());

    // The base of this candidate is GEP's base plus the offsets of all
    // indices except this current one.
    const SCEV *BaseExpr = SE->getGEPExpr(cast<GEPOperator>(GEP), IndexExprs);
    Value *ArrayIdx = GEP->getOperand(I);
    uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
    // Skip factoring if ArrayIdx is wider than the pointer size, because
    // ArrayIdx is implicitly truncated to the pointer size.
    if (ArrayIdx->getType()->getIntegerBitWidth() <=
        DL->getPointerSizeInBits(GEP->getAddressSpace())) {
      factorArrayIndex(ArrayIdx, BaseExpr, ElementSize, GEP);
    }
    // When ArrayIdx is the sext of a value, we try to factor that value as
    // well. Handling this case is important because array indices are
    // typically sign-extended to the pointer size.
    Value *TruncatedArrayIdx = nullptr;
    // As above, skip factoring if TruncatedArrayIdx is wider than the pointer
    // size, because it is implicitly truncated to the pointer size.
    if (match(ArrayIdx, m_SExt(m_Value(TruncatedArrayIdx))) &&
        TruncatedArrayIdx->getType()->getIntegerBitWidth() <=
            DL->getPointerSizeInBits(GEP->getAddressSpace())) {
      factorArrayIndex(TruncatedArrayIdx, BaseExpr, ElementSize, GEP);
    }

    IndexExprs[I - 1] = OrigIndexExpr;
  }
}
  510. // A helper function that unifies the bitwidth of A and B.
  511. static void unifyBitWidth(APInt &A, APInt &B) {
  512. if (A.getBitWidth() < B.getBitWidth())
  513. A = A.sext(B.getBitWidth());
  514. else if (A.getBitWidth() > B.getBitWidth())
  515. B = B.sext(A.getBitWidth());
  516. }
// Emits code computing the "bump" (i' - i) * S that takes Basis to C, choosing
// the cheapest form available (the stride itself, its negation, a shift, or a
// multiply).
Value *StraightLineStrengthReduce::emitBump(const Candidate &Basis,
                                            const Candidate &C,
                                            IRBuilder<> &Builder,
                                            const DataLayout *DL,
                                            bool &BumpWithUglyGEP) {
  APInt Idx = C.Index->getValue(), BasisIdx = Basis.Index->getValue();
  unifyBitWidth(Idx, BasisIdx);
  APInt IndexOffset = Idx - BasisIdx;

  BumpWithUglyGEP = false;
  if (Basis.CandidateKind == Candidate::GEP) {
    // For GEP candidates the bump is measured in elements, so convert the byte
    // offset by dividing through the element size. If it does not divide
    // evenly, keep the byte offset and tell the caller to use an i8 ("ugly")
    // GEP instead.
    APInt ElementSize(
        IndexOffset.getBitWidth(),
        DL->getTypeAllocSize(
            cast<GetElementPtrInst>(Basis.Ins)->getResultElementType()));
    APInt Q, R;
    APInt::sdivrem(IndexOffset, ElementSize, Q, R);
    if (R == 0)
      IndexOffset = Q;
    else
      BumpWithUglyGEP = true;
  }

  // Compute Bump = C - Basis = (i' - i) * S.
  // Common case 1: if (i' - i) is 1, Bump = S.
  if (IndexOffset == 1)
    return C.Stride;
  // Common case 2: if (i' - i) is -1, Bump = -S.
  if (IndexOffset.isAllOnes())
    return Builder.CreateNeg(C.Stride);

  // Otherwise, Bump = (i' - i) * sext/trunc(S). Note that (i' - i) and S may
  // have different bit widths.
  IntegerType *DeltaType =
      IntegerType::get(Basis.Ins->getContext(), IndexOffset.getBitWidth());
  Value *ExtendedStride = Builder.CreateSExtOrTrunc(C.Stride, DeltaType);
  if (IndexOffset.isPowerOf2()) {
    // If (i' - i) is a power of 2, Bump = sext/trunc(S) << log(i' - i).
    ConstantInt *Exponent = ConstantInt::get(DeltaType, IndexOffset.logBase2());
    return Builder.CreateShl(ExtendedStride, Exponent);
  }
  if (IndexOffset.isNegatedPowerOf2()) {
    // If (i - i') is a power of 2, Bump = -(sext/trunc(S) << log(i - i')).
    ConstantInt *Exponent =
        ConstantInt::get(DeltaType, (-IndexOffset).logBase2());
    return Builder.CreateNeg(Builder.CreateShl(ExtendedStride, Exponent));
  }
  Constant *Delta = ConstantInt::get(DeltaType, IndexOffset);
  return Builder.CreateMul(ExtendedStride, Delta);
}
// Replaces C.Ins with an equivalent computation derived from Basis.Ins plus
// the bump emitted by emitBump, then unlinks (but does not delete) C.Ins.
void StraightLineStrengthReduce::rewriteCandidateWithBasis(
    const Candidate &C, const Candidate &Basis) {
  assert(C.CandidateKind == Basis.CandidateKind && C.Base == Basis.Base &&
         C.Stride == Basis.Stride);
  // We run rewriteCandidateWithBasis on all candidates in a post-order, so the
  // basis of a candidate cannot be unlinked before the candidate.
  assert(Basis.Ins->getParent() != nullptr && "the basis is unlinked");

  // An instruction can correspond to multiple candidates. Therefore, instead of
  // simply deleting an instruction when we rewrite it, we mark its parent as
  // nullptr (i.e. unlink it) so that we can skip the candidates whose
  // instruction is already rewritten.
  if (!C.Ins->getParent())
    return;

  IRBuilder<> Builder(C.Ins);
  bool BumpWithUglyGEP;
  Value *Bump = emitBump(Basis, C, Builder, DL, BumpWithUglyGEP);
  Value *Reduced = nullptr; // equivalent to but weaker than C.Ins
  switch (C.CandidateKind) {
  case Candidate::Add:
  case Candidate::Mul: {
    // C = Basis + Bump
    Value *NegBump;
    if (match(Bump, m_Neg(m_Value(NegBump)))) {
      // If Bump is a neg instruction, emit C = Basis - (-Bump).
      Reduced = Builder.CreateSub(Basis.Ins, NegBump);
      // We only use the negative argument of Bump, and Bump itself may be
      // trivially dead.
      RecursivelyDeleteTriviallyDeadInstructions(Bump);
    } else {
      // It's tempting to preserve nsw on Bump and/or Reduced. However, it's
      // usually unsound, e.g.,
      //
      //   X = (-2 +nsw 1) *nsw INT_MAX
      //   Y = (-2 +nsw 3) *nsw INT_MAX
      //   =>
      //   Y = X + 2 * INT_MAX
      //
      // Neither + and * in the resultant expression are nsw.
      Reduced = Builder.CreateAdd(Basis.Ins, Bump);
    }
    break;
  }
  case Candidate::GEP: {
    Type *IntPtrTy = DL->getIntPtrType(C.Ins->getType());
    bool InBounds = cast<GetElementPtrInst>(C.Ins)->isInBounds();
    if (BumpWithUglyGEP) {
      // The bump is a raw byte offset: C = (char *)Basis + Bump.
      unsigned AS = Basis.Ins->getType()->getPointerAddressSpace();
      Type *CharTy = Type::getInt8PtrTy(Basis.Ins->getContext(), AS);
      Reduced = Builder.CreateBitCast(Basis.Ins, CharTy);
      if (InBounds)
        Reduced = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), Reduced, Bump);
      else
        Reduced = Builder.CreateGEP(Builder.getInt8Ty(), Reduced, Bump);
      Reduced = Builder.CreateBitCast(Reduced, C.Ins->getType());
    } else {
      // The bump counts whole elements: C = gep Basis, Bump.
      // Canonicalize bump to pointer size.
      Bump = Builder.CreateSExtOrTrunc(Bump, IntPtrTy);
      if (InBounds)
        Reduced = Builder.CreateInBoundsGEP(
            cast<GetElementPtrInst>(Basis.Ins)->getResultElementType(),
            Basis.Ins, Bump);
      else
        Reduced = Builder.CreateGEP(
            cast<GetElementPtrInst>(Basis.Ins)->getResultElementType(),
            Basis.Ins, Bump);
    }
    break;
  }
  default:
    llvm_unreachable("C.CandidateKind is invalid");
  };
  Reduced->takeName(C.Ins);
  C.Ins->replaceAllUsesWith(Reduced);
  // Unlink C.Ins so that we can skip other candidates also corresponding to
  // C.Ins. The actual deletion is postponed to the end of runOnFunction.
  C.Ins->removeFromParent();
  UnlinkedInstructions.push_back(C.Ins);
}
  646. bool StraightLineStrengthReduceLegacyPass::runOnFunction(Function &F) {
  647. if (skipFunction(F))
  648. return false;
  649. auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  650. auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  651. auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  652. return StraightLineStrengthReduce(DL, DT, SE, TTI).runOnFunction(F);
  653. }
  654. bool StraightLineStrengthReduce::runOnFunction(Function &F) {
  655. // Traverse the dominator tree in the depth-first order. This order makes sure
  656. // all bases of a candidate are in Candidates when we process it.
  657. for (const auto Node : depth_first(DT))
  658. for (auto &I : *(Node->getBlock()))
  659. allocateCandidatesAndFindBasis(&I);
  660. // Rewrite candidates in the reverse depth-first order. This order makes sure
  661. // a candidate being rewritten is not a basis for any other candidate.
  662. while (!Candidates.empty()) {
  663. const Candidate &C = Candidates.back();
  664. if (C.Basis != nullptr) {
  665. rewriteCandidateWithBasis(C, *C.Basis);
  666. }
  667. Candidates.pop_back();
  668. }
  669. // Delete all unlink instructions.
  670. for (auto *UnlinkedInst : UnlinkedInstructions) {
  671. for (unsigned I = 0, E = UnlinkedInst->getNumOperands(); I != E; ++I) {
  672. Value *Op = UnlinkedInst->getOperand(I);
  673. UnlinkedInst->setOperand(I, nullptr);
  674. RecursivelyDeleteTriviallyDeadInstructions(Op);
  675. }
  676. UnlinkedInst->deleteValue();
  677. }
  678. bool Ret = !UnlinkedInstructions.empty();
  679. UnlinkedInstructions.clear();
  680. return Ret;
  681. }
  682. namespace llvm {
  683. PreservedAnalyses
  684. StraightLineStrengthReducePass::run(Function &F, FunctionAnalysisManager &AM) {
  685. const DataLayout *DL = &F.getParent()->getDataLayout();
  686. auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  687. auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
  688. auto *TTI = &AM.getResult<TargetIRAnalysis>(F);
  689. if (!StraightLineStrengthReduce(DL, DT, SE, TTI).runOnFunction(F))
  690. return PreservedAnalyses::all();
  691. PreservedAnalyses PA;
  692. PA.preserveSet<CFGAnalyses>();
  693. PA.preserve<DominatorTreeAnalysis>();
  694. PA.preserve<ScalarEvolutionAnalysis>();
  695. PA.preserve<TargetIRAnalysis>();
  696. return PA;
  697. }
  698. } // namespace llvm