StraightLineStrengthReduce.cpp 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770
  1. //===- StraightLineStrengthReduce.cpp - -----------------------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file implements straight-line strength reduction (SLSR). Unlike loop
  10. // strength reduction, this algorithm is designed to reduce arithmetic
  11. // redundancy in straight-line code instead of loops. It has proven to be
  12. // effective in simplifying arithmetic statements derived from an unrolled loop.
  13. // It can also simplify the logic of SeparateConstOffsetFromGEP.
  14. //
  15. // There are many optimizations we can perform in the domain of SLSR. This file
  16. // for now contains only an initial step. Specifically, we look for strength
  17. // reduction candidates in the following forms:
  18. //
  19. // Form 1: B + i * S
  20. // Form 2: (B + i) * S
  21. // Form 3: &B[i * S]
  22. //
  23. // where S is an integer variable, and i is a constant integer. If we found two
  24. // candidates S1 and S2 in the same form and S1 dominates S2, we may rewrite S2
  25. // in a simpler way with respect to S1. For example,
  26. //
  27. // S1: X = B + i * S
  28. // S2: Y = B + i' * S => X + (i' - i) * S
  29. //
  30. // S1: X = (B + i) * S
  31. // S2: Y = (B + i') * S => X + (i' - i) * S
  32. //
  33. // S1: X = &B[i * S]
  34. // S2: Y = &B[i' * S] => &X[(i' - i) * S]
  35. //
  36. // Note: (i' - i) * S is folded to the extent possible.
  37. //
  38. // This rewriting is in general a good idea. The code patterns we focus on
  39. // usually come from loop unrolling, so (i' - i) * S is likely the same
  40. // across iterations and can be reused. When that happens, the optimized form
  41. // takes only one add starting from the second iteration.
  42. //
  43. // When such rewriting is possible, we call S1 a "basis" of S2. When S2 has
  44. // multiple bases, we choose to rewrite S2 with respect to its "immediate"
  45. // basis, the basis that is the closest ancestor in the dominator tree.
  46. //
  47. // TODO:
  48. //
  49. // - Floating point arithmetics when fast math is enabled.
  50. //
  51. // - SLSR may decrease ILP at the architecture level. Targets that are very
  52. // sensitive to ILP may want to disable it. Having SLSR to consider ILP is
  53. // left as future work.
  54. //
  55. // - When (i' - i) is constant but i and i' are not, we could still perform
  56. // SLSR.
  57. #include "llvm/Transforms/Scalar/StraightLineStrengthReduce.h"
  58. #include "llvm/ADT/APInt.h"
  59. #include "llvm/ADT/DepthFirstIterator.h"
  60. #include "llvm/ADT/SmallVector.h"
  61. #include "llvm/Analysis/ScalarEvolution.h"
  62. #include "llvm/Analysis/TargetTransformInfo.h"
  63. #include "llvm/Analysis/ValueTracking.h"
  64. #include "llvm/IR/Constants.h"
  65. #include "llvm/IR/DataLayout.h"
  66. #include "llvm/IR/DerivedTypes.h"
  67. #include "llvm/IR/Dominators.h"
  68. #include "llvm/IR/GetElementPtrTypeIterator.h"
  69. #include "llvm/IR/IRBuilder.h"
  70. #include "llvm/IR/Instruction.h"
  71. #include "llvm/IR/Instructions.h"
  72. #include "llvm/IR/Module.h"
  73. #include "llvm/IR/Operator.h"
  74. #include "llvm/IR/PatternMatch.h"
  75. #include "llvm/IR/Type.h"
  76. #include "llvm/IR/Value.h"
  77. #include "llvm/InitializePasses.h"
  78. #include "llvm/Pass.h"
  79. #include "llvm/Support/Casting.h"
  80. #include "llvm/Support/ErrorHandling.h"
  81. #include "llvm/Transforms/Scalar.h"
  82. #include "llvm/Transforms/Utils/Local.h"
  83. #include <cassert>
  84. #include <cstdint>
  85. #include <limits>
  86. #include <list>
  87. #include <vector>
  88. using namespace llvm;
  89. using namespace PatternMatch;
  90. static const unsigned UnknownAddressSpace =
  91. std::numeric_limits<unsigned>::max();
namespace {

// Legacy pass-manager wrapper. Caches the module DataLayout and forwards the
// per-function work to the StraightLineStrengthReduce implementation below.
class StraightLineStrengthReduceLegacyPass : public FunctionPass {
  // Module data layout, cached in doInitialization.
  const DataLayout *DL = nullptr;

public:
  static char ID;

  StraightLineStrengthReduceLegacyPass() : FunctionPass(ID) {
    initializeStraightLineStrengthReduceLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    // We do not modify the shape of the CFG.
    AU.setPreservesCFG();
  }

  bool doInitialization(Module &M) override {
    DL = &M.getDataLayout();
    return false; // the module itself is not modified here
  }

  bool runOnFunction(Function &F) override;
};

// The actual SLSR implementation, shared by the legacy and new pass managers.
class StraightLineStrengthReduce {
public:
  StraightLineStrengthReduce(const DataLayout *DL, DominatorTree *DT,
                             ScalarEvolution *SE, TargetTransformInfo *TTI)
      : DL(DL), DT(DT), SE(SE), TTI(TTI) {}

  // SLSR candidate. Such a candidate must be in one of the forms described in
  // the header comments.
  struct Candidate {
    enum Kind {
      Invalid, // reserved for the default constructor
      Add,     // B + i * S
      Mul,     // (B + i) * S
      GEP,     // &B[..][i * S][..]
    };

    Candidate() = default;
    Candidate(Kind CT, const SCEV *B, ConstantInt *Idx, Value *S,
              Instruction *I)
        : CandidateKind(CT), Base(B), Index(Idx), Stride(S), Ins(I) {}

    // Which of the three forms this candidate matches.
    Kind CandidateKind = Invalid;

    // The base expression B as a SCEV (used for structural equality).
    const SCEV *Base = nullptr;

    // Note that Index and Stride of a GEP candidate do not necessarily have the
    // same integer type. In that case, during rewriting, Stride will be
    // sign-extended or truncated to Index's type.
    ConstantInt *Index = nullptr;

    // The variable stride S.
    Value *Stride = nullptr;

    // The instruction this candidate corresponds to. It helps us to rewrite a
    // candidate with respect to its immediate basis. Note that one instruction
    // can correspond to multiple candidates depending on how you associate the
    // expression. For instance,
    //
    //   (a + 1) * (b + 2)
    //
    // can be treated as
    //
    //   <Base: a, Index: 1, Stride: b + 2>
    //
    // or
    //
    //   <Base: b, Index: 2, Stride: a + 1>
    Instruction *Ins = nullptr;

    // Points to the immediate basis of this candidate, or nullptr if we cannot
    // find any basis for this candidate.
    Candidate *Basis = nullptr;
  };

  bool runOnFunction(Function &F);

private:
  // Returns true if Basis is a basis for C, i.e., Basis dominates C and they
  // share the same base and stride.
  bool isBasisFor(const Candidate &Basis, const Candidate &C);

  // Returns whether the candidate can be folded into an addressing mode.
  bool isFoldable(const Candidate &C, TargetTransformInfo *TTI,
                  const DataLayout *DL);

  // Returns true if C is already in a simplest form and not worth being
  // rewritten.
  bool isSimplestForm(const Candidate &C);

  // Checks whether I is in a candidate form. If so, adds all the matching forms
  // to Candidates, and tries to find the immediate basis for each of them.
  void allocateCandidatesAndFindBasis(Instruction *I);

  // Allocate candidates and find bases for Add instructions.
  void allocateCandidatesAndFindBasisForAdd(Instruction *I);

  // Given I = LHS + RHS, factors RHS into i * S and makes (LHS + i * S) a
  // candidate.
  void allocateCandidatesAndFindBasisForAdd(Value *LHS, Value *RHS,
                                            Instruction *I);

  // Allocate candidates and find bases for Mul instructions.
  void allocateCandidatesAndFindBasisForMul(Instruction *I);

  // Splits LHS into Base + Index and, if succeeds, calls
  // allocateCandidatesAndFindBasis.
  void allocateCandidatesAndFindBasisForMul(Value *LHS, Value *RHS,
                                            Instruction *I);

  // Allocate candidates and find bases for GetElementPtr instructions.
  void allocateCandidatesAndFindBasisForGEP(GetElementPtrInst *GEP);

  // A helper function that scales Idx with ElementSize before invoking
  // allocateCandidatesAndFindBasis.
  void allocateCandidatesAndFindBasisForGEP(const SCEV *B, ConstantInt *Idx,
                                            Value *S, uint64_t ElementSize,
                                            Instruction *I);

  // Adds the given form <CT, B, Idx, S> to Candidates, and finds its immediate
  // basis.
  void allocateCandidatesAndFindBasis(Candidate::Kind CT, const SCEV *B,
                                      ConstantInt *Idx, Value *S,
                                      Instruction *I);

  // Rewrites candidate C with respect to Basis.
  void rewriteCandidateWithBasis(const Candidate &C, const Candidate &Basis);

  // A helper function that factors ArrayIdx to a product of a stride and a
  // constant index, and invokes allocateCandidatesAndFindBasis with the
  // factorings.
  void factorArrayIndex(Value *ArrayIdx, const SCEV *Base, uint64_t ElementSize,
                        GetElementPtrInst *GEP);

  // Emit code that computes the "bump" from Basis to C. If the candidate is a
  // GEP and the bump is not divisible by the element size of the GEP, this
  // function sets the BumpWithUglyGEP flag to notify its caller to bump the
  // basis using an ugly GEP.
  static Value *emitBump(const Candidate &Basis, const Candidate &C,
                         IRBuilder<> &Builder, const DataLayout *DL,
                         bool &BumpWithUglyGEP);

  const DataLayout *DL = nullptr;
  DominatorTree *DT = nullptr;
  ScalarEvolution *SE;
  TargetTransformInfo *TTI = nullptr;

  // All candidates found so far, in dominator-tree DFS order; searched
  // backwards when looking for a basis.
  std::list<Candidate> Candidates;

  // Temporarily holds all instructions that are unlinked (but not deleted) by
  // rewriteCandidateWithBasis. These instructions will be actually removed
  // after all rewriting finishes.
  std::vector<Instruction *> UnlinkedInstructions;
};

} // end anonymous namespace
// Legacy pass identification token.
char StraightLineStrengthReduceLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(StraightLineStrengthReduceLegacyPass, "slsr",
                      "Straight line strength reduction", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(StraightLineStrengthReduceLegacyPass, "slsr",
                    "Straight line strength reduction", false, false)

// Factory for the legacy pass manager.
FunctionPass *llvm::createStraightLineStrengthReducePass() {
  return new StraightLineStrengthReduceLegacyPass();
}
  232. bool StraightLineStrengthReduce::isBasisFor(const Candidate &Basis,
  233. const Candidate &C) {
  234. return (Basis.Ins != C.Ins && // skip the same instruction
  235. // They must have the same type too. Basis.Base == C.Base doesn't
  236. // guarantee their types are the same (PR23975).
  237. Basis.Ins->getType() == C.Ins->getType() &&
  238. // Basis must dominate C in order to rewrite C with respect to Basis.
  239. DT->dominates(Basis.Ins->getParent(), C.Ins->getParent()) &&
  240. // They share the same base, stride, and candidate kind.
  241. Basis.Base == C.Base && Basis.Stride == C.Stride &&
  242. Basis.CandidateKind == C.CandidateKind);
  243. }
  244. static bool isGEPFoldable(GetElementPtrInst *GEP,
  245. const TargetTransformInfo *TTI) {
  246. SmallVector<const Value *, 4> Indices(GEP->indices());
  247. return TTI->getGEPCost(GEP->getSourceElementType(), GEP->getPointerOperand(),
  248. Indices) == TargetTransformInfo::TCC_Free;
  249. }
  250. // Returns whether (Base + Index * Stride) can be folded to an addressing mode.
  251. static bool isAddFoldable(const SCEV *Base, ConstantInt *Index, Value *Stride,
  252. TargetTransformInfo *TTI) {
  253. // Index->getSExtValue() may crash if Index is wider than 64-bit.
  254. return Index->getBitWidth() <= 64 &&
  255. TTI->isLegalAddressingMode(Base->getType(), nullptr, 0, true,
  256. Index->getSExtValue(), UnknownAddressSpace);
  257. }
  258. bool StraightLineStrengthReduce::isFoldable(const Candidate &C,
  259. TargetTransformInfo *TTI,
  260. const DataLayout *DL) {
  261. if (C.CandidateKind == Candidate::Add)
  262. return isAddFoldable(C.Base, C.Index, C.Stride, TTI);
  263. if (C.CandidateKind == Candidate::GEP)
  264. return isGEPFoldable(cast<GetElementPtrInst>(C.Ins), TTI);
  265. return false;
  266. }
  267. // Returns true if GEP has zero or one non-zero index.
  268. static bool hasOnlyOneNonZeroIndex(GetElementPtrInst *GEP) {
  269. unsigned NumNonZeroIndices = 0;
  270. for (Use &Idx : GEP->indices()) {
  271. ConstantInt *ConstIdx = dyn_cast<ConstantInt>(Idx);
  272. if (ConstIdx == nullptr || !ConstIdx->isZero())
  273. ++NumNonZeroIndices;
  274. }
  275. return NumNonZeroIndices <= 1;
  276. }
  277. bool StraightLineStrengthReduce::isSimplestForm(const Candidate &C) {
  278. if (C.CandidateKind == Candidate::Add) {
  279. // B + 1 * S or B + (-1) * S
  280. return C.Index->isOne() || C.Index->isMinusOne();
  281. }
  282. if (C.CandidateKind == Candidate::Mul) {
  283. // (B + 0) * S
  284. return C.Index->isZero();
  285. }
  286. if (C.CandidateKind == Candidate::GEP) {
  287. // (char*)B + S or (char*)B - S
  288. return ((C.Index->isOne() || C.Index->isMinusOne()) &&
  289. hasOnlyOneNonZeroIndex(cast<GetElementPtrInst>(C.Ins)));
  290. }
  291. return false;
  292. }
// TODO: We currently implement an algorithm whose time complexity is linear in
// the number of existing candidates. However, we could do better by using
// ScopedHashTable. Specifically, while traversing the dominator tree, we could
// maintain all the candidates that dominate the basic block being traversed in
// a ScopedHashTable. This hash table is indexed by the base and the stride of
// a candidate. Therefore, finding the immediate basis of a candidate boils down
// to one hash-table look up.
void StraightLineStrengthReduce::allocateCandidatesAndFindBasis(
    Candidate::Kind CT, const SCEV *B, ConstantInt *Idx, Value *S,
    Instruction *I) {
  Candidate C(CT, B, Idx, S, I);
  // SLSR can complicate an instruction in two cases:
  //
  // 1. If we can fold I into an addressing mode, computing I is likely free or
  //    takes only one instruction.
  //
  // 2. I is already in a simplest form. For example, when
  //      X = B + 8 * S
  //      Y = B + S,
  //    rewriting Y to X - 7 * S is probably a bad idea.
  //
  // In the above cases, we still add I to the candidate list so that I can be
  // the basis of other candidates, but we leave I's basis blank so that I
  // won't be rewritten.
  if (!isFoldable(C, TTI, DL) && !isSimplestForm(C)) {
    // Try to compute the immediate basis of C.
    unsigned NumIterations = 0;
    // Limit the scan radius to avoid running in quadratic time.
    static const unsigned MaxNumIterations = 50;
    // Scan backwards so the most recently seen (and thus closest in the DFS
    // order) candidates are considered first.
    for (auto Basis = Candidates.rbegin();
         Basis != Candidates.rend() && NumIterations < MaxNumIterations;
         ++Basis, ++NumIterations) {
      if (isBasisFor(*Basis, C)) {
        C.Basis = &(*Basis);
        break;
      }
    }
  }
  // Regardless of whether we find a basis for C, we need to push C to the
  // candidate list so that it can be the basis of other candidates.
  Candidates.push_back(C);
}
  335. void StraightLineStrengthReduce::allocateCandidatesAndFindBasis(
  336. Instruction *I) {
  337. switch (I->getOpcode()) {
  338. case Instruction::Add:
  339. allocateCandidatesAndFindBasisForAdd(I);
  340. break;
  341. case Instruction::Mul:
  342. allocateCandidatesAndFindBasisForMul(I);
  343. break;
  344. case Instruction::GetElementPtr:
  345. allocateCandidatesAndFindBasisForGEP(cast<GetElementPtrInst>(I));
  346. break;
  347. }
  348. }
  349. void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForAdd(
  350. Instruction *I) {
  351. // Try matching B + i * S.
  352. if (!isa<IntegerType>(I->getType()))
  353. return;
  354. assert(I->getNumOperands() == 2 && "isn't I an add?");
  355. Value *LHS = I->getOperand(0), *RHS = I->getOperand(1);
  356. allocateCandidatesAndFindBasisForAdd(LHS, RHS, I);
  357. if (LHS != RHS)
  358. allocateCandidatesAndFindBasisForAdd(RHS, LHS, I);
  359. }
// Given I = LHS + RHS, factors RHS into i * S and registers (LHS + i * S) as
// an Add candidate. Shifts by a constant are treated as multiplies by the
// corresponding power of two; otherwise the trivial factoring 1 * RHS is used.
void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForAdd(
    Value *LHS, Value *RHS, Instruction *I) {
  Value *S = nullptr;
  ConstantInt *Idx = nullptr;
  if (match(RHS, m_Mul(m_Value(S), m_ConstantInt(Idx)))) {
    // I = LHS + RHS = LHS + Idx * S
    allocateCandidatesAndFindBasis(Candidate::Add, SE->getSCEV(LHS), Idx, S, I);
  } else if (match(RHS, m_Shl(m_Value(S), m_ConstantInt(Idx)))) {
    // I = LHS + RHS = LHS + (S << Idx) = LHS + S * (1 << Idx)
    APInt One(Idx->getBitWidth(), 1);
    Idx = ConstantInt::get(Idx->getContext(), One << Idx->getValue());
    allocateCandidatesAndFindBasis(Candidate::Add, SE->getSCEV(LHS), Idx, S, I);
  } else {
    // At least, I = LHS + 1 * RHS
    ConstantInt *One = ConstantInt::get(cast<IntegerType>(I->getType()), 1);
    allocateCandidatesAndFindBasis(Candidate::Add, SE->getSCEV(LHS), One, RHS,
                                   I);
  }
}
  379. // Returns true if A matches B + C where C is constant.
  380. static bool matchesAdd(Value *A, Value *&B, ConstantInt *&C) {
  381. return (match(A, m_Add(m_Value(B), m_ConstantInt(C))) ||
  382. match(A, m_Add(m_ConstantInt(C), m_Value(B))));
  383. }
  384. // Returns true if A matches B | C where C is constant.
  385. static bool matchesOr(Value *A, Value *&B, ConstantInt *&C) {
  386. return (match(A, m_Or(m_Value(B), m_ConstantInt(C))) ||
  387. match(A, m_Or(m_ConstantInt(C), m_Value(B))));
  388. }
// Splits LHS into Base + Index and, if that succeeds, registers
// (Base + Index) * RHS as a Mul candidate. Falls back to the trivial split
// (LHS + 0) * RHS.
void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForMul(
    Value *LHS, Value *RHS, Instruction *I) {
  Value *B = nullptr;
  ConstantInt *Idx = nullptr;
  if (matchesAdd(LHS, B, Idx)) {
    // If LHS is in the form of "Base + Index", then I is in the form of
    // "(Base + Index) * RHS".
    allocateCandidatesAndFindBasis(Candidate::Mul, SE->getSCEV(B), Idx, RHS, I);
  } else if (matchesOr(LHS, B, Idx) && haveNoCommonBitsSet(B, Idx, *DL)) {
    // If LHS is in the form of "Base | Index" and Base and Index have no common
    // bits set, then
    //   Base | Index = Base + Index
    // and I is thus in the form of "(Base + Index) * RHS".
    allocateCandidatesAndFindBasis(Candidate::Mul, SE->getSCEV(B), Idx, RHS, I);
  } else {
    // Otherwise, at least try the form (LHS + 0) * RHS.
    ConstantInt *Zero = ConstantInt::get(cast<IntegerType>(I->getType()), 0);
    allocateCandidatesAndFindBasis(Candidate::Mul, SE->getSCEV(LHS), Zero, RHS,
                                   I);
  }
}
  410. void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForMul(
  411. Instruction *I) {
  412. // Try matching (B + i) * S.
  413. // TODO: we could extend SLSR to float and vector types.
  414. if (!isa<IntegerType>(I->getType()))
  415. return;
  416. assert(I->getNumOperands() == 2 && "isn't I a mul?");
  417. Value *LHS = I->getOperand(0), *RHS = I->getOperand(1);
  418. allocateCandidatesAndFindBasisForMul(LHS, RHS, I);
  419. if (LHS != RHS) {
  420. // Symmetrically, try to split RHS to Base + Index.
  421. allocateCandidatesAndFindBasisForMul(RHS, LHS, I);
  422. }
  423. }
// A helper that scales Idx by the GEP element size (in bytes) before
// registering the form as a GEP candidate.
void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP(
    const SCEV *B, ConstantInt *Idx, Value *S, uint64_t ElementSize,
    Instruction *I) {
  // I = B + sext(Idx *nsw S) * ElementSize
  //   = B + (sext(Idx) * sext(S)) * ElementSize
  //   = B + (sext(Idx) * ElementSize) * sext(S)
  // Casting to IntegerType is safe because we skipped vector GEPs.
  IntegerType *IntPtrTy = cast<IntegerType>(DL->getIntPtrType(I->getType()));
  // Fold Idx * ElementSize into a single pointer-width constant index.
  // NOTE(review): the signed multiply below can overflow int64_t for extreme
  // Idx/ElementSize combinations — presumably benign in practice; confirm.
  ConstantInt *ScaledIdx = ConstantInt::get(
      IntPtrTy, Idx->getSExtValue() * (int64_t)ElementSize, true);
  allocateCandidatesAndFindBasis(Candidate::GEP, B, ScaledIdx, S, I);
}
// Factors ArrayIdx into a product of a stride and a constant index, and
// registers each factoring as a GEP candidate via
// allocateCandidatesAndFindBasisForGEP.
void StraightLineStrengthReduce::factorArrayIndex(Value *ArrayIdx,
                                                  const SCEV *Base,
                                                  uint64_t ElementSize,
                                                  GetElementPtrInst *GEP) {
  // At least, ArrayIdx = ArrayIdx *nsw 1.
  allocateCandidatesAndFindBasisForGEP(
      Base, ConstantInt::get(cast<IntegerType>(ArrayIdx->getType()), 1),
      ArrayIdx, ElementSize, GEP);
  Value *LHS = nullptr;
  ConstantInt *RHS = nullptr;
  // One alternative is matching the SCEV of ArrayIdx instead of ArrayIdx
  // itself. This would allow us to handle the shl case for free. However,
  // matching SCEVs has two issues:
  //
  // 1. this would complicate rewriting because the rewriting procedure
  //    would have to translate SCEVs back to IR instructions. This translation
  //    is difficult when LHS is further evaluated to a composite SCEV.
  //
  // 2. ScalarEvolution is designed to be control-flow oblivious. It tends
  //    to strip nsw/nuw flags which are critical for SLSR to trace into
  //    sext'ed multiplication.
  if (match(ArrayIdx, m_NSWMul(m_Value(LHS), m_ConstantInt(RHS)))) {
    // Only nsw multiplies are matched: SLSR is currently unsafe if i * S may
    // overflow.
    // GEP = Base + sext(LHS *nsw RHS) * ElementSize
    allocateCandidatesAndFindBasisForGEP(Base, RHS, LHS, ElementSize, GEP);
  } else if (match(ArrayIdx, m_NSWShl(m_Value(LHS), m_ConstantInt(RHS)))) {
    // GEP = Base + sext(LHS <<nsw RHS) * ElementSize
    //     = Base + sext(LHS *nsw (1 << RHS)) * ElementSize
    APInt One(RHS->getBitWidth(), 1);
    ConstantInt *PowerOf2 =
        ConstantInt::get(RHS->getContext(), One << RHS->getValue());
    allocateCandidatesAndFindBasisForGEP(Base, PowerOf2, LHS, ElementSize, GEP);
  }
}
// Walks every array-typed index of GEP and, with that index conceptually
// zeroed out to form the base, tries to factor the index expression into
// stride * constant candidates.
void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP(
    GetElementPtrInst *GEP) {
  // TODO: handle vector GEPs
  if (GEP->getType()->isVectorTy())
    return;
  SmallVector<const SCEV *, 4> IndexExprs;
  for (Use &Idx : GEP->indices())
    IndexExprs.push_back(SE->getSCEV(Idx));
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    // Struct field offsets are fixed constants; only array-style indices can
    // be strength-reduced.
    if (GTI.isStruct())
      continue;
    const SCEV *OrigIndexExpr = IndexExprs[I - 1];
    // Temporarily zero out the current index so getGEPExpr computes the
    // address of GEP with this index removed.
    IndexExprs[I - 1] = SE->getZero(OrigIndexExpr->getType());
    // The base of this candidate is GEP's base plus the offsets of all
    // indices except this current one.
    const SCEV *BaseExpr = SE->getGEPExpr(cast<GEPOperator>(GEP), IndexExprs);
    Value *ArrayIdx = GEP->getOperand(I);
    uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
    // Skip factoring if ArrayIdx is wider than the pointer size, because
    // ArrayIdx is implicitly truncated to the pointer size.
    if (ArrayIdx->getType()->getIntegerBitWidth() <=
        DL->getPointerSizeInBits(GEP->getAddressSpace())) {
      factorArrayIndex(ArrayIdx, BaseExpr, ElementSize, GEP);
    }
    // When ArrayIdx is the sext of a value, we try to factor that value as
    // well. Handling this case is important because array indices are
    // typically sign-extended to the pointer size.
    Value *TruncatedArrayIdx = nullptr;
    // As above, skip factoring if TruncatedArrayIdx is wider than the pointer
    // size, because TruncatedArrayIdx is implicitly truncated to the pointer
    // size.
    if (match(ArrayIdx, m_SExt(m_Value(TruncatedArrayIdx))) &&
        TruncatedArrayIdx->getType()->getIntegerBitWidth() <=
            DL->getPointerSizeInBits(GEP->getAddressSpace())) {
      factorArrayIndex(TruncatedArrayIdx, BaseExpr, ElementSize, GEP);
    }
    // Restore the original index expression for the next iteration.
    IndexExprs[I - 1] = OrigIndexExpr;
  }
}
  509. // A helper function that unifies the bitwidth of A and B.
  510. static void unifyBitWidth(APInt &A, APInt &B) {
  511. if (A.getBitWidth() < B.getBitWidth())
  512. A = A.sext(B.getBitWidth());
  513. else if (A.getBitWidth() > B.getBitWidth())
  514. B = B.sext(A.getBitWidth());
  515. }
// Emits code computing the "bump" from Basis to C, i.e. (i' - i) * S,
// preferring the cheapest form: S itself, -S, a shift, a negated shift, or a
// full multiply. For GEP candidates the byte offset is converted to element
// units when divisible; otherwise BumpWithUglyGEP is set so the caller bumps
// through an i8 GEP instead.
Value *StraightLineStrengthReduce::emitBump(const Candidate &Basis,
                                            const Candidate &C,
                                            IRBuilder<> &Builder,
                                            const DataLayout *DL,
                                            bool &BumpWithUglyGEP) {
  APInt Idx = C.Index->getValue(), BasisIdx = Basis.Index->getValue();
  // The two indices may have different widths; sign-extend the narrower one.
  unifyBitWidth(Idx, BasisIdx);
  APInt IndexOffset = Idx - BasisIdx;
  BumpWithUglyGEP = false;
  if (Basis.CandidateKind == Candidate::GEP) {
    // GEP indices were pre-scaled to bytes; try to express the offset in
    // whole elements of the basis GEP's result element type.
    APInt ElementSize(
        IndexOffset.getBitWidth(),
        DL->getTypeAllocSize(
            cast<GetElementPtrInst>(Basis.Ins)->getResultElementType()));
    APInt Q, R;
    APInt::sdivrem(IndexOffset, ElementSize, Q, R);
    if (R == 0)
      IndexOffset = Q;
    else
      // Not evenly divisible: the caller must bump via an i8 ("ugly") GEP.
      BumpWithUglyGEP = true;
  }
  // Compute Bump = C - Basis = (i' - i) * S.
  // Common case 1: if (i' - i) is 1, Bump = S.
  if (IndexOffset == 1)
    return C.Stride;
  // Common case 2: if (i' - i) is -1, Bump = -S.
  if (IndexOffset.isAllOnes())
    return Builder.CreateNeg(C.Stride);
  // Otherwise, Bump = (i' - i) * sext/trunc(S). Note that (i' - i) and S may
  // have different bit widths.
  IntegerType *DeltaType =
      IntegerType::get(Basis.Ins->getContext(), IndexOffset.getBitWidth());
  Value *ExtendedStride = Builder.CreateSExtOrTrunc(C.Stride, DeltaType);
  if (IndexOffset.isPowerOf2()) {
    // If (i' - i) is a power of 2, Bump = sext/trunc(S) << log(i' - i).
    ConstantInt *Exponent = ConstantInt::get(DeltaType, IndexOffset.logBase2());
    return Builder.CreateShl(ExtendedStride, Exponent);
  }
  if (IndexOffset.isNegatedPowerOf2()) {
    // If (i - i') is a power of 2, Bump = -(sext/trunc(S) << log(i - i')).
    ConstantInt *Exponent =
        ConstantInt::get(DeltaType, (-IndexOffset).logBase2());
    return Builder.CreateNeg(Builder.CreateShl(ExtendedStride, Exponent));
  }
  // General case: an explicit multiply by the constant delta.
  Constant *Delta = ConstantInt::get(DeltaType, IndexOffset);
  return Builder.CreateMul(ExtendedStride, Delta);
}
// Rewrites candidate C as Basis + Bump (for Add/Mul) or as a GEP off Basis
// (for GEP candidates), replaces all uses of C.Ins, and unlinks C.Ins from its
// block. Actual deletion is deferred to the end of runOnFunction.
void StraightLineStrengthReduce::rewriteCandidateWithBasis(
    const Candidate &C, const Candidate &Basis) {
  assert(C.CandidateKind == Basis.CandidateKind && C.Base == Basis.Base &&
         C.Stride == Basis.Stride);
  // We run rewriteCandidateWithBasis on all candidates in a post-order, so the
  // basis of a candidate cannot be unlinked before the candidate.
  assert(Basis.Ins->getParent() != nullptr && "the basis is unlinked");
  // An instruction can correspond to multiple candidates. Therefore, instead of
  // simply deleting an instruction when we rewrite it, we mark its parent as
  // nullptr (i.e. unlink it) so that we can skip the candidates whose
  // instruction is already rewritten.
  if (!C.Ins->getParent())
    return;
  IRBuilder<> Builder(C.Ins);
  bool BumpWithUglyGEP;
  Value *Bump = emitBump(Basis, C, Builder, DL, BumpWithUglyGEP);
  Value *Reduced = nullptr; // equivalent to but weaker than C.Ins
  switch (C.CandidateKind) {
  case Candidate::Add:
  case Candidate::Mul: {
    // C = Basis + Bump
    Value *NegBump;
    if (match(Bump, m_Neg(m_Value(NegBump)))) {
      // If Bump is a neg instruction, emit C = Basis - (-Bump).
      Reduced = Builder.CreateSub(Basis.Ins, NegBump);
      // We only use the negative argument of Bump, and Bump itself may be
      // trivially dead.
      RecursivelyDeleteTriviallyDeadInstructions(Bump);
    } else {
      // It's tempting to preserve nsw on Bump and/or Reduced. However, it's
      // usually unsound, e.g.,
      //
      //   X = (-2 +nsw 1) *nsw INT_MAX
      //   Y = (-2 +nsw 3) *nsw INT_MAX
      //   =>
      //   Y = X + 2 * INT_MAX
      //
      // Neither + and * in the resultant expression are nsw.
      Reduced = Builder.CreateAdd(Basis.Ins, Bump);
    }
    break;
  }
  case Candidate::GEP: {
    Type *IntPtrTy = DL->getIntPtrType(C.Ins->getType());
    bool InBounds = cast<GetElementPtrInst>(C.Ins)->isInBounds();
    if (BumpWithUglyGEP) {
      // The bump is a raw byte offset, not a whole number of elements:
      // C = (char *)Basis + Bump
      unsigned AS = Basis.Ins->getType()->getPointerAddressSpace();
      Type *CharTy = Type::getInt8PtrTy(Basis.Ins->getContext(), AS);
      Reduced = Builder.CreateBitCast(Basis.Ins, CharTy);
      Reduced =
          Builder.CreateGEP(Builder.getInt8Ty(), Reduced, Bump, "", InBounds);
      Reduced = Builder.CreateBitCast(Reduced, C.Ins->getType());
    } else {
      // C = gep Basis, Bump
      // Canonicalize bump to pointer size.
      Bump = Builder.CreateSExtOrTrunc(Bump, IntPtrTy);
      Reduced = Builder.CreateGEP(
          cast<GetElementPtrInst>(Basis.Ins)->getResultElementType(),
          Basis.Ins, Bump, "", InBounds);
    }
    break;
  }
  default:
    llvm_unreachable("C.CandidateKind is invalid");
  };
  Reduced->takeName(C.Ins);
  C.Ins->replaceAllUsesWith(Reduced);
  // Unlink C.Ins so that we can skip other candidates also corresponding to
  // C.Ins. The actual deletion is postponed to the end of runOnFunction.
  C.Ins->removeFromParent();
  UnlinkedInstructions.push_back(C.Ins);
}
  637. bool StraightLineStrengthReduceLegacyPass::runOnFunction(Function &F) {
  638. if (skipFunction(F))
  639. return false;
  640. auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  641. auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  642. auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  643. return StraightLineStrengthReduce(DL, DT, SE, TTI).runOnFunction(F);
  644. }
// Main driver: collect candidates over the whole function, rewrite those with
// a basis, then clean up the unlinked instructions. Returns true if anything
// was changed.
bool StraightLineStrengthReduce::runOnFunction(Function &F) {
  // Traverse the dominator tree in the depth-first order. This order makes sure
  // all bases of a candidate are in Candidates when we process it.
  for (const auto Node : depth_first(DT))
    for (auto &I : *(Node->getBlock()))
      allocateCandidatesAndFindBasis(&I);
  // Rewrite candidates in the reverse depth-first order. This order makes sure
  // a candidate being rewritten is not a basis for any other candidate.
  while (!Candidates.empty()) {
    const Candidate &C = Candidates.back();
    if (C.Basis != nullptr) {
      rewriteCandidateWithBasis(C, *C.Basis);
    }
    Candidates.pop_back();
  }
  // Delete all unlinked instructions.
  for (auto *UnlinkedInst : UnlinkedInstructions) {
    // Drop operand references first so the operands' def chains become
    // trivially dead and can be cleaned up recursively.
    for (unsigned I = 0, E = UnlinkedInst->getNumOperands(); I != E; ++I) {
      Value *Op = UnlinkedInst->getOperand(I);
      UnlinkedInst->setOperand(I, nullptr);
      RecursivelyDeleteTriviallyDeadInstructions(Op);
    }
    UnlinkedInst->deleteValue();
  }
  bool Ret = !UnlinkedInstructions.empty();
  UnlinkedInstructions.clear();
  return Ret;
}
  673. namespace llvm {
  674. PreservedAnalyses
  675. StraightLineStrengthReducePass::run(Function &F, FunctionAnalysisManager &AM) {
  676. const DataLayout *DL = &F.getParent()->getDataLayout();
  677. auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  678. auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
  679. auto *TTI = &AM.getResult<TargetIRAnalysis>(F);
  680. if (!StraightLineStrengthReduce(DL, DT, SE, TTI).runOnFunction(F))
  681. return PreservedAnalyses::all();
  682. PreservedAnalyses PA;
  683. PA.preserveSet<CFGAnalyses>();
  684. PA.preserve<DominatorTreeAnalysis>();
  685. PA.preserve<ScalarEvolutionAnalysis>();
  686. PA.preserve<TargetIRAnalysis>();
  687. return PA;
  688. }
  689. } // namespace llvm