//===- GVNSink.cpp - sink expressions into successors ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file GVNSink.cpp
/// This pass attempts to sink instructions into successors, reducing static
/// instruction count and enabling if-conversion.
///
/// We use a variant of global value numbering to decide what can be sunk.
/// Consider:
///
/// [ %a1 = add i32 %b, 1 ]   [ %c1 = add i32 %d, 1 ]
/// [ %a2 = xor i32 %a1, 1 ]  [ %c2 = xor i32 %c1, 1 ]
///                  \           /
///            [ %e = phi i32 %a2, %c2 ]
///            [ add i32 %e, 4         ]
///
/// GVN would number %a1 and %c1 differently because they compute different
/// results - the VN of an instruction is a function of its opcode and the
/// transitive closure of its operands. This is the key property for hoisting
/// and CSE.
///
/// What we want when sinking however is for a numbering that is a function of
/// the *uses* of an instruction, which allows us to answer the question "if I
/// replace %a1 with %c1, will it contribute in an equivalent way to all
/// successive instructions?". The PostValueTable class in GVN provides this
/// mapping.
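///
/// In the diagram above, a use-based numbering assigns %a1 and %c1 the same
/// number: each is consumed only by an xor of the same shape, and those xors
/// in turn feed the same phi. Sinking then amounts to replacing the pair with
/// one instruction in the common successor, phi-ing any operands that differ.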
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ArrayRecycler.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Scalar/GVNExpression.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "gvn-sink"

STATISTIC(NumRemoved, "Number of instructions removed");

namespace llvm {
namespace GVNExpression {

LLVM_DUMP_METHOD void Expression::dump() const {
  print(dbgs());
  dbgs() << "\n";
}

} // end namespace GVNExpression
} // end namespace llvm
namespace {

static bool isMemoryInst(const Instruction *I) {
  return isa<LoadInst>(I) || isa<StoreInst>(I) ||
         (isa<InvokeInst>(I) && !cast<InvokeInst>(I)->doesNotAccessMemory()) ||
         (isa<CallInst>(I) && !cast<CallInst>(I)->doesNotAccessMemory());
}

/// Iterates through instructions in a set of blocks in reverse order from the
/// first non-terminator. For example (assume all blocks have size n):
///   LockstepReverseIterator I([B1, B2, B3]);
///   *I-- = [B1[n], B2[n], B3[n]];
///   *I-- = [B1[n-1], B2[n-1], B3[n-1]];
///   *I-- = [B1[n-2], B2[n-2], B3[n-2]];
///   ...
///
/// It continues until all blocks have been exhausted. Use \c getActiveBlocks()
/// to determine which blocks are still going and the order they appear in the
/// list returned by operator*.
class LockstepReverseIterator {
  ArrayRef<BasicBlock *> Blocks;
  SmallSetVector<BasicBlock *, 4> ActiveBlocks;
  SmallVector<Instruction *, 4> Insts;
  bool Fail;

public:
  LockstepReverseIterator(ArrayRef<BasicBlock *> Blocks) : Blocks(Blocks) {
    reset();
  }

  void reset() {
    Fail = false;
    ActiveBlocks.clear();
    for (BasicBlock *BB : Blocks)
      ActiveBlocks.insert(BB);
    Insts.clear();
    for (BasicBlock *BB : Blocks) {
      if (BB->size() <= 1) {
        // Block wasn't big enough - only contained a terminator.
        ActiveBlocks.remove(BB);
        continue;
      }
      Insts.push_back(BB->getTerminator()->getPrevNode());
    }
    if (Insts.empty())
      Fail = true;
  }

  bool isValid() const { return !Fail; }
  ArrayRef<Instruction *> operator*() const { return Insts; }

  // Note: This needs to return a SmallSetVector as the elements of
  // ActiveBlocks will be later copied to Blocks using std::copy. The
  // resultant order of elements in Blocks needs to be deterministic.
  // Using SmallPtrSet instead causes non-deterministic order while
  // copying. And we cannot simply sort Blocks as they need to match the
  // corresponding Values.
  SmallSetVector<BasicBlock *, 4> &getActiveBlocks() { return ActiveBlocks; }

  void restrictToBlocks(SmallSetVector<BasicBlock *, 4> &Blocks) {
    for (auto II = Insts.begin(); II != Insts.end();) {
      if (!llvm::is_contained(Blocks, (*II)->getParent())) {
        ActiveBlocks.remove((*II)->getParent());
        II = Insts.erase(II);
      } else {
        ++II;
      }
    }
  }

  void operator--() {
    if (Fail)
      return;
    SmallVector<Instruction *, 4> NewInsts;
    for (auto *Inst : Insts) {
      if (Inst == &Inst->getParent()->front())
        ActiveBlocks.remove(Inst->getParent());
      else
        NewInsts.push_back(Inst->getPrevNode());
    }
    if (NewInsts.empty()) {
      Fail = true;
      return;
    }
    Insts = NewInsts;
  }
};
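
// A minimal sketch of the driving idiom (mirroring what sinkBB does below):
// walk all predecessor blocks backwards in lockstep, analyzing one "row" of
// instructions per step, until some block runs out:
//
//   LockstepReverseIterator LRI(Preds);
//   while (LRI.isValid()) {
//     ArrayRef<Instruction *> Row = *LRI; // one inst per still-active block
//     // ... analyze Row as a sinking candidate ...
//     --LRI;
//   }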

//===----------------------------------------------------------------------===//

/// Candidate solution for sinking. There may be different ways to
/// sink instructions, differing in the number of instructions sunk,
/// the number of predecessors sunk from and the number of PHIs
/// required.
struct SinkingInstructionCandidate {
  unsigned NumBlocks;
  unsigned NumInstructions;
  unsigned NumPHIs;
  unsigned NumMemoryInsts;
  int Cost = -1;
  SmallVector<BasicBlock *, 4> Blocks;

  void calculateCost(unsigned NumOrigPHIs, unsigned NumOrigBlocks) {
    unsigned NumExtraPHIs = NumPHIs - NumOrigPHIs;
    unsigned SplitEdgeCost = (NumOrigBlocks > NumBlocks) ? 2 : 0;
    Cost = (NumInstructions * (NumBlocks - 1)) -
           (NumExtraPHIs *
            NumExtraPHIs) // PHIs are expensive, so make sure they're worth it.
           - SplitEdgeCost;
  }
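
  // Worked example (illustrative, using the formula above): sinking
  // NumInstructions = 3 from NumBlocks = 2 predecessors while introducing
  // one extra PHI and no edge split gives
  //   Cost = 3 * (2 - 1) - 1 * 1 - 0 = 2,
  // a profitable candidate. The quadratic NumExtraPHIs term means candidates
  // that demand many new PHIs are penalized quickly.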

  bool operator>(const SinkingInstructionCandidate &Other) const {
    return Cost > Other.Cost;
  }
};

#ifndef NDEBUG
raw_ostream &operator<<(raw_ostream &OS,
                        const SinkingInstructionCandidate &C) {
  OS << "<Candidate Cost=" << C.Cost << " #Blocks=" << C.NumBlocks
     << " #Insts=" << C.NumInstructions << " #PHIs=" << C.NumPHIs << ">";
  return OS;
}
#endif

//===----------------------------------------------------------------------===//

/// Describes a PHI node that may or may not exist. These track the PHIs
/// that must be created if we sank a sequence of instructions. It provides
/// a hash function for efficient equality comparisons.
class ModelledPHI {
  SmallVector<Value *, 4> Values;
  SmallVector<BasicBlock *, 4> Blocks;

public:
  ModelledPHI() = default;

  ModelledPHI(const PHINode *PN) {
    // BasicBlock comes first so we sort by basic block pointer order,
    // then by value pointer order.
    SmallVector<std::pair<BasicBlock *, Value *>, 4> Ops;
    for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I)
      Ops.push_back({PN->getIncomingBlock(I), PN->getIncomingValue(I)});
    llvm::sort(Ops);
    for (auto &P : Ops) {
      Blocks.push_back(P.first);
      Values.push_back(P.second);
    }
  }
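
  // For example (illustrative names), modelling
  //   %e = phi i32 [ %a2, %bb1 ], [ %c2, %bb2 ]
  // records Blocks = [%bb1, %bb2] and Values = [%a2, %c2], with both arrays
  // kept in the same block-sorted order so that two ModelledPHIs built from
  // equivalent PHIs compare equal regardless of the textual order of their
  // incoming entries.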

  /// Create a dummy ModelledPHI that will compare unequal to any other
  /// ModelledPHI without the same ID.
  /// \note This is specifically for DenseMapInfo - do not use this!
  static ModelledPHI createDummy(size_t ID) {
    ModelledPHI M;
    M.Values.push_back(reinterpret_cast<Value *>(ID));
    return M;
  }

  /// Create a PHI from an array of incoming values and incoming blocks.
  template <typename VArray, typename BArray>
  ModelledPHI(const VArray &V, const BArray &B) {
    llvm::copy(V, std::back_inserter(Values));
    llvm::copy(B, std::back_inserter(Blocks));
  }

  /// Create a PHI from [I[OpNum] for I in Insts].
  template <typename BArray>
  ModelledPHI(ArrayRef<Instruction *> Insts, unsigned OpNum, const BArray &B) {
    llvm::copy(B, std::back_inserter(Blocks));
    for (auto *I : Insts)
      Values.push_back(I->getOperand(OpNum));
  }

  /// Restrict the PHI's contents down to only \c NewBlocks.
  /// \c NewBlocks must be a subset of \c this->Blocks.
  void restrictToBlocks(const SmallSetVector<BasicBlock *, 4> &NewBlocks) {
    auto BI = Blocks.begin();
    auto VI = Values.begin();
    while (BI != Blocks.end()) {
      assert(VI != Values.end());
      if (!llvm::is_contained(NewBlocks, *BI)) {
        BI = Blocks.erase(BI);
        VI = Values.erase(VI);
      } else {
        ++BI;
        ++VI;
      }
    }
    assert(Blocks.size() == NewBlocks.size());
  }

  ArrayRef<Value *> getValues() const { return Values; }

  bool areAllIncomingValuesSame() const {
    return llvm::all_of(Values, [&](Value *V) { return V == Values[0]; });
  }

  bool areAllIncomingValuesSameType() const {
    return llvm::all_of(
        Values, [&](Value *V) { return V->getType() == Values[0]->getType(); });
  }

  bool areAnyIncomingValuesConstant() const {
    return llvm::any_of(Values, [&](Value *V) { return isa<Constant>(V); });
  }

  // Hash functor
  unsigned hash() const {
    return (unsigned)hash_combine_range(Values.begin(), Values.end());
  }

  bool operator==(const ModelledPHI &Other) const {
    return Values == Other.Values && Blocks == Other.Blocks;
  }
};

template <typename ModelledPHI> struct DenseMapInfo {
  static inline ModelledPHI &getEmptyKey() {
    static ModelledPHI Dummy = ModelledPHI::createDummy(0);
    return Dummy;
  }

  static inline ModelledPHI &getTombstoneKey() {
    static ModelledPHI Dummy = ModelledPHI::createDummy(1);
    return Dummy;
  }

  static unsigned getHashValue(const ModelledPHI &V) { return V.hash(); }

  static bool isEqual(const ModelledPHI &LHS, const ModelledPHI &RHS) {
    return LHS == RHS;
  }
};

using ModelledPHISet = DenseSet<ModelledPHI, DenseMapInfo<ModelledPHI>>;

//===----------------------------------------------------------------------===//
//                             ValueTable
//===----------------------------------------------------------------------===//
// This is a value number table where the value number is a function of the
// *uses* of a value, rather than its operands. Thus, if VN(A) == VN(B) we know
// that the program would be equivalent if we replaced A with PHI(A, B).
//===----------------------------------------------------------------------===//
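// A concrete illustration of that property (hypothetical IR): given
//   L:  %a = add i32 %x, 1        R:  %b = add i32 %y, 1
// in two predecessors of a common successor, VN(%a) == VN(%b) exactly when
// every user of %a matches the corresponding user of %b. The pair can then be
// replaced by a single sunk add fed by %p = phi i32 [ %x, %L ], [ %y, %R ].
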
/// A GVN expression describing how an instruction is used. The operands
/// field of BasicExpression is used to store uses, not operands.
///
/// This class also contains fields for discriminators used when determining
/// equivalence of instructions with side effects.
class InstructionUseExpr : public GVNExpression::BasicExpression {
  unsigned MemoryUseOrder = -1;
  bool Volatile = false;
  ArrayRef<int> ShuffleMask;

public:
  InstructionUseExpr(Instruction *I, ArrayRecycler<Value *> &R,
                     BumpPtrAllocator &A)
      : GVNExpression::BasicExpression(I->getNumUses()) {
    allocateOperands(R, A);
    setOpcode(I->getOpcode());
    setType(I->getType());
    if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I))
      ShuffleMask = SVI->getShuffleMask().copy(A);
    for (auto &U : I->uses())
      op_push_back(U.getUser());
    llvm::sort(op_begin(), op_end());
  }

  void setMemoryUseOrder(unsigned MUO) { MemoryUseOrder = MUO; }
  void setVolatile(bool V) { Volatile = V; }

  hash_code getHashValue() const override {
    return hash_combine(GVNExpression::BasicExpression::getHashValue(),
                        MemoryUseOrder, Volatile, ShuffleMask);
  }

  template <typename Function> hash_code getHashValue(Function MapFn) {
    hash_code H = hash_combine(getOpcode(), getType(), MemoryUseOrder, Volatile,
                               ShuffleMask);
    for (auto *V : operands())
      H = hash_combine(H, MapFn(V));
    return H;
  }
};

class ValueTable {
  DenseMap<Value *, uint32_t> ValueNumbering;
  DenseMap<GVNExpression::Expression *, uint32_t> ExpressionNumbering;
  DenseMap<size_t, uint32_t> HashNumbering;
  BumpPtrAllocator Allocator;
  ArrayRecycler<Value *> Recycler;
  uint32_t nextValueNumber = 1;

  /// Create an expression for I based on its opcode and its uses. If I
  /// touches or reads memory, the expression is also based upon its memory
  /// order - see \c getMemoryUseOrder().
  InstructionUseExpr *createExpr(Instruction *I) {
    InstructionUseExpr *E =
        new (Allocator) InstructionUseExpr(I, Recycler, Allocator);
    if (isMemoryInst(I))
      E->setMemoryUseOrder(getMemoryUseOrder(I));
    if (CmpInst *C = dyn_cast<CmpInst>(I)) {
      CmpInst::Predicate Predicate = C->getPredicate();
      E->setOpcode((C->getOpcode() << 8) | Predicate);
    }
    return E;
  }
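
  // The opcode packing above gives each (compare opcode, predicate) pair a
  // distinct synthetic opcode: e.g. an icmp eq and an icmp ne over identical
  // uses would otherwise look alike, but (Opcode << 8) | Predicate keeps
  // their expressions - and hence their value numbers - apart.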

  /// Helper to compute the value number for a memory instruction
  /// (LoadInst/StoreInst), including checking the memory ordering and
  /// volatility.
  template <class Inst> InstructionUseExpr *createMemoryExpr(Inst *I) {
    if (isStrongerThanUnordered(I->getOrdering()) || I->isAtomic())
      return nullptr;
    InstructionUseExpr *E = createExpr(I);
    E->setVolatile(I->isVolatile());
    return E;
  }

public:
  ValueTable() = default;

  /// Returns the value number for the specified value, assigning
  /// it a new number if it did not have one before.
  uint32_t lookupOrAdd(Value *V) {
    auto VI = ValueNumbering.find(V);
    if (VI != ValueNumbering.end())
      return VI->second;

    if (!isa<Instruction>(V)) {
      ValueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
    }

    Instruction *I = cast<Instruction>(V);
    InstructionUseExpr *exp = nullptr;
    switch (I->getOpcode()) {
    case Instruction::Load:
      exp = createMemoryExpr(cast<LoadInst>(I));
      break;
    case Instruction::Store:
      exp = createMemoryExpr(cast<StoreInst>(I));
      break;
    case Instruction::Call:
    case Instruction::Invoke:
    case Instruction::FNeg:
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::ICmp:
    case Instruction::FCmp:
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
    case Instruction::AddrSpaceCast:
    case Instruction::Select:
    case Instruction::ExtractElement:
    case Instruction::InsertElement:
    case Instruction::ShuffleVector:
    case Instruction::InsertValue:
    case Instruction::GetElementPtr:
      exp = createExpr(I);
      break;
    default:
      break;
    }

    if (!exp) {
      ValueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
    }

    uint32_t e = ExpressionNumbering[exp];
    if (!e) {
      hash_code H = exp->getHashValue([=](Value *V) { return lookupOrAdd(V); });
      auto I = HashNumbering.find(H);
      if (I != HashNumbering.end()) {
        e = I->second;
      } else {
        e = nextValueNumber++;
        HashNumbering[H] = e;
        ExpressionNumbering[exp] = e;
      }
    }
    ValueNumbering[V] = e;
    return e;
  }

  /// Returns the value number of the specified value. Fails if the value has
  /// not yet been numbered.
  uint32_t lookup(Value *V) const {
    auto VI = ValueNumbering.find(V);
    assert(VI != ValueNumbering.end() && "Value not numbered?");
    return VI->second;
  }

  /// Removes all value numberings and resets the value table.
  void clear() {
    ValueNumbering.clear();
    ExpressionNumbering.clear();
    HashNumbering.clear();
    Recycler.clear(Allocator);
    nextValueNumber = 1;
  }

  /// \c Inst uses or touches memory. Return an ID describing the memory state
  /// at \c Inst such that if getMemoryUseOrder(I1) == getMemoryUseOrder(I2),
  /// the exact same memory operations happen after I1 and I2.
  ///
  /// This is a very hard problem in general, so we use domain-specific
  /// knowledge that we only ever check for equivalence between blocks sharing a
  /// single immediate successor that is common, and when determining if I1 ==
  /// I2 we will have already determined that next(I1) == next(I2). This
  /// inductive property allows us to simply return the value number of the next
  /// instruction that defines memory.
  uint32_t getMemoryUseOrder(Instruction *Inst) {
    auto *BB = Inst->getParent();
    for (auto I = std::next(Inst->getIterator()), E = BB->end();
         I != E && !I->isTerminator(); ++I) {
      if (!isMemoryInst(&*I))
        continue;
      if (isa<LoadInst>(&*I))
        continue;
      CallInst *CI = dyn_cast<CallInst>(&*I);
      if (CI && CI->onlyReadsMemory())
        continue;
      InvokeInst *II = dyn_cast<InvokeInst>(&*I);
      if (II && II->onlyReadsMemory())
        continue;
      return lookupOrAdd(&*I);
    }
    return 0;
  }
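
  // Example (hypothetical IR): in two predecessors each ending with
  //   L: store i32 %v, ptr %p       R: store i32 %w, ptr %q
  //      store i32 1, ptr %g           store i32 1, ptr %g
  // the leading stores take the memory-use order of the trailing store to %g
  // in their own block. Because the analysis walks bottom-up, those trailing
  // stores were already numbered equal, so the leading stores can compare
  // equal too - the inductive property described above.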
};

//===----------------------------------------------------------------------===//

class GVNSink {
public:
  GVNSink() = default;

  bool run(Function &F) {
    LLVM_DEBUG(dbgs() << "GVNSink: running on function @" << F.getName()
                      << "\n");

    unsigned NumSunk = 0;
    ReversePostOrderTraversal<Function *> RPOT(&F);
    for (auto *N : RPOT)
      NumSunk += sinkBB(N);

    return NumSunk > 0;
  }

private:
  ValueTable VN;

  bool shouldAvoidSinkingInstruction(Instruction *I) {
    // These instructions may change or break semantics if moved.
    if (isa<PHINode>(I) || I->isEHPad() || isa<AllocaInst>(I) ||
        I->getType()->isTokenTy())
      return true;
    return false;
  }

  /// The main heuristic function. Analyze the set of instructions pointed to by
  /// LRI and return a candidate solution if these instructions can be sunk, or
  /// None otherwise.
  Optional<SinkingInstructionCandidate> analyzeInstructionForSinking(
      LockstepReverseIterator &LRI, unsigned &InstNum, unsigned &MemoryInstNum,
      ModelledPHISet &NeededPHIs, SmallPtrSetImpl<Value *> &PHIContents);

  /// Create a ModelledPHI for each PHI in BB, adding to PHIs.
  void analyzeInitialPHIs(BasicBlock *BB, ModelledPHISet &PHIs,
                          SmallPtrSetImpl<Value *> &PHIContents) {
    for (PHINode &PN : BB->phis()) {
      auto MPHI = ModelledPHI(&PN);
      PHIs.insert(MPHI);
      for (auto *V : MPHI.getValues())
        PHIContents.insert(V);
    }
  }

  /// The main instruction sinking driver. Set up state and try and sink
  /// instructions into BBEnd from its predecessors.
  unsigned sinkBB(BasicBlock *BBEnd);

  /// Perform the actual mechanics of sinking an instruction from Blocks into
  /// BBEnd, which is their only successor.
  void sinkLastInstruction(ArrayRef<BasicBlock *> Blocks, BasicBlock *BBEnd);

  /// Remove PHIs that all have the same incoming value.
  void foldPointlessPHINodes(BasicBlock *BB) {
    auto I = BB->begin();
    while (PHINode *PN = dyn_cast<PHINode>(I++)) {
      if (!llvm::all_of(PN->incoming_values(), [&](const Value *V) {
            return V == PN->getIncomingValue(0);
          }))
        continue;
      if (PN->getIncomingValue(0) != PN)
        PN->replaceAllUsesWith(PN->getIncomingValue(0));
      else
        PN->replaceAllUsesWith(UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }
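
  // For instance, after sinking, a node such as
  //   %p = phi i32 [ %x, %a ], [ %x, %b ]
  // carries no information and is replaced by %x outright, while a
  // self-referential
  //   %q = phi i32 [ %q, %a ], [ %q, %b ]
  // has no other source and is replaced with undef, as the code above does.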
};

Optional<SinkingInstructionCandidate> GVNSink::analyzeInstructionForSinking(
    LockstepReverseIterator &LRI, unsigned &InstNum, unsigned &MemoryInstNum,
    ModelledPHISet &NeededPHIs, SmallPtrSetImpl<Value *> &PHIContents) {
  auto Insts = *LRI;
  LLVM_DEBUG(dbgs() << " -- Analyzing instruction set: [\n";
             for (auto *I : Insts) { I->dump(); }
             dbgs() << " ]\n";);

  DenseMap<uint32_t, unsigned> VNums;
  for (auto *I : Insts) {
    uint32_t N = VN.lookupOrAdd(I);
    LLVM_DEBUG(dbgs() << " VN=" << Twine::utohexstr(N) << " for" << *I << "\n");
    if (N == ~0U)
      return None;
    VNums[N]++;
  }
  unsigned VNumToSink =
      std::max_element(VNums.begin(), VNums.end(),
                       [](const std::pair<uint32_t, unsigned> &I,
                          const std::pair<uint32_t, unsigned> &J) {
                         return I.second < J.second;
                       })
          ->first;

  if (VNums[VNumToSink] == 1)
    // Can't sink anything!
    return None;

  // Now restrict the number of incoming blocks down to only those with
  // VNumToSink.
  auto &ActivePreds = LRI.getActiveBlocks();
  unsigned InitialActivePredSize = ActivePreds.size();
  SmallVector<Instruction *, 4> NewInsts;
  for (auto *I : Insts) {
    if (VN.lookup(I) != VNumToSink)
      ActivePreds.remove(I->getParent());
    else
      NewInsts.push_back(I);
  }
  for (auto *I : NewInsts)
    if (shouldAvoidSinkingInstruction(I))
      return None;

  // If we've restricted the incoming blocks, restrict all needed PHIs also
  // to that set.
  bool RecomputePHIContents = false;
  if (ActivePreds.size() != InitialActivePredSize) {
    ModelledPHISet NewNeededPHIs;
    for (auto P : NeededPHIs) {
      P.restrictToBlocks(ActivePreds);
      NewNeededPHIs.insert(P);
    }
    NeededPHIs = NewNeededPHIs;
    LRI.restrictToBlocks(ActivePreds);
    RecomputePHIContents = true;
  }

  // The sunk instruction's results.
  ModelledPHI NewPHI(NewInsts, ActivePreds);

  // Does sinking this instruction render previous PHIs redundant?
  if (NeededPHIs.erase(NewPHI))
    RecomputePHIContents = true;

  if (RecomputePHIContents) {
    // The needed PHIs have changed, so recompute the set of all needed
    // values.
    PHIContents.clear();
    for (auto &PHI : NeededPHIs)
      PHIContents.insert(PHI.getValues().begin(), PHI.getValues().end());
  }

  // Is this instruction required by a later PHI that doesn't match this PHI?
  // If so, we can't sink this instruction.
  for (auto *V : NewPHI.getValues())
    if (PHIContents.count(V))
      // V exists in this PHI, but the whole PHI is different to NewPHI
      // (else it would have been removed earlier). We cannot continue
      // because this isn't representable.
      return None;

  // Which operands need PHIs?
  // FIXME: If any of these fail, we should partition up the candidates to
  // try and continue making progress.
  Instruction *I0 = NewInsts[0];

  // If all instructions that are going to participate don't have the same
  // number of operands, we can't do any useful PHI analysis for all operands.
  auto hasDifferentNumOperands = [&I0](Instruction *I) {
    return I->getNumOperands() != I0->getNumOperands();
  };
  if (any_of(NewInsts, hasDifferentNumOperands))
    return None;

  for (unsigned OpNum = 0, E = I0->getNumOperands(); OpNum != E; ++OpNum) {
    ModelledPHI PHI(NewInsts, OpNum, ActivePreds);
    if (PHI.areAllIncomingValuesSame())
      continue;
    if (!canReplaceOperandWithVariable(I0, OpNum))
      // We can't create a PHI from this instruction!
      return None;
    if (NeededPHIs.count(PHI))
      continue;
    if (!PHI.areAllIncomingValuesSameType())
      return None;
    // Don't create indirect calls! The called value is the final operand.
    if ((isa<CallInst>(I0) || isa<InvokeInst>(I0)) && OpNum == E - 1 &&
        PHI.areAnyIncomingValuesConstant())
      return None;

    NeededPHIs.reserve(NeededPHIs.size());
    NeededPHIs.insert(PHI);
    PHIContents.insert(PHI.getValues().begin(), PHI.getValues().end());
  }

  if (isMemoryInst(NewInsts[0]))
    ++MemoryInstNum;

  SinkingInstructionCandidate Cand;
  Cand.NumInstructions = ++InstNum;
  Cand.NumMemoryInsts = MemoryInstNum;
  Cand.NumBlocks = ActivePreds.size();
  Cand.NumPHIs = NeededPHIs.size();
  append_range(Cand.Blocks, ActivePreds);

  return Cand;
}

unsigned GVNSink::sinkBB(BasicBlock *BBEnd) {
  LLVM_DEBUG(dbgs() << "GVNSink: running on basic block ";
             BBEnd->printAsOperand(dbgs()); dbgs() << "\n");

  SmallVector<BasicBlock *, 4> Preds;
  for (auto *B : predecessors(BBEnd)) {
    auto *T = B->getTerminator();
    if (isa<BranchInst>(T) || isa<SwitchInst>(T))
      Preds.push_back(B);
    else
      return 0;
  }
  if (Preds.size() < 2)
    return 0;
  llvm::sort(Preds);

  unsigned NumOrigPreds = Preds.size();
  // We can only sink instructions through unconditional branches.
  for (auto I = Preds.begin(); I != Preds.end();) {
    if ((*I)->getTerminator()->getNumSuccessors() != 1)
      I = Preds.erase(I);
    else
      ++I;
  }

  LockstepReverseIterator LRI(Preds);
  SmallVector<SinkingInstructionCandidate, 4> Candidates;
  unsigned InstNum = 0, MemoryInstNum = 0;
  ModelledPHISet NeededPHIs;
  SmallPtrSet<Value *, 4> PHIContents;
  analyzeInitialPHIs(BBEnd, NeededPHIs, PHIContents);
  unsigned NumOrigPHIs = NeededPHIs.size();

  while (LRI.isValid()) {
    auto Cand = analyzeInstructionForSinking(LRI, InstNum, MemoryInstNum,
                                             NeededPHIs, PHIContents);
    if (!Cand)
      break;
    Cand->calculateCost(NumOrigPHIs, Preds.size());
    Candidates.emplace_back(*Cand);
    --LRI;
  }

  llvm::stable_sort(Candidates, std::greater<SinkingInstructionCandidate>());
  LLVM_DEBUG(dbgs() << " -- Sinking candidates:\n";
             for (auto &C : Candidates) dbgs() << "  " << C << "\n";);

  // Pick the top candidate, as long as it is positive!
  if (Candidates.empty() || Candidates.front().Cost <= 0)
    return 0;

  auto C = Candidates.front();
  LLVM_DEBUG(dbgs() << " -- Sinking: " << C << "\n");
  BasicBlock *InsertBB = BBEnd;
  if (C.Blocks.size() < NumOrigPreds) {
    LLVM_DEBUG(dbgs() << " -- Splitting edge to ";
               BBEnd->printAsOperand(dbgs()); dbgs() << "\n");
    InsertBB = SplitBlockPredecessors(BBEnd, C.Blocks, ".gvnsink.split");
    if (!InsertBB) {
      LLVM_DEBUG(dbgs() << " -- FAILED to split edge!\n");
      // Edge couldn't be split.
      return 0;
    }
  }

  for (unsigned I = 0; I < C.NumInstructions; ++I)
    sinkLastInstruction(C.Blocks, InsertBB);

  return C.NumInstructions;
}

void GVNSink::sinkLastInstruction(ArrayRef<BasicBlock *> Blocks,
                                  BasicBlock *BBEnd) {
  SmallVector<Instruction *, 4> Insts;
  for (BasicBlock *BB : Blocks)
    Insts.push_back(BB->getTerminator()->getPrevNode());
  Instruction *I0 = Insts.front();

  SmallVector<Value *, 4> NewOperands;
  for (unsigned O = 0, E = I0->getNumOperands(); O != E; ++O) {
    bool NeedPHI = llvm::any_of(Insts, [&I0, O](const Instruction *I) {
      return I->getOperand(O) != I0->getOperand(O);
    });
    if (!NeedPHI) {
      NewOperands.push_back(I0->getOperand(O));
      continue;
    }

    // Create a new PHI in the successor block and populate it.
    auto *Op = I0->getOperand(O);
    assert(!Op->getType()->isTokenTy() && "Can't PHI tokens!");
    auto *PN = PHINode::Create(Op->getType(), Insts.size(),
                               Op->getName() + ".sink", &BBEnd->front());
    for (auto *I : Insts)
      PN->addIncoming(I->getOperand(O), I->getParent());
    NewOperands.push_back(PN);
  }

  // Arbitrarily use I0 as the new "common" instruction; remap its operands
  // and move it to the start of the successor block.
  for (unsigned O = 0, E = I0->getNumOperands(); O != E; ++O)
    I0->getOperandUse(O).set(NewOperands[O]);
  I0->moveBefore(&*BBEnd->getFirstInsertionPt());

  // Update metadata and IR flags.
  for (auto *I : Insts)
    if (I != I0) {
      combineMetadataForCSE(I0, I, true);
      I0->andIRFlags(I);
    }

  for (auto *I : Insts)
    if (I != I0)
      I->replaceAllUsesWith(I0);
  foldPointlessPHINodes(BBEnd);

  // Finally nuke all instructions apart from the common instruction.
  for (auto *I : Insts)
    if (I != I0)
      I->eraseFromParent();

  NumRemoved += Insts.size() - 1;
}
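
// End-to-end effect on the example from the file header (illustrative):
// before sinking, each predecessor ends in its own add/xor pair; afterwards
// a single xor and a single add live in the common successor, fed by PHIs of
// the form
//   %b.sink = phi i32 [ %b, %L ], [ %d, %R ]
// for the operands that differed (the ".sink" suffix comes from the
// PHINode::Create call above), and NumRemoved counts the erased duplicates.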

////////////////////////////////////////////////////////////////////////////////
// Pass machinery / boilerplate

class GVNSinkLegacyPass : public FunctionPass {
public:
  static char ID;

  GVNSinkLegacyPass() : FunctionPass(ID) {
    initializeGVNSinkLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    GVNSink G;
    return G.run(F);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<GlobalsAAWrapperPass>();
  }
};

} // end anonymous namespace

PreservedAnalyses GVNSinkPass::run(Function &F, FunctionAnalysisManager &AM) {
  GVNSink G;
  if (!G.run(F))
    return PreservedAnalyses::all();
  return PreservedAnalyses::none();
}

char GVNSinkLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(GVNSinkLegacyPass, "gvn-sink",
                      "Early GVN sinking of Expressions", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
INITIALIZE_PASS_END(GVNSinkLegacyPass, "gvn-sink",
                    "Early GVN sinking of Expressions", false, false)

FunctionPass *llvm::createGVNSinkPass() { return new GVNSinkLegacyPass(); }