MergeICmps.cpp 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924
  1. //===- MergeICmps.cpp - Optimize chains of integer comparisons ------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This pass turns chains of integer comparisons into memcmp (the memcmp is
  10. // later typically inlined as a chain of efficient hardware comparisons). This
  11. // typically benefits c++ member or nonmember operator==().
  12. //
  13. // The basic idea is to replace a longer chain of integer comparisons loaded
  14. // from contiguous memory locations into a shorter chain of larger integer
  15. // comparisons. Benefits are double:
// - There are fewer jumps, and therefore fewer opportunities for mispredictions
//   and I-cache misses.
  18. // - Code size is smaller, both because jumps are removed and because the
  19. // encoding of a 2*n byte compare is smaller than that of two n-byte
  20. // compares.
  21. //
  22. // Example:
  23. //
  24. // struct S {
  25. // int a;
  26. // char b;
  27. // char c;
  28. // uint16_t d;
  29. // bool operator==(const S& o) const {
  30. // return a == o.a && b == o.b && c == o.c && d == o.d;
  31. // }
  32. // };
  33. //
  34. // Is optimized as :
  35. //
  36. // bool S::operator==(const S& o) const {
  37. // return memcmp(this, &o, 8) == 0;
  38. // }
  39. //
  40. // Which will later be expanded (ExpandMemCmp) as a single 8-bytes icmp.
  41. //
  42. //===----------------------------------------------------------------------===//
  43. #include "llvm/Transforms/Scalar/MergeICmps.h"
  44. #include "llvm/Analysis/DomTreeUpdater.h"
  45. #include "llvm/Analysis/GlobalsModRef.h"
  46. #include "llvm/Analysis/Loads.h"
  47. #include "llvm/Analysis/TargetLibraryInfo.h"
  48. #include "llvm/Analysis/TargetTransformInfo.h"
  49. #include "llvm/IR/Dominators.h"
  50. #include "llvm/IR/Function.h"
  51. #include "llvm/IR/IRBuilder.h"
  52. #include "llvm/InitializePasses.h"
  53. #include "llvm/Pass.h"
  54. #include "llvm/Transforms/Scalar.h"
  55. #include "llvm/Transforms/Utils/BasicBlockUtils.h"
  56. #include "llvm/Transforms/Utils/BuildLibCalls.h"
  57. #include <algorithm>
  58. #include <numeric>
  59. #include <utility>
  60. #include <vector>
  61. using namespace llvm;
  62. namespace {
  63. #define DEBUG_TYPE "mergeicmps"
// A BCE atom "Binary Compare Expression Atom" represents an integer load
// that is a constant offset from a base value, e.g. `a` or `o.c` in the example
// at the top.
struct BCEAtom {
  // A default-constructed atom is invalid: BaseId stays 0, which callers test
  // (see `visitICmp`) to detect failure.
  BCEAtom() = default;
  BCEAtom(GetElementPtrInst *GEP, LoadInst *LoadI, int BaseId, APInt Offset)
      : GEP(GEP), LoadI(LoadI), BaseId(BaseId), Offset(Offset) {}

  // Atoms are move-only: they are stored in containers and never need to be
  // duplicated.
  BCEAtom(const BCEAtom &) = delete;
  BCEAtom &operator=(const BCEAtom &) = delete;

  BCEAtom(BCEAtom &&that) = default;
  BCEAtom &operator=(BCEAtom &&that) {
    if (this == &that)
      return *this;
    GEP = that.GEP;
    LoadI = that.LoadI;
    BaseId = that.BaseId;
    Offset = std::move(that.Offset);
    return *this;
  }

  // We want to order BCEAtoms by (Base, Offset). However we cannot use
  // the pointer values for Base because these are non-deterministic.
  // To make sure that the sort order is stable, we first assign to each atom
  // base value an index based on its order of appearance in the chain of
  // comparisons. We call this index `BaseOrdering`. For example, for:
  //    b[3] == c[2]   &&   a[1] == d[1]   &&   b[4] == c[3]
  //    |  block 1 |        |  block 2 |        |  block 3 |
  // b gets assigned index 0 and a index 1, because b appears as LHS in block 1,
  // which is before block 2.
  // We then sort by (BaseOrdering[LHS.Base()], LHS.Offset), which is stable.
  bool operator<(const BCEAtom &O) const {
    return BaseId != O.BaseId ? BaseId < O.BaseId : Offset.slt(O.Offset);
  }

  // The GEP computing the address loaded from, or null if the load is
  // directly from the base pointer (offset 0).
  GetElementPtrInst *GEP = nullptr;
  // The load producing the compared value.
  LoadInst *LoadI = nullptr;
  // Deterministic id of the base pointer (see comment on operator< above);
  // 0 means "invalid atom".
  unsigned BaseId = 0;
  // Constant byte offset of the load from the base pointer.
  APInt Offset;
};
  101. // A class that assigns increasing ids to values in the order in which they are
  102. // seen. See comment in `BCEAtom::operator<()``.
  103. class BaseIdentifier {
  104. public:
  105. // Returns the id for value `Base`, after assigning one if `Base` has not been
  106. // seen before.
  107. int getBaseId(const Value *Base) {
  108. assert(Base && "invalid base");
  109. const auto Insertion = BaseToIndex.try_emplace(Base, Order);
  110. if (Insertion.second)
  111. ++Order;
  112. return Insertion.first->second;
  113. }
  114. private:
  115. unsigned Order = 1;
  116. DenseMap<const Value*, int> BaseToIndex;
  117. };
// If this value is a load from a constant offset w.r.t. a base address, and
// there are no other users of the load or address, returns the base address and
// the offset.
BCEAtom visitICmpLoadOperand(Value *const Val, BaseIdentifier &BaseId) {
  auto *const LoadI = dyn_cast<LoadInst>(Val);
  if (!LoadI)
    return {}; // Not a load: invalid atom (BaseId == 0).
  LLVM_DEBUG(dbgs() << "load\n");
  // The load's only user must be the comparison in this block; otherwise
  // deleting the block would orphan a use.
  if (LoadI->isUsedOutsideOfBlock(LoadI->getParent())) {
    LLVM_DEBUG(dbgs() << "used outside of block\n");
    return {};
  }
  // Do not optimize atomic loads to non-atomic memcmp
  if (!LoadI->isSimple()) {
    LLVM_DEBUG(dbgs() << "volatile or atomic\n");
    return {};
  }
  Value *Addr = LoadI->getOperand(0);
  // Only the default address space is handled (memcmp operates there).
  if (Addr->getType()->getPointerAddressSpace() != 0) {
    LLVM_DEBUG(dbgs() << "from non-zero AddressSpace\n");
    return {};
  }
  const auto &DL = LoadI->getModule()->getDataLayout();
  if (!isDereferenceablePointer(Addr, LoadI->getType(), DL)) {
    LLVM_DEBUG(dbgs() << "not dereferenceable\n");
    // We need to make sure that we can do comparison in any order, so we
    // require memory to be unconditionally dereferenceable.
    return {};
  }
  // Peel off a GEP with constant offsets, if any; otherwise the base is the
  // address itself with offset 0.
  APInt Offset = APInt(DL.getPointerTypeSizeInBits(Addr->getType()), 0);
  Value *Base = Addr;
  auto *GEP = dyn_cast<GetElementPtrInst>(Addr);
  if (GEP) {
    LLVM_DEBUG(dbgs() << "GEP\n");
    if (GEP->isUsedOutsideOfBlock(LoadI->getParent())) {
      LLVM_DEBUG(dbgs() << "used outside of block\n");
      return {};
    }
    if (!GEP->accumulateConstantOffset(DL, Offset))
      return {}; // Non-constant offset: cannot form a BCE atom.
    Base = GEP->getPointerOperand();
  }
  return BCEAtom(GEP, LoadI, BaseId.getBaseId(Base), Offset);
}
// A comparison between two BCE atoms, e.g. `a == o.a` in the example at the
// top.
// Note: the terminology is misleading: the comparison is symmetric, so there
// is no real {l/r}hs. What we want though is to have the same base on the
// left (resp. right), so that we can detect consecutive loads. To ensure this
// we put the smallest atom on the left.
struct BCECmp {
  // The two atoms, canonicalized so that `Lhs` is the smaller one
  // (by BaseId, then Offset).
  BCEAtom Lhs;
  BCEAtom Rhs;
  // Width of the compared values, in bits.
  int SizeBits;
  // The original icmp instruction.
  const ICmpInst *CmpI;
  BCECmp(BCEAtom L, BCEAtom R, int SizeBits, const ICmpInst *CmpI)
      : Lhs(std::move(L)), Rhs(std::move(R)), SizeBits(SizeBits), CmpI(CmpI) {
    // Canonicalize: smallest atom on the left.
    if (Rhs < Lhs) std::swap(Rhs, Lhs);
  }
};
// A basic block with a comparison between two BCE atoms.
// The block might do extra work besides the atom comparison, in which case
// doesOtherWork() returns true. Under some conditions, the block can be
// split into the atom comparison part and the "other work" part
// (see canSplit()).
class BCECmpBlock {
 public:
  typedef SmallDenseSet<const Instruction *, 8> InstructionSet;

  BCECmpBlock(BCECmp Cmp, BasicBlock *BB, InstructionSet BlockInsts)
      : BB(BB), BlockInsts(std::move(BlockInsts)), Cmp(std::move(Cmp)) {}

  const BCEAtom &Lhs() const { return Cmp.Lhs; }
  const BCEAtom &Rhs() const { return Cmp.Rhs; }
  int SizeBits() const { return Cmp.SizeBits; }

  // Returns true if the block does other work besides the comparison.
  bool doesOtherWork() const;

  // Returns true if the non-BCE-cmp instructions can be separated from BCE-cmp
  // instructions in the block.
  bool canSplit(AliasAnalysis &AA) const;

  // Returns true if all the relevant instructions in the BCE-cmp-block can
  // be sunk below this instruction. By doing this, we know we can separate the
  // BCE-cmp-block instructions from the non-BCE-cmp-block instructions in the
  // block.
  bool canSinkBCECmpInst(const Instruction *, AliasAnalysis &AA) const;

  // We can separate the BCE-cmp-block instructions and the non-BCE-cmp-block
  // instructions. Split the old block and move all non-BCE-cmp-insts into the
  // new parent block.
  void split(BasicBlock *NewParent, AliasAnalysis &AA) const;

  // The basic block where this comparison happens.
  BasicBlock *BB;
  // Instructions relating to the BCECmp and branch.
  InstructionSet BlockInsts;
  // The block requires splitting.
  bool RequireSplit = false;
  // Original order of this block in the chain.
  unsigned OrigOrder = 0;

private:
  BCECmp Cmp;
};
// Returns true if `Inst` can safely be moved below the BCE-cmp instructions:
// it must neither clobber the compared memory nor consume a value produced by
// the BCE-cmp instructions.
bool BCECmpBlock::canSinkBCECmpInst(const Instruction *Inst,
                                    AliasAnalysis &AA) const {
  // If this instruction may clobber the loads and is in middle of the BCE cmp
  // block instructions, then bail for now.
  if (Inst->mayWriteToMemory()) {
    auto MayClobber = [&](LoadInst *LI) {
      // If a potentially clobbering instruction comes before the load,
      // we can still safely sink the load.
      return (Inst->getParent() != LI->getParent() || !Inst->comesBefore(LI)) &&
             isModSet(AA.getModRefInfo(Inst, MemoryLocation::get(LI)));
    };
    if (MayClobber(Cmp.Lhs.LoadI) || MayClobber(Cmp.Rhs.LoadI))
      return false;
  }
  // Make sure this instruction does not use any of the BCE cmp block
  // instructions as operand.
  return llvm::none_of(Inst->operands(), [&](const Value *Op) {
    const Instruction *OpI = dyn_cast<Instruction>(Op);
    return OpI && BlockInsts.contains(OpI);
  });
}
// Moves every non-BCE-cmp instruction of this block to the front of
// `NewParent`, preserving their relative order. Must only be called when
// canSplit() returned true (asserted below).
void BCECmpBlock::split(BasicBlock *NewParent, AliasAnalysis &AA) const {
  llvm::SmallVector<Instruction *, 4> OtherInsts;
  for (Instruction &Inst : *BB) {
    if (BlockInsts.count(&Inst))
      continue;
    assert(canSinkBCECmpInst(&Inst, AA) && "Split unsplittable block");
    // This is a non-BCE-cmp-block instruction. And it can be separated
    // from the BCE-cmp-block instruction.
    OtherInsts.push_back(&Inst);
  }

  // Do the actual splitting. Iterate in reverse so that inserting each
  // instruction at NewParent->begin() restores the original order.
  for (Instruction *Inst : reverse(OtherInsts))
    Inst->moveBefore(*NewParent, NewParent->begin());
}
  251. bool BCECmpBlock::canSplit(AliasAnalysis &AA) const {
  252. for (Instruction &Inst : *BB) {
  253. if (!BlockInsts.count(&Inst)) {
  254. if (!canSinkBCECmpInst(&Inst, AA))
  255. return false;
  256. }
  257. }
  258. return true;
  259. }
  260. bool BCECmpBlock::doesOtherWork() const {
  261. // TODO(courbet): Can we allow some other things ? This is very conservative.
  262. // We might be able to get away with anything does not have any side
  263. // effects outside of the basic block.
  264. // Note: The GEPs and/or loads are not necessarily in the same block.
  265. for (const Instruction &Inst : *BB) {
  266. if (!BlockInsts.count(&Inst))
  267. return true;
  268. }
  269. return false;
  270. }
// Visit the given comparison. If this is a comparison between two valid
// BCE atoms, returns the comparison.
std::optional<BCECmp> visitICmp(const ICmpInst *const CmpI,
                                const ICmpInst::Predicate ExpectedPredicate,
                                BaseIdentifier &BaseId) {
  // The comparison can only be used once:
  //  - For intermediate blocks, as a branch condition.
  //  - For the final block, as an incoming value for the Phi.
  // If there are any other uses of the comparison, we cannot merge it with
  // other comparisons as we would create an orphan use of the value.
  if (!CmpI->hasOneUse()) {
    LLVM_DEBUG(dbgs() << "cmp has several uses\n");
    return std::nullopt;
  }
  if (CmpI->getPredicate() != ExpectedPredicate)
    return std::nullopt;
  LLVM_DEBUG(dbgs() << "cmp "
                    << (ExpectedPredicate == ICmpInst::ICMP_EQ ? "eq" : "ne")
                    << "\n");
  // Both operands must be valid BCE atoms (BaseId == 0 means invalid).
  auto Lhs = visitICmpLoadOperand(CmpI->getOperand(0), BaseId);
  if (!Lhs.BaseId)
    return std::nullopt;
  auto Rhs = visitICmpLoadOperand(CmpI->getOperand(1), BaseId);
  if (!Rhs.BaseId)
    return std::nullopt;
  const auto &DL = CmpI->getModule()->getDataLayout();
  return BCECmp(std::move(Lhs), std::move(Rhs),
                DL.getTypeSizeInBits(CmpI->getOperand(0)->getType()), CmpI);
}
  300. // Visit the given comparison block. If this is a comparison between two valid
  301. // BCE atoms, returns the comparison.
  302. std::optional<BCECmpBlock> visitCmpBlock(Value *const Val,
  303. BasicBlock *const Block,
  304. const BasicBlock *const PhiBlock,
  305. BaseIdentifier &BaseId) {
  306. if (Block->empty())
  307. return std::nullopt;
  308. auto *const BranchI = dyn_cast<BranchInst>(Block->getTerminator());
  309. if (!BranchI)
  310. return std::nullopt;
  311. LLVM_DEBUG(dbgs() << "branch\n");
  312. Value *Cond;
  313. ICmpInst::Predicate ExpectedPredicate;
  314. if (BranchI->isUnconditional()) {
  315. // In this case, we expect an incoming value which is the result of the
  316. // comparison. This is the last link in the chain of comparisons (note
  317. // that this does not mean that this is the last incoming value, blocks
  318. // can be reordered).
  319. Cond = Val;
  320. ExpectedPredicate = ICmpInst::ICMP_EQ;
  321. } else {
  322. // In this case, we expect a constant incoming value (the comparison is
  323. // chained).
  324. const auto *const Const = cast<ConstantInt>(Val);
  325. LLVM_DEBUG(dbgs() << "const\n");
  326. if (!Const->isZero())
  327. return std::nullopt;
  328. LLVM_DEBUG(dbgs() << "false\n");
  329. assert(BranchI->getNumSuccessors() == 2 && "expecting a cond branch");
  330. BasicBlock *const FalseBlock = BranchI->getSuccessor(1);
  331. Cond = BranchI->getCondition();
  332. ExpectedPredicate =
  333. FalseBlock == PhiBlock ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
  334. }
  335. auto *CmpI = dyn_cast<ICmpInst>(Cond);
  336. if (!CmpI)
  337. return std::nullopt;
  338. LLVM_DEBUG(dbgs() << "icmp\n");
  339. std::optional<BCECmp> Result = visitICmp(CmpI, ExpectedPredicate, BaseId);
  340. if (!Result)
  341. return std::nullopt;
  342. BCECmpBlock::InstructionSet BlockInsts(
  343. {Result->Lhs.LoadI, Result->Rhs.LoadI, Result->CmpI, BranchI});
  344. if (Result->Lhs.GEP)
  345. BlockInsts.insert(Result->Lhs.GEP);
  346. if (Result->Rhs.GEP)
  347. BlockInsts.insert(Result->Rhs.GEP);
  348. return BCECmpBlock(std::move(*Result), Block, BlockInsts);
  349. }
// Appends `Comparison` to `Comparisons`, recording its original position in
// the chain so that unmerged comparisons can later be kept in program order
// (see getMinOrigOrder / mergeBlocks).
static inline void enqueueBlock(std::vector<BCECmpBlock> &Comparisons,
                                BCECmpBlock &&Comparison) {
  LLVM_DEBUG(dbgs() << "Block '" << Comparison.BB->getName()
                    << "': Found cmp of " << Comparison.SizeBits()
                    << " bits between " << Comparison.Lhs().BaseId << " + "
                    << Comparison.Lhs().Offset << " and "
                    << Comparison.Rhs().BaseId << " + "
                    << Comparison.Rhs().Offset << "\n");
  LLVM_DEBUG(dbgs() << "\n");
  Comparison.OrigOrder = Comparisons.size();
  Comparisons.push_back(std::move(Comparison));
}
// A chain of comparisons.
class BCECmpChain {
public:
  using ContiguousBlocks = std::vector<BCECmpBlock>;

  BCECmpChain(const std::vector<BasicBlock *> &Blocks, PHINode &Phi,
              AliasAnalysis &AA);

  // Replaces the chain with merged comparison blocks (memcmp calls).
  // Only valid to call when atLeastOneMerged() is true.
  bool simplify(const TargetLibraryInfo &TLI, AliasAnalysis &AA,
                DomTreeUpdater &DTU);

  // Returns true if at least one group contains more than one comparison,
  // i.e. simplifying the chain would actually merge something.
  bool atLeastOneMerged() const {
    return any_of(MergedBlocks_,
                  [](const auto &Blocks) { return Blocks.size() > 1; });
  }

private:
  PHINode &Phi_;
  // The list of all blocks in the chain, grouped by contiguity.
  std::vector<ContiguousBlocks> MergedBlocks_;
  // The original entry block (before sorting).
  BasicBlock *EntryBlock_;
};
  381. static bool areContiguous(const BCECmpBlock &First, const BCECmpBlock &Second) {
  382. return First.Lhs().BaseId == Second.Lhs().BaseId &&
  383. First.Rhs().BaseId == Second.Rhs().BaseId &&
  384. First.Lhs().Offset + First.SizeBits() / 8 == Second.Lhs().Offset &&
  385. First.Rhs().Offset + First.SizeBits() / 8 == Second.Rhs().Offset;
  386. }
  387. static unsigned getMinOrigOrder(const BCECmpChain::ContiguousBlocks &Blocks) {
  388. unsigned MinOrigOrder = std::numeric_limits<unsigned>::max();
  389. for (const BCECmpBlock &Block : Blocks)
  390. MinOrigOrder = std::min(MinOrigOrder, Block.OrigOrder);
  391. return MinOrigOrder;
  392. }
/// Given a chain of comparison blocks, groups the blocks into contiguous
/// ranges that can be merged together into a single comparison.
static std::vector<BCECmpChain::ContiguousBlocks>
mergeBlocks(std::vector<BCECmpBlock> &&Blocks) {
  std::vector<BCECmpChain::ContiguousBlocks> MergedBlocks;

  // Sort to detect continuous offsets.
  llvm::sort(Blocks,
             [](const BCECmpBlock &LhsBlock, const BCECmpBlock &RhsBlock) {
               return std::tie(LhsBlock.Lhs(), LhsBlock.Rhs()) <
                      std::tie(RhsBlock.Lhs(), RhsBlock.Rhs());
             });

  // Greedily extend the current group while offsets stay contiguous; start a
  // new group otherwise.
  BCECmpChain::ContiguousBlocks *LastMergedBlock = nullptr;
  for (BCECmpBlock &Block : Blocks) {
    if (!LastMergedBlock || !areContiguous(LastMergedBlock->back(), Block)) {
      MergedBlocks.emplace_back();
      LastMergedBlock = &MergedBlocks.back();
    } else {
      LLVM_DEBUG(dbgs() << "Merging block " << Block.BB->getName() << " into "
                        << LastMergedBlock->back().BB->getName() << "\n");
    }
    LastMergedBlock->push_back(std::move(Block));
  }

  // While we allow reordering for merging, do not reorder unmerged comparisons.
  // Doing so may introduce branch on poison.
  llvm::sort(MergedBlocks, [](const BCECmpChain::ContiguousBlocks &LhsBlocks,
                              const BCECmpChain::ContiguousBlocks &RhsBlocks) {
    return getMinOrigOrder(LhsBlocks) < getMinOrigOrder(RhsBlocks);
  });

  return MergedBlocks;
}
// Builds the chain from the given phi-predecessor blocks. On any failure the
// constructor returns early with MergedBlocks_ left empty, which makes
// atLeastOneMerged() return false.
BCECmpChain::BCECmpChain(const std::vector<BasicBlock *> &Blocks, PHINode &Phi,
                         AliasAnalysis &AA)
    : Phi_(Phi) {
  assert(!Blocks.empty() && "a chain should have at least one block");
  // Now look inside blocks to check for BCE comparisons.
  std::vector<BCECmpBlock> Comparisons;
  BaseIdentifier BaseId;
  for (BasicBlock *const Block : Blocks) {
    assert(Block && "invalid block");
    std::optional<BCECmpBlock> Comparison = visitCmpBlock(
        Phi.getIncomingValueForBlock(Block), Block, Phi.getParent(), BaseId);
    if (!Comparison) {
      LLVM_DEBUG(dbgs() << "chain with invalid BCECmpBlock, no merge.\n");
      return;
    }
    if (Comparison->doesOtherWork()) {
      LLVM_DEBUG(dbgs() << "block '" << Comparison->BB->getName()
                        << "' does extra work besides compare\n");
      if (Comparisons.empty()) {
        // This is the initial block in the chain, in case this block does other
        // work, we can try to split the block and move the irrelevant
        // instructions to the predecessor.
        //
        // If this is not the initial block in the chain, splitting it wont
        // work.
        //
        // As once split, there will still be instructions before the BCE cmp
        // instructions that do other work in program order, i.e. within the
        // chain before sorting. Unless we can abort the chain at this point
        // and start anew.
        //
        // NOTE: we only handle blocks a with single predecessor for now.
        if (Comparison->canSplit(AA)) {
          LLVM_DEBUG(dbgs()
                     << "Split initial block '" << Comparison->BB->getName()
                     << "' that does extra work besides compare\n");
          Comparison->RequireSplit = true;
          enqueueBlock(Comparisons, std::move(*Comparison));
        } else {
          LLVM_DEBUG(dbgs()
                     << "ignoring initial block '" << Comparison->BB->getName()
                     << "' that does extra work besides compare\n");
        }
        continue;
      }
      // TODO(courbet): Right now we abort the whole chain. We could be
      // merging only the blocks that don't do other work and resume the
      // chain from there. For example:
      //  if (a[0] == b[0]) {  // bb1
      //    if (a[1] == b[1]) {  // bb2
      //      some_value = 3; //bb3
      //      if (a[2] == b[2]) { //bb3
      //        do a ton of stuff  //bb4
      //      }
      //    }
      //  }
      //
      // This is:
      //
      //  bb1 --eq--> bb2 --eq--> bb3* -eq--> bb4 --+
      //   \            \           \               \
      //    ne           ne          ne              \
      //     \            \           \               v
      //      +------------+-----------+----------> bb_phi
      //
      // We can only merge the first two comparisons, because bb3* does
      // "other work" (setting some_value to 3).
      // We could still merge bb1 and bb2 though.
      return;
    }
    enqueueBlock(Comparisons, std::move(*Comparison));
  }

  // It is possible we have no suitable comparison to merge.
  if (Comparisons.empty()) {
    LLVM_DEBUG(dbgs() << "chain with no BCE basic blocks, no merge\n");
    return;
  }
  EntryBlock_ = Comparisons[0].BB;
  MergedBlocks_ = mergeBlocks(std::move(Comparisons));
}
  503. namespace {
  504. // A class to compute the name of a set of merged basic blocks.
  505. // This is optimized for the common case of no block names.
  506. class MergedBlockName {
  507. // Storage for the uncommon case of several named blocks.
  508. SmallString<16> Scratch;
  509. public:
  510. explicit MergedBlockName(ArrayRef<BCECmpBlock> Comparisons)
  511. : Name(makeName(Comparisons)) {}
  512. const StringRef Name;
  513. private:
  514. StringRef makeName(ArrayRef<BCECmpBlock> Comparisons) {
  515. assert(!Comparisons.empty() && "no basic block");
  516. // Fast path: only one block, or no names at all.
  517. if (Comparisons.size() == 1)
  518. return Comparisons[0].BB->getName();
  519. const int size = std::accumulate(Comparisons.begin(), Comparisons.end(), 0,
  520. [](int i, const BCECmpBlock &Cmp) {
  521. return i + Cmp.BB->getName().size();
  522. });
  523. if (size == 0)
  524. return StringRef("", 0);
  525. // Slow path: at least two blocks, at least one block with a name.
  526. Scratch.clear();
  527. // We'll have `size` bytes for name and `Comparisons.size() - 1` bytes for
  528. // separators.
  529. Scratch.reserve(size + Comparisons.size() - 1);
  530. const auto append = [this](StringRef str) {
  531. Scratch.append(str.begin(), str.end());
  532. };
  533. append(Comparisons[0].BB->getName());
  534. for (int I = 1, E = Comparisons.size(); I < E; ++I) {
  535. const BasicBlock *const BB = Comparisons[I].BB;
  536. if (!BB->getName().empty()) {
  537. append("+");
  538. append(BB->getName());
  539. }
  540. }
  541. return Scratch.str();
  542. }
  543. };
  544. } // namespace
  545. // Merges the given contiguous comparison blocks into one memcmp block.
  546. static BasicBlock *mergeComparisons(ArrayRef<BCECmpBlock> Comparisons,
  547. BasicBlock *const InsertBefore,
  548. BasicBlock *const NextCmpBlock,
  549. PHINode &Phi, const TargetLibraryInfo &TLI,
  550. AliasAnalysis &AA, DomTreeUpdater &DTU) {
  551. assert(!Comparisons.empty() && "merging zero comparisons");
  552. LLVMContext &Context = NextCmpBlock->getContext();
  553. const BCECmpBlock &FirstCmp = Comparisons[0];
  554. // Create a new cmp block before next cmp block.
  555. BasicBlock *const BB =
  556. BasicBlock::Create(Context, MergedBlockName(Comparisons).Name,
  557. NextCmpBlock->getParent(), InsertBefore);
  558. IRBuilder<> Builder(BB);
  559. // Add the GEPs from the first BCECmpBlock.
  560. Value *Lhs, *Rhs;
  561. if (FirstCmp.Lhs().GEP)
  562. Lhs = Builder.Insert(FirstCmp.Lhs().GEP->clone());
  563. else
  564. Lhs = FirstCmp.Lhs().LoadI->getPointerOperand();
  565. if (FirstCmp.Rhs().GEP)
  566. Rhs = Builder.Insert(FirstCmp.Rhs().GEP->clone());
  567. else
  568. Rhs = FirstCmp.Rhs().LoadI->getPointerOperand();
  569. Value *IsEqual = nullptr;
  570. LLVM_DEBUG(dbgs() << "Merging " << Comparisons.size() << " comparisons -> "
  571. << BB->getName() << "\n");
  572. // If there is one block that requires splitting, we do it now, i.e.
  573. // just before we know we will collapse the chain. The instructions
  574. // can be executed before any of the instructions in the chain.
  575. const auto ToSplit = llvm::find_if(
  576. Comparisons, [](const BCECmpBlock &B) { return B.RequireSplit; });
  577. if (ToSplit != Comparisons.end()) {
  578. LLVM_DEBUG(dbgs() << "Splitting non_BCE work to header\n");
  579. ToSplit->split(BB, AA);
  580. }
  581. if (Comparisons.size() == 1) {
  582. LLVM_DEBUG(dbgs() << "Only one comparison, updating branches\n");
  583. Value *const LhsLoad =
  584. Builder.CreateLoad(FirstCmp.Lhs().LoadI->getType(), Lhs);
  585. Value *const RhsLoad =
  586. Builder.CreateLoad(FirstCmp.Rhs().LoadI->getType(), Rhs);
  587. // There are no blocks to merge, just do the comparison.
  588. IsEqual = Builder.CreateICmpEQ(LhsLoad, RhsLoad);
  589. } else {
  590. const unsigned TotalSizeBits = std::accumulate(
  591. Comparisons.begin(), Comparisons.end(), 0u,
  592. [](int Size, const BCECmpBlock &C) { return Size + C.SizeBits(); });
  593. // memcmp expects a 'size_t' argument and returns 'int'.
  594. unsigned SizeTBits = TLI.getSizeTSize(*Phi.getModule());
  595. unsigned IntBits = TLI.getIntSize();
  596. // Create memcmp() == 0.
  597. const auto &DL = Phi.getModule()->getDataLayout();
  598. Value *const MemCmpCall = emitMemCmp(
  599. Lhs, Rhs,
  600. ConstantInt::get(Builder.getIntNTy(SizeTBits), TotalSizeBits / 8),
  601. Builder, DL, &TLI);
  602. IsEqual = Builder.CreateICmpEQ(
  603. MemCmpCall, ConstantInt::get(Builder.getIntNTy(IntBits), 0));
  604. }
  605. BasicBlock *const PhiBB = Phi.getParent();
  606. // Add a branch to the next basic block in the chain.
  607. if (NextCmpBlock == PhiBB) {
  608. // Continue to phi, passing it the comparison result.
  609. Builder.CreateBr(PhiBB);
  610. Phi.addIncoming(IsEqual, BB);
  611. DTU.applyUpdates({{DominatorTree::Insert, BB, PhiBB}});
  612. } else {
  613. // Continue to next block if equal, exit to phi else.
  614. Builder.CreateCondBr(IsEqual, NextCmpBlock, PhiBB);
  615. Phi.addIncoming(ConstantInt::getFalse(Context), BB);
  616. DTU.applyUpdates({{DominatorTree::Insert, BB, NextCmpBlock},
  617. {DominatorTree::Insert, BB, PhiBB}});
  618. }
  619. return BB;
  620. }
// Rewrites the chain: emits one merged comparison block per contiguous group,
// redirects the chain's predecessors to the new entry, and deletes the old
// blocks. Always returns true (the IR was changed).
bool BCECmpChain::simplify(const TargetLibraryInfo &TLI, AliasAnalysis &AA,
                           DomTreeUpdater &DTU) {
  assert(atLeastOneMerged() && "simplifying trivial BCECmpChain");
  LLVM_DEBUG(dbgs() << "Simplifying comparison chain starting at block "
                    << EntryBlock_->getName() << "\n");

  // Effectively merge blocks. We go in the reverse direction from the phi block
  // so that the next block is always available to branch to.
  BasicBlock *InsertBefore = EntryBlock_;
  BasicBlock *NextCmpBlock = Phi_.getParent();
  for (const auto &Blocks : reverse(MergedBlocks_)) {
    InsertBefore = NextCmpBlock = mergeComparisons(
        Blocks, InsertBefore, NextCmpBlock, Phi_, TLI, AA, DTU);
  }

  // Replace the original cmp chain with the new cmp chain by pointing all
  // predecessors of EntryBlock_ to NextCmpBlock instead. This makes all cmp
  // blocks in the old chain unreachable.
  while (!pred_empty(EntryBlock_)) {
    BasicBlock* const Pred = *pred_begin(EntryBlock_);
    LLVM_DEBUG(dbgs() << "Updating jump into old chain from " << Pred->getName()
                      << "\n");
    Pred->getTerminator()->replaceUsesOfWith(EntryBlock_, NextCmpBlock);
    DTU.applyUpdates({{DominatorTree::Delete, Pred, EntryBlock_},
                      {DominatorTree::Insert, Pred, NextCmpBlock}});
  }

  // If the old cmp chain was the function entry, we need to update the function
  // entry.
  const bool ChainEntryIsFnEntry = EntryBlock_->isEntryBlock();
  if (ChainEntryIsFnEntry && DTU.hasDomTree()) {
    LLVM_DEBUG(dbgs() << "Changing function entry from "
                      << EntryBlock_->getName() << " to "
                      << NextCmpBlock->getName() << "\n");
    DTU.getDomTree().setNewRoot(NextCmpBlock);
    DTU.applyUpdates({{DominatorTree::Delete, NextCmpBlock, EntryBlock_}});
  }
  EntryBlock_ = nullptr;

  // Delete merged blocks. This also removes incoming values in phi.
  SmallVector<BasicBlock *, 16> DeadBlocks;
  for (const auto &Blocks : MergedBlocks_) {
    for (const BCECmpBlock &Block : Blocks) {
      LLVM_DEBUG(dbgs() << "Deleting merged block " << Block.BB->getName()
                        << "\n");
      DeadBlocks.push_back(Block.BB);
    }
  }
  DeleteDeadBlocks(DeadBlocks, &DTU);

  MergedBlocks_.clear();
  return true;
}
  669. std::vector<BasicBlock *> getOrderedBlocks(PHINode &Phi,
  670. BasicBlock *const LastBlock,
  671. int NumBlocks) {
  672. // Walk up from the last block to find other blocks.
  673. std::vector<BasicBlock *> Blocks(NumBlocks);
  674. assert(LastBlock && "invalid last block");
  675. BasicBlock *CurBlock = LastBlock;
  676. for (int BlockIndex = NumBlocks - 1; BlockIndex > 0; --BlockIndex) {
  677. if (CurBlock->hasAddressTaken()) {
  678. // Somebody is jumping to the block through an address, all bets are
  679. // off.
  680. LLVM_DEBUG(dbgs() << "skip: block " << BlockIndex
  681. << " has its address taken\n");
  682. return {};
  683. }
  684. Blocks[BlockIndex] = CurBlock;
  685. auto *SinglePredecessor = CurBlock->getSinglePredecessor();
  686. if (!SinglePredecessor) {
  687. // The block has two or more predecessors.
  688. LLVM_DEBUG(dbgs() << "skip: block " << BlockIndex
  689. << " has two or more predecessors\n");
  690. return {};
  691. }
  692. if (Phi.getBasicBlockIndex(SinglePredecessor) < 0) {
  693. // The block does not link back to the phi.
  694. LLVM_DEBUG(dbgs() << "skip: block " << BlockIndex
  695. << " does not link back to the phi\n");
  696. return {};
  697. }
  698. CurBlock = SinglePredecessor;
  699. }
  700. Blocks[0] = CurBlock;
  701. return Blocks;
  702. }
  703. bool processPhi(PHINode &Phi, const TargetLibraryInfo &TLI, AliasAnalysis &AA,
  704. DomTreeUpdater &DTU) {
  705. LLVM_DEBUG(dbgs() << "processPhi()\n");
  706. if (Phi.getNumIncomingValues() <= 1) {
  707. LLVM_DEBUG(dbgs() << "skip: only one incoming value in phi\n");
  708. return false;
  709. }
  710. // We are looking for something that has the following structure:
  711. // bb1 --eq--> bb2 --eq--> bb3 --eq--> bb4 --+
  712. // \ \ \ \
  713. // ne ne ne \
  714. // \ \ \ v
  715. // +------------+-----------+----------> bb_phi
  716. //
  717. // - The last basic block (bb4 here) must branch unconditionally to bb_phi.
  718. // It's the only block that contributes a non-constant value to the Phi.
  719. // - All other blocks (b1, b2, b3) must have exactly two successors, one of
  720. // them being the phi block.
  721. // - All intermediate blocks (bb2, bb3) must have only one predecessor.
  722. // - Blocks cannot do other work besides the comparison, see doesOtherWork()
  723. // The blocks are not necessarily ordered in the phi, so we start from the
  724. // last block and reconstruct the order.
  725. BasicBlock *LastBlock = nullptr;
  726. for (unsigned I = 0; I < Phi.getNumIncomingValues(); ++I) {
  727. if (isa<ConstantInt>(Phi.getIncomingValue(I))) continue;
  728. if (LastBlock) {
  729. // There are several non-constant values.
  730. LLVM_DEBUG(dbgs() << "skip: several non-constant values\n");
  731. return false;
  732. }
  733. if (!isa<ICmpInst>(Phi.getIncomingValue(I)) ||
  734. cast<ICmpInst>(Phi.getIncomingValue(I))->getParent() !=
  735. Phi.getIncomingBlock(I)) {
  736. // Non-constant incoming value is not from a cmp instruction or not
  737. // produced by the last block. We could end up processing the value
  738. // producing block more than once.
  739. //
  740. // This is an uncommon case, so we bail.
  741. LLVM_DEBUG(
  742. dbgs()
  743. << "skip: non-constant value not from cmp or not from last block.\n");
  744. return false;
  745. }
  746. LastBlock = Phi.getIncomingBlock(I);
  747. }
  748. if (!LastBlock) {
  749. // There is no non-constant block.
  750. LLVM_DEBUG(dbgs() << "skip: no non-constant block\n");
  751. return false;
  752. }
  753. if (LastBlock->getSingleSuccessor() != Phi.getParent()) {
  754. LLVM_DEBUG(dbgs() << "skip: last block non-phi successor\n");
  755. return false;
  756. }
  757. const auto Blocks =
  758. getOrderedBlocks(Phi, LastBlock, Phi.getNumIncomingValues());
  759. if (Blocks.empty()) return false;
  760. BCECmpChain CmpChain(Blocks, Phi, AA);
  761. if (!CmpChain.atLeastOneMerged()) {
  762. LLVM_DEBUG(dbgs() << "skip: nothing merged\n");
  763. return false;
  764. }
  765. return CmpChain.simplify(TLI, AA, DTU);
  766. }
  767. static bool runImpl(Function &F, const TargetLibraryInfo &TLI,
  768. const TargetTransformInfo &TTI, AliasAnalysis &AA,
  769. DominatorTree *DT) {
  770. LLVM_DEBUG(dbgs() << "MergeICmpsLegacyPass: " << F.getName() << "\n");
  771. // We only try merging comparisons if the target wants to expand memcmp later.
  772. // The rationale is to avoid turning small chains into memcmp calls.
  773. if (!TTI.enableMemCmpExpansion(F.hasOptSize(), true))
  774. return false;
  775. // If we don't have memcmp avaiable we can't emit calls to it.
  776. if (!TLI.has(LibFunc_memcmp))
  777. return false;
  778. DomTreeUpdater DTU(DT, /*PostDominatorTree*/ nullptr,
  779. DomTreeUpdater::UpdateStrategy::Eager);
  780. bool MadeChange = false;
  781. for (BasicBlock &BB : llvm::drop_begin(F)) {
  782. // A Phi operation is always first in a basic block.
  783. if (auto *const Phi = dyn_cast<PHINode>(&*BB.begin()))
  784. MadeChange |= processPhi(*Phi, TLI, AA, DTU);
  785. }
  786. return MadeChange;
  787. }
  788. class MergeICmpsLegacyPass : public FunctionPass {
  789. public:
  790. static char ID;
  791. MergeICmpsLegacyPass() : FunctionPass(ID) {
  792. initializeMergeICmpsLegacyPassPass(*PassRegistry::getPassRegistry());
  793. }
  794. bool runOnFunction(Function &F) override {
  795. if (skipFunction(F)) return false;
  796. const auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  797. const auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  798. // MergeICmps does not need the DominatorTree, but we update it if it's
  799. // already available.
  800. auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  801. auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  802. return runImpl(F, TLI, TTI, AA, DTWP ? &DTWP->getDomTree() : nullptr);
  803. }
  804. private:
  805. void getAnalysisUsage(AnalysisUsage &AU) const override {
  806. AU.addRequired<TargetLibraryInfoWrapperPass>();
  807. AU.addRequired<TargetTransformInfoWrapperPass>();
  808. AU.addRequired<AAResultsWrapperPass>();
  809. AU.addPreserved<GlobalsAAWrapperPass>();
  810. AU.addPreserved<DominatorTreeWrapperPass>();
  811. }
  812. };
} // namespace

char MergeICmpsLegacyPass::ID = 0;

// Register the legacy pass with the pass registry, declaring the analyses it
// requires so the pass manager schedules them first.
INITIALIZE_PASS_BEGIN(MergeICmpsLegacyPass, "mergeicmps",
                      "Merge contiguous icmps into a memcmp", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MergeICmpsLegacyPass, "mergeicmps",
                    "Merge contiguous icmps into a memcmp", false, false)
  822. Pass *llvm::createMergeICmpsLegacyPass() { return new MergeICmpsLegacyPass(); }
  823. PreservedAnalyses MergeICmpsPass::run(Function &F,
  824. FunctionAnalysisManager &AM) {
  825. auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  826. auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  827. auto &AA = AM.getResult<AAManager>(F);
  828. auto *DT = AM.getCachedResult<DominatorTreeAnalysis>(F);
  829. const bool MadeChanges = runImpl(F, TLI, TTI, AA, DT);
  830. if (!MadeChanges)
  831. return PreservedAnalyses::all();
  832. PreservedAnalyses PA;
  833. PA.preserve<DominatorTreeAnalysis>();
  834. return PA;
  835. }