MemorySSAUpdater.cpp

  1. //===-- MemorySSAUpdater.cpp - Memory SSA Updater--------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------===//
  8. //
  9. // This file implements the MemorySSAUpdater class.
  10. //
  11. //===----------------------------------------------------------------===//
  12. #include "llvm/Analysis/MemorySSAUpdater.h"
  13. #include "llvm/ADT/STLExtras.h"
  14. #include "llvm/ADT/SetVector.h"
  15. #include "llvm/ADT/SmallPtrSet.h"
  16. #include "llvm/Analysis/IteratedDominanceFrontier.h"
  17. #include "llvm/Analysis/LoopIterator.h"
  18. #include "llvm/Analysis/MemorySSA.h"
  19. #include "llvm/IR/BasicBlock.h"
  20. #include "llvm/IR/Dominators.h"
  21. #include "llvm/Support/Debug.h"
  22. #include <algorithm>
  23. #define DEBUG_TYPE "memoryssa"
  24. using namespace llvm;
  25. // This is the marker algorithm from "Simple and Efficient Construction of
  26. // Static Single Assignment Form"
  27. // The simple, non-marker algorithm places phi nodes at any join
  28. // Here, we place markers, and only place phi nodes if they end up necessary.
  29. // They are only necessary if they break a cycle (IE we recursively visit
  30. // ourselves again), or we discover, while getting the value of the operands,
  31. // that there are two or more definitions needing to be merged.
  32. // This still will leave non-minimal form in the case of irreducible control
  33. // flow, where phi nodes may be in cycles with themselves, but unnecessary.
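// Illustrative sketch (hypothetical CFG, not from the paper): for a diamond
//
//          entry: 1 = MemoryDef(liveOnEntry)
//          /                             \
//   left: 2 = MemoryDef(1)        right: (no defs)
//          \                             /
//                       join
//
// querying the previous def at 'join' sees two distinct incoming accesses
// (2 from 'left', 1 from 'right'), so a MemoryPhi is placed there. If 'left'
// had no def either, both operands would be 1 and the phi would be removed
// as trivial.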
  34. MemoryAccess *MemorySSAUpdater::getPreviousDefRecursive(
  35. BasicBlock *BB,
  36. DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  37. // First, do a cache lookup. Without this cache, certain CFG structures
  38. // (like a series of if statements) take exponential time to visit.
  39. auto Cached = CachedPreviousDef.find(BB);
  40. if (Cached != CachedPreviousDef.end())
  41. return Cached->second;
  42. // If this method is called from an unreachable block, return LoE.
  43. if (!MSSA->DT->isReachableFromEntry(BB))
  44. return MSSA->getLiveOnEntryDef();
  45. if (BasicBlock *Pred = BB->getUniquePredecessor()) {
  46. VisitedBlocks.insert(BB);
  47. // Single predecessor case, just recurse, we can only have one definition.
  48. MemoryAccess *Result = getPreviousDefFromEnd(Pred, CachedPreviousDef);
  49. CachedPreviousDef.insert({BB, Result});
  50. return Result;
  51. }
  52. if (VisitedBlocks.count(BB)) {
  53. // We hit our node again, meaning we had a cycle; we must insert a phi
  54. // node to break it so we have an operand. The only case this will
  55. // insert useless phis is if we have irreducible control flow.
  56. MemoryAccess *Result = MSSA->createMemoryPhi(BB);
  57. CachedPreviousDef.insert({BB, Result});
  58. return Result;
  59. }
  60. if (VisitedBlocks.insert(BB).second) {
  61. // Mark us visited so we can detect a cycle
  62. SmallVector<TrackingVH<MemoryAccess>, 8> PhiOps;
  63. // Recurse to get the values in our predecessors for placement of a
  64. // potential phi node. This will insert phi nodes if we cycle in order to
  65. // break the cycle and have an operand.
  66. bool UniqueIncomingAccess = true;
  67. MemoryAccess *SingleAccess = nullptr;
  68. for (auto *Pred : predecessors(BB)) {
  69. if (MSSA->DT->isReachableFromEntry(Pred)) {
  70. auto *IncomingAccess = getPreviousDefFromEnd(Pred, CachedPreviousDef);
  71. if (!SingleAccess)
  72. SingleAccess = IncomingAccess;
  73. else if (IncomingAccess != SingleAccess)
  74. UniqueIncomingAccess = false;
  75. PhiOps.push_back(IncomingAccess);
  76. } else
  77. PhiOps.push_back(MSSA->getLiveOnEntryDef());
  78. }
  79. // Now try to simplify the ops to avoid placing a phi.
  80. // This may return null if we never created a phi yet, that's okay
  81. MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MSSA->getMemoryAccess(BB));
  82. // See if we can avoid the phi by simplifying it.
  83. auto *Result = tryRemoveTrivialPhi(Phi, PhiOps);
  84. // If we couldn't simplify, we may have to create a phi
  85. if (Result == Phi && UniqueIncomingAccess && SingleAccess) {
  86. // A concrete Phi only exists if we created an empty one to break a cycle.
  87. if (Phi) {
  88. assert(Phi->operands().empty() && "Expected empty Phi");
  89. Phi->replaceAllUsesWith(SingleAccess);
  90. removeMemoryAccess(Phi);
  91. }
  92. Result = SingleAccess;
  93. } else if (Result == Phi && !(UniqueIncomingAccess && SingleAccess)) {
  94. if (!Phi)
  95. Phi = MSSA->createMemoryPhi(BB);
  96. // See if the existing phi operands match what we need.
  97. // Unlike normal SSA, we only allow one phi node per block, so we can't just
  98. // create a new one.
  99. if (Phi->getNumOperands() != 0) {
  100. // FIXME: Figure out whether this is dead code and if so remove it.
  101. if (!std::equal(Phi->op_begin(), Phi->op_end(), PhiOps.begin())) {
  102. // These will have been filled in by the recursive read we did above.
  103. llvm::copy(PhiOps, Phi->op_begin());
  104. std::copy(pred_begin(BB), pred_end(BB), Phi->block_begin());
  105. }
  106. } else {
  107. unsigned i = 0;
  108. for (auto *Pred : predecessors(BB))
  109. Phi->addIncoming(&*PhiOps[i++], Pred);
  110. InsertedPHIs.push_back(Phi);
  111. }
  112. Result = Phi;
  113. }
  114. // Set ourselves up for the next variable by resetting visited state.
  115. VisitedBlocks.erase(BB);
  116. CachedPreviousDef.insert({BB, Result});
  117. return Result;
  118. }
  119. llvm_unreachable("Should have hit one of the three cases above");
  120. }
  121. // This starts at the memory access, and goes backwards in the block to find the
  122. // previous definition. If a definition is not found in the block of the access,
  123. // it continues globally, creating phi nodes to ensure we have a single
  124. // definition.
  125. MemoryAccess *MemorySSAUpdater::getPreviousDef(MemoryAccess *MA) {
  126. if (auto *LocalResult = getPreviousDefInBlock(MA))
  127. return LocalResult;
  128. DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
  129. return getPreviousDefRecursive(MA->getBlock(), CachedPreviousDef);
  130. }
  131. // This starts at the memory access, and goes backwards in the block to find
  132. // the previous definition. If the definition is not found in the block of the
  133. // access, it returns nullptr.
  134. MemoryAccess *MemorySSAUpdater::getPreviousDefInBlock(MemoryAccess *MA) {
  135. auto *Defs = MSSA->getWritableBlockDefs(MA->getBlock());
  136. // It's possible there are no defs, or we got handed the first def to start.
  137. if (Defs) {
  138. // If this is a def, we can just use the def iterators.
  139. if (!isa<MemoryUse>(MA)) {
  140. auto Iter = MA->getReverseDefsIterator();
  141. ++Iter;
  142. if (Iter != Defs->rend())
  143. return &*Iter;
  144. } else {
  145. // Otherwise, we have to walk the all-accesses iterator.
  146. auto End = MSSA->getWritableBlockAccesses(MA->getBlock())->rend();
  147. for (auto &U : make_range(++MA->getReverseIterator(), End))
  148. if (!isa<MemoryUse>(U))
  149. return cast<MemoryAccess>(&U);
  150. // Note that if MA comes before Defs->begin(), we won't hit a def.
  151. return nullptr;
  152. }
  153. }
  154. return nullptr;
  155. }
  156. // This starts at the end of the block.
  157. MemoryAccess *MemorySSAUpdater::getPreviousDefFromEnd(
  158. BasicBlock *BB,
  159. DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  160. auto *Defs = MSSA->getWritableBlockDefs(BB);
  161. if (Defs) {
  162. CachedPreviousDef.insert({BB, &*Defs->rbegin()});
  163. return &*Defs->rbegin();
  164. }
  165. return getPreviousDefRecursive(BB, CachedPreviousDef);
  166. }
  167. // Recurse over a set of phi uses to eliminate the trivial ones
  168. MemoryAccess *MemorySSAUpdater::recursePhi(MemoryAccess *Phi) {
  169. if (!Phi)
  170. return nullptr;
  171. TrackingVH<MemoryAccess> Res(Phi);
  172. SmallVector<TrackingVH<Value>, 8> Uses;
  173. std::copy(Phi->user_begin(), Phi->user_end(), std::back_inserter(Uses));
  174. for (auto &U : Uses)
  175. if (MemoryPhi *UsePhi = dyn_cast<MemoryPhi>(&*U))
  176. tryRemoveTrivialPhi(UsePhi);
  177. return Res;
  178. }
  179. // Eliminate trivial phis
  180. // Phis are trivial if they are defined either by themselves, or all the same
  181. // argument.
  182. // IE phi(a, a) or b = phi(a, b) or c = phi(a, a, c)
  183. // We recursively try to remove them.
  184. MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi) {
  185. assert(Phi && "Can only remove concrete Phi.");
  186. auto OperRange = Phi->operands();
  187. return tryRemoveTrivialPhi(Phi, OperRange);
  188. }
  189. template <class RangeType>
  190. MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi,
  191. RangeType &Operands) {
  192. // Bail out on non-opt Phis.
  193. if (NonOptPhis.count(Phi))
  194. return Phi;
  195. // Detect equal or self arguments
  196. MemoryAccess *Same = nullptr;
  197. for (auto &Op : Operands) {
  198. // If the same or self, good so far
  199. if (Op == Phi || Op == Same)
  200. continue;
  201. // Not the same; return the phi, since we cannot eliminate it.
  202. if (Same)
  203. return Phi;
  204. Same = cast<MemoryAccess>(&*Op);
  205. }
  206. // Never found a non-self reference, the phi is undef
  207. if (Same == nullptr)
  208. return MSSA->getLiveOnEntryDef();
  209. if (Phi) {
  210. Phi->replaceAllUsesWith(Same);
  211. removeMemoryAccess(Phi);
  212. }
  213. // We should only end up recursing if we replaced something, in which case
  214. // we may have made other Phis trivial.
  215. return recursePhi(Same);
  216. }
  217. void MemorySSAUpdater::insertUse(MemoryUse *MU, bool RenameUses) {
  218. VisitedBlocks.clear();
  219. InsertedPHIs.clear();
  220. MU->setDefiningAccess(getPreviousDef(MU));
  221. // In cases without unreachable blocks, because uses do not create new
  222. // may-defs, there are only two cases:
  223. // 1. There was a def already below us, and therefore, we should not have
  224. // created a phi node because it was already needed for the def.
  225. //
  226. // 2. There is no def below us, and therefore, there is no extra renaming work
  227. // to do.
  228. // In cases with unreachable blocks, where the unnecessary Phis were
  229. // optimized out, adding the Use may re-insert those Phis. Hence, when
  230. // inserting Uses outside of the MSSA creation process and new Phis were
  231. // added, rename all uses if we are asked to.
  232. if (!RenameUses && !InsertedPHIs.empty()) {
  233. auto *Defs = MSSA->getBlockDefs(MU->getBlock());
  234. (void)Defs;
  235. assert((!Defs || (++Defs->begin() == Defs->end())) &&
  236. "Block may have only a Phi or no defs");
  237. }
  238. if (RenameUses && InsertedPHIs.size()) {
  239. SmallPtrSet<BasicBlock *, 16> Visited;
  240. BasicBlock *StartBlock = MU->getBlock();
  241. if (auto *Defs = MSSA->getWritableBlockDefs(StartBlock)) {
  242. MemoryAccess *FirstDef = &*Defs->begin();
  243. // Convert to incoming value if it's a memorydef. A phi *is* already an
  244. // incoming value.
  245. if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
  246. FirstDef = MD->getDefiningAccess();
  247. MSSA->renamePass(MU->getBlock(), FirstDef, Visited);
  248. }
  249. // We just inserted a phi into this block, so the incoming value will
  250. // become the phi anyway, so it does not matter what we pass.
  251. for (auto &MP : InsertedPHIs)
  252. if (MemoryPhi *Phi = cast_or_null<MemoryPhi>(MP))
  253. MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
  254. }
  255. }
  256. // Set every incoming edge {BB, MP->getBlock()} of MemoryPhi MP to NewDef.
  257. static void setMemoryPhiValueForBlock(MemoryPhi *MP, const BasicBlock *BB,
  258. MemoryAccess *NewDef) {
  259. // Replace every operand whose incoming block is BB with the new defining
  260. // access.
  261. int i = MP->getBasicBlockIndex(BB);
  262. assert(i != -1 && "Should have found the basic block in the phi");
  263. // We can't just compare i against getNumOperands since one is signed and the
  264. // other not. So use it to index into the block iterator.
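// A phi may list BB several times when there are duplicate edges; update
// every consecutive operand whose incoming block is BB, starting at the
// first match found above.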
  265. for (const BasicBlock *BlockBB : llvm::drop_begin(MP->blocks(), i)) {
  266. if (BlockBB != BB)
  267. break;
  268. MP->setIncomingValue(i, NewDef);
  269. ++i;
  270. }
  271. }
  272. // A brief description of the algorithm:
  273. // First, we compute what should define the new def, using the SSA
  274. // construction algorithm.
  275. // Then, we update the defs below us (and any new phi nodes) in the graph to
  276. // point to the correct new defs, to ensure we only have one variable, and no
  277. // disconnected stores.
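// Illustrative sketch (hypothetical accesses, not from this file): if a block
// already contains
//   1 = MemoryDef(liveOnEntry)   ; store to A
//   MemoryUse(1)                 ; load from A
// and a new store is inserted after the first one, the new access becomes
// 2 = MemoryDef(1); other MemoryDefs and MemoryPhis that used 1 are rewired
// to 2, while plain MemoryUses are left alone here and are re-optimized when
// RenameUses is set.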
  278. void MemorySSAUpdater::insertDef(MemoryDef *MD, bool RenameUses) {
  279. // Don't bother updating dead code.
  280. if (!MSSA->DT->isReachableFromEntry(MD->getBlock())) {
  281. MD->setDefiningAccess(MSSA->getLiveOnEntryDef());
  282. return;
  283. }
  284. VisitedBlocks.clear();
  285. InsertedPHIs.clear();
  286. // See if we had a local def, and if not, go hunting.
  287. MemoryAccess *DefBefore = getPreviousDef(MD);
  288. bool DefBeforeSameBlock = false;
  289. if (DefBefore->getBlock() == MD->getBlock() &&
  290. !(isa<MemoryPhi>(DefBefore) &&
  291. llvm::is_contained(InsertedPHIs, DefBefore)))
  292. DefBeforeSameBlock = true;
  293. // There is a def before us, which means we can replace any store/phi uses
  294. // of that thing with us, since we are in the way of whatever was there
  295. // before.
  296. // We now define that def's memorydefs and memoryphis
  297. if (DefBeforeSameBlock) {
  298. DefBefore->replaceUsesWithIf(MD, [MD](Use &U) {
  299. // Leave the MemoryUses alone.
  300. // Also make sure we skip ourselves to avoid self references.
  301. User *Usr = U.getUser();
  302. return !isa<MemoryUse>(Usr) && Usr != MD;
  303. // Defs are automatically unoptimized when the user is set to MD below,
  304. // because the isOptimized() call will fail to find the same ID.
  305. });
  306. }
  307. // and that def is now our defining access.
  308. MD->setDefiningAccess(DefBefore);
  309. SmallVector<WeakVH, 8> FixupList(InsertedPHIs.begin(), InsertedPHIs.end());
  310. SmallSet<WeakVH, 8> ExistingPhis;
  311. // Remember the index where we may insert new phis.
  312. unsigned NewPhiIndex = InsertedPHIs.size();
  313. if (!DefBeforeSameBlock) {
  314. // If there was a local def before us, we must have the same effect it
  315. // did; because every may-def is the same, any phis/etc. we would create would
  316. // also have been created by it. If there was no local def before us, we
  317. // performed a global update, and have to search all successors and make
  318. // sure we update the first def in each of them (following all paths until
  319. // we hit the first def along each path). This may also insert phi nodes.
  320. // TODO: There are other cases we can skip this work, such as when we have a
  321. // single successor, and only used a straight line of single pred blocks
  322. // backwards to find the def. To make that work, we'd have to track whether
  323. // getDefRecursive only ever used the single predecessor case. These types
  324. // of paths also only exist in between CFG simplifications.
  325. // If this is the first def in the block and this insert is in an arbitrary
  326. // place, compute IDF and place phis.
  327. SmallPtrSet<BasicBlock *, 2> DefiningBlocks;
  328. // If this is the last Def in the block, we may need additional Phis.
  329. // Compute IDF in all cases, as renaming needs to be done even when MD is
  330. // not the last access, because it can introduce a new access past which a
  331. // previous access was optimized; that access needs to be reoptimized.
  332. DefiningBlocks.insert(MD->getBlock());
  333. for (const auto &VH : InsertedPHIs)
  334. if (const auto *RealPHI = cast_or_null<MemoryPhi>(VH))
  335. DefiningBlocks.insert(RealPHI->getBlock());
  336. ForwardIDFCalculator IDFs(*MSSA->DT);
  337. SmallVector<BasicBlock *, 32> IDFBlocks;
  338. IDFs.setDefiningBlocks(DefiningBlocks);
  339. IDFs.calculate(IDFBlocks);
  340. SmallVector<AssertingVH<MemoryPhi>, 4> NewInsertedPHIs;
  341. for (auto *BBIDF : IDFBlocks) {
  342. auto *MPhi = MSSA->getMemoryAccess(BBIDF);
  343. if (!MPhi) {
  344. MPhi = MSSA->createMemoryPhi(BBIDF);
  345. NewInsertedPHIs.push_back(MPhi);
  346. } else {
  347. ExistingPhis.insert(MPhi);
  348. }
  349. // Add the phis created into the IDF blocks to NonOptPhis, so they are not
  350. // optimized out as trivial by the call to getPreviousDefFromEnd below.
  351. // Once they are complete, all these Phis are added to the FixupList, and
  352. // removed from NonOptPhis inside fixupDefs(). Existing Phis in IDF may
  353. // need fixing as well, and potentially be trivial before this insertion,
  354. // hence add all IDF Phis. See PR43044.
  355. NonOptPhis.insert(MPhi);
  356. }
  357. for (auto &MPhi : NewInsertedPHIs) {
  358. auto *BBIDF = MPhi->getBlock();
  359. for (auto *Pred : predecessors(BBIDF)) {
  360. DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
  361. MPhi->addIncoming(getPreviousDefFromEnd(Pred, CachedPreviousDef), Pred);
  362. }
  363. }
  364. // Re-take the index where we're adding the new phis, because the above call
  365. // to getPreviousDefFromEnd may have inserted into InsertedPHIs.
  366. NewPhiIndex = InsertedPHIs.size();
  367. for (auto &MPhi : NewInsertedPHIs) {
  368. InsertedPHIs.push_back(&*MPhi);
  369. FixupList.push_back(&*MPhi);
  370. }
  371. FixupList.push_back(MD);
  372. }
  373. // Remember the index where we stopped inserting new phis above, since the
  374. // fixupDefs call in the loop below may insert more that are already minimal.
  375. unsigned NewPhiIndexEnd = InsertedPHIs.size();
  376. while (!FixupList.empty()) {
  377. unsigned StartingPHISize = InsertedPHIs.size();
  378. fixupDefs(FixupList);
  379. FixupList.clear();
  380. // Put any new phis on the fixup list, and process them
  381. FixupList.append(InsertedPHIs.begin() + StartingPHISize, InsertedPHIs.end());
  382. }
  383. // Optimize potentially non-minimal phis added in this method.
  384. unsigned NewPhiSize = NewPhiIndexEnd - NewPhiIndex;
  385. if (NewPhiSize)
  386. tryRemoveTrivialPhis(ArrayRef<WeakVH>(&InsertedPHIs[NewPhiIndex], NewPhiSize));
  387. // Now that all fixups are done, rename all uses if we are asked. The defs are
  388. // guaranteed to be in reachable code due to the check at the method entry.
  389. BasicBlock *StartBlock = MD->getBlock();
  390. if (RenameUses) {
  391. SmallPtrSet<BasicBlock *, 16> Visited;
  392. // We are guaranteed there is a def in the block, because we just got it
  393. // handed to us in this function.
  394. MemoryAccess *FirstDef = &*MSSA->getWritableBlockDefs(StartBlock)->begin();
  395. // Convert to incoming value if it's a memorydef. A phi *is* already an
  396. // incoming value.
  397. if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
  398. FirstDef = MD->getDefiningAccess();
  399. MSSA->renamePass(MD->getBlock(), FirstDef, Visited);
  400. // We just inserted a phi into this block, so the incoming value will become
  401. // the phi anyway, so it does not matter what we pass.
  402. for (auto &MP : InsertedPHIs) {
  403. MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
  404. if (Phi)
  405. MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
  406. }
  407. // Existing Phi blocks may need renaming too, if an access was previously
  408. // optimized and the inserted Defs "covers" the Optimized value.
  409. for (const auto &MP : ExistingPhis) {
  410. MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
  411. if (Phi)
  412. MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
  413. }
  414. }
  415. }
  416. void MemorySSAUpdater::fixupDefs(const SmallVectorImpl<WeakVH> &Vars) {
  417. SmallPtrSet<const BasicBlock *, 8> Seen;
  418. SmallVector<const BasicBlock *, 16> Worklist;
  419. for (const auto &Var : Vars) {
  420. MemoryAccess *NewDef = dyn_cast_or_null<MemoryAccess>(Var);
  421. if (!NewDef)
  422. continue;
  423. // First, see if there is a local def after the operand.
  424. auto *Defs = MSSA->getWritableBlockDefs(NewDef->getBlock());
  425. auto DefIter = NewDef->getDefsIterator();
  426. // The temporary Phi is being fixed; remove it from NonOptPhis so it can be optimized.
  427. if (MemoryPhi *Phi = dyn_cast<MemoryPhi>(NewDef))
  428. NonOptPhis.erase(Phi);
  429. // If there is a local def after us, we only have to rename that.
  430. if (++DefIter != Defs->end()) {
  431. cast<MemoryDef>(DefIter)->setDefiningAccess(NewDef);
  432. continue;
  433. }
  434. // Otherwise, we need to search down through the CFG.
  435. // For each of our successors, handle it directly if there is a phi, or
  436. // place on the fixup worklist.
  437. for (const auto *S : successors(NewDef->getBlock())) {
  438. if (auto *MP = MSSA->getMemoryAccess(S))
  439. setMemoryPhiValueForBlock(MP, NewDef->getBlock(), NewDef);
  440. else
  441. Worklist.push_back(S);
  442. }
  443. while (!Worklist.empty()) {
  444. const BasicBlock *FixupBlock = Worklist.pop_back_val();
  445. // Get the first def in the block that isn't a phi node.
  446. if (auto *Defs = MSSA->getWritableBlockDefs(FixupBlock)) {
  447. auto *FirstDef = &*Defs->begin();
  448. // The loop above and below should have taken care of phi nodes
  449. assert(!isa<MemoryPhi>(FirstDef) &&
  450. "Should have already handled phi nodes!");
  451. // We are now this def's defining access; make sure we actually dominate
  452. // it
  453. assert(MSSA->dominates(NewDef, FirstDef) &&
  454. "Should have dominated the new access");
  455. // This may insert new phi nodes, because we are not guaranteed the
  456. // block we are processing has a single pred, and depending on where the
  457. // store was inserted, it may require phi nodes below it.
  458. cast<MemoryDef>(FirstDef)->setDefiningAccess(getPreviousDef(FirstDef));
  459. return;
  460. }
  461. // We didn't find a def, so we must continue.
  462. for (const auto *S : successors(FixupBlock)) {
  463. // If there is a phi node, handle it.
  464. // Otherwise, put the block on the worklist
  465. if (auto *MP = MSSA->getMemoryAccess(S))
  466. setMemoryPhiValueForBlock(MP, FixupBlock, NewDef);
  467. else {
  468. // If we cycle, we should have ended up at a phi node that we already
  469. // processed. FIXME: Double check this
  470. if (!Seen.insert(S).second)
  471. continue;
  472. Worklist.push_back(S);
  473. }
  474. }
  475. }
  476. }
  477. }
  478. void MemorySSAUpdater::removeEdge(BasicBlock *From, BasicBlock *To) {
  479. if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
  480. MPhi->unorderedDeleteIncomingBlock(From);
  481. tryRemoveTrivialPhi(MPhi);
  482. }
  483. }
  484. void MemorySSAUpdater::removeDuplicatePhiEdgesBetween(const BasicBlock *From,
  485. const BasicBlock *To) {
  486. if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
  487. bool Found = false;
  488. MPhi->unorderedDeleteIncomingIf([&](const MemoryAccess *, BasicBlock *B) {
  489. if (From != B)
  490. return false;
  491. if (Found)
  492. return true;
  493. Found = true;
  494. return false;
  495. });
  496. tryRemoveTrivialPhi(MPhi);
  497. }
  498. }
  499. /// If all arguments of a MemoryPHI are defined by the same incoming
  500. /// argument, return that argument.
  501. static MemoryAccess *onlySingleValue(MemoryPhi *MP) {
  502. MemoryAccess *MA = nullptr;
  503. for (auto &Arg : MP->operands()) {
  504. if (!MA)
  505. MA = cast<MemoryAccess>(Arg);
  506. else if (MA != Arg)
  507. return nullptr;
  508. }
  509. return MA;
  510. }
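// Map a defining access in the original block to its counterpart for a cloned
// block: a MemoryDef is translated through VMap to the clone's access (walking
// back to an earlier def when the simplified clone no longer has a MemoryDef),
// and a MemoryPhi is translated through MPhiMap.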
  511. static MemoryAccess *getNewDefiningAccessForClone(MemoryAccess *MA,
  512. const ValueToValueMapTy &VMap,
  513. PhiToDefMap &MPhiMap,
  514. bool CloneWasSimplified,
  515. MemorySSA *MSSA) {
  516. MemoryAccess *InsnDefining = MA;
  517. if (MemoryDef *DefMUD = dyn_cast<MemoryDef>(InsnDefining)) {
  518. if (!MSSA->isLiveOnEntryDef(DefMUD)) {
  519. Instruction *DefMUDI = DefMUD->getMemoryInst();
  520. assert(DefMUDI && "Found MemoryUseOrDef with no Instruction.");
  521. if (Instruction *NewDefMUDI =
  522. cast_or_null<Instruction>(VMap.lookup(DefMUDI))) {
  523. InsnDefining = MSSA->getMemoryAccess(NewDefMUDI);
  524. if (!CloneWasSimplified)
  525. assert(InsnDefining && "Defining instruction cannot be nullptr.");
  526. else if (!InsnDefining || isa<MemoryUse>(InsnDefining)) {
  527. // The clone was simplified, it's no longer a MemoryDef, look up.
  528. auto DefIt = DefMUD->getDefsIterator();
  529. // Since simplified clones only occur in single block cloning, a
  530. // previous definition must exist, otherwise NewDefMUDI would not
  531. // have been found in VMap.
  532. assert(DefIt != MSSA->getBlockDefs(DefMUD->getBlock())->begin() &&
  533. "Previous def must exist");
  534. InsnDefining = getNewDefiningAccessForClone(
  535. &*(--DefIt), VMap, MPhiMap, CloneWasSimplified, MSSA);
  536. }
  537. }
  538. }
  539. } else {
  540. MemoryPhi *DefPhi = cast<MemoryPhi>(InsnDefining);
  541. if (MemoryAccess *NewDefPhi = MPhiMap.lookup(DefPhi))
  542. InsnDefining = NewDefPhi;
  543. }
  544. assert(InsnDefining && "Defining instruction cannot be nullptr.");
  545. return InsnDefining;
  546. }
  547. void MemorySSAUpdater::cloneUsesAndDefs(BasicBlock *BB, BasicBlock *NewBB,
  548. const ValueToValueMapTy &VMap,
  549. PhiToDefMap &MPhiMap,
  550. bool CloneWasSimplified) {
  551. const MemorySSA::AccessList *Acc = MSSA->getBlockAccesses(BB);
  552. if (!Acc)
  553. return;
  554. for (const MemoryAccess &MA : *Acc) {
  555. if (const MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&MA)) {
  556. Instruction *Insn = MUD->getMemoryInst();
  557. // Entry does not exist if the clone of the block did not clone all
  558. // instructions. This occurs in LoopRotate when cloning instructions
  559. // from the old header to the old preheader. The cloned instruction may
  560. // also be a simplified Value, not an Instruction (see LoopRotate).
  561. // Also in LoopRotate, even when it's an instruction, due to it being
  562. // simplified, it may be a Use rather than a Def, so we cannot use MUD as
  563. // a template. Calls coming from updateForClonedBlockIntoPred ensure this.
  564. if (Instruction *NewInsn =
  565. dyn_cast_or_null<Instruction>(VMap.lookup(Insn))) {
  566. MemoryAccess *NewUseOrDef = MSSA->createDefinedAccess(
  567. NewInsn,
  568. getNewDefiningAccessForClone(MUD->getDefiningAccess(), VMap,
  569. MPhiMap, CloneWasSimplified, MSSA),
  570. /*Template=*/CloneWasSimplified ? nullptr : MUD,
  571. /*CreationMustSucceed=*/CloneWasSimplified ? false : true);
  572. if (NewUseOrDef)
  573. MSSA->insertIntoListsForBlock(NewUseOrDef, NewBB, MemorySSA::End);
  574. }
  575. }
  576. }
  577. }
  578. void MemorySSAUpdater::updatePhisWhenInsertingUniqueBackedgeBlock(
  579. BasicBlock *Header, BasicBlock *Preheader, BasicBlock *BEBlock) {
  580. auto *MPhi = MSSA->getMemoryAccess(Header);
  581. if (!MPhi)
  582. return;
  583. // Create phi node in the backedge block and populate it with the same
  584. // incoming values as MPhi. Skip incoming values coming from Preheader.
  585. auto *NewMPhi = MSSA->createMemoryPhi(BEBlock);
  586. bool HasUniqueIncomingValue = true;
  587. MemoryAccess *UniqueValue = nullptr;
  588. for (unsigned I = 0, E = MPhi->getNumIncomingValues(); I != E; ++I) {
  589. BasicBlock *IBB = MPhi->getIncomingBlock(I);
  590. MemoryAccess *IV = MPhi->getIncomingValue(I);
  591. if (IBB != Preheader) {
  592. NewMPhi->addIncoming(IV, IBB);
  593. if (HasUniqueIncomingValue) {
  594. if (!UniqueValue)
  595. UniqueValue = IV;
  596. else if (UniqueValue != IV)
  597. HasUniqueIncomingValue = false;
  598. }
  599. }
  600. }
  601. // Update incoming edges into MPhi. Remove all but the incoming edge from
  602. // Preheader. Add an incoming edge from BEBlock with NewMPhi as its value.
  603. auto *AccFromPreheader = MPhi->getIncomingValueForBlock(Preheader);
  604. MPhi->setIncomingValue(0, AccFromPreheader);
  605. MPhi->setIncomingBlock(0, Preheader);
  606. for (unsigned I = MPhi->getNumIncomingValues() - 1; I >= 1; --I)
  607. MPhi->unorderedDeleteIncoming(I);
  608. MPhi->addIncoming(NewMPhi, BEBlock);
  609. // If NewMPhi is a trivial phi, remove it. Its use in the header MPhi will be
  610. // replaced with the unique value.
  611. tryRemoveTrivialPhi(NewMPhi);
  612. }
  613. void MemorySSAUpdater::updateForClonedLoop(const LoopBlocksRPO &LoopBlocks,
  614. ArrayRef<BasicBlock *> ExitBlocks,
  615. const ValueToValueMapTy &VMap,
  616. bool IgnoreIncomingWithNoClones) {
  617. PhiToDefMap MPhiMap;
  618. auto FixPhiIncomingValues = [&](MemoryPhi *Phi, MemoryPhi *NewPhi) {
  619. assert(Phi && NewPhi && "Invalid Phi nodes.");
  620. BasicBlock *NewPhiBB = NewPhi->getBlock();
  621. SmallPtrSet<BasicBlock *, 4> NewPhiBBPreds(pred_begin(NewPhiBB),
  622. pred_end(NewPhiBB));
  623. for (unsigned It = 0, E = Phi->getNumIncomingValues(); It < E; ++It) {
  624. MemoryAccess *IncomingAccess = Phi->getIncomingValue(It);
  625. BasicBlock *IncBB = Phi->getIncomingBlock(It);
  626. if (BasicBlock *NewIncBB = cast_or_null<BasicBlock>(VMap.lookup(IncBB)))
  627. IncBB = NewIncBB;
  628. else if (IgnoreIncomingWithNoClones)
  629. continue;
  630. // Now we have IncBB, and will need to add incoming from it to NewPhi.
  631. // If IncBB is not a predecessor of NewPhiBB, then do not add it.
  632. // NewPhiBB was cloned without that edge.
  633. if (!NewPhiBBPreds.count(IncBB))
  634. continue;
  635. // Determine incoming value and add it as incoming from IncBB.
  636. if (MemoryUseOrDef *IncMUD = dyn_cast<MemoryUseOrDef>(IncomingAccess)) {
  637. if (!MSSA->isLiveOnEntryDef(IncMUD)) {
  638. Instruction *IncI = IncMUD->getMemoryInst();
  639. assert(IncI && "Found MemoryUseOrDef with no Instruction.");
  640. if (Instruction *NewIncI =
  641. cast_or_null<Instruction>(VMap.lookup(IncI))) {
  642. IncMUD = MSSA->getMemoryAccess(NewIncI);
  643. assert(IncMUD &&
  644. "MemoryUseOrDef cannot be null, all preds processed.");
  645. }
  646. }
  647. NewPhi->addIncoming(IncMUD, IncBB);
  648. } else {
  649. MemoryPhi *IncPhi = cast<MemoryPhi>(IncomingAccess);
  650. if (MemoryAccess *NewDefPhi = MPhiMap.lookup(IncPhi))
  651. NewPhi->addIncoming(NewDefPhi, IncBB);
  652. else
  653. NewPhi->addIncoming(IncPhi, IncBB);
  654. }
  655. }
  656. if (auto *SingleAccess = onlySingleValue(NewPhi)) {
  657. MPhiMap[Phi] = SingleAccess;
  658. removeMemoryAccess(NewPhi);
  659. }
  660. };
  661. auto ProcessBlock = [&](BasicBlock *BB) {
  662. BasicBlock *NewBlock = cast_or_null<BasicBlock>(VMap.lookup(BB));
  663. if (!NewBlock)
  664. return;
  665. assert(!MSSA->getWritableBlockAccesses(NewBlock) &&
  666. "Cloned block should have no accesses");
  667. // Add MemoryPhi.
  668. if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB)) {
  669. MemoryPhi *NewPhi = MSSA->createMemoryPhi(NewBlock);
  670. MPhiMap[MPhi] = NewPhi;
  671. }
  672. // Update Uses and Defs.
  673. cloneUsesAndDefs(BB, NewBlock, VMap, MPhiMap);
  674. };
  675. for (auto *BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
  676. ProcessBlock(BB);
  677. for (auto *BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
  678. if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
  679. if (MemoryAccess *NewPhi = MPhiMap.lookup(MPhi))
  680. FixPhiIncomingValues(MPhi, cast<MemoryPhi>(NewPhi));
  681. }
  682. void MemorySSAUpdater::updateForClonedBlockIntoPred(
  683. BasicBlock *BB, BasicBlock *P1, const ValueToValueMapTy &VM) {
  684. // All defs/phis from outside BB that are used in BB are valid uses in P1,
  685. // since those defs/phis must have dominated BB and therefore also dominate P1.
  686. // Defs from BB being used in BB will be replaced with the cloned defs from
  687. // VM. The uses of BB's Phi (if it exists) in BB will be replaced by the
  688. // incoming def into the Phi from P1.
  689. // Instructions cloned into the predecessor are in practice sometimes
  690. // simplified, so disable the use of the template, and create an access from
  691. // scratch.
  692. PhiToDefMap MPhiMap;
  693. if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
  694. MPhiMap[MPhi] = MPhi->getIncomingValueForBlock(P1);
  695. cloneUsesAndDefs(BB, P1, VM, MPhiMap, /*CloneWasSimplified=*/true);
  696. }
  697. template <typename Iter>
  698. void MemorySSAUpdater::privateUpdateExitBlocksForClonedLoop(
  699. ArrayRef<BasicBlock *> ExitBlocks, Iter ValuesBegin, Iter ValuesEnd,
  700. DominatorTree &DT) {
  701. SmallVector<CFGUpdate, 4> Updates;
  702. // Update/insert phis in all successors of exit blocks.
  703. for (auto *Exit : ExitBlocks)
  704. for (const ValueToValueMapTy *VMap : make_range(ValuesBegin, ValuesEnd))
  705. if (BasicBlock *NewExit = cast_or_null<BasicBlock>(VMap->lookup(Exit))) {
  706. BasicBlock *ExitSucc = NewExit->getTerminator()->getSuccessor(0);
  707. Updates.push_back({DT.Insert, NewExit, ExitSucc});
  708. }
  709. applyInsertUpdates(Updates, DT);
  710. }
  711. void MemorySSAUpdater::updateExitBlocksForClonedLoop(
  712. ArrayRef<BasicBlock *> ExitBlocks, const ValueToValueMapTy &VMap,
  713. DominatorTree &DT) {
  714. const ValueToValueMapTy *const Arr[] = {&VMap};
  715. privateUpdateExitBlocksForClonedLoop(ExitBlocks, std::begin(Arr),
  716. std::end(Arr), DT);
  717. }
  718. void MemorySSAUpdater::updateExitBlocksForClonedLoop(
  719. ArrayRef<BasicBlock *> ExitBlocks,
  720. ArrayRef<std::unique_ptr<ValueToValueMapTy>> VMaps, DominatorTree &DT) {
  721. auto GetPtr = [&](const std::unique_ptr<ValueToValueMapTy> &I) {
  722. return I.get();
  723. };
  724. using MappedIteratorType =
  725. mapped_iterator<const std::unique_ptr<ValueToValueMapTy> *,
  726. decltype(GetPtr)>;
  727. auto MapBegin = MappedIteratorType(VMaps.begin(), GetPtr);
  728. auto MapEnd = MappedIteratorType(VMaps.end(), GetPtr);
  729. privateUpdateExitBlocksForClonedLoop(ExitBlocks, MapBegin, MapEnd, DT);
  730. }
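// Split the updates into insertions and deletions. When the batch mixes the
// two, the insertions are processed against a CFG view in which the deleted
// edges still exist (see RevDeleteUpdates below); the deleted edges are only
// pruned from MemoryPhis at the end, via removeEdge().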
  731. void MemorySSAUpdater::applyUpdates(ArrayRef<CFGUpdate> Updates,
  732. DominatorTree &DT, bool UpdateDT) {
  733. SmallVector<CFGUpdate, 4> DeleteUpdates;
  734. SmallVector<CFGUpdate, 4> RevDeleteUpdates;
  735. SmallVector<CFGUpdate, 4> InsertUpdates;
  736. for (const auto &Update : Updates) {
  737. if (Update.getKind() == DT.Insert)
  738. InsertUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
  739. else {
  740. DeleteUpdates.push_back({DT.Delete, Update.getFrom(), Update.getTo()});
  741. RevDeleteUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
  742. }
  743. }
  744. if (!DeleteUpdates.empty()) {
  745. if (!InsertUpdates.empty()) {
  746. if (!UpdateDT) {
  747. SmallVector<CFGUpdate, 0> Empty;
  748. // Deletes are applied reversed (as insertions), because this CFGView is pretending the
  749. // deletes did not happen yet, hence the edges still exist.
  750. DT.applyUpdates(Empty, RevDeleteUpdates);
  751. } else {
  752. // Apply all updates, with the RevDeleteUpdates as PostCFGView.
  753. DT.applyUpdates(Updates, RevDeleteUpdates);
  754. }
  755. // Note: the MSSA update below doesn't distinguish between a GD with
  756. // (RevDelete,false) and (Delete, true), but this matters for the DT
  757. // updates above; for "children" purposes they are equivalent; but the
  758. // updates themselves convey the desired update, used inside DT only.
  759. GraphDiff<BasicBlock *> GD(RevDeleteUpdates);
  760. applyInsertUpdates(InsertUpdates, DT, &GD);
  761. // Update DT to redelete edges; this matches the real CFG so we can
  762. // perform the standard update without a postview of the CFG.
  763. DT.applyUpdates(DeleteUpdates);
  764. } else {
  765. if (UpdateDT)
  766. DT.applyUpdates(DeleteUpdates);
  767. }
  768. } else {
  769. if (UpdateDT)
  770. DT.applyUpdates(Updates);
  771. GraphDiff<BasicBlock *> GD;
  772. applyInsertUpdates(InsertUpdates, DT, &GD);
  773. }
  774. // Update for deleted edges
  775. for (auto &Update : DeleteUpdates)
  776. removeEdge(Update.getFrom(), Update.getTo());
  777. }
  778. void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
  779. DominatorTree &DT) {
  780. GraphDiff<BasicBlock *> GD;
  781. applyInsertUpdates(Updates, DT, &GD);
  782. }
  783. void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
  784. DominatorTree &DT,
  785. const GraphDiff<BasicBlock *> *GD) {
  786. // Get recursive last Def, assuming well formed MSSA and updated DT.
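// The walk terminates at the first block with a Defs list; a block with a
// single predecessor continues through that predecessor, and a merge point
// continues through its immediate dominator (a merge of different defs would
// have required a Phi here, which would itself be in the Defs list).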
  787. auto GetLastDef = [&](BasicBlock *BB) -> MemoryAccess * {
  788. while (true) {
  789. MemorySSA::DefsList *Defs = MSSA->getWritableBlockDefs(BB);
  790. // Return last Def or Phi in BB, if it exists.
  791. if (Defs)
  792. return &*(--Defs->end());
  793. // Check the number of predecessors; we only care if there's more than one.
  794. unsigned Count = 0;
  795. BasicBlock *Pred = nullptr;
  796. for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BB)) {
  797. Pred = Pi;
  798. Count++;
  799. if (Count == 2)
  800. break;
  801. }
  802. // If BB has multiple predecessors, get last definition from IDom.
  803. if (Count != 1) {
  804. // [SimpleLoopUnswitch] If BB is a dead block about to be deleted, its DT
  805. // node is invalidated. Return LoE as its last def. This will be added to
  806. // the MemoryPhi node, and later deleted when the block is deleted.
  807. if (!DT.getNode(BB))
  808. return MSSA->getLiveOnEntryDef();
  809. if (auto *IDom = DT.getNode(BB)->getIDom())
  810. if (IDom->getBlock() != BB) {
  811. BB = IDom->getBlock();
  812. continue;
  813. }
  814. return MSSA->getLiveOnEntryDef();
  815. } else {
  816. // Single predecessor, BB cannot be dead. GetLastDef of Pred.
  817. assert(Count == 1 && Pred && "Single predecessor expected.");
  818. // BB can be unreachable though, return LoE if that is the case.
  819. if (!DT.getNode(BB))
  820. return MSSA->getLiveOnEntryDef();
  821. BB = Pred;
  822. }
  823. };
  824. llvm_unreachable("Unable to get last definition.");
  825. };
  826. // Get nearest IDom given a set of blocks.
  827. // TODO: this can be optimized by starting the search at the node with the
  828. // lowest level (highest in the tree).
  829. auto FindNearestCommonDominator =
  830. [&](const SmallSetVector<BasicBlock *, 2> &BBSet) -> BasicBlock * {
  831. BasicBlock *PrevIDom = *BBSet.begin();
  832. for (auto *BB : BBSet)
  833. PrevIDom = DT.findNearestCommonDominator(PrevIDom, BB);
  834. return PrevIDom;
  835. };
  836. // Get all blocks that dominate PrevIDom, stop when reaching CurrIDom. Do not
  837. // include CurrIDom.
  838. auto GetNoLongerDomBlocks =
  839. [&](BasicBlock *PrevIDom, BasicBlock *CurrIDom,
  840. SmallVectorImpl<BasicBlock *> &BlocksPrevDom) {
  841. if (PrevIDom == CurrIDom)
  842. return;
  843. BlocksPrevDom.push_back(PrevIDom);
  844. BasicBlock *NextIDom = PrevIDom;
  845. while (BasicBlock *UpIDom =
  846. DT.getNode(NextIDom)->getIDom()->getBlock()) {
  847. if (UpIDom == CurrIDom)
  848. break;
  849. BlocksPrevDom.push_back(UpIDom);
  850. NextIDom = UpIDom;
  851. }
  852. };
  853. // Map a BB to its predecessors: added + previously existing. To get a
  854. // deterministic order, store predecessors as SetVectors. The order in each
  855. // will be defined by the order in Updates (fixed) and the order given by
  856. // children<> (also fixed). Since we further iterate over these ordered sets,
  857. // we lose the information of multiple edges possibly existing between two
  858. // blocks, so we'll keep an EdgeCount map for that.
  859. // An alternate implementation could keep unordered set for the predecessors,
  860. // traverse either Updates or children<> each time to get the deterministic
  861. // order, and drop the usage of EdgeCount. This alternate approach would still
  862. // require querying the maps for each predecessor, and the children<> call has
  863. // additional computation inside for creating the snapshot-graph predecessors.
  864. // As such, we favor using a little additional storage and less compute time.
  865. // This decision can be revisited if we find the alternative more favorable.
  866. struct PredInfo {
  867. SmallSetVector<BasicBlock *, 2> Added;
  868. SmallSetVector<BasicBlock *, 2> Prev;
  869. };
  870. SmallDenseMap<BasicBlock *, PredInfo> PredMap;
  871. for (const auto &Edge : Updates) {
  872. BasicBlock *BB = Edge.getTo();
  873. auto &AddedBlockSet = PredMap[BB].Added;
  874. AddedBlockSet.insert(Edge.getFrom());
  875. }
  876. // Store all existing predecessors for each BB; at least one must exist.
  877. SmallDenseMap<std::pair<BasicBlock *, BasicBlock *>, int> EdgeCountMap;
  878. SmallPtrSet<BasicBlock *, 2> NewBlocks;
  879. for (auto &BBPredPair : PredMap) {
  880. auto *BB = BBPredPair.first;
  881. const auto &AddedBlockSet = BBPredPair.second.Added;
  882. auto &PrevBlockSet = BBPredPair.second.Prev;
  883. for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BB)) {
  884. if (!AddedBlockSet.count(Pi))
  885. PrevBlockSet.insert(Pi);
  886. EdgeCountMap[{Pi, BB}]++;
  887. }
  888. if (PrevBlockSet.empty()) {
  889. assert(pred_size(BB) == AddedBlockSet.size() && "Duplicate edges added.");
  890. LLVM_DEBUG(
  891. dbgs()
  892. << "Adding a predecessor to a block with no predecessors. "
  893. "This must be an edge added to a new, likely cloned, block. "
  894. "Its memory accesses must be already correct, assuming completed "
  895. "via the updateExitBlocksForClonedLoop API. "
  896. "Assert a single such edge is added so no phi addition or "
  897. "additional processing is required.\n");
  898. assert(AddedBlockSet.size() == 1 &&
  899. "Can only handle adding one predecessor to a new block.");
  900. // Need to remove new blocks from PredMap. Remove below to not invalidate
  901. // iterator here.
  902. NewBlocks.insert(BB);
  903. }
  904. }
  905. // Nothing to process for new/cloned blocks.
  906. for (auto *BB : NewBlocks)
  907. PredMap.erase(BB);
  908. SmallVector<BasicBlock *, 16> BlocksWithDefsToReplace;
  909. SmallVector<WeakVH, 8> InsertedPhis;
  910. // First create MemoryPhis in all blocks that don't have one. Create in the
  911. // order found in Updates, not in PredMap, to get deterministic numbering.
  912. for (const auto &Edge : Updates) {
  913. BasicBlock *BB = Edge.getTo();
  914. if (PredMap.count(BB) && !MSSA->getMemoryAccess(BB))
  915. InsertedPhis.push_back(MSSA->createMemoryPhi(BB));
  916. }
  917. // Now we'll fill in the MemoryPhis with the right incoming values.
  918. for (auto &BBPredPair : PredMap) {
  919. auto *BB = BBPredPair.first;
  920. const auto &PrevBlockSet = BBPredPair.second.Prev;
  921. const auto &AddedBlockSet = BBPredPair.second.Added;
  922. assert(!PrevBlockSet.empty() &&
  923. "At least one previous predecessor must exist.");
  924. // TODO: if this becomes a bottleneck, we can save on GetLastDef calls by
  925. // keeping this map before the loop. We can reuse already populated entries
  926. // if an edge is added from the same predecessor to two different blocks,
  927. // and this does happen in rotate. Note that the map needs to be updated
  928. // when deleting non-necessary phis below, if the phi is in the map by
  929. // replacing the value with DefP1.
  930. SmallDenseMap<BasicBlock *, MemoryAccess *> LastDefAddedPred;
  931. for (auto *AddedPred : AddedBlockSet) {
  932. auto *DefPn = GetLastDef(AddedPred);
  933. assert(DefPn != nullptr && "Unable to find last definition.");
  934. LastDefAddedPred[AddedPred] = DefPn;
  935. }
  936. MemoryPhi *NewPhi = MSSA->getMemoryAccess(BB);
  937. // If Phi is not empty, add an incoming edge from each added pred. Must
  938. // still compute blocks with defs to replace for this block below.
  939. if (NewPhi->getNumOperands()) {
  940. for (auto *Pred : AddedBlockSet) {
  941. auto *LastDefForPred = LastDefAddedPred[Pred];
  942. for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
  943. NewPhi->addIncoming(LastDefForPred, Pred);
  944. }
  945. } else {
  946. // Pick any existing predecessor and get its definition. All other
  947. // existing predecessors should have the same one, since no phi existed.
  948. auto *P1 = *PrevBlockSet.begin();
  949. MemoryAccess *DefP1 = GetLastDef(P1);
  950. // Check DefP1 against all Defs in LastDefAddedPred. If they are all the same,
  951. // nothing to add.
  952. bool InsertPhi = false;
  953. for (auto LastDefPredPair : LastDefAddedPred)
  954. if (DefP1 != LastDefPredPair.second) {
  955. InsertPhi = true;
  956. break;
  957. }
  958. if (!InsertPhi) {
  959. // Since NewPhi may be used in other newly added Phis, replace all uses
  960. // of NewPhi with the definition coming from all predecessors (DefP1),
  961. // before deleting it.
  962. NewPhi->replaceAllUsesWith(DefP1);
  963. removeMemoryAccess(NewPhi);
  964. continue;
  965. }
  966. // Update Phi with new values for new predecessors and old value for all
  967. // other predecessors. Since AddedBlockSet and PrevBlockSet are ordered
  968. // sets, the order of entries in NewPhi is deterministic.
  969. for (auto *Pred : AddedBlockSet) {
  970. auto *LastDefForPred = LastDefAddedPred[Pred];
  971. for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
  972. NewPhi->addIncoming(LastDefForPred, Pred);
  973. }
  974. for (auto *Pred : PrevBlockSet)
  975. for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
  976. NewPhi->addIncoming(DefP1, Pred);
  977. }
  978. // Get all blocks that used to dominate BB and no longer do after adding
  979. // AddedBlockSet, where PrevBlockSet are the previously known predecessors.
  980. assert(DT.getNode(BB)->getIDom() && "BB does not have valid idom");
  981. BasicBlock *PrevIDom = FindNearestCommonDominator(PrevBlockSet);
  982. assert(PrevIDom && "Previous IDom should exist");
  983. BasicBlock *NewIDom = DT.getNode(BB)->getIDom()->getBlock();
  984. assert(NewIDom && "BB should have a new valid idom");
  985. assert(DT.dominates(NewIDom, PrevIDom) &&
  986. "New idom should dominate old idom");
  987. GetNoLongerDomBlocks(PrevIDom, NewIDom, BlocksWithDefsToReplace);
  988. }
  989. tryRemoveTrivialPhis(InsertedPhis);
  990. // Create the set of blocks that now have a definition. We'll use this to
  991. // compute IDF and add Phis there next.
  992. SmallVector<BasicBlock *, 8> BlocksToProcess;
  993. for (auto &VH : InsertedPhis)
  994. if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
  995. BlocksToProcess.push_back(MPhi->getBlock());
  996. // Compute IDF and add Phis in all IDF blocks that do not have one.
  997. SmallVector<BasicBlock *, 32> IDFBlocks;
  998. if (!BlocksToProcess.empty()) {
  999. ForwardIDFCalculator IDFs(DT, GD);
  1000. SmallPtrSet<BasicBlock *, 16> DefiningBlocks(BlocksToProcess.begin(),
  1001. BlocksToProcess.end());
  1002. IDFs.setDefiningBlocks(DefiningBlocks);
  1003. IDFs.calculate(IDFBlocks);
  1004. SmallSetVector<MemoryPhi *, 4> PhisToFill;
  1005. // First create all needed Phis.
  1006. for (auto *BBIDF : IDFBlocks)
  1007. if (!MSSA->getMemoryAccess(BBIDF)) {
  1008. auto *IDFPhi = MSSA->createMemoryPhi(BBIDF);
  1009. InsertedPhis.push_back(IDFPhi);
  1010. PhisToFill.insert(IDFPhi);
  1011. }
  1012. // Then update or insert their correct incoming values.
  1013. for (auto *BBIDF : IDFBlocks) {
  1014. auto *IDFPhi = MSSA->getMemoryAccess(BBIDF);
  1015. assert(IDFPhi && "Phi must exist");
  1016. if (!PhisToFill.count(IDFPhi)) {
  1017. // Update existing Phi.
  1018. // FIXME: some updates may be redundant, try to optimize and skip some.
  1019. for (unsigned I = 0, E = IDFPhi->getNumIncomingValues(); I < E; ++I)
  1020. IDFPhi->setIncomingValue(I, GetLastDef(IDFPhi->getIncomingBlock(I)));
  1021. } else {
  1022. for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BBIDF))
  1023. IDFPhi->addIncoming(GetLastDef(Pi), Pi);
  1024. }
  1025. }
  1026. }
  1027. // Now for all defs in BlocksWithDefsToReplace, if there are uses they no
  1028. // longer dominate, replace those with the closest dominating def.
  1029. // This will also update optimized accesses, as they're also uses.
  1030. for (auto *BlockWithDefsToReplace : BlocksWithDefsToReplace) {
  1031. if (auto DefsList = MSSA->getWritableBlockDefs(BlockWithDefsToReplace)) {
  1032. for (auto &DefToReplaceUses : *DefsList) {
  1033. BasicBlock *DominatingBlock = DefToReplaceUses.getBlock();
  1034. for (Use &U : llvm::make_early_inc_range(DefToReplaceUses.uses())) {
  1035. MemoryAccess *Usr = cast<MemoryAccess>(U.getUser());
  1036. if (MemoryPhi *UsrPhi = dyn_cast<MemoryPhi>(Usr)) {
  1037. BasicBlock *DominatedBlock = UsrPhi->getIncomingBlock(U);
  1038. if (!DT.dominates(DominatingBlock, DominatedBlock))
  1039. U.set(GetLastDef(DominatedBlock));
  1040. } else {
  1041. BasicBlock *DominatedBlock = Usr->getBlock();
  1042. if (!DT.dominates(DominatingBlock, DominatedBlock)) {
  1043. if (auto *DomBlPhi = MSSA->getMemoryAccess(DominatedBlock))
  1044. U.set(DomBlPhi);
  1045. else {
  1046. auto *IDom = DT.getNode(DominatedBlock)->getIDom();
  1047. assert(IDom && "Block must have a valid IDom.");
  1048. U.set(GetLastDef(IDom->getBlock()));
  1049. }
  1050. cast<MemoryUseOrDef>(Usr)->resetOptimized();
  1051. }
  1052. }
  1053. }
  1054. }
  1055. }
  1056. }
  1057. tryRemoveTrivialPhis(InsertedPhis);
  1058. }
  1059. // Move What before Where in the MemorySSA IR.
  1060. template <class WhereType>
  1061. void MemorySSAUpdater::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
  1062. WhereType Where) {
  1063. // Mark MemoryPhi users of What not to be optimized.
  1064. for (auto *U : What->users())
  1065. if (MemoryPhi *PhiUser = dyn_cast<MemoryPhi>(U))
  1066. NonOptPhis.insert(PhiUser);
  1067. // Replace all our users with our defining access.
  1068. What->replaceAllUsesWith(What->getDefiningAccess());
  1069. // Let MemorySSA take care of moving it around in the lists.
  1070. MSSA->moveTo(What, BB, Where);
  1071. // Now reinsert it into the IR and do whatever fixups are needed.
  1072. if (auto *MD = dyn_cast<MemoryDef>(What))
  1073. insertDef(MD, /*RenameUses=*/true);
  1074. else
  1075. insertUse(cast<MemoryUse>(What), /*RenameUses=*/true);
  1076. // Clear dangling pointers. We added all MemoryPhi users, but not all
  1077. // of them are removed by fixupDefs().
  1078. NonOptPhis.clear();
  1079. }
  1080. // Move What before Where in the MemorySSA IR.
  1081. void MemorySSAUpdater::moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  1082. moveTo(What, Where->getBlock(), Where->getIterator());
  1083. }
  1084. // Move What after Where in the MemorySSA IR.
  1085. void MemorySSAUpdater::moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  1086. moveTo(What, Where->getBlock(), ++Where->getIterator());
  1087. }
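// InsertionPlace::BeforeTerminator is handled specially: if the terminator has
// a memory access, move directly before it; otherwise moving to the end of the
// block is equivalent.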
  1088. void MemorySSAUpdater::moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
  1089. MemorySSA::InsertionPlace Where) {
  1090. if (Where != MemorySSA::InsertionPlace::BeforeTerminator)
  1091. return moveTo(What, BB, Where);
  1092. if (auto *Where = MSSA->getMemoryAccess(BB->getTerminator()))
  1093. return moveBefore(What, Where);
  1094. else
  1095. return moveTo(What, BB, MemorySSA::InsertionPlace::End);
  1096. }

// All accesses in To used to be in From. Move to end and update access lists.
void MemorySSAUpdater::moveAllAccesses(BasicBlock *From, BasicBlock *To,
                                       Instruction *Start) {
  MemorySSA::AccessList *Accs = MSSA->getWritableBlockAccesses(From);
  if (!Accs)
    return;

  assert(Start->getParent() == To && "Incorrect Start instruction");
  MemoryAccess *FirstInNew = nullptr;
  for (Instruction &I : make_range(Start->getIterator(), To->end()))
    if ((FirstInNew = MSSA->getMemoryAccess(&I)))
      break;
  if (FirstInNew) {
    auto *MUD = cast<MemoryUseOrDef>(FirstInNew);
    do {
      auto NextIt = ++MUD->getIterator();
      MemoryUseOrDef *NextMUD = (!Accs || NextIt == Accs->end())
                                    ? nullptr
                                    : cast<MemoryUseOrDef>(&*NextIt);
      MSSA->moveTo(MUD, To, MemorySSA::End);
      // Moving MUD from Accs in the moveTo above may delete Accs, so we need
      // to retrieve it again.
      Accs = MSSA->getWritableBlockAccesses(From);
      MUD = NextMUD;
    } while (MUD);
  }

  // If all accesses were moved and only a trivial Phi remains, we try to
  // remove that Phi. This is needed when From is going to be deleted.
  auto *Defs = MSSA->getWritableBlockDefs(From);
  if (Defs && !Defs->empty())
    if (auto *Phi = dyn_cast<MemoryPhi>(&*Defs->begin()))
      tryRemoveTrivialPhi(Phi);
}
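
// Move all accesses from Start onwards out of From and into To, which must
// not contain any MemoryAccesses yet, then make MemoryPhis in To's successors
// take their incoming value from To instead of From.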
void MemorySSAUpdater::moveAllAfterSpliceBlocks(BasicBlock *From,
                                                BasicBlock *To,
                                                Instruction *Start) {
  assert(MSSA->getBlockAccesses(To) == nullptr &&
         "To block is expected to be free of MemoryAccesses.");
  moveAllAccesses(From, To, Start);
  for (BasicBlock *Succ : successors(To))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
      MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
}
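
// Same as moveAllAfterSpliceBlocks, but for the case where From is being
// merged into its unique predecessor To; MemoryPhis in From's successors are
// updated to receive their incoming value from To.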
void MemorySSAUpdater::moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To,
                                               Instruction *Start) {
  assert(From->getUniquePredecessor() == To &&
         "From block is expected to have a single predecessor (To).");
  moveAllAccesses(From, To, Start);
  for (BasicBlock *Succ : successors(From))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
      MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
}
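
// New is a freshly created immediate predecessor of Old that has taken over
// the incoming edges from Preds. If Old is now left with New as its only
// predecessor, Old's MemoryPhi simply moves into New; otherwise the Phi is
// split: a new MemoryPhi in New collects the incoming values for Preds and
// becomes New's incoming value in Old's Phi.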
void MemorySSAUpdater::wireOldPredecessorsToNewImmediatePredecessor(
    BasicBlock *Old, BasicBlock *New, ArrayRef<BasicBlock *> Preds,
    bool IdenticalEdgesWereMerged) {
  assert(!MSSA->getWritableBlockAccesses(New) &&
         "Access list should be null for a new block.");
  MemoryPhi *Phi = MSSA->getMemoryAccess(Old);
  if (!Phi)
    return;
  if (Old->hasNPredecessors(1)) {
    assert(pred_size(New) == Preds.size() &&
           "Should have moved all predecessors.");
    MSSA->moveTo(Phi, New, MemorySSA::Beginning);
  } else {
    assert(!Preds.empty() && "Must be moving at least one predecessor to the "
                             "new immediate predecessor.");
    MemoryPhi *NewPhi = MSSA->createMemoryPhi(New);
    SmallPtrSet<BasicBlock *, 16> PredsSet(Preds.begin(), Preds.end());
    // Currently only support the case of removing a single incoming edge when
    // identical edges were not merged.
    if (!IdenticalEdgesWereMerged)
      assert(PredsSet.size() == Preds.size() &&
             "If identical edges were not merged, we cannot have duplicate "
             "blocks in the predecessors");
    Phi->unorderedDeleteIncomingIf([&](MemoryAccess *MA, BasicBlock *B) {
      if (PredsSet.count(B)) {
        NewPhi->addIncoming(MA, B);
        if (!IdenticalEdgesWereMerged)
          PredsSet.erase(B);
        return true;
      }
      return false;
    });
    Phi->addIncoming(NewPhi, New);
    tryRemoveTrivialPhi(NewPhi);
  }
}
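
// Remove MA from MemorySSA. Remaining uses of MA are redirected to its
// defining access (or, for a MemoryPhi, to its unique incoming value), after
// which MA is dropped from the lookup tables and access lists. When
// OptimizePhis is set, MemoryPhi users are collected and trivially redundant
// ones are removed afterwards.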
void MemorySSAUpdater::removeMemoryAccess(MemoryAccess *MA, bool OptimizePhis) {
  assert(!MSSA->isLiveOnEntryDef(MA) &&
         "Trying to remove the live on entry def");
  // We can only delete phi nodes if they have no uses, or we can replace all
  // uses with a single definition.
  MemoryAccess *NewDefTarget = nullptr;
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) {
    // Note that it is sufficient to know that all edges of the phi node have
    // the same argument. If they do, by the definition of dominance frontiers
    // (which we used to place this phi), that argument must dominate this phi,
    // and thus, must dominate the phi's uses, and so we will not hit the
    // assert below.
    NewDefTarget = onlySingleValue(MP);
    assert((NewDefTarget || MP->use_empty()) &&
           "We can't delete this memory phi");
  } else {
    NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess();
  }

  SmallSetVector<MemoryPhi *, 4> PhisToCheck;

  // Re-point the uses at our defining access.
  if (!isa<MemoryUse>(MA) && !MA->use_empty()) {
    // Reset optimized on users of this store, and reset the uses.
    // A few notes:
    // 1. This is a slightly modified version of RAUW to avoid walking the
    // uses twice here.
    // 2. If we wanted to be complete, we would have to reset the optimized
    // flags on users of phi nodes if doing the below makes a phi node have all
    // the same arguments. Instead, we expect users to call removeMemoryAccess
    // on those phi nodes themselves, because doing it here would be N^3.
    if (MA->hasValueHandle())
      ValueHandleBase::ValueIsRAUWd(MA, NewDefTarget);
    // Note: We assume MemorySSA is not used in metadata since it's not really
    // part of the IR.

    assert(NewDefTarget != MA && "Going into an infinite loop");
    while (!MA->use_empty()) {
      Use &U = *MA->use_begin();
      if (auto *MUD = dyn_cast<MemoryUseOrDef>(U.getUser()))
        MUD->resetOptimized();
      if (OptimizePhis)
        if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U.getUser()))
          PhisToCheck.insert(MP);
      U.set(NewDefTarget);
    }
  }

  // The call below to erase will destroy MA, so we can't change the order we
  // are doing things here.
  MSSA->removeFromLookups(MA);
  MSSA->removeFromLists(MA);

  // Optionally optimize Phi uses. This will recursively remove trivial phis.
  if (!PhisToCheck.empty()) {
    SmallVector<WeakVH, 16> PhisToOptimize{PhisToCheck.begin(),
                                           PhisToCheck.end()};
    PhisToCheck.clear();

    unsigned PhisSize = PhisToOptimize.size();
    while (PhisSize-- > 0)
      if (MemoryPhi *MP =
              cast_or_null<MemoryPhi>(PhisToOptimize.pop_back_val()))
        tryRemoveTrivialPhi(MP);
  }
}
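
// Remove all MemorySSA information for the blocks in DeadBlocks, which are
// about to be deleted: first detach them from MemoryPhis in live successors
// and drop all references, then erase every access they contain.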
void MemorySSAUpdater::removeBlocks(
    const SmallSetVector<BasicBlock *, 8> &DeadBlocks) {
  // First delete all uses of BB in MemoryPhis.
  for (BasicBlock *BB : DeadBlocks) {
    Instruction *TI = BB->getTerminator();
    assert(TI && "Basic block expected to have a terminator instruction");
    for (BasicBlock *Succ : successors(TI))
      if (!DeadBlocks.count(Succ))
        if (MemoryPhi *MP = MSSA->getMemoryAccess(Succ)) {
          MP->unorderedDeleteIncomingBlock(BB);
          tryRemoveTrivialPhi(MP);
        }
    // Drop all references of all accesses in BB.
    if (MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB))
      for (MemoryAccess &MA : *Acc)
        MA.dropAllReferences();
  }

  // Next, delete all memory accesses in each block.
  for (BasicBlock *BB : DeadBlocks) {
    MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB);
    if (!Acc)
      continue;
    for (MemoryAccess &MA : llvm::make_early_inc_range(*Acc)) {
      MSSA->removeFromLookups(&MA);
      MSSA->removeFromLists(&MA);
    }
  }
}
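
// Attempt trivial-Phi removal for every MemoryPhi in UpdatedPHIs that is
// still alive; WeakVH entries may already have been cleared by earlier
// removals.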
void MemorySSAUpdater::tryRemoveTrivialPhis(ArrayRef<WeakVH> UpdatedPHIs) {
  for (const auto &VH : UpdatedPHIs)
    if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
      tryRemoveTrivialPhi(MPhi);
}
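
// The instructions from I onwards in I's block are being replaced with an
// unreachable terminator: drop their memory accesses and remove the block
// from MemoryPhis in its successors.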
void MemorySSAUpdater::changeToUnreachable(const Instruction *I) {
  const BasicBlock *BB = I->getParent();
  // Remove memory accesses in BB for I and all following instructions.
  auto BBI = I->getIterator(), BBE = BB->end();
  // FIXME: If this becomes too expensive, iterate until the first instruction
  // with a memory access, then iterate over MemoryAccesses.
  while (BBI != BBE)
    removeMemoryAccess(&*(BBI++));
  // Update phis in BB's successors to remove BB.
  SmallVector<WeakVH, 16> UpdatedPHIs;
  for (const BasicBlock *Successor : successors(BB)) {
    removeDuplicatePhiEdgesBetween(BB, Successor);
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Successor)) {
      MPhi->unorderedDeleteIncomingBlock(BB);
      UpdatedPHIs.push_back(MPhi);
    }
  }
  // Optimize trivial phis.
  tryRemoveTrivialPhis(UpdatedPHIs);
}
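
// Create a MemoryUseOrDef for I, defined by Definition, and insert it into
// BB's access list at the requested insertion Point.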
MemoryAccess *MemorySSAUpdater::createMemoryAccessInBB(
    Instruction *I, MemoryAccess *Definition, const BasicBlock *BB,
    MemorySSA::InsertionPlace Point) {
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsForBlock(NewAccess, BB, Point);
  return NewAccess;
}
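
// Create a MemoryUseOrDef for I, defined by Definition, and insert it
// immediately before the existing access InsertPt in the same block.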
MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessBefore(
    Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              InsertPt->getIterator());
  return NewAccess;
}
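
// Create a MemoryUseOrDef for I, defined by Definition, and insert it
// immediately after the existing access InsertPt in the same block.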
MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessAfter(
    Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              ++InsertPt->getIterator());
  return NewAccess;
}