RDFLiveness.cpp 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169
  1. //===- RDFLiveness.cpp ----------------------------------------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // Computation of the liveness information from the data-flow graph.
  10. //
  11. // The main functionality of this code is to compute block live-in
  12. // information. With the live-in information in place, the placement
  13. // of kill flags can also be recalculated.
  14. //
  15. // The block live-in calculation is based on the ideas from the following
  16. // publication:
  17. //
  18. // Dibyendu Das, Ramakrishna Upadrasta, Benoit Dupont de Dinechin.
  19. // "Efficient Liveness Computation Using Merge Sets and DJ-Graphs."
  20. // ACM Transactions on Architecture and Code Optimization, Association for
  21. // Computing Machinery, 2012, ACM TACO Special Issue on "High-Performance
  22. // and Embedded Architectures and Compilers", 8 (4),
  23. // <10.1145/2086696.2086706>. <hal-00647369>
  24. //
  25. #include "llvm/CodeGen/RDFLiveness.h"
  26. #include "llvm/ADT/BitVector.h"
  27. #include "llvm/ADT/DenseMap.h"
  28. #include "llvm/ADT/STLExtras.h"
  29. #include "llvm/ADT/SetVector.h"
  30. #include "llvm/ADT/SmallSet.h"
  31. #include "llvm/CodeGen/MachineBasicBlock.h"
  32. #include "llvm/CodeGen/MachineDominanceFrontier.h"
  33. #include "llvm/CodeGen/MachineDominators.h"
  34. #include "llvm/CodeGen/MachineFunction.h"
  35. #include "llvm/CodeGen/MachineInstr.h"
  36. #include "llvm/CodeGen/RDFGraph.h"
  37. #include "llvm/CodeGen/RDFRegisters.h"
  38. #include "llvm/CodeGen/TargetRegisterInfo.h"
  39. #include "llvm/MC/LaneBitmask.h"
  40. #include "llvm/MC/MCRegisterInfo.h"
  41. #include "llvm/Support/CommandLine.h"
  42. #include "llvm/Support/ErrorHandling.h"
  43. #include "llvm/Support/raw_ostream.h"
  44. #include <algorithm>
  45. #include <cassert>
  46. #include <cstdint>
  47. #include <iterator>
  48. #include <map>
  49. #include <unordered_map>
  50. #include <utility>
  51. #include <vector>
using namespace llvm;
using namespace rdf;

// Upper bound on the nesting depth in getAllReachingDefsRecImpl: once the
// recursion exceeds this level, the search is abandoned and reported as
// unsuccessful (the "false" half of the returned pair).
static cl::opt<unsigned> MaxRecNest("rdf-liveness-max-rec", cl::init(25),
    cl::Hidden, cl::desc("Maximum recursion level"));
  56. namespace llvm {
  57. namespace rdf {
  58. raw_ostream &operator<< (raw_ostream &OS, const Print<Liveness::RefMap> &P) {
  59. OS << '{';
  60. for (const auto &I : P.Obj) {
  61. OS << ' ' << printReg(I.first, &P.G.getTRI()) << '{';
  62. for (auto J = I.second.begin(), E = I.second.end(); J != E; ) {
  63. OS << Print(J->first, P.G) << PrintLaneMaskOpt(J->second);
  64. if (++J != E)
  65. OS << ',';
  66. }
  67. OS << '}';
  68. }
  69. OS << " }";
  70. return OS;
  71. }
  72. } // end namespace rdf
  73. } // end namespace llvm
  74. // The order in the returned sequence is the order of reaching defs in the
  75. // upward traversal: the first def is the closest to the given reference RefA,
  76. // the next one is further up, and so on.
  77. // The list ends at a reaching phi def, or when the reference from RefA is
  78. // covered by the defs in the list (see FullChain).
  79. // This function provides two modes of operation:
  80. // (1) Returning the sequence of reaching defs for a particular reference
  81. // node. This sequence will terminate at the first phi node [1].
  82. // (2) Returning a partial sequence of reaching defs, where the final goal
  83. // is to traverse past phi nodes to the actual defs arising from the code
  84. // itself.
  85. // In mode (2), the register reference for which the search was started
  86. // may be different from the reference node RefA, for which this call was
  87. // made, hence the argument RefRR, which holds the original register.
  88. // Also, some definitions may have already been encountered in a previous
  89. // call that will influence register covering. The register references
  90. // already defined are passed in through DefRRs.
  91. // In mode (1), the "continuation" considerations do not apply, and the
  92. // RefRR is the same as the register in RefA, and the set DefRRs is empty.
  93. //
  94. // [1] It is possible for multiple phi nodes to be included in the returned
  95. // sequence:
  96. // SubA = phi ...
  97. // SubB = phi ...
  98. // ... = SuperAB(rdef:SubA), SuperAB"(rdef:SubB)
  99. // However, these phi nodes are independent from one another in terms of
  100. // the data-flow.
NodeList Liveness::getAllReachingDefs(RegisterRef RefRR,
      NodeAddr<RefNode*> RefA, bool TopShadows, bool FullChain,
      const RegisterAggr &DefRRs) {
  NodeList RDefs; // Return value.
  SetVector<NodeId> DefQ;
  DenseMap<MachineInstr*, uint32_t> OrdMap;

  // Dead defs will be treated as if they were live, since they are actually
  // on the data-flow path. They cannot be ignored because even though they
  // do not generate meaningful values, they still modify registers.

  // If the reference is undefined, there is nothing to do.
  if (RefA.Addr->getFlags() & NodeAttrs::Undef)
    return RDefs;

  // The initial queue should not have reaching defs for shadows. The
  // whole point of a shadow is that it will have a reaching def that
  // is not aliased to the reaching defs of the related shadows.
  NodeId Start = RefA.Id;
  auto SNA = DFG.addr<RefNode*>(Start);
  if (NodeId RD = SNA.Addr->getReachingDef())
    DefQ.insert(RD);
  if (TopShadows) {
    for (auto S : DFG.getRelatedRefs(RefA.Addr->getOwner(DFG), RefA))
      if (NodeId RD = NodeAddr<RefNode*>(S).Addr->getReachingDef())
        DefQ.insert(RD);
  }

  // Collect all the reaching defs, going up until a phi node is encountered,
  // or there are no more reaching defs. From this set, the actual set of
  // reaching defs will be selected.
  // The traversal upwards must go on until a covering def is encountered.
  // It is possible that a collection of non-covering (individually) defs
  // will be sufficient, but keep going until a covering one is found.
  // NOTE: DefQ grows while this loop runs; SetVector iteration by index
  // deliberately picks up the newly queued defs.
  for (unsigned i = 0; i < DefQ.size(); ++i) {
    auto TA = DFG.addr<DefNode*>(DefQ[i]);
    if (TA.Addr->getFlags() & NodeAttrs::PhiRef)
      continue;
    // Stop at the covering/overwriting def of the initial register reference.
    RegisterRef RR = TA.Addr->getRegRef(DFG);
    if (!DFG.IsPreservingDef(TA))
      if (RegisterAggr::isCoverOf(RR, RefRR, PRI))
        continue;
    // Get the next level of reaching defs. This will include multiple
    // reaching defs for shadows.
    for (auto S : DFG.getRelatedRefs(TA.Addr->getOwner(DFG), TA))
      if (NodeId RD = NodeAddr<RefNode*>(S).Addr->getReachingDef())
        DefQ.insert(RD);
    // Don't visit sibling defs. They share the same reaching def (which
    // will be visited anyway), but they define something not aliased to
    // this ref.
  }

  // Return the MachineBasicBlock containing a given instruction.
  auto Block = [this] (NodeAddr<InstrNode*> IA) -> MachineBasicBlock* {
    if (IA.Addr->getKind() == NodeAttrs::Stmt)
      return NodeAddr<StmtNode*>(IA).Addr->getCode()->getParent();
    assert(IA.Addr->getKind() == NodeAttrs::Phi);
    NodeAddr<PhiNode*> PA = IA;
    NodeAddr<BlockNode*> BA = PA.Addr->getOwner(DFG);
    return BA.Addr->getCode();
  };

  SmallSet<NodeId,32> Defs;

  // Remove all non-phi defs that are not aliased to RefRR, and separate
  // the remaining defs into buckets for containing blocks.
  std::map<NodeId, NodeAddr<InstrNode*>> Owners;
  std::map<MachineBasicBlock*, SmallVector<NodeId,32>> Blocks;
  for (NodeId N : DefQ) {
    auto TA = DFG.addr<DefNode*>(N);
    bool IsPhi = TA.Addr->getFlags() & NodeAttrs::PhiRef;
    if (!IsPhi && !PRI.alias(RefRR, TA.Addr->getRegRef(DFG)))
      continue;
    Defs.insert(TA.Id);
    NodeAddr<InstrNode*> IA = TA.Addr->getOwner(DFG);
    Owners[TA.Id] = IA;
    Blocks[Block(IA)].push_back(IA.Id);
  }

  // Strict weak ordering on instruction nodes within a single basic block:
  // phis precede statements, phis are ordered by node id, statements by
  // their position in the block (via OrdMap when it has been populated,
  // otherwise by a linear scan of the block).
  auto Precedes = [this,&OrdMap] (NodeId A, NodeId B) {
    if (A == B)
      return false;
    NodeAddr<InstrNode*> OA = DFG.addr<InstrNode*>(A);
    NodeAddr<InstrNode*> OB = DFG.addr<InstrNode*>(B);
    bool StmtA = OA.Addr->getKind() == NodeAttrs::Stmt;
    bool StmtB = OB.Addr->getKind() == NodeAttrs::Stmt;
    if (StmtA && StmtB) {
      const MachineInstr *InA = NodeAddr<StmtNode*>(OA).Addr->getCode();
      const MachineInstr *InB = NodeAddr<StmtNode*>(OB).Addr->getCode();
      assert(InA->getParent() == InB->getParent());
      auto FA = OrdMap.find(InA);
      if (FA != OrdMap.end())
        return FA->second < OrdMap.find(InB)->second;
      const MachineBasicBlock *BB = InA->getParent();
      for (auto It = BB->begin(), E = BB->end(); It != E; ++It) {
        if (It == InA->getIterator())
          return true;
        if (It == InB->getIterator())
          return false;
      }
      llvm_unreachable("InA and InB should be in the same block");
    }
    // One of them is a phi node.
    if (!StmtA && !StmtB) {
      // Both are phis, which are unordered. Break the tie by id numbers.
      return A < B;
    }
    // Only one of them is a phi. Phis always precede statements.
    return !StmtA;
  };

  // Record the position of every instruction in block B, so that Precedes
  // can compare statements in O(1) instead of rescanning the block.
  auto GetOrder = [&OrdMap] (MachineBasicBlock &B) {
    uint32_t Pos = 0;
    for (MachineInstr &In : B)
      OrdMap.insert({&In, ++Pos});
  };

  // For each block, sort the nodes in it.
  std::vector<MachineBasicBlock*> TmpBB;
  for (auto &Bucket : Blocks) {
    TmpBB.push_back(Bucket.first);
    // Only precompute positions when the bucket is large enough for the
    // linear-scan fallback in Precedes to matter.
    if (Bucket.second.size() > 2)
      GetOrder(*Bucket.first);
    llvm::sort(Bucket.second, Precedes);
  }

  // Sort the blocks with respect to dominance.
  llvm::sort(TmpBB,
             [this](auto A, auto B) { return MDT.properlyDominates(A, B); });

  // Flatten into a single list, most-dominated blocks first, and within a
  // block the latest instruction first (i.e. closest to RefA first).
  std::vector<NodeId> TmpInst;
  for (MachineBasicBlock *MBB : llvm::reverse(TmpBB)) {
    auto &Bucket = Blocks[MBB];
    TmpInst.insert(TmpInst.end(), Bucket.rbegin(), Bucket.rend());
  }

  // The vector is a list of instructions, so that defs coming from
  // the same instruction don't need to be artificially ordered.
  // Then, when computing the initial segment, and iterating over an
  // instruction, pick the defs that contribute to the covering (i.e. is
  // not covered by previously added defs). Check the defs individually,
  // i.e. first check each def if is covered or not (without adding them
  // to the tracking set), and then add all the selected ones.
  //
  // The reason for this is this example:
  //   *d1<A>, *d2<B>, ...  Assume A and B are aliased (can happen in phi
  //                        nodes).
  //   *d3<C>               If A \incl BuC, and B \incl AuC, then *d2 would be
  //                        covered if we added A first, and A would be covered
  //                        if we added B first.
  // In this example we want both A and B, because we don't want to give
  // either one priority over the other, since they belong to the same
  // statement.
  RegisterAggr RRs(DefRRs);

  auto DefInSet = [&Defs] (NodeAddr<RefNode*> TA) -> bool {
    return TA.Addr->getKind() == NodeAttrs::Def &&
           Defs.count(TA.Id);
  };

  for (NodeId T : TmpInst) {
    if (!FullChain && RRs.hasCoverOf(RefRR))
      break;
    auto TA = DFG.addr<InstrNode*>(T);
    bool IsPhi = DFG.IsCode<NodeAttrs::Phi>(TA);
    NodeList Ds;
    for (NodeAddr<DefNode*> DA : TA.Addr->members_if(DefInSet, DFG)) {
      RegisterRef QR = DA.Addr->getRegRef(DFG);
      // Add phi defs even if they are covered by subsequent defs. This is
      // for cases where the reached use is not covered by any of the defs
      // encountered so far: the phi def is needed to expose the liveness
      // of that use to the entry of the block.
      // Example:
      //   phi d1<R3>(,d2,), ...  Phi def d1 is covered by d2.
      //   d2<R3>(d1,,u3), ...
      //   ..., u3<D1>(d2)        This use needs to be live on entry.
      if (FullChain || IsPhi || !RRs.hasCoverOf(QR))
        Ds.push_back(DA);
    }
    llvm::append_range(RDefs, Ds);
    for (NodeAddr<DefNode*> DA : Ds) {
      // When collecting a full chain of definitions, do not consider phi
      // defs to actually define a register.
      uint16_t Flags = DA.Addr->getFlags();
      if (!FullChain || !(Flags & NodeAttrs::PhiRef))
        if (!(Flags & NodeAttrs::Preserving)) // Don't care about Undef here.
          RRs.insert(DA.Addr->getRegRef(DFG));
    }
  }

  // Dead defs were needed to walk the data-flow, but must not appear in the
  // returned list of reaching defs.
  auto DeadP = [](const NodeAddr<DefNode*> DA) -> bool {
    return DA.Addr->getFlags() & NodeAttrs::Dead;
  };
  llvm::erase_if(RDefs, DeadP);

  return RDefs;
}
  280. std::pair<NodeSet,bool>
  281. Liveness::getAllReachingDefsRec(RegisterRef RefRR, NodeAddr<RefNode*> RefA,
  282. NodeSet &Visited, const NodeSet &Defs) {
  283. return getAllReachingDefsRecImpl(RefRR, RefA, Visited, Defs, 0, MaxRecNest);
  284. }
// Depth-limited recursive search for all reaching defs of RefRR, starting at
// RefA and continuing upwards through phi nodes. Returns the accumulated set
// of def nodes together with a success flag; the flag is false when the
// recursion exceeded MaxNest, in which case the set is partial.
// Visited holds the ids of phi nodes already expanded, to avoid re-entering
// cycles in the phi graph; Defs holds the defs collected so far.
std::pair<NodeSet,bool>
Liveness::getAllReachingDefsRecImpl(RegisterRef RefRR, NodeAddr<RefNode*> RefA,
      NodeSet &Visited, const NodeSet &Defs, unsigned Nest, unsigned MaxNest) {
  if (Nest > MaxNest)
    return { NodeSet(), false };

  // Collect all defined registers. Do not consider phis to be defining
  // anything, only collect "real" definitions.
  RegisterAggr DefRRs(PRI);
  for (NodeId D : Defs) {
    const auto DA = DFG.addr<const DefNode*>(D);
    if (!(DA.Addr->getFlags() & NodeAttrs::PhiRef))
      DefRRs.insert(DA.Addr->getRegRef(DFG));
  }

  NodeList RDs = getAllReachingDefs(RefRR, RefA, false, true, DefRRs);
  if (RDs.empty())
    return { Defs, true };

  // Make a copy of the preexisting definitions and add the newly found ones.
  NodeSet TmpDefs = Defs;
  for (NodeAddr<NodeBase*> R : RDs)
    TmpDefs.insert(R.Id);

  NodeSet Result = Defs;

  for (NodeAddr<DefNode*> DA : RDs) {
    Result.insert(DA.Id);
    // Only phi defs need further (recursive) expansion.
    if (!(DA.Addr->getFlags() & NodeAttrs::PhiRef))
      continue;
    NodeAddr<PhiNode*> PA = DA.Addr->getOwner(DFG);
    // Skip phis that were already expanded on this search path.
    if (!Visited.insert(PA.Id).second)
      continue;
    // Go over all phi uses and get the reaching defs for each use.
    for (auto U : PA.Addr->members_if(DFG.IsRef<NodeAttrs::Use>, DFG)) {
      const auto &T = getAllReachingDefsRecImpl(RefRR, U, Visited, TmpDefs,
                                                Nest+1, MaxNest);
      // Propagate failure (recursion limit hit) immediately.
      if (!T.second)
        return { T.first, false };
      Result.insert(T.first.begin(), T.first.end());
    }
  }

  return { Result, true };
}
/// Find the nearest ref node aliased to RefRR, going upwards in the data
/// flow, starting from the instruction immediately preceding Inst.
/// The scan walks the instructions of IA's block backwards (excluding IA
/// itself), then continues in the immediate dominator, and so on up the
/// dominator tree. Returns a null NodeAddr if no aliased ref is found.
NodeAddr<RefNode*> Liveness::getNearestAliasedRef(RegisterRef RefRR,
      NodeAddr<InstrNode*> IA) {
  NodeAddr<BlockNode*> BA = IA.Addr->getOwner(DFG);
  NodeList Ins = BA.Addr->members(DFG);
  NodeId FindId = IA.Id;
  auto E = Ins.rend();
  // Locate IA in the (reversed) list of the block's instructions.
  auto B = std::find_if(Ins.rbegin(), E,
                        [FindId] (const NodeAddr<InstrNode*> T) {
                          return T.Id == FindId;
                        });
  // Do not scan IA (which is what B would point to).
  if (B != E)
    ++B;

  do {
    // Process the range of instructions from B to E.
    for (NodeAddr<InstrNode*> I : make_range(B, E)) {
      NodeList Refs = I.Addr->members(DFG);
      NodeAddr<RefNode*> Clob, Use;
      // Scan all the refs in I aliased to RefRR, and return the one that
      // is the closest to the output of I, i.e. def > clobber > use.
      for (NodeAddr<RefNode*> R : Refs) {
        if (!PRI.alias(R.Addr->getRegRef(DFG), RefRR))
          continue;
        if (DFG.IsDef(R)) {
          // If it's a non-clobbering def, just return it.
          if (!(R.Addr->getFlags() & NodeAttrs::Clobbering))
            return R;
          Clob = R;
        } else {
          Use = R;
        }
      }
      if (Clob.Id != 0)
        return Clob;
      if (Use.Id != 0)
        return Use;
    }

    // Go up to the immediate dominator, if any.
    MachineBasicBlock *BB = BA.Addr->getCode();
    BA = NodeAddr<BlockNode*>();
    if (MachineDomTreeNode *N = MDT.getNode(BB)) {
      if ((N = N->getIDom()))
        BA = DFG.findBlock(N->getBlock());
    }
    // No dominator block in the graph: the search is over.
    if (!BA.Id)
      break;

    // Restart the scan over the full instruction list of the dominator.
    Ins = BA.Addr->members(DFG);
    B = Ins.rbegin();
    E = Ins.rend();
  } while (true);

  return NodeAddr<RefNode*>();
}
// Collect the ids of all use nodes (aliased to RefRR and not covered by the
// intervening defs in DefRRs) that are reached, directly or transitively,
// from the def DefA.
NodeSet Liveness::getAllReachedUses(RegisterRef RefRR,
      NodeAddr<DefNode*> DefA, const RegisterAggr &DefRRs) {
  NodeSet Uses;

  // If the original register is already covered by all the intervening
  // defs, no more uses can be reached.
  if (DefRRs.hasCoverOf(RefRR))
    return Uses;

  // Add all directly reached uses.
  // If the def is dead, it does not provide a value for any use.
  bool IsDead = DefA.Addr->getFlags() & NodeAttrs::Dead;
  NodeId U = !IsDead ? DefA.Addr->getReachedUse() : 0;
  while (U != 0) {
    auto UA = DFG.addr<UseNode*>(U);
    if (!(UA.Addr->getFlags() & NodeAttrs::Undef)) {
      RegisterRef UR = UA.Addr->getRegRef(DFG);
      if (PRI.alias(RefRR, UR) && !DefRRs.hasCoverOf(UR))
        Uses.insert(U);
    }
    U = UA.Addr->getSibling();
  }

  // Traverse all reached defs. This time dead defs cannot be ignored.
  for (NodeId D = DefA.Addr->getReachedDef(), NextD; D != 0; D = NextD) {
    auto DA = DFG.addr<DefNode*>(D);
    NextD = DA.Addr->getSibling();
    RegisterRef DR = DA.Addr->getRegRef(DFG);
    // If this def is already covered, it cannot reach anything new.
    // Similarly, skip it if it is not aliased to the interesting register.
    if (DefRRs.hasCoverOf(DR) || !PRI.alias(RefRR, DR))
      continue;
    NodeSet T;
    if (DFG.IsPreservingDef(DA)) {
      // If it is a preserving def, do not update the set of intervening defs.
      T = getAllReachedUses(RefRR, DA, DefRRs);
    } else {
      // A clobbering def shadows the part of RefRR it writes; record it in
      // the intervening set for the recursive search.
      RegisterAggr NewDefRRs = DefRRs;
      NewDefRRs.insert(DR);
      T = getAllReachedUses(RefRR, DA, NewDefRRs);
    }
    Uses.insert(T.begin(), T.end());
  }

  return Uses;
}
// Compute, for every phi node in the function, the set of "real" (non-phi)
// uses reached by its defs, and store the result in RealUseMap. The
// computation first collects per-phi reached uses directly, then runs a
// worklist-driven fixed point that propagates uses up chains of phi nodes,
// subtracting registers defined between the phis.
void Liveness::computePhiInfo() {
  RealUseMap.clear();

  // Gather all phi nodes in the function.
  NodeList Phis;
  NodeAddr<FuncNode*> FA = DFG.getFunc();
  NodeList Blocks = FA.Addr->members(DFG);
  for (NodeAddr<BlockNode*> BA : Blocks) {
    auto Ps = BA.Addr->members_if(DFG.IsCode<NodeAttrs::Phi>, DFG);
    llvm::append_range(Phis, Ps);
  }

  // phi use -> (map: reaching phi -> set of registers defined in between)
  std::map<NodeId,std::map<NodeId,RegisterAggr>> PhiUp;
  std::vector<NodeId> PhiUQ;  // Work list of phis for upward propagation.
  std::unordered_map<NodeId,RegisterAggr> PhiDRs;  // Phi -> registers
                                                   // defined by it.

  // Go over all phis.
  for (NodeAddr<PhiNode*> PhiA : Phis) {
    // Go over all defs and collect the reached uses that are non-phi uses
    // (i.e. the "real uses").
    RefMap &RealUses = RealUseMap[PhiA.Id];
    NodeList PhiRefs = PhiA.Addr->members(DFG);

    // Have a work queue of defs whose reached uses need to be found.
    // For each def, add to the queue all reached (non-phi) defs.
    SetVector<NodeId> DefQ;
    NodeSet PhiDefs;
    RegisterAggr DRs(PRI);
    for (NodeAddr<RefNode*> R : PhiRefs) {
      if (!DFG.IsRef<NodeAttrs::Def>(R))
        continue;
      DRs.insert(R.Addr->getRegRef(DFG));
      DefQ.insert(R.Id);
      PhiDefs.insert(R.Id);
    }
    PhiDRs.insert(std::make_pair(PhiA.Id, DRs));

    // Collect the super-set of all possible reached uses. This set will
    // contain all uses reached from this phi, either directly from the
    // phi defs, or (recursively) via non-phi defs reached by the phi defs.
    // This set of uses will later be trimmed to only contain these uses that
    // are actually reached by the phi defs.
    for (unsigned i = 0; i < DefQ.size(); ++i) {
      NodeAddr<DefNode*> DA = DFG.addr<DefNode*>(DefQ[i]);
      // Visit all reached uses. Phi defs should not really have the "dead"
      // flag set, but check it anyway for consistency.
      bool IsDead = DA.Addr->getFlags() & NodeAttrs::Dead;
      NodeId UN = !IsDead ? DA.Addr->getReachedUse() : 0;
      while (UN != 0) {
        NodeAddr<UseNode*> A = DFG.addr<UseNode*>(UN);
        uint16_t F = A.Addr->getFlags();
        if ((F & (NodeAttrs::Undef | NodeAttrs::PhiRef)) == 0) {
          RegisterRef R = A.Addr->getRegRef(DFG);
          RealUses[R.Reg].insert({A.Id,R.Mask});
        }
        UN = A.Addr->getSibling();
      }
      // Visit all reached defs, and add them to the queue. These defs may
      // override some of the uses collected here, but that will be handled
      // later.
      NodeId DN = DA.Addr->getReachedDef();
      while (DN != 0) {
        NodeAddr<DefNode*> A = DFG.addr<DefNode*>(DN);
        for (auto T : DFG.getRelatedRefs(A.Addr->getOwner(DFG), A)) {
          uint16_t Flags = NodeAddr<DefNode*>(T).Addr->getFlags();
          // Must traverse the reached-def chain. Consider:
          //   def(D0) -> def(R0) -> def(R0) -> use(D0)
          // The reachable use of D0 passes through a def of R0.
          if (!(Flags & NodeAttrs::PhiRef))
            DefQ.insert(T.Id);
        }
        DN = A.Addr->getSibling();
      }
    }

    // Filter out these uses that appear to be reachable, but really
    // are not. For example:
    //
    // R1:0 =          d1
    //      = R1:0     u2     Reached by d1.
    //   R0 =          d3
    //      = R1:0     u4     Still reached by d1: indirectly through
    //                        the def d3.
    //   R1 =          d5
    //      = R1:0     u6     Not reached by d1 (covered collectively
    //                        by d3 and d5), but following reached
    //                        defs and uses from d1 will lead here.
    for (auto UI = RealUses.begin(), UE = RealUses.end(); UI != UE; ) {
      // For each reached register UI->first, there is a set UI->second, of
      // uses of it. For each such use, check if it is reached by this phi,
      // i.e. check if the set of its reaching uses intersects the set of
      // this phi's defs.
      NodeRefSet Uses = UI->second;
      UI->second.clear();
      for (std::pair<NodeId,LaneBitmask> I : Uses) {
        auto UA = DFG.addr<UseNode*>(I.first);
        // Undef flag is checked above.
        assert((UA.Addr->getFlags() & NodeAttrs::Undef) == 0);
        RegisterRef R(UI->first, I.second);
        // Calculate the exposed part of the reached use.
        RegisterAggr Covered(PRI);
        for (NodeAddr<DefNode*> DA : getAllReachingDefs(R, UA)) {
          if (PhiDefs.count(DA.Id))
            break;
          Covered.insert(DA.Addr->getRegRef(DFG));
        }
        if (RegisterRef RC = Covered.clearIn(R)) {
          // We are updating the map for register UI->first, so we need
          // to map RC to be expressed in terms of that register.
          RegisterRef S = PRI.mapTo(RC, UI->first);
          UI->second.insert({I.first, S.Mask});
        }
      }
      // Drop registers whose entire use set was filtered out.
      UI = UI->second.empty() ? RealUses.erase(UI) : std::next(UI);
    }

    // If this phi reaches some "real" uses, add it to the queue for upward
    // propagation.
    if (!RealUses.empty())
      PhiUQ.push_back(PhiA.Id);

    // Go over all phi uses and check if the reaching def is another phi.
    // Collect the phis that are among the reaching defs of these uses.
    // While traversing the list of reaching defs for each phi use, accumulate
    // the set of registers defined between this phi (PhiA) and the owner phi
    // of the reaching def.
    NodeSet SeenUses;
    for (auto I : PhiRefs) {
      if (!DFG.IsRef<NodeAttrs::Use>(I) || SeenUses.count(I.Id))
        continue;
      NodeAddr<PhiUseNode*> PUA = I;
      if (PUA.Addr->getReachingDef() == 0)
        continue;

      RegisterRef UR = PUA.Addr->getRegRef(DFG);
      NodeList Ds = getAllReachingDefs(UR, PUA, true, false, NoRegs);
      RegisterAggr DefRRs(PRI);

      for (NodeAddr<DefNode*> D : Ds) {
        if (D.Addr->getFlags() & NodeAttrs::PhiRef) {
          // Reaching def is a phi: record it in PhiUp together with the
          // registers defined between PUA and that phi (accumulated so far
          // in DefRRs).
          NodeId RP = D.Addr->getOwner(DFG).Id;
          std::map<NodeId,RegisterAggr> &M = PhiUp[PUA.Id];
          auto F = M.find(RP);
          if (F == M.end())
            M.insert(std::make_pair(RP, DefRRs));
          else
            F->second.insert(DefRRs);
        }
        DefRRs.insert(D.Addr->getRegRef(DFG));
      }

      // Mark all refs related to PUA as handled, so the outer loop does not
      // redo this work for the shadows of PUA.
      for (NodeAddr<PhiUseNode*> T : DFG.getRelatedRefs(PhiA, PUA))
        SeenUses.insert(T.Id);
    }
  }

  if (Trace) {
    dbgs() << "Phi-up-to-phi map with intervening defs:\n";
    for (auto I : PhiUp) {
      dbgs() << "phi " << Print(I.first, DFG) << " -> {";
      for (auto R : I.second)
        dbgs() << ' ' << Print(R.first, DFG) << Print(R.second, DFG);
      dbgs() << " }\n";
    }
  }

  // Propagate the reached registers up in the phi chain.
  //
  // The following type of situation needs careful handling:
  //
  //   phi d1<R1:0>  (1)
  //        |
  //   ... d2<R1>
  //        |
  //   phi u3<R1:0>  (2)
  //        |
  //   ... u4<R1>
  //
  // The phi node (2) defines a register pair R1:0, and reaches a "real"
  // use u4 of just R1. The same phi node is also known to reach (upwards)
  // the phi node (1). However, the use u4 is not reached by phi (1),
  // because of the intervening definition d2 of R1. The data flow between
  // phis (1) and (2) is restricted to R1:0 minus R1, i.e. R0.
  //
  // When propagating uses up the phi chains, get the all reaching defs
  // for a given phi use, and traverse the list until the propagated ref
  // is covered, or until reaching the final phi. Only assume that the
  // reference reaches the phi in the latter case.

  // The operation "clearIn" can be expensive. For a given set of intervening
  // defs, cache the result of subtracting these defs from a given register
  // ref.
  using SubMap = std::unordered_map<RegisterRef, RegisterRef>;
  std::unordered_map<RegisterAggr, SubMap> Subs;
  auto ClearIn = [] (RegisterRef RR, const RegisterAggr &Mid, SubMap &SM) {
    if (Mid.empty())
      return RR;
    auto F = SM.find(RR);
    if (F != SM.end())
      return F->second;
    RegisterRef S = Mid.clearIn(RR);
    SM.insert({RR, S});
    return S;
  };

  // Go over all phis. PhiUQ grows during this loop: a phi is re-queued
  // whenever new uses were propagated into its RealUseMap entry.
  for (unsigned i = 0; i < PhiUQ.size(); ++i) {
    auto PA = DFG.addr<PhiNode*>(PhiUQ[i]);
    NodeList PUs = PA.Addr->members_if(DFG.IsRef<NodeAttrs::Use>, DFG);
    RefMap &RUM = RealUseMap[PA.Id];

    for (NodeAddr<UseNode*> UA : PUs) {
      std::map<NodeId,RegisterAggr> &PUM = PhiUp[UA.Id];
      RegisterRef UR = UA.Addr->getRegRef(DFG);
      for (const std::pair<const NodeId, RegisterAggr> &P : PUM) {
        bool Changed = false;
        const RegisterAggr &MidDefs = P.second;
        // Collect the set PropUp of uses that are reached by the current
        // phi PA, and are not covered by any intervening def between the
        // currently visited use UA and the upward phi P.
        if (MidDefs.hasCoverOf(UR))
          continue;
        SubMap &SM = Subs[MidDefs];

        // General algorithm:
        //   for each (R,U) : U is use node of R, U is reached by PA
        //     if MidDefs does not cover (R,U)
        //       then add (R-MidDefs,U) to RealUseMap[P]
        //
        for (const std::pair<const RegisterId, NodeRefSet> &T : RUM) {
          RegisterRef R(T.first);
          // The current phi (PA) could be a phi for a regmask. It could
          // reach a whole variety of uses that are not related to the
          // specific upward phi (P.first).
          const RegisterAggr &DRs = PhiDRs.at(P.first);
          if (!DRs.hasAliasOf(R))
            continue;
          R = PRI.mapTo(DRs.intersectWith(R), T.first);
          for (std::pair<NodeId,LaneBitmask> V : T.second) {
            LaneBitmask M = R.Mask & V.second;
            if (M.none())
              continue;
            if (RegisterRef SS = ClearIn(RegisterRef(R.Reg, M), MidDefs, SM)) {
              NodeRefSet &RS = RealUseMap[P.first][SS.Reg];
              Changed |= RS.insert({V.first,SS.Mask}).second;
            }
          }
        }

        if (Changed)
          PhiUQ.push_back(P.first);
      }
    }
  }

  if (Trace) {
    dbgs() << "Real use map:\n";
    for (auto I : RealUseMap) {
      dbgs() << "phi " << Print(I.first, DFG);
      NodeAddr<PhiNode*> PA = DFG.addr<PhiNode*>(I.first);
      NodeList Ds = PA.Addr->members_if(DFG.IsRef<NodeAttrs::Def>, DFG);
      if (!Ds.empty()) {
        RegisterRef RR = NodeAddr<DefNode*>(Ds[0]).Addr->getRegRef(DFG);
        dbgs() << '<' << Print(RR, DFG) << '>';
      } else {
        dbgs() << "<noreg>";
      }
      dbgs() << " -> " << Print(I.second, DFG) << '\n';
    }
  }
}
// Compute liveness information for the whole function and store it in
// LiveMap (per-block live-in register aggregates). The computation runs in
// stages, in this order:
//   1. Build NBMap (node id -> containing MachineBasicBlock).
//   2. Compute iterated dominance frontiers (IDF) and their inverse (IIDF).
//   3. Run computePhiInfo() to populate RealUseMap.
//   4. Build PhiLON (phi live-on-entry) and PhiLOX (phi live-on-exit) maps.
//   5. Recursively traverse the dominator tree (traverse()) starting from
//      the entry block, and finally add the function's live-ins to the
//      entry block's live set.
void Liveness::computeLiveIns() {
  // Populate the node-to-block map. This speeds up the calculations
  // significantly.
  NBMap.clear();
  for (NodeAddr<BlockNode*> BA : DFG.getFunc().Addr->members(DFG)) {
    MachineBasicBlock *BB = BA.Addr->getCode();
    for (NodeAddr<InstrNode*> IA : BA.Addr->members(DFG)) {
      // Map both every ref node and the instruction node itself to BB.
      for (NodeAddr<RefNode*> RA : IA.Addr->members(DFG))
        NBMap.insert(std::make_pair(RA.Id, BB));
      NBMap.insert(std::make_pair(IA.Id, BB));
    }
  }

  MachineFunction &MF = DFG.getMF();

  // Compute IDF first, then the inverse.
  decltype(IIDF) IDF;
  for (MachineBasicBlock &B : MF) {
    auto F1 = MDF.find(&B);
    if (F1 == MDF.end())
      continue;
    SetVector<MachineBasicBlock*> IDFB(F1->second.begin(), F1->second.end());
    // Transitively close the dominance frontier: IDFB grows while new
    // frontier blocks are discovered (SetVector keeps iteration stable).
    for (unsigned i = 0; i < IDFB.size(); ++i) {
      auto F2 = MDF.find(IDFB[i]);
      if (F2 != MDF.end())
        IDFB.insert(F2->second.begin(), F2->second.end());
    }
    // Add B to the IDF(B). This will put B in the IIDF(B).
    IDFB.insert(&B);
    IDF[&B].insert(IDFB.begin(), IDFB.end());
  }

  // Invert the relation: IIDF[S] is the set of blocks whose IDF contains S.
  for (auto I : IDF)
    for (auto *S : I.second)
      IIDF[S].insert(I.first);

  computePhiInfo();

  NodeAddr<FuncNode*> FA = DFG.getFunc();
  NodeList Blocks = FA.Addr->members(DFG);

  // Build the phi live-on-entry map.
  for (NodeAddr<BlockNode*> BA : Blocks) {
    MachineBasicBlock *MB = BA.Addr->getCode();
    RefMap &LON = PhiLON[MB];
    for (auto P : BA.Addr->members_if(DFG.IsCode<NodeAttrs::Phi>, DFG))
      for (const RefMap::value_type &S : RealUseMap[P.Id])
        LON[S.first].insert(S.second.begin(), S.second.end());
  }

  if (Trace) {
    dbgs() << "Phi live-on-entry map:\n";
    for (auto &I : PhiLON)
      dbgs() << "block #" << I.first->getNumber() << " -> "
             << Print(I.second, DFG) << '\n';
  }

  // Build the phi live-on-exit map. Each phi node has some set of reached
  // "real" uses. Propagate this set backwards into the block predecessors
  // through the reaching defs of the corresponding phi uses.
  for (NodeAddr<BlockNode*> BA : Blocks) {
    NodeList Phis = BA.Addr->members_if(DFG.IsCode<NodeAttrs::Phi>, DFG);
    for (NodeAddr<PhiNode*> PA : Phis) {
      RefMap &RUs = RealUseMap[PA.Id];
      if (RUs.empty())
        continue;

      NodeSet SeenUses;
      for (auto U : PA.Addr->members_if(DFG.IsRef<NodeAttrs::Use>, DFG)) {
        if (!SeenUses.insert(U.Id).second)
          continue;
        NodeAddr<PhiUseNode*> PUA = U;
        if (PUA.Addr->getReachingDef() == 0)
          continue;

        // Each phi has some set (possibly empty) of reached "real" uses,
        // that is, uses that are part of the compiled program. Such a use
        // may be located in some farther block, but following a chain of
        // reaching defs will eventually lead to this phi.
        // Any chain of reaching defs may fork at a phi node, but there
        // will be a path upwards that will lead to this phi. Now, this
        // chain will need to fork at this phi, since some of the reached
        // uses may have definitions joining in from multiple predecessors.
        // For each reached "real" use, identify the set of reaching defs
        // coming from each predecessor P, and add them to PhiLOX[P].
        //
        auto PrA = DFG.addr<BlockNode*>(PUA.Addr->getPredecessor());
        RefMap &LOX = PhiLOX[PrA.Addr->getCode()];

        for (const std::pair<const RegisterId, NodeRefSet> &RS : RUs) {
          // We need to visit each individual use.
          for (std::pair<NodeId,LaneBitmask> P : RS.second) {
            // Create a register ref corresponding to the use, and find
            // all reaching defs starting from the phi use, and treating
            // all related shadows as a single use cluster.
            RegisterRef S(RS.first, P.second);
            NodeList Ds = getAllReachingDefs(S, PUA, true, false, NoRegs);
            for (NodeAddr<DefNode*> D : Ds) {
              // Calculate the mask corresponding to the visited def.
              RegisterAggr TA(PRI);
              TA.insert(D.Addr->getRegRef(DFG)).intersect(S);
              LaneBitmask TM = TA.makeRegRef().Mask;
              LOX[S.Reg].insert({D.Id, TM});
            }
          }
        }

        // Mark all refs related to this phi use as seen so the cluster is
        // processed only once.
        for (NodeAddr<PhiUseNode*> T : DFG.getRelatedRefs(PA, PUA))
          SeenUses.insert(T.Id);
      } // for U : phi uses
    } // for P : Phis
  } // for B : Blocks

  if (Trace) {
    dbgs() << "Phi live-on-exit map:\n";
    for (auto &I : PhiLOX)
      dbgs() << "block #" << I.first->getNumber() << " -> "
             << Print(I.second, DFG) << '\n';
  }

  RefMap LiveIn;
  traverse(&MF.front(), LiveIn);

  // Add function live-ins to the live-in set of the function entry block.
  LiveMap[&MF.front()].insert(DFG.getLiveIns());

  if (Trace) {
    // Dump the liveness map
    for (MachineBasicBlock &B : MF) {
      std::vector<RegisterRef> LV;
      for (const MachineBasicBlock::RegisterMaskPair &LI : B.liveins())
        LV.push_back(RegisterRef(LI.PhysReg, LI.LaneMask));
      llvm::sort(LV);
      dbgs() << printMBBReference(B) << "\t rec = {";
      for (auto I : LV)
        dbgs() << ' ' << Print(I, DFG);
      dbgs() << " }\n";
      //dbgs() << "\tcomp = " << Print(LiveMap[&B], DFG) << '\n';

      LV.clear();
      const RegisterAggr &LG = LiveMap[&B];
      for (auto I = LG.rr_begin(), E = LG.rr_end(); I != E; ++I)
        LV.push_back(*I);
      llvm::sort(LV);
      dbgs() << "\tcomp = {";
      for (auto I : LV)
        dbgs() << ' ' << Print(I, DFG);
      dbgs() << " }\n";
    }
  }
}
  806. void Liveness::resetLiveIns() {
  807. for (auto &B : DFG.getMF()) {
  808. // Remove all live-ins.
  809. std::vector<unsigned> T;
  810. for (const MachineBasicBlock::RegisterMaskPair &LI : B.liveins())
  811. T.push_back(LI.PhysReg);
  812. for (auto I : T)
  813. B.removeLiveIn(I);
  814. // Add the newly computed live-ins.
  815. const RegisterAggr &LiveIns = LiveMap[&B];
  816. for (const RegisterRef R : make_range(LiveIns.rr_begin(), LiveIns.rr_end()))
  817. B.addLiveIn({MCPhysReg(R.Reg), R.Mask});
  818. }
  819. }
  820. void Liveness::resetKills() {
  821. for (auto &B : DFG.getMF())
  822. resetKills(&B);
  823. }
  824. void Liveness::resetKills(MachineBasicBlock *B) {
  825. auto CopyLiveIns = [this] (MachineBasicBlock *B, BitVector &LV) -> void {
  826. for (auto I : B->liveins()) {
  827. MCSubRegIndexIterator S(I.PhysReg, &TRI);
  828. if (!S.isValid()) {
  829. LV.set(I.PhysReg);
  830. continue;
  831. }
  832. do {
  833. LaneBitmask M = TRI.getSubRegIndexLaneMask(S.getSubRegIndex());
  834. if ((M & I.LaneMask).any())
  835. LV.set(S.getSubReg());
  836. ++S;
  837. } while (S.isValid());
  838. }
  839. };
  840. BitVector LiveIn(TRI.getNumRegs()), Live(TRI.getNumRegs());
  841. CopyLiveIns(B, LiveIn);
  842. for (auto *SI : B->successors())
  843. CopyLiveIns(SI, Live);
  844. for (MachineInstr &MI : llvm::reverse(*B)) {
  845. if (MI.isDebugInstr())
  846. continue;
  847. MI.clearKillInfo();
  848. for (auto &Op : MI.operands()) {
  849. // An implicit def of a super-register may not necessarily start a
  850. // live range of it, since an implicit use could be used to keep parts
  851. // of it live. Instead of analyzing the implicit operands, ignore
  852. // implicit defs.
  853. if (!Op.isReg() || !Op.isDef() || Op.isImplicit())
  854. continue;
  855. Register R = Op.getReg();
  856. if (!R.isPhysical())
  857. continue;
  858. for (MCSubRegIterator SR(R, &TRI, true); SR.isValid(); ++SR)
  859. Live.reset(*SR);
  860. }
  861. for (auto &Op : MI.operands()) {
  862. if (!Op.isReg() || !Op.isUse() || Op.isUndef())
  863. continue;
  864. Register R = Op.getReg();
  865. if (!R.isPhysical())
  866. continue;
  867. bool IsLive = false;
  868. for (MCRegAliasIterator AR(R, &TRI, true); AR.isValid(); ++AR) {
  869. if (!Live[*AR])
  870. continue;
  871. IsLive = true;
  872. break;
  873. }
  874. if (!IsLive)
  875. Op.setIsKill(true);
  876. for (MCSubRegIterator SR(R, &TRI, true); SR.isValid(); ++SR)
  877. Live.set(*SR);
  878. }
  879. }
  880. }
  881. // Helper function to obtain the basic block containing the reaching def
  882. // of the given use.
  883. MachineBasicBlock *Liveness::getBlockWithRef(NodeId RN) const {
  884. auto F = NBMap.find(RN);
  885. if (F != NBMap.end())
  886. return F->second;
  887. llvm_unreachable("Node id not in map");
  888. }
// Recursive depth-first walk over the dominator tree that propagates
// liveness upwards. On return, LiveIn contains, for each register, the set
// of reaching defs live on entry to B, and LiveMap has been updated for B
// and for the blocks in IIDF[B].
void Liveness::traverse(MachineBasicBlock *B, RefMap &LiveIn) {
  // The LiveIn map, for each (physical) register, contains the set of live
  // reaching defs of that register that are live on entry to the associated
  // block.

  // The summary of the traversal algorithm:
  //
  // R is live-in in B, if there exists a U(R), such that rdef(R) dom B
  // and (U \in IDF(B) or B dom U).
  //
  // for (C : children) {
  //   LU = {}
  //   traverse(C, LU)
  //   LiveUses += LU
  // }
  //
  // LiveUses -= Defs(B);
  // LiveUses += UpwardExposedUses(B);
  // for (C : IIDF[B])
  //   for (U : LiveUses)
  //     if (Rdef(U) dom C)
  //       C.addLiveIn(U)
  //

  // Go up the dominator tree (depth-first).
  MachineDomTreeNode *N = MDT.getNode(B);
  for (auto *I : *N) {
    RefMap L;
    MachineBasicBlock *SB = I->getBlock();
    traverse(SB, L);

    // Merge the children's live-in sets into this block's set.
    for (auto S : L)
      LiveIn[S.first].insert(S.second.begin(), S.second.end());
  }

  if (Trace) {
    dbgs() << "\n-- " << printMBBReference(*B) << ": " << __func__
           << " after recursion into: {";
    for (auto *I : *N)
      dbgs() << ' ' << I->getBlock()->getNumber();
    dbgs() << " }\n";
    dbgs() << " LiveIn: " << Print(LiveIn, DFG) << '\n';
    dbgs() << " Local: " << Print(LiveMap[B], DFG) << '\n';
  }

  // Add reaching defs of phi uses that are live on exit from this block.
  RefMap &PUs = PhiLOX[B];
  for (auto &S : PUs)
    LiveIn[S.first].insert(S.second.begin(), S.second.end());

  if (Trace) {
    dbgs() << "after LOX\n";
    dbgs() << " LiveIn: " << Print(LiveIn, DFG) << '\n';
    dbgs() << " Local: " << Print(LiveMap[B], DFG) << '\n';
  }

  // The LiveIn map at this point has all defs that are live-on-exit from B,
  // as if they were live-on-entry to B. First, we need to filter out all
  // defs that are present in this block. Then we will add reaching defs of
  // all upward-exposed uses.

  // To filter out the defs, first make a copy of LiveIn, and then re-populate
  // LiveIn with the defs that should remain.
  RefMap LiveInCopy = LiveIn;
  LiveIn.clear();

  for (const std::pair<const RegisterId, NodeRefSet> &LE : LiveInCopy) {
    RegisterRef LRef(LE.first);
    NodeRefSet &NewDefs = LiveIn[LRef.Reg]; // To be filled.
    const NodeRefSet &OldDefs = LE.second;
    for (NodeRef OR : OldDefs) {
      // R is a def node that was live-on-exit
      auto DA = DFG.addr<DefNode*>(OR.first);
      NodeAddr<InstrNode*> IA = DA.Addr->getOwner(DFG);
      NodeAddr<BlockNode*> BA = IA.Addr->getOwner(DFG);
      if (B != BA.Addr->getCode()) {
        // Defs from a different block need to be preserved. Defs from this
        // block will need to be processed further, except for phi defs, the
        // liveness of which is handled through the PhiLON/PhiLOX maps.
        NewDefs.insert(OR);
        continue;
      }

      // Defs from this block need to stop the liveness from being
      // propagated upwards. This only applies to non-preserving defs,
      // and to the parts of the register actually covered by those defs.
      // (Note that phi defs should always be preserving.)
      RegisterAggr RRs(PRI);
      LRef.Mask = OR.second;

      if (!DFG.IsPreservingDef(DA)) {
        assert(!(IA.Addr->getFlags() & NodeAttrs::Phi));
        // DA is a non-phi def that is live-on-exit from this block, and
        // that is also located in this block. LRef is a register ref
        // whose use this def reaches. If DA covers LRef, then no part
        // of LRef is exposed upwards.
        if (RRs.insert(DA.Addr->getRegRef(DFG)).hasCoverOf(LRef))
          continue;
      }

      // DA itself was not sufficient to cover LRef. In general, it is
      // the last in a chain of aliased defs before the exit from this block.
      // There could be other defs in this block that are a part of that
      // chain. Check that now: accumulate the registers from these defs,
      // and if they all together cover LRef, it is not live-on-entry.
      for (NodeAddr<DefNode*> TA : getAllReachingDefs(DA)) {
        // DefNode -> InstrNode -> BlockNode.
        NodeAddr<InstrNode*> ITA = TA.Addr->getOwner(DFG);
        NodeAddr<BlockNode*> BTA = ITA.Addr->getOwner(DFG);
        // Reaching defs are ordered in the upward direction.
        if (BTA.Addr->getCode() != B) {
          // We have reached past the beginning of B, and the accumulated
          // registers are not covering LRef. The first def from the
          // upward chain will be live.
          // Subtract all accumulated defs (RRs) from LRef.
          RegisterRef T = RRs.clearIn(LRef);
          assert(T);
          NewDefs.insert({TA.Id,T.Mask});
          break;
        }

        // TA is in B. Only add this def to the accumulated cover if it is
        // not preserving.
        if (!(TA.Addr->getFlags() & NodeAttrs::Preserving))
          RRs.insert(TA.Addr->getRegRef(DFG));
        // If this is enough to cover LRef, then stop.
        if (RRs.hasCoverOf(LRef))
          break;
      }
    }
  }

  // Drop registers whose def set became empty after the filtering above.
  emptify(LiveIn);

  if (Trace) {
    dbgs() << "after defs in block\n";
    dbgs() << " LiveIn: " << Print(LiveIn, DFG) << '\n';
    dbgs() << " Local: " << Print(LiveMap[B], DFG) << '\n';
  }

  // Scan the block for upward-exposed uses and add them to the tracking set.
  for (auto I : DFG.getFunc().Addr->findBlock(B, DFG).Addr->members(DFG)) {
    NodeAddr<InstrNode*> IA = I;
    if (IA.Addr->getKind() != NodeAttrs::Stmt)
      continue;
    for (NodeAddr<UseNode*> UA : IA.Addr->members_if(DFG.IsUse, DFG)) {
      if (UA.Addr->getFlags() & NodeAttrs::Undef)
        continue;
      RegisterRef RR = UA.Addr->getRegRef(DFG);
      // Only defs from other blocks make this use upward-exposed here.
      for (NodeAddr<DefNode*> D : getAllReachingDefs(UA))
        if (getBlockWithRef(D.Id) != B)
          LiveIn[RR.Reg].insert({D.Id,RR.Mask});
    }
  }

  if (Trace) {
    dbgs() << "after uses in block\n";
    dbgs() << " LiveIn: " << Print(LiveIn, DFG) << '\n';
    dbgs() << " Local: " << Print(LiveMap[B], DFG) << '\n';
  }

  // Phi uses should not be propagated up the dominator tree, since they
  // are not dominated by their corresponding reaching defs.
  RegisterAggr &Local = LiveMap[B];
  RefMap &LON = PhiLON[B];
  for (auto &R : LON) {
    // Collapse all lane masks of the phi live-on-entry refs for register R.
    LaneBitmask M;
    for (auto P : R.second)
      M |= P.second;
    Local.insert(RegisterRef(R.first,M));
  }

  if (Trace) {
    dbgs() << "after phi uses in block\n";
    dbgs() << " LiveIn: " << Print(LiveIn, DFG) << '\n';
    dbgs() << " Local: " << Print(Local, DFG) << '\n';
  }

  // Propagate the live-in defs into the blocks of IIDF[B] whose entry they
  // properly dominate.
  for (auto *C : IIDF[B]) {
    RegisterAggr &LiveC = LiveMap[C];
    for (const std::pair<const RegisterId, NodeRefSet> &S : LiveIn)
      for (auto R : S.second)
        if (MDT.properlyDominates(getBlockWithRef(R.first), C))
          LiveC.insert(RegisterRef(S.first, R.second));
  }
}
  1055. void Liveness::emptify(RefMap &M) {
  1056. for (auto I = M.begin(), E = M.end(); I != E; )
  1057. I = I->second.empty() ? M.erase(I) : std::next(I);
  1058. }