RDFGraph.cpp 58 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821
  1. //===- RDFGraph.cpp -------------------------------------------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // Target-independent, SSA-based data flow graph for register data flow (RDF).
  10. //
  11. #include "llvm/CodeGen/RDFGraph.h"
  12. #include "llvm/ADT/BitVector.h"
  13. #include "llvm/ADT/STLExtras.h"
  14. #include "llvm/ADT/SetVector.h"
  15. #include "llvm/CodeGen/MachineBasicBlock.h"
  16. #include "llvm/CodeGen/MachineDominanceFrontier.h"
  17. #include "llvm/CodeGen/MachineDominators.h"
  18. #include "llvm/CodeGen/MachineFunction.h"
  19. #include "llvm/CodeGen/MachineInstr.h"
  20. #include "llvm/CodeGen/MachineOperand.h"
  21. #include "llvm/CodeGen/MachineRegisterInfo.h"
  22. #include "llvm/CodeGen/RDFRegisters.h"
  23. #include "llvm/CodeGen/TargetInstrInfo.h"
  24. #include "llvm/CodeGen/TargetLowering.h"
  25. #include "llvm/CodeGen/TargetRegisterInfo.h"
  26. #include "llvm/CodeGen/TargetSubtargetInfo.h"
  27. #include "llvm/IR/Function.h"
  28. #include "llvm/MC/LaneBitmask.h"
  29. #include "llvm/MC/MCInstrDesc.h"
  30. #include "llvm/Support/ErrorHandling.h"
  31. #include "llvm/Support/raw_ostream.h"
  32. #include <algorithm>
  33. #include <cassert>
  34. #include <cstdint>
  35. #include <cstring>
  36. #include <iterator>
  37. #include <set>
  38. #include <utility>
  39. #include <vector>
  40. using namespace llvm;
  41. using namespace rdf;
  42. // Printing functions. Have them here first, so that the rest of the code
  43. // can use them.
  44. namespace llvm {
  45. namespace rdf {
  46. raw_ostream &operator<< (raw_ostream &OS, const PrintLaneMaskOpt &P) {
  47. if (!P.Mask.all())
  48. OS << ':' << PrintLaneMask(P.Mask);
  49. return OS;
  50. }
// Print a register reference as its target register name (when the id is a
// valid physical register) or as "#<id>" otherwise, followed by an optional
// lane-mask suffix.
raw_ostream &operator<< (raw_ostream &OS, const Print<RegisterRef> &P) {
  auto &TRI = P.G.getTRI();
  if (P.Obj.Reg > 0 && P.Obj.Reg < TRI.getNumRegs())
    OS << TRI.getName(P.Obj.Reg);
  else
    OS << '#' << P.Obj.Reg;
  OS << PrintLaneMaskOpt(P.Obj.Mask);
  return OS;
}
// Print a node id prefixed with a mnemonic for the node's type/kind/flags,
// e.g. "s12" for a statement, "p7" for a phi, "d9" for a def. Ref nodes are
// additionally prefixed with flag markers: '/'=Undef, '\'=Dead,
// '+'=Preserving, '~'=Clobbering, and suffixed with '"' for Shadow.
raw_ostream &operator<< (raw_ostream &OS, const Print<NodeId> &P) {
  auto NA = P.G.addr<NodeBase*>(P.Obj);
  uint16_t Attrs = NA.Addr->getAttrs();
  uint16_t Kind = NodeAttrs::kind(Attrs);
  uint16_t Flags = NodeAttrs::flags(Attrs);
  switch (NodeAttrs::type(Attrs)) {
    case NodeAttrs::Code:
      switch (Kind) {
        case NodeAttrs::Func:   OS << 'f'; break;
        case NodeAttrs::Block:  OS << 'b'; break;
        case NodeAttrs::Stmt:   OS << 's'; break;
        case NodeAttrs::Phi:    OS << 'p'; break;
        default:                OS << "c?"; break;  // Unknown code kind.
      }
      break;
    case NodeAttrs::Ref:
      // Flag markers precede the kind letter.
      if (Flags & NodeAttrs::Undef)
        OS << '/';
      if (Flags & NodeAttrs::Dead)
        OS << '\\';
      if (Flags & NodeAttrs::Preserving)
        OS << '+';
      if (Flags & NodeAttrs::Clobbering)
        OS << '~';
      switch (Kind) {
        case NodeAttrs::Use:    OS << 'u'; break;
        case NodeAttrs::Def:    OS << 'd'; break;
        case NodeAttrs::Block:  OS << 'b'; break;  // Should not happen.
        default:                OS << "r?"; break;  // Unknown ref kind.
      }
      break;
    default:
      OS << '?';
      break;
  }
  OS << P.Obj;
  if (Flags & NodeAttrs::Shadow)
    OS << '"';
  return OS;
}
// Print the common prefix of a reference node: "<id><<reg>>", with a '!'
// suffix when the reference is to a fixed register.
static void printRefHeader(raw_ostream &OS, const NodeAddr<RefNode*> RA,
      const DataFlowGraph &G) {
  OS << Print(RA.Id, G) << '<'
     << Print(RA.Addr->getRegRef(G), G) << '>';
  if (RA.Addr->getFlags() & NodeAttrs::Fixed)
    OS << '!';
}
// Print a def node as "<header>(RD,D,U):Sib" — reaching def, reached def,
// reached use, then the sibling link. Zero (absent) links print as empty.
raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<DefNode*>> &P) {
  printRefHeader(OS, P.Obj, P.G);
  OS << '(';
  if (NodeId N = P.Obj.Addr->getReachingDef())
    OS << Print(N, P.G);
  OS << ',';
  if (NodeId N = P.Obj.Addr->getReachedDef())
    OS << Print(N, P.G);
  OS << ',';
  if (NodeId N = P.Obj.Addr->getReachedUse())
    OS << Print(N, P.G);
  OS << "):";
  if (NodeId N = P.Obj.Addr->getSibling())
    OS << Print(N, P.G);
  return OS;
}
// Print a use node as "<header>(RD):Sib" — only the reaching def and the
// sibling link are meaningful for uses.
raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<UseNode*>> &P) {
  printRefHeader(OS, P.Obj, P.G);
  OS << '(';
  if (NodeId N = P.Obj.Addr->getReachingDef())
    OS << Print(N, P.G);
  OS << "):";
  if (NodeId N = P.Obj.Addr->getSibling())
    OS << Print(N, P.G);
  return OS;
}
// Print a phi-use node as "<header>(RD,Pred):Sib" — like a plain use, but
// with the predecessor block the use comes through.
raw_ostream &operator<< (raw_ostream &OS,
      const Print<NodeAddr<PhiUseNode*>> &P) {
  printRefHeader(OS, P.Obj, P.G);
  OS << '(';
  if (NodeId N = P.Obj.Addr->getReachingDef())
    OS << Print(N, P.G);
  OS << ',';
  if (NodeId N = P.Obj.Addr->getPredecessor())
    OS << Print(N, P.G);
  OS << "):";
  if (NodeId N = P.Obj.Addr->getSibling())
    OS << Print(N, P.G);
  return OS;
}
// Print a generic reference node by dispatching on its kind: defs print as
// DefNode, uses print as either PhiUseNode (if in a phi) or plain UseNode.
raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<RefNode*>> &P) {
  switch (P.Obj.Addr->getKind()) {
    case NodeAttrs::Def:
      OS << PrintNode<DefNode*>(P.Obj, P.G);
      break;
    case NodeAttrs::Use:
      if (P.Obj.Addr->getFlags() & NodeAttrs::PhiRef)
        OS << PrintNode<PhiUseNode*>(P.Obj, P.G);
      else
        OS << PrintNode<UseNode*>(P.Obj, P.G);
      break;
  }
  return OS;
}
// Print a list of node ids as a space-separated sequence.
raw_ostream &operator<< (raw_ostream &OS, const Print<NodeList> &P) {
  unsigned N = P.Obj.size();
  for (auto I : P.Obj) {
    OS << Print(I.Id, P.G);
    if (--N)   // Separator after all but the last element.
      OS << ' ';
  }
  return OS;
}
// Print a set of node ids as a space-separated sequence.
raw_ostream &operator<< (raw_ostream &OS, const Print<NodeSet> &P) {
  unsigned N = P.Obj.size();
  for (auto I : P.Obj) {
    OS << Print(I, P.G);
    if (--N)   // Separator after all but the last element.
      OS << ' ';
  }
  return OS;
}
namespace {

// Helper for printing a NodeList as a comma-separated sequence, where each
// element is interpreted (and printed) as a node of type T.
template <typename T>
struct PrintListV {
  PrintListV(const NodeList &L, const DataFlowGraph &G) : List(L), G(G) {}

  using Type = T;
  const NodeList &List;
  const DataFlowGraph &G;
};

template <typename T>
raw_ostream &operator<< (raw_ostream &OS, const PrintListV<T> &P) {
  unsigned N = P.List.size();
  for (NodeAddr<T> A : P.List) {
    OS << PrintNode<T>(A, P.G);
    if (--N)   // Separator after all but the last element.
      OS << ", ";
  }
  return OS;
}

} // end anonymous namespace
// Print a phi node as "<id>: phi [<members>]".
raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<PhiNode*>> &P) {
  OS << Print(P.Obj.Id, P.G) << ": phi ["
     << PrintListV<RefNode*>(P.Obj.Addr->members(P.G), P.G) << ']';
  return OS;
}
// Print a statement node as "<id>: <opcode> [<members>]". For calls and
// branches, also print the first MBB/global/symbol operand as the target.
raw_ostream &operator<<(raw_ostream &OS, const Print<NodeAddr<StmtNode *>> &P) {
  const MachineInstr &MI = *P.Obj.Addr->getCode();
  unsigned Opc = MI.getOpcode();
  OS << Print(P.Obj.Id, P.G) << ": " << P.G.getTII().getName(Opc);
  // Print the target for calls and branches (for readability).
  if (MI.isCall() || MI.isBranch()) {
    MachineInstr::const_mop_iterator T =
        llvm::find_if(MI.operands(),
                      [] (const MachineOperand &Op) -> bool {
                        return Op.isMBB() || Op.isGlobal() || Op.isSymbol();
                      });
    if (T != MI.operands_end()) {
      OS << ' ';
      if (T->isMBB())
        OS << printMBBReference(*T->getMBB());
      else if (T->isGlobal())
        OS << T->getGlobal()->getName();
      else if (T->isSymbol())
        OS << T->getSymbolName();
    }
  }
  OS << " [" << PrintListV<RefNode*>(P.Obj.Addr->members(P.G), P.G) << ']';
  return OS;
}
// Print a generic instruction node by dispatching on its kind (phi or stmt).
raw_ostream &operator<< (raw_ostream &OS,
      const Print<NodeAddr<InstrNode*>> &P) {
  switch (P.Obj.Addr->getKind()) {
    case NodeAttrs::Phi:
      OS << PrintNode<PhiNode*>(P.Obj, P.G);
      break;
    case NodeAttrs::Stmt:
      OS << PrintNode<StmtNode*>(P.Obj, P.G);
      break;
    default:
      OS << "instr? " << Print(P.Obj.Id, P.G);
      break;
  }
  return OS;
}
// Print a block node: a header line with the MBB reference and the lists of
// predecessor/successor block numbers, followed by one line per member
// instruction node.
raw_ostream &operator<< (raw_ostream &OS,
      const Print<NodeAddr<BlockNode*>> &P) {
  MachineBasicBlock *BB = P.Obj.Addr->getCode();
  unsigned NP = BB->pred_size();
  std::vector<int> Ns;
  // Print the collected block numbers as "%bb.N, %bb.M, ...".
  auto PrintBBs = [&OS] (std::vector<int> Ns) -> void {
    unsigned N = Ns.size();
    for (int I : Ns) {
      OS << "%bb." << I;
      if (--N)   // Separator after all but the last element.
        OS << ", ";
    }
  };

  OS << Print(P.Obj.Id, P.G) << ": --- " << printMBBReference(*BB)
     << " --- preds(" << NP << "): ";
  for (MachineBasicBlock *B : BB->predecessors())
    Ns.push_back(B->getNumber());
  PrintBBs(Ns);

  unsigned NS = BB->succ_size();
  OS << " succs(" << NS << "): ";
  Ns.clear();
  for (MachineBasicBlock *B : BB->successors())
    Ns.push_back(B->getNumber());
  PrintBBs(Ns);
  OS << '\n';

  for (auto I : P.Obj.Addr->members(P.G))
    OS << PrintNode<InstrNode*>(I, P.G) << '\n';
  return OS;
}
// Print the whole function node: a "DFG dump:[" header, one entry per member
// block node, and a closing "]".
raw_ostream &operator<<(raw_ostream &OS, const Print<NodeAddr<FuncNode *>> &P) {
  OS << "DFG dump:[\n" << Print(P.Obj.Id, P.G) << ": Function: "
     << P.Obj.Addr->getCode()->getName() << '\n';
  for (auto I : P.Obj.Addr->members(P.G))
    OS << PrintNode<BlockNode*>(I, P.G) << '\n';
  OS << "]\n";
  return OS;
}
  279. raw_ostream &operator<< (raw_ostream &OS, const Print<RegisterSet> &P) {
  280. OS << '{';
  281. for (auto I : P.Obj)
  282. OS << ' ' << Print(I, P.G);
  283. OS << " }";
  284. return OS;
  285. }
// Print a register aggregate by delegating to RegisterAggr::print.
raw_ostream &operator<< (raw_ostream &OS, const Print<RegisterAggr> &P) {
  P.Obj.print(OS);
  return OS;
}
// Print a definition stack from top to bottom as a space-separated list of
// "<id><<reg>>" entries (delimiters are skipped by the iterator).
raw_ostream &operator<< (raw_ostream &OS,
      const Print<DataFlowGraph::DefStack> &P) {
  for (auto I = P.Obj.top(), E = P.Obj.bottom(); I != E; ) {
    OS << Print(I->Id, P.G)
       << '<' << Print(I->Addr->getRegRef(P.G), P.G) << '>';
    I.down();
    if (I != E)   // Separator between entries, not after the last one.
      OS << ' ';
  }
  return OS;
}
  301. } // end namespace rdf
  302. } // end namespace llvm
// Node allocation functions.
//
// Node allocator is like a slab memory allocator: it allocates blocks of
// memory in sizes that are multiples of the size of a node. Each block has
// the same size. Nodes are allocated from the currently active block, and
// when it becomes full, a new one is created.
// There is a mapping scheme between node id and its location in a block,
// and within that block is described in the header file.
//

// Allocate a fresh block and make it the active one. The next node will be
// carved out starting at ActiveEnd.
void NodeAllocator::startNewBlock() {
  void *T = MemPool.Allocate(NodesPerBlock*NodeMemSize, NodeMemSize);
  char *P = static_cast<char*>(T);
  Blocks.push_back(P);
  // Check if the block index is still within the allowed range, i.e. less
  // than 2^N, where N is the number of bits in NodeId for the block index.
  // BitsPerIndex is the number of bits per node index.
  assert((Blocks.size() < ((size_t)1 << (8*sizeof(NodeId)-BitsPerIndex))) &&
         "Out of bits for block index");
  ActiveEnd = P;
}
  323. bool NodeAllocator::needNewBlock() {
  324. if (Blocks.empty())
  325. return true;
  326. char *ActiveBegin = Blocks.back();
  327. uint32_t Index = (ActiveEnd-ActiveBegin)/NodeMemSize;
  328. return Index >= NodesPerBlock;
  329. }
// Allocate a new (uninitialized) node from the active block and return its
// address together with the id encoding (block index, node index).
NodeAddr<NodeBase*> NodeAllocator::New() {
  if (needNewBlock())
    startNewBlock();

  uint32_t ActiveB = Blocks.size()-1;
  uint32_t Index = (ActiveEnd - Blocks[ActiveB])/NodeMemSize;
  NodeAddr<NodeBase*> NA = { reinterpret_cast<NodeBase*>(ActiveEnd),
                             makeId(ActiveB, Index) };
  ActiveEnd += NodeMemSize;
  return NA;
}
// Map a node address back to its id by locating the block that contains the
// address and computing the node's index within it. Aborts if the address
// does not belong to any allocated block.
NodeId NodeAllocator::id(const NodeBase *P) const {
  uintptr_t A = reinterpret_cast<uintptr_t>(P);
  for (unsigned i = 0, n = Blocks.size(); i != n; ++i) {
    uintptr_t B = reinterpret_cast<uintptr_t>(Blocks[i]);
    if (A < B || A >= B + NodesPerBlock*NodeMemSize)
      continue;   // Not in this block.
    uint32_t Idx = (A-B)/NodeMemSize;
    return makeId(i, Idx);
  }
  llvm_unreachable("Invalid node address");
}
  351. void NodeAllocator::clear() {
  352. MemPool.Reset();
  353. Blocks.clear();
  354. ActiveEnd = nullptr;
  355. }
// Insert node NA after "this" in the circular chain.
void NodeBase::append(NodeAddr<NodeBase*> NA) {
  NodeId Nx = Next;
  // If NA is already "next", do nothing.
  if (Next != NA.Id) {
    // Splice NA between "this" and the old next node, keeping the chain
    // circular.
    Next = NA.Id;
    NA.Addr->Next = Nx;
  }
}
// Fundamental node manipulator functions.

// Obtain the register reference from a reference node. Phi refs store a
// packed register directly; statement refs derive it from the machine
// operand they point to.
RegisterRef RefNode::getRegRef(const DataFlowGraph &G) const {
  assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref);
  if (NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef)
    return G.unpack(Ref.PR);
  assert(Ref.Op != nullptr);
  return G.makeRegRef(*Ref.Op);
}
// Set the register reference in the reference node directly (for references
// in phi nodes).
void RefNode::setRegRef(RegisterRef RR, DataFlowGraph &G) {
  assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref);
  assert(NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef);
  Ref.PR = G.pack(RR);
}
// Set the register reference in the reference node based on a machine
// operand (for references in statement nodes). The operand pointer is stored
// as-is; G is unused here but kept for signature symmetry with the phi-ref
// overload.
void RefNode::setRegRef(MachineOperand *Op, DataFlowGraph &G) {
  assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref);
  assert(!(NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef));
  (void)G;
  Ref.Op = Op;
}
// Get the owner of a given reference node by walking the circular member
// chain until a code node (the owning instruction) is found.
NodeAddr<NodeBase*> RefNode::getOwner(const DataFlowGraph &G) {
  NodeAddr<NodeBase*> NA = G.addr<NodeBase*>(getNext());
  while (NA.Addr != this) {
    if (NA.Addr->getType() == NodeAttrs::Code)
      return NA;
    NA = G.addr<NodeBase*>(NA.Addr->getNext());
  }
  llvm_unreachable("No owner in circular list");
}
// Connect the def node to the reaching def node: this def becomes the new
// head of DA's reached-def list, with the old head as its sibling.
void DefNode::linkToDef(NodeId Self, NodeAddr<DefNode*> DA) {
  Ref.RD = DA.Id;
  Ref.Sib = DA.Addr->getReachedDef();
  DA.Addr->setReachedDef(Self);
}
// Connect the use node to the reaching def node: this use becomes the new
// head of DA's reached-use list, with the old head as its sibling.
void UseNode::linkToDef(NodeId Self, NodeAddr<DefNode*> DA) {
  Ref.RD = DA.Id;
  Ref.Sib = DA.Addr->getReachedUse();
  DA.Addr->setReachedUse(Self);
}
// Get the first member of the code node, or a null address if the member
// list is empty.
NodeAddr<NodeBase*> CodeNode::getFirstMember(const DataFlowGraph &G) const {
  if (Code.FirstM == 0)
    return NodeAddr<NodeBase*>();
  return G.addr<NodeBase*>(Code.FirstM);
}
// Get the last member of the code node, or a null address if the member
// list is empty.
NodeAddr<NodeBase*> CodeNode::getLastMember(const DataFlowGraph &G) const {
  if (Code.LastM == 0)
    return NodeAddr<NodeBase*>();
  return G.addr<NodeBase*>(Code.LastM);
}
// Add node NA at the end of the member list of the given code node.
void CodeNode::addMember(NodeAddr<NodeBase*> NA, const DataFlowGraph &G) {
  NodeAddr<NodeBase*> ML = getLastMember(G);
  if (ML.Id != 0) {
    // Non-empty list: append after the current last member.
    ML.Addr->append(NA);
  } else {
    // Empty list: NA becomes the sole member; close the circular chain by
    // pointing NA back at the owning code node.
    Code.FirstM = NA.Id;
    NodeId Self = G.id(this);
    NA.Addr->setNext(Self);
  }
  Code.LastM = NA.Id;
}
// Add node NA after member node MA in the given code node, updating the
// last-member pointer if MA was the last member.
void CodeNode::addMemberAfter(NodeAddr<NodeBase*> MA, NodeAddr<NodeBase*> NA,
      const DataFlowGraph &G) {
  MA.Addr->append(NA);
  if (Code.LastM == MA.Id)
    Code.LastM = NA.Id;
}
// Remove member node NA from the given code node. The member list is a
// circular chain that ends back at the code node itself, so removal has to
// patch both the chain and the First/Last member pointers.
void CodeNode::removeMember(NodeAddr<NodeBase*> NA, const DataFlowGraph &G) {
  NodeAddr<NodeBase*> MA = getFirstMember(G);
  assert(MA.Id != 0);

  // Special handling if the member to remove is the first member.
  if (MA.Id == NA.Id) {
    if (Code.LastM == MA.Id) {
      // If it is the only member, set both first and last to 0.
      Code.FirstM = Code.LastM = 0;
    } else {
      // Otherwise, advance the first member.
      Code.FirstM = MA.Addr->getNext();
    }
    return;
  }

  // General case: walk the chain looking for the node whose "next" is NA,
  // and splice NA out.
  while (MA.Addr != this) {
    NodeId MX = MA.Addr->getNext();
    if (MX == NA.Id) {
      MA.Addr->setNext(NA.Addr->getNext());
      // If the member to remove happens to be the last one, update the
      // LastM indicator.
      if (Code.LastM == NA.Id)
        Code.LastM = MA.Id;
      return;
    }
    MA = G.addr<NodeBase*>(MX);
  }
  llvm_unreachable("No such member");
}
// Return the list of all members of the code node (members_if with an
// always-true predicate).
NodeList CodeNode::members(const DataFlowGraph &G) const {
  static auto True = [] (NodeAddr<NodeBase*>) -> bool { return true; };
  return members_if(True, G);
}
// Return the owner of the given instr node: walk the circular member chain
// until the enclosing block node is found.
NodeAddr<NodeBase*> InstrNode::getOwner(const DataFlowGraph &G) {
  NodeAddr<NodeBase*> NA = G.addr<NodeBase*>(getNext());
  while (NA.Addr != this) {
    assert(NA.Addr->getType() == NodeAttrs::Code);
    if (NA.Addr->getKind() == NodeAttrs::Block)
      return NA;
    NA = G.addr<NodeBase*>(NA.Addr->getNext());
  }
  llvm_unreachable("No owner in circular list");
}
// Add the phi node PA to the given block node. Phis are kept at the front of
// the block's member list, before any statements.
void BlockNode::addPhi(NodeAddr<PhiNode*> PA, const DataFlowGraph &G) {
  NodeAddr<NodeBase*> M = getFirstMember(G);
  if (M.Id == 0) {
    // Empty block: PA becomes the only member.
    addMember(PA, G);
    return;
  }

  assert(M.Addr->getType() == NodeAttrs::Code);
  if (M.Addr->getKind() == NodeAttrs::Stmt) {
    // If the first member of the block is a statement, insert the phi as
    // the first member.
    Code.FirstM = PA.Id;
    PA.Addr->setNext(M.Id);
  } else {
    // If the first member is a phi, find the last phi, and append PA to it.
    assert(M.Addr->getKind() == NodeAttrs::Phi);
    NodeAddr<NodeBase*> MN = M;
    do {
      M = MN;
      MN = G.addr<NodeBase*>(M.Addr->getNext());
      assert(MN.Addr->getType() == NodeAttrs::Code);
    } while (MN.Addr->getKind() == NodeAttrs::Phi);

    // M is the last phi.
    addMemberAfter(M, PA, G);
  }
}
// Find the block node corresponding to the machine basic block BB in the
// given func node. Returns a null address if no such block node exists.
NodeAddr<BlockNode*> FuncNode::findBlock(const MachineBasicBlock *BB,
      const DataFlowGraph &G) const {
  auto EqBB = [BB] (NodeAddr<NodeBase*> NA) -> bool {
    return NodeAddr<BlockNode*>(NA).Addr->getCode() == BB;
  };
  NodeList Ms = members_if(EqBB, G);
  if (!Ms.empty())
    return Ms[0];
  return NodeAddr<BlockNode*>();
}
// Get the block node for the entry block in the given function (the first
// basic block of the MachineFunction).
NodeAddr<BlockNode*> FuncNode::getEntryBlock(const DataFlowGraph &G) {
  MachineBasicBlock *EntryB = &getCode()->front();
  return findBlock(EntryB, G);
}
// Target operand information.
//

// For a given instruction, check if there are any bits of RR that can remain
// unchanged across this def. A predicated instruction may leave the register
// untouched when the predicate is false; OpNum is currently unused.
bool TargetOperandInfo::isPreserving(const MachineInstr &In, unsigned OpNum)
      const {
  return TII.isPredicated(In);
}
// Check if the definition of RR produces an unspecified value: register-mask
// operands always do, and so do dead defs on call instructions.
bool TargetOperandInfo::isClobbering(const MachineInstr &In, unsigned OpNum)
      const {
  const MachineOperand &Op = In.getOperand(OpNum);
  if (Op.isRegMask())
    return true;
  assert(Op.isReg());
  if (In.isCall())
    if (Op.isDef() && Op.isDead())
      return true;
  return false;
}
// Check if the given instruction specifically requires this exact physical
// register at operand OpNum (i.e. the operand cannot be renamed): true for
// calls/returns/inline asm and tail calls, and for operands that appear in
// the instruction descriptor's implicit def/use lists.
bool TargetOperandInfo::isFixedReg(const MachineInstr &In, unsigned OpNum)
      const {
  if (In.isCall() || In.isReturn() || In.isInlineAsm())
    return true;
  // Check for a tail call.
  if (In.isBranch())
    for (const MachineOperand &O : In.operands())
      if (O.isGlobal() || O.isSymbol())
        return true;

  const MCInstrDesc &D = In.getDesc();
  if (D.implicit_defs().empty() && D.implicit_uses().empty())
    return false;
  const MachineOperand &Op = In.getOperand(OpNum);
  // If there is a sub-register, treat the operand as non-fixed. Currently,
  // fixed registers are those that are listed in the descriptor as implicit
  // uses or defs, and those lists do not allow sub-registers.
  if (Op.getSubReg() != 0)
    return false;
  Register Reg = Op.getReg();
  ArrayRef<MCPhysReg> ImpOps =
      Op.isDef() ? D.implicit_defs() : D.implicit_uses();
  return is_contained(ImpOps, Reg);
}
//
// The data flow graph construction.
//

// Construct a DFG that uses a default-constructed TargetOperandInfo, owned
// by the graph (DefaultTOI).
DataFlowGraph::DataFlowGraph(MachineFunction &mf, const TargetInstrInfo &tii,
      const TargetRegisterInfo &tri, const MachineDominatorTree &mdt,
      const MachineDominanceFrontier &mdf)
    : DefaultTOI(std::make_unique<TargetOperandInfo>(tii)), MF(mf), TII(tii),
      TRI(tri), PRI(tri, mf), MDT(mdt), MDF(mdf), TOI(*DefaultTOI),
      LiveIns(PRI) {
}
// Construct a DFG with a caller-provided TargetOperandInfo (not owned by
// the graph).
DataFlowGraph::DataFlowGraph(MachineFunction &mf, const TargetInstrInfo &tii,
      const TargetRegisterInfo &tri, const MachineDominatorTree &mdt,
      const MachineDominanceFrontier &mdf, const TargetOperandInfo &toi)
    : MF(mf), TII(tii), TRI(tri), PRI(tri, mf), MDT(mdt), MDF(mdf), TOI(toi),
      LiveIns(PRI) {
}
// The implementation of the definition stack.
// Each register reference has its own definition stack. In particular,
// for a register references "Reg" and "Reg:subreg" will each have their
// own definition stacks.

// Construct a stack iterator: Pos==0 is the bottom; the top is the highest
// position holding a non-delimiter entry.
DataFlowGraph::DefStack::Iterator::Iterator(const DataFlowGraph::DefStack &S,
      bool Top) : DS(S) {
  if (!Top) {
    // Initialize to bottom.
    Pos = 0;
    return;
  }
  // Initialize to the top, i.e. top-most non-delimiter (or 0, if empty).
  Pos = DS.Stack.size();
  while (Pos > 0 && DS.isDelimiter(DS.Stack[Pos-1]))
    Pos--;
}
  607. // Return the size of the stack, including block delimiters.
  608. unsigned DataFlowGraph::DefStack::size() const {
  609. unsigned S = 0;
  610. for (auto I = top(), E = bottom(); I != E; I.down())
  611. S++;
  612. return S;
  613. }
// Remove the top entry from the stack. Remove all intervening delimiters
// so that after this, the stack is either empty, or the top of the stack
// is a non-delimiter.
void DataFlowGraph::DefStack::pop() {
  assert(!empty());
  unsigned P = nextDown(Stack.size());
  Stack.resize(P);
}
// Push a delimiter for block node N on the stack (a delimiter is an entry
// with a null node address and the block id).
void DataFlowGraph::DefStack::start_block(NodeId N) {
  assert(N != 0);
  Stack.push_back(NodeAddr<DefNode*>(nullptr, N));
}
// Remove all nodes from the top of the stack, until the delimited for
// block node N is encountered. Remove the delimiter as well. In effect,
// this will remove from the stack all definitions from block N.
void DataFlowGraph::DefStack::clear_block(NodeId N) {
  assert(N != 0);
  unsigned P = Stack.size();
  // Scan downwards for the delimiter of block N; P ends up just below it.
  while (P > 0) {
    bool Found = isDelimiter(Stack[P-1], N);
    P--;
    if (Found)
      break;
  }
  // This will also remove the delimiter, if found.
  Stack.resize(P);
}
// Move the stack iterator up by one.
unsigned DataFlowGraph::DefStack::nextUp(unsigned P) const {
  // Get the next valid position after P (skipping all delimiters).
  // The input position P does not have to point to a non-delimiter.
  unsigned SS = Stack.size();
  bool IsDelim;
  assert(P < SS);
  do {
    P++;
    IsDelim = isDelimiter(Stack[P-1]);
  } while (P < SS && IsDelim);
  // The caller must guarantee a non-delimiter exists above P.
  assert(!IsDelim);
  return P;
}
// Move the stack iterator down by one.
unsigned DataFlowGraph::DefStack::nextDown(unsigned P) const {
  // Get the preceding valid position before P (skipping all delimiters).
  // The input position P does not have to point to a non-delimiter.
  assert(P > 0 && P <= Stack.size());
  bool IsDelim = isDelimiter(Stack[P-1]);
  do {
    if (--P == 0)
      break;   // Reached the bottom.
    IsDelim = isDelimiter(Stack[P-1]);
  } while (P > 0 && IsDelim);
  // Either we hit the bottom, or P points at a non-delimiter.
  assert(!IsDelim);
  return P;
}
// Register information.

// Return the registers that are live into landing pads: the exception
// pointer register, plus the exception selector register for non-funclet
// EH personalities.
RegisterSet DataFlowGraph::getLandingPadLiveIns() const {
  RegisterSet LR;
  const Function &F = MF.getFunction();
  const Constant *PF = F.hasPersonalityFn() ? F.getPersonalityFn()
                                            : nullptr;
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  if (RegisterId R = TLI.getExceptionPointerRegister(PF))
    LR.insert(RegisterRef(R));
  if (!isFuncletEHPersonality(classifyEHPersonality(PF))) {
    if (RegisterId R = TLI.getExceptionSelectorRegister(PF))
      LR.insert(RegisterRef(R));
  }
  return LR;
}
// Node management functions.

// Get the pointer to the node with the id N (nullptr for the null id 0).
NodeBase *DataFlowGraph::ptr(NodeId N) const {
  if (N == 0)
    return nullptr;
  return Memory.ptr(N);
}
  692. // Get the id of the node at the address P.
  693. NodeId DataFlowGraph::id(const NodeBase *P) const {
  694. if (P == nullptr)
  695. return 0;
  696. return Memory.id(P);
  697. }
  698. // Allocate a new node and set the attributes to Attrs.
  699. NodeAddr<NodeBase*> DataFlowGraph::newNode(uint16_t Attrs) {
  700. NodeAddr<NodeBase*> P = Memory.New();
  701. P.Addr->init();
  702. P.Addr->setAttrs(Attrs);
  703. return P;
  704. }
// Make a copy of the given node B, except for the data-flow links, which
// are set to 0.
NodeAddr<NodeBase*> DataFlowGraph::cloneNode(const NodeAddr<NodeBase*> B) {
  NodeAddr<NodeBase*> NA = newNode(0);
  // Copy all payload bytes of B into the new node, then clear the links
  // that must not be shared between the original and the clone.
  memcpy(NA.Addr, B.Addr, sizeof(NodeBase));
  // Ref nodes need to have the data-flow links reset.
  if (NA.Addr->getType() == NodeAttrs::Ref) {
    NodeAddr<RefNode*> RA = NA;
    RA.Addr->setReachingDef(0);
    RA.Addr->setSibling(0);
    // Defs additionally carry downward links (reached def/use chains).
    if (NA.Addr->getKind() == NodeAttrs::Def) {
      NodeAddr<DefNode*> DA = NA;
      DA.Addr->setReachedDef(0);
      DA.Addr->setReachedUse(0);
    }
  }
  return NA;
}
  723. // Allocation routines for specific node types/kinds.
  724. NodeAddr<UseNode*> DataFlowGraph::newUse(NodeAddr<InstrNode*> Owner,
  725. MachineOperand &Op, uint16_t Flags) {
  726. NodeAddr<UseNode*> UA = newNode(NodeAttrs::Ref | NodeAttrs::Use | Flags);
  727. UA.Addr->setRegRef(&Op, *this);
  728. return UA;
  729. }
  730. NodeAddr<PhiUseNode*> DataFlowGraph::newPhiUse(NodeAddr<PhiNode*> Owner,
  731. RegisterRef RR, NodeAddr<BlockNode*> PredB, uint16_t Flags) {
  732. NodeAddr<PhiUseNode*> PUA = newNode(NodeAttrs::Ref | NodeAttrs::Use | Flags);
  733. assert(Flags & NodeAttrs::PhiRef);
  734. PUA.Addr->setRegRef(RR, *this);
  735. PUA.Addr->setPredecessor(PredB.Id);
  736. return PUA;
  737. }
  738. NodeAddr<DefNode*> DataFlowGraph::newDef(NodeAddr<InstrNode*> Owner,
  739. MachineOperand &Op, uint16_t Flags) {
  740. NodeAddr<DefNode*> DA = newNode(NodeAttrs::Ref | NodeAttrs::Def | Flags);
  741. DA.Addr->setRegRef(&Op, *this);
  742. return DA;
  743. }
  744. NodeAddr<DefNode*> DataFlowGraph::newDef(NodeAddr<InstrNode*> Owner,
  745. RegisterRef RR, uint16_t Flags) {
  746. NodeAddr<DefNode*> DA = newNode(NodeAttrs::Ref | NodeAttrs::Def | Flags);
  747. assert(Flags & NodeAttrs::PhiRef);
  748. DA.Addr->setRegRef(RR, *this);
  749. return DA;
  750. }
  751. NodeAddr<PhiNode*> DataFlowGraph::newPhi(NodeAddr<BlockNode*> Owner) {
  752. NodeAddr<PhiNode*> PA = newNode(NodeAttrs::Code | NodeAttrs::Phi);
  753. Owner.Addr->addPhi(PA, *this);
  754. return PA;
  755. }
  756. NodeAddr<StmtNode*> DataFlowGraph::newStmt(NodeAddr<BlockNode*> Owner,
  757. MachineInstr *MI) {
  758. NodeAddr<StmtNode*> SA = newNode(NodeAttrs::Code | NodeAttrs::Stmt);
  759. SA.Addr->setCode(MI);
  760. Owner.Addr->addMember(SA, *this);
  761. return SA;
  762. }
  763. NodeAddr<BlockNode*> DataFlowGraph::newBlock(NodeAddr<FuncNode*> Owner,
  764. MachineBasicBlock *BB) {
  765. NodeAddr<BlockNode*> BA = newNode(NodeAttrs::Code | NodeAttrs::Block);
  766. BA.Addr->setCode(BB);
  767. Owner.Addr->addMember(BA, *this);
  768. return BA;
  769. }
  770. NodeAddr<FuncNode*> DataFlowGraph::newFunc(MachineFunction *MF) {
  771. NodeAddr<FuncNode*> FA = newNode(NodeAttrs::Code | NodeAttrs::Func);
  772. FA.Addr->setCode(MF);
  773. return FA;
  774. }
// Build the data flow graph.
// Phases: (1) create block/statement nodes, (2) collect all register
// references and live-ins, (3) create entry/landing-pad phis, (4) place
// phis on dominance frontiers, (5) link refs to reaching defs, and
// (6) prune dead phis (unless KeepDeadPhis is requested).
void DataFlowGraph::build(unsigned Options) {
  reset();
  Func = newFunc(&MF);

  if (MF.empty())
    return;

  for (MachineBasicBlock &B : MF) {
    NodeAddr<BlockNode*> BA = newBlock(Func, &B);
    BlockNodes.insert(std::make_pair(&B, BA));
    for (MachineInstr &I : B) {
      // Debug instructions carry no data-flow information.
      if (I.isDebugInstr())
        continue;
      buildStmt(BA, I);
    }
  }

  NodeAddr<BlockNode*> EA = Func.Addr->getEntryBlock(*this);
  NodeList Blocks = Func.Addr->members(*this);

  // Collect information about block references.
  RegisterSet AllRefs;
  for (NodeAddr<BlockNode*> BA : Blocks)
    for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this))
      for (NodeAddr<RefNode*> RA : IA.Addr->members(*this))
        AllRefs.insert(RA.Addr->getRegRef(*this));

  // Collect function live-ins and entry block live-ins.
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineBasicBlock &EntryB = *EA.Addr->getCode();
  assert(EntryB.pred_empty() && "Function entry block has predecessors");
  for (std::pair<unsigned,unsigned> P : MRI.liveins())
    LiveIns.insert(RegisterRef(P.first));
  if (MRI.tracksLiveness()) {
    // Entry-block live-ins carry lane masks; record them for precision.
    for (auto I : EntryB.liveins())
      LiveIns.insert(RegisterRef(I.PhysReg, I.LaneMask));
  }

  // Add function-entry phi nodes for the live-in registers.
  //for (std::pair<RegisterId,LaneBitmask> P : LiveIns) {
  for (auto I = LiveIns.rr_begin(), E = LiveIns.rr_end(); I != E; ++I) {
    RegisterRef RR = *I;
    NodeAddr<PhiNode*> PA = newPhi(EA);
    uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving;
    NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags);
    PA.Addr->addMember(DA, *this);
  }

  // Add phis for landing pads.
  // Landing pads, unlike usual basic blocks, are not entered through
  // branches in the program, or fall-throughs from other blocks. They
  // are entered from the exception handling runtime and target's ABI
  // may define certain registers as defined on entry to such a block.
  RegisterSet EHRegs = getLandingPadLiveIns();
  if (!EHRegs.empty()) {
    for (NodeAddr<BlockNode*> BA : Blocks) {
      const MachineBasicBlock &B = *BA.Addr->getCode();
      if (!B.isEHPad())
        continue;

      // Prepare a list of NodeIds of the block's predecessors.
      NodeList Preds;
      for (MachineBasicBlock *PB : B.predecessors())
        Preds.push_back(findBlock(PB));

      // Build phi nodes for each live-in.
      for (RegisterRef RR : EHRegs) {
        NodeAddr<PhiNode*> PA = newPhi(BA);
        uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving;
        // Add def:
        NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags);
        PA.Addr->addMember(DA, *this);
        // Add uses (no reaching defs for phi uses):
        for (NodeAddr<BlockNode*> PBA : Preds) {
          NodeAddr<PhiUseNode*> PUA = newPhiUse(PA, RR, PBA);
          PA.Addr->addMember(PUA, *this);
        }
      }
    }
  }

  // Build a map "PhiM" which will contain, for each block, the set
  // of references that will require phi definitions in that block.
  BlockRefsMap PhiM;
  for (NodeAddr<BlockNode*> BA : Blocks)
    recordDefsForDF(PhiM, BA);
  for (NodeAddr<BlockNode*> BA : Blocks)
    buildPhis(PhiM, AllRefs, BA);

  // Link all the refs. This will recursively traverse the dominator tree.
  DefStackMap DM;
  linkBlockRefs(DM, EA);

  // Finally, remove all unused phi nodes.
  if (!(Options & BuildOptions::KeepDeadPhis))
    removeUnusedPhis();
}
  861. RegisterRef DataFlowGraph::makeRegRef(unsigned Reg, unsigned Sub) const {
  862. assert(PhysicalRegisterInfo::isRegMaskId(Reg) ||
  863. Register::isPhysicalRegister(Reg));
  864. assert(Reg != 0);
  865. if (Sub != 0)
  866. Reg = TRI.getSubReg(Reg, Sub);
  867. return RegisterRef(Reg);
  868. }
  869. RegisterRef DataFlowGraph::makeRegRef(const MachineOperand &Op) const {
  870. assert(Op.isReg() || Op.isRegMask());
  871. if (Op.isReg())
  872. return makeRegRef(Op.getReg(), Op.getSubReg());
  873. return RegisterRef(PRI.getRegMaskId(Op.getRegMask()), LaneBitmask::getAll());
  874. }
  875. // For each stack in the map DefM, push the delimiter for block B on it.
  876. void DataFlowGraph::markBlock(NodeId B, DefStackMap &DefM) {
  877. // Push block delimiters.
  878. for (auto &P : DefM)
  879. P.second.start_block(B);
  880. }
  881. // Remove all definitions coming from block B from each stack in DefM.
  882. void DataFlowGraph::releaseBlock(NodeId B, DefStackMap &DefM) {
  883. // Pop all defs from this block from the definition stack. Defs that were
  884. // added to the map during the traversal of instructions will not have a
  885. // delimiter, but for those, the whole stack will be emptied.
  886. for (auto &P : DefM)
  887. P.second.clear_block(B);
  888. // Finally, remove empty stacks from the map.
  889. for (auto I = DefM.begin(), E = DefM.end(), NextI = I; I != E; I = NextI) {
  890. NextI = std::next(I);
  891. // This preserves the validity of iterators other than I.
  892. if (I->second.empty())
  893. DefM.erase(I);
  894. }
  895. }
  896. // Push all definitions from the instruction node IA to an appropriate
  897. // stack in DefM.
  898. void DataFlowGraph::pushAllDefs(NodeAddr<InstrNode*> IA, DefStackMap &DefM) {
  899. pushClobbers(IA, DefM);
  900. pushDefs(IA, DefM);
  901. }
// Push all clobbering definitions from the instruction node IA to an
// appropriate stack in DefM. (Non-clobbering defs are handled by
// pushDefs.)
void DataFlowGraph::pushClobbers(NodeAddr<InstrNode*> IA, DefStackMap &DefM) {
  NodeSet Visited;
  std::set<RegisterId> Defined;

  // The important objectives of this function are:
  // - to be able to handle instructions both while the graph is being
  //   constructed, and after the graph has been constructed, and
  // - maintain proper ordering of definitions on the stack for each
  //   register reference:
  //   - if there are two or more related defs in IA (i.e. coming from
  //     the same machine operand), then only push one def on the stack,
  //   - if there are multiple unrelated defs of non-overlapping
  //     subregisters of S, then the stack for S will have both (in an
  //     unspecified order), but the order does not matter from the data-
  //     -flow perspective.
  for (NodeAddr<DefNode*> DA : IA.Addr->members_if(IsDef, *this)) {
    // Skip defs already handled via a related def.
    if (Visited.count(DA.Id))
      continue;
    // Only clobbering defs are pushed here.
    if (!(DA.Addr->getFlags() & NodeAttrs::Clobbering))
      continue;

    NodeList Rel = getRelatedRefs(IA, DA);
    NodeAddr<DefNode*> PDA = Rel.front();
    RegisterRef RR = PDA.Addr->getRegRef(*this);

    // Push the definition on the stack for the register and all aliases.
    // The def stack traversal in linkNodeUp will check the exact aliasing.
    DefM[RR.Reg].push(DA);
    Defined.insert(RR.Reg);
    for (RegisterId A : PRI.getAliasSet(RR.Reg)) {
      // Check that we don't push the same def twice.
      assert(A != RR.Reg);
      if (!Defined.count(A))
        DefM[A].push(DA);
    }
    // Mark all the related defs as visited.
    for (NodeAddr<NodeBase*> T : Rel)
      Visited.insert(T.Id);
  }
}
// Push all non-clobbering definitions from the instruction node IA to an
// appropriate stack in DefM. (Clobbering defs are handled by
// pushClobbers, which must run first.)
void DataFlowGraph::pushDefs(NodeAddr<InstrNode*> IA, DefStackMap &DefM) {
  NodeSet Visited;
#ifndef NDEBUG
  // Debug-only: detect a register defined by two unrelated defs.
  std::set<RegisterId> Defined;
#endif

  // The important objectives of this function are:
  // - to be able to handle instructions both while the graph is being
  //   constructed, and after the graph has been constructed, and
  // - maintain proper ordering of definitions on the stack for each
  //   register reference:
  //   - if there are two or more related defs in IA (i.e. coming from
  //     the same machine operand), then only push one def on the stack,
  //   - if there are multiple unrelated defs of non-overlapping
  //     subregisters of S, then the stack for S will have both (in an
  //     unspecified order), but the order does not matter from the data-
  //     -flow perspective.
  for (NodeAddr<DefNode*> DA : IA.Addr->members_if(IsDef, *this)) {
    // Skip defs already handled via a related def.
    if (Visited.count(DA.Id))
      continue;
    // Clobbering defs were pushed by pushClobbers.
    if (DA.Addr->getFlags() & NodeAttrs::Clobbering)
      continue;

    NodeList Rel = getRelatedRefs(IA, DA);
    NodeAddr<DefNode*> PDA = Rel.front();
    RegisterRef RR = PDA.Addr->getRegRef(*this);
#ifndef NDEBUG
    // Assert if the register is defined in two or more unrelated defs.
    // This could happen if there are two or more def operands defining it.
    if (!Defined.insert(RR.Reg).second) {
      MachineInstr *MI = NodeAddr<StmtNode*>(IA).Addr->getCode();
      dbgs() << "Multiple definitions of register: "
             << Print(RR, *this) << " in\n  " << *MI << "in "
             << printMBBReference(*MI->getParent()) << '\n';
      llvm_unreachable(nullptr);
    }
#endif
    // Push the definition on the stack for the register and all aliases.
    // The def stack traversal in linkNodeUp will check the exact aliasing.
    DefM[RR.Reg].push(DA);
    for (RegisterId A : PRI.getAliasSet(RR.Reg)) {
      // Check that we don't push the same def twice.
      assert(A != RR.Reg);
      DefM[A].push(DA);
    }
    // Mark all the related defs as visited.
    for (NodeAddr<NodeBase*> T : Rel)
      Visited.insert(T.Id);
  }
}
  991. // Return the list of all reference nodes related to RA, including RA itself.
  992. // See "getNextRelated" for the meaning of a "related reference".
  993. NodeList DataFlowGraph::getRelatedRefs(NodeAddr<InstrNode*> IA,
  994. NodeAddr<RefNode*> RA) const {
  995. assert(IA.Id != 0 && RA.Id != 0);
  996. NodeList Refs;
  997. NodeId Start = RA.Id;
  998. do {
  999. Refs.push_back(RA);
  1000. RA = getNextRelated(IA, RA);
  1001. } while (RA.Id != 0 && RA.Id != Start);
  1002. return Refs;
  1003. }
// Clear all information in the graph.
void DataFlowGraph::reset() {
  // Release all nodes, forget the MBB-to-block mapping, and detach the
  // function node so a subsequent build() starts from scratch.
  Memory.clear();
  BlockNodes.clear();
  Func = NodeAddr<FuncNode*>();
}
// Return the next reference node in the instruction node IA that is related
// to RA. Conceptually, two reference nodes are related if they refer to the
// same instance of a register access, but differ in flags or other minor
// characteristics. Specific examples of related nodes are shadow reference
// nodes.
// Return the equivalent of nullptr if there are no more related references.
NodeAddr<RefNode*> DataFlowGraph::getNextRelated(NodeAddr<InstrNode*> IA,
      NodeAddr<RefNode*> RA) const {
  assert(IA.Id != 0 && RA.Id != 0);

  // Base condition: same kind (def/use) and same register reference.
  auto Related = [this,RA](NodeAddr<RefNode*> TA) -> bool {
    if (TA.Addr->getKind() != RA.Addr->getKind())
      return false;
    if (TA.Addr->getRegRef(*this) != RA.Addr->getRegRef(*this))
      return false;
    return true;
  };
  // For statements, related refs must come from the same machine operand.
  auto RelatedStmt = [&Related,RA](NodeAddr<RefNode*> TA) -> bool {
    return Related(TA) &&
           &RA.Addr->getOp() == &TA.Addr->getOp();
  };
  // For phis, related uses must additionally share the predecessor block.
  auto RelatedPhi = [&Related,RA](NodeAddr<RefNode*> TA) -> bool {
    if (!Related(TA))
      return false;
    if (TA.Addr->getKind() != NodeAttrs::Use)
      return true;
    // For phi uses, compare predecessor blocks.
    const NodeAddr<const PhiUseNode*> TUA = TA;
    const NodeAddr<const PhiUseNode*> RUA = RA;
    return TUA.Addr->getPredecessor() == RUA.Addr->getPredecessor();
  };

  RegisterRef RR = RA.Addr->getRegRef(*this);
  if (IA.Addr->getKind() == NodeAttrs::Stmt)
    return RA.Addr->getNextRef(RR, RelatedStmt, true, *this);
  return RA.Addr->getNextRef(RR, RelatedPhi, true, *this);
}
// Find the next node related to RA in IA that satisfies condition P.
// If such a node was found, return a pair where the second element is the
// located node. If such a node does not exist, return a pair where the
// first element is the element after which such a node should be inserted,
// and the second element is a null-address.
template <typename Predicate>
std::pair<NodeAddr<RefNode*>,NodeAddr<RefNode*>>
DataFlowGraph::locateNextRef(NodeAddr<InstrNode*> IA, NodeAddr<RefNode*> RA,
      Predicate P) const {
  assert(IA.Id != 0 && RA.Id != 0);

  NodeAddr<RefNode*> NA;
  NodeId Start = RA.Id;
  // Walk the (circular) chain of related refs until P matches, the chain
  // ends, or we come back to the starting node.
  while (true) {
    NA = getNextRelated(IA, RA);
    if (NA.Id == 0 || NA.Id == Start)
      break;
    if (P(NA))
      break;
    RA = NA;
  }

  // NA.Id == 0 or NA.Id == Start means the chain was exhausted without a
  // match; RA is then the insertion point for a new matching node.
  if (NA.Id != 0 && NA.Id != Start)
    return std::make_pair(RA, NA);
  return std::make_pair(RA, NodeAddr<RefNode*>());
}
// Get the next shadow node in IA corresponding to RA, and optionally create
// such a node if it does not exist.
NodeAddr<RefNode*> DataFlowGraph::getNextShadow(NodeAddr<InstrNode*> IA,
      NodeAddr<RefNode*> RA, bool Create) {
  assert(IA.Id != 0 && RA.Id != 0);

  // A shadow of RA has RA's flags plus the Shadow bit.
  uint16_t Flags = RA.Addr->getFlags() | NodeAttrs::Shadow;
  auto IsShadow = [Flags] (NodeAddr<RefNode*> TA) -> bool {
    return TA.Addr->getFlags() == Flags;
  };
  auto Loc = locateNextRef(IA, RA, IsShadow);
  if (Loc.second.Id != 0 || !Create)
    return Loc.second;

  // Create a copy of RA and mark it as shadow.
  NodeAddr<RefNode*> NA = cloneNode(RA);
  NA.Addr->setFlags(Flags | NodeAttrs::Shadow);
  // Insert right after the last related ref (Loc.first).
  IA.Addr->addMemberAfter(Loc.first, NA, *this);
  return NA;
}
  1087. // Get the next shadow node in IA corresponding to RA. Return null-address
  1088. // if such a node does not exist.
  1089. NodeAddr<RefNode*> DataFlowGraph::getNextShadow(NodeAddr<InstrNode*> IA,
  1090. NodeAddr<RefNode*> RA) const {
  1091. assert(IA.Id != 0 && RA.Id != 0);
  1092. uint16_t Flags = RA.Addr->getFlags() | NodeAttrs::Shadow;
  1093. auto IsShadow = [Flags] (NodeAddr<RefNode*> TA) -> bool {
  1094. return TA.Addr->getFlags() == Flags;
  1095. };
  1096. return locateNextRef(IA, RA, IsShadow).second;
  1097. }
// Create a new statement node in the block node BA that corresponds to
// the machine instruction MI. Operands are processed in a fixed order:
// explicit defs, reg-masks (as clobbers), implicit defs, then uses.
void DataFlowGraph::buildStmt(NodeAddr<BlockNode*> BA, MachineInstr &In) {
  NodeAddr<StmtNode*> SA = newStmt(BA, &In);

  auto isCall = [] (const MachineInstr &In) -> bool {
    if (In.isCall())
      return true;
    // Is tail call?
    if (In.isBranch()) {
      for (const MachineOperand &Op : In.operands())
        if (Op.isGlobal() || Op.isSymbol())
          return true;
      // Assume indirect branches are calls. This is for the purpose of
      // keeping implicit operands, and so it won't hurt on intra-function
      // indirect branches.
      if (In.isIndirectBranch())
        return true;
    }
    return false;
  };

  auto isDefUndef = [this] (const MachineInstr &In, RegisterRef DR) -> bool {
    // This instruction defines DR. Check if there is a use operand that
    // would make DR live on entry to the instruction.
    for (const MachineOperand &Op : In.operands()) {
      if (!Op.isReg() || Op.getReg() == 0 || !Op.isUse() || Op.isUndef())
        continue;
      RegisterRef UR = makeRegRef(Op);
      if (PRI.alias(DR, UR))
        return false;
    }
    return true;
  };

  bool IsCall = isCall(In);
  unsigned NumOps = In.getNumOperands();

  // Avoid duplicate implicit defs. This will not detect cases of implicit
  // defs that define registers that overlap, but it is not clear how to
  // interpret that in the absence of explicit defs. Overlapping explicit
  // defs are likely illegal already.
  BitVector DoneDefs(TRI.getNumRegs());
  // Process explicit defs first.
  for (unsigned OpN = 0; OpN < NumOps; ++OpN) {
    MachineOperand &Op = In.getOperand(OpN);
    if (!Op.isReg() || !Op.isDef() || Op.isImplicit())
      continue;
    Register R = Op.getReg();
    // Only physical registers participate in the graph.
    if (!R || !R.isPhysical())
      continue;
    uint16_t Flags = NodeAttrs::None;
    if (TOI.isPreserving(In, OpN)) {
      Flags |= NodeAttrs::Preserving;
      // If the def is preserving, check if it is also undefined.
      if (isDefUndef(In, makeRegRef(Op)))
        Flags |= NodeAttrs::Undef;
    }
    if (TOI.isClobbering(In, OpN))
      Flags |= NodeAttrs::Clobbering;
    if (TOI.isFixedReg(In, OpN))
      Flags |= NodeAttrs::Fixed;
    if (IsCall && Op.isDead())
      Flags |= NodeAttrs::Dead;
    NodeAddr<DefNode*> DA = newDef(SA, Op, Flags);
    SA.Addr->addMember(DA, *this);
    assert(!DoneDefs.test(R));
    DoneDefs.set(R);
  }

  // Process reg-masks (as clobbers).
  BitVector DoneClobbers(TRI.getNumRegs());
  for (unsigned OpN = 0; OpN < NumOps; ++OpN) {
    MachineOperand &Op = In.getOperand(OpN);
    if (!Op.isRegMask())
      continue;
    uint16_t Flags = NodeAttrs::Clobbering | NodeAttrs::Fixed |
                     NodeAttrs::Dead;
    NodeAddr<DefNode*> DA = newDef(SA, Op, Flags);
    SA.Addr->addMember(DA, *this);
    // Record all clobbered registers in DoneDefs.
    const uint32_t *RM = Op.getRegMask();
    // A clear bit in the mask means the register is clobbered.
    for (unsigned i = 1, e = TRI.getNumRegs(); i != e; ++i)
      if (!(RM[i/32] & (1u << (i%32))))
        DoneClobbers.set(i);
  }

  // Process implicit defs, skipping those that have already been added
  // as explicit.
  for (unsigned OpN = 0; OpN < NumOps; ++OpN) {
    MachineOperand &Op = In.getOperand(OpN);
    if (!Op.isReg() || !Op.isDef() || !Op.isImplicit())
      continue;
    Register R = Op.getReg();
    if (!R || !R.isPhysical() || DoneDefs.test(R))
      continue;
    RegisterRef RR = makeRegRef(Op);
    uint16_t Flags = NodeAttrs::None;
    if (TOI.isPreserving(In, OpN)) {
      Flags |= NodeAttrs::Preserving;
      // If the def is preserving, check if it is also undefined.
      if (isDefUndef(In, RR))
        Flags |= NodeAttrs::Undef;
    }
    if (TOI.isClobbering(In, OpN))
      Flags |= NodeAttrs::Clobbering;
    if (TOI.isFixedReg(In, OpN))
      Flags |= NodeAttrs::Fixed;
    if (IsCall && Op.isDead()) {
      // A dead implicit def already covered by a reg-mask clobber adds
      // no information; skip it.
      if (DoneClobbers.test(R))
        continue;
      Flags |= NodeAttrs::Dead;
    }
    NodeAddr<DefNode*> DA = newDef(SA, Op, Flags);
    SA.Addr->addMember(DA, *this);
    DoneDefs.set(R);
  }

  // Finally, process the uses.
  for (unsigned OpN = 0; OpN < NumOps; ++OpN) {
    MachineOperand &Op = In.getOperand(OpN);
    if (!Op.isReg() || !Op.isUse())
      continue;
    Register R = Op.getReg();
    if (!R || !R.isPhysical())
      continue;
    uint16_t Flags = NodeAttrs::None;
    if (Op.isUndef())
      Flags |= NodeAttrs::Undef;
    if (TOI.isFixedReg(In, OpN))
      Flags |= NodeAttrs::Fixed;
    NodeAddr<UseNode*> UA = newUse(SA, Op, Flags);
    SA.Addr->addMember(UA, *this);
  }
}
// Scan all defs in the block node BA and record in PhiM the locations of
// phi nodes corresponding to these defs.
void DataFlowGraph::recordDefsForDF(BlockRefsMap &PhiM,
      NodeAddr<BlockNode*> BA) {
  // Check all defs from block BA and record them in each block in BA's
  // iterated dominance frontier. This information will later be used to
  // create phi nodes.
  MachineBasicBlock *BB = BA.Addr->getCode();
  assert(BB);
  auto DFLoc = MDF.find(BB);
  // No dominance frontier means no phi placement is needed for BA's defs.
  if (DFLoc == MDF.end() || DFLoc->second.empty())
    return;

  // Traverse all instructions in the block and collect the set of all
  // defined references. For each reference there will be a phi created
  // in the block's iterated dominance frontier.
  // This is done to make sure that each defined reference gets only one
  // phi node, even if it is defined multiple times.
  RegisterSet Defs;
  for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this))
    for (NodeAddr<RefNode*> RA : IA.Addr->members_if(IsDef, *this))
      Defs.insert(RA.Addr->getRegRef(*this));

  // Calculate the iterated dominance frontier of BB.
  // The SetVector grows while we iterate, so indexing with a re-checked
  // size() implements the fixpoint (IDF = DF closed under DF).
  const MachineDominanceFrontier::DomSetType &DF = DFLoc->second;
  SetVector<MachineBasicBlock*> IDF(DF.begin(), DF.end());
  for (unsigned i = 0; i < IDF.size(); ++i) {
    auto F = MDF.find(IDF[i]);
    if (F != MDF.end())
      IDF.insert(F->second.begin(), F->second.end());
  }

  // Finally, add the set of defs to each block in the iterated dominance
  // frontier.
  for (auto *DB : IDF) {
    NodeAddr<BlockNode*> DBA = findBlock(DB);
    PhiM[DBA.Id].insert(Defs.begin(), Defs.end());
  }
}
// Given the locations of phi nodes in the map PhiM, create the phi nodes
// that are located in the block node BA.
void DataFlowGraph::buildPhis(BlockRefsMap &PhiM, RegisterSet &AllRefs,
      NodeAddr<BlockNode*> BA) {
  // Check if this block has any DF defs, i.e. if there are any defs
  // that this block is in the iterated dominance frontier of.
  auto HasDF = PhiM.find(BA.Id);
  if (HasDF == PhiM.end() || HasDF->second.empty())
    return;

  // First, remove all R in Refs such that there exists T in Refs
  // such that T covers R. In other words, only leave those refs that
  // are not covered by another ref (i.e. maximal with respect to covering).

  // MaxCoverIn: replace RR with a ref from RRs that covers it (if any).
  auto MaxCoverIn = [this] (RegisterRef RR, RegisterSet &RRs) -> RegisterRef {
    for (RegisterRef I : RRs)
      if (I != RR && RegisterAggr::isCoverOf(I, RR, PRI))
        RR = I;
    return RR;
  };

  RegisterSet MaxDF;
  for (RegisterRef I : HasDF->second)
    MaxDF.insert(MaxCoverIn(I, HasDF->second));

  // Widen each maximal DF ref to the maximal ref actually used anywhere
  // in the function (AllRefs), so the phi covers all of its uses.
  std::vector<RegisterRef> MaxRefs;
  for (RegisterRef I : MaxDF)
    MaxRefs.push_back(MaxCoverIn(I, AllRefs));

  // Now, for each R in MaxRefs, get the alias closure of R. If the closure
  // only has R in it, create a phi and a def for R. Otherwise, create a phi,
  // and add a def for each S in the closure.

  // Sort the refs so that the phis will be created in a deterministic order.
  llvm::sort(MaxRefs);
  // Remove duplicates.
  auto NewEnd = std::unique(MaxRefs.begin(), MaxRefs.end());
  MaxRefs.erase(NewEnd, MaxRefs.end());

  // True if RR aliases any member (by index into MaxRefs) of Closure.
  auto Aliased = [this,&MaxRefs](RegisterRef RR,
                   std::vector<unsigned> &Closure) -> bool {
    for (unsigned I : Closure)
      if (PRI.alias(RR, MaxRefs[I]))
        return true;
    return false;
  };

  // Prepare a list of NodeIds of the block's predecessors.
  NodeList Preds;
  const MachineBasicBlock *MBB = BA.Addr->getCode();
  for (MachineBasicBlock *PB : MBB->predecessors())
    Preds.push_back(findBlock(PB));

  while (!MaxRefs.empty()) {
    // Put the first element in the closure, and then add all subsequent
    // elements from MaxRefs to it, if they alias at least one element
    // already in the closure.
    // ClosureIdx: vector of indices in MaxRefs of members of the closure.
    std::vector<unsigned> ClosureIdx = { 0 };
    for (unsigned i = 1; i != MaxRefs.size(); ++i)
      if (Aliased(MaxRefs[i], ClosureIdx))
        ClosureIdx.push_back(i);

    // Build a phi for the closure.
    unsigned CS = ClosureIdx.size();
    NodeAddr<PhiNode*> PA = newPhi(BA);

    // Add defs.
    for (unsigned X = 0; X != CS; ++X) {
      RegisterRef RR = MaxRefs[ClosureIdx[X]];
      uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving;
      NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags);
      PA.Addr->addMember(DA, *this);
    }
    // Add phi uses.
    for (NodeAddr<BlockNode*> PBA : Preds) {
      for (unsigned X = 0; X != CS; ++X) {
        RegisterRef RR = MaxRefs[ClosureIdx[X]];
        NodeAddr<PhiUseNode*> PUA = newPhiUse(PA, RR, PBA);
        PA.Addr->addMember(PUA, *this);
      }
    }

    // Erase from MaxRefs all elements in the closure.
    // Erasing in reverse index order keeps the remaining (smaller)
    // indices valid.
    auto Begin = MaxRefs.begin();
    for (unsigned Idx : llvm::reverse(ClosureIdx))
      MaxRefs.erase(Begin + Idx);
  }
}
// Remove any unneeded phi nodes that were created during the build process.
void DataFlowGraph::removeUnusedPhis() {
  // This will remove unused phis, i.e. phis where each def does not reach
  // any uses or other defs. This will not detect or remove circular phi
  // chains that are otherwise dead. Unused/dead phis are created during
  // the build process and this function is intended to remove these cases
  // that are easily determinable to be unnecessary.

  // Seed the worklist with every phi in the function.
  SetVector<NodeId> PhiQ;
  for (NodeAddr<BlockNode*> BA : Func.Addr->members(*this)) {
    for (auto P : BA.Addr->members_if(IsPhi, *this))
      PhiQ.insert(P.Id);
  }

  // A phi is live if any of its defs reaches some def or use.
  static auto HasUsedDef = [](NodeList &Ms) -> bool {
    for (NodeAddr<NodeBase*> M : Ms) {
      if (M.Addr->getKind() != NodeAttrs::Def)
        continue;
      NodeAddr<DefNode*> DA = M;
      if (DA.Addr->getReachedDef() != 0 || DA.Addr->getReachedUse() != 0)
        return true;
    }
    return false;
  };

  // Any phi, if it is removed, may affect other phis (make them dead).
  // For each removed phi, collect the potentially affected phis and add
  // them back to the queue.
  while (!PhiQ.empty()) {
    auto PA = addr<PhiNode*>(PhiQ[0]);
    PhiQ.remove(PA.Id);
    NodeList Refs = PA.Addr->members(*this);
    if (HasUsedDef(Refs))
      continue;
    for (NodeAddr<RefNode*> RA : Refs) {
      if (NodeId RD = RA.Addr->getReachingDef()) {
        auto RDA = addr<DefNode*>(RD);
        NodeAddr<InstrNode*> OA = RDA.Addr->getOwner(*this);
        // The reaching def's owner may become dead once RA is unlinked;
        // re-queue it if it is a phi.
        if (IsPhi(OA))
          PhiQ.insert(OA.Id);
      }
      if (RA.Addr->isDef())
        unlinkDef(RA, true);
      else
        unlinkUse(RA, true);
    }
    NodeAddr<BlockNode*> BA = PA.Addr->getOwner(*this);
    BA.Addr->removeMember(PA, *this);
  }
}
// For a given reference node TA in an instruction node IA, connect the
// reaching def of TA to the appropriate def node. Create any shadow nodes
// as appropriate.
template <typename T>
void DataFlowGraph::linkRefUp(NodeAddr<InstrNode*> IA, NodeAddr<T> TA,
      DefStack &DS) {
  if (DS.empty())
    return;
  RegisterRef RR = TA.Addr->getRegRef(*this);
  NodeAddr<T> TAP;

  // References from the def stack that have been examined so far.
  RegisterAggr Defs(PRI);

  // Walk the stack from the most recent def downwards, linking TA (and
  // shadows of it) to each def that contributes a not-yet-seen part of RR,
  // until RR is fully covered.
  for (auto I = DS.top(), E = DS.bottom(); I != E; I.down()) {
    RegisterRef QR = I->Addr->getRegRef(*this);

    // Skip all defs that are aliased to any of the defs that we have already
    // seen. If this completes a cover of RR, stop the stack traversal.
    bool Alias = Defs.hasAliasOf(QR);
    bool Cover = Defs.insert(QR).hasCoverOf(RR);
    if (Alias) {
      if (Cover)
        break;
      continue;
    }

    // The reaching def.
    NodeAddr<DefNode*> RDA = *I;

    // Pick the reached node.
    if (TAP.Id == 0) {
      TAP = TA;
    } else {
      // Mark the existing ref as "shadow" and create a new shadow.
      TAP.Addr->setFlags(TAP.Addr->getFlags() | NodeAttrs::Shadow);
      TAP = getNextShadow(IA, TAP, true);
    }

    // Create the link.
    TAP.Addr->linkToDef(TAP.Id, RDA);

    if (Cover)
      break;
  }
}
// Create data-flow links for all reference nodes in the statement node SA
// that satisfy predicate P.
template <typename Predicate>
void DataFlowGraph::linkStmtRefs(DefStackMap &DefM, NodeAddr<StmtNode*> SA,
      Predicate P) {
#ifndef NDEBUG
  // Debug-only: detect multiple defs of the same reference in SA.
  RegisterSet Defs;
#endif

  // Link all nodes (upwards in the data-flow) with their reaching defs.
  for (NodeAddr<RefNode*> RA : SA.Addr->members_if(P, *this)) {
    uint16_t Kind = RA.Addr->getKind();
    assert(Kind == NodeAttrs::Def || Kind == NodeAttrs::Use);
    RegisterRef RR = RA.Addr->getRegRef(*this);
#ifndef NDEBUG
    // Do not expect multiple defs of the same reference.
    assert(Kind != NodeAttrs::Def || !Defs.count(RR));
    Defs.insert(RR);
#endif

    auto F = DefM.find(RR.Reg);
    // No stack for this register means no reaching def to link to.
    if (F == DefM.end())
      continue;
    DefStack &DS = F->second;
    if (Kind == NodeAttrs::Use)
      linkRefUp<UseNode*>(SA, RA, DS);
    else if (Kind == NodeAttrs::Def)
      linkRefUp<DefNode*>(SA, RA, DS);
    else
      llvm_unreachable("Unexpected node in instruction");
  }
}
// Create data-flow links for all instructions in the block node BA. This
// will include updating any phi nodes in BA.
//
// The traversal follows the dominator tree: this block's defs are pushed
// onto the stacks in DefM, all dominated blocks are processed recursively
// while those defs are still in scope, and finally the defs are popped.
void DataFlowGraph::linkBlockRefs(DefStackMap &DefM, NodeAddr<BlockNode*> BA) {
  // Push block delimiters.
  markBlock(BA.Id, DefM);

  // Predicates selecting defs by their Clobbering flag.
  auto IsClobber = [] (NodeAddr<RefNode*> RA) -> bool {
    return IsDef(RA) && (RA.Addr->getFlags() & NodeAttrs::Clobbering);
  };
  auto IsNoClobber = [] (NodeAddr<RefNode*> RA) -> bool {
    return IsDef(RA) && !(RA.Addr->getFlags() & NodeAttrs::Clobbering);
  };

  assert(BA.Addr && "block node address is needed to create a data-flow link");
  // For each non-phi instruction in the block, link all the defs and uses
  // to their reaching defs. For any member of the block (including phis),
  // push the defs on the corresponding stacks.
  //
  // Note the ordering: uses and clobbering defs are linked before this
  // statement's clobbers are pushed, while non-clobbering defs are linked
  // after pushClobbers but before pushDefs.
  for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this)) {
    // Ignore phi nodes here. They will be linked part by part from the
    // predecessors.
    if (IA.Addr->getKind() == NodeAttrs::Stmt) {
      linkStmtRefs(DefM, IA, IsUse);
      linkStmtRefs(DefM, IA, IsClobber);
    }

    // Push the definitions on the stack.
    pushClobbers(IA, DefM);

    if (IA.Addr->getKind() == NodeAttrs::Stmt)
      linkStmtRefs(DefM, IA, IsNoClobber);

    pushDefs(IA, DefM);
  }

  // Recursively process all children in the dominator tree.
  MachineDomTreeNode *N = MDT.getNode(BA.Addr->getCode());
  for (auto *I : *N) {
    MachineBasicBlock *SB = I->getBlock();
    NodeAddr<BlockNode*> SBA = findBlock(SB);
    linkBlockRefs(DefM, SBA);
  }

  // Link the phi uses from the successor blocks.
  // Selects phi uses whose associated predecessor is this block (BA).
  auto IsUseForBA = [BA](NodeAddr<NodeBase*> NA) -> bool {
    if (NA.Addr->getKind() != NodeAttrs::Use)
      return false;
    assert(NA.Addr->getFlags() & NodeAttrs::PhiRef);
    NodeAddr<PhiUseNode*> PUA = NA;
    return PUA.Addr->getPredecessor() == BA.Id;
  };

  RegisterSet EHLiveIns = getLandingPadLiveIns();
  MachineBasicBlock *MBB = BA.Addr->getCode();
  for (MachineBasicBlock *SB : MBB->successors()) {
    bool IsEHPad = SB->isEHPad();
    NodeAddr<BlockNode*> SBA = findBlock(SB);
    for (NodeAddr<InstrNode*> IA : SBA.Addr->members_if(IsPhi, *this)) {
      // Do not link phi uses for landing pad live-ins.
      if (IsEHPad) {
        // Find what register this phi is for. The phi's first member is
        // used as the representative reference for the whole phi.
        NodeAddr<RefNode*> RA = IA.Addr->getFirstMember(*this);
        assert(RA.Id != 0);
        if (EHLiveIns.count(RA.Addr->getRegRef(*this)))
          continue;
      }
      // Go over each phi use associated with MBB, and link it.
      for (auto U : IA.Addr->members_if(IsUseForBA, *this)) {
        NodeAddr<PhiUseNode*> PUA = U;
        RegisterRef RR = PUA.Addr->getRegRef(*this);
        linkRefUp<UseNode*>(IA, PUA, DefM[RR.Reg]);
      }
    }
  }

  // Pop all defs from this block from the definition stacks.
  releaseBlock(BA.Id, DefM);
}
  1521. // Remove the use node UA from any data-flow and structural links.
  1522. void DataFlowGraph::unlinkUseDF(NodeAddr<UseNode*> UA) {
  1523. NodeId RD = UA.Addr->getReachingDef();
  1524. NodeId Sib = UA.Addr->getSibling();
  1525. if (RD == 0) {
  1526. assert(Sib == 0);
  1527. return;
  1528. }
  1529. auto RDA = addr<DefNode*>(RD);
  1530. auto TA = addr<UseNode*>(RDA.Addr->getReachedUse());
  1531. if (TA.Id == UA.Id) {
  1532. RDA.Addr->setReachedUse(Sib);
  1533. return;
  1534. }
  1535. while (TA.Id != 0) {
  1536. NodeId S = TA.Addr->getSibling();
  1537. if (S == UA.Id) {
  1538. TA.Addr->setSibling(UA.Addr->getSibling());
  1539. return;
  1540. }
  1541. TA = addr<UseNode*>(S);
  1542. }
  1543. }
// Remove the def node DA from any data-flow and structural links.
//
// Everything DA reached (both defs and uses) is re-parented to DA's own
// reaching def RD, and spliced into RD's chains in DA's place. If DA had
// no reaching def, the reached nodes become unreached (siblings reset).
void DataFlowGraph::unlinkDefDF(NodeAddr<DefNode*> DA) {
  //
  //         RD
  //         | reached
  //         | def
  //         :
  //         .
  //        +----+
  // ... -- | DA | -- ... -- 0  : sibling chain of DA
  //        +----+
  //         |  | reached
  //         |  : def
  //         |  .
  //         | ...  : Siblings (defs)
  //         |
  //         : reached
  //         . use
  //        ...  : sibling chain of reached uses

  NodeId RD = DA.Addr->getReachingDef();

  // Visit all siblings of the reached def and reset their reaching defs.
  // Also, defs reached by DA are now "promoted" to being reached by RD,
  // so all of them will need to be spliced into the sibling chain where
  // DA belongs.
  auto getAllNodes = [this] (NodeId N) -> NodeList {
    NodeList Res;
    while (N) {
      auto RA = addr<RefNode*>(N);
      // Keep the nodes in the exact sibling order.
      Res.push_back(RA);
      N = RA.Addr->getSibling();
    }
    return Res;
  };
  // Snapshot DA's reached chains before any pointers are rewritten.
  NodeList ReachedDefs = getAllNodes(DA.Addr->getReachedDef());
  NodeList ReachedUses = getAllNodes(DA.Addr->getReachedUse());

  // With no reaching def to splice into, the reached nodes' sibling links
  // become meaningless — clear them before re-parenting below.
  if (RD == 0) {
    for (NodeAddr<RefNode*> I : ReachedDefs)
      I.Addr->setSibling(0);
    for (NodeAddr<RefNode*> I : ReachedUses)
      I.Addr->setSibling(0);
  }
  // Re-parent everything DA reached onto RD (possibly 0).
  for (NodeAddr<DefNode*> I : ReachedDefs)
    I.Addr->setReachingDef(RD);
  for (NodeAddr<UseNode*> I : ReachedUses)
    I.Addr->setReachingDef(RD);

  NodeId Sib = DA.Addr->getSibling();
  if (RD == 0) {
    assert(Sib == 0);
    return;
  }

  // Update the reaching def node and remove DA from the sibling list.
  auto RDA = addr<DefNode*>(RD);
  auto TA = addr<DefNode*>(RDA.Addr->getReachedDef());
  if (TA.Id == DA.Id) {
    // If DA is the first reached def, just update the RD's reached def
    // to the DA's sibling.
    RDA.Addr->setReachedDef(Sib);
  } else {
    // Otherwise, traverse the sibling list of the reached defs and remove
    // DA from it.
    while (TA.Id != 0) {
      NodeId S = TA.Addr->getSibling();
      if (S == DA.Id) {
        TA.Addr->setSibling(Sib);
        break;
      }
      TA = addr<DefNode*>(S);
    }
  }

  // Splice the DA's reached defs into the RDA's reached def chain.
  // (ReachedDefs preserves sibling order, so the chain order is kept.)
  if (!ReachedDefs.empty()) {
    auto Last = NodeAddr<DefNode*>(ReachedDefs.back());
    Last.Addr->setSibling(RDA.Addr->getReachedDef());
    RDA.Addr->setReachedDef(ReachedDefs.front().Id);
  }
  // Splice the DA's reached uses into the RDA's reached use chain.
  if (!ReachedUses.empty()) {
    auto Last = NodeAddr<UseNode*>(ReachedUses.back());
    Last.Addr->setSibling(RDA.Addr->getReachedUse());
    RDA.Addr->setReachedUse(ReachedUses.front().Id);
  }
}