RDFGraph.cpp 58 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832
  1. //===- RDFGraph.cpp -------------------------------------------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // Target-independent, SSA-based data flow graph for register data flow (RDF).
  10. //
  11. #include "llvm/ADT/BitVector.h"
  12. #include "llvm/ADT/STLExtras.h"
  13. #include "llvm/ADT/SetVector.h"
  14. #include "llvm/CodeGen/MachineBasicBlock.h"
  15. #include "llvm/CodeGen/MachineDominanceFrontier.h"
  16. #include "llvm/CodeGen/MachineDominators.h"
  17. #include "llvm/CodeGen/MachineFunction.h"
  18. #include "llvm/CodeGen/MachineInstr.h"
  19. #include "llvm/CodeGen/MachineOperand.h"
  20. #include "llvm/CodeGen/MachineRegisterInfo.h"
  21. #include "llvm/CodeGen/RDFGraph.h"
  22. #include "llvm/CodeGen/RDFRegisters.h"
  23. #include "llvm/CodeGen/TargetInstrInfo.h"
  24. #include "llvm/CodeGen/TargetLowering.h"
  25. #include "llvm/CodeGen/TargetRegisterInfo.h"
  26. #include "llvm/CodeGen/TargetSubtargetInfo.h"
  27. #include "llvm/IR/Function.h"
  28. #include "llvm/MC/LaneBitmask.h"
  29. #include "llvm/MC/MCInstrDesc.h"
  30. #include "llvm/MC/MCRegisterInfo.h"
  31. #include "llvm/Support/Debug.h"
  32. #include "llvm/Support/ErrorHandling.h"
  33. #include "llvm/Support/raw_ostream.h"
  34. #include <algorithm>
  35. #include <cassert>
  36. #include <cstdint>
  37. #include <cstring>
  38. #include <iterator>
  39. #include <set>
  40. #include <utility>
  41. #include <vector>
  42. using namespace llvm;
  43. using namespace rdf;
  44. // Printing functions. Have them here first, so that the rest of the code
  45. // can use them.
  46. namespace llvm {
  47. namespace rdf {
  48. raw_ostream &operator<< (raw_ostream &OS, const PrintLaneMaskOpt &P) {
  49. if (!P.Mask.all())
  50. OS << ':' << PrintLaneMask(P.Mask);
  51. return OS;
  52. }
  53. raw_ostream &operator<< (raw_ostream &OS, const Print<RegisterRef> &P) {
  54. auto &TRI = P.G.getTRI();
  55. if (P.Obj.Reg > 0 && P.Obj.Reg < TRI.getNumRegs())
  56. OS << TRI.getName(P.Obj.Reg);
  57. else
  58. OS << '#' << P.Obj.Reg;
  59. OS << PrintLaneMaskOpt(P.Obj.Mask);
  60. return OS;
  61. }
  62. raw_ostream &operator<< (raw_ostream &OS, const Print<NodeId> &P) {
  63. auto NA = P.G.addr<NodeBase*>(P.Obj);
  64. uint16_t Attrs = NA.Addr->getAttrs();
  65. uint16_t Kind = NodeAttrs::kind(Attrs);
  66. uint16_t Flags = NodeAttrs::flags(Attrs);
  67. switch (NodeAttrs::type(Attrs)) {
  68. case NodeAttrs::Code:
  69. switch (Kind) {
  70. case NodeAttrs::Func: OS << 'f'; break;
  71. case NodeAttrs::Block: OS << 'b'; break;
  72. case NodeAttrs::Stmt: OS << 's'; break;
  73. case NodeAttrs::Phi: OS << 'p'; break;
  74. default: OS << "c?"; break;
  75. }
  76. break;
  77. case NodeAttrs::Ref:
  78. if (Flags & NodeAttrs::Undef)
  79. OS << '/';
  80. if (Flags & NodeAttrs::Dead)
  81. OS << '\\';
  82. if (Flags & NodeAttrs::Preserving)
  83. OS << '+';
  84. if (Flags & NodeAttrs::Clobbering)
  85. OS << '~';
  86. switch (Kind) {
  87. case NodeAttrs::Use: OS << 'u'; break;
  88. case NodeAttrs::Def: OS << 'd'; break;
  89. case NodeAttrs::Block: OS << 'b'; break;
  90. default: OS << "r?"; break;
  91. }
  92. break;
  93. default:
  94. OS << '?';
  95. break;
  96. }
  97. OS << P.Obj;
  98. if (Flags & NodeAttrs::Shadow)
  99. OS << '"';
  100. return OS;
  101. }
  102. static void printRefHeader(raw_ostream &OS, const NodeAddr<RefNode*> RA,
  103. const DataFlowGraph &G) {
  104. OS << Print<NodeId>(RA.Id, G) << '<'
  105. << Print<RegisterRef>(RA.Addr->getRegRef(G), G) << '>';
  106. if (RA.Addr->getFlags() & NodeAttrs::Fixed)
  107. OS << '!';
  108. }
  109. raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<DefNode*>> &P) {
  110. printRefHeader(OS, P.Obj, P.G);
  111. OS << '(';
  112. if (NodeId N = P.Obj.Addr->getReachingDef())
  113. OS << Print<NodeId>(N, P.G);
  114. OS << ',';
  115. if (NodeId N = P.Obj.Addr->getReachedDef())
  116. OS << Print<NodeId>(N, P.G);
  117. OS << ',';
  118. if (NodeId N = P.Obj.Addr->getReachedUse())
  119. OS << Print<NodeId>(N, P.G);
  120. OS << "):";
  121. if (NodeId N = P.Obj.Addr->getSibling())
  122. OS << Print<NodeId>(N, P.G);
  123. return OS;
  124. }
  125. raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<UseNode*>> &P) {
  126. printRefHeader(OS, P.Obj, P.G);
  127. OS << '(';
  128. if (NodeId N = P.Obj.Addr->getReachingDef())
  129. OS << Print<NodeId>(N, P.G);
  130. OS << "):";
  131. if (NodeId N = P.Obj.Addr->getSibling())
  132. OS << Print<NodeId>(N, P.G);
  133. return OS;
  134. }
  135. raw_ostream &operator<< (raw_ostream &OS,
  136. const Print<NodeAddr<PhiUseNode*>> &P) {
  137. printRefHeader(OS, P.Obj, P.G);
  138. OS << '(';
  139. if (NodeId N = P.Obj.Addr->getReachingDef())
  140. OS << Print<NodeId>(N, P.G);
  141. OS << ',';
  142. if (NodeId N = P.Obj.Addr->getPredecessor())
  143. OS << Print<NodeId>(N, P.G);
  144. OS << "):";
  145. if (NodeId N = P.Obj.Addr->getSibling())
  146. OS << Print<NodeId>(N, P.G);
  147. return OS;
  148. }
  149. raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<RefNode*>> &P) {
  150. switch (P.Obj.Addr->getKind()) {
  151. case NodeAttrs::Def:
  152. OS << PrintNode<DefNode*>(P.Obj, P.G);
  153. break;
  154. case NodeAttrs::Use:
  155. if (P.Obj.Addr->getFlags() & NodeAttrs::PhiRef)
  156. OS << PrintNode<PhiUseNode*>(P.Obj, P.G);
  157. else
  158. OS << PrintNode<UseNode*>(P.Obj, P.G);
  159. break;
  160. }
  161. return OS;
  162. }
  163. raw_ostream &operator<< (raw_ostream &OS, const Print<NodeList> &P) {
  164. unsigned N = P.Obj.size();
  165. for (auto I : P.Obj) {
  166. OS << Print<NodeId>(I.Id, P.G);
  167. if (--N)
  168. OS << ' ';
  169. }
  170. return OS;
  171. }
  172. raw_ostream &operator<< (raw_ostream &OS, const Print<NodeSet> &P) {
  173. unsigned N = P.Obj.size();
  174. for (auto I : P.Obj) {
  175. OS << Print<NodeId>(I, P.G);
  176. if (--N)
  177. OS << ' ';
  178. }
  179. return OS;
  180. }
  181. namespace {
  182. template <typename T>
  183. struct PrintListV {
  184. PrintListV(const NodeList &L, const DataFlowGraph &G) : List(L), G(G) {}
  185. using Type = T;
  186. const NodeList &List;
  187. const DataFlowGraph &G;
  188. };
  189. template <typename T>
  190. raw_ostream &operator<< (raw_ostream &OS, const PrintListV<T> &P) {
  191. unsigned N = P.List.size();
  192. for (NodeAddr<T> A : P.List) {
  193. OS << PrintNode<T>(A, P.G);
  194. if (--N)
  195. OS << ", ";
  196. }
  197. return OS;
  198. }
  199. } // end anonymous namespace
  200. raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<PhiNode*>> &P) {
  201. OS << Print<NodeId>(P.Obj.Id, P.G) << ": phi ["
  202. << PrintListV<RefNode*>(P.Obj.Addr->members(P.G), P.G) << ']';
  203. return OS;
  204. }
  205. raw_ostream &operator<<(raw_ostream &OS, const Print<NodeAddr<StmtNode *>> &P) {
  206. const MachineInstr &MI = *P.Obj.Addr->getCode();
  207. unsigned Opc = MI.getOpcode();
  208. OS << Print<NodeId>(P.Obj.Id, P.G) << ": " << P.G.getTII().getName(Opc);
  209. // Print the target for calls and branches (for readability).
  210. if (MI.isCall() || MI.isBranch()) {
  211. MachineInstr::const_mop_iterator T =
  212. llvm::find_if(MI.operands(),
  213. [] (const MachineOperand &Op) -> bool {
  214. return Op.isMBB() || Op.isGlobal() || Op.isSymbol();
  215. });
  216. if (T != MI.operands_end()) {
  217. OS << ' ';
  218. if (T->isMBB())
  219. OS << printMBBReference(*T->getMBB());
  220. else if (T->isGlobal())
  221. OS << T->getGlobal()->getName();
  222. else if (T->isSymbol())
  223. OS << T->getSymbolName();
  224. }
  225. }
  226. OS << " [" << PrintListV<RefNode*>(P.Obj.Addr->members(P.G), P.G) << ']';
  227. return OS;
  228. }
  229. raw_ostream &operator<< (raw_ostream &OS,
  230. const Print<NodeAddr<InstrNode*>> &P) {
  231. switch (P.Obj.Addr->getKind()) {
  232. case NodeAttrs::Phi:
  233. OS << PrintNode<PhiNode*>(P.Obj, P.G);
  234. break;
  235. case NodeAttrs::Stmt:
  236. OS << PrintNode<StmtNode*>(P.Obj, P.G);
  237. break;
  238. default:
  239. OS << "instr? " << Print<NodeId>(P.Obj.Id, P.G);
  240. break;
  241. }
  242. return OS;
  243. }
  244. raw_ostream &operator<< (raw_ostream &OS,
  245. const Print<NodeAddr<BlockNode*>> &P) {
  246. MachineBasicBlock *BB = P.Obj.Addr->getCode();
  247. unsigned NP = BB->pred_size();
  248. std::vector<int> Ns;
  249. auto PrintBBs = [&OS] (std::vector<int> Ns) -> void {
  250. unsigned N = Ns.size();
  251. for (int I : Ns) {
  252. OS << "%bb." << I;
  253. if (--N)
  254. OS << ", ";
  255. }
  256. };
  257. OS << Print<NodeId>(P.Obj.Id, P.G) << ": --- " << printMBBReference(*BB)
  258. << " --- preds(" << NP << "): ";
  259. for (MachineBasicBlock *B : BB->predecessors())
  260. Ns.push_back(B->getNumber());
  261. PrintBBs(Ns);
  262. unsigned NS = BB->succ_size();
  263. OS << " succs(" << NS << "): ";
  264. Ns.clear();
  265. for (MachineBasicBlock *B : BB->successors())
  266. Ns.push_back(B->getNumber());
  267. PrintBBs(Ns);
  268. OS << '\n';
  269. for (auto I : P.Obj.Addr->members(P.G))
  270. OS << PrintNode<InstrNode*>(I, P.G) << '\n';
  271. return OS;
  272. }
  273. raw_ostream &operator<<(raw_ostream &OS, const Print<NodeAddr<FuncNode *>> &P) {
  274. OS << "DFG dump:[\n" << Print<NodeId>(P.Obj.Id, P.G) << ": Function: "
  275. << P.Obj.Addr->getCode()->getName() << '\n';
  276. for (auto I : P.Obj.Addr->members(P.G))
  277. OS << PrintNode<BlockNode*>(I, P.G) << '\n';
  278. OS << "]\n";
  279. return OS;
  280. }
  281. raw_ostream &operator<< (raw_ostream &OS, const Print<RegisterSet> &P) {
  282. OS << '{';
  283. for (auto I : P.Obj)
  284. OS << ' ' << Print<RegisterRef>(I, P.G);
  285. OS << " }";
  286. return OS;
  287. }
  288. raw_ostream &operator<< (raw_ostream &OS, const Print<RegisterAggr> &P) {
  289. P.Obj.print(OS);
  290. return OS;
  291. }
  292. raw_ostream &operator<< (raw_ostream &OS,
  293. const Print<DataFlowGraph::DefStack> &P) {
  294. for (auto I = P.Obj.top(), E = P.Obj.bottom(); I != E; ) {
  295. OS << Print<NodeId>(I->Id, P.G)
  296. << '<' << Print<RegisterRef>(I->Addr->getRegRef(P.G), P.G) << '>';
  297. I.down();
  298. if (I != E)
  299. OS << ' ';
  300. }
  301. return OS;
  302. }
  303. } // end namespace rdf
  304. } // end namespace llvm
  305. // Node allocation functions.
  306. //
  307. // Node allocator is like a slab memory allocator: it allocates blocks of
  308. // memory in sizes that are multiples of the size of a node. Each block has
  309. // the same size. Nodes are allocated from the currently active block, and
  310. // when it becomes full, a new one is created.
  311. // There is a mapping scheme between node id and its location in a block,
  312. // and within that block is described in the header file.
  313. //
  314. void NodeAllocator::startNewBlock() {
  315. void *T = MemPool.Allocate(NodesPerBlock*NodeMemSize, NodeMemSize);
  316. char *P = static_cast<char*>(T);
  317. Blocks.push_back(P);
  318. // Check if the block index is still within the allowed range, i.e. less
  319. // than 2^N, where N is the number of bits in NodeId for the block index.
  320. // BitsPerIndex is the number of bits per node index.
  321. assert((Blocks.size() < ((size_t)1 << (8*sizeof(NodeId)-BitsPerIndex))) &&
  322. "Out of bits for block index");
  323. ActiveEnd = P;
  324. }
  325. bool NodeAllocator::needNewBlock() {
  326. if (Blocks.empty())
  327. return true;
  328. char *ActiveBegin = Blocks.back();
  329. uint32_t Index = (ActiveEnd-ActiveBegin)/NodeMemSize;
  330. return Index >= NodesPerBlock;
  331. }
  332. NodeAddr<NodeBase*> NodeAllocator::New() {
  333. if (needNewBlock())
  334. startNewBlock();
  335. uint32_t ActiveB = Blocks.size()-1;
  336. uint32_t Index = (ActiveEnd - Blocks[ActiveB])/NodeMemSize;
  337. NodeAddr<NodeBase*> NA = { reinterpret_cast<NodeBase*>(ActiveEnd),
  338. makeId(ActiveB, Index) };
  339. ActiveEnd += NodeMemSize;
  340. return NA;
  341. }
  342. NodeId NodeAllocator::id(const NodeBase *P) const {
  343. uintptr_t A = reinterpret_cast<uintptr_t>(P);
  344. for (unsigned i = 0, n = Blocks.size(); i != n; ++i) {
  345. uintptr_t B = reinterpret_cast<uintptr_t>(Blocks[i]);
  346. if (A < B || A >= B + NodesPerBlock*NodeMemSize)
  347. continue;
  348. uint32_t Idx = (A-B)/NodeMemSize;
  349. return makeId(i, Idx);
  350. }
  351. llvm_unreachable("Invalid node address");
  352. }
  353. void NodeAllocator::clear() {
  354. MemPool.Reset();
  355. Blocks.clear();
  356. ActiveEnd = nullptr;
  357. }
  358. // Insert node NA after "this" in the circular chain.
  359. void NodeBase::append(NodeAddr<NodeBase*> NA) {
  360. NodeId Nx = Next;
  361. // If NA is already "next", do nothing.
  362. if (Next != NA.Id) {
  363. Next = NA.Id;
  364. NA.Addr->Next = Nx;
  365. }
  366. }
  367. // Fundamental node manipulator functions.
  368. // Obtain the register reference from a reference node.
  369. RegisterRef RefNode::getRegRef(const DataFlowGraph &G) const {
  370. assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref);
  371. if (NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef)
  372. return G.unpack(Ref.PR);
  373. assert(Ref.Op != nullptr);
  374. return G.makeRegRef(*Ref.Op);
  375. }
  376. // Set the register reference in the reference node directly (for references
  377. // in phi nodes).
  378. void RefNode::setRegRef(RegisterRef RR, DataFlowGraph &G) {
  379. assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref);
  380. assert(NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef);
  381. Ref.PR = G.pack(RR);
  382. }
  383. // Set the register reference in the reference node based on a machine
  384. // operand (for references in statement nodes).
  385. void RefNode::setRegRef(MachineOperand *Op, DataFlowGraph &G) {
  386. assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref);
  387. assert(!(NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef));
  388. (void)G;
  389. Ref.Op = Op;
  390. }
  391. // Get the owner of a given reference node.
  392. NodeAddr<NodeBase*> RefNode::getOwner(const DataFlowGraph &G) {
  393. NodeAddr<NodeBase*> NA = G.addr<NodeBase*>(getNext());
  394. while (NA.Addr != this) {
  395. if (NA.Addr->getType() == NodeAttrs::Code)
  396. return NA;
  397. NA = G.addr<NodeBase*>(NA.Addr->getNext());
  398. }
  399. llvm_unreachable("No owner in circular list");
  400. }
  401. // Connect the def node to the reaching def node.
  402. void DefNode::linkToDef(NodeId Self, NodeAddr<DefNode*> DA) {
  403. Ref.RD = DA.Id;
  404. Ref.Sib = DA.Addr->getReachedDef();
  405. DA.Addr->setReachedDef(Self);
  406. }
  407. // Connect the use node to the reaching def node.
  408. void UseNode::linkToDef(NodeId Self, NodeAddr<DefNode*> DA) {
  409. Ref.RD = DA.Id;
  410. Ref.Sib = DA.Addr->getReachedUse();
  411. DA.Addr->setReachedUse(Self);
  412. }
  413. // Get the first member of the code node.
  414. NodeAddr<NodeBase*> CodeNode::getFirstMember(const DataFlowGraph &G) const {
  415. if (Code.FirstM == 0)
  416. return NodeAddr<NodeBase*>();
  417. return G.addr<NodeBase*>(Code.FirstM);
  418. }
  419. // Get the last member of the code node.
  420. NodeAddr<NodeBase*> CodeNode::getLastMember(const DataFlowGraph &G) const {
  421. if (Code.LastM == 0)
  422. return NodeAddr<NodeBase*>();
  423. return G.addr<NodeBase*>(Code.LastM);
  424. }
  425. // Add node NA at the end of the member list of the given code node.
  426. void CodeNode::addMember(NodeAddr<NodeBase*> NA, const DataFlowGraph &G) {
  427. NodeAddr<NodeBase*> ML = getLastMember(G);
  428. if (ML.Id != 0) {
  429. ML.Addr->append(NA);
  430. } else {
  431. Code.FirstM = NA.Id;
  432. NodeId Self = G.id(this);
  433. NA.Addr->setNext(Self);
  434. }
  435. Code.LastM = NA.Id;
  436. }
  437. // Add node NA after member node MA in the given code node.
  438. void CodeNode::addMemberAfter(NodeAddr<NodeBase*> MA, NodeAddr<NodeBase*> NA,
  439. const DataFlowGraph &G) {
  440. MA.Addr->append(NA);
  441. if (Code.LastM == MA.Id)
  442. Code.LastM = NA.Id;
  443. }
// Remove member node NA from the given code node.
// The members form a circular singly-linked list that includes the owning
// code node itself, so unlinking requires finding NA's predecessor and
// keeping the FirstM/LastM cache consistent.
void CodeNode::removeMember(NodeAddr<NodeBase*> NA, const DataFlowGraph &G) {
  NodeAddr<NodeBase*> MA = getFirstMember(G);
  // Removing from an empty list is a caller error.
  assert(MA.Id != 0);
  // Special handling if the member to remove is the first member.
  if (MA.Id == NA.Id) {
    if (Code.LastM == MA.Id) {
      // If it is the only member, set both first and last to 0.
      Code.FirstM = Code.LastM = 0;
    } else {
      // Otherwise, advance the first member.
      Code.FirstM = MA.Addr->getNext();
    }
    return;
  }
  // Walk the circle looking for the node whose "next" is NA; stop when
  // the walk wraps around to the owning code node ("this").
  while (MA.Addr != this) {
    NodeId MX = MA.Addr->getNext();
    if (MX == NA.Id) {
      // Unlink NA by splicing its successor into its predecessor.
      MA.Addr->setNext(NA.Addr->getNext());
      // If the member to remove happens to be the last one, update the
      // LastM indicator.
      if (Code.LastM == NA.Id)
        Code.LastM = MA.Id;
      return;
    }
    MA = G.addr<NodeBase*>(MX);
  }
  llvm_unreachable("No such member");
}
  473. // Return the list of all members of the code node.
  474. NodeList CodeNode::members(const DataFlowGraph &G) const {
  475. static auto True = [] (NodeAddr<NodeBase*>) -> bool { return true; };
  476. return members_if(True, G);
  477. }
  478. // Return the owner of the given instr node.
  479. NodeAddr<NodeBase*> InstrNode::getOwner(const DataFlowGraph &G) {
  480. NodeAddr<NodeBase*> NA = G.addr<NodeBase*>(getNext());
  481. while (NA.Addr != this) {
  482. assert(NA.Addr->getType() == NodeAttrs::Code);
  483. if (NA.Addr->getKind() == NodeAttrs::Block)
  484. return NA;
  485. NA = G.addr<NodeBase*>(NA.Addr->getNext());
  486. }
  487. llvm_unreachable("No owner in circular list");
  488. }
// Add the phi node PA to the given block node.
// Phis are kept grouped at the front of the block's member list, before
// any statements, so PA is inserted right after the last existing phi.
void BlockNode::addPhi(NodeAddr<PhiNode*> PA, const DataFlowGraph &G) {
  NodeAddr<NodeBase*> M = getFirstMember(G);
  // Empty block: PA becomes the sole member.
  if (M.Id == 0) {
    addMember(PA, G);
    return;
  }
  assert(M.Addr->getType() == NodeAttrs::Code);
  if (M.Addr->getKind() == NodeAttrs::Stmt) {
    // If the first member of the block is a statement, insert the phi as
    // the first member.
    Code.FirstM = PA.Id;
    PA.Addr->setNext(M.Id);
  } else {
    // If the first member is a phi, find the last phi, and append PA to it.
    assert(M.Addr->getKind() == NodeAttrs::Phi);
    NodeAddr<NodeBase*> MN = M;
    // Advance M/MN in lock-step until MN is the first non-phi node; M then
    // trails one step behind as the last phi.
    do {
      M = MN;
      MN = G.addr<NodeBase*>(M.Addr->getNext());
      assert(MN.Addr->getType() == NodeAttrs::Code);
    } while (MN.Addr->getKind() == NodeAttrs::Phi);
    // M is the last phi.
    addMemberAfter(M, PA, G);
  }
}
  515. // Find the block node corresponding to the machine basic block BB in the
  516. // given func node.
  517. NodeAddr<BlockNode*> FuncNode::findBlock(const MachineBasicBlock *BB,
  518. const DataFlowGraph &G) const {
  519. auto EqBB = [BB] (NodeAddr<NodeBase*> NA) -> bool {
  520. return NodeAddr<BlockNode*>(NA).Addr->getCode() == BB;
  521. };
  522. NodeList Ms = members_if(EqBB, G);
  523. if (!Ms.empty())
  524. return Ms[0];
  525. return NodeAddr<BlockNode*>();
  526. }
  527. // Get the block node for the entry block in the given function.
  528. NodeAddr<BlockNode*> FuncNode::getEntryBlock(const DataFlowGraph &G) {
  529. MachineBasicBlock *EntryB = &getCode()->front();
  530. return findBlock(EntryB, G);
  531. }
  532. // Target operand information.
  533. //
  534. // For a given instruction, check if there are any bits of RR that can remain
  535. // unchanged across this def.
  536. bool TargetOperandInfo::isPreserving(const MachineInstr &In, unsigned OpNum)
  537. const {
  538. return TII.isPredicated(In);
  539. }
  540. // Check if the definition of RR produces an unspecified value.
  541. bool TargetOperandInfo::isClobbering(const MachineInstr &In, unsigned OpNum)
  542. const {
  543. const MachineOperand &Op = In.getOperand(OpNum);
  544. if (Op.isRegMask())
  545. return true;
  546. assert(Op.isReg());
  547. if (In.isCall())
  548. if (Op.isDef() && Op.isDead())
  549. return true;
  550. return false;
  551. }
  552. // Check if the given instruction specifically requires
  553. bool TargetOperandInfo::isFixedReg(const MachineInstr &In, unsigned OpNum)
  554. const {
  555. if (In.isCall() || In.isReturn() || In.isInlineAsm())
  556. return true;
  557. // Check for a tail call.
  558. if (In.isBranch())
  559. for (const MachineOperand &O : In.operands())
  560. if (O.isGlobal() || O.isSymbol())
  561. return true;
  562. const MCInstrDesc &D = In.getDesc();
  563. if (!D.getImplicitDefs() && !D.getImplicitUses())
  564. return false;
  565. const MachineOperand &Op = In.getOperand(OpNum);
  566. // If there is a sub-register, treat the operand as non-fixed. Currently,
  567. // fixed registers are those that are listed in the descriptor as implicit
  568. // uses or defs, and those lists do not allow sub-registers.
  569. if (Op.getSubReg() != 0)
  570. return false;
  571. Register Reg = Op.getReg();
  572. const MCPhysReg *ImpR = Op.isDef() ? D.getImplicitDefs()
  573. : D.getImplicitUses();
  574. if (!ImpR)
  575. return false;
  576. while (*ImpR)
  577. if (*ImpR++ == Reg)
  578. return true;
  579. return false;
  580. }
//
// The data flow graph construction.
//

// Construct the graph object for the given machine function, capturing the
// target hooks and analyses (dominator tree, dominance frontier, operand
// info) that graph construction will need. NOTE(review): the constructor
// only initializes members; presumably the graph itself is populated by a
// separate build step — confirm with the class interface.
DataFlowGraph::DataFlowGraph(MachineFunction &mf, const TargetInstrInfo &tii,
      const TargetRegisterInfo &tri, const MachineDominatorTree &mdt,
      const MachineDominanceFrontier &mdf, const TargetOperandInfo &toi)
    : MF(mf), TII(tii), TRI(tri), PRI(tri, mf), MDT(mdt), MDF(mdf), TOI(toi),
      LiveIns(PRI) {
}
  590. // The implementation of the definition stack.
  591. // Each register reference has its own definition stack. In particular,
  592. // for a register references "Reg" and "Reg:subreg" will each have their
  593. // own definition stacks.
  594. // Construct a stack iterator.
  595. DataFlowGraph::DefStack::Iterator::Iterator(const DataFlowGraph::DefStack &S,
  596. bool Top) : DS(S) {
  597. if (!Top) {
  598. // Initialize to bottom.
  599. Pos = 0;
  600. return;
  601. }
  602. // Initialize to the top, i.e. top-most non-delimiter (or 0, if empty).
  603. Pos = DS.Stack.size();
  604. while (Pos > 0 && DS.isDelimiter(DS.Stack[Pos-1]))
  605. Pos--;
  606. }
  607. // Return the size of the stack, including block delimiters.
  608. unsigned DataFlowGraph::DefStack::size() const {
  609. unsigned S = 0;
  610. for (auto I = top(), E = bottom(); I != E; I.down())
  611. S++;
  612. return S;
  613. }
  614. // Remove the top entry from the stack. Remove all intervening delimiters
  615. // so that after this, the stack is either empty, or the top of the stack
  616. // is a non-delimiter.
  617. void DataFlowGraph::DefStack::pop() {
  618. assert(!empty());
  619. unsigned P = nextDown(Stack.size());
  620. Stack.resize(P);
  621. }
  622. // Push a delimiter for block node N on the stack.
  623. void DataFlowGraph::DefStack::start_block(NodeId N) {
  624. assert(N != 0);
  625. Stack.push_back(NodeAddr<DefNode*>(nullptr, N));
  626. }
  627. // Remove all nodes from the top of the stack, until the delimited for
  628. // block node N is encountered. Remove the delimiter as well. In effect,
  629. // this will remove from the stack all definitions from block N.
  630. void DataFlowGraph::DefStack::clear_block(NodeId N) {
  631. assert(N != 0);
  632. unsigned P = Stack.size();
  633. while (P > 0) {
  634. bool Found = isDelimiter(Stack[P-1], N);
  635. P--;
  636. if (Found)
  637. break;
  638. }
  639. // This will also remove the delimiter, if found.
  640. Stack.resize(P);
  641. }
// Move the stack iterator up by one.
unsigned DataFlowGraph::DefStack::nextUp(unsigned P) const {
  // Get the next valid position after P (skipping all delimiters).
  // The input position P does not have to point to a non-delimiter.
  unsigned SS = Stack.size();
  bool IsDelim;
  assert(P < SS);
  do {
    P++;
    // Positions are 1-based relative to the entry being examined, so
    // position P corresponds to Stack[P-1].
    IsDelim = isDelimiter(Stack[P-1]);
  } while (P < SS && IsDelim);
  // The loop stops either on a non-delimiter or at the top of the stack;
  // the top entry must not be a delimiter for the iterator to be valid.
  assert(!IsDelim);
  return P;
}
// Move the stack iterator down by one.
unsigned DataFlowGraph::DefStack::nextDown(unsigned P) const {
  // Get the preceding valid position before P (skipping all delimiters).
  // The input position P does not have to point to a non-delimiter.
  assert(P > 0 && P <= Stack.size());
  bool IsDelim = isDelimiter(Stack[P-1]);
  do {
    // Reaching 0 means the bottom of the stack (the "end" position).
    if (--P == 0)
      break;
    IsDelim = isDelimiter(Stack[P-1]);
  } while (P > 0 && IsDelim);
  // Either the bottom was reached (after a non-delimiter entry at P==1),
  // or a non-delimiter was found.
  assert(!IsDelim);
  return P;
}
  670. // Register information.
  671. RegisterSet DataFlowGraph::getLandingPadLiveIns() const {
  672. RegisterSet LR;
  673. const Function &F = MF.getFunction();
  674. const Constant *PF = F.hasPersonalityFn() ? F.getPersonalityFn()
  675. : nullptr;
  676. const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  677. if (RegisterId R = TLI.getExceptionPointerRegister(PF))
  678. LR.insert(RegisterRef(R));
  679. if (!isFuncletEHPersonality(classifyEHPersonality(PF))) {
  680. if (RegisterId R = TLI.getExceptionSelectorRegister(PF))
  681. LR.insert(RegisterRef(R));
  682. }
  683. return LR;
  684. }
  685. // Node management functions.
  686. // Get the pointer to the node with the id N.
  687. NodeBase *DataFlowGraph::ptr(NodeId N) const {
  688. if (N == 0)
  689. return nullptr;
  690. return Memory.ptr(N);
  691. }
  692. // Get the id of the node at the address P.
  693. NodeId DataFlowGraph::id(const NodeBase *P) const {
  694. if (P == nullptr)
  695. return 0;
  696. return Memory.id(P);
  697. }
  698. // Allocate a new node and set the attributes to Attrs.
  699. NodeAddr<NodeBase*> DataFlowGraph::newNode(uint16_t Attrs) {
  700. NodeAddr<NodeBase*> P = Memory.New();
  701. P.Addr->init();
  702. P.Addr->setAttrs(Attrs);
  703. return P;
  704. }
  705. // Make a copy of the given node B, except for the data-flow links, which
  706. // are set to 0.
  707. NodeAddr<NodeBase*> DataFlowGraph::cloneNode(const NodeAddr<NodeBase*> B) {
  708. NodeAddr<NodeBase*> NA = newNode(0);
  709. memcpy(NA.Addr, B.Addr, sizeof(NodeBase));
  710. // Ref nodes need to have the data-flow links reset.
  711. if (NA.Addr->getType() == NodeAttrs::Ref) {
  712. NodeAddr<RefNode*> RA = NA;
  713. RA.Addr->setReachingDef(0);
  714. RA.Addr->setSibling(0);
  715. if (NA.Addr->getKind() == NodeAttrs::Def) {
  716. NodeAddr<DefNode*> DA = NA;
  717. DA.Addr->setReachedDef(0);
  718. DA.Addr->setReachedUse(0);
  719. }
  720. }
  721. return NA;
  722. }
  723. // Allocation routines for specific node types/kinds.
  724. NodeAddr<UseNode*> DataFlowGraph::newUse(NodeAddr<InstrNode*> Owner,
  725. MachineOperand &Op, uint16_t Flags) {
  726. NodeAddr<UseNode*> UA = newNode(NodeAttrs::Ref | NodeAttrs::Use | Flags);
  727. UA.Addr->setRegRef(&Op, *this);
  728. return UA;
  729. }
  730. NodeAddr<PhiUseNode*> DataFlowGraph::newPhiUse(NodeAddr<PhiNode*> Owner,
  731. RegisterRef RR, NodeAddr<BlockNode*> PredB, uint16_t Flags) {
  732. NodeAddr<PhiUseNode*> PUA = newNode(NodeAttrs::Ref | NodeAttrs::Use | Flags);
  733. assert(Flags & NodeAttrs::PhiRef);
  734. PUA.Addr->setRegRef(RR, *this);
  735. PUA.Addr->setPredecessor(PredB.Id);
  736. return PUA;
  737. }
  738. NodeAddr<DefNode*> DataFlowGraph::newDef(NodeAddr<InstrNode*> Owner,
  739. MachineOperand &Op, uint16_t Flags) {
  740. NodeAddr<DefNode*> DA = newNode(NodeAttrs::Ref | NodeAttrs::Def | Flags);
  741. DA.Addr->setRegRef(&Op, *this);
  742. return DA;
  743. }
  744. NodeAddr<DefNode*> DataFlowGraph::newDef(NodeAddr<InstrNode*> Owner,
  745. RegisterRef RR, uint16_t Flags) {
  746. NodeAddr<DefNode*> DA = newNode(NodeAttrs::Ref | NodeAttrs::Def | Flags);
  747. assert(Flags & NodeAttrs::PhiRef);
  748. DA.Addr->setRegRef(RR, *this);
  749. return DA;
  750. }
  751. NodeAddr<PhiNode*> DataFlowGraph::newPhi(NodeAddr<BlockNode*> Owner) {
  752. NodeAddr<PhiNode*> PA = newNode(NodeAttrs::Code | NodeAttrs::Phi);
  753. Owner.Addr->addPhi(PA, *this);
  754. return PA;
  755. }
  756. NodeAddr<StmtNode*> DataFlowGraph::newStmt(NodeAddr<BlockNode*> Owner,
  757. MachineInstr *MI) {
  758. NodeAddr<StmtNode*> SA = newNode(NodeAttrs::Code | NodeAttrs::Stmt);
  759. SA.Addr->setCode(MI);
  760. Owner.Addr->addMember(SA, *this);
  761. return SA;
  762. }
  763. NodeAddr<BlockNode*> DataFlowGraph::newBlock(NodeAddr<FuncNode*> Owner,
  764. MachineBasicBlock *BB) {
  765. NodeAddr<BlockNode*> BA = newNode(NodeAttrs::Code | NodeAttrs::Block);
  766. BA.Addr->setCode(BB);
  767. Owner.Addr->addMember(BA, *this);
  768. return BA;
  769. }
  770. NodeAddr<FuncNode*> DataFlowGraph::newFunc(MachineFunction *MF) {
  771. NodeAddr<FuncNode*> FA = newNode(NodeAttrs::Code | NodeAttrs::Func);
  772. FA.Addr->setCode(MF);
  773. return FA;
  774. }
// Build the data flow graph.
// Phases: (1) create block/statement nodes for the whole function,
// (2) collect all register references and live-ins, (3) add phi nodes
// for function live-ins and landing pads, (4) add phi nodes implied by
// the iterated dominance frontiers, (5) link refs to reaching defs, and
// (6) optionally prune dead phis.
void DataFlowGraph::build(unsigned Options) {
  reset();
  Func = newFunc(&MF);

  if (MF.empty())
    return;

  // Phase 1: one block node per MBB, one statement node per instruction
  // (debug instructions are not represented in the graph).
  for (MachineBasicBlock &B : MF) {
    NodeAddr<BlockNode*> BA = newBlock(Func, &B);
    BlockNodes.insert(std::make_pair(&B, BA));
    for (MachineInstr &I : B) {
      if (I.isDebugInstr())
        continue;
      buildStmt(BA, I);
    }
  }

  NodeAddr<BlockNode*> EA = Func.Addr->getEntryBlock(*this);
  NodeList Blocks = Func.Addr->members(*this);

  // Collect information about block references: the set of all register
  // references appearing anywhere in the function.
  RegisterSet AllRefs;
  for (NodeAddr<BlockNode*> BA : Blocks)
    for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this))
      for (NodeAddr<RefNode*> RA : IA.Addr->members(*this))
        AllRefs.insert(RA.Addr->getRegRef(*this));

  // Collect function live-ins and entry block live-ins.
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineBasicBlock &EntryB = *EA.Addr->getCode();
  assert(EntryB.pred_empty() && "Function entry block has predecessors");
  for (std::pair<unsigned,unsigned> P : MRI.liveins())
    LiveIns.insert(RegisterRef(P.first));
  if (MRI.tracksLiveness()) {
    // Entry-block live-ins carry lane masks; use them when available.
    for (auto I : EntryB.liveins())
      LiveIns.insert(RegisterRef(I.PhysReg, I.LaneMask));
  }

  // Add function-entry phi nodes for the live-in registers. These act as
  // the initial definitions of the live-in values.
  for (auto I = LiveIns.rr_begin(), E = LiveIns.rr_end(); I != E; ++I) {
    RegisterRef RR = *I;
    NodeAddr<PhiNode*> PA = newPhi(EA);
    uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving;
    NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags);
    PA.Addr->addMember(DA, *this);
  }

  // Add phis for landing pads.
  // Landing pads, unlike usual basic blocks, are not entered through
  // branches in the program, or fall-throughs from other blocks. They
  // are entered from the exception handling runtime and target's ABI
  // may define certain registers as defined on entry to such a block.
  RegisterSet EHRegs = getLandingPadLiveIns();
  if (!EHRegs.empty()) {
    for (NodeAddr<BlockNode*> BA : Blocks) {
      const MachineBasicBlock &B = *BA.Addr->getCode();
      if (!B.isEHPad())
        continue;

      // Prepare a list of NodeIds of the block's predecessors.
      NodeList Preds;
      for (MachineBasicBlock *PB : B.predecessors())
        Preds.push_back(findBlock(PB));

      // Build phi nodes for each live-in.
      for (RegisterRef RR : EHRegs) {
        NodeAddr<PhiNode*> PA = newPhi(BA);
        uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving;
        // Add def:
        NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags);
        PA.Addr->addMember(DA, *this);
        // Add uses (no reaching defs for phi uses):
        for (NodeAddr<BlockNode*> PBA : Preds) {
          NodeAddr<PhiUseNode*> PUA = newPhiUse(PA, RR, PBA);
          PA.Addr->addMember(PUA, *this);
        }
      }
    }
  }

  // Build a map "PhiM" which will contain, for each block, the set
  // of references that will require phi definitions in that block.
  BlockRefsMap PhiM;
  for (NodeAddr<BlockNode*> BA : Blocks)
    recordDefsForDF(PhiM, BA);
  for (NodeAddr<BlockNode*> BA : Blocks)
    buildPhis(PhiM, AllRefs, BA);

  // Link all the refs. This will recursively traverse the dominator tree.
  DefStackMap DM;
  linkBlockRefs(DM, EA);

  // Finally, remove all unused phi nodes.
  if (!(Options & BuildOptions::KeepDeadPhis))
    removeUnusedPhis();
}
  861. RegisterRef DataFlowGraph::makeRegRef(unsigned Reg, unsigned Sub) const {
  862. assert(PhysicalRegisterInfo::isRegMaskId(Reg) ||
  863. Register::isPhysicalRegister(Reg));
  864. assert(Reg != 0);
  865. if (Sub != 0)
  866. Reg = TRI.getSubReg(Reg, Sub);
  867. return RegisterRef(Reg);
  868. }
  869. RegisterRef DataFlowGraph::makeRegRef(const MachineOperand &Op) const {
  870. assert(Op.isReg() || Op.isRegMask());
  871. if (Op.isReg())
  872. return makeRegRef(Op.getReg(), Op.getSubReg());
  873. return RegisterRef(PRI.getRegMaskId(Op.getRegMask()), LaneBitmask::getAll());
  874. }
  875. RegisterRef DataFlowGraph::restrictRef(RegisterRef AR, RegisterRef BR) const {
  876. if (AR.Reg == BR.Reg) {
  877. LaneBitmask M = AR.Mask & BR.Mask;
  878. return M.any() ? RegisterRef(AR.Reg, M) : RegisterRef();
  879. }
  880. // This isn't strictly correct, because the overlap may happen in the
  881. // part masked out.
  882. if (PRI.alias(AR, BR))
  883. return AR;
  884. return RegisterRef();
  885. }
  886. // For each stack in the map DefM, push the delimiter for block B on it.
  887. void DataFlowGraph::markBlock(NodeId B, DefStackMap &DefM) {
  888. // Push block delimiters.
  889. for (auto &P : DefM)
  890. P.second.start_block(B);
  891. }
  892. // Remove all definitions coming from block B from each stack in DefM.
  893. void DataFlowGraph::releaseBlock(NodeId B, DefStackMap &DefM) {
  894. // Pop all defs from this block from the definition stack. Defs that were
  895. // added to the map during the traversal of instructions will not have a
  896. // delimiter, but for those, the whole stack will be emptied.
  897. for (auto &P : DefM)
  898. P.second.clear_block(B);
  899. // Finally, remove empty stacks from the map.
  900. for (auto I = DefM.begin(), E = DefM.end(), NextI = I; I != E; I = NextI) {
  901. NextI = std::next(I);
  902. // This preserves the validity of iterators other than I.
  903. if (I->second.empty())
  904. DefM.erase(I);
  905. }
  906. }
// Push all definitions from the instruction node IA to an appropriate
// stack in DefM.
// Clobbering defs are pushed first (see pushClobbers), so the regular
// defs pushed afterwards end up above them on each register's stack.
void DataFlowGraph::pushAllDefs(NodeAddr<InstrNode*> IA, DefStackMap &DefM) {
  pushClobbers(IA, DefM);
  pushDefs(IA, DefM);
}
// Push all definitions from the instruction node IA to an appropriate
// stack in DefM. This handles only the clobbering defs; the remaining
// defs are handled by pushDefs.
void DataFlowGraph::pushClobbers(NodeAddr<InstrNode*> IA, DefStackMap &DefM) {
  NodeSet Visited;
  // Registers already pushed for this instruction (to avoid pushing the
  // same def on the same stack twice via overlapping alias sets).
  std::set<RegisterId> Defined;

  // The important objectives of this function are:
  // - to be able to handle instructions both while the graph is being
  //   constructed, and after the graph has been constructed, and
  // - maintain proper ordering of definitions on the stack for each
  //   register reference:
  //   - if there are two or more related defs in IA (i.e. coming from
  //     the same machine operand), then only push one def on the stack,
  //   - if there are multiple unrelated defs of non-overlapping
  //     subregisters of S, then the stack for S will have both (in an
  //     unspecified order), but the order does not matter from the data-
  //     -flow perspective.

  for (NodeAddr<DefNode*> DA : IA.Addr->members_if(IsDef, *this)) {
    // Skip defs already covered as "related" to an earlier def.
    if (Visited.count(DA.Id))
      continue;
    // Non-clobbering defs are handled by pushDefs.
    if (!(DA.Addr->getFlags() & NodeAttrs::Clobbering))
      continue;

    NodeList Rel = getRelatedRefs(IA, DA);
    NodeAddr<DefNode*> PDA = Rel.front();
    RegisterRef RR = PDA.Addr->getRegRef(*this);

    // Push the definition on the stack for the register and all aliases.
    // The def stack traversal in linkNodeUp will check the exact aliasing.
    DefM[RR.Reg].push(DA);
    Defined.insert(RR.Reg);
    for (RegisterId A : PRI.getAliasSet(RR.Reg)) {
      // Check that we don't push the same def twice.
      assert(A != RR.Reg);
      if (!Defined.count(A))
        DefM[A].push(DA);
    }
    // Mark all the related defs as visited.
    for (NodeAddr<NodeBase*> T : Rel)
      Visited.insert(T.Id);
  }
}
// Push all definitions from the instruction node IA to an appropriate
// stack in DefM. This handles the non-clobbering defs; clobbering defs
// are handled by pushClobbers.
void DataFlowGraph::pushDefs(NodeAddr<InstrNode*> IA, DefStackMap &DefM) {
  NodeSet Visited;
#ifndef NDEBUG
  // Used only to diagnose multiple unrelated defs of the same register.
  std::set<RegisterId> Defined;
#endif

  // The important objectives of this function are:
  // - to be able to handle instructions both while the graph is being
  //   constructed, and after the graph has been constructed, and
  // - maintain proper ordering of definitions on the stack for each
  //   register reference:
  //   - if there are two or more related defs in IA (i.e. coming from
  //     the same machine operand), then only push one def on the stack,
  //   - if there are multiple unrelated defs of non-overlapping
  //     subregisters of S, then the stack for S will have both (in an
  //     unspecified order), but the order does not matter from the data-
  //     -flow perspective.

  for (NodeAddr<DefNode*> DA : IA.Addr->members_if(IsDef, *this)) {
    // Skip defs already covered as "related" to an earlier def.
    if (Visited.count(DA.Id))
      continue;
    // Clobbering defs were already pushed by pushClobbers.
    if (DA.Addr->getFlags() & NodeAttrs::Clobbering)
      continue;

    NodeList Rel = getRelatedRefs(IA, DA);
    NodeAddr<DefNode*> PDA = Rel.front();
    RegisterRef RR = PDA.Addr->getRegRef(*this);
#ifndef NDEBUG
    // Assert if the register is defined in two or more unrelated defs.
    // This could happen if there are two or more def operands defining it.
    if (!Defined.insert(RR.Reg).second) {
      MachineInstr *MI = NodeAddr<StmtNode*>(IA).Addr->getCode();
      dbgs() << "Multiple definitions of register: "
             << Print<RegisterRef>(RR, *this) << " in\n " << *MI << "in "
             << printMBBReference(*MI->getParent()) << '\n';
      llvm_unreachable(nullptr);
    }
#endif
    // Push the definition on the stack for the register and all aliases.
    // The def stack traversal in linkNodeUp will check the exact aliasing.
    DefM[RR.Reg].push(DA);
    for (RegisterId A : PRI.getAliasSet(RR.Reg)) {
      // Check that we don't push the same def twice.
      assert(A != RR.Reg);
      DefM[A].push(DA);
    }
    // Mark all the related defs as visited.
    for (NodeAddr<NodeBase*> T : Rel)
      Visited.insert(T.Id);
  }
}
  1002. // Return the list of all reference nodes related to RA, including RA itself.
  1003. // See "getNextRelated" for the meaning of a "related reference".
  1004. NodeList DataFlowGraph::getRelatedRefs(NodeAddr<InstrNode*> IA,
  1005. NodeAddr<RefNode*> RA) const {
  1006. assert(IA.Id != 0 && RA.Id != 0);
  1007. NodeList Refs;
  1008. NodeId Start = RA.Id;
  1009. do {
  1010. Refs.push_back(RA);
  1011. RA = getNextRelated(IA, RA);
  1012. } while (RA.Id != 0 && RA.Id != Start);
  1013. return Refs;
  1014. }
// Clear all information in the graph.
void DataFlowGraph::reset() {
  Memory.clear();       // Release all allocated nodes.
  BlockNodes.clear();   // Drop the MBB -> block-node map.
  Func = NodeAddr<FuncNode*>();  // Reset the root to a null address.
}
// Return the next reference node in the instruction node IA that is related
// to RA. Conceptually, two reference nodes are related if they refer to the
// same instance of a register access, but differ in flags or other minor
// characteristics. Specific examples of related nodes are shadow reference
// nodes.
// Return the equivalent of nullptr if there are no more related references.
NodeAddr<RefNode*> DataFlowGraph::getNextRelated(NodeAddr<InstrNode*> IA,
      NodeAddr<RefNode*> RA) const {
  assert(IA.Id != 0 && RA.Id != 0);

  // Base criterion: same kind (def/use) and same register reference.
  auto Related = [this,RA](NodeAddr<RefNode*> TA) -> bool {
    if (TA.Addr->getKind() != RA.Addr->getKind())
      return false;
    if (TA.Addr->getRegRef(*this) != RA.Addr->getRegRef(*this))
      return false;
    return true;
  };
  // In statements, related refs must additionally come from the same
  // machine operand (compared by address).
  auto RelatedStmt = [&Related,RA](NodeAddr<RefNode*> TA) -> bool {
    return Related(TA) &&
           &RA.Addr->getOp() == &TA.Addr->getOp();
  };
  // In phis, related uses must additionally come in from the same
  // predecessor block.
  auto RelatedPhi = [&Related,RA](NodeAddr<RefNode*> TA) -> bool {
    if (!Related(TA))
      return false;
    if (TA.Addr->getKind() != NodeAttrs::Use)
      return true;
    // For phi uses, compare predecessor blocks.
    const NodeAddr<const PhiUseNode*> TUA = TA;
    const NodeAddr<const PhiUseNode*> RUA = RA;
    return TUA.Addr->getPredecessor() == RUA.Addr->getPredecessor();
  };

  RegisterRef RR = RA.Addr->getRegRef(*this);
  if (IA.Addr->getKind() == NodeAttrs::Stmt)
    return RA.Addr->getNextRef(RR, RelatedStmt, true, *this);
  return RA.Addr->getNextRef(RR, RelatedPhi, true, *this);
}
// Find the next node related to RA in IA that satisfies condition P.
// If such a node was found, return a pair where the second element is the
// located node. If such a node does not exist, return a pair where the
// first element is the element after which such a node should be inserted,
// and the second element is a null-address.
template <typename Predicate>
std::pair<NodeAddr<RefNode*>,NodeAddr<RefNode*>>
DataFlowGraph::locateNextRef(NodeAddr<InstrNode*> IA, NodeAddr<RefNode*> RA,
      Predicate P) const {
  assert(IA.Id != 0 && RA.Id != 0);
  NodeAddr<RefNode*> NA;
  NodeId Start = RA.Id;
  // Walk the chain of related refs until P is satisfied, or the walk
  // runs out of nodes or wraps around to the starting node.
  while (true) {
    NA = getNextRelated(IA, RA);
    if (NA.Id == 0 || NA.Id == Start)
      break;
    if (P(NA))
      break;
    RA = NA;
  }
  // NA is a genuine find only if it is neither null nor the start node;
  // otherwise report "not found" with RA as the insertion point.
  if (NA.Id != 0 && NA.Id != Start)
    return std::make_pair(RA, NA);
  return std::make_pair(RA, NodeAddr<RefNode*>());
}
  1080. // Get the next shadow node in IA corresponding to RA, and optionally create
  1081. // such a node if it does not exist.
  1082. NodeAddr<RefNode*> DataFlowGraph::getNextShadow(NodeAddr<InstrNode*> IA,
  1083. NodeAddr<RefNode*> RA, bool Create) {
  1084. assert(IA.Id != 0 && RA.Id != 0);
  1085. uint16_t Flags = RA.Addr->getFlags() | NodeAttrs::Shadow;
  1086. auto IsShadow = [Flags] (NodeAddr<RefNode*> TA) -> bool {
  1087. return TA.Addr->getFlags() == Flags;
  1088. };
  1089. auto Loc = locateNextRef(IA, RA, IsShadow);
  1090. if (Loc.second.Id != 0 || !Create)
  1091. return Loc.second;
  1092. // Create a copy of RA and mark is as shadow.
  1093. NodeAddr<RefNode*> NA = cloneNode(RA);
  1094. NA.Addr->setFlags(Flags | NodeAttrs::Shadow);
  1095. IA.Addr->addMemberAfter(Loc.first, NA, *this);
  1096. return NA;
  1097. }
  1098. // Get the next shadow node in IA corresponding to RA. Return null-address
  1099. // if such a node does not exist.
  1100. NodeAddr<RefNode*> DataFlowGraph::getNextShadow(NodeAddr<InstrNode*> IA,
  1101. NodeAddr<RefNode*> RA) const {
  1102. assert(IA.Id != 0 && RA.Id != 0);
  1103. uint16_t Flags = RA.Addr->getFlags() | NodeAttrs::Shadow;
  1104. auto IsShadow = [Flags] (NodeAddr<RefNode*> TA) -> bool {
  1105. return TA.Addr->getFlags() == Flags;
  1106. };
  1107. return locateNextRef(IA, RA, IsShadow).second;
  1108. }
// Create a new statement node in the block node BA that corresponds to
// the machine instruction MI.
// Ref nodes are created in this order: explicit defs, reg-mask defs
// (as clobbers), implicit defs, then uses.
void DataFlowGraph::buildStmt(NodeAddr<BlockNode*> BA, MachineInstr &In) {
  NodeAddr<StmtNode*> SA = newStmt(BA, &In);

  // Heuristic: treat tail calls and indirect branches as calls, so that
  // their implicit operands are retained.
  auto isCall = [] (const MachineInstr &In) -> bool {
    if (In.isCall())
      return true;
    // Is tail call?
    if (In.isBranch()) {
      for (const MachineOperand &Op : In.operands())
        if (Op.isGlobal() || Op.isSymbol())
          return true;
      // Assume indirect branches are calls. This is for the purpose of
      // keeping implicit operands, and so it won't hurt on intra-function
      // indirect branches.
      if (In.isIndirectBranch())
        return true;
    }
    return false;
  };

  auto isDefUndef = [this] (const MachineInstr &In, RegisterRef DR) -> bool {
    // This instruction defines DR. Check if there is a use operand that
    // would make DR live on entry to the instruction.
    for (const MachineOperand &Op : In.operands()) {
      if (!Op.isReg() || Op.getReg() == 0 || !Op.isUse() || Op.isUndef())
        continue;
      RegisterRef UR = makeRegRef(Op);
      if (PRI.alias(DR, UR))
        return false;
    }
    return true;
  };

  bool IsCall = isCall(In);
  unsigned NumOps = In.getNumOperands();

  // Avoid duplicate implicit defs. This will not detect cases of implicit
  // defs that define registers that overlap, but it is not clear how to
  // interpret that in the absence of explicit defs. Overlapping explicit
  // defs are likely illegal already.
  BitVector DoneDefs(TRI.getNumRegs());

  // Process explicit defs first.
  for (unsigned OpN = 0; OpN < NumOps; ++OpN) {
    MachineOperand &Op = In.getOperand(OpN);
    if (!Op.isReg() || !Op.isDef() || Op.isImplicit())
      continue;
    Register R = Op.getReg();
    // Only physical registers are represented in the graph.
    if (!R || !Register::isPhysicalRegister(R))
      continue;
    uint16_t Flags = NodeAttrs::None;
    if (TOI.isPreserving(In, OpN)) {
      Flags |= NodeAttrs::Preserving;
      // If the def is preserving, check if it is also undefined.
      if (isDefUndef(In, makeRegRef(Op)))
        Flags |= NodeAttrs::Undef;
    }
    if (TOI.isClobbering(In, OpN))
      Flags |= NodeAttrs::Clobbering;
    if (TOI.isFixedReg(In, OpN))
      Flags |= NodeAttrs::Fixed;
    if (IsCall && Op.isDead())
      Flags |= NodeAttrs::Dead;
    NodeAddr<DefNode*> DA = newDef(SA, Op, Flags);
    SA.Addr->addMember(DA, *this);
    assert(!DoneDefs.test(R));
    DoneDefs.set(R);
  }

  // Process reg-masks (as clobbers).
  BitVector DoneClobbers(TRI.getNumRegs());
  for (unsigned OpN = 0; OpN < NumOps; ++OpN) {
    MachineOperand &Op = In.getOperand(OpN);
    if (!Op.isRegMask())
      continue;
    uint16_t Flags = NodeAttrs::Clobbering | NodeAttrs::Fixed |
                     NodeAttrs::Dead;
    NodeAddr<DefNode*> DA = newDef(SA, Op, Flags);
    SA.Addr->addMember(DA, *this);
    // Record all clobbered registers in DoneDefs.
    // A clear bit in the mask means the register is clobbered.
    const uint32_t *RM = Op.getRegMask();
    for (unsigned i = 1, e = TRI.getNumRegs(); i != e; ++i)
      if (!(RM[i/32] & (1u << (i%32))))
        DoneClobbers.set(i);
  }

  // Process implicit defs, skipping those that have already been added
  // as explicit.
  for (unsigned OpN = 0; OpN < NumOps; ++OpN) {
    MachineOperand &Op = In.getOperand(OpN);
    if (!Op.isReg() || !Op.isDef() || !Op.isImplicit())
      continue;
    Register R = Op.getReg();
    if (!R || !Register::isPhysicalRegister(R) || DoneDefs.test(R))
      continue;
    RegisterRef RR = makeRegRef(Op);
    uint16_t Flags = NodeAttrs::None;
    if (TOI.isPreserving(In, OpN)) {
      Flags |= NodeAttrs::Preserving;
      // If the def is preserving, check if it is also undefined.
      if (isDefUndef(In, RR))
        Flags |= NodeAttrs::Undef;
    }
    if (TOI.isClobbering(In, OpN))
      Flags |= NodeAttrs::Clobbering;
    if (TOI.isFixedReg(In, OpN))
      Flags |= NodeAttrs::Fixed;
    if (IsCall && Op.isDead()) {
      // A dead implicit def on a call that is already covered by a
      // reg-mask clobber does not need its own def node.
      if (DoneClobbers.test(R))
        continue;
      Flags |= NodeAttrs::Dead;
    }
    NodeAddr<DefNode*> DA = newDef(SA, Op, Flags);
    SA.Addr->addMember(DA, *this);
    DoneDefs.set(R);
  }

  // Finally, process all register uses.
  for (unsigned OpN = 0; OpN < NumOps; ++OpN) {
    MachineOperand &Op = In.getOperand(OpN);
    if (!Op.isReg() || !Op.isUse())
      continue;
    Register R = Op.getReg();
    if (!R || !Register::isPhysicalRegister(R))
      continue;
    uint16_t Flags = NodeAttrs::None;
    if (Op.isUndef())
      Flags |= NodeAttrs::Undef;
    if (TOI.isFixedReg(In, OpN))
      Flags |= NodeAttrs::Fixed;
    NodeAddr<UseNode*> UA = newUse(SA, Op, Flags);
    SA.Addr->addMember(UA, *this);
  }
}
// Scan all defs in the block node BA and record in PhiM the locations of
// phi nodes corresponding to these defs.
void DataFlowGraph::recordDefsForDF(BlockRefsMap &PhiM,
      NodeAddr<BlockNode*> BA) {
  // Check all defs from block BA and record them in each block in BA's
  // iterated dominance frontier. This information will later be used to
  // create phi nodes.
  MachineBasicBlock *BB = BA.Addr->getCode();
  assert(BB);
  auto DFLoc = MDF.find(BB);
  // Without dominance-frontier information for BB, there is nowhere to
  // place phis for its defs.
  if (DFLoc == MDF.end() || DFLoc->second.empty())
    return;

  // Traverse all instructions in the block and collect the set of all
  // defined references. For each reference there will be a phi created
  // in the block's iterated dominance frontier.
  // This is done to make sure that each defined reference gets only one
  // phi node, even if it is defined multiple times.
  RegisterSet Defs;
  for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this))
    for (NodeAddr<RefNode*> RA : IA.Addr->members_if(IsDef, *this))
      Defs.insert(RA.Addr->getRegRef(*this));

  // Calculate the iterated dominance frontier of BB.
  const MachineDominanceFrontier::DomSetType &DF = DFLoc->second;
  SetVector<MachineBasicBlock*> IDF(DF.begin(), DF.end());
  // IDF grows while it is being iterated over, so this loop computes the
  // fixed point: the frontier of every frontier block is added in turn.
  for (unsigned i = 0; i < IDF.size(); ++i) {
    auto F = MDF.find(IDF[i]);
    if (F != MDF.end())
      IDF.insert(F->second.begin(), F->second.end());
  }

  // Finally, add the set of defs to each block in the iterated dominance
  // frontier.
  for (auto DB : IDF) {
    NodeAddr<BlockNode*> DBA = findBlock(DB);
    PhiM[DBA.Id].insert(Defs.begin(), Defs.end());
  }
}
// Given the locations of phi nodes in the map PhiM, create the phi nodes
// that are located in the block node BA.
void DataFlowGraph::buildPhis(BlockRefsMap &PhiM, RegisterSet &AllRefs,
      NodeAddr<BlockNode*> BA) {
  // Check if this block has any DF defs, i.e. if there are any defs
  // that this block is in the iterated dominance frontier of.
  auto HasDF = PhiM.find(BA.Id);
  if (HasDF == PhiM.end() || HasDF->second.empty())
    return;

  // First, remove all R in Refs in such that there exists T in Refs
  // such that T covers R. In other words, only leave those refs that
  // are not covered by another ref (i.e. maximal with respect to covering).

  // MaxCoverIn: return a ref in RRs covering RR, or RR itself if none does.
  auto MaxCoverIn = [this] (RegisterRef RR, RegisterSet &RRs) -> RegisterRef {
    for (RegisterRef I : RRs)
      if (I != RR && RegisterAggr::isCoverOf(I, RR, PRI))
        RR = I;
    return RR;
  };

  RegisterSet MaxDF;
  for (RegisterRef I : HasDF->second)
    MaxDF.insert(MaxCoverIn(I, HasDF->second));

  // Widen each maximal DF ref to its maximal cover among all refs in
  // the function.
  std::vector<RegisterRef> MaxRefs;
  for (RegisterRef I : MaxDF)
    MaxRefs.push_back(MaxCoverIn(I, AllRefs));

  // Now, for each R in MaxRefs, get the alias closure of R. If the closure
  // only has R in it, create a phi a def for R. Otherwise, create a phi,
  // and add a def for each S in the closure.

  // Sort the refs so that the phis will be created in a deterministic order.
  llvm::sort(MaxRefs);
  // Remove duplicates.
  auto NewEnd = std::unique(MaxRefs.begin(), MaxRefs.end());
  MaxRefs.erase(NewEnd, MaxRefs.end());

  // Aliased: true if RR aliases any closure member (given by index).
  auto Aliased = [this,&MaxRefs](RegisterRef RR,
                   std::vector<unsigned> &Closure) -> bool {
    for (unsigned I : Closure)
      if (PRI.alias(RR, MaxRefs[I]))
        return true;
    return false;
  };

  // Prepare a list of NodeIds of the block's predecessors.
  NodeList Preds;
  const MachineBasicBlock *MBB = BA.Addr->getCode();
  for (MachineBasicBlock *PB : MBB->predecessors())
    Preds.push_back(findBlock(PB));

  while (!MaxRefs.empty()) {
    // Put the first element in the closure, and then add all subsequent
    // elements from MaxRefs to it, if they alias at least one element
    // already in the closure.
    // ClosureIdx: vector of indices in MaxRefs of members of the closure.
    std::vector<unsigned> ClosureIdx = { 0 };
    for (unsigned i = 1; i != MaxRefs.size(); ++i)
      if (Aliased(MaxRefs[i], ClosureIdx))
        ClosureIdx.push_back(i);

    // Build a phi for the closure.
    unsigned CS = ClosureIdx.size();
    NodeAddr<PhiNode*> PA = newPhi(BA);

    // Add defs.
    for (unsigned X = 0; X != CS; ++X) {
      RegisterRef RR = MaxRefs[ClosureIdx[X]];
      uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving;
      NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags);
      PA.Addr->addMember(DA, *this);
    }
    // Add phi uses.
    for (NodeAddr<BlockNode*> PBA : Preds) {
      for (unsigned X = 0; X != CS; ++X) {
        RegisterRef RR = MaxRefs[ClosureIdx[X]];
        NodeAddr<PhiUseNode*> PUA = newPhiUse(PA, RR, PBA);
        PA.Addr->addMember(PUA, *this);
      }
    }

    // Erase from MaxRefs all elements in the closure.
    // Erasing in decreasing index order keeps earlier indices valid.
    auto Begin = MaxRefs.begin();
    for (unsigned Idx : llvm::reverse(ClosureIdx))
      MaxRefs.erase(Begin + Idx);
  }
}
  1349. // Remove any unneeded phi nodes that were created during the build process.
  1350. void DataFlowGraph::removeUnusedPhis() {
  1351. // This will remove unused phis, i.e. phis where each def does not reach
  1352. // any uses or other defs. This will not detect or remove circular phi
  1353. // chains that are otherwise dead. Unused/dead phis are created during
  1354. // the build process and this function is intended to remove these cases
  1355. // that are easily determinable to be unnecessary.
  1356. SetVector<NodeId> PhiQ;
  1357. for (NodeAddr<BlockNode*> BA : Func.Addr->members(*this)) {
  1358. for (auto P : BA.Addr->members_if(IsPhi, *this))
  1359. PhiQ.insert(P.Id);
  1360. }
  1361. static auto HasUsedDef = [](NodeList &Ms) -> bool {
  1362. for (NodeAddr<NodeBase*> M : Ms) {
  1363. if (M.Addr->getKind() != NodeAttrs::Def)
  1364. continue;
  1365. NodeAddr<DefNode*> DA = M;
  1366. if (DA.Addr->getReachedDef() != 0 || DA.Addr->getReachedUse() != 0)
  1367. return true;
  1368. }
  1369. return false;
  1370. };
  1371. // Any phi, if it is removed, may affect other phis (make them dead).
  1372. // For each removed phi, collect the potentially affected phis and add
  1373. // them back to the queue.
  1374. while (!PhiQ.empty()) {
  1375. auto PA = addr<PhiNode*>(PhiQ[0]);
  1376. PhiQ.remove(PA.Id);
  1377. NodeList Refs = PA.Addr->members(*this);
  1378. if (HasUsedDef(Refs))
  1379. continue;
  1380. for (NodeAddr<RefNode*> RA : Refs) {
  1381. if (NodeId RD = RA.Addr->getReachingDef()) {
  1382. auto RDA = addr<DefNode*>(RD);
  1383. NodeAddr<InstrNode*> OA = RDA.Addr->getOwner(*this);
  1384. if (IsPhi(OA))
  1385. PhiQ.insert(OA.Id);
  1386. }
  1387. if (RA.Addr->isDef())
  1388. unlinkDef(RA, true);
  1389. else
  1390. unlinkUse(RA, true);
  1391. }
  1392. NodeAddr<BlockNode*> BA = PA.Addr->getOwner(*this);
  1393. BA.Addr->removeMember(PA, *this);
  1394. }
  1395. }
// For a given reference node TA in an instruction node IA, connect the
// reaching def of TA to the appropriate def node. Create any shadow nodes
// as appropriate.
template <typename T>
void DataFlowGraph::linkRefUp(NodeAddr<InstrNode*> IA, NodeAddr<T> TA,
      DefStack &DS) {
  if (DS.empty())
    return;
  RegisterRef RR = TA.Addr->getRegRef(*this);
  // The ref node currently being linked; starts null, becomes TA on the
  // first link, then successive shadow nodes for additional reaching defs.
  NodeAddr<T> TAP;

  // References from the def stack that have been examined so far.
  RegisterAggr Defs(PRI);

  // Walk the def stack from the most recent def downwards.
  for (auto I = DS.top(), E = DS.bottom(); I != E; I.down()) {
    RegisterRef QR = I->Addr->getRegRef(*this);

    // Skip all defs that are aliased to any of the defs that we have already
    // seen. If this completes a cover of RR, stop the stack traversal.
    bool Alias = Defs.hasAliasOf(QR);
    bool Cover = Defs.insert(QR).hasCoverOf(RR);
    if (Alias) {
      if (Cover)
        break;
      continue;
    }

    // The reaching def.
    NodeAddr<DefNode*> RDA = *I;

    // Pick the reached node.
    if (TAP.Id == 0) {
      // First reaching def: link TA itself.
      TAP = TA;
    } else {
      // Mark the existing ref as "shadow" and create a new shadow.
      TAP.Addr->setFlags(TAP.Addr->getFlags() | NodeAttrs::Shadow);
      TAP = getNextShadow(IA, TAP, true);
    }

    // Create the link.
    TAP.Addr->linkToDef(TAP.Id, RDA);

    if (Cover)
      break;
  }
}
  1435. // Create data-flow links for all reference nodes in the statement node SA.
  1436. template <typename Predicate>
  1437. void DataFlowGraph::linkStmtRefs(DefStackMap &DefM, NodeAddr<StmtNode*> SA,
  1438. Predicate P) {
  1439. #ifndef NDEBUG
  1440. RegisterSet Defs;
  1441. #endif
  1442. // Link all nodes (upwards in the data-flow) with their reaching defs.
  1443. for (NodeAddr<RefNode*> RA : SA.Addr->members_if(P, *this)) {
  1444. uint16_t Kind = RA.Addr->getKind();
  1445. assert(Kind == NodeAttrs::Def || Kind == NodeAttrs::Use);
  1446. RegisterRef RR = RA.Addr->getRegRef(*this);
  1447. #ifndef NDEBUG
  1448. // Do not expect multiple defs of the same reference.
  1449. assert(Kind != NodeAttrs::Def || !Defs.count(RR));
  1450. Defs.insert(RR);
  1451. #endif
  1452. auto F = DefM.find(RR.Reg);
  1453. if (F == DefM.end())
  1454. continue;
  1455. DefStack &DS = F->second;
  1456. if (Kind == NodeAttrs::Use)
  1457. linkRefUp<UseNode*>(SA, RA, DS);
  1458. else if (Kind == NodeAttrs::Def)
  1459. linkRefUp<DefNode*>(SA, RA, DS);
  1460. else
  1461. llvm_unreachable("Unexpected node in instruction");
  1462. }
  1463. }
  1464. // Create data-flow links for all instructions in the block node BA. This
  1465. // will include updating any phi nodes in BA.
// Create data-flow links for all instructions in the block node BA. This
// will include updating any phi nodes in BA.
void DataFlowGraph::linkBlockRefs(DefStackMap &DefM, NodeAddr<BlockNode*> BA) {
  // Push block delimiters.
  markBlock(BA.Id, DefM);

  // Predicates classifying defs by the Clobbering flag; clobbering defs
  // are linked before being pushed, others after pushClobbers (see below).
  auto IsClobber = [] (NodeAddr<RefNode*> RA) -> bool {
    return IsDef(RA) && (RA.Addr->getFlags() & NodeAttrs::Clobbering);
  };
  auto IsNoClobber = [] (NodeAddr<RefNode*> RA) -> bool {
    return IsDef(RA) && !(RA.Addr->getFlags() & NodeAttrs::Clobbering);
  };

  assert(BA.Addr && "block node address is needed to create a data-flow link");

  // For each non-phi instruction in the block, link all the defs and uses
  // to their reaching defs. For any member of the block (including phis),
  // push the defs on the corresponding stacks.
  for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this)) {
    // Ignore phi nodes here. They will be linked part by part from the
    // predecessors.
    if (IA.Addr->getKind() == NodeAttrs::Stmt) {
      linkStmtRefs(DefM, IA, IsUse);
      linkStmtRefs(DefM, IA, IsClobber);
    }

    // Push the definitions on the stack.
    pushClobbers(IA, DefM);

    // Non-clobbering defs are linked after the clobbers are pushed, so
    // that they see the clobbering defs of the same instruction.
    if (IA.Addr->getKind() == NodeAttrs::Stmt)
      linkStmtRefs(DefM, IA, IsNoClobber);

    pushDefs(IA, DefM);
  }

  // Recursively process all children in the dominator tree.
  // The defs pushed above stay on the stacks while the dominated blocks
  // are linked, providing their reaching defs.
  MachineDomTreeNode *N = MDT.getNode(BA.Addr->getCode());
  for (auto I : *N) {
    MachineBasicBlock *SB = I->getBlock();
    NodeAddr<BlockNode*> SBA = findBlock(SB);
    linkBlockRefs(DefM, SBA);
  }

  // Link the phi uses from the successor blocks.
  // Selects the phi uses in a successor block that correspond to this
  // block (i.e. whose recorded predecessor is BA).
  auto IsUseForBA = [BA](NodeAddr<NodeBase*> NA) -> bool {
    if (NA.Addr->getKind() != NodeAttrs::Use)
      return false;
    assert(NA.Addr->getFlags() & NodeAttrs::PhiRef);
    NodeAddr<PhiUseNode*> PUA = NA;
    return PUA.Addr->getPredecessor() == BA.Id;
  };

  RegisterSet EHLiveIns = getLandingPadLiveIns();
  MachineBasicBlock *MBB = BA.Addr->getCode();

  for (MachineBasicBlock *SB : MBB->successors()) {
    bool IsEHPad = SB->isEHPad();
    NodeAddr<BlockNode*> SBA = findBlock(SB);
    for (NodeAddr<InstrNode*> IA : SBA.Addr->members_if(IsPhi, *this)) {
      // Do not link phi uses for landing pad live-ins.
      if (IsEHPad) {
        // Find what register this phi is for.
        NodeAddr<RefNode*> RA = IA.Addr->getFirstMember(*this);
        assert(RA.Id != 0);
        if (EHLiveIns.count(RA.Addr->getRegRef(*this)))
          continue;
      }
      // Go over each phi use associated with MBB, and link it.
      for (auto U : IA.Addr->members_if(IsUseForBA, *this)) {
        NodeAddr<PhiUseNode*> PUA = U;
        RegisterRef RR = PUA.Addr->getRegRef(*this);
        linkRefUp<UseNode*>(IA, PUA, DefM[RR.Reg]);
      }
    }
  }

  // Pop all defs from this block from the definition stacks.
  releaseBlock(BA.Id, DefM);
}
  1532. // Remove the use node UA from any data-flow and structural links.
  1533. void DataFlowGraph::unlinkUseDF(NodeAddr<UseNode*> UA) {
  1534. NodeId RD = UA.Addr->getReachingDef();
  1535. NodeId Sib = UA.Addr->getSibling();
  1536. if (RD == 0) {
  1537. assert(Sib == 0);
  1538. return;
  1539. }
  1540. auto RDA = addr<DefNode*>(RD);
  1541. auto TA = addr<UseNode*>(RDA.Addr->getReachedUse());
  1542. if (TA.Id == UA.Id) {
  1543. RDA.Addr->setReachedUse(Sib);
  1544. return;
  1545. }
  1546. while (TA.Id != 0) {
  1547. NodeId S = TA.Addr->getSibling();
  1548. if (S == UA.Id) {
  1549. TA.Addr->setSibling(UA.Addr->getSibling());
  1550. return;
  1551. }
  1552. TA = addr<UseNode*>(S);
  1553. }
  1554. }
  1555. // Remove the def node DA from any data-flow and structural links.
// Remove the def node DA from any data-flow and structural links.
void DataFlowGraph::unlinkDefDF(NodeAddr<DefNode*> DA) {
  //
  //         RD
  //         | reached
  //         | def
  //         :
  //         .
  //        +----+
  // ... -- | DA | -- ... -- 0  : sibling chain of DA
  //        +----+
  //         |  | reached
  //         |  : def
  //         |  .
  //         | ...  : Siblings (defs)
  //         |
  //         : reached
  //         . use
  //        ...  : sibling chain of reached uses

  NodeId RD = DA.Addr->getReachingDef();

  // Visit all siblings of the reached def and reset their reaching defs.
  // Also, defs reached by DA are now "promoted" to being reached by RD,
  // so all of them will need to be spliced into the sibling chain where
  // DA belongs.
  auto getAllNodes = [this] (NodeId N) -> NodeList {
    NodeList Res;
    while (N) {
      auto RA = addr<RefNode*>(N);
      // Keep the nodes in the exact sibling order.
      Res.push_back(RA);
      N = RA.Addr->getSibling();
    }
    return Res;
  };
  // Snapshot both chains before any links are modified.
  NodeList ReachedDefs = getAllNodes(DA.Addr->getReachedDef());
  NodeList ReachedUses = getAllNodes(DA.Addr->getReachedUse());

  if (RD == 0) {
    // With no RD to promote to, the reached nodes become chain-less:
    // clear their sibling links before resetting their reaching defs.
    for (NodeAddr<RefNode*> I : ReachedDefs)
      I.Addr->setSibling(0);
    for (NodeAddr<RefNode*> I : ReachedUses)
      I.Addr->setSibling(0);
  }
  // Promote everything DA reached to being reached by RD (possibly 0).
  for (NodeAddr<DefNode*> I : ReachedDefs)
    I.Addr->setReachingDef(RD);
  for (NodeAddr<UseNode*> I : ReachedUses)
    I.Addr->setReachingDef(RD);

  NodeId Sib = DA.Addr->getSibling();
  if (RD == 0) {
    assert(Sib == 0);
    return;
  }

  // Update the reaching def node and remove DA from the sibling list.
  auto RDA = addr<DefNode*>(RD);
  auto TA = addr<DefNode*>(RDA.Addr->getReachedDef());
  if (TA.Id == DA.Id) {
    // If DA is the first reached def, just update the RD's reached def
    // to the DA's sibling.
    RDA.Addr->setReachedDef(Sib);
  } else {
    // Otherwise, traverse the sibling list of the reached defs and remove
    // DA from it.
    while (TA.Id != 0) {
      NodeId S = TA.Addr->getSibling();
      if (S == DA.Id) {
        TA.Addr->setSibling(Sib);
        break;
      }
      TA = addr<DefNode*>(S);
    }
  }

  // Splice the DA's reached defs into the RDA's reached def chain.
  // The snapshot preserved sibling order, so prepending the whole list
  // keeps each sub-chain's internal order intact.
  if (!ReachedDefs.empty()) {
    auto Last = NodeAddr<DefNode*>(ReachedDefs.back());
    Last.Addr->setSibling(RDA.Addr->getReachedDef());
    RDA.Addr->setReachedDef(ReachedDefs.front().Id);
  }
  // Splice the DA's reached uses into the RDA's reached use chain.
  if (!ReachedUses.empty()) {
    auto Last = NodeAddr<UseNode*>(ReachedUses.back());
    Last.Addr->setSibling(RDA.Addr->getReachedUse());
    RDA.Addr->setReachedUse(ReachedUses.front().Id);
  }
}