RegAllocPBQP.h 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549
  1. #pragma once
  2. #ifdef __GNUC__
  3. #pragma GCC diagnostic push
  4. #pragma GCC diagnostic ignored "-Wunused-parameter"
  5. #endif
  6. //===- RegAllocPBQP.h -------------------------------------------*- C++ -*-===//
  7. //
  8. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  9. // See https://llvm.org/LICENSE.txt for license information.
  10. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  11. //
  12. //===----------------------------------------------------------------------===//
  13. //
  14. // This file defines the PBQPBuilder interface, for classes which build PBQP
  15. // instances to represent register allocation problems, and the RegAllocPBQP
  16. // interface.
  17. //
  18. //===----------------------------------------------------------------------===//
  19. #ifndef LLVM_CODEGEN_REGALLOCPBQP_H
  20. #define LLVM_CODEGEN_REGALLOCPBQP_H
  21. #include "llvm/ADT/DenseMap.h"
  22. #include "llvm/ADT/Hashing.h"
  23. #include "llvm/CodeGen/PBQP/CostAllocator.h"
  24. #include "llvm/CodeGen/PBQP/Graph.h"
  25. #include "llvm/CodeGen/PBQP/Math.h"
  26. #include "llvm/CodeGen/PBQP/ReductionRules.h"
  27. #include "llvm/CodeGen/PBQP/Solution.h"
  28. #include "llvm/CodeGen/Register.h"
  29. #include "llvm/MC/MCRegister.h"
  30. #include "llvm/Support/ErrorHandling.h"
  31. #include <algorithm>
  32. #include <cassert>
  33. #include <cstddef>
  34. #include <limits>
  35. #include <memory>
  36. #include <set>
  37. #include <vector>
  38. namespace llvm {
  39. class FunctionPass;
  40. class LiveIntervals;
  41. class MachineBlockFrequencyInfo;
  42. class MachineFunction;
  43. class raw_ostream;
  44. namespace PBQP {
  45. namespace RegAlloc {
  46. /// Spill option index.
  47. inline unsigned getSpillOptionIdx() { return 0; }
  48. /// Metadata to speed allocatability test.
  49. ///
  50. /// Keeps track of the number of infinities in each row and column.
  51. class MatrixMetadata {
  52. public:
  53. MatrixMetadata(const Matrix& M)
  54. : UnsafeRows(new bool[M.getRows() - 1]()),
  55. UnsafeCols(new bool[M.getCols() - 1]()) {
  56. unsigned* ColCounts = new unsigned[M.getCols() - 1]();
  57. for (unsigned i = 1; i < M.getRows(); ++i) {
  58. unsigned RowCount = 0;
  59. for (unsigned j = 1; j < M.getCols(); ++j) {
  60. if (M[i][j] == std::numeric_limits<PBQPNum>::infinity()) {
  61. ++RowCount;
  62. ++ColCounts[j - 1];
  63. UnsafeRows[i - 1] = true;
  64. UnsafeCols[j - 1] = true;
  65. }
  66. }
  67. WorstRow = std::max(WorstRow, RowCount);
  68. }
  69. unsigned WorstColCountForCurRow =
  70. *std::max_element(ColCounts, ColCounts + M.getCols() - 1);
  71. WorstCol = std::max(WorstCol, WorstColCountForCurRow);
  72. delete[] ColCounts;
  73. }
  74. MatrixMetadata(const MatrixMetadata &) = delete;
  75. MatrixMetadata &operator=(const MatrixMetadata &) = delete;
  76. unsigned getWorstRow() const { return WorstRow; }
  77. unsigned getWorstCol() const { return WorstCol; }
  78. const bool* getUnsafeRows() const { return UnsafeRows.get(); }
  79. const bool* getUnsafeCols() const { return UnsafeCols.get(); }
  80. private:
  81. unsigned WorstRow = 0;
  82. unsigned WorstCol = 0;
  83. std::unique_ptr<bool[]> UnsafeRows;
  84. std::unique_ptr<bool[]> UnsafeCols;
  85. };
  86. /// Holds a vector of the allowed physical regs for a vreg.
  87. class AllowedRegVector {
  88. friend hash_code hash_value(const AllowedRegVector &);
  89. public:
  90. AllowedRegVector() = default;
  91. AllowedRegVector(AllowedRegVector &&) = default;
  92. AllowedRegVector(const std::vector<MCRegister> &OptVec)
  93. : NumOpts(OptVec.size()), Opts(new MCRegister[NumOpts]) {
  94. std::copy(OptVec.begin(), OptVec.end(), Opts.get());
  95. }
  96. unsigned size() const { return NumOpts; }
  97. MCRegister operator[](size_t I) const { return Opts[I]; }
  98. bool operator==(const AllowedRegVector &Other) const {
  99. if (NumOpts != Other.NumOpts)
  100. return false;
  101. return std::equal(Opts.get(), Opts.get() + NumOpts, Other.Opts.get());
  102. }
  103. bool operator!=(const AllowedRegVector &Other) const {
  104. return !(*this == Other);
  105. }
  106. private:
  107. unsigned NumOpts = 0;
  108. std::unique_ptr<MCRegister[]> Opts;
  109. };
  110. inline hash_code hash_value(const AllowedRegVector &OptRegs) {
  111. MCRegister *OStart = OptRegs.Opts.get();
  112. MCRegister *OEnd = OptRegs.Opts.get() + OptRegs.NumOpts;
  113. return hash_combine(OptRegs.NumOpts,
  114. hash_combine_range(OStart, OEnd));
  115. }
  116. /// Holds graph-level metadata relevant to PBQP RA problems.
  117. class GraphMetadata {
  118. private:
  119. using AllowedRegVecPool = ValuePool<AllowedRegVector>;
  120. public:
  121. using AllowedRegVecRef = AllowedRegVecPool::PoolRef;
  122. GraphMetadata(MachineFunction &MF,
  123. LiveIntervals &LIS,
  124. MachineBlockFrequencyInfo &MBFI)
  125. : MF(MF), LIS(LIS), MBFI(MBFI) {}
  126. MachineFunction &MF;
  127. LiveIntervals &LIS;
  128. MachineBlockFrequencyInfo &MBFI;
  129. void setNodeIdForVReg(Register VReg, GraphBase::NodeId NId) {
  130. VRegToNodeId[VReg.id()] = NId;
  131. }
  132. GraphBase::NodeId getNodeIdForVReg(Register VReg) const {
  133. auto VRegItr = VRegToNodeId.find(VReg);
  134. if (VRegItr == VRegToNodeId.end())
  135. return GraphBase::invalidNodeId();
  136. return VRegItr->second;
  137. }
  138. AllowedRegVecRef getAllowedRegs(AllowedRegVector Allowed) {
  139. return AllowedRegVecs.getValue(std::move(Allowed));
  140. }
  141. private:
  142. DenseMap<Register, GraphBase::NodeId> VRegToNodeId;
  143. AllowedRegVecPool AllowedRegVecs;
  144. };
/// Holds solver state and other metadata relevant to each PBQP RA node.
class NodeMetadata {
public:
  using AllowedRegVector = RegAlloc::AllowedRegVector;

  // The node's reduction state. The order in this enum is important,
  // as it is assumed nodes can only progress up (i.e. towards being
  // optimally reducible) when reducing the graph.
  using ReductionState = enum {
    Unprocessed,
    NotProvablyAllocatable,
    ConservativelyAllocatable,
    OptimallyReducible
  };

  NodeMetadata() = default;

  // Copy constructor: deep-copies the per-option unsafe-edge counts;
  // the allowed-register pool reference is shared with Other.
  NodeMetadata(const NodeMetadata &Other)
      : RS(Other.RS), NumOpts(Other.NumOpts), DeniedOpts(Other.DeniedOpts),
        OptUnsafeEdges(new unsigned[NumOpts]), VReg(Other.VReg),
        AllowedRegs(Other.AllowedRegs)
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
        ,
        everConservativelyAllocatable(Other.everConservativelyAllocatable)
#endif
  {
    // OptUnsafeEdges is allocated uninitialized above; only copy when there
    // is actually something to copy.
    if (NumOpts > 0) {
      std::copy(&Other.OptUnsafeEdges[0], &Other.OptUnsafeEdges[NumOpts],
                &OptUnsafeEdges[0]);
    }
  }

  NodeMetadata(NodeMetadata &&) = default;
  NodeMetadata &operator=(NodeMetadata &&) = default;

  void setVReg(Register VReg) { this->VReg = VReg; }
  Register getVReg() const { return VReg; }

  void setAllowedRegs(GraphMetadata::AllowedRegVecRef AllowedRegs) {
    this->AllowedRegs = std::move(AllowedRegs);
  }
  const AllowedRegVector &getAllowedRegs() const { return *AllowedRegs; }

  // Initialize per-option state from the node's cost vector. Index 0 is the
  // spill option, so there are getLength() - 1 register options; the
  // unsafe-edge counters start zeroed (value-initialized).
  void setup(const Vector &Costs) {
    NumOpts = Costs.getLength() - 1;
    OptUnsafeEdges = std::unique_ptr<unsigned[]>(new unsigned[NumOpts]());
  }

  ReductionState getReductionState() const { return RS; }

  // Transitions are one-way: the assert enforces the "only progress up"
  // invariant documented on the enum above.
  void setReductionState(ReductionState RS) {
    assert(RS >= this->RS && "A node's reduction state can not be downgraded");
    this->RS = RS;

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    // Remember this state to assert later that a non-infinite register
    // option was available.
    if (RS == ConservativelyAllocatable)
      everConservativelyAllocatable = true;
#endif
  }

  // Fold a newly attached edge's matrix metadata into this node's counters.
  // Transpose selects which dimension of the edge-cost matrix this node
  // indexes (rows vs. columns).
  void handleAddEdge(const MatrixMetadata &MD, bool Transpose) {
    DeniedOpts += Transpose ? MD.getWorstRow() : MD.getWorstCol();
    const bool *UnsafeOpts =
        Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
    for (unsigned i = 0; i < NumOpts; ++i)
      OptUnsafeEdges[i] += UnsafeOpts[i];
  }

  // Exact inverse of handleAddEdge: subtract an edge's contribution so the
  // counters stay incrementally correct as edges are disconnected.
  void handleRemoveEdge(const MatrixMetadata &MD, bool Transpose) {
    DeniedOpts -= Transpose ? MD.getWorstRow() : MD.getWorstCol();
    const bool *UnsafeOpts =
        Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
    for (unsigned i = 0; i < NumOpts; ++i)
      OptUnsafeEdges[i] -= UnsafeOpts[i];
  }

  // Allocatable if the worst-case number of denied options is less than the
  // number of register options, or if at least one option is not marked
  // unsafe by any incident edge (count of zero).
  bool isConservativelyAllocatable() const {
    return (DeniedOpts < NumOpts) ||
           (std::find(&OptUnsafeEdges[0], &OptUnsafeEdges[NumOpts], 0) !=
            &OptUnsafeEdges[NumOpts]);
  }

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  bool wasConservativelyAllocatable() const {
    return everConservativelyAllocatable;
  }
#endif

private:
  ReductionState RS = Unprocessed;
  unsigned NumOpts = 0;                       // Register options (excl. spill).
  unsigned DeniedOpts = 0;                    // Sum of worst-case denials.
  std::unique_ptr<unsigned[]> OptUnsafeEdges; // Per-option unsafe-edge count.
  Register VReg;
  GraphMetadata::AllowedRegVecRef AllowedRegs;

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  bool everConservativelyAllocatable = false;
#endif
};
/// PBQP solver implementation specialized for register allocation.
///
/// Maintains three worklists of nodes (optimally reducible, conservatively
/// allocatable, not provably allocatable) and consumes them to produce a
/// reduction order, which is then back-propagated into a Solution.
class RegAllocSolverImpl {
private:
  using RAMatrix = MDMatrix<MatrixMetadata>;

public:
  using RawVector = PBQP::Vector;
  using RawMatrix = PBQP::Matrix;
  using Vector = PBQP::Vector;
  using Matrix = RAMatrix;
  using CostAllocator = PBQP::PoolCostAllocator<Vector, Matrix>;

  using NodeId = GraphBase::NodeId;
  using EdgeId = GraphBase::EdgeId;

  using NodeMetadata = RegAlloc::NodeMetadata;
  struct EdgeMetadata {};
  using GraphMetadata = RegAlloc::GraphMetadata;

  using Graph = PBQP::Graph<RegAllocSolverImpl>;

  RegAllocSolverImpl(Graph &G) : G(G) {}

  /// Solve the graph: set up worklists, compute a reduction order, and
  /// back-propagate to a concrete selection per node.
  Solution solve() {
    G.setSolver(*this);
    Solution S;
    setup();
    S = backpropagate(G, reduce());
    G.unsetSolver();
    return S;
  }

  /// Graph callback: initialize per-node metadata from the node's costs.
  void handleAddNode(NodeId NId) {
    assert(G.getNodeCosts(NId).getLength() > 1 &&
           "PBQP Graph should not contain single or zero-option nodes");
    G.getNodeMetadata(NId).setup(G.getNodeCosts(NId));
  }

  void handleRemoveNode(NodeId NId) {}
  void handleSetNodeCosts(NodeId NId, const Vector &newCosts) {}

  /// Graph callback: fold the new edge's metadata into both endpoints.
  void handleAddEdge(EdgeId EId) {
    handleReconnectEdge(EId, G.getEdgeNode1Id(EId));
    handleReconnectEdge(EId, G.getEdgeNode2Id(EId));
  }

  /// Graph callback: remove the edge's contribution from NId's metadata and
  /// promote the node if it became easier to allocate.
  void handleDisconnectEdge(EdgeId EId, NodeId NId) {
    NodeMetadata &NMd = G.getNodeMetadata(NId);
    const MatrixMetadata &MMd = G.getEdgeCosts(EId).getMetadata();
    // Transpose iff NId is the edge's second node (indexes matrix columns).
    NMd.handleRemoveEdge(MMd, NId == G.getEdgeNode2Id(EId));
    promote(NId, NMd);
  }

  /// Graph callback: add the edge's contribution back into NId's metadata.
  void handleReconnectEdge(EdgeId EId, NodeId NId) {
    NodeMetadata &NMd = G.getNodeMetadata(NId);
    const MatrixMetadata &MMd = G.getEdgeCosts(EId).getMetadata();
    NMd.handleAddEdge(MMd, NId == G.getEdgeNode2Id(EId));
  }

  /// Graph callback: swap an edge's old cost metadata for the new one on
  /// both endpoints, then re-evaluate their worklist placement.
  void handleUpdateCosts(EdgeId EId, const Matrix &NewCosts) {
    NodeId N1Id = G.getEdgeNode1Id(EId);
    NodeId N2Id = G.getEdgeNode2Id(EId);
    NodeMetadata &N1Md = G.getNodeMetadata(N1Id);
    NodeMetadata &N2Md = G.getNodeMetadata(N2Id);
    // NOTE(review): N1Id was just read from G.getEdgeNode1Id(EId), so this
    // comparison is always false and Transpose is effectively constant.
    // Confirm whether a different node id was intended here.
    bool Transpose = N1Id != G.getEdgeNode1Id(EId);

    // Metadata are computed incrementally. First, update them
    // by removing the old cost.
    const MatrixMetadata &OldMMd = G.getEdgeCosts(EId).getMetadata();
    N1Md.handleRemoveEdge(OldMMd, Transpose);
    N2Md.handleRemoveEdge(OldMMd, !Transpose);

    // And update now the metadata with the new cost.
    const MatrixMetadata &MMd = NewCosts.getMetadata();
    N1Md.handleAddEdge(MMd, Transpose);
    N2Md.handleAddEdge(MMd, !Transpose);

    // As the metadata may have changed with the update, the nodes may have
    // become ConservativelyAllocatable or OptimallyReducible.
    promote(N1Id, N1Md);
    promote(N2Id, N2Md);
  }

private:
  // Move a node to a "better" worklist if its degree or metadata now
  // permits it (degree < 3 makes it optimally reducible; otherwise it may
  // have become conservatively allocatable).
  void promote(NodeId NId, NodeMetadata &NMd) {
    if (G.getNodeDegree(NId) == 3) {
      // This node is becoming optimally reducible.
      moveToOptimallyReducibleNodes(NId);
    } else if (NMd.getReductionState() ==
                   NodeMetadata::NotProvablyAllocatable &&
               NMd.isConservativelyAllocatable()) {
      // This node just became conservatively allocatable.
      moveToConservativelyAllocatableNodes(NId);
    }
  }

  // Erase NId from whichever worklist its reduction state says it is on.
  // Unprocessed nodes are on no worklist yet.
  void removeFromCurrentSet(NodeId NId) {
    switch (G.getNodeMetadata(NId).getReductionState()) {
    case NodeMetadata::Unprocessed: break;
    case NodeMetadata::OptimallyReducible:
      assert(OptimallyReducibleNodes.find(NId) !=
             OptimallyReducibleNodes.end() &&
             "Node not in optimally reducible set.");
      OptimallyReducibleNodes.erase(NId);
      break;
    case NodeMetadata::ConservativelyAllocatable:
      assert(ConservativelyAllocatableNodes.find(NId) !=
             ConservativelyAllocatableNodes.end() &&
             "Node not in conservatively allocatable set.");
      ConservativelyAllocatableNodes.erase(NId);
      break;
    case NodeMetadata::NotProvablyAllocatable:
      assert(NotProvablyAllocatableNodes.find(NId) !=
             NotProvablyAllocatableNodes.end() &&
             "Node not in not-provably-allocatable set.");
      NotProvablyAllocatableNodes.erase(NId);
      break;
    }
  }

  void moveToOptimallyReducibleNodes(NodeId NId) {
    removeFromCurrentSet(NId);
    OptimallyReducibleNodes.insert(NId);
    G.getNodeMetadata(NId).setReductionState(
        NodeMetadata::OptimallyReducible);
  }

  void moveToConservativelyAllocatableNodes(NodeId NId) {
    removeFromCurrentSet(NId);
    ConservativelyAllocatableNodes.insert(NId);
    G.getNodeMetadata(NId).setReductionState(
        NodeMetadata::ConservativelyAllocatable);
  }

  void moveToNotProvablyAllocatableNodes(NodeId NId) {
    removeFromCurrentSet(NId);
    NotProvablyAllocatableNodes.insert(NId);
    G.getNodeMetadata(NId).setReductionState(
        NodeMetadata::NotProvablyAllocatable);
  }

  // Place every node on its initial worklist.
  void setup() {
    // Set up worklists.
    for (auto NId : G.nodeIds()) {
      if (G.getNodeDegree(NId) < 3)
        moveToOptimallyReducibleNodes(NId);
      else if (G.getNodeMetadata(NId).isConservativelyAllocatable())
        moveToConservativelyAllocatableNodes(NId);
      else
        moveToNotProvablyAllocatableNodes(NId);
    }
  }

  // Compute a reduction order for the graph by iteratively applying PBQP
  // reduction rules. Locally optimal rules are applied whenever possible (R0,
  // R1, R2). If no locally-optimal rules apply then any conservatively
  // allocatable node is reduced. Finally, if no conservatively allocatable
  // node exists then the node with the lowest spill-cost:degree ratio is
  // selected.
  std::vector<GraphBase::NodeId> reduce() {
    assert(!G.empty() && "Cannot reduce empty graph.");

    using NodeId = GraphBase::NodeId;
    std::vector<NodeId> NodeStack;

    // Consume worklists.
    while (true) {
      if (!OptimallyReducibleNodes.empty()) {
        NodeSet::iterator NItr = OptimallyReducibleNodes.begin();
        NodeId NId = *NItr;
        OptimallyReducibleNodes.erase(NItr);
        NodeStack.push_back(NId);
        // Degree 0 needs no rule (R0 is trivial); degree 1 and 2 use the
        // standard PBQP R1/R2 reductions.
        switch (G.getNodeDegree(NId)) {
        case 0:
          break;
        case 1:
          applyR1(G, NId);
          break;
        case 2:
          applyR2(G, NId);
          break;
        default: llvm_unreachable("Not an optimally reducible node.");
        }
      } else if (!ConservativelyAllocatableNodes.empty()) {
        // Conservatively allocatable nodes will never spill. For now just
        // take the first node in the set and push it on the stack. When we
        // start optimizing more heavily for register preferencing, it may
        // be better to push nodes with lower 'expected' or worst-case
        // register costs first (since early nodes are the most
        // constrained).
        NodeSet::iterator NItr = ConservativelyAllocatableNodes.begin();
        NodeId NId = *NItr;
        ConservativelyAllocatableNodes.erase(NItr);
        NodeStack.push_back(NId);
        G.disconnectAllNeighborsFromNode(NId);
      } else if (!NotProvablyAllocatableNodes.empty()) {
        // Pick the cheapest-to-spill node (ties broken by lower degree).
        NodeSet::iterator NItr =
            std::min_element(NotProvablyAllocatableNodes.begin(),
                             NotProvablyAllocatableNodes.end(),
                             SpillCostComparator(G));
        NodeId NId = *NItr;
        NotProvablyAllocatableNodes.erase(NItr);
        NodeStack.push_back(NId);
        G.disconnectAllNeighborsFromNode(NId);
      } else
        break;
    }

    return NodeStack;
  }

  /// Orders nodes by spill cost (cost vector entry 0), breaking ties by
  /// node degree.
  class SpillCostComparator {
  public:
    SpillCostComparator(const Graph &G) : G(G) {}

    bool operator()(NodeId N1Id, NodeId N2Id) {
      PBQPNum N1SC = G.getNodeCosts(N1Id)[0];
      PBQPNum N2SC = G.getNodeCosts(N2Id)[0];
      if (N1SC == N2SC)
        return G.getNodeDegree(N1Id) < G.getNodeDegree(N2Id);
      return N1SC < N2SC;
    }

  private:
    const Graph &G;
  };

  Graph &G;
  using NodeSet = std::set<NodeId>;
  NodeSet OptimallyReducibleNodes;
  NodeSet ConservativelyAllocatableNodes;
  NodeSet NotProvablyAllocatableNodes;
};
/// PBQP graph specialized for register allocation; adds debug/DOT printing
/// (implementations live in the corresponding .cpp file).
class PBQPRAGraph : public PBQP::Graph<RegAllocSolverImpl> {
private:
  using BaseT = PBQP::Graph<RegAllocSolverImpl>;

public:
  PBQPRAGraph(GraphMetadata Metadata) : BaseT(std::move(Metadata)) {}

  /// Dump this graph to dbgs().
  void dump() const;

  /// Dump this graph to an output stream.
  /// @param OS Output stream to print on.
  void dump(raw_ostream &OS) const;

  /// Print a representation of this graph in DOT format.
  /// @param OS Output stream to print on.
  void printDot(raw_ostream &OS) const;
};
  448. inline Solution solve(PBQPRAGraph& G) {
  449. if (G.empty())
  450. return Solution();
  451. RegAllocSolverImpl RegAllocSolver(G);
  452. return RegAllocSolver.solve();
  453. }
  454. } // end namespace RegAlloc
  455. } // end namespace PBQP
/// Create a PBQP register allocator instance.
///
/// @param customPassID Optional pass ID forwarded to the allocator; nullptr
///        selects the default behavior. (Exact semantics are defined by the
///        implementation, which is not visible in this header.)
FunctionPass *
createPBQPRegisterAllocator(char *customPassID = nullptr);
  459. } // end namespace llvm
  460. #endif // LLVM_CODEGEN_REGALLOCPBQP_H
  461. #ifdef __GNUC__
  462. #pragma GCC diagnostic pop
  463. #endif