//===- SpillPlacement.cpp - Optimal Spill Code Placement ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the spill code placement analysis.
//
// Each edge bundle corresponds to a node in a Hopfield network. Constraints on
// basic blocks are weighted by the block frequency and added to become the node
// bias.
//
// Transparent basic blocks have the variable live through, but don't care if it
// is spilled or in a register. These blocks become connections in the Hopfield
// network, again weighted by block frequency.
//
// The Hopfield network minimizes (possibly locally) its energy function:
//
//   E = -sum_n V_n * ( B_n + sum_{n, m linked by b} V_m * F_b )
//
// The energy function represents the expected spill code execution frequency,
// or the cost of spilling. This is a Lyapunov function which never increases
// when a node is updated. It is guaranteed to converge to a local minimum.
//
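// As an illustration (the numbers here are invented, not taken from any real
// function): a node with BiasP = 10, BiasN = 0, and a single link of weight 5
// to a neighbor whose Value is -1 computes SumP = 10 and SumN = 5 in
// Node::update() below. As long as Threshold <= 5, SumP >= SumN + Threshold
// holds and the node settles at Value = +1, keeping the variable in a register
// through this bundle; a stronger negative neighborhood would push it to -1.
//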
//===----------------------------------------------------------------------===//

#include "SpillPlacement.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "spill-code-placement"

char SpillPlacement::ID = 0;

char &llvm::SpillPlacementID = SpillPlacement::ID;

INITIALIZE_PASS_BEGIN(SpillPlacement, DEBUG_TYPE,
                      "Spill Code Placement Analysis", true, true)
INITIALIZE_PASS_DEPENDENCY(EdgeBundles)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_END(SpillPlacement, DEBUG_TYPE,
                    "Spill Code Placement Analysis", true, true)

void SpillPlacement::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<MachineBlockFrequencyInfo>();
  AU.addRequiredTransitive<EdgeBundles>();
  AU.addRequiredTransitive<MachineLoopInfo>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

/// Node - Each edge bundle corresponds to a Hopfield node.
///
/// The node contains precomputed frequency data that only depends on the CFG,
/// but Bias and Links are computed each time placeSpills is called.
///
/// The node Value is positive when the variable should be in a register. The
/// value can change when linked nodes change, but convergence is very fast
/// because all weights are positive.
struct SpillPlacement::Node {
  /// BiasN - Sum of blocks that prefer a spill.
  BlockFrequency BiasN;

  /// BiasP - Sum of blocks that prefer a register.
  BlockFrequency BiasP;

  /// Value - Output value of this node computed from the Bias and links.
  /// This is always one of the values {-1, 0, 1}. A positive number means the
  /// variable should go in a register through this bundle.
  int Value;

  using LinkVector = SmallVector<std::pair<BlockFrequency, unsigned>, 4>;

  /// Links - (Weight, BundleNo) for all transparent blocks connecting to other
  /// bundles. The weights are all positive block frequencies.
  LinkVector Links;

  /// SumLinkWeights - Cached sum of the weights of all links + Threshold.
  BlockFrequency SumLinkWeights;

  /// preferReg - Return true when this node prefers to be in a register.
  bool preferReg() const {
    // Undecided nodes (Value==0) go on the stack.
    return Value > 0;
  }

  /// mustSpill - Return true if this node is so biased that it must spill.
  bool mustSpill() const {
    // We must spill if Bias < -sum(weights) or the MustSpill flag was set.
    // BiasN is saturated when MustSpill is set; make sure this still returns
    // true when the RHS saturates. Note that SumLinkWeights includes Threshold.
    return BiasN >= BiasP + SumLinkWeights;
  }

  /// clear - Reset per-query data, but preserve frequencies that only depend on
  /// the CFG.
  void clear(const BlockFrequency &Threshold) {
    BiasN = BiasP = Value = 0;
    SumLinkWeights = Threshold;
    Links.clear();
  }

  /// addLink - Add a link to bundle b with weight w.
  void addLink(unsigned b, BlockFrequency w) {
    // Update cached sum.
    SumLinkWeights += w;

    // There can be multiple links to the same bundle, add them up.
    for (std::pair<BlockFrequency, unsigned> &L : Links)
      if (L.second == b) {
        L.first += w;
        return;
      }

    // This must be the first link to b.
    Links.push_back(std::make_pair(w, b));
  }

  /// addBias - Bias this node.
  void addBias(BlockFrequency freq, BorderConstraint direction) {
    switch (direction) {
    default:
      break;
    case PrefReg:
      BiasP += freq;
      break;
    case PrefSpill:
      BiasN += freq;
      break;
    case MustSpill:
      BiasN = BlockFrequency::getMaxFrequency();
      break;
    }
  }

  /// update - Recompute Value from Bias and Links. Return true when node
  /// preference changes.
  bool update(const Node nodes[], const BlockFrequency &Threshold) {
    // Compute the weighted sum of inputs.
    BlockFrequency SumN = BiasN;
    BlockFrequency SumP = BiasP;
    for (std::pair<BlockFrequency, unsigned> &L : Links) {
      if (nodes[L.second].Value == -1)
        SumN += L.first;
      else if (nodes[L.second].Value == 1)
        SumP += L.first;
    }

    // Each weighted sum is going to be less than the total frequency of the
    // bundle. Ideally, we should simply set Value = sign(SumP - SumN), but we
    // will add a dead zone around 0 for two reasons:
    //
    //  1. It avoids arbitrary bias when all links are 0 as is possible during
    //     initial iterations.
    //  2. It helps tame rounding errors when the links nominally sum to 0.
    //
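    // For example (illustrative values only): with Threshold == 2, a node
    // where SumP and SumN differ by just 1 stays at Value == 0 and is treated
    // as spilled, while a difference of 2 or more commits it to +1 or -1.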
    bool Before = preferReg();
    if (SumN >= SumP + Threshold)
      Value = -1;
    else if (SumP >= SumN + Threshold)
      Value = 1;
    else
      Value = 0;
    return Before != preferReg();
  }

  void getDissentingNeighbors(SparseSet<unsigned> &List,
                              const Node nodes[]) const {
    for (const auto &Elt : Links) {
      unsigned n = Elt.second;
      // Neighbors that already have the same value are not going to
      // change because of this node changing.
      if (Value != nodes[n].Value)
        List.insert(n);
    }
  }
};

bool SpillPlacement::runOnMachineFunction(MachineFunction &mf) {
  MF = &mf;
  bundles = &getAnalysis<EdgeBundles>();
  loops = &getAnalysis<MachineLoopInfo>();

  assert(!nodes && "Leaking node array");
  nodes = new Node[bundles->getNumBundles()];
  TodoList.clear();
  TodoList.setUniverse(bundles->getNumBundles());

  // Compute total ingoing and outgoing block frequencies for all bundles.
  BlockFrequencies.resize(mf.getNumBlockIDs());
  MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
  setThreshold(MBFI->getEntryFreq());
  for (auto &I : mf) {
    unsigned Num = I.getNumber();
    BlockFrequencies[Num] = MBFI->getBlockFreq(&I);
  }

  // We never change the function.
  return false;
}

void SpillPlacement::releaseMemory() {
  delete[] nodes;
  nodes = nullptr;
  TodoList.clear();
}

/// activate - mark node n as active if it wasn't already.
void SpillPlacement::activate(unsigned n) {
  TodoList.insert(n);
  if (ActiveNodes->test(n))
    return;
  ActiveNodes->set(n);
  nodes[n].clear(Threshold);

  // Very large bundles usually come from big switches, indirect branches,
  // landing pads, or loops with many 'continue' statements. It is difficult to
  // allocate registers when so many different blocks are involved.
  //
  // Give a small negative bias to large bundles such that a substantial
  // fraction of the connected blocks need to be interested before we consider
  // expanding the region through the bundle. This helps compile time by
  // limiting the number of blocks visited and the number of links in the
  // Hopfield network.
  if (bundles->getBlocks(n).size() > 100) {
    nodes[n].BiasP = 0;
    nodes[n].BiasN = (MBFI->getEntryFreq() / 16);
  }
}

/// Set the threshold for a given entry frequency.
///
/// Set the threshold relative to \c Entry. Since the threshold is used as a
/// bound on the open interval (-Threshold;Threshold), 1 is the minimum
/// threshold.
void SpillPlacement::setThreshold(const BlockFrequency &Entry) {
  // Apparently 2 is a good threshold when Entry==2^14, but we need to scale
  // it. Divide by 2^13, rounding as appropriate.
  uint64_t Freq = Entry.getFrequency();
  uint64_t Scaled = (Freq >> 13) + bool(Freq & (1 << 12));
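  // For example, Entry == 2^14 gives Freq >> 13 == 2 with the rounding bit
  // clear, so the threshold ends up as 2, matching the note above.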
  Threshold = std::max(UINT64_C(1), Scaled);
}

/// addConstraints - Compute node biases and weights from a set of constraints.
/// Set a bit in NodeMask for each active node.
void SpillPlacement::addConstraints(ArrayRef<BlockConstraint> LiveBlocks) {
  for (const BlockConstraint &LB : LiveBlocks) {
    BlockFrequency Freq = BlockFrequencies[LB.Number];

    // Live-in to block?
    if (LB.Entry != DontCare) {
      unsigned ib = bundles->getBundle(LB.Number, false);
      activate(ib);
      nodes[ib].addBias(Freq, LB.Entry);
    }

    // Live-out from block?
    if (LB.Exit != DontCare) {
      unsigned ob = bundles->getBundle(LB.Number, true);
      activate(ob);
      nodes[ob].addBias(Freq, LB.Exit);
    }
  }
}

/// addPrefSpill - Same as addConstraints(PrefSpill)
void SpillPlacement::addPrefSpill(ArrayRef<unsigned> Blocks, bool Strong) {
  for (unsigned B : Blocks) {
    BlockFrequency Freq = BlockFrequencies[B];
    if (Strong)
      Freq += Freq;
    unsigned ib = bundles->getBundle(B, false);
    unsigned ob = bundles->getBundle(B, true);
    activate(ib);
    activate(ob);
    nodes[ib].addBias(Freq, PrefSpill);
    nodes[ob].addBias(Freq, PrefSpill);
  }
}

void SpillPlacement::addLinks(ArrayRef<unsigned> Links) {
  for (unsigned Number : Links) {
    unsigned ib = bundles->getBundle(Number, false);
    unsigned ob = bundles->getBundle(Number, true);

    // Ignore self-loops.
    if (ib == ob)
      continue;
    activate(ib);
    activate(ob);
    BlockFrequency Freq = BlockFrequencies[Number];
    nodes[ib].addLink(ob, Freq);
    nodes[ob].addLink(ib, Freq);
  }
}

bool SpillPlacement::scanActiveBundles() {
  RecentPositive.clear();
  for (unsigned n : ActiveNodes->set_bits()) {
    update(n);
    // A node that must spill, or a node without any links is not going to
    // change its value ever again, so exclude it from iterations.
    if (nodes[n].mustSpill())
      continue;
    if (nodes[n].preferReg())
      RecentPositive.push_back(n);
  }
  return !RecentPositive.empty();
}

bool SpillPlacement::update(unsigned n) {
  if (!nodes[n].update(nodes, Threshold))
    return false;
  nodes[n].getDissentingNeighbors(TodoList, nodes);
  return true;
}

/// iterate - Repeatedly update the Hopfield nodes until stability or the
/// maximum number of iterations is reached.
void SpillPlacement::iterate() {
  // We do not need to push those nodes in the todolist.
  // They have already been processed as part of the previous iteration.
  RecentPositive.clear();

  // Since the last iteration, the todolist has been augmented by calls
  // to addConstraints, addLinks, and co.
  // Update the network energy starting at this new frontier.
  // The call to ::update will add the nodes that changed into the todolist.
  unsigned Limit = bundles->getNumBundles() * 10;
  while (Limit-- > 0 && !TodoList.empty()) {
    unsigned n = TodoList.pop_back_val();
    if (!update(n))
      continue;
    if (nodes[n].preferReg())
      RecentPositive.push_back(n);
  }
}

void SpillPlacement::prepare(BitVector &RegBundles) {
  RecentPositive.clear();
  TodoList.clear();
  // Reuse RegBundles as our ActiveNodes vector.
  ActiveNodes = &RegBundles;
  ActiveNodes->clear();
  ActiveNodes->resize(bundles->getNumBundles());
}

bool
SpillPlacement::finish() {
  assert(ActiveNodes && "Call prepare() first");

  // Write preferences back to ActiveNodes.
  bool Perfect = true;
  for (unsigned n : ActiveNodes->set_bits())
    if (!nodes[n].preferReg()) {
      ActiveNodes->reset(n);
      Perfect = false;
    }
  ActiveNodes = nullptr;
  return Perfect;
}

void SpillPlacement::BlockConstraint::print(raw_ostream &OS) const {
  auto toString = [](BorderConstraint C) -> StringRef {
    switch (C) {
    case DontCare: return "DontCare";
    case PrefReg: return "PrefReg";
    case PrefSpill: return "PrefSpill";
    case PrefBoth: return "PrefBoth";
    case MustSpill: return "MustSpill";
    };
    llvm_unreachable("uncovered switch");
  };

  // Print to the stream that was passed in rather than always to dbgs().
  OS << "{" << Number << ", "
     << toString(Entry) << ", "
     << toString(Exit) << ", "
     << (ChangesValue ? "changes" : "no change") << "}";
}

void SpillPlacement::BlockConstraint::dump() const {
  print(dbgs());
  dbgs() << "\n";
}