MLRegallocEvictAdvisor.cpp

  1. //===- MLRegAllocEvictAdvisor.cpp - ML eviction advisor -------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // Implementation of the ML eviction advisor and reward injection pass
  10. //
  11. //===----------------------------------------------------------------------===//
  12. #include "AllocationOrder.h"
  13. #include "RegAllocEvictionAdvisor.h"
  14. #include "RegAllocGreedy.h"
  15. #include "llvm/Analysis/MLModelRunner.h"
  16. #include "llvm/Analysis/TensorSpec.h"
  17. #if defined(LLVM_HAVE_TF_AOT_REGALLOCEVICTMODEL) || defined(LLVM_HAVE_TFLITE)
  18. #include "llvm/Analysis/ModelUnderTrainingRunner.h"
  19. #include "llvm/Analysis/NoInferenceModelRunner.h"
  20. #include "llvm/Analysis/Utils/TrainingLogger.h"
  21. #endif
  22. #include "MLRegallocEvictAdvisor.h"
  23. #include "llvm/Analysis/ReleaseModeModelRunner.h"
  24. #include "llvm/CodeGen/CalcSpillWeights.h"
  25. #include "llvm/CodeGen/LiveRegMatrix.h"
  26. #include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
  27. #include "llvm/CodeGen/MachineFunction.h"
  28. #include "llvm/CodeGen/MachineLoopInfo.h"
  29. #include "llvm/CodeGen/MachineRegisterInfo.h"
  30. #include "llvm/CodeGen/Passes.h"
  31. #include "llvm/CodeGen/RegisterClassInfo.h"
  32. #include "llvm/CodeGen/VirtRegMap.h"
  33. #include "llvm/InitializePasses.h"
  34. #include "llvm/Pass.h"
  35. #include "llvm/PassRegistry.h"
  36. #include "llvm/Support/CommandLine.h"
  37. #include "llvm/Support/ErrorHandling.h"
  38. #include <array>
  39. #include <memory>
  40. using namespace llvm;
  41. #define DEBUG_TYPE "ml-regalloc"
  42. // Generated header in release (AOT) mode
  43. #if defined(LLVM_HAVE_TF_AOT_REGALLOCEVICTMODEL)
  44. #include "RegallocEvictModel.h"
  45. using CompiledModelType = RegallocEvictModel;
  46. #else
  47. using CompiledModelType = NoopSavedModelImpl;
  48. #endif
  49. // Options that only make sense in development mode
  50. #ifdef LLVM_HAVE_TFLITE
  51. #include "RegAllocScore.h"
  52. #include "llvm/Analysis/Utils/TFUtils.h"
  53. static cl::opt<std::string> TrainingLog(
  54. "regalloc-training-log", cl::Hidden,
  55. cl::desc("Training log for the register allocator eviction model"));
  56. static cl::opt<std::string> ModelUnderTraining(
  57. "regalloc-model", cl::Hidden,
  58. cl::desc("The model being trained for register allocation eviction"));
  59. static cl::opt<bool> EnableDevelopmentFeatures(
  60. "regalloc-enable-development-features", cl::Hidden,
  61. cl::desc("Whether or not to enable features under development for the ML "
  62. "regalloc advisor"));
  63. #else
  64. static const bool EnableDevelopmentFeatures = false;
  65. #endif // #ifdef LLVM_HAVE_TFLITE
  66. extern cl::opt<unsigned> EvictInterferenceCutoff;
  67. /// The score injection pass.
  68. /// This pass calculates the score for a function and inserts it in the log, but
  69. /// this happens only in development mode. It's a no-op otherwise.
  70. namespace llvm {
  71. class RegAllocScoring : public MachineFunctionPass {
  72. public:
  73. static char ID;
  74. RegAllocScoring() : MachineFunctionPass(ID) {
  75. initializeRegAllocScoringPass(*PassRegistry::getPassRegistry());
  76. }
  77. ~RegAllocScoring() override = default;
  78. StringRef getPassName() const override {
  79. return "Register Allocation Pass Scoring";
  80. }
  81. /// RegAllocReward analysis usage.
  82. void getAnalysisUsage(AnalysisUsage &AU) const override {
  83. AU.setPreservesAll();
  84. AU.addRequired<RegAllocEvictionAdvisorAnalysis>();
  85. AU.addRequired<RegAllocPriorityAdvisorAnalysis>();
  86. AU.addRequired<MachineBlockFrequencyInfo>();
  87. MachineFunctionPass::getAnalysisUsage(AU);
  88. }
  89. /// Performs this pass
  90. bool runOnMachineFunction(MachineFunction &) override;
  91. };
  92. char RegAllocScoring::ID = 0;
  93. FunctionPass *createRegAllocScoringPass() { return new RegAllocScoring(); }
  94. } // namespace llvm
  95. INITIALIZE_PASS(RegAllocScoring, "regallocscoringpass",
  96. "Register Allocation Scoring Pass", false, false)
  97. // ===================================
  98. // Common ML Advisor declarations
  99. // ===================================
  100. namespace {
  101. // The model can only accept a specified number of opcodes and will error if
  102. // fed an opcode it hasn't seen before. This constant sets the current cutoff.
  103. static const int OpcodeValueCutoff = 17716;
  104. // Most features share the per-live-range shape described in the features
  105. // table below, so we'll reuse this vector when defining them.
  106. static const std::vector<int64_t> PerLiveRangeShape{1, NumberOfInterferences};
  107. // --------------
  108. // Features table
  109. // --------------
  110. // For each interfering live range (incl. the candidate) we collect a number of
  111. // features. However, because the features are of different types (and because
  112. // of ML best practices), we organize the tensors per feature, not per
  113. // candidate. Each such tensor has a scalar value corresponding to the
  114. // interfering live range at that position, in the order of AllocationOrder.
  115. // The last position corresponds to the virt reg seeking allocation.
  116. // The exception to all that is the progress feature, which is just a scalar
  117. // (see its documentation for details).
  118. // Note on naming: the "_by_max" features are normalized using the largest
  119. // value of that tensor, as observed in the current decision making stage
  120. // (i.e. for the current call to the advisor's tryFindEvictionCandidate).
  121. //
  122. // The feature list format: type, name, shape, documentation.
  123. // Note: we can really just use int64 and float, hence the modeling of some
  124. // bools as int64 values.
  125. #define RA_EVICT_FEATURES_LIST(M) \
  126. M(int64_t, mask, PerLiveRangeShape, \
  127. "boolean values, 0 for unavailable candidates (i.e. if a position is 0, " \
  128. "it " \
  129. "can't be evicted)") \
  130. M(int64_t, is_free, PerLiveRangeShape, \
  131. "boolean values, 1 if this phys reg is actually free (no interferences)") \
  132. M(float, nr_urgent, PerLiveRangeShape, \
  133. "number of 'urgent' intervals, normalized. Urgent are those that are OK " \
  134. "to break cascades") \
  135. M(float, nr_broken_hints, PerLiveRangeShape, \
  136. "if this position were evicted, how many broken hints would there be") \
  137. M(int64_t, is_hint, PerLiveRangeShape, \
  138. "is this a preferred phys reg for the candidate") \
  139. M(int64_t, is_local, PerLiveRangeShape, \
  140. "is this live range local to a basic block") \
  141. M(float, nr_rematerializable, PerLiveRangeShape, \
  142. "nr rematerializable ranges") \
  143. M(float, nr_defs_and_uses, PerLiveRangeShape, \
  144. "bb freq - weighed nr defs and uses") \
  145. M(float, weighed_reads_by_max, PerLiveRangeShape, \
  146. "bb freq - weighed nr of reads, normalized") \
  147. M(float, weighed_writes_by_max, PerLiveRangeShape, \
  148. "bb feq - weighed nr of writes, normalized") \
  149. M(float, weighed_read_writes_by_max, PerLiveRangeShape, \
  150. "bb freq - weighed nr of uses that are both read and writes, normalized") \
  151. M(float, weighed_indvars_by_max, PerLiveRangeShape, \
  152. "bb freq - weighed nr of uses that are indvars, normalized") \
  153. M(float, hint_weights_by_max, PerLiveRangeShape, \
  154. "bb freq - weighed nr of uses that are hints, normalized") \
  155. M(float, start_bb_freq_by_max, PerLiveRangeShape, \
  156. "the freq in the start block, normalized") \
  157. M(float, end_bb_freq_by_max, PerLiveRangeShape, \
  158. "freq of end block, normalized") \
  159. M(float, hottest_bb_freq_by_max, PerLiveRangeShape, \
  160. "hottest BB freq, normalized") \
  161. M(float, liverange_size, PerLiveRangeShape, \
  162. "size (instr index diff) of the LR") \
  163. M(float, use_def_density, PerLiveRangeShape, \
  164. "the max weight, as computed by the manual heuristic") \
  165. M(int64_t, max_stage, PerLiveRangeShape, \
  166. "largest stage of an interval in this LR") \
  167. M(int64_t, min_stage, PerLiveRangeShape, \
  168. "lowest stage of an interval in this LR") \
  169. M(float, progress, {1}, "ratio of current queue size to initial size")
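// The feature list above is expanded several times with different M()
// definitions in this file: as enum entries (_FEATURE_IDX below), as
// TensorSpecs for the model runner (_DECL_FEATURES), as memset targets
// (_RESET in resetInputs()), and as the SET() calls in extractFeatures().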
  170. #ifdef LLVM_HAVE_TFLITE
  171. #define RA_EVICT_FIRST_DEVELOPMENT_FEATURE(M) \
  172. M(int64_t, instructions, InstructionsShape, \
  173. "Opcodes of the instructions covered by the eviction problem")
  174. #define RA_EVICT_REST_DEVELOPMENT_FEATURES(M) \
  175. M(int64_t, instructions_mapping, InstructionsMappingShape, \
  176. "A binary matrix mapping LRs to instruction opcodes") \
  177. M(float, mbb_frequencies, MBBFrequencyShape, \
  178. "A vector of machine basic block frequencies") \
  179. M(int64_t, mbb_mapping, InstructionsShape, \
  180. "A vector of indicies mapping instructions to MBBs")
  181. #else
  182. #define RA_EVICT_FIRST_DEVELOPMENT_FEATURE(M)
  183. #define RA_EVICT_REST_DEVELOPMENT_FEATURES(M)
  184. #endif
  185. // The model learns to pick one of the mask == 1 interferences. This is the
  186. // name of the output tensor. The contract with the model is that the output
  187. // is guaranteed to point to a mask == 1 position. Using a macro here to
  188. // avoid 'not used' warnings (and to keep conditional compilation to a minimum).
  189. #define DecisionName "index_to_evict"
  190. // Named features index.
  191. enum FeatureIDs {
  192. #define _FEATURE_IDX_SIMPLE(_, name, __, ___) name
  193. #define _FEATURE_IDX(A, B, C, D) _FEATURE_IDX_SIMPLE(A, B, C, D),
  194. RA_EVICT_FEATURES_LIST(_FEATURE_IDX) FeatureCount,
  195. #ifdef LLVM_HAVE_TFLITE
  196. RA_EVICT_FIRST_DEVELOPMENT_FEATURE(_FEATURE_IDX_SIMPLE) = FeatureCount,
  197. #else
  198. RA_EVICT_FIRST_DEVELOPMENT_FEATURE(_FEATURE_IDX)
  199. #endif // #ifdef LLVM_HAVE_TFLITE
  200. RA_EVICT_REST_DEVELOPMENT_FEATURES(_FEATURE_IDX) FeaturesWithDevelopmentCount
  201. #undef _FEATURE_IDX
  202. #undef _FEATURE_IDX_SIMPLE
  203. };
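// For illustration only (a sketch of the macro expansion, not generated code):
// with the release feature list, the enum above reads roughly
//   enum FeatureIDs { mask, is_free, nr_urgent, ..., progress, FeatureCount };
// so FeatureIDs::mask == 0 and FeatureCount equals the number of release-mode
// features. When built with TFLITE, the development-only features start at
// index FeatureCount (FeatureIDs::instructions == FeatureCount).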
  204. // The ML advisor will typically have a sparse input to the evaluator, because
  205. // various phys regs won't be available. It's easier (maintenance-wise) to
  206. // bulk-reset the state of the evaluator each time we are about to use it
  207. // again.
  208. template <typename T> size_t getTotalSize(const std::vector<int64_t> &Shape) {
  209. size_t Ret = sizeof(T);
  210. for (const auto V : Shape)
  211. Ret *= V;
  212. return Ret;
  213. }
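// For example, getTotalSize<float>(PerLiveRangeShape) is
// sizeof(float) * 1 * NumberOfInterferences bytes - the byte count
// resetInputs() below needs in order to zero one per-live-range feature
// tensor.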
  214. void resetInputs(MLModelRunner &Runner) {
  215. #define _RESET(TYPE, NAME, SHAPE, __) \
  216. std::memset(Runner.getTensorUntyped(FeatureIDs::NAME), 0, \
  217. getTotalSize<TYPE>(SHAPE));
  218. RA_EVICT_FEATURES_LIST(_RESET)
  219. if (EnableDevelopmentFeatures) {
  220. RA_EVICT_FIRST_DEVELOPMENT_FEATURE(_RESET)
  221. RA_EVICT_REST_DEVELOPMENT_FEATURES(_RESET)
  222. #undef _RESET
  223. }
  224. }
  225. // Per-live interval components that get aggregated into the feature values
  226. // that will be passed to the evaluator.
  227. struct LIFeatureComponents {
  228. double R = 0;
  229. double W = 0;
  230. double RW = 0;
  231. double IndVarUpdates = 0;
  232. double HintWeights = 0.0;
  233. int64_t NrDefsAndUses = 0;
  234. float HottestBlockFreq = 0.0;
  235. bool IsRemat = false;
  236. };
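// These components are aggregated over all interfering intervals in
// extractFeatures() below, where they feed (among others) the weighed_*,
// hint_weights_by_max, nr_defs_and_uses, hottest_bb_freq_by_max and
// nr_rematerializable feature columns.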
  237. using CandidateRegList =
  238. std::array<std::pair<MCRegister, bool>, NumberOfInterferences>;
  239. using FeaturesListNormalizer =
  240. llvm::SmallVector<float, FeatureIDs::FeatureCount>;
  241. /// The ML evictor (commonalities between release and development mode)
  242. class MLEvictAdvisor : public RegAllocEvictionAdvisor {
  243. public:
  244. MLEvictAdvisor(const MachineFunction &MF, const RAGreedy &RA,
  245. MLModelRunner *Runner, const MachineBlockFrequencyInfo &MBFI,
  246. const MachineLoopInfo &Loops);
  247. protected:
  248. const RegAllocEvictionAdvisor &getDefaultAdvisor() const {
  249. return static_cast<const RegAllocEvictionAdvisor &>(DefaultAdvisor);
  250. }
  251. // The assumption is that if the Runner could not be constructed, we emitted
  252. // an error, and we shouldn't be asking for it here.
  253. const MLModelRunner &getRunner() const { return *Runner; }
  254. /// This just calls Evaluate on the Runner, but in the development mode
  255. /// case, if we're just capturing the log of the default advisor, it needs
  256. /// to call the latter instead, so we need to pass all the necessary
  257. /// parameters for it. In the development case, it will also log.
  258. virtual int64_t
  259. tryFindEvictionCandidatePosition(const LiveInterval &VirtReg,
  260. const AllocationOrder &Order,
  261. unsigned OrderLimit, uint8_t CostPerUseLimit,
  262. const SmallVirtRegSet &FixedRegisters) const;
  263. /// Load the features of the given VirtReg (allocated or not) at column Pos;
  264. /// if the interference there can't be evicted, return false instead.
  265. bool
  266. loadInterferenceFeatures(const LiveInterval &VirtReg, MCRegister PhysReg,
  267. bool IsHint, const SmallVirtRegSet &FixedRegisters,
  268. llvm::SmallVectorImpl<float> &Largest, size_t Pos,
  269. SmallVectorImpl<LRStartEndInfo> &LRPosInfo) const;
  270. private:
  271. static float getInitialQueueSize(const MachineFunction &MF);
  272. MCRegister tryFindEvictionCandidate(
  273. const LiveInterval &VirtReg, const AllocationOrder &Order,
  274. uint8_t CostPerUseLimit,
  275. const SmallVirtRegSet &FixedRegisters) const override;
  276. void extractFeatures(const SmallVectorImpl<const LiveInterval *> &Intervals,
  277. llvm::SmallVectorImpl<float> &Largest, size_t Pos,
  278. int64_t IsHint, int64_t LocalIntfsCount, float NrUrgent,
  279. SmallVectorImpl<LRStartEndInfo> &LRPosInfo) const;
  280. // Point-in-time: we didn't learn this, so we always delegate to the
  281. // default.
  282. bool canEvictHintInterference(
  283. const LiveInterval &VirtReg, MCRegister PhysReg,
  284. const SmallVirtRegSet &FixedRegisters) const override {
  285. return getDefaultAdvisor().canEvictHintInterference(VirtReg, PhysReg,
  286. FixedRegisters);
  287. }
  288. const LIFeatureComponents &
  289. getLIFeatureComponents(const LiveInterval &LI) const;
  290. // Hold on to a default advisor for:
  291. // 1) the implementation of canEvictHintInterference, because we didn't
  292. // learn that nuance yet; 2) for bootstrapping (logging) in the development
  293. // mode case.
  294. const DefaultEvictionAdvisor DefaultAdvisor;
  295. MLModelRunner *const Runner;
  296. const MachineBlockFrequencyInfo &MBFI;
  297. const MachineLoopInfo &Loops;
  298. // Indices of those features we don't want to normalize.
  299. // This could be static and shared, but its initialization is non-trivial.
  300. std::bitset<FeatureIDs::FeatureCount> DoNotNormalize;
  301. const float InitialQSize;
  302. using RegID = unsigned;
  303. mutable DenseMap<RegID, LIFeatureComponents> CachedFeatures;
  304. };
  305. #define _DECL_FEATURES(type, name, shape, _) \
  306. TensorSpec::createSpec<type>(#name, shape),
  307. // ===================================
  308. // Release (AOT) - specifics
  309. // ===================================
  310. class ReleaseModeEvictionAdvisorAnalysis final
  311. : public RegAllocEvictionAdvisorAnalysis {
  312. public:
  313. ReleaseModeEvictionAdvisorAnalysis()
  314. : RegAllocEvictionAdvisorAnalysis(AdvisorMode::Release) {
  315. if (EnableDevelopmentFeatures) {
  316. InputFeatures = {RA_EVICT_FEATURES_LIST(
  317. _DECL_FEATURES) RA_EVICT_FIRST_DEVELOPMENT_FEATURE(_DECL_FEATURES)
  318. RA_EVICT_REST_DEVELOPMENT_FEATURES(_DECL_FEATURES)};
  319. } else {
  320. InputFeatures = {RA_EVICT_FEATURES_LIST(_DECL_FEATURES)};
  321. }
  322. }
  323. // support for isa<> and dyn_cast.
  324. static bool classof(const RegAllocEvictionAdvisorAnalysis *R) {
  325. return R->getAdvisorMode() == AdvisorMode::Release;
  326. }
  327. private:
  328. std::vector<TensorSpec> InputFeatures;
  329. void getAnalysisUsage(AnalysisUsage &AU) const override {
  330. AU.addRequired<MachineBlockFrequencyInfo>();
  331. AU.addRequired<MachineLoopInfo>();
  332. RegAllocEvictionAdvisorAnalysis::getAnalysisUsage(AU);
  333. }
  334. std::unique_ptr<RegAllocEvictionAdvisor>
  335. getAdvisor(const MachineFunction &MF, const RAGreedy &RA) override {
  336. if (!Runner)
  337. Runner = std::make_unique<ReleaseModeModelRunner<CompiledModelType>>(
  338. MF.getFunction().getContext(), InputFeatures, DecisionName);
  339. return std::make_unique<MLEvictAdvisor>(
  340. MF, RA, Runner.get(), getAnalysis<MachineBlockFrequencyInfo>(),
  341. getAnalysis<MachineLoopInfo>());
  342. }
  343. std::unique_ptr<ReleaseModeModelRunner<CompiledModelType>> Runner;
  344. };
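// Usage note (the advisor-selection flag itself is defined in
// RegAllocEvictionAdvisor.cpp, not here): this analysis is what
// createReleaseModeAdvisor() at the bottom of this file returns, and is
// typically requested with -regalloc-enable-advisor=release.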
  345. // ===================================
  346. // Development mode-specifics
  347. // ===================================
  348. //
  349. // Features we log
  350. #ifdef LLVM_HAVE_TFLITE
  351. static const TensorSpec Output =
  352. TensorSpec::createSpec<int64_t>(DecisionName, {1});
  353. static const TensorSpec Reward = TensorSpec::createSpec<float>("reward", {1});
  354. // Features we bind on the model. The tensor names have a prefix, and we also
  355. // need to include some tensors that the training algorithm expects to be
  356. // present.
  357. // TODO: can we just get rid of these?
  358. #define _DECL_TRAIN_FEATURES(type, name, shape, _) \
  359. TensorSpec::createSpec<type>(std::string("action_") + #name, shape),
  360. class DevelopmentModeEvictAdvisor : public MLEvictAdvisor {
  361. public:
  362. DevelopmentModeEvictAdvisor(const MachineFunction &MF, const RAGreedy &RA,
  363. MLModelRunner *Runner,
  364. const MachineBlockFrequencyInfo &MBFI,
  365. const MachineLoopInfo &Loops, Logger *Log)
  366. : MLEvictAdvisor(MF, RA, Runner, MBFI, Loops), Log(Log) {}
  367. private:
  368. int64_t tryFindEvictionCandidatePosition(
  369. const LiveInterval &VirtReg, const AllocationOrder &Order,
  370. unsigned OrderLimit, uint8_t CostPerUseLimit,
  371. const SmallVirtRegSet &FixedRegisters) const override;
  372. Logger *const Log;
  373. };
  374. class DevelopmentModeEvictionAdvisorAnalysis final
  375. : public RegAllocEvictionAdvisorAnalysis {
  376. public:
  377. DevelopmentModeEvictionAdvisorAnalysis()
  378. : RegAllocEvictionAdvisorAnalysis(AdvisorMode::Development) {
  379. if (EnableDevelopmentFeatures) {
  380. InputFeatures = {RA_EVICT_FEATURES_LIST(
  381. _DECL_FEATURES) RA_EVICT_FIRST_DEVELOPMENT_FEATURE(_DECL_FEATURES)
  382. RA_EVICT_REST_DEVELOPMENT_FEATURES(_DECL_FEATURES)};
  383. TrainingInputFeatures = {
  384. RA_EVICT_FEATURES_LIST(_DECL_TRAIN_FEATURES)
  385. RA_EVICT_FIRST_DEVELOPMENT_FEATURE(_DECL_TRAIN_FEATURES)
  386. RA_EVICT_REST_DEVELOPMENT_FEATURES(_DECL_TRAIN_FEATURES)
  387. TensorSpec::createSpec<float>("action_discount", {1}),
  388. TensorSpec::createSpec<int32_t>("action_step_type", {1}),
  389. TensorSpec::createSpec<float>("action_reward", {1})};
  390. } else {
  391. InputFeatures = {RA_EVICT_FEATURES_LIST(_DECL_FEATURES)};
  392. TrainingInputFeatures = {
  393. RA_EVICT_FEATURES_LIST(_DECL_TRAIN_FEATURES)
  394. TensorSpec::createSpec<float>("action_discount", {1}),
  395. TensorSpec::createSpec<int32_t>("action_step_type", {1}),
  396. TensorSpec::createSpec<float>("action_reward", {1})};
  397. }
  398. }
  399. // support for isa<> and dyn_cast.
  400. static bool classof(const RegAllocEvictionAdvisorAnalysis *R) {
  401. return R->getAdvisorMode() == AdvisorMode::Development;
  402. }
  403. void logRewardIfNeeded(const MachineFunction &MF,
  404. llvm::function_ref<float()> GetReward) override {
  405. if (!Log)
  406. return;
  407. // The function pass manager would run all the function passes for a
  408. // function, so we assume the last context belongs to this function. If
  409. // this invariant ever changes, we can implement context switching at that
  410. // time. For now, a mismatch is an error.
  411. if (Log->currentContext() != MF.getName()) {
  412. MF.getFunction().getContext().emitError(
  413. "The training log context shouldn't have changed.");
  414. }
  415. if (Log->hasObservationInProgress())
  416. Log->logReward<float>(GetReward());
  417. }
  418. private:
  419. std::vector<TensorSpec> InputFeatures;
  420. std::vector<TensorSpec> TrainingInputFeatures;
  421. void getAnalysisUsage(AnalysisUsage &AU) const override {
  422. AU.addRequired<MachineBlockFrequencyInfo>();
  423. AU.addRequired<MachineLoopInfo>();
  424. RegAllocEvictionAdvisorAnalysis::getAnalysisUsage(AU);
  425. }
  426. bool doInitialization(Module &M) override {
  427. LLVMContext &Ctx = M.getContext();
  428. if (ModelUnderTraining.empty() && TrainingLog.empty()) {
  429. Ctx.emitError("Regalloc development mode should be requested with at "
  430. "least logging enabled and/or a training model");
  431. return false;
  432. }
  433. if (ModelUnderTraining.empty())
  434. Runner = std::make_unique<NoInferenceModelRunner>(Ctx, InputFeatures);
  435. else
  436. Runner = ModelUnderTrainingRunner::createAndEnsureValid(
  437. Ctx, ModelUnderTraining, DecisionName, TrainingInputFeatures);
  438. if (!Runner) {
  439. Ctx.emitError("Regalloc: could not set up the model runner");
  440. return false;
  441. }
  442. if (TrainingLog.empty())
  443. return false;
  444. std::error_code EC;
  445. auto OS = std::make_unique<raw_fd_ostream>(TrainingLog, EC);
  446. if (EC) {
  447. M.getContext().emitError(EC.message() + ":" + TrainingLog);
  448. return false;
  449. }
  450. std::vector<TensorSpec> LFS = InputFeatures;
  451. if (auto *MUTR = dyn_cast<ModelUnderTrainingRunner>(Runner.get()))
  452. append_range(LFS, MUTR->extraOutputsForLoggingSpecs());
  453. // We always log the output; in particular, if we're not evaluating, we
  454. // don't have an output spec json file. That's why we handle the
  455. // 'normal' output separately.
  456. LFS.push_back(Output);
  457. Log = std::make_unique<Logger>(std::move(OS), LFS, Reward,
  458. /*IncludeReward*/ true);
  459. return false;
  460. }
  461. std::unique_ptr<RegAllocEvictionAdvisor>
  462. getAdvisor(const MachineFunction &MF, const RAGreedy &RA) override {
  463. if (!Runner)
  464. return nullptr;
  465. if (Log)
  466. Log->switchContext(MF.getName());
  467. return std::make_unique<DevelopmentModeEvictAdvisor>(
  468. MF, RA, Runner.get(), getAnalysis<MachineBlockFrequencyInfo>(),
  469. getAnalysis<MachineLoopInfo>(), Log.get());
  470. }
  471. std::unique_ptr<MLModelRunner> Runner;
  472. std::unique_ptr<Logger> Log;
  473. };
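// Rough usage sketch for development mode (the advisor-selection flag lives in
// RegAllocEvictionAdvisor.cpp; the other flags are declared at the top of this
// file):
//   llc -regalloc-enable-advisor=development \
//       -regalloc-training-log=<path> [-regalloc-model=<saved-model dir>] ...
// With logging only (no model under training), the default advisor's decisions
// are captured, which is useful for bootstrapping.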
  474. #endif //#ifdef LLVM_HAVE_TFLITE
  475. } // namespace
  476. float MLEvictAdvisor::getInitialQueueSize(const MachineFunction &MF) {
  477. auto &MRI = MF.getRegInfo();
  478. float Ret = 0.0;
  479. for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
  480. Register Reg = Register::index2VirtReg(I);
  481. if (MRI.reg_nodbg_empty(Reg))
  482. continue;
  483. ++Ret;
  484. }
  485. return Ret;
  486. }
  487. MLEvictAdvisor::MLEvictAdvisor(const MachineFunction &MF, const RAGreedy &RA,
  488. MLModelRunner *Runner,
  489. const MachineBlockFrequencyInfo &MBFI,
  490. const MachineLoopInfo &Loops)
  491. : RegAllocEvictionAdvisor(MF, RA), DefaultAdvisor(MF, RA),
  492. Runner(std::move(Runner)), MBFI(MBFI), Loops(Loops),
  493. InitialQSize(MLEvictAdvisor::getInitialQueueSize(MF)) {
  494. assert(this->Runner);
  495. DoNotNormalize.set(FeatureIDs::mask);
  496. DoNotNormalize.set(FeatureIDs::is_free);
  497. DoNotNormalize.set(FeatureIDs::is_hint);
  498. DoNotNormalize.set(FeatureIDs::is_local);
  499. DoNotNormalize.set(FeatureIDs::min_stage);
  500. DoNotNormalize.set(FeatureIDs::max_stage);
  501. DoNotNormalize.set(FeatureIDs::progress);
  502. }
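// All remaining features are floats and get divided by the per-feature maximum
// observed during the current call to tryFindEvictionCandidate (see the
// normalization loop there).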
  503. int64_t MLEvictAdvisor::tryFindEvictionCandidatePosition(
  504. const LiveInterval &, const AllocationOrder &, unsigned, uint8_t,
  505. const SmallVirtRegSet &) const {
  506. int64_t Ret = Runner->evaluate<int64_t>();
  507. assert(Ret >= 0);
  508. assert(Ret <= CandidateVirtRegPos);
  509. return Ret;
  510. }
  511. bool MLEvictAdvisor::loadInterferenceFeatures(
  512. const LiveInterval &VirtReg, MCRegister PhysReg, bool IsHint,
  513. const SmallVirtRegSet &FixedRegisters,
  514. llvm::SmallVectorImpl<float> &Largest, size_t Pos,
  515. llvm::SmallVectorImpl<LRStartEndInfo> &LRPosInfo) const {
  516. // It is only possible to evict virtual register interference.
  517. if (Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg) {
  518. // leave unavailable
  519. return false;
  520. }
  521. const bool IsLocal = LIS->intervalIsInOneMBB(VirtReg);
  522. int64_t LocalIntfs = 0;
  523. float NrUrgent = 0.0f;
  524. // The cascade tracking is the same as in the default advisor
  525. unsigned Cascade = RA.getExtraInfo().getCascadeOrCurrentNext(VirtReg.reg());
  526. SmallVector<const LiveInterval *, MaxInterferences> InterferingIntervals;
  527. for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
  528. LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
  529. // Different from the default heuristic, we don't make any assumptions
  530. // about what having more than 10 results in the query may mean.
  531. const auto &IFIntervals = Q.interferingVRegs(EvictInterferenceCutoff);
  532. if (IFIntervals.empty() && InterferingIntervals.empty())
  533. continue;
  534. if (IFIntervals.size() >= EvictInterferenceCutoff)
  535. return false;
  536. InterferingIntervals.append(IFIntervals.begin(), IFIntervals.end());
  537. for (const LiveInterval *Intf : reverse(IFIntervals)) {
  538. assert(Intf->reg().isVirtual() &&
  539. "Only expecting virtual register interference from query");
  540. // This is the same set of legality checks as in the default case: don't
  541. // try to evict fixed regs or 'done' ones. Also don't break cascades,
  542. // except in the urgent case, with the same nuances used in the default
  543. // heuristic.
  544. // We could try sharing this between the advisors, but it may end up
  545. // more complex than it is right now.
  546. if (FixedRegisters.count(Intf->reg()))
  547. return false;
  548. if (RA.getExtraInfo().getStage(*Intf) == RS_Done)
  549. return false;
  550. bool Urgent =
  551. !VirtReg.isSpillable() &&
  552. (Intf->isSpillable() ||
  553. RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg())) <
  554. RegClassInfo.getNumAllocatableRegs(
  555. MRI->getRegClass(Intf->reg())));
  556. // Only evict older cascades or live ranges without a cascade.
  557. unsigned IntfCascade = RA.getExtraInfo().getCascade(Intf->reg());
  558. if (Cascade <= IntfCascade) {
  559. if (!Urgent)
  560. return false;
  561. ++NrUrgent;
  562. }
  563. LocalIntfs += (IsLocal && LIS->intervalIsInOneMBB(*Intf) &&
  564. (!EnableLocalReassign || !canReassign(*Intf, PhysReg)));
  565. }
  566. }
  567. // OK, so if we made it this far, this LR is an eviction candidate; load its
  568. // features.
  569. extractFeatures(InterferingIntervals, Largest, Pos, IsHint, LocalIntfs,
  570. NrUrgent, LRPosInfo);
  571. return true;
  572. }
  573. MCRegister MLEvictAdvisor::tryFindEvictionCandidate(
  574. const LiveInterval &VirtReg, const AllocationOrder &Order,
  575. uint8_t CostPerUseLimit, const SmallVirtRegSet &FixedRegisters) const {
  576. auto MaybeOrderLimit = getOrderLimit(VirtReg, Order, CostPerUseLimit);
  577. if (!MaybeOrderLimit)
  578. return MCRegister::NoRegister;
  579. unsigned OrderLimit = *MaybeOrderLimit;
  580. // The heuristic sets initial costs such that, if CostPerUseLimit is
  581. // max<uint8_t>, then any of the costs of the legally-evictable intervals
  582. // would be lower. When that happens, one of those will be selected.
  583. // Therefore, we allow the candidate itself to be selected, unless the
  584. // candidate is unspillable, in which case it would be incorrect not to find
  585. // a register for it.
  586. const bool MustFindEviction =
  587. (!VirtReg.isSpillable() && CostPerUseLimit == static_cast<uint8_t>(~0u));
  588. // Number of available candidates - if 0, no need to continue.
  589. size_t Available = 0;
  590. // Make sure we don't have leftover partial state from an attempt where we
  591. // had no available candidates and bailed out early.
  592. resetInputs(*Runner);
  593. // Track the index->register mapping because AllocationOrder doesn't do that
  594. // and we'd have to scan it.
  595. // Also track their mask, to write asserts/debug.
  596. CandidateRegList Regs;
  597. Regs.fill({0, false});
  598. // Track the largest value of features seen during this eviction session. We
  599. // only normalize (some of) the float features, but it's just simpler to
  600. // dimension 'Largest' to all the features, especially since we have the
  601. // 'DoNotNormalize' list.
  602. FeaturesListNormalizer Largest(FeatureIDs::FeatureCount, 0.0);
  603. // Same overall idea as in the default eviction policy - we visit the values
  604. // of AllocationOrder one at a time. If a value is not legally available, we
  605. // mask off the corresponding feature column (== do nothing, because we
  606. // already reset all the features to 0). Use Pos to capture the column we
  607. // load features at - in AllocationOrder order.
  608. size_t Pos = 0;
  609. SmallVector<LRStartEndInfo, NumberOfInterferences> LRPosInfo;
  610. for (auto I = Order.begin(), E = Order.getOrderLimitEnd(OrderLimit); I != E;
  611. ++I, ++Pos) {
  612. MCRegister PhysReg = *I;
  613. assert(!Regs[Pos].second);
  614. assert(PhysReg);
  615. if (!canAllocatePhysReg(CostPerUseLimit, PhysReg)) {
  616. continue;
  617. }
  618. if (loadInterferenceFeatures(VirtReg, PhysReg, I.isHint(), FixedRegisters,
  619. Largest, Pos, LRPosInfo)) {
  620. ++Available;
  621. Regs[Pos] = std::make_pair(PhysReg, true);
  622. }
  623. }
  624. if (Available == 0) {
  625. // Nothing to decide, nothing to learn.
  626. assert(!MustFindEviction);
  627. return MCRegister::NoRegister;
  628. }
  629. const size_t ValidPosLimit = Pos;
  630. // If we must find eviction, the candidate should be masked out of the
  631. // decision making process.
  632. Regs[CandidateVirtRegPos].second = !MustFindEviction;
  633. if (!MustFindEviction)
  634. extractFeatures(SmallVector<const LiveInterval *, 1>(1, &VirtReg), Largest,
  635. CandidateVirtRegPos, /*IsHint*/ 0,
  636. /*LocalIntfsCount*/ 0,
  637. /*NrUrgent*/ 0.0, LRPosInfo);
  638. assert(InitialQSize > 0.0 && "We couldn't have gotten here if we had "
  639. "nothing to allocate initially.");
  640. #ifdef LLVM_HAVE_TFLITE
  641. if (EnableDevelopmentFeatures) {
  642. extractInstructionFeatures(
  643. LRPosInfo, Runner,
  644. [this](SlotIndex InputIndex) -> int {
  645. auto *CurrentMachineInstruction =
  646. LIS->getInstructionFromIndex(InputIndex);
  647. if (!CurrentMachineInstruction) {
  648. return -1;
  649. }
  650. return CurrentMachineInstruction->getOpcode();
  651. },
  652. [this](SlotIndex InputIndex) -> float {
  653. auto *CurrentMachineInstruction =
  654. LIS->getInstructionFromIndex(InputIndex);
  655. return MBFI.getBlockFreqRelativeToEntryBlock(
  656. CurrentMachineInstruction->getParent());
  657. },
  658. [this](SlotIndex InputIndex) -> MachineBasicBlock * {
  659. auto *CurrentMachineInstruction =
  660. LIS->getInstructionFromIndex(InputIndex);
  661. return CurrentMachineInstruction->getParent();
  662. },
  663. FeatureIDs::instructions, FeatureIDs::instructions_mapping,
  664. FeatureIDs::mbb_frequencies, FeatureIDs::mbb_mapping,
  665. LIS->getSlotIndexes()->getLastIndex());
  666. }
  667. #endif // #ifdef LLVM_HAVE_TFLITE
  668. // Normalize the features.
  669. for (auto &V : Largest)
  670. V = V ? V : 1.0;
  671. for (size_t FeatureIndex = 0; FeatureIndex < FeatureIDs::FeatureCount;
  672. ++FeatureIndex) {
  673. if (DoNotNormalize.test(FeatureIndex))
  674. continue;
  675. for (size_t Pos = 0; Pos < NumberOfInterferences; ++Pos) {
  676. Runner->getTensor<float>(FeatureIndex)[Pos] /= Largest[FeatureIndex];
  677. }
  678. }
  679. *Runner->getTensor<float>(FeatureIDs::progress) =
  680. static_cast<float>(RA.getQueueSize()) / InitialQSize;
  681. // Get a decision.
  682. size_t CandidatePos = tryFindEvictionCandidatePosition(
  683. VirtReg, Order, OrderLimit, CostPerUseLimit, FixedRegisters);
  684. // The contract with the ML side is that CandidatePos is mask == 1 (i.e.
  685. // Regs[CandidatePos].second)
  686. assert(Regs[CandidatePos].second);
  687. if (CandidatePos == CandidateVirtRegPos) {
  688. assert(!MustFindEviction);
  689. return MCRegister::NoRegister;
  690. }
  691. assert(CandidatePos < ValidPosLimit);
  692. (void)ValidPosLimit;
  693. return Regs[CandidatePos].first;
  694. }
  695. const LIFeatureComponents &
  696. MLEvictAdvisor::getLIFeatureComponents(const LiveInterval &LI) const {
  697. RegID ID = LI.reg().id();
  698. LIFeatureComponents Empty;
  699. auto I = CachedFeatures.insert(std::make_pair(ID, Empty));
  700. LIFeatureComponents &Ret = I.first->getSecond();
  701. if (!I.second)
  702. return Ret;
  703. SmallPtrSet<MachineInstr *, 8> Visited;
  704. const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  705. for (MachineRegisterInfo::reg_instr_nodbg_iterator
  706. I = MRI->reg_instr_nodbg_begin(LI.reg()),
  707. E = MRI->reg_instr_nodbg_end();
  708. I != E;) {
  709. MachineInstr *MI = &*(I++);
  710. ++Ret.NrDefsAndUses;
  711. if (!Visited.insert(MI).second)
  712. continue;
  713. if (MI->isIdentityCopy() || MI->isImplicitDef())
  714. continue;
  715. bool Reads, Writes;
  716. std::tie(Reads, Writes) = MI->readsWritesVirtualRegister(LI.reg());
  717. float Freq = MBFI.getBlockFreqRelativeToEntryBlock(MI->getParent());
  718. Ret.HottestBlockFreq = std::max(Freq, Ret.HottestBlockFreq);
  719. Ret.R += (Reads && !Writes) * Freq;
  720. Ret.W += (!Reads && Writes) * Freq;
  721. Ret.RW += (Reads && Writes) * Freq;
  722. auto *MBB = MI->getParent();
  723. auto *Loop = Loops.getLoopFor(MBB);
  724. bool IsExiting = Loop ? Loop->isLoopExiting(MBB) : false;
  725. if (Writes && IsExiting && LIS->isLiveOutOfMBB(LI, MBB))
  726. Ret.IndVarUpdates += Freq;
  727. if (MI->isCopy() && VirtRegAuxInfo::copyHint(MI, LI.reg(), TRI, *MRI))
  728. Ret.HintWeights += Freq;
  729. }
  730. Ret.IsRemat = VirtRegAuxInfo::isRematerializable(
  731. LI, *LIS, *VRM, *MF.getSubtarget().getInstrInfo());
  732. return Ret;
  733. }
  734. // Overall, this currently mimics what we do for weight calculation, but instead
  735. // of accumulating the various features, we keep them separate.
  736. void MLEvictAdvisor::extractFeatures(
  737. const SmallVectorImpl<const LiveInterval *> &Intervals,
  738. llvm::SmallVectorImpl<float> &Largest, size_t Pos, int64_t IsHint,
  739. int64_t LocalIntfsCount, float NrUrgent,
  740. SmallVectorImpl<LRStartEndInfo> &LRPosInfo) const {
  741. int64_t NrDefsAndUses = 0;
  742. int64_t NrBrokenHints = 0;
  743. double R = 0.0;
  744. double W = 0.0;
  745. double RW = 0.0;
  746. double IndVarUpdates = 0.0;
  747. double HintWeights = 0.0;
  748. float StartBBFreq = 0.0;
  749. float EndBBFreq = 0.0;
  750. float HottestBlockFreq = 0.0;
  751. int32_t NrRematerializable = 0;
  752. float TotalWeight = 0.0;
  753. SlotIndex EndSI = LIS->getSlotIndexes()->getZeroIndex();
  754. SlotIndex StartSI = LIS->getSlotIndexes()->getLastIndex();
  755. int64_t MaxStage = 0;
  756. int64_t MinStage =
  757. Intervals.empty() ? 0 : std::numeric_limits<int64_t>::max();
  758. for (const auto *L : Intervals) {
  759. const LiveInterval &LI = *L;
  760. MaxStage = std::max<int64_t>(
  761. MaxStage, static_cast<int64_t>(RA.getExtraInfo().getStage(LI)));
  762. MinStage = std::min<int64_t>(
  763. MinStage, static_cast<int64_t>(RA.getExtraInfo().getStage(LI)));
  764. TotalWeight = std::max(TotalWeight, LI.weight());
  765. if (LI.beginIndex() < StartSI)
  766. StartSI = LI.beginIndex();
  767. if (LI.endIndex() > EndSI)
  768. EndSI = LI.endIndex();
  769. const LIFeatureComponents &LIFC = getLIFeatureComponents(LI);
  770. NrBrokenHints += VRM->hasPreferredPhys(LI.reg());
  771. NrDefsAndUses += LIFC.NrDefsAndUses;
  772. HottestBlockFreq = std::max(HottestBlockFreq, LIFC.HottestBlockFreq);
  773. R += LIFC.R;
  774. W += LIFC.W;
  775. RW += LIFC.RW;
  776. IndVarUpdates += LIFC.IndVarUpdates;
  777. HintWeights += LIFC.HintWeights;
  778. NrRematerializable += LIFC.IsRemat;
  779. if (EnableDevelopmentFeatures) {
  780. for (auto CurrentSegment : LI) {
  781. LRPosInfo.push_back(
  782. LRStartEndInfo{CurrentSegment.start, CurrentSegment.end, Pos});
  783. }
  784. }
  785. }
  786. size_t Size = 0;
  787. if (!Intervals.empty()) {
  788. StartBBFreq =
  789. MBFI.getBlockFreqRelativeToEntryBlock(LIS->getMBBFromIndex(StartSI));
  790. if (EndSI >= LIS->getSlotIndexes()->getLastIndex())
  791. EndSI = LIS->getSlotIndexes()->getLastIndex().getPrevIndex();
  792. EndBBFreq =
  793. MBFI.getBlockFreqRelativeToEntryBlock(LIS->getMBBFromIndex(EndSI));
  794. Size = StartSI.distance(EndSI);
  795. }
  796. // Set the features at the column 'Pos'.
  797. #define SET(ID, TYPE, VAL) \
  798. do { \
  799. Runner->getTensor<TYPE>(FeatureIDs::ID)[Pos] = static_cast<TYPE>(VAL); \
  800. if (!DoNotNormalize.test(FeatureIDs::ID)) \
  801. Largest[FeatureIDs::ID] = \
  802. std::max(Largest[FeatureIDs::ID], static_cast<float>(VAL)); \
  803. } while (false)
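// For example, SET(liverange_size, float, Size) stores Size at column Pos of
// the liverange_size tensor and, since liverange_size is not in DoNotNormalize,
// also raises Largest[FeatureIDs::liverange_size] to at least Size so the
// column can be normalized later.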
  804. SET(mask, int64_t, 1);
  805. SET(is_free, int64_t, Intervals.empty());
  806. SET(nr_urgent, float, NrUrgent);
  807. SET(nr_broken_hints, float, NrBrokenHints);
  808. SET(is_hint, int64_t, IsHint);
  809. SET(is_local, int64_t, LocalIntfsCount);
  810. SET(nr_rematerializable, float, NrRematerializable);
  811. SET(nr_defs_and_uses, float, NrDefsAndUses);
  812. SET(weighed_reads_by_max, float, R);
  813. SET(weighed_writes_by_max, float, W);
  814. SET(weighed_read_writes_by_max, float, RW);
  815. SET(weighed_indvars_by_max, float, IndVarUpdates);
  816. SET(hint_weights_by_max, float, HintWeights);
  817. SET(start_bb_freq_by_max, float, StartBBFreq);
  818. SET(end_bb_freq_by_max, float, EndBBFreq);
  819. SET(hottest_bb_freq_by_max, float, HottestBlockFreq);
  820. SET(liverange_size, float, Size);
  821. SET(use_def_density, float, TotalWeight);
  822. SET(max_stage, int64_t, MaxStage);
  823. SET(min_stage, int64_t, MinStage);
  824. #undef SET
  825. }
  826. void extractInstructionFeatures(
  827. SmallVectorImpl<LRStartEndInfo> &LRPosInfo, MLModelRunner *RegallocRunner,
  828. function_ref<int(SlotIndex)> GetOpcode,
  829. function_ref<float(SlotIndex)> GetMBBFreq,
  830. function_ref<MachineBasicBlock *(SlotIndex)> GetMBBReference,
  831. const int InstructionsIndex, const int InstructionsMappingIndex,
  832. const int MBBFreqIndex, const int MBBMappingIndex,
  833. const SlotIndex LastIndex) {
  834. // This function extracts instruction-based features relevant to the eviction
  835. // problem currently being solved. It ends up extracting four
  836. // tensors:
  837. // 1 - A vector of size max instruction count. It contains the opcodes of the
  838. // instructions spanned by all the intervals in the current instance of the
  839. // eviction problem.
  840. // 2 - A binary mapping matrix of size (LR count * max
  841. // instruction count) which maps where the LRs are live to the actual opcodes
  842. // for which they are live.
  843. // 3 - A vector of size max supported MBB count storing MBB frequencies,
  844. // encompassing all of the MBBs covered by the eviction problem.
  845. // 4 - A vector of size max instruction count of indices to members of the MBB
  846. // frequency vector, mapping each instruction to its associated MBB.
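// Note: instructions_mapping is the row-major flattening of that
// (LR count x max instruction count) matrix, which is why the stores below
// index it as Pos * ModelMaxSupportedInstructionCount + InstructionIndex.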
  847. // Start off by sorting the segments based on the beginning slot index.
  848. std::sort(
  849. LRPosInfo.begin(), LRPosInfo.end(),
  850. [](LRStartEndInfo A, LRStartEndInfo B) { return A.Begin < B.Begin; });
  851. size_t InstructionIndex = 0;
  852. size_t CurrentSegmentIndex = 0;
  853. SlotIndex CurrentIndex = LRPosInfo[0].Begin;
  854. std::map<MachineBasicBlock *, size_t> VisitedMBBs;
  855. size_t CurrentMBBIndex = 0;
  856. // This loop processes all the segments sequentially by starting at the
  857. // beginning slot index of the first segment, iterating through all the slot
  858. // indices before the end slot index of that segment (while checking for
  859. // overlaps with segments that start at greater slot indices). After hitting
  860. // that end index, the current segment index gets bumped until all segments
  861. // are processed or the max instruction count is hit, at which point
  862. // everything is just truncated.
  863. while (true) {
  864. // If the index that we are currently at is within the current segment and
  865. // we haven't hit the max instruction count, continue processing the current
  866. // segment.
  867. while (CurrentIndex <= LRPosInfo[CurrentSegmentIndex].End &&
  868. InstructionIndex < ModelMaxSupportedInstructionCount) {
  869. int CurrentOpcode = GetOpcode(CurrentIndex);
  870. // If the current machine instruction is null, skip it
  871. if (CurrentOpcode == -1) {
  872. // If we're currently at the last index in the SlotIndex analysis,
  873. // we can't go any further, so return from the function
  874. if (CurrentIndex >= LastIndex) {
  875. return;
  876. }
  877. CurrentIndex = CurrentIndex.getNextIndex();
  878. continue;
  879. }
  880. MachineBasicBlock *CurrentMBBReference = GetMBBReference(CurrentIndex);
  881. if (VisitedMBBs.count(CurrentMBBReference) == 0) {
  882. VisitedMBBs[CurrentMBBReference] = CurrentMBBIndex;
  883. ++CurrentMBBIndex;
  884. }
  885. extractMBBFrequency(CurrentIndex, InstructionIndex, VisitedMBBs,
  886. GetMBBFreq, CurrentMBBReference, RegallocRunner,
  887. MBBFreqIndex, MBBMappingIndex);
  888. // Current code assumes we're not going to get any disjointed segments
  889. assert(LRPosInfo[CurrentSegmentIndex].Begin <= CurrentIndex);
  890. RegallocRunner->getTensor<int64_t>(InstructionsIndex)[InstructionIndex] =
  891. CurrentOpcode < OpcodeValueCutoff ? CurrentOpcode : 0;
  892. // set value in the binary mapping matrix for the current instruction
  893. auto CurrentSegmentPosition = LRPosInfo[CurrentSegmentIndex].Pos;
  894. RegallocRunner->getTensor<int64_t>(
  895. InstructionsMappingIndex)[CurrentSegmentPosition *
  896. ModelMaxSupportedInstructionCount +
  897. InstructionIndex] = 1;
  898. // All of the segments are sorted based on the beginning slot index, but
  899. // this doesn't mean that the beginning slot index of the next segment is
  900. // after the end segment of the one being currently processed. This while
  901. // loop checks for overlapping segments and modifies the portion of the
  902. // column in the mapping matrix for the currently processed instruction
  903. // for the LR it is checking. Also make sure that the beginning of the
  904. // current segment we're checking for overlap in is less than the current
  905. // index, otherwise we're done checking overlaps.
  906. size_t OverlapCheckCurrentSegment = CurrentSegmentIndex + 1;
  907. while (OverlapCheckCurrentSegment < LRPosInfo.size() &&
  908. LRPosInfo[OverlapCheckCurrentSegment].Begin <= CurrentIndex) {
  909. auto OverlapCurrentSegmentPosition =
  910. LRPosInfo[OverlapCheckCurrentSegment].Pos;
  911. if (LRPosInfo[OverlapCheckCurrentSegment].End >= CurrentIndex) {
  912. RegallocRunner->getTensor<int64_t>(
  913. InstructionsMappingIndex)[OverlapCurrentSegmentPosition *
  914. ModelMaxSupportedInstructionCount +
  915. InstructionIndex] = 1;
  916. }
  917. ++OverlapCheckCurrentSegment;
  918. }
  919. ++InstructionIndex;
  920. if (CurrentIndex >= LastIndex) {
  921. return;
  922. }
  923. CurrentIndex = CurrentIndex.getNextIndex();
  924. }
  925. // If we've just finished processing the last segment or if we've hit the
  926. // maximum number of instructions, break out of the loop.
  927. if (CurrentSegmentIndex == LRPosInfo.size() - 1 ||
  928. InstructionIndex >= ModelMaxSupportedInstructionCount) {
  929. break;
  930. }
  931. // If the segments are not overlapping, we need to move to the beginning
  932. // index of the next segment to avoid having instructions not attached to
  933. // any register.
  934. if (LRPosInfo[CurrentSegmentIndex + 1].Begin >
  935. LRPosInfo[CurrentSegmentIndex].End) {
  936. CurrentIndex = LRPosInfo[CurrentSegmentIndex + 1].Begin;
  937. }
  938. ++CurrentSegmentIndex;
  939. }
  940. }
  941. void extractMBBFrequency(const SlotIndex CurrentIndex,
  942. const size_t CurrentInstructionIndex,
  943. std::map<MachineBasicBlock *, size_t> &VisitedMBBs,
  944. function_ref<float(SlotIndex)> GetMBBFreq,
  945. MachineBasicBlock *CurrentMBBReference,
  946. MLModelRunner *RegallocRunner, const int MBBFreqIndex,
  947. const int MBBMappingIndex) {
  948. size_t CurrentMBBIndex = VisitedMBBs[CurrentMBBReference];
  949. float CurrentMBBFreq = GetMBBFreq(CurrentIndex);
  950. if (CurrentMBBIndex < ModelMaxSupportedMBBCount) {
  951. RegallocRunner->getTensor<float>(MBBFreqIndex)[CurrentMBBIndex] =
  952. CurrentMBBFreq;
  953. RegallocRunner->getTensor<int64_t>(
  954. MBBMappingIndex)[CurrentInstructionIndex] = CurrentMBBIndex;
  955. }
  956. }
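// Blocks visited after the first ModelMaxSupportedMBBCount are silently
// skipped: their frequency is not recorded and the instruction keeps the
// zeroed mbb_mapping entry left by resetInputs().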
  957. // Development mode-specific implementations
  958. #ifdef LLVM_HAVE_TFLITE
  959. RegAllocEvictionAdvisorAnalysis *llvm::createDevelopmentModeAdvisor() {
  960. return new DevelopmentModeEvictionAdvisorAnalysis();
  961. }
  962. int64_t DevelopmentModeEvictAdvisor::tryFindEvictionCandidatePosition(
  963. const LiveInterval &VirtReg, const AllocationOrder &Order,
  964. unsigned OrderLimit, uint8_t CostPerUseLimit,
  965. const SmallVirtRegSet &FixedRegisters) const {
  966. int64_t Ret = 0;
  967. if (isa<ModelUnderTrainingRunner>(getRunner())) {
  968. Ret = MLEvictAdvisor::tryFindEvictionCandidatePosition(
  969. VirtReg, Order, OrderLimit, CostPerUseLimit, FixedRegisters);
  970. } else {
  971. MCRegister PhysReg = getDefaultAdvisor().tryFindEvictionCandidate(
  972. VirtReg, Order, CostPerUseLimit, FixedRegisters);
  973. // Find the index of the selected PhysReg. We need it for logging;
  974. // otherwise this is wasted cycles (but so would be starting development
  975. // mode without a model or logging).
  976. if (!PhysReg)
  977. Ret = CandidateVirtRegPos;
  978. else
  979. for (auto I = Order.begin(), E = Order.getOrderLimitEnd(OrderLimit);
  980. I != E; ++I, ++Ret)
  981. if (*I == PhysReg)
  982. break;
  983. }
  984. if (TrainingLog.empty())
  985. return Ret;
  986. // TODO(mtrofin): when we support optional rewards, this can go away. In the
  987. // meantime, we log the "pretend" reward (0) for the previous observation
  988. // before starting a new one.
  989. if (Log->hasObservationInProgress())
  990. Log->logReward<float>(0.0);
  991. Log->startObservation();
  992. size_t CurrentFeature = 0;
  993. size_t FeatureCount = EnableDevelopmentFeatures
  994. ? FeatureIDs::FeaturesWithDevelopmentCount
  995. : FeatureIDs::FeatureCount;
  996. for (; CurrentFeature < FeatureCount; ++CurrentFeature) {
  997. Log->logTensorValue(CurrentFeature,
  998. reinterpret_cast<const char *>(
  999. getRunner().getTensorUntyped(CurrentFeature)));
  1000. }
  1001. if (auto *MUTR = dyn_cast<ModelUnderTrainingRunner>(&getRunner()))
  1002. for (size_t I = 0; I < MUTR->extraOutputsForLoggingSpecs().size();
  1003. ++I, ++CurrentFeature)
  1004. Log->logTensorValue(
  1005. CurrentFeature,
  1006. reinterpret_cast<const char *>(MUTR->getUntypedExtraOutputValue(I)));
  1007. // The output is right after the features and the extra outputs
  1008. Log->logTensorValue(CurrentFeature, reinterpret_cast<const char *>(&Ret));
  1009. Log->endObservation();
  1010. return Ret;
  1011. }
  1012. bool RegAllocScoring::runOnMachineFunction(MachineFunction &MF) {
  1013. std::optional<float> CachedReward;
  1014. auto GetReward = [&]() {
  1015. if (!CachedReward)
  1016. CachedReward = static_cast<float>(
  1017. calculateRegAllocScore(MF, getAnalysis<MachineBlockFrequencyInfo>())
  1018. .getScore());
  1019. return *CachedReward;
  1020. };
  1021. getAnalysis<RegAllocEvictionAdvisorAnalysis>().logRewardIfNeeded(MF,
  1022. GetReward);
  1023. getAnalysis<RegAllocPriorityAdvisorAnalysis>().logRewardIfNeeded(MF,
  1024. GetReward);
  1025. return false;
  1026. }
  1027. #endif // #ifdef LLVM_HAVE_TFLITE
  1028. RegAllocEvictionAdvisorAnalysis *llvm::createReleaseModeAdvisor() {
  1029. return new ReleaseModeEvictionAdvisorAnalysis();
  1030. }
  1031. // In all cases except development mode, we don't need scoring.
  1032. #if !defined(LLVM_HAVE_TFLITE)
  1033. bool RegAllocScoring::runOnMachineFunction(MachineFunction &) { return false; }
  1034. #endif