//===- MLInlineAdvisor.cpp - machine learned InlineAdvisor ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the interface between the inliner and a learned model.
// It delegates model evaluation to either the AOT compiled model (the
// 'release' mode) or a runtime-loaded model (the 'development' case).
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/MLInlineAdvisor.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/FunctionPropertiesAnalysis.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/InlineModelFeatureMaps.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/MLModelRunner.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ReleaseModeModelRunner.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Config/config.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Path.h"
#include <limits>
#include <unordered_map>
#include <unordered_set>

using namespace llvm;

#if defined(LLVM_HAVE_TF_AOT_INLINERSIZEMODEL)
// codegen-ed file
#include "InlinerSizeModel.h" // NOLINT

std::unique_ptr<InlineAdvisor>
llvm::getReleaseModeAdvisor(Module &M, ModuleAnalysisManager &MAM) {
  auto AOTRunner =
      std::make_unique<ReleaseModeModelRunner<llvm::InlinerSizeModel>>(
          M.getContext(), FeatureNameMap, DecisionName);
  return std::make_unique<MLInlineAdvisor>(M, MAM, std::move(AOTRunner));
}
#endif

#define DEBUG_TYPE "inline-ml"

static cl::opt<float> SizeIncreaseThreshold(
    "ml-advisor-size-increase-threshold", cl::Hidden,
    cl::desc("Maximum factor by which expected native size may increase before "
             "blocking any further inlining."),
    cl::init(2.0));

// clang-format off
const std::array<std::string, NumberOfFeatures> llvm::FeatureNameMap{
// InlineCost features - these must come first
#define POPULATE_NAMES(INDEX_NAME, NAME) NAME,
  INLINE_COST_FEATURE_ITERATOR(POPULATE_NAMES)
#undef POPULATE_NAMES

// Non-cost features
#define POPULATE_NAMES(INDEX_NAME, NAME, COMMENT) NAME,
  INLINE_FEATURE_ITERATOR(POPULATE_NAMES)
#undef POPULATE_NAMES
};
// clang-format on

const char *const llvm::DecisionName = "inlining_decision";
const char *const llvm::DefaultDecisionName = "inlining_default";
const char *const llvm::RewardName = "delta_size";

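// Return \p I as a call site if it is a direct call to a function that has a
// definition (i.e. a call the inliner could consider); otherwise nullptr.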
CallBase *getInlinableCS(Instruction &I) {
  if (auto *CS = dyn_cast<CallBase>(&I))
    if (Function *Callee = CS->getCalledFunction()) {
      if (!Callee->isDeclaration()) {
        return CS;
      }
    }
  return nullptr;
}

MLInlineAdvisor::MLInlineAdvisor(Module &M, ModuleAnalysisManager &MAM,
                                 std::unique_ptr<MLModelRunner> Runner)
    : InlineAdvisor(
          M, MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager()),
      ModelRunner(std::move(Runner)),
      CG(MAM.getResult<LazyCallGraphAnalysis>(M)),
      InitialIRSize(getModuleIRSize()), CurrentIRSize(InitialIRSize) {
  assert(ModelRunner);

  // Extract the 'call site height' feature - the position of a call site
  // relative to the farthest statically reachable SCC node. We don't mutate
  // this value while inlining happens. Empirically, this feature proved
  // critical in behavioral cloning - i.e. training a model to mimic the manual
  // heuristic's decisions - and, thus, equally important for training for
  // improvement.
  CallGraph CGraph(M);
  for (auto I = scc_begin(&CGraph); !I.isAtEnd(); ++I) {
    const std::vector<CallGraphNode *> &CGNodes = *I;
    unsigned Level = 0;
    for (auto *CGNode : CGNodes) {
      Function *F = CGNode->getFunction();
      if (!F || F->isDeclaration())
        continue;
      for (auto &I : instructions(F)) {
        if (auto *CS = getInlinableCS(I)) {
          auto *Called = CS->getCalledFunction();
          auto Pos = FunctionLevels.find(&CG.get(*Called));
          // In bottom-up traversal, an inlinable callee is either in the same
          // SCC, or in a previously visited SCC. So not finding its level
          // means we haven't visited it yet, meaning it's in this SCC.
          if (Pos == FunctionLevels.end())
            continue;
          Level = std::max(Level, Pos->second + 1);
        }
      }
    }
    for (auto *CGNode : CGNodes) {
      Function *F = CGNode->getFunction();
      if (F && !F->isDeclaration())
        FunctionLevels[&CG.get(*F)] = Level;
    }
  }
  for (auto KVP : FunctionLevels) {
    AllNodes.insert(KVP.first);
    EdgeCount += getLocalCalls(KVP.first->getFunction());
  }
  NodeCount = AllNodes.size();
}

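// The SCC level of \p F as computed at construction; functions not found in
// the lazy call graph default to level 0.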
unsigned MLInlineAdvisor::getInitialFunctionLevel(const Function &F) const {
  return CG.lookup(F) ? FunctionLevels.at(CG.lookup(F)) : 0;
}

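// Called at the start of each inliner run: refresh NodeCount and EdgeCount to
// reflect changes made by passes that ran since the previous run.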
void MLInlineAdvisor::onPassEntry() {
  // Function passes executed between InlinerPass runs may have changed the
  // module-wide features.
  // The cgscc pass manager rules are such that:
  // - if a pass leads to merging SCCs, then the pipeline is restarted on the
  //   merged SCC;
  // - if a pass leads to splitting the SCC, then we continue with one of the
  //   splits.
  // This means that NodesInLastSCC is a superset (not necessarily strict) of
  // the nodes that subsequent passes would have processed.
  // In addition, if new nodes were created by a pass (e.g. CoroSplit), they'd
  // be adjacent to nodes in the last SCC, so we just need to check the
  // boundary of NodesInLastSCC for nodes we haven't seen yet. We don't care
  // about the nature of the edge (call or ref).
  NodeCount -= static_cast<int64_t>(NodesInLastSCC.size());
  while (!NodesInLastSCC.empty()) {
    const auto *N = NodesInLastSCC.front();
    NodesInLastSCC.pop_front();
    // The Function wrapped by N could have been deleted since we last saw it.
    if (N->isDead()) {
      assert(!N->getFunction().isDeclaration());
      continue;
    }
    ++NodeCount;
    EdgeCount += getLocalCalls(N->getFunction());
    for (const auto &E : *(*N)) {
      const auto *AdjNode = &E.getNode();
      assert(!AdjNode->isDead() && !AdjNode->getFunction().isDeclaration());
      auto I = AllNodes.insert(AdjNode);
      if (I.second)
        NodesInLastSCC.push_back(AdjNode);
    }
  }
  EdgeCount -= EdgesOfLastSeenNodes;
  EdgesOfLastSeenNodes = 0;
}

void MLInlineAdvisor::onPassExit(LazyCallGraph::SCC *LastSCC) {
  if (!LastSCC)
    return;
  // Keep track of the nodes and edges we last saw. Then, in onPassEntry, we
  // update the node count and edge count from the subset of these nodes that
  // survived.
  assert(NodesInLastSCC.empty());
  assert(NodeCount >= LastSCC->size());
  EdgesOfLastSeenNodes = 0;
  for (const auto &N : *LastSCC) {
    assert(!N.isDead());
    EdgesOfLastSeenNodes += getLocalCalls(N.getFunction());
    NodesInLastSCC.push_back(&N);
  }
  assert(EdgeCount >= EdgesOfLastSeenNodes);
}

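// Number of direct calls from \p F to functions defined in this module.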
int64_t MLInlineAdvisor::getLocalCalls(Function &F) {
  return FAM.getResult<FunctionPropertiesAnalysis>(F)
      .DirectCallsToDefinedFunctions;
}

// Update the internal state of the advisor, and force invalidate feature
// analysis. Currently, we maintain minimal (and very simple) global state - the
// number of functions and the number of static calls. We also keep track of the
// total IR size in this module, to stop misbehaving policies at a certain bloat
// factor (SizeIncreaseThreshold).
void MLInlineAdvisor::onSuccessfulInlining(const MLInlineAdvice &Advice,
                                           bool CalleeWasDeleted) {
  assert(!ForceStop);
  Function *Caller = Advice.getCaller();
  Function *Callee = Advice.getCallee();

  // The caller features aren't valid anymore.
  {
    PreservedAnalyses PA = PreservedAnalyses::all();
    PA.abandon<FunctionPropertiesAnalysis>();
    FAM.invalidate(*Caller, PA);
  }
  int64_t IRSizeAfter =
      getIRSize(*Caller) + (CalleeWasDeleted ? 0 : Advice.CalleeIRSize);
  CurrentIRSize += IRSizeAfter - (Advice.CallerIRSize + Advice.CalleeIRSize);
  if (CurrentIRSize > SizeIncreaseThreshold * InitialIRSize)
    ForceStop = true;

  // We can delta-update module-wide features. We know the inlining only changed
  // the caller, and maybe the callee (by deleting the latter).
  // Nodes are simple to update.
  // For edges, we 'forget' the edges that the caller and callee used to have
  // before inlining, and add back what they currently have together.
  int64_t NewCallerAndCalleeEdges =
      FAM.getResult<FunctionPropertiesAnalysis>(*Caller)
          .DirectCallsToDefinedFunctions;
  if (CalleeWasDeleted)
    --NodeCount;
  else
    NewCallerAndCalleeEdges +=
        FAM.getResult<FunctionPropertiesAnalysis>(*Callee)
            .DirectCallsToDefinedFunctions;
  EdgeCount += (NewCallerAndCalleeEdges - Advice.CallerAndCalleeEdges);
  assert(CurrentIRSize >= 0 && EdgeCount >= 0 && NodeCount >= 0);
}

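// Total IR size of the module: the sum of getIRSize over all defined
// functions.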
int64_t MLInlineAdvisor::getModuleIRSize() const {
  int64_t Ret = 0;
  for (auto &F : M)
    if (!F.isDeclaration())
      Ret += getIRSize(F);
  return Ret;
}

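// Main advice entry point: handle the "never inline", recursive, ForceStop and
// mandatory cases directly; otherwise populate the feature tensors and ask the
// model for a decision.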
std::unique_ptr<InlineAdvice> MLInlineAdvisor::getAdviceImpl(CallBase &CB) {
  auto &Caller = *CB.getCaller();
  auto &Callee = *CB.getCalledFunction();

  auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
    return FAM.getResult<AssumptionAnalysis>(F);
  };
  auto &TIR = FAM.getResult<TargetIRAnalysis>(Callee);
  auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(Caller);

  auto MandatoryKind = InlineAdvisor::getMandatoryKind(CB, FAM, ORE);
  // If this is a "never inline" case, there won't be any changes to internal
  // state we need to track, so we can just return the base InlineAdvice, which
  // will do nothing interesting.
  // Same thing if this is a recursive case.
  if (MandatoryKind == InlineAdvisor::MandatoryInliningKind::Never ||
      &Caller == &Callee)
    return getMandatoryAdvice(CB, false);

  bool Mandatory =
      MandatoryKind == InlineAdvisor::MandatoryInliningKind::Always;

  // If we need to stop, we no longer want to track any state changes, so we
  // just return the base InlineAdvice, which acts as a no-op.
  if (ForceStop) {
    ORE.emit([&] {
      return OptimizationRemarkMissed(DEBUG_TYPE, "ForceStop", &CB)
             << "Won't attempt inlining because module size grew too much.";
    });
    return std::make_unique<InlineAdvice>(this, CB, ORE, Mandatory);
  }

  int CostEstimate = 0;
  if (!Mandatory) {
    auto IsCallSiteInlinable =
        llvm::getInliningCostEstimate(CB, TIR, GetAssumptionCache);
    if (!IsCallSiteInlinable) {
      // We can't inline this for correctness reasons, so return the base
      // InlineAdvice, as we don't care about tracking any state changes (which
      // won't happen).
      return std::make_unique<InlineAdvice>(this, CB, ORE, false);
    }
    CostEstimate = *IsCallSiteInlinable;
  }

  const auto CostFeatures =
      llvm::getInliningCostFeatures(CB, TIR, GetAssumptionCache);
  if (!CostFeatures) {
    return std::make_unique<InlineAdvice>(this, CB, ORE, false);
  }

  if (Mandatory)
    return getMandatoryAdvice(CB, true);

  auto NrCtantParams = 0;
  for (auto I = CB.arg_begin(), E = CB.arg_end(); I != E; ++I) {
    NrCtantParams += (isa<Constant>(*I));
  }

  auto &CallerBefore = FAM.getResult<FunctionPropertiesAnalysis>(Caller);
  auto &CalleeBefore = FAM.getResult<FunctionPropertiesAnalysis>(Callee);

  *ModelRunner->getTensor<int64_t>(FeatureIndex::CalleeBasicBlockCount) =
      CalleeBefore.BasicBlockCount;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::CallSiteHeight) =
      getInitialFunctionLevel(Caller);
  *ModelRunner->getTensor<int64_t>(FeatureIndex::NodeCount) = NodeCount;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::NrCtantParams) = NrCtantParams;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::EdgeCount) = EdgeCount;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::CallerUsers) =
      CallerBefore.Uses;
  *ModelRunner->getTensor<int64_t>(
      FeatureIndex::CallerConditionallyExecutedBlocks) =
      CallerBefore.BlocksReachedFromConditionalInstruction;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::CallerBasicBlockCount) =
      CallerBefore.BasicBlockCount;
  *ModelRunner->getTensor<int64_t>(
      FeatureIndex::CalleeConditionallyExecutedBlocks) =
      CalleeBefore.BlocksReachedFromConditionalInstruction;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::CalleeUsers) =
      CalleeBefore.Uses;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::CostEstimate) = CostEstimate;

  // Add the cost features.
  for (size_t I = 0;
       I < static_cast<size_t>(InlineCostFeatureIndex::NumberOfFeatures); ++I) {
    *ModelRunner->getTensor<int64_t>(inlineCostFeatureToMlFeature(
        static_cast<InlineCostFeatureIndex>(I))) = CostFeatures->at(I);
  }

  return getAdviceFromModel(CB, ORE);
}

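// Default (non-mandatory) path: evaluate the model runner and interpret its
// int64_t output as the inlining decision.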
std::unique_ptr<MLInlineAdvice>
MLInlineAdvisor::getAdviceFromModel(CallBase &CB,
                                    OptimizationRemarkEmitter &ORE) {
  return std::make_unique<MLInlineAdvice>(
      this, CB, ORE, static_cast<bool>(ModelRunner->evaluate<int64_t>()));
}

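// Shared handling of the "always inline" and "never inline" cases.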
std::unique_ptr<InlineAdvice> MLInlineAdvisor::getMandatoryAdvice(CallBase &CB,
                                                                  bool Advice) {
  // Make sure we track inlinings in all cases - mandatory or not.
  if (Advice && !ForceStop)
    return getMandatoryAdviceImpl(CB);

  // If this is a "never inline" case, there won't be any changes to internal
  // state we need to track, so we can just return the base InlineAdvice, which
  // will do nothing interesting.
  // Same if we are forced to stop - we don't track anymore.
  return std::make_unique<InlineAdvice>(this, CB, getCallerORE(CB), Advice);
}

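// Mandatory inlinings still produce an MLInlineAdvice so that the advisor's
// bookkeeping (onSuccessfulInlining) runs when they are recorded.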
std::unique_ptr<MLInlineAdvice>
MLInlineAdvisor::getMandatoryAdviceImpl(CallBase &CB) {
  return std::make_unique<MLInlineAdvice>(this, CB, getCallerORE(CB), true);
}

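// Append the callee name, the full feature vector, and the final decision to
// the given optimization remark.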
void MLInlineAdvice::reportContextForRemark(
    DiagnosticInfoOptimizationBase &OR) {
  using namespace ore;
  OR << NV("Callee", Callee->getName());
  for (size_t I = 0; I < NumberOfFeatures; ++I)
    OR << NV(FeatureNameMap[I],
             *getAdvisor()->getModelRunner().getTensor<int64_t>(I));
  OR << NV("ShouldInline", isInliningRecommended());
}

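// The record*Impl callbacks below emit an optimization remark describing the
// outcome and, on successful inlining, update the advisor's module-wide state.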
void MLInlineAdvice::recordInliningImpl() {
  ORE.emit([&]() {
    OptimizationRemark R(DEBUG_TYPE, "InliningSuccess", DLoc, Block);
    reportContextForRemark(R);
    return R;
  });
  getAdvisor()->onSuccessfulInlining(*this, /*CalleeWasDeleted*/ false);
}

void MLInlineAdvice::recordInliningWithCalleeDeletedImpl() {
  ORE.emit([&]() {
    OptimizationRemark R(DEBUG_TYPE, "InliningSuccessWithCalleeDeleted", DLoc,
                         Block);
    reportContextForRemark(R);
    return R;
  });
  getAdvisor()->onSuccessfulInlining(*this, /*CalleeWasDeleted*/ true);
}

void MLInlineAdvice::recordUnsuccessfulInliningImpl(
    const InlineResult &Result) {
  ORE.emit([&]() {
    OptimizationRemarkMissed R(DEBUG_TYPE, "InliningAttemptedAndUnsuccessful",
                               DLoc, Block);
    reportContextForRemark(R);
    return R;
  });
}

void MLInlineAdvice::recordUnattemptedInliningImpl() {
  ORE.emit([&]() {
    OptimizationRemarkMissed R(DEBUG_TYPE, "InliningNotAttempted", DLoc, Block);
    reportContextForRemark(R);
    return R;
  });
}