//===- MLInlineAdvisor.cpp - machine learned InlineAdvisor ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the interface between the inliner and a learned model.
// It delegates model evaluation to either the AOT compiled model (the
// 'release' mode) or a runtime-loaded model (the 'development' case).
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/MLInlineAdvisor.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/FunctionPropertiesAnalysis.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/InlineModelFeatureMaps.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MLModelRunner.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/CommandLine.h"

using namespace llvm;

#if defined(LLVM_HAVE_TF_AOT_INLINERSIZEMODEL)
#include "llvm/Analysis/ReleaseModeModelRunner.h"
// codegen-ed file
#include "InlinerSizeModel.h" // NOLINT

std::unique_ptr<InlineAdvisor>
llvm::getReleaseModeAdvisor(Module &M, ModuleAnalysisManager &MAM) {
  auto AOTRunner =
      std::make_unique<ReleaseModeModelRunner<llvm::InlinerSizeModel>>(
          M.getContext(), FeatureMap, DecisionName);
  return std::make_unique<MLInlineAdvisor>(M, MAM, std::move(AOTRunner));
}
#endif

#define DEBUG_TYPE "inline-ml"

static cl::opt<float> SizeIncreaseThreshold(
    "ml-advisor-size-increase-threshold", cl::Hidden,
    cl::desc("Maximum factor by which expected native size may increase before "
             "blocking any further inlining."),
    cl::init(2.0));

static cl::opt<bool> KeepFPICache(
    "ml-advisor-keep-fpi-cache", cl::Hidden,
    cl::desc(
        "For test - keep the ML Inline advisor's FunctionPropertiesInfo cache"),
    cl::init(false));

// clang-format off
const std::array<TensorSpec, NumberOfFeatures> llvm::FeatureMap{
#define POPULATE_NAMES(_, NAME) TensorSpec::createSpec<int64_t>(NAME, {1} ),
// InlineCost features - these must come first
  INLINE_COST_FEATURE_ITERATOR(POPULATE_NAMES)
#undef POPULATE_NAMES

// Non-cost features
#define POPULATE_NAMES(_, NAME, __) TensorSpec::createSpec<int64_t>(NAME, {1} ),
  INLINE_FEATURE_ITERATOR(POPULATE_NAMES)
#undef POPULATE_NAMES
};
// clang-format on
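
// Note: the InlineCost features must occupy the first slots of FeatureMap:
// getAdviceImpl below maps each InlineCostFeatureIndex to its tensor via
// inlineCostFeatureToMlFeature, which assumes exactly this layout.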

const char *const llvm::DecisionName = "inlining_decision";
const char *const llvm::DefaultDecisionName = "inlining_default";
const char *const llvm::RewardName = "delta_size";
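
// Returns the call site if \p I is a direct call to a function defined in
// this module (i.e. one the inliner could act on), nullptr otherwise.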
CallBase *getInlinableCS(Instruction &I) {
  if (auto *CS = dyn_cast<CallBase>(&I))
    if (Function *Callee = CS->getCalledFunction()) {
      if (!Callee->isDeclaration()) {
        return CS;
      }
    }
  return nullptr;
}

MLInlineAdvisor::MLInlineAdvisor(Module &M, ModuleAnalysisManager &MAM,
                                 std::unique_ptr<MLModelRunner> Runner)
    : InlineAdvisor(
          M, MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager()),
      ModelRunner(std::move(Runner)),
      CG(MAM.getResult<LazyCallGraphAnalysis>(M)),
      InitialIRSize(getModuleIRSize()), CurrentIRSize(InitialIRSize) {
  assert(ModelRunner);

  // Extract the 'call site height' feature - the position of a call site
  // relative to the farthest statically reachable SCC node. We don't mutate
  // this value while inlining happens. Empirically, this feature proved
  // critical in behavioral cloning - i.e. training a model to mimic the manual
  // heuristic's decisions - and, thus, equally important for training for
  // improvement.
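  // Illustrative example: if main() calls f() and f() calls g() (no cycles),
  // the bottom-up SCC walk below visits g, f, then main, assigning g level 0,
  // f level 1 (its callee g is at 0), and main level 2.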
  CallGraph CGraph(M);
  for (auto I = scc_begin(&CGraph); !I.isAtEnd(); ++I) {
    const std::vector<CallGraphNode *> &CGNodes = *I;
    unsigned Level = 0;
    for (auto *CGNode : CGNodes) {
      Function *F = CGNode->getFunction();
      if (!F || F->isDeclaration())
        continue;
      for (auto &I : instructions(F)) {
        if (auto *CS = getInlinableCS(I)) {
          auto *Called = CS->getCalledFunction();
          auto Pos = FunctionLevels.find(&CG.get(*Called));
          // In bottom-up traversal, an inlinable callee is either in the
          // same SCC, or in an already-visited SCC. So not finding its
          // level means we haven't visited it yet, meaning it's in this SCC.
          if (Pos == FunctionLevels.end())
            continue;
          Level = std::max(Level, Pos->second + 1);
        }
      }
    }
    for (auto *CGNode : CGNodes) {
      Function *F = CGNode->getFunction();
      if (F && !F->isDeclaration())
        FunctionLevels[&CG.get(*F)] = Level;
    }
  }
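
  // Seed the module-wide graph state: every defined function is a node, and
  // every direct call to a defined function is an edge.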
  for (auto KVP : FunctionLevels) {
    AllNodes.insert(KVP.first);
    EdgeCount += getLocalCalls(KVP.first->getFunction());
  }
  NodeCount = AllNodes.size();
}

unsigned MLInlineAdvisor::getInitialFunctionLevel(const Function &F) const {
  return CG.lookup(F) ? FunctionLevels.at(CG.lookup(F)) : 0;
}

void MLInlineAdvisor::onPassEntry(LazyCallGraph::SCC *LastSCC) {
  if (!LastSCC || ForceStop)
    return;
  FPICache.clear();
  // Function passes executed between InlinerPass runs may have changed the
  // module-wide features.
  // The cgscc pass manager rules are such that:
  // - if a pass leads to merging SCCs, then the pipeline is restarted on the
  // merged SCC
  // - if a pass leads to splitting the SCC, then we continue with one of the
  // splits
  // This means that the NodesInLastSCC is a superset (not strict) of the nodes
  // that subsequent passes would have processed
  // - in addition, if new Nodes were created by a pass (e.g. CoroSplit),
  // they'd be adjacent to Nodes in the last SCC. So we just need to check the
  // boundary of Nodes in NodesInLastSCC for Nodes we haven't seen. We don't
  // care about the nature of the Edge (call or ref).
  NodeCount -= static_cast<int64_t>(NodesInLastSCC.size());
  while (!NodesInLastSCC.empty()) {
    const auto *N = *NodesInLastSCC.begin();
    NodesInLastSCC.erase(N);
    // The Function wrapped by N could have been deleted since we last saw it.
    if (N->isDead()) {
      assert(!N->getFunction().isDeclaration());
      continue;
    }
    ++NodeCount;
    EdgeCount += getLocalCalls(N->getFunction());
    for (const auto &E : *(*N)) {
      const auto *AdjNode = &E.getNode();
      assert(!AdjNode->isDead() && !AdjNode->getFunction().isDeclaration());
      auto I = AllNodes.insert(AdjNode);
      if (I.second)
        NodesInLastSCC.insert(AdjNode);
    }
  }
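  // EdgeCount still includes these nodes' edges as counted at the last
  // onPassExit (that total is EdgesOfLastSeenNodes); the loop above just
  // re-counted the survivors' edges, so drop the stale contribution.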
  EdgeCount -= EdgesOfLastSeenNodes;
  EdgesOfLastSeenNodes = 0;

  // (Re)use NodesInLastSCC to remember the nodes in the SCC right now,
  // in case the SCC is split before onPassExit and some nodes are split out.
  assert(NodesInLastSCC.empty());
  for (const auto &N : *LastSCC)
    NodesInLastSCC.insert(&N);
}

void MLInlineAdvisor::onPassExit(LazyCallGraph::SCC *LastSCC) {
  // No need to keep this around - function passes will invalidate it.
  if (!KeepFPICache)
    FPICache.clear();
  if (!LastSCC || ForceStop)
    return;
  // Keep track of the nodes and edges we last saw. Then, in onPassEntry,
  // we update the node count and edge count from the subset of these nodes
  // that survived.
  EdgesOfLastSeenNodes = 0;

  // Check on nodes that were in the SCC at onPassEntry.
  for (auto I = NodesInLastSCC.begin(); I != NodesInLastSCC.end();) {
    if ((*I)->isDead())
      NodesInLastSCC.erase(*I++);
    else
      EdgesOfLastSeenNodes += getLocalCalls((*I++)->getFunction());
  }

  // Check on nodes that may have been added to the SCC since.
  for (const auto &N : *LastSCC) {
    assert(!N.isDead());
    auto I = NodesInLastSCC.insert(&N);
    if (I.second)
      EdgesOfLastSeenNodes += getLocalCalls(N.getFunction());
  }
  assert(NodeCount >= NodesInLastSCC.size());
  assert(EdgeCount >= EdgesOfLastSeenNodes);
}
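
// "Local" calls are direct calls to functions defined in this module; they
// are the edges of the call graph that the advisor's EdgeCount tracks.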
int64_t MLInlineAdvisor::getLocalCalls(Function &F) {
  return getCachedFPI(F).DirectCallsToDefinedFunctions;
}

// Update the internal state of the advisor, and force invalidate feature
// analysis. Currently, we maintain minimal (and very simple) global state - the
// number of functions and the number of static calls. We also keep track of the
// total IR size in this module, to stop misbehaving policies at a certain bloat
// factor (SizeIncreaseThreshold).
void MLInlineAdvisor::onSuccessfulInlining(const MLInlineAdvice &Advice,
                                           bool CalleeWasDeleted) {
  assert(!ForceStop);
  Function *Caller = Advice.getCaller();
  Function *Callee = Advice.getCallee();
  // The caller features aren't valid anymore.
  {
    PreservedAnalyses PA = PreservedAnalyses::all();
    PA.abandon<FunctionPropertiesAnalysis>();
    PA.abandon<DominatorTreeAnalysis>();
    PA.abandon<LoopAnalysis>();
    FAM.invalidate(*Caller, PA);
  }
  Advice.updateCachedCallerFPI(FAM);
  int64_t IRSizeAfter =
      getIRSize(*Caller) + (CalleeWasDeleted ? 0 : Advice.CalleeIRSize);
  CurrentIRSize += IRSizeAfter - (Advice.CallerIRSize + Advice.CalleeIRSize);
  if (CurrentIRSize > SizeIncreaseThreshold * InitialIRSize)
    ForceStop = true;

  // We can delta-update module-wide features. We know the inlining only changed
  // the caller, and maybe the callee (by deleting the latter).
  // Nodes are simple to update.
  // For edges, we 'forget' the edges that the caller and callee used to have
  // before inlining, and add back what they currently have together.
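  // E.g., if pre-inlining the caller had 3 local calls and the callee 2
  // (CallerAndCalleeEdges == 5), and post-inlining the caller has 4 while the
  // callee was deleted, EdgeCount changes by 4 - 5 = -1.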
  int64_t NewCallerAndCalleeEdges =
      getCachedFPI(*Caller).DirectCallsToDefinedFunctions;
  if (CalleeWasDeleted)
    --NodeCount;
  else
    NewCallerAndCalleeEdges +=
        getCachedFPI(*Callee).DirectCallsToDefinedFunctions;
  EdgeCount += (NewCallerAndCalleeEdges - Advice.CallerAndCalleeEdges);
  assert(CurrentIRSize >= 0 && EdgeCount >= 0 && NodeCount >= 0);
}

int64_t MLInlineAdvisor::getModuleIRSize() const {
  int64_t Ret = 0;
  for (auto &F : M)
    if (!F.isDeclaration())
      Ret += getIRSize(F);
  return Ret;
}
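
// Memoizing wrapper around FunctionPropertiesAnalysis: compute the result via
// the FAM on first access, then serve it from FPICache until the cache is
// cleared (see onPassEntry/onPassExit).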
FunctionPropertiesInfo &MLInlineAdvisor::getCachedFPI(Function &F) const {
  auto InsertPair =
      FPICache.insert(std::make_pair(&F, FunctionPropertiesInfo()));
  if (!InsertPair.second)
    return InsertPair.first->second;
  InsertPair.first->second = FAM.getResult<FunctionPropertiesAnalysis>(F);
  return InsertPair.first->second;
}

std::unique_ptr<InlineAdvice> MLInlineAdvisor::getAdviceImpl(CallBase &CB) {
  if (auto Skip = getSkipAdviceIfUnreachableCallsite(CB))
    return Skip;

  auto &Caller = *CB.getCaller();
  auto &Callee = *CB.getCalledFunction();
  auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
    return FAM.getResult<AssumptionAnalysis>(F);
  };
  auto &TIR = FAM.getResult<TargetIRAnalysis>(Callee);
  auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(Caller);

  auto MandatoryKind = InlineAdvisor::getMandatoryKind(CB, FAM, ORE);
  // If this is a "never inline" case, there won't be any changes to internal
  // state we need to track, so we can just return the base InlineAdvice, which
  // will do nothing interesting.
  // Same thing if this is a recursive case.
  if (MandatoryKind == InlineAdvisor::MandatoryInliningKind::Never ||
      &Caller == &Callee)
    return getMandatoryAdvice(CB, false);

  bool Mandatory =
      MandatoryKind == InlineAdvisor::MandatoryInliningKind::Always;

  // If we need to stop, we no longer want to track any state changes, so we
  // just return the base InlineAdvice, which acts as a noop.
  if (ForceStop) {
    ORE.emit([&] {
      return OptimizationRemarkMissed(DEBUG_TYPE, "ForceStop", &CB)
             << "Won't attempt inlining because module size grew too much.";
    });
    return std::make_unique<InlineAdvice>(this, CB, ORE, Mandatory);
  }

  int CostEstimate = 0;
  if (!Mandatory) {
    auto IsCallSiteInlinable =
        llvm::getInliningCostEstimate(CB, TIR, GetAssumptionCache);
    if (!IsCallSiteInlinable) {
      // We can't inline this for correctness reasons, so return the base
      // InlineAdvice, as we don't care about tracking any state changes (which
      // won't happen).
      return std::make_unique<InlineAdvice>(this, CB, ORE, false);
    }
    CostEstimate = *IsCallSiteInlinable;
  }

  const auto CostFeatures =
      llvm::getInliningCostFeatures(CB, TIR, GetAssumptionCache);
  if (!CostFeatures) {
    return std::make_unique<InlineAdvice>(this, CB, ORE, false);
  }

  if (Mandatory)
    return getMandatoryAdvice(CB, true);
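
  // Count the call site's constant actual arguments; this feeds the
  // NrCtantParams feature below.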
  auto NrCtantParams = 0;
  for (auto I = CB.arg_begin(), E = CB.arg_end(); I != E; ++I) {
    NrCtantParams += (isa<Constant>(*I));
  }

  auto &CallerBefore = getCachedFPI(Caller);
  auto &CalleeBefore = getCachedFPI(Callee);

  *ModelRunner->getTensor<int64_t>(FeatureIndex::CalleeBasicBlockCount) =
      CalleeBefore.BasicBlockCount;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::CallSiteHeight) =
      getInitialFunctionLevel(Caller);
  *ModelRunner->getTensor<int64_t>(FeatureIndex::NodeCount) = NodeCount;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::NrCtantParams) = NrCtantParams;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::EdgeCount) = EdgeCount;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::CallerUsers) =
      CallerBefore.Uses;
  *ModelRunner->getTensor<int64_t>(
      FeatureIndex::CallerConditionallyExecutedBlocks) =
      CallerBefore.BlocksReachedFromConditionalInstruction;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::CallerBasicBlockCount) =
      CallerBefore.BasicBlockCount;
  *ModelRunner->getTensor<int64_t>(
      FeatureIndex::CalleeConditionallyExecutedBlocks) =
      CalleeBefore.BlocksReachedFromConditionalInstruction;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::CalleeUsers) =
      CalleeBefore.Uses;
  *ModelRunner->getTensor<int64_t>(FeatureIndex::CostEstimate) = CostEstimate;

  // Add the cost features
  for (size_t I = 0;
       I < static_cast<size_t>(InlineCostFeatureIndex::NumberOfFeatures);
       ++I) {
    *ModelRunner->getTensor<int64_t>(inlineCostFeatureToMlFeature(
        static_cast<InlineCostFeatureIndex>(I))) = CostFeatures->at(I);
  }

  return getAdviceFromModel(CB, ORE);
}
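
// Run the model on the features populated above; its int64 output is the
// inlining decision (nonzero means "inline").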
std::unique_ptr<MLInlineAdvice>
MLInlineAdvisor::getAdviceFromModel(CallBase &CB,
                                    OptimizationRemarkEmitter &ORE) {
  return std::make_unique<MLInlineAdvice>(
      this, CB, ORE, static_cast<bool>(ModelRunner->evaluate<int64_t>()));
}
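
// Call sites in blocks unreachable from the caller's entry are dead code, so
// they get a "don't inline" advice with no state tracking; returns nullptr if
// the call site is reachable.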
std::unique_ptr<InlineAdvice>
MLInlineAdvisor::getSkipAdviceIfUnreachableCallsite(CallBase &CB) {
  if (!FAM.getResult<DominatorTreeAnalysis>(*CB.getCaller())
           .isReachableFromEntry(CB.getParent()))
    return std::make_unique<InlineAdvice>(this, CB, getCallerORE(CB), false);
  return nullptr;
}

std::unique_ptr<InlineAdvice> MLInlineAdvisor::getMandatoryAdvice(CallBase &CB,
                                                                  bool Advice) {
  // Make sure we track inlinings in all cases - mandatory or not.
  if (auto Skip = getSkipAdviceIfUnreachableCallsite(CB))
    return Skip;
  if (Advice && !ForceStop)
    return getMandatoryAdviceImpl(CB);

  // If this is a "never inline" case, there won't be any changes to internal
  // state we need to track, so we can just return the base InlineAdvice, which
  // will do nothing interesting.
  // Same if we are forced to stop - we no longer track state.
  return std::make_unique<InlineAdvice>(this, CB, getCallerORE(CB), Advice);
}

std::unique_ptr<MLInlineAdvice>
MLInlineAdvisor::getMandatoryAdviceImpl(CallBase &CB) {
  return std::make_unique<MLInlineAdvice>(this, CB, getCallerORE(CB), true);
}

void MLInlineAdvisor::print(raw_ostream &OS) const {
  OS << "[MLInlineAdvisor] Nodes: " << NodeCount << " Edges: " << EdgeCount
     << " EdgesOfLastSeenNodes: " << EdgesOfLastSeenNodes << "\n";
  OS << "[MLInlineAdvisor] FPI:\n";
  for (auto I : FPICache) {
    OS << I.first->getName() << ":\n";
    I.second.print(OS);
    OS << "\n";
  }
  OS << "\n";
}
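
// Snapshot the pre-inlining caller/callee IR sizes and local call (edge)
// counts so onSuccessfulInlining can compute deltas afterwards. When the
// advisor is force-stopped, these values aren't needed, so the (potentially
// costly) computations are skipped in favor of zeros.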
MLInlineAdvice::MLInlineAdvice(MLInlineAdvisor *Advisor, CallBase &CB,
                               OptimizationRemarkEmitter &ORE,
                               bool Recommendation)
    : InlineAdvice(Advisor, CB, ORE, Recommendation),
      CallerIRSize(Advisor->isForcedToStop() ? 0 : Advisor->getIRSize(*Caller)),
      CalleeIRSize(Advisor->isForcedToStop() ? 0 : Advisor->getIRSize(*Callee)),
      CallerAndCalleeEdges(Advisor->isForcedToStop()
                               ? 0
                               : (Advisor->getLocalCalls(*Caller) +
                                  Advisor->getLocalCalls(*Callee))),
      PreInlineCallerFPI(Advisor->getCachedFPI(*Caller)) {
  if (Recommendation)
    FPU.emplace(Advisor->getCachedFPI(*getCaller()), CB);
}

void MLInlineAdvice::reportContextForRemark(
    DiagnosticInfoOptimizationBase &OR) {
  using namespace ore;
  OR << NV("Callee", Callee->getName());
  for (size_t I = 0; I < NumberOfFeatures; ++I)
    OR << NV(FeatureMap[I].name(),
             *getAdvisor()->getModelRunner().getTensor<int64_t>(I));
  OR << NV("ShouldInline", isInliningRecommended());
}

void MLInlineAdvice::updateCachedCallerFPI(FunctionAnalysisManager &FAM) const {
  FPU->finish(FAM);
}

void MLInlineAdvice::recordInliningImpl() {
  ORE.emit([&]() {
    OptimizationRemark R(DEBUG_TYPE, "InliningSuccess", DLoc, Block);
    reportContextForRemark(R);
    return R;
  });
  getAdvisor()->onSuccessfulInlining(*this, /*CalleeWasDeleted*/ false);
}

void MLInlineAdvice::recordInliningWithCalleeDeletedImpl() {
  ORE.emit([&]() {
    OptimizationRemark R(DEBUG_TYPE, "InliningSuccessWithCalleeDeleted", DLoc,
                         Block);
    reportContextForRemark(R);
    return R;
  });
  getAdvisor()->onSuccessfulInlining(*this, /*CalleeWasDeleted*/ true);
}

void MLInlineAdvice::recordUnsuccessfulInliningImpl(
    const InlineResult &Result) {
  // The inlining attempt was rolled back; restore the caller's cached
  // properties to their pre-inlining snapshot.
  getAdvisor()->getCachedFPI(*Caller) = PreInlineCallerFPI;
  ORE.emit([&]() {
    OptimizationRemarkMissed R(DEBUG_TYPE, "InliningAttemptedAndUnsuccessful",
                               DLoc, Block);
    reportContextForRemark(R);
    return R;
  });
}

void MLInlineAdvice::recordUnattemptedInliningImpl() {
  assert(!FPU);
  ORE.emit([&]() {
    OptimizationRemarkMissed R(DEBUG_TYPE, "InliningNotAttempted", DLoc, Block);
    reportContextForRemark(R);
    return R;
  });
}