//===- InlineSizeEstimatorAnalysis.cpp - IR to native size from ML model --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements feature and label extraction for offline supervised learning
// of an IR to native size model.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/InlineSizeEstimatorAnalysis.h"

#ifdef LLVM_HAVE_TFLITE
#include "llvm/Analysis/Utils/TFUtils.h"
#endif
#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

AnalysisKey InlineSizeEstimatorAnalysis::Key;

#ifdef LLVM_HAVE_TFLITE
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include <algorithm>
#include <deque>
#include <optional>

cl::opt<std::string> TFIR2NativeModelPath(
    "ml-inliner-ir2native-model", cl::Hidden,
    cl::desc("Path to saved model evaluating native size from IR."));

#define DEBUG_TYPE "inline-size-estimator"

namespace {
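// Returns the numerically largest instruction opcode: including
// Instruction.def with LAST_OTHER_INST defined as a `return` yields the last
// (and highest) opcode number.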
unsigned getMaxInstructionID() {
#define LAST_OTHER_INST(NR) return NR;
#include "llvm/IR/Instruction.def"
}

class IRToNativeSizeLearning {
public:
  enum class NamedFeatureIndex : size_t {
    InitialSize,
    Blocks,
    Calls,
    IsLocal,
    IsLinkOnceODR,
    IsLinkOnce,
    Loops,
    MaxLoopDepth,
    MaxDomTreeLevel,
    NumNamedFeatures
  };
  static const size_t NumNamedFeatures =
      static_cast<size_t>(NamedFeatureIndex::NumNamedFeatures);

  struct FunctionFeatures {
    static const size_t FeatureCount;

    std::array<int32_t, NumNamedFeatures> NamedFeatures = {0};
    std::vector<int32_t> InstructionHistogram;
    std::vector<int32_t> InstructionPairHistogram;

    void fillTensor(int32_t *Ptr) const;
    int32_t &operator[](NamedFeatureIndex Pos) {
      return NamedFeatures[static_cast<size_t>(Pos)];
    }
  };

  IRToNativeSizeLearning() = default;

  static FunctionFeatures getFunctionFeatures(Function &F,
                                              FunctionAnalysisManager &FAM);
};

// This is a point-in-time choice: we determined that including these pairs of
// consecutive instructions (in the IR layout available at inline time) as
// features improves the model's performance. We want to move away from manual
// feature selection.
// The array is given in opcode pairs rather than labels because 1) labels
// weren't readily available, and 2) the successions were hand-extracted.
//
// This array must be sorted.
static const std::array<std::pair<size_t, size_t>, 137>
    ImportantInstructionSuccessions{
        {{1, 1}, {1, 4}, {1, 5}, {1, 7}, {1, 8}, {1, 9}, {1, 11},
         {1, 12}, {1, 13}, {1, 14}, {1, 18}, {1, 20}, {1, 22}, {1, 24},
         {1, 25}, {1, 26}, {1, 27}, {1, 28}, {1, 29}, {1, 30}, {1, 31},
         {1, 32}, {1, 33}, {1, 34}, {1, 39}, {1, 40}, {1, 42}, {1, 45},
         {2, 1}, {2, 2}, {2, 13}, {2, 28}, {2, 29}, {2, 32}, {2, 33},
         {2, 34}, {2, 38}, {2, 48}, {2, 49}, {2, 53}, {2, 55}, {2, 56},
         {13, 2}, {13, 13}, {13, 26}, {13, 33}, {13, 34}, {13, 56}, {15, 27},
         {28, 2}, {28, 48}, {28, 53}, {29, 2}, {29, 33}, {29, 56}, {31, 31},
         {31, 33}, {31, 34}, {31, 49}, {32, 1}, {32, 2}, {32, 13}, {32, 15},
         {32, 28}, {32, 29}, {32, 32}, {32, 33}, {32, 34}, {32, 39}, {32, 40},
         {32, 48}, {32, 49}, {32, 53}, {32, 56}, {33, 1}, {33, 2}, {33, 32},
         {33, 33}, {33, 34}, {33, 49}, {33, 53}, {33, 56}, {34, 1}, {34, 2},
         {34, 32}, {34, 33}, {34, 34}, {34, 49}, {34, 53}, {34, 56}, {38, 34},
         {39, 57}, {40, 34}, {47, 15}, {47, 49}, {48, 2}, {48, 34}, {48, 56},
         {49, 1}, {49, 2}, {49, 28}, {49, 32}, {49, 33}, {49, 34}, {49, 39},
         {49, 49}, {49, 56}, {53, 1}, {53, 2}, {53, 28}, {53, 34}, {53, 53},
         {53, 57}, {55, 1}, {55, 28}, {55, 34}, {55, 53}, {55, 55}, {55, 56},
         {56, 1}, {56, 2}, {56, 7}, {56, 13}, {56, 32}, {56, 33}, {56, 34},
         {56, 49}, {56, 53}, {56, 56}, {56, 64}, {57, 34}, {57, 56}, {57, 57},
         {64, 1}, {64, 64}, {65, 1}, {65, 65}}};

// We have: 9 calculated features (the features here); 1 feature for each
// instruction opcode; and 1 feature for each manually-identified sequence.
// For the latter 2, we build a histogram: we count the number of
// occurrences of each instruction opcode or succession of instructions,
// respectively.
// Note that instruction opcodes start from 1. For convenience, we also have an
// always 0 feature for the '0' opcode, hence the extra 1.
const size_t IRToNativeSizeLearning::FunctionFeatures::FeatureCount =
    ImportantInstructionSuccessions.size() + getMaxInstructionID() + 1 +
    IRToNativeSizeLearning::NumNamedFeatures;
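
// Sum the TTI code-size cost of every instruction in F. This is used as the
// InitialSize named feature.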
size_t getSize(Function &F, TargetTransformInfo &TTI) {
  size_t Ret = 0;
  for (const auto &BB : F)
    for (const auto &I : BB)
      Ret += *(TTI.getInstructionCost(
                      &I, TargetTransformInfo::TargetCostKind::TCK_CodeSize)
                   .getValue());
  return Ret;
}

size_t getSize(Function &F, FunctionAnalysisManager &FAM) {
  auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
  return getSize(F, TTI);
}
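
// The deepest level reached by any of F's basic blocks in its dominator tree.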
unsigned getMaxDominatorTreeDepth(const Function &F,
                                  const DominatorTree &Tree) {
  unsigned Ret = 0;
  for (const auto &BB : F)
    if (const auto *TN = Tree.getNode(&BB))
      Ret = std::max(Ret, TN->getLevel());
  return Ret;
}
} // namespace

IRToNativeSizeLearning::FunctionFeatures
IRToNativeSizeLearning::getFunctionFeatures(Function &F,
                                            FunctionAnalysisManager &FAM) {
  assert(llvm::is_sorted(ImportantInstructionSuccessions) &&
         "expected function features are sorted");

  auto &DomTree = FAM.getResult<DominatorTreeAnalysis>(F);
  FunctionFeatures FF;
  size_t InstrCount = getMaxInstructionID() + 1;
  FF.InstructionHistogram.resize(InstrCount);
  FF.InstructionPairHistogram.resize(ImportantInstructionSuccessions.size());

  int StartID = 0;
  int LastID = StartID;
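  // Map an (opcode, opcode) succession to its index in
  // ImportantInstructionSuccessions, or -1 if it's not one we track.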
  auto getPairIndex = [](size_t a, size_t b) {
    auto I = llvm::find(ImportantInstructionSuccessions, std::make_pair(a, b));
    if (I == ImportantInstructionSuccessions.end())
      return -1;
    return static_cast<int>(
        std::distance(ImportantInstructionSuccessions.begin(), I));
  };

  // We don't want debug calls, because they'd just add noise.
  for (const auto &BB : F) {
    for (const auto &I : BB.instructionsWithoutDebug()) {
      auto ID = I.getOpcode();
      ++FF.InstructionHistogram[ID];
      int PairIndex = getPairIndex(LastID, ID);
      if (PairIndex >= 0)
        ++FF.InstructionPairHistogram[PairIndex];
      LastID = ID;
      if (isa<CallBase>(I))
        ++FF[NamedFeatureIndex::Calls];
    }
  }

  FF[NamedFeatureIndex::InitialSize] = getSize(F, FAM);
  FF[NamedFeatureIndex::IsLocal] = F.hasLocalLinkage();
  FF[NamedFeatureIndex::IsLinkOnceODR] = F.hasLinkOnceODRLinkage();
  FF[NamedFeatureIndex::IsLinkOnce] = F.hasLinkOnceLinkage();
  FF[NamedFeatureIndex::Blocks] = F.size();
  auto &LI = FAM.getResult<LoopAnalysis>(F);
  FF[NamedFeatureIndex::Loops] = std::distance(LI.begin(), LI.end());
  for (auto &L : LI)
    FF[NamedFeatureIndex::MaxLoopDepth] =
        std::max(FF[NamedFeatureIndex::MaxLoopDepth],
                 static_cast<int32_t>(L->getLoopDepth()));
  FF[NamedFeatureIndex::MaxDomTreeLevel] = getMaxDominatorTreeDepth(F, DomTree);
  return FF;
}
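
// Flatten the features into the model's int32 input buffer: named features
// first, then the per-opcode histogram, then the instruction-pair histogram.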
void IRToNativeSizeLearning::FunctionFeatures::fillTensor(int32_t *Ptr) const {
  std::copy(NamedFeatures.begin(), NamedFeatures.end(), Ptr);
  Ptr += NamedFeatures.size();
  std::copy(InstructionHistogram.begin(), InstructionHistogram.end(), Ptr);
  Ptr += InstructionHistogram.size();
  std::copy(InstructionPairHistogram.begin(), InstructionPairHistogram.end(),
            Ptr);
}

bool InlineSizeEstimatorAnalysis::isEvaluatorRequested() {
  return !TFIR2NativeModelPath.empty();
}
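
// If a model path was given on the command line, load the model and describe
// its interface: one flat int32 feature vector in, one float size prediction
// out. On any failure, drop the evaluator so the analysis yields no estimate.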
InlineSizeEstimatorAnalysis::InlineSizeEstimatorAnalysis() {
  if (!isEvaluatorRequested()) {
    return;
  }
  std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
      "serving_default_input_1",
      {1, static_cast<int64_t>(
              IRToNativeSizeLearning::FunctionFeatures::FeatureCount)})};
  std::vector<TensorSpec> OutputSpecs{
      TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};
  Evaluator = std::make_unique<TFModelEvaluator>(
      TFIR2NativeModelPath.getValue().c_str(), InputSpecs, OutputSpecs);
  if (!Evaluator || !Evaluator->isValid()) {
    Evaluator.reset();
    return;
  }
}
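
// Extract the function's features, copy them into the model's input tensor,
// run the model, and clamp negative predictions to zero before returning the
// result as a size.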
InlineSizeEstimatorAnalysis::Result
InlineSizeEstimatorAnalysis::run(const Function &F,
                                 FunctionAnalysisManager &FAM) {
  if (!Evaluator)
    return std::nullopt;
  auto Features = IRToNativeSizeLearning::getFunctionFeatures(
      const_cast<Function &>(F), FAM);

  int32_t *V = Evaluator->getInput<int32_t>(0);
  Features.fillTensor(V);
  auto ER = Evaluator->evaluate();
  if (!ER)
    return std::nullopt;
  float Ret = *ER->getTensorValue<float>(0);
  if (Ret < 0.0)
    Ret = 0.0;
  return static_cast<size_t>(Ret);
}

InlineSizeEstimatorAnalysis::~InlineSizeEstimatorAnalysis() {}

InlineSizeEstimatorAnalysis::InlineSizeEstimatorAnalysis(
    InlineSizeEstimatorAnalysis &&Other)
    : Evaluator(std::move(Other.Evaluator)) {}

#else
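// Without TFLite support, the analysis compiles to a stub that never produces
// an estimate.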
namespace llvm {
class TFModelEvaluator {};
} // namespace llvm

InlineSizeEstimatorAnalysis::InlineSizeEstimatorAnalysis() = default;
InlineSizeEstimatorAnalysis::InlineSizeEstimatorAnalysis(
    InlineSizeEstimatorAnalysis &&) {}
InlineSizeEstimatorAnalysis::~InlineSizeEstimatorAnalysis() = default;
InlineSizeEstimatorAnalysis::Result
InlineSizeEstimatorAnalysis::run(const Function &F,
                                 FunctionAnalysisManager &FAM) {
  return std::nullopt;
}
bool InlineSizeEstimatorAnalysis::isEvaluatorRequested() { return false; }
#endif
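
// Printer pass: report the native size estimate (if any) for each function.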
PreservedAnalyses
InlineSizeEstimatorAnalysisPrinterPass::run(Function &F,
                                            FunctionAnalysisManager &AM) {
  OS << "[InlineSizeEstimatorAnalysis] size estimate for " << F.getName()
     << ": " << AM.getResult<InlineSizeEstimatorAnalysis>(F) << "\n";
  return PreservedAnalyses::all();
}