  1. //===-- MemoryProfileInfo.cpp - memory profile info ------------------------==//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file contains utilities to analyze memory profile information.
  10. //
  11. //===----------------------------------------------------------------------===//
  12. #include "llvm/Analysis/MemoryProfileInfo.h"
  13. #include "llvm/Support/CommandLine.h"
  14. using namespace llvm;
  15. using namespace llvm::memprof;
  16. #define DEBUG_TYPE "memory-profile-info"
// Upper bound on accesses per byte for marking an allocation cold.
// An allocation's max access count divided by its minimum size must fall
// strictly below this value (see getAllocType below).
cl::opt<float> MemProfAccessesPerByteColdThreshold(
    "memprof-accesses-per-byte-cold-threshold", cl::init(10.0), cl::Hidden,
    cl::desc("The threshold the accesses per byte must be under to consider "
             "an allocation cold"));

// Lower bound on lifetime to mark an allocation cold (in addition to accesses
// per byte above). This is to avoid pessimizing short lived objects.
// Note: specified in seconds on the command line, but compared against a
// lifetime in milliseconds (converted in getAllocType).
cl::opt<unsigned> MemProfMinLifetimeColdThreshold(
    "memprof-min-lifetime-cold-threshold", cl::init(200), cl::Hidden,
    cl::desc("The minimum lifetime (s) for an allocation to be considered "
             "cold"));
  28. AllocationType llvm::memprof::getAllocType(uint64_t MaxAccessCount,
  29. uint64_t MinSize,
  30. uint64_t MinLifetime) {
  31. if (((float)MaxAccessCount) / MinSize < MemProfAccessesPerByteColdThreshold &&
  32. // MinLifetime is expected to be in ms, so convert the threshold to ms.
  33. MinLifetime >= MemProfMinLifetimeColdThreshold * 1000)
  34. return AllocationType::Cold;
  35. return AllocationType::NotCold;
  36. }
  37. MDNode *llvm::memprof::buildCallstackMetadata(ArrayRef<uint64_t> CallStack,
  38. LLVMContext &Ctx) {
  39. std::vector<Metadata *> StackVals;
  40. for (auto Id : CallStack) {
  41. auto *StackValMD =
  42. ValueAsMetadata::get(ConstantInt::get(Type::getInt64Ty(Ctx), Id));
  43. StackVals.push_back(StackValMD);
  44. }
  45. return MDNode::get(Ctx, StackVals);
  46. }
  47. MDNode *llvm::memprof::getMIBStackNode(const MDNode *MIB) {
  48. assert(MIB->getNumOperands() == 2);
  49. // The stack metadata is the first operand of each memprof MIB metadata.
  50. return cast<MDNode>(MIB->getOperand(0));
  51. }
  52. AllocationType llvm::memprof::getMIBAllocType(const MDNode *MIB) {
  53. assert(MIB->getNumOperands() == 2);
  54. // The allocation type is currently the second operand of each memprof
  55. // MIB metadata. This will need to change as we add additional allocation
  56. // types that can be applied based on the allocation profile data.
  57. auto *MDS = dyn_cast<MDString>(MIB->getOperand(1));
  58. assert(MDS);
  59. if (MDS->getString().equals("cold"))
  60. return AllocationType::Cold;
  61. return AllocationType::NotCold;
  62. }
  63. static std::string getAllocTypeAttributeString(AllocationType Type) {
  64. switch (Type) {
  65. case AllocationType::NotCold:
  66. return "notcold";
  67. break;
  68. case AllocationType::Cold:
  69. return "cold";
  70. break;
  71. default:
  72. assert(false && "Unexpected alloc type");
  73. }
  74. llvm_unreachable("invalid alloc type");
  75. }
  76. static void addAllocTypeAttribute(LLVMContext &Ctx, CallBase *CI,
  77. AllocationType AllocType) {
  78. auto AllocTypeString = getAllocTypeAttributeString(AllocType);
  79. auto A = llvm::Attribute::get(Ctx, "memprof", AllocTypeString);
  80. CI->addFnAttr(A);
  81. }
  82. static bool hasSingleAllocType(uint8_t AllocTypes) {
  83. const unsigned NumAllocTypes = llvm::popcount(AllocTypes);
  84. assert(NumAllocTypes != 0);
  85. return NumAllocTypes == 1;
  86. }
  87. void CallStackTrie::addCallStack(AllocationType AllocType,
  88. ArrayRef<uint64_t> StackIds) {
  89. bool First = true;
  90. CallStackTrieNode *Curr = nullptr;
  91. for (auto StackId : StackIds) {
  92. // If this is the first stack frame, add or update alloc node.
  93. if (First) {
  94. First = false;
  95. if (Alloc) {
  96. assert(AllocStackId == StackId);
  97. Alloc->AllocTypes |= static_cast<uint8_t>(AllocType);
  98. } else {
  99. AllocStackId = StackId;
  100. Alloc = new CallStackTrieNode(AllocType);
  101. }
  102. Curr = Alloc;
  103. continue;
  104. }
  105. // Update existing caller node if it exists.
  106. auto Next = Curr->Callers.find(StackId);
  107. if (Next != Curr->Callers.end()) {
  108. Curr = Next->second;
  109. Curr->AllocTypes |= static_cast<uint8_t>(AllocType);
  110. continue;
  111. }
  112. // Otherwise add a new caller node.
  113. auto *New = new CallStackTrieNode(AllocType);
  114. Curr->Callers[StackId] = New;
  115. Curr = New;
  116. }
  117. assert(Curr);
  118. }
  119. void CallStackTrie::addCallStack(MDNode *MIB) {
  120. MDNode *StackMD = getMIBStackNode(MIB);
  121. assert(StackMD);
  122. std::vector<uint64_t> CallStack;
  123. CallStack.reserve(StackMD->getNumOperands());
  124. for (const auto &MIBStackIter : StackMD->operands()) {
  125. auto *StackId = mdconst::dyn_extract<ConstantInt>(MIBStackIter);
  126. assert(StackId);
  127. CallStack.push_back(StackId->getZExtValue());
  128. }
  129. addCallStack(getMIBAllocType(MIB), CallStack);
  130. }
  131. static MDNode *createMIBNode(LLVMContext &Ctx,
  132. std::vector<uint64_t> &MIBCallStack,
  133. AllocationType AllocType) {
  134. std::vector<Metadata *> MIBPayload(
  135. {buildCallstackMetadata(MIBCallStack, Ctx)});
  136. MIBPayload.push_back(
  137. MDString::get(Ctx, getAllocTypeAttributeString(AllocType)));
  138. return MDNode::get(Ctx, MIBPayload);
  139. }
// Recursive helper to trim contexts and create metadata nodes.
// Caller should have pushed Node's loc to MIBCallStack. Doing this in the
// caller makes it simpler to handle the many early returns in this method.
//
// \p Node: trie node whose stack id is already the last entry of
//   \p MIBCallStack.
// \p MIBCallStack: call stack prefix accumulated so far (alloc frame first).
// \p MIBNodes: output list of finished MIB metadata nodes.
// \p CalleeHasAmbiguousCallerContext: true when Node's callee had multiple
//   callers (the top-level call for the alloc node passes true; see
//   buildAndAttachMIBMetadata).
// Returns true if MIB nodes covering all contexts through Node were added.
bool CallStackTrie::buildMIBNodes(CallStackTrieNode *Node, LLVMContext &Ctx,
                                  std::vector<uint64_t> &MIBCallStack,
                                  std::vector<Metadata *> &MIBNodes,
                                  bool CalleeHasAmbiguousCallerContext) {
  // Trim context below the first node in a prefix with a single alloc type.
  // Add an MIB record for the current call stack prefix.
  if (hasSingleAllocType(Node->AllocTypes)) {
    MIBNodes.push_back(
        createMIBNode(Ctx, MIBCallStack, (AllocationType)Node->AllocTypes));
    return true;
  }

  // We don't have a single allocation for all the contexts sharing this prefix,
  // so recursively descend into callers in trie.
  if (!Node->Callers.empty()) {
    bool NodeHasAmbiguousCallerContext = Node->Callers.size() > 1;
    bool AddedMIBNodesForAllCallerContexts = true;
    for (auto &Caller : Node->Callers) {
      MIBCallStack.push_back(Caller.first);
      AddedMIBNodesForAllCallerContexts &=
          buildMIBNodes(Caller.second, Ctx, MIBCallStack, MIBNodes,
                        NodeHasAmbiguousCallerContext);
      // Remove Caller.
      MIBCallStack.pop_back();
    }
    if (AddedMIBNodesForAllCallerContexts)
      return true;
    // We expect that the callers should be forced to add MIBs to disambiguate
    // the context in this case (see below).
    assert(!NodeHasAmbiguousCallerContext);
  }

  // If we reached here, then this node does not have a single allocation type,
  // and we didn't add metadata for a longer call stack prefix including any of
  // Node's callers. That means we never hit a single allocation type along all
  // call stacks with this prefix. This can happen due to recursion collapsing
  // or the stack being deeper than tracked by the profiler runtime, leading to
  // contexts with different allocation types being merged. In that case, we
  // trim the context just below the deepest context split, which is this
  // node if the callee has an ambiguous caller context (multiple callers),
  // since the recursive calls above returned false. Conservatively give it
  // non-cold allocation type.
  if (!CalleeHasAmbiguousCallerContext)
    return false;
  MIBNodes.push_back(createMIBNode(Ctx, MIBCallStack, AllocationType::NotCold));
  return true;
}
  188. // Build and attach the minimal necessary MIB metadata. If the alloc has a
  189. // single allocation type, add a function attribute instead. Returns true if
  190. // memprof metadata attached, false if not (attribute added).
  191. bool CallStackTrie::buildAndAttachMIBMetadata(CallBase *CI) {
  192. auto &Ctx = CI->getContext();
  193. if (hasSingleAllocType(Alloc->AllocTypes)) {
  194. addAllocTypeAttribute(Ctx, CI, (AllocationType)Alloc->AllocTypes);
  195. return false;
  196. }
  197. std::vector<uint64_t> MIBCallStack;
  198. MIBCallStack.push_back(AllocStackId);
  199. std::vector<Metadata *> MIBNodes;
  200. assert(!Alloc->Callers.empty() && "addCallStack has not been called yet");
  201. buildMIBNodes(Alloc, Ctx, MIBCallStack, MIBNodes,
  202. /*CalleeHasAmbiguousCallerContext=*/true);
  203. assert(MIBCallStack.size() == 1 &&
  204. "Should only be left with Alloc's location in stack");
  205. CI->setMetadata(LLVMContext::MD_memprof, MDNode::get(Ctx, MIBNodes));
  206. return true;
  207. }
  208. template <>
  209. CallStack<MDNode, MDNode::op_iterator>::CallStackIterator::CallStackIterator(
  210. const MDNode *N, bool End)
  211. : N(N) {
  212. if (!N)
  213. return;
  214. Iter = End ? N->op_end() : N->op_begin();
  215. }
  216. template <>
  217. uint64_t
  218. CallStack<MDNode, MDNode::op_iterator>::CallStackIterator::operator*() {
  219. assert(Iter != N->op_end());
  220. ConstantInt *StackIdCInt = mdconst::dyn_extract<ConstantInt>(*Iter);
  221. assert(StackIdCInt);
  222. return StackIdCInt->getZExtValue();
  223. }