//===- RawMemProfReader.cpp - Instrumented memory profiling reader --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains support for reading MemProf profiling data.
//
//===----------------------------------------------------------------------===//

#include <algorithm>
#include <cstdint>
#include <memory>
#include <type_traits>

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/DebugInfo/DWARF/DWARFContext.h"
#include "llvm/DebugInfo/Symbolize/SymbolizableModule.h"
#include "llvm/DebugInfo/Symbolize/SymbolizableObjectFile.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/ProfileData/MemProf.h"
#include "llvm/ProfileData/MemProfData.inc"
#include "llvm/ProfileData/RawMemProfReader.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Path.h"

#define DEBUG_TYPE "memprof"

namespace llvm {
namespace memprof {
namespace {
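// Reads a single value of type T from Ptr, asserting that the address is
// suitably aligned for a plain load of T.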
template <class T = uint64_t> inline T alignedRead(const char *Ptr) {
  static_assert(std::is_pod<T>::value, "Not a pod type.");
  assert(reinterpret_cast<size_t>(Ptr) % sizeof(T) == 0 && "Unaligned Read");
  return *reinterpret_cast<const T *>(Ptr);
}
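
// Validates the raw profile buffer: correct magic, non-empty contents, a
// supported version in each header, and per-header sizes that sum to the
// total buffer size.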
Error checkBuffer(const MemoryBuffer &Buffer) {
  if (!RawMemProfReader::hasFormat(Buffer))
    return make_error<InstrProfError>(instrprof_error::bad_magic);

  if (Buffer.getBufferSize() == 0)
    return make_error<InstrProfError>(instrprof_error::empty_raw_profile);

  if (Buffer.getBufferSize() < sizeof(Header)) {
    return make_error<InstrProfError>(instrprof_error::truncated);
  }

  // The size of the buffer can be > header total size since we allow repeated
  // serialization of memprof profiles to the same file.
  uint64_t TotalSize = 0;
  const char *Next = Buffer.getBufferStart();
  while (Next < Buffer.getBufferEnd()) {
    auto *H = reinterpret_cast<const Header *>(Next);
    if (H->Version != MEMPROF_RAW_VERSION) {
      return make_error<InstrProfError>(instrprof_error::unsupported_version);
    }

    TotalSize += H->TotalSize;
    Next += H->TotalSize;
  }

  if (Buffer.getBufferSize() != TotalSize) {
    return make_error<InstrProfError>(instrprof_error::malformed);
  }
  return Error::success();
}
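
// Reads a little-endian count followed by that many fixed-size SegmentEntry
// records.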
llvm::SmallVector<SegmentEntry> readSegmentEntries(const char *Ptr) {
  using namespace support;

  const uint64_t NumItemsToRead =
      endian::readNext<uint64_t, little, unaligned>(Ptr);
  llvm::SmallVector<SegmentEntry> Items;
  for (uint64_t I = 0; I < NumItemsToRead; I++) {
    Items.push_back(*reinterpret_cast<const SegmentEntry *>(
        Ptr + I * sizeof(SegmentEntry)));
  }
  return Items;
}
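
// Reads a count followed by that many (stack id, MemInfoBlock) pairs.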
llvm::SmallVector<std::pair<uint64_t, MemInfoBlock>>
readMemInfoBlocks(const char *Ptr) {
  using namespace support;

  const uint64_t NumItemsToRead =
      endian::readNext<uint64_t, little, unaligned>(Ptr);
  llvm::SmallVector<std::pair<uint64_t, MemInfoBlock>> Items;
  for (uint64_t I = 0; I < NumItemsToRead; I++) {
    const uint64_t Id = endian::readNext<uint64_t, little, unaligned>(Ptr);
    const MemInfoBlock MIB = *reinterpret_cast<const MemInfoBlock *>(Ptr);
    Items.push_back({Id, MIB});
    // Only increment by the size of the MIB here since readNext above has
    // already advanced the pointer past the id.
    Ptr += sizeof(MemInfoBlock);
  }
  return Items;
}
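
// Reads the callstack section: a count, then for each entry a stack id, the
// number of PCs, and the PC addresses themselves.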
CallStackMap readStackInfo(const char *Ptr) {
  using namespace support;

  const uint64_t NumItemsToRead =
      endian::readNext<uint64_t, little, unaligned>(Ptr);
  CallStackMap Items;

  for (uint64_t I = 0; I < NumItemsToRead; I++) {
    const uint64_t StackId = endian::readNext<uint64_t, little, unaligned>(Ptr);
    const uint64_t NumPCs = endian::readNext<uint64_t, little, unaligned>(Ptr);

    SmallVector<uint64_t> CallStack;
    for (uint64_t J = 0; J < NumPCs; J++) {
      CallStack.push_back(endian::readNext<uint64_t, little, unaligned>(Ptr));
    }

    Items[StackId] = CallStack;
  }
  return Items;
}

// Merges the contents of stack information in \p From to \p To. Returns true
// if any stack ids observed previously map to a different set of program
// counter addresses.
bool mergeStackMap(const CallStackMap &From, CallStackMap &To) {
  for (const auto &IdStack : From) {
    auto I = To.find(IdStack.first);
    if (I == To.end()) {
      To[IdStack.first] = IdStack.second;
    } else {
      // Check that the PCs are the same (in order).
      if (IdStack.second != I->second)
        return true;
    }
  }
  return false;
}
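
// Prepends a string describing the context in which the error occurred.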
Error report(Error E, const StringRef Context) {
  return joinErrors(createStringError(inconvertibleErrorCode(), Context),
                    std::move(E));
}
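
// Returns true if the path points into the memprof runtime sources, whose
// frames we filter out of the profile.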
bool isRuntimePath(const StringRef Path) {
  return StringRef(llvm::sys::path::convert_to_slash(Path))
      .contains("memprof/memprof_");
}
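
// Formats the segment's build id as a hex string, or "<None>" if it is unset.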
std::string getBuildIdString(const SegmentEntry &Entry) {
  constexpr size_t Size = sizeof(Entry.BuildId) / sizeof(uint8_t);
  constexpr uint8_t Zeros[Size] = {0};
  // If the build id is unset, print a helpful string instead of all zeros.
  if (memcmp(Entry.BuildId, Zeros, Size) == 0)
    return "<None>";

  std::string Str;
  raw_string_ostream OS(Str);
  for (size_t I = 0; I < Size; I++) {
    OS << format_hex_no_prefix(Entry.BuildId[I], 2);
  }
  return OS.str();
}
} // namespace
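
// Typical usage of the reader (a sketch; the file names are placeholders and
// error handling is elided):
//   auto ReaderOr = RawMemProfReader::create("memprof.profraw", "a.out",
//                                            /*KeepName=*/false);
//   if (ReaderOr)
//     (*ReaderOr)->printYAML(llvm::outs());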
Expected<std::unique_ptr<RawMemProfReader>>
RawMemProfReader::create(const Twine &Path, const StringRef ProfiledBinary,
                         bool KeepName) {
  auto BufferOr = MemoryBuffer::getFileOrSTDIN(Path);
  if (std::error_code EC = BufferOr.getError())
    return report(errorCodeToError(EC), Path.getSingleStringRef());

  std::unique_ptr<MemoryBuffer> Buffer(BufferOr.get().release());
  if (Error E = checkBuffer(*Buffer))
    return report(std::move(E), Path.getSingleStringRef());

  if (ProfiledBinary.empty())
    return report(
        errorCodeToError(make_error_code(std::errc::invalid_argument)),
        "Path to profiled binary is empty!");

  auto BinaryOr = llvm::object::createBinary(ProfiledBinary);
  if (!BinaryOr) {
    return report(BinaryOr.takeError(), ProfiledBinary);
  }

  // Use new here since the constructor is private.
  std::unique_ptr<RawMemProfReader> Reader(
      new RawMemProfReader(std::move(BinaryOr.get()), KeepName));
  if (Error E = Reader->initialize(std::move(Buffer))) {
    return std::move(E);
  }
  return std::move(Reader);
}

bool RawMemProfReader::hasFormat(const StringRef Path) {
  auto BufferOr = MemoryBuffer::getFileOrSTDIN(Path);
  if (!BufferOr)
    return false;

  std::unique_ptr<MemoryBuffer> Buffer(BufferOr.get().release());
  return hasFormat(*Buffer);
}

bool RawMemProfReader::hasFormat(const MemoryBuffer &Buffer) {
  if (Buffer.getBufferSize() < sizeof(uint64_t))
    return false;
  // Aligned read to sanity check that the buffer was allocated with at least
  // 8-byte alignment.
  const uint64_t Magic = alignedRead(Buffer.getBufferStart());
  return Magic == MEMPROF_RAW_MAGIC_64;
}

void RawMemProfReader::printYAML(raw_ostream &OS) {
  uint64_t NumAllocFunctions = 0, NumMibInfo = 0;
  for (const auto &KV : FunctionProfileData) {
    const size_t NumAllocSites = KV.second.AllocSites.size();
    if (NumAllocSites > 0) {
      NumAllocFunctions++;
      NumMibInfo += NumAllocSites;
    }
  }

  OS << "MemprofProfile:\n";
  OS << "  Summary:\n";
  OS << "    Version: " << MEMPROF_RAW_VERSION << "\n";
  OS << "    NumSegments: " << SegmentInfo.size() << "\n";
  OS << "    NumMibInfo: " << NumMibInfo << "\n";
  OS << "    NumAllocFunctions: " << NumAllocFunctions << "\n";
  OS << "    NumStackOffsets: " << StackMap.size() << "\n";
  // Print out the segment information.
  OS << "  Segments:\n";
  for (const auto &Entry : SegmentInfo) {
    OS << "  -\n";
    OS << "    BuildId: " << getBuildIdString(Entry) << "\n";
    OS << "    Start: 0x" << llvm::utohexstr(Entry.Start) << "\n";
    OS << "    End: 0x" << llvm::utohexstr(Entry.End) << "\n";
    OS << "    Offset: 0x" << llvm::utohexstr(Entry.Offset) << "\n";
  }
  // Print out the merged contents of the profiles.
  OS << "  Records:\n";
  for (const auto &Entry : *this) {
    OS << "  -\n";
    OS << "    FunctionGUID: " << Entry.first << "\n";
    Entry.second.print(OS);
  }
}
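
// Verifies that the profiled binary is a supported ELF object (currently
// non-PIE x86 only), sets up DWARF symbolization for it, and then reads,
// symbolizes, and maps the raw profile into records.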
Error RawMemProfReader::initialize(std::unique_ptr<MemoryBuffer> DataBuffer) {
  const StringRef FileName = Binary.getBinary()->getFileName();

  auto *ElfObject = dyn_cast<object::ELFObjectFileBase>(Binary.getBinary());
  if (!ElfObject) {
    return report(make_error<StringError>(Twine("Not an ELF file: "),
                                          inconvertibleErrorCode()),
                  FileName);
  }

  // Check whether the profiled binary was built with position-independent
  // code (PIC). For now we provide an error message until symbolization
  // support is added for PIC.
  auto *Elf64LEObject = llvm::cast<llvm::object::ELF64LEObjectFile>(ElfObject);
  const llvm::object::ELF64LEFile &ElfFile = Elf64LEObject->getELFFile();
  auto PHdrsOr = ElfFile.program_headers();
  if (!PHdrsOr)
    return report(
        make_error<StringError>(Twine("Could not read program headers: "),
                                inconvertibleErrorCode()),
        FileName);
  auto FirstLoadHeader = PHdrsOr->begin();
  while (FirstLoadHeader->p_type != llvm::ELF::PT_LOAD)
    ++FirstLoadHeader;
  if (FirstLoadHeader->p_vaddr == 0)
    return report(
        make_error<StringError>(Twine("Unsupported position independent code"),
                                inconvertibleErrorCode()),
        FileName);

  auto Triple = ElfObject->makeTriple();
  if (!Triple.isX86())
    return report(make_error<StringError>(Twine("Unsupported target: ") +
                                              Triple.getArchName(),
                                          inconvertibleErrorCode()),
                  FileName);

  auto *Object = cast<object::ObjectFile>(Binary.getBinary());
  std::unique_ptr<DIContext> Context = DWARFContext::create(
      *Object, DWARFContext::ProcessDebugRelocations::Process);

  auto SOFOr = symbolize::SymbolizableObjectFile::create(
      Object, std::move(Context), /*UntagAddresses=*/false);
  if (!SOFOr)
    return report(SOFOr.takeError(), FileName);
  Symbolizer = std::move(SOFOr.get());

  if (Error E = readRawProfile(std::move(DataBuffer)))
    return E;

  if (Error E = symbolizeAndFilterStackFrames())
    return E;

  return mapRawProfileToRecords();
}
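
// Converts the symbolized callstacks and their MemInfoBlocks into
// per-function IndexedMemProfRecords, keyed by function GUID.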
Error RawMemProfReader::mapRawProfileToRecords() {
  // Hold a mapping from function to each callsite location we encounter within
  // it that is part of some dynamic allocation context. The location is stored
  // as a pointer to a symbolized list of inline frames.
  using LocationPtr = const llvm::SmallVector<FrameId> *;
  llvm::MapVector<GlobalValue::GUID, llvm::SetVector<LocationPtr>>
      PerFunctionCallSites;

  // Convert the raw profile callstack data into memprof records. While doing
  // so, keep track of related contexts so that we can fill these in later.
  for (const auto &Entry : CallstackProfileData) {
    const uint64_t StackId = Entry.first;

    auto It = StackMap.find(StackId);
    if (It == StackMap.end())
      return make_error<InstrProfError>(
          instrprof_error::malformed,
          "memprof callstack record does not contain id: " + Twine(StackId));

    // Construct the symbolized callstack.
    llvm::SmallVector<FrameId> Callstack;
    Callstack.reserve(It->getSecond().size());

    llvm::ArrayRef<uint64_t> Addresses = It->getSecond();
    for (size_t I = 0; I < Addresses.size(); I++) {
      const uint64_t Address = Addresses[I];
      assert(SymbolizedFrame.count(Address) > 0 &&
             "Address not found in SymbolizedFrame map");
      const SmallVector<FrameId> &Frames = SymbolizedFrame[Address];

      assert(!idToFrame(Frames.back()).IsInlineFrame &&
             "The last frame should not be inlined");

      // Record the callsites for each function. Skip the first frame of the
      // first address since it is the allocation site itself that is recorded
      // as an alloc site.
      for (size_t J = 0; J < Frames.size(); J++) {
        if (I == 0 && J == 0)
          continue;
        // We attach the entire bottom-up frame here for the callsite even
        // though we only need the frames up to and including the frame for
        // Frames[J].Function. This will enable better deduplication for
        // compression in the future.
        const GlobalValue::GUID Guid = idToFrame(Frames[J]).Function;
        PerFunctionCallSites[Guid].insert(&Frames);
      }

      // Add all the frames to the current allocation callstack.
      Callstack.append(Frames.begin(), Frames.end());
    }

    // We attach the memprof record to each function bottom-up, including the
    // first non-inline frame.
    for (size_t I = 0; /*Break out using the condition below*/; I++) {
      const Frame &F = idToFrame(Callstack[I]);
      auto Result =
          FunctionProfileData.insert({F.Function, IndexedMemProfRecord()});
      IndexedMemProfRecord &Record = Result.first->second;
      Record.AllocSites.emplace_back(Callstack, Entry.second);

      if (!F.IsInlineFrame)
        break;
    }
  }

  // Fill in the related callsites per function.
  for (const auto &[Id, Locs] : PerFunctionCallSites) {
    // Some functions may have only callsite data and no allocation data. Here
    // we insert a new entry for callsite data if we need to.
    auto Result = FunctionProfileData.insert({Id, IndexedMemProfRecord()});
    IndexedMemProfRecord &Record = Result.first->second;
    for (LocationPtr Loc : Locs) {
      Record.CallSites.push_back(*Loc);
    }
  }

  return Error::success();
}
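
// Symbolizes each unique virtual address in the stack map exactly once,
// caching the resulting frame ids, and discards addresses which cannot be
// symbolized or which belong to the memprof runtime.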
Error RawMemProfReader::symbolizeAndFilterStackFrames() {
  // The specifier to use when symbolization is requested.
  const DILineInfoSpecifier Specifier(
      DILineInfoSpecifier::FileLineInfoKind::RawValue,
      DILineInfoSpecifier::FunctionNameKind::LinkageName);

  // For entries where all PCs in the callstack are discarded, we erase the
  // entry from the stack map.
  llvm::SmallVector<uint64_t> EntriesToErase;
  // We keep track of all prior discarded entries so that we can avoid invoking
  // the symbolizer for such entries.
  llvm::DenseSet<uint64_t> AllVAddrsToDiscard;
  for (auto &Entry : StackMap) {
    for (const uint64_t VAddr : Entry.getSecond()) {
      // Check if we have already symbolized and cached the result or if we
      // don't want to attempt symbolization since we know this address is bad.
      // In this case the address is also removed from the current callstack.
      if (SymbolizedFrame.count(VAddr) > 0 ||
          AllVAddrsToDiscard.contains(VAddr))
        continue;

      Expected<DIInliningInfo> DIOr = Symbolizer->symbolizeInlinedCode(
          getModuleOffset(VAddr), Specifier, /*UseSymbolTable=*/false);
      if (!DIOr)
        return DIOr.takeError();
      DIInliningInfo DI = DIOr.get();

      // Drop frames which we can't symbolize or if they belong to the runtime.
      if (DI.getFrame(0).FunctionName == DILineInfo::BadString ||
          isRuntimePath(DI.getFrame(0).FileName)) {
        AllVAddrsToDiscard.insert(VAddr);
        continue;
      }

      for (size_t I = 0, NumFrames = DI.getNumberOfFrames(); I < NumFrames;
           I++) {
        const auto &DIFrame = DI.getFrame(I);
        const uint64_t Guid =
            IndexedMemProfRecord::getGUID(DIFrame.FunctionName);
        const Frame F(Guid, DIFrame.Line - DIFrame.StartLine, DIFrame.Column,
                      // Only the last entry is not an inlined location.
                      I != NumFrames - 1);
        // Here we retain a mapping from the GUID to symbol name instead of
        // adding it to the frame object directly to reduce memory overhead.
        // This is because there can be many unique frames, particularly for
        // callsite frames.
        if (KeepSymbolName)
          GuidToSymbolName.insert({Guid, DIFrame.FunctionName});

        const FrameId Hash = F.hash();
        IdToFrame.insert({Hash, F});
        SymbolizedFrame[VAddr].push_back(Hash);
      }
    }

    auto &CallStack = Entry.getSecond();
    llvm::erase_if(CallStack, [&AllVAddrsToDiscard](const uint64_t A) {
      return AllVAddrsToDiscard.contains(A);
    });
    if (CallStack.empty())
      EntriesToErase.push_back(Entry.getFirst());
  }

  // Drop the entries where the callstack is empty.
  for (const uint64_t Id : EntriesToErase) {
    StackMap.erase(Id);
    CallstackProfileData.erase(Id);
  }

  if (StackMap.empty())
    return make_error<InstrProfError>(
        instrprof_error::malformed,
        "no entries in callstack map after symbolization");

  return Error::success();
}
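
// Iterates over the (possibly repeated) headers in the raw profile buffer and
// accumulates the segment, MemInfoBlock, and callstack sections from each
// dump.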
Error RawMemProfReader::readRawProfile(
    std::unique_ptr<MemoryBuffer> DataBuffer) {
  const char *Next = DataBuffer->getBufferStart();

  while (Next < DataBuffer->getBufferEnd()) {
    auto *Header = reinterpret_cast<const memprof::Header *>(Next);

    // Read in the segment information and check whether it's the same across
    // all profiles in this binary file.
    const llvm::SmallVector<SegmentEntry> Entries =
        readSegmentEntries(Next + Header->SegmentOffset);
    if (!SegmentInfo.empty() && SegmentInfo != Entries) {
      // We do not expect segment information to change when deserializing from
      // the same binary profile file. This can happen if dynamic libraries are
      // loaded/unloaded between profile dumps.
      return make_error<InstrProfError>(
          instrprof_error::malformed,
          "memprof raw profile has different segment information");
    }
    SegmentInfo.assign(Entries.begin(), Entries.end());

    // Read in the MemInfoBlocks. Merge them based on stack id - we assume that
    // raw profiles in the same binary file are from the same process so the
    // stackdepot ids are the same.
    for (const auto &Value : readMemInfoBlocks(Next + Header->MIBOffset)) {
      if (CallstackProfileData.count(Value.first)) {
        CallstackProfileData[Value.first].Merge(Value.second);
      } else {
        CallstackProfileData[Value.first] = Value.second;
      }
    }

    // Read in the callstack for each id. For multiple raw profiles in the
    // same file, we expect the callstack to be the same for a unique id.
    const CallStackMap CSM = readStackInfo(Next + Header->StackOffset);
    if (StackMap.empty()) {
      StackMap = CSM;
    } else {
      if (mergeStackMap(CSM, StackMap))
        return make_error<InstrProfError>(
            instrprof_error::malformed,
            "memprof raw profile got different call stack for same id");
    }

    Next += Header->TotalSize;
  }

  return Error::success();
}

object::SectionedAddress
RawMemProfReader::getModuleOffset(const uint64_t VirtualAddress) {
  LLVM_DEBUG({
    SegmentEntry *ContainingSegment = nullptr;
    for (auto &SE : SegmentInfo) {
      if (VirtualAddress > SE.Start && VirtualAddress <= SE.End) {
        ContainingSegment = &SE;
      }
    }

    // Ensure that the virtual address is valid.
    assert(ContainingSegment && "Could not find a segment entry");
  });

  // TODO: Compute the file offset based on the maps and program headers. For
  // now this only works for non-PIE binaries.
  return object::SectionedAddress{VirtualAddress};
}
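
// Yields the next (GUID, MemProfRecord) pair, converting indexed frame ids
// back into Frame objects, with symbol names attached when KeepSymbolName is
// set.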
Error RawMemProfReader::readNextRecord(GuidMemProfRecordPair &GuidRecord) {
  if (FunctionProfileData.empty())
    return make_error<InstrProfError>(instrprof_error::empty_raw_profile);

  if (Iter == FunctionProfileData.end())
    return make_error<InstrProfError>(instrprof_error::eof);

  auto IdToFrameCallback = [this](const FrameId Id) {
    Frame F = this->idToFrame(Id);
    if (!this->KeepSymbolName)
      return F;
    auto Iter = this->GuidToSymbolName.find(F.Function);
    assert(Iter != this->GuidToSymbolName.end());
    F.SymbolName = Iter->getSecond();
    return F;
  };

  const IndexedMemProfRecord &IndexedRecord = Iter->second;
  GuidRecord = {Iter->first, MemProfRecord(IndexedRecord, IdToFrameCallback)};
  Iter++;
  return Error::success();
}
} // namespace memprof
} // namespace llvm