// InstrProfWriter.cpp
  1. //===- InstrProfWriter.cpp - Instrumented profiling writer ----------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file contains support for writing profiling data for clang's
  10. // instrumentation based PGO and coverage.
  11. //
  12. //===----------------------------------------------------------------------===//
  13. #include "llvm/ProfileData/InstrProfWriter.h"
  14. #include "llvm/ADT/STLExtras.h"
  15. #include "llvm/ADT/StringRef.h"
  16. #include "llvm/IR/ProfileSummary.h"
  17. #include "llvm/ProfileData/InstrProf.h"
  18. #include "llvm/ProfileData/MemProf.h"
  19. #include "llvm/ProfileData/ProfileCommon.h"
  20. #include "llvm/Support/Endian.h"
  21. #include "llvm/Support/EndianStream.h"
  22. #include "llvm/Support/Error.h"
  23. #include "llvm/Support/MemoryBuffer.h"
  24. #include "llvm/Support/OnDiskHashTable.h"
  25. #include "llvm/Support/raw_ostream.h"
  26. #include <cstdint>
  27. #include <memory>
  28. #include <string>
  29. #include <tuple>
  30. #include <utility>
  31. #include <vector>
  32. using namespace llvm;
// A struct to define how the data stream should be patched. For Indexed
// profiling, only uint64_t data type is needed.
struct PatchItem {
  uint64_t Pos; // Where to patch: byte offset into the output stream.
  uint64_t *D;  // Pointer to an array of source data.
  int N;        // Number of elements in \c D array.
};
  40. namespace llvm {
// A wrapper class to abstract writer stream with support of bytes
// back patching. It wraps either a seekable raw_fd_ostream or an
// in-memory raw_string_ostream and writes all multi-byte values
// little-endian.
class ProfOStream {
public:
  ProfOStream(raw_fd_ostream &FD)
      : IsFDOStream(true), OS(FD), LE(FD, support::little) {}
  ProfOStream(raw_string_ostream &STR)
      : IsFDOStream(false), OS(STR), LE(STR, support::little) {}

  // Current write position in the underlying stream.
  uint64_t tell() { return OS.tell(); }
  // Write \p V as a little-endian 64-bit value.
  void write(uint64_t V) { LE.write<uint64_t>(V); }
  // Write a single raw byte.
  void writeByte(uint8_t V) { LE.write<uint8_t>(V); }

  // \c patch can only be called when all data is written and flushed.
  // For raw_string_ostream, the patch is done on the target string
  // directly and it won't be reflected in the stream's internal buffer.
  void patch(PatchItem *P, int NItems) {
    using namespace support;
    if (IsFDOStream) {
      raw_fd_ostream &FDOStream = static_cast<raw_fd_ostream &>(OS);
      // Remember the end-of-stream position so it can be restored after
      // seeking around to apply the patches.
      const uint64_t LastPos = FDOStream.tell();
      for (int K = 0; K < NItems; K++) {
        FDOStream.seek(P[K].Pos);
        for (int I = 0; I < P[K].N; I++)
          write(P[K].D[I]);
      }
      // Reset the stream to the last position after patching so that users
      // don't accidentally overwrite data. This makes it consistent with
      // the string stream below which replaces the data directly.
      FDOStream.seek(LastPos);
    } else {
      raw_string_ostream &SOStream = static_cast<raw_string_ostream &>(OS);
      std::string &Data = SOStream.str(); // with flush
      for (int K = 0; K < NItems; K++) {
        for (int I = 0; I < P[K].N; I++) {
          // Overwrite sizeof(uint64_t) bytes at the recorded offset with
          // the little-endian encoding of the patched value.
          uint64_t Bytes = endian::byte_swap<uint64_t, little>(P[K].D[I]);
          Data.replace(P[K].Pos + I * sizeof(uint64_t), sizeof(uint64_t),
                       (const char *)&Bytes, sizeof(uint64_t));
        }
      }
    }
  }

  // If \c OS is an instance of \c raw_fd_ostream, this field will be
  // true. Otherwise, \c OS will be an raw_string_ostream.
  bool IsFDOStream;
  raw_ostream &OS;
  support::endian::Writer LE;
};
// Writer trait for the OnDiskChainedHashTableGenerator used by
// InstrProfWriter::writeImpl: keys are function names, data is the
// per-function map of (hash -> InstrProfRecord).
class InstrProfRecordWriterTrait {
public:
  using key_type = StringRef;
  using key_type_ref = StringRef;

  using data_type = const InstrProfWriter::ProfilingData *const;
  using data_type_ref = const InstrProfWriter::ProfilingData *const;

  using hash_value_type = uint64_t;
  using offset_type = uint64_t;

  // Endianness used for the serialized value-profile payload; overridable
  // through InstrProfWriter::setValueProfDataEndianness (testing only).
  support::endianness ValueProfDataEndianness = support::little;
  // Summary builders fed as each record is emitted in EmitData().
  InstrProfSummaryBuilder *SummaryBuilder;
  InstrProfSummaryBuilder *CSSummaryBuilder;

  InstrProfRecordWriterTrait() = default;

  static hash_value_type ComputeHash(key_type_ref K) {
    return IndexedInstrProf::ComputeHash(K);
  }

  // Emit the key and data sizes for one bucket entry and return them.
  static std::pair<offset_type, offset_type>
  EmitKeyDataLength(raw_ostream &Out, key_type_ref K, data_type_ref V) {
    using namespace support;

    endian::Writer LE(Out, little);

    offset_type N = K.size();
    LE.write<offset_type>(N);

    // Compute the total serialized size of all records for this key.
    offset_type M = 0;
    for (const auto &ProfileData : *V) {
      const InstrProfRecord &ProfRecord = ProfileData.second;
      M += sizeof(uint64_t); // The function hash
      M += sizeof(uint64_t); // The size of the Counts vector
      M += ProfRecord.Counts.size() * sizeof(uint64_t);

      // Value data
      M += ValueProfData::getSize(ProfileData.second);
    }
    LE.write<offset_type>(M);

    return std::make_pair(N, M);
  }

  void EmitKey(raw_ostream &Out, key_type_ref K, offset_type N) {
    Out.write(K.data(), N);
  }

  void EmitData(raw_ostream &Out, key_type_ref, data_type_ref V, offset_type) {
    using namespace support;

    endian::Writer LE(Out, little);
    for (const auto &ProfileData : *V) {
      const InstrProfRecord &ProfRecord = ProfileData.second;
      // Records whose hash carries the CS flag feed the context-sensitive
      // summary; everything else feeds the regular summary.
      if (NamedInstrProfRecord::hasCSFlagInHash(ProfileData.first))
        CSSummaryBuilder->addRecord(ProfRecord);
      else
        SummaryBuilder->addRecord(ProfRecord);

      LE.write<uint64_t>(ProfileData.first); // Function hash
      LE.write<uint64_t>(ProfRecord.Counts.size());
      for (uint64_t I : ProfRecord.Counts)
        LE.write<uint64_t>(I);

      // Write value data
      std::unique_ptr<ValueProfData> VDataPtr =
          ValueProfData::serializeFrom(ProfileData.second);
      uint32_t S = VDataPtr->getSize();
      VDataPtr->swapBytesFromHost(ValueProfDataEndianness);
      Out.write((const char *)VDataPtr.get(), S);
    }
  }
};
  145. } // end namespace llvm
InstrProfWriter::InstrProfWriter(bool Sparse)
    : Sparse(Sparse), InfoObj(new InstrProfRecordWriterTrait()) {}

// InfoObj is heap-allocated in the constructor and owned by the writer.
InstrProfWriter::~InstrProfWriter() { delete InfoObj; }
// Internal interface for testing purpose only.
// Overrides the endianness used when serializing value-profile payloads.
void InstrProfWriter::setValueProfDataEndianness(
    support::endianness Endianness) {
  InfoObj->ValueProfDataEndianness = Endianness;
}
// Toggle sparse output mode; see shouldEncodeData() for its effect.
void InstrProfWriter::setOutputSparse(bool Sparse) {
  this->Sparse = Sparse;
}
void InstrProfWriter::addRecord(NamedInstrProfRecord &&I, uint64_t Weight,
                                function_ref<void(Error)> Warn) {
  // Copy the key fields first: \c I is moved into the overload below, so
  // reading I.Name / I.Hash afterwards would be use-after-move.
  auto Name = I.Name;
  auto Hash = I.Hash;
  addRecord(Name, Hash, std::move(I), Weight, Warn);
}
// Compute overlap statistics between \p Other (a record from the "test"
// profile) and the matching record held by this writer (the "base"
// profile), accumulating into \p Overlap and \p FuncLevelOverlap.
void InstrProfWriter::overlapRecord(NamedInstrProfRecord &&Other,
                                    OverlapStats &Overlap,
                                    OverlapStats &FuncLevelOverlap,
                                    const OverlapFuncFilters &FuncFilter) {
  auto Name = Other.Name;
  auto Hash = Other.Hash;
  Other.accumulateCounts(FuncLevelOverlap.Test);
  // Function only exists in the test profile: record it as unique.
  if (FunctionData.find(Name) == FunctionData.end()) {
    Overlap.addOneUnique(FuncLevelOverlap.Test);
    return;
  }
  // A test function whose accumulated count sum is below one only bumps
  // the entry count; there is nothing to overlap.
  if (FuncLevelOverlap.Test.CountSum < 1.0f) {
    Overlap.Overlap.NumEntries += 1;
    return;
  }
  auto &ProfileDataMap = FunctionData[Name];
  bool NewFunc;
  ProfilingData::iterator Where;
  std::tie(Where, NewFunc) =
      ProfileDataMap.insert(std::make_pair(Hash, InstrProfRecord()));
  // Same name but a hash we have never seen: a mismatch, not an overlap.
  if (NewFunc) {
    Overlap.addOneMismatch(FuncLevelOverlap.Test);
    return;
  }
  InstrProfRecord &Dest = Where->second;

  uint64_t ValueCutoff = FuncFilter.ValueCutoff;
  // Functions matching the name filter bypass the value cutoff.
  if (!FuncFilter.NameFilter.empty() && Name.contains(FuncFilter.NameFilter))
    ValueCutoff = 0;

  Dest.overlap(Other, Overlap, FuncLevelOverlap, ValueCutoff);
}
  193. void InstrProfWriter::addRecord(StringRef Name, uint64_t Hash,
  194. InstrProfRecord &&I, uint64_t Weight,
  195. function_ref<void(Error)> Warn) {
  196. auto &ProfileDataMap = FunctionData[Name];
  197. bool NewFunc;
  198. ProfilingData::iterator Where;
  199. std::tie(Where, NewFunc) =
  200. ProfileDataMap.insert(std::make_pair(Hash, InstrProfRecord()));
  201. InstrProfRecord &Dest = Where->second;
  202. auto MapWarn = [&](instrprof_error E) {
  203. Warn(make_error<InstrProfError>(E));
  204. };
  205. if (NewFunc) {
  206. // We've never seen a function with this name and hash, add it.
  207. Dest = std::move(I);
  208. if (Weight > 1)
  209. Dest.scale(Weight, 1, MapWarn);
  210. } else {
  211. // We're updating a function we've seen before.
  212. Dest.merge(I, Weight, MapWarn);
  213. }
  214. Dest.sortValueData();
  215. }
  216. void InstrProfWriter::addMemProfRecord(
  217. const Function::GUID Id, const memprof::IndexedMemProfRecord &Record) {
  218. auto Result = MemProfRecordData.insert({Id, Record});
  219. // If we inserted a new record then we are done.
  220. if (Result.second) {
  221. return;
  222. }
  223. memprof::IndexedMemProfRecord &Existing = Result.first->second;
  224. Existing.merge(Record);
  225. }
  226. bool InstrProfWriter::addMemProfFrame(const memprof::FrameId Id,
  227. const memprof::Frame &Frame,
  228. function_ref<void(Error)> Warn) {
  229. auto Result = MemProfFrameData.insert({Id, Frame});
  230. // If a mapping already exists for the current frame id and it does not
  231. // match the new mapping provided then reset the existing contents and bail
  232. // out. We don't support the merging of memprof data whose Frame -> Id
  233. // mapping across profiles is inconsistent.
  234. if (!Result.second && Result.first->second != Frame) {
  235. Warn(make_error<InstrProfError>(instrprof_error::malformed,
  236. "frame to id mapping mismatch"));
  237. return false;
  238. }
  239. return true;
  240. }
  241. void InstrProfWriter::addBinaryIds(ArrayRef<llvm::object::BuildID> BIs) {
  242. llvm::append_range(BinaryIds, BIs);
  243. }
// Merge all state (function records, binary ids, memprof frames and
// records) from \p IPW into this writer. Problems are reported via
// \p Warn.
void InstrProfWriter::mergeRecordsFromWriter(InstrProfWriter &&IPW,
                                             function_ref<void(Error)> Warn) {
  // Move every (name, hash) record over with unit weight; addRecord
  // handles merging with records already present here.
  for (auto &I : IPW.FunctionData)
    for (auto &Func : I.getValue())
      addRecord(I.getKey(), Func.first, std::move(Func.second), 1, Warn);

  BinaryIds.reserve(BinaryIds.size() + IPW.BinaryIds.size());
  for (auto &I : IPW.BinaryIds)
    addBinaryIds(I);

  MemProfFrameData.reserve(IPW.MemProfFrameData.size());
  for (auto &I : IPW.MemProfFrameData) {
    // If we weren't able to add the frame mappings then it doesn't make sense
    // to try to merge the records from this profile.
    if (!addMemProfFrame(I.first, I.second, Warn))
      return;
  }

  MemProfRecordData.reserve(IPW.MemProfRecordData.size());
  for (auto &I : IPW.MemProfRecordData) {
    addMemProfRecord(I.first, I.second);
  }
}
  264. bool InstrProfWriter::shouldEncodeData(const ProfilingData &PD) {
  265. if (!Sparse)
  266. return true;
  267. for (const auto &Func : PD) {
  268. const InstrProfRecord &IPR = Func.second;
  269. if (llvm::any_of(IPR.Counts, [](uint64_t Count) { return Count > 0; }))
  270. return true;
  271. }
  272. return false;
  273. }
  274. static void setSummary(IndexedInstrProf::Summary *TheSummary,
  275. ProfileSummary &PS) {
  276. using namespace IndexedInstrProf;
  277. const std::vector<ProfileSummaryEntry> &Res = PS.getDetailedSummary();
  278. TheSummary->NumSummaryFields = Summary::NumKinds;
  279. TheSummary->NumCutoffEntries = Res.size();
  280. TheSummary->set(Summary::MaxFunctionCount, PS.getMaxFunctionCount());
  281. TheSummary->set(Summary::MaxBlockCount, PS.getMaxCount());
  282. TheSummary->set(Summary::MaxInternalBlockCount, PS.getMaxInternalCount());
  283. TheSummary->set(Summary::TotalBlockCount, PS.getTotalCount());
  284. TheSummary->set(Summary::TotalNumBlocks, PS.getNumCounts());
  285. TheSummary->set(Summary::TotalNumFunctions, PS.getNumFunctions());
  286. for (unsigned I = 0; I < Res.size(); I++)
  287. TheSummary->setEntry(I, Res[I]);
  288. }
// Serialize the whole indexed profile to \p OS. Layout: header (with
// back-patched offsets), reserved summary slots, the name->records hash
// table, optional MemProf section, binary-id section, then a final patch
// pass that fills in the reserved header/summary slots.
Error InstrProfWriter::writeImpl(ProfOStream &OS) {
  using namespace IndexedInstrProf;
  using namespace support;

  OnDiskChainedHashTableGenerator<InstrProfRecordWriterTrait> Generator;

  // Summary builders are filled in by the trait while the hash table is
  // emitted below.
  InstrProfSummaryBuilder ISB(ProfileSummaryBuilder::DefaultCutoffs);
  InfoObj->SummaryBuilder = &ISB;
  InstrProfSummaryBuilder CSISB(ProfileSummaryBuilder::DefaultCutoffs);
  InfoObj->CSSummaryBuilder = &CSISB;

  // Populate the hash table generator.
  for (const auto &I : FunctionData)
    if (shouldEncodeData(I.getValue()))
      Generator.insert(I.getKey(), &I.getValue());

  // Write the header.
  IndexedInstrProf::Header Header;
  Header.Magic = IndexedInstrProf::Magic;
  Header.Version = IndexedInstrProf::ProfVersion::CurrentVersion;
  // Encode the profile kind as variant bits in the version word.
  if (static_cast<bool>(ProfileKind & InstrProfKind::IRInstrumentation))
    Header.Version |= VARIANT_MASK_IR_PROF;
  if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive))
    Header.Version |= VARIANT_MASK_CSIR_PROF;
  if (static_cast<bool>(ProfileKind &
                        InstrProfKind::FunctionEntryInstrumentation))
    Header.Version |= VARIANT_MASK_INSTR_ENTRY;
  if (static_cast<bool>(ProfileKind & InstrProfKind::SingleByteCoverage))
    Header.Version |= VARIANT_MASK_BYTE_COVERAGE;
  if (static_cast<bool>(ProfileKind & InstrProfKind::FunctionEntryOnly))
    Header.Version |= VARIANT_MASK_FUNCTION_ENTRY_ONLY;
  if (static_cast<bool>(ProfileKind & InstrProfKind::MemProf))
    Header.Version |= VARIANT_MASK_MEMPROF;

  Header.Unused = 0;
  Header.HashType = static_cast<uint64_t>(IndexedInstrProf::HashType);
  Header.HashOffset = 0;
  Header.MemProfOffset = 0;
  Header.BinaryIdOffset = 0;
  int N = sizeof(IndexedInstrProf::Header) / sizeof(uint64_t);

  // Only write out all the fields except 'HashOffset', 'MemProfOffset' and
  // 'BinaryIdOffset'. We need to remember the offset of these fields to allow
  // back patching later.
  for (int I = 0; I < N - 3; I++)
    OS.write(reinterpret_cast<uint64_t *>(&Header)[I]);

  // Save the location of Header.HashOffset field in \c OS.
  uint64_t HashTableStartFieldOffset = OS.tell();
  // Reserve the space for HashOffset field.
  OS.write(0);

  // Save the location of MemProf profile data. This is stored in two parts as
  // the schema and as a separate on-disk chained hashtable.
  uint64_t MemProfSectionOffset = OS.tell();
  // Reserve space for the MemProf table field to be patched later if this
  // profile contains memory profile information.
  OS.write(0);

  // Save the location of binary ids section.
  uint64_t BinaryIdSectionOffset = OS.tell();
  // Reserve space for the BinaryIdOffset field to be patched later if this
  // profile contains binary ids.
  OS.write(0);

  // Reserve space to write profile summary data.
  uint32_t NumEntries = ProfileSummaryBuilder::DefaultCutoffs.size();
  uint32_t SummarySize = Summary::getSize(Summary::NumKinds, NumEntries);
  // Remember the summary offset.
  uint64_t SummaryOffset = OS.tell();
  for (unsigned I = 0; I < SummarySize / sizeof(uint64_t); I++)
    OS.write(0);
  uint64_t CSSummaryOffset = 0;
  uint64_t CSSummarySize = 0;
  if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive)) {
    CSSummaryOffset = OS.tell();
    CSSummarySize = SummarySize / sizeof(uint64_t);
    for (unsigned I = 0; I < CSSummarySize; I++)
      OS.write(0);
  }

  // Write the hash table.
  uint64_t HashTableStart = Generator.Emit(OS.OS, *InfoObj);

  // Write the MemProf profile data if we have it. This includes a simple schema
  // with the format described below followed by the hashtable:
  // uint64_t RecordTableOffset = RecordTableGenerator.Emit
  // uint64_t FramePayloadOffset = Stream offset before emitting the frame table
  // uint64_t FrameTableOffset = FrameTableGenerator.Emit
  // uint64_t Num schema entries
  // uint64_t Schema entry 0
  // uint64_t Schema entry 1
  // ....
  // uint64_t Schema entry N - 1
  // OnDiskChainedHashTable MemProfRecordData
  // OnDiskChainedHashTable MemProfFrameData
  uint64_t MemProfSectionStart = 0;
  if (static_cast<bool>(ProfileKind & InstrProfKind::MemProf)) {
    MemProfSectionStart = OS.tell();
    OS.write(0ULL); // Reserve space for the memprof record table offset.
    OS.write(0ULL); // Reserve space for the memprof frame payload offset.
    OS.write(0ULL); // Reserve space for the memprof frame table offset.

    auto Schema = memprof::PortableMemInfoBlock::getSchema();
    OS.write(static_cast<uint64_t>(Schema.size()));
    for (const auto Id : Schema) {
      OS.write(static_cast<uint64_t>(Id));
    }

    auto RecordWriter = std::make_unique<memprof::RecordWriterTrait>();
    RecordWriter->Schema = &Schema;
    OnDiskChainedHashTableGenerator<memprof::RecordWriterTrait>
        RecordTableGenerator;
    for (auto &I : MemProfRecordData) {
      // Insert the key (func hash) and value (memprof record).
      RecordTableGenerator.insert(I.first, I.second);
    }

    uint64_t RecordTableOffset =
        RecordTableGenerator.Emit(OS.OS, *RecordWriter);

    uint64_t FramePayloadOffset = OS.tell();

    auto FrameWriter = std::make_unique<memprof::FrameWriterTrait>();
    OnDiskChainedHashTableGenerator<memprof::FrameWriterTrait>
        FrameTableGenerator;
    for (auto &I : MemProfFrameData) {
      // Insert the key (frame id) and value (frame contents).
      FrameTableGenerator.insert(I.first, I.second);
    }

    uint64_t FrameTableOffset = FrameTableGenerator.Emit(OS.OS, *FrameWriter);

    // Back-patch the three offsets reserved at the top of this section.
    PatchItem PatchItems[] = {
        {MemProfSectionStart, &RecordTableOffset, 1},
        {MemProfSectionStart + sizeof(uint64_t), &FramePayloadOffset, 1},
        {MemProfSectionStart + 2 * sizeof(uint64_t), &FrameTableOffset, 1},
    };
    OS.patch(PatchItems, 3);
  }

  // BinaryIdSection has two parts:
  // 1. uint64_t BinaryIdsSectionSize
  // 2. list of binary ids that consist of:
  //    a. uint64_t BinaryIdLength
  //    b. uint8_t  BinaryIdData
  //    c. uint8_t  Padding (if necessary)
  uint64_t BinaryIdSectionStart = OS.tell();
  // Calculate size of binary section.
  uint64_t BinaryIdsSectionSize = 0;

  // Remove duplicate binary ids.
  llvm::sort(BinaryIds);
  BinaryIds.erase(std::unique(BinaryIds.begin(), BinaryIds.end()),
                  BinaryIds.end());

  for (auto BI : BinaryIds) {
    // Increment by binary id length data type size.
    BinaryIdsSectionSize += sizeof(uint64_t);
    // Increment by binary id data length, aligned to 8 bytes.
    BinaryIdsSectionSize += alignToPowerOf2(BI.size(), sizeof(uint64_t));
  }
  // Write binary ids section size.
  OS.write(BinaryIdsSectionSize);

  for (auto BI : BinaryIds) {
    uint64_t BILen = BI.size();
    // Write binary id length.
    OS.write(BILen);
    // Write binary id data.
    for (unsigned K = 0; K < BILen; K++)
      OS.writeByte(BI[K]);
    // Write padding if necessary.
    uint64_t PaddingSize = alignToPowerOf2(BILen, sizeof(uint64_t)) - BILen;
    for (unsigned K = 0; K < PaddingSize; K++)
      OS.writeByte(0);
  }

  // Allocate space for data to be serialized out.
  std::unique_ptr<IndexedInstrProf::Summary> TheSummary =
      IndexedInstrProf::allocSummary(SummarySize);
  // Compute the Summary and copy the data to the data
  // structure to be serialized out (to disk or buffer).
  std::unique_ptr<ProfileSummary> PS = ISB.getSummary();
  setSummary(TheSummary.get(), *PS);
  InfoObj->SummaryBuilder = nullptr;

  // For Context Sensitive summary.
  std::unique_ptr<IndexedInstrProf::Summary> TheCSSummary = nullptr;
  if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive)) {
    TheCSSummary = IndexedInstrProf::allocSummary(SummarySize);
    std::unique_ptr<ProfileSummary> CSPS = CSISB.getSummary();
    setSummary(TheCSSummary.get(), *CSPS);
  }
  InfoObj->CSSummaryBuilder = nullptr;

  // Now do the final patch:
  PatchItem PatchItems[] = {
      // Patch the Header.HashOffset field.
      {HashTableStartFieldOffset, &HashTableStart, 1},
      // Patch the Header.MemProfOffset (=0 for profiles without MemProf
      // data).
      {MemProfSectionOffset, &MemProfSectionStart, 1},
      // Patch the Header.BinaryIdSectionOffset.
      {BinaryIdSectionOffset, &BinaryIdSectionStart, 1},
      // Patch the summary data.
      {SummaryOffset, reinterpret_cast<uint64_t *>(TheSummary.get()),
       (int)(SummarySize / sizeof(uint64_t))},
      {CSSummaryOffset, reinterpret_cast<uint64_t *>(TheCSSummary.get()),
       (int)CSSummarySize}};
  OS.patch(PatchItems, std::size(PatchItems));

  // Final sanity pass over everything we just emitted.
  for (const auto &I : FunctionData)
    for (const auto &F : I.getValue())
      if (Error E = validateRecord(F.second))
        return E;

  return Error::success();
}
  480. Error InstrProfWriter::write(raw_fd_ostream &OS) {
  481. // Write the hash table.
  482. ProfOStream POS(OS);
  483. return writeImpl(POS);
  484. }
  485. std::unique_ptr<MemoryBuffer> InstrProfWriter::writeBuffer() {
  486. std::string Data;
  487. raw_string_ostream OS(Data);
  488. ProfOStream POS(OS);
  489. // Write the hash table.
  490. if (Error E = writeImpl(POS))
  491. return nullptr;
  492. // Return this in an aligned memory buffer.
  493. return MemoryBuffer::getMemBufferCopy(Data);
  494. }
// Human-readable name for each value-profiling kind, expanded from the
// canonical X-macro list in InstrProfData.inc (indexed by kind value).
static const char *ValueProfKindStr[] = {
#define VALUE_PROF_KIND(Enumerator, Value, Descr) #Enumerator,
#include "llvm/ProfileData/InstrProfData.inc"
};
  499. Error InstrProfWriter::validateRecord(const InstrProfRecord &Func) {
  500. for (uint32_t VK = 0; VK <= IPVK_Last; VK++) {
  501. uint32_t NS = Func.getNumValueSites(VK);
  502. if (!NS)
  503. continue;
  504. for (uint32_t S = 0; S < NS; S++) {
  505. uint32_t ND = Func.getNumValueDataForSite(VK, S);
  506. std::unique_ptr<InstrProfValueData[]> VD = Func.getValueForSite(VK, S);
  507. DenseSet<uint64_t> SeenValues;
  508. for (uint32_t I = 0; I < ND; I++)
  509. if ((VK != IPVK_IndirectCallTarget) && !SeenValues.insert(VD[I].Value).second)
  510. return make_error<InstrProfError>(instrprof_error::invalid_prof);
  511. }
  512. }
  513. return Error::success();
  514. }
// Emit one function record in the textual profile format: name, hash,
// counter list, then (if present) the value-profile sites per kind.
// \p Symtab resolves indirect-call target values back to function names.
void InstrProfWriter::writeRecordInText(StringRef Name, uint64_t Hash,
                                        const InstrProfRecord &Func,
                                        InstrProfSymtab &Symtab,
                                        raw_fd_ostream &OS) {
  OS << Name << "\n";
  OS << "# Func Hash:\n" << Hash << "\n";
  OS << "# Num Counters:\n" << Func.Counts.size() << "\n";
  OS << "# Counter Values:\n";
  for (uint64_t Count : Func.Counts)
    OS << Count << "\n";

  uint32_t NumValueKinds = Func.getNumValueKinds();
  // Records without value-profile data end after the counters.
  if (!NumValueKinds) {
    OS << "\n";
    return;
  }

  OS << "# Num Value Kinds:\n" << Func.getNumValueKinds() << "\n";
  for (uint32_t VK = 0; VK < IPVK_Last + 1; VK++) {
    uint32_t NS = Func.getNumValueSites(VK);
    if (!NS)
      continue;
    OS << "# ValueKind = " << ValueProfKindStr[VK] << ":\n" << VK << "\n";
    OS << "# NumValueSites:\n" << NS << "\n";
    for (uint32_t S = 0; S < NS; S++) {
      uint32_t ND = Func.getNumValueDataForSite(VK, S);
      OS << ND << "\n";
      std::unique_ptr<InstrProfValueData[]> VD = Func.getValueForSite(VK, S);
      for (uint32_t I = 0; I < ND; I++) {
        // Indirect-call targets print as "name:count"; other kinds print
        // the raw value.
        if (VK == IPVK_IndirectCallTarget)
          OS << Symtab.getFuncNameOrExternalSymbol(VD[I].Value) << ":"
             << VD[I].Count << "\n";
        else
          OS << VD[I].Value << ":" << VD[I].Count << "\n";
      }
    }
  }

  OS << "\n";
}
// Emit the whole profile in textual format: kind flags first, then every
// record sorted by (name, hash), and finally a validation pass over the
// same records.
Error InstrProfWriter::writeText(raw_fd_ostream &OS) {
  // Check CS first since it implies an IR level profile.
  if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive))
    OS << "# CSIR level Instrumentation Flag\n:csir\n";
  else if (static_cast<bool>(ProfileKind & InstrProfKind::IRInstrumentation))
    OS << "# IR level Instrumentation Flag\n:ir\n";

  if (static_cast<bool>(ProfileKind &
                        InstrProfKind::FunctionEntryInstrumentation))
    OS << "# Always instrument the function entry block\n:entry_first\n";
  InstrProfSymtab Symtab;

  using FuncPair = detail::DenseMapPair<uint64_t, InstrProfRecord>;
  using RecordType = std::pair<StringRef, FuncPair>;
  SmallVector<RecordType, 4> OrderedFuncData;

  // Collect (name, record) pairs, registering each name with the symbol
  // table so indirect-call targets can be resolved when printing.
  for (const auto &I : FunctionData) {
    if (shouldEncodeData(I.getValue())) {
      if (Error E = Symtab.addFuncName(I.getKey()))
        return E;
      for (const auto &Func : I.getValue())
        OrderedFuncData.push_back(std::make_pair(I.getKey(), Func));
    }
  }

  // Deterministic output order: by name, then by function hash.
  llvm::sort(OrderedFuncData, [](const RecordType &A, const RecordType &B) {
    return std::tie(A.first, A.second.first) <
           std::tie(B.first, B.second.first);
  });

  for (const auto &record : OrderedFuncData) {
    const StringRef &Name = record.first;
    const FuncPair &Func = record.second;
    writeRecordInText(Name, Func.first, Func.second, Symtab, OS);
  }

  // Validation is a separate pass so every record is written out even if
  // a later one turns out to be invalid.
  for (const auto &record : OrderedFuncData) {
    const FuncPair &Func = record.second;
    if (Error E = validateRecord(Func.second))
      return E;
  }

  return Error::success();
}