PerfReader.h 22 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620
  1. //===-- PerfReader.h - perfscript reader -----------------------*- C++ -*-===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. #ifndef LLVM_TOOLS_LLVM_PROFGEN_PERFREADER_H
  9. #define LLVM_TOOLS_LLVM_PROFGEN_PERFREADER_H
  10. #include "ErrorHandling.h"
  11. #include "ProfiledBinary.h"
  12. #include "llvm/Support/Casting.h"
  13. #include "llvm/Support/CommandLine.h"
  14. #include "llvm/Support/Regex.h"
  15. #include <fstream>
  16. #include <list>
  17. #include <map>
  18. #include <vector>
  19. using namespace llvm;
  20. using namespace sampleprof;
  21. namespace llvm {
  22. namespace sampleprof {
  23. // Stream based trace line iterator
  24. class TraceStream {
  25. std::string CurrentLine;
  26. std::ifstream Fin;
  27. bool IsAtEoF = false;
  28. uint64_t LineNumber = 0;
  29. public:
  30. TraceStream(StringRef Filename) : Fin(Filename.str()) {
  31. if (!Fin.good())
  32. exitWithError("Error read input perf script file", Filename);
  33. advance();
  34. }
  35. StringRef getCurrentLine() {
  36. assert(!IsAtEoF && "Line iterator reaches the End-of-File!");
  37. return CurrentLine;
  38. }
  39. uint64_t getLineNumber() { return LineNumber; }
  40. bool isAtEoF() { return IsAtEoF; }
  41. // Read the next line
  42. void advance() {
  43. if (!std::getline(Fin, CurrentLine)) {
  44. IsAtEoF = true;
  45. return;
  46. }
  47. LineNumber++;
  48. }
  49. };
// The type of perfscript, decided by checkPerfScriptType on the trace file.
enum PerfScriptType {
  PERF_UNKNOWN = 0,   // Not determined yet
  PERF_INVALID = 1,   // No recognizable sample found in the trace
  PERF_LBR = 2,       // Only LBR sample
  PERF_LBR_STACK = 3, // Hybrid sample including call stack and LBR stack.
};
  57. // The parsed LBR sample entry.
  58. struct LBREntry {
  59. uint64_t Source = 0;
  60. uint64_t Target = 0;
  61. // An artificial branch stands for a series of consecutive branches starting
  62. // from the current binary with a transition through external code and
  63. // eventually landing back in the current binary.
  64. bool IsArtificial = false;
  65. LBREntry(uint64_t S, uint64_t T, bool I)
  66. : Source(S), Target(T), IsArtificial(I) {}
  67. };
  68. // Hash interface for generic data of type T
  69. // Data should implement a \fn getHashCode and a \fn isEqual
  70. // Currently getHashCode is non-virtual to avoid the overhead of calling vtable,
  71. // i.e we explicitly calculate hash of derived class, assign to base class's
  72. // HashCode. This also provides the flexibility for calculating the hash code
  73. // incrementally(like rolling hash) during frame stack unwinding since unwinding
  74. // only changes the leaf of frame stack. \fn isEqual is a virtual function,
  75. // which will have perf overhead. In the future, if we redesign a better hash
  76. // function, then we can just skip this or switch to non-virtual function(like
  77. // just ignore comparision if hash conflicts probabilities is low)
  78. template <class T> class Hashable {
  79. public:
  80. std::shared_ptr<T> Data;
  81. Hashable(const std::shared_ptr<T> &D) : Data(D) {}
  82. // Hash code generation
  83. struct Hash {
  84. uint64_t operator()(const Hashable<T> &Key) const {
  85. // Don't make it virtual for getHashCode
  86. assert(Key.Data->getHashCode() && "Should generate HashCode for it!");
  87. return Key.Data->getHashCode();
  88. }
  89. };
  90. // Hash equal
  91. struct Equal {
  92. bool operator()(const Hashable<T> &LHS, const Hashable<T> &RHS) const {
  93. // Precisely compare the data, vtable will have overhead.
  94. return LHS.Data->isEqual(RHS.Data.get());
  95. }
  96. };
  97. T *getPtr() const { return Data.get(); }
  98. };
  99. // Base class to extend for all types of perf sample
  100. struct PerfSample {
  101. uint64_t HashCode = 0;
  102. virtual ~PerfSample() = default;
  103. uint64_t getHashCode() const { return HashCode; }
  104. virtual bool isEqual(const PerfSample *K) const {
  105. return HashCode == K->HashCode;
  106. };
  107. // Utilities for LLVM-style RTTI
  108. enum PerfKind { PK_HybridSample };
  109. const PerfKind Kind;
  110. PerfKind getKind() const { return Kind; }
  111. PerfSample(PerfKind K) : Kind(K){};
  112. };
  113. // The parsed hybrid sample including call stack and LBR stack.
  114. struct HybridSample : public PerfSample {
  115. // Profiled binary that current frame address belongs to
  116. ProfiledBinary *Binary;
  117. // Call stack recorded in FILO(leaf to root) order
  118. SmallVector<uint64_t, 16> CallStack;
  119. // LBR stack recorded in FIFO order
  120. SmallVector<LBREntry, 16> LBRStack;
  121. HybridSample() : PerfSample(PK_HybridSample){};
  122. static bool classof(const PerfSample *K) {
  123. return K->getKind() == PK_HybridSample;
  124. }
  125. // Used for sample aggregation
  126. bool isEqual(const PerfSample *K) const override {
  127. const HybridSample *Other = dyn_cast<HybridSample>(K);
  128. if (Other->Binary != Binary)
  129. return false;
  130. const SmallVector<uint64_t, 16> &OtherCallStack = Other->CallStack;
  131. const SmallVector<LBREntry, 16> &OtherLBRStack = Other->LBRStack;
  132. if (CallStack.size() != OtherCallStack.size() ||
  133. LBRStack.size() != OtherLBRStack.size())
  134. return false;
  135. auto Iter = CallStack.begin();
  136. for (auto Address : OtherCallStack) {
  137. if (Address != *Iter++)
  138. return false;
  139. }
  140. for (size_t I = 0; I < OtherLBRStack.size(); I++) {
  141. if (LBRStack[I].Source != OtherLBRStack[I].Source ||
  142. LBRStack[I].Target != OtherLBRStack[I].Target)
  143. return false;
  144. }
  145. return true;
  146. }
  147. void genHashCode() {
  148. // Use simple DJB2 hash
  149. auto HashCombine = [](uint64_t H, uint64_t V) {
  150. return ((H << 5) + H) + V;
  151. };
  152. uint64_t Hash = 5381;
  153. Hash = HashCombine(Hash, reinterpret_cast<uint64_t>(Binary));
  154. for (const auto &Value : CallStack) {
  155. Hash = HashCombine(Hash, Value);
  156. }
  157. for (const auto &Entry : LBRStack) {
  158. Hash = HashCombine(Hash, Entry.Source);
  159. Hash = HashCombine(Hash, Entry.Target);
  160. }
  161. HashCode = Hash;
  162. }
  163. };
// After parsing the sample, we record the samples by aggregating them
// into this counter. The key stores the sample data and the value is
// the sample repeat times.
using AggregatedCounter =
    std::unordered_map<Hashable<PerfSample>, uint64_t,
                       Hashable<PerfSample>::Hash, Hashable<PerfSample>::Equal>;

// A (first, second, count) triple; used for both range samples (start, end,
// count) and branch samples (source, target, count) on trie nodes.
using SampleVector = SmallVector<std::tuple<uint64_t, uint64_t, uint64_t>, 16>;
  171. // The state for the unwinder, it doesn't hold the data but only keep the
  172. // pointer/index of the data, While unwinding, the CallStack is changed
  173. // dynamicially and will be recorded as the context of the sample
  174. struct UnwindState {
  175. // Profiled binary that current frame address belongs to
  176. const ProfiledBinary *Binary;
  177. // Call stack trie node
  178. struct ProfiledFrame {
  179. const uint64_t Address = 0;
  180. ProfiledFrame *Parent;
  181. SampleVector RangeSamples;
  182. SampleVector BranchSamples;
  183. std::unordered_map<uint64_t, std::unique_ptr<ProfiledFrame>> Children;
  184. ProfiledFrame(uint64_t Addr = 0, ProfiledFrame *P = nullptr)
  185. : Address(Addr), Parent(P) {}
  186. ProfiledFrame *getOrCreateChildFrame(uint64_t Address) {
  187. assert(Address && "Address can't be zero!");
  188. auto Ret = Children.emplace(
  189. Address, std::make_unique<ProfiledFrame>(Address, this));
  190. return Ret.first->second.get();
  191. }
  192. void recordRangeCount(uint64_t Start, uint64_t End, uint64_t Count) {
  193. RangeSamples.emplace_back(std::make_tuple(Start, End, Count));
  194. }
  195. void recordBranchCount(uint64_t Source, uint64_t Target, uint64_t Count) {
  196. BranchSamples.emplace_back(std::make_tuple(Source, Target, Count));
  197. }
  198. bool isDummyRoot() { return Address == 0; }
  199. };
  200. ProfiledFrame DummyTrieRoot;
  201. ProfiledFrame *CurrentLeafFrame;
  202. // Used to fall through the LBR stack
  203. uint32_t LBRIndex = 0;
  204. // Reference to HybridSample.LBRStack
  205. const SmallVector<LBREntry, 16> &LBRStack;
  206. // Used to iterate the address range
  207. InstructionPointer InstPtr;
  208. UnwindState(const HybridSample *Sample)
  209. : Binary(Sample->Binary), LBRStack(Sample->LBRStack),
  210. InstPtr(Sample->Binary, Sample->CallStack.front()) {
  211. initFrameTrie(Sample->CallStack);
  212. }
  213. bool validateInitialState() {
  214. uint64_t LBRLeaf = LBRStack[LBRIndex].Target;
  215. uint64_t LeafAddr = CurrentLeafFrame->Address;
  216. // When we take a stack sample, ideally the sampling distance between the
  217. // leaf IP of stack and the last LBR target shouldn't be very large.
  218. // Use a heuristic size (0x100) to filter out broken records.
  219. if (LeafAddr < LBRLeaf || LeafAddr >= LBRLeaf + 0x100) {
  220. WithColor::warning() << "Bogus trace: stack tip = "
  221. << format("%#010x", LeafAddr)
  222. << ", LBR tip = " << format("%#010x\n", LBRLeaf);
  223. return false;
  224. }
  225. return true;
  226. }
  227. void checkStateConsistency() {
  228. assert(InstPtr.Address == CurrentLeafFrame->Address &&
  229. "IP should align with context leaf");
  230. }
  231. const ProfiledBinary *getBinary() const { return Binary; }
  232. bool hasNextLBR() const { return LBRIndex < LBRStack.size(); }
  233. uint64_t getCurrentLBRSource() const { return LBRStack[LBRIndex].Source; }
  234. uint64_t getCurrentLBRTarget() const { return LBRStack[LBRIndex].Target; }
  235. const LBREntry &getCurrentLBR() const { return LBRStack[LBRIndex]; }
  236. void advanceLBR() { LBRIndex++; }
  237. ProfiledFrame *getParentFrame() { return CurrentLeafFrame->Parent; }
  238. void pushFrame(uint64_t Address) {
  239. CurrentLeafFrame = CurrentLeafFrame->getOrCreateChildFrame(Address);
  240. }
  241. void switchToFrame(uint64_t Address) {
  242. if (CurrentLeafFrame->Address == Address)
  243. return;
  244. CurrentLeafFrame = CurrentLeafFrame->Parent->getOrCreateChildFrame(Address);
  245. }
  246. void popFrame() { CurrentLeafFrame = CurrentLeafFrame->Parent; }
  247. void initFrameTrie(const SmallVectorImpl<uint64_t> &CallStack) {
  248. ProfiledFrame *Cur = &DummyTrieRoot;
  249. for (auto Address : reverse(CallStack)) {
  250. Cur = Cur->getOrCreateChildFrame(Address);
  251. }
  252. CurrentLeafFrame = Cur;
  253. }
  254. ProfiledFrame *getDummyRootPtr() { return &DummyTrieRoot; }
  255. };
  256. // Base class for sample counter key with context
  257. struct ContextKey {
  258. uint64_t HashCode = 0;
  259. virtual ~ContextKey() = default;
  260. uint64_t getHashCode() const { return HashCode; }
  261. virtual bool isEqual(const ContextKey *K) const {
  262. return HashCode == K->HashCode;
  263. };
  264. // Utilities for LLVM-style RTTI
  265. enum ContextKind { CK_StringBased, CK_ProbeBased };
  266. const ContextKind Kind;
  267. ContextKind getKind() const { return Kind; }
  268. ContextKey(ContextKind K) : Kind(K){};
  269. };
  270. // String based context id
  271. struct StringBasedCtxKey : public ContextKey {
  272. std::string Context;
  273. StringBasedCtxKey() : ContextKey(CK_StringBased){};
  274. static bool classof(const ContextKey *K) {
  275. return K->getKind() == CK_StringBased;
  276. }
  277. bool isEqual(const ContextKey *K) const override {
  278. const StringBasedCtxKey *Other = dyn_cast<StringBasedCtxKey>(K);
  279. return Context == Other->Context;
  280. }
  281. void genHashCode() { HashCode = hash_value(Context); }
  282. };
// Probe based context key as the intermediate key of context
// String based context key will introduce redundant string handling
// since the callee context is inferred from the context string which
// need to be splitted by '@' to get the last location frame, so we
// can just use probe instead and generate the string in the end.
struct ProbeBasedCtxKey : public ContextKey {
  // Probes identifying the call path of this context; filled by
  // ProbeStack::getContextKey (defined out of line).
  SmallVector<const PseudoProbe *, 16> Probes;

  ProbeBasedCtxKey() : ContextKey(CK_ProbeBased) {}

  static bool classof(const ContextKey *K) {
    return K->getKind() == CK_ProbeBased;
  }

  // Equal iff the probe pointer sequences match element-wise (identity
  // comparison on the decoded probe objects).
  bool isEqual(const ContextKey *K) const override {
    const ProbeBasedCtxKey *O = dyn_cast<ProbeBasedCtxKey>(K);
    assert(O != nullptr && "Probe based key shouldn't be null in isEqual");
    return std::equal(Probes.begin(), Probes.end(), O->Probes.begin(),
                      O->Probes.end());
  }

  // Fold the probe pointers into HashCode. Call once after Probes is
  // populated; the Hash functor asserts a non-zero code, hence the fixup
  // below for the empty-list case.
  void genHashCode() {
    for (const auto *P : Probes) {
      HashCode = hash_combine(HashCode, P);
    }
    if (HashCode == 0) {
      // Avoid zero value of HashCode when it's an empty list
      HashCode = 1;
    }
  }
};
  310. // The counter of branch samples for one function indexed by the branch,
  311. // which is represented as the source and target offset pair.
  312. using BranchSample = std::map<std::pair<uint64_t, uint64_t>, uint64_t>;
  313. // The counter of range samples for one function indexed by the range,
  314. // which is represented as the start and end offset pair.
  315. using RangeSample = std::map<std::pair<uint64_t, uint64_t>, uint64_t>;
  316. // Wrapper for sample counters including range counter and branch counter
  317. struct SampleCounter {
  318. RangeSample RangeCounter;
  319. BranchSample BranchCounter;
  320. void recordRangeCount(uint64_t Start, uint64_t End, uint64_t Repeat) {
  321. RangeCounter[{Start, End}] += Repeat;
  322. }
  323. void recordBranchCount(uint64_t Source, uint64_t Target, uint64_t Repeat) {
  324. BranchCounter[{Source, Target}] += Repeat;
  325. }
  326. };
// Sample counter with context to support context-sensitive profile, keyed by
// a hashed ContextKey (string based or probe based).
using ContextSampleCounterMap =
    std::unordered_map<Hashable<ContextKey>, SampleCounter,
                       Hashable<ContextKey>::Hash, Hashable<ContextKey>::Equal>;
  331. struct FrameStack {
  332. SmallVector<uint64_t, 16> Stack;
  333. const ProfiledBinary *Binary;
  334. FrameStack(const ProfiledBinary *B) : Binary(B) {}
  335. bool pushFrame(UnwindState::ProfiledFrame *Cur) {
  336. Stack.push_back(Cur->Address);
  337. return true;
  338. }
  339. void popFrame() {
  340. if (!Stack.empty())
  341. Stack.pop_back();
  342. }
  343. std::shared_ptr<StringBasedCtxKey> getContextKey();
  344. };
  345. struct ProbeStack {
  346. SmallVector<const PseudoProbe *, 16> Stack;
  347. const ProfiledBinary *Binary;
  348. ProbeStack(const ProfiledBinary *B) : Binary(B) {}
  349. bool pushFrame(UnwindState::ProfiledFrame *Cur) {
  350. const PseudoProbe *CallProbe = Binary->getCallProbeForAddr(Cur->Address);
  351. // We may not find a probe for a merged or external callsite.
  352. // Callsite merging may cause the loss of original probe IDs.
  353. // Cutting off the context from here since the inliner will
  354. // not know how to consume a context with unknown callsites.
  355. if (!CallProbe)
  356. return false;
  357. Stack.push_back(CallProbe);
  358. return true;
  359. }
  360. void popFrame() {
  361. if (!Stack.empty())
  362. Stack.pop_back();
  363. }
  364. // Use pseudo probe based context key to get the sample counter
  365. // A context stands for a call path from 'main' to an uninlined
  366. // callee with all inline frames recovered on that path. The probes
  367. // belonging to that call path is the probes either originated from
  368. // the callee or from any functions inlined into the callee. Since
  369. // pseudo probes are organized in a tri-tree style after decoded,
  370. // the tree path from the tri-tree root (which is the uninlined
  371. // callee) to the probe node forms an inline context.
  372. // Here we use a list of probe(pointer) as the context key to speed up
  373. // aggregation and the final context string will be generate in
  374. // ProfileGenerator
  375. std::shared_ptr<ProbeBasedCtxKey> getContextKey();
  376. };
/*
As in hybrid sample we have a group of LBRs and the most recent sampling call
stack, we can walk through those LBRs to infer more call stacks which would be
used as context for profile. VirtualUnwinder is the class to do the call stack
unwinding based on LBR state. Two types of unwinding are processed here:
1) LBR unwinding and 2) linear range unwinding.
Specifically, for each LBR entry(can be classified into call, return, regular
branch), LBR unwinding will replay the operation by pushing, popping or
switching leaf frame towards the call stack and since the initial call stack
is most recently sampled, the replay should be in anti-execution order, i.e. for
the regular case, pop the call stack when LBR is call, push frame on call stack
when LBR is return. After each LBR processed, it also needs to align with the
next LBR by going through instructions from previous LBR's target to current
LBR's source, which is the linear unwinding. As instruction from linear range
can come from different function by inlining, linear unwinding will do the range
splitting and record counters by the range with same inline context. Over those
unwinding process we will record each call stack as context id and LBR/linear
range as sample counter for further CS profile generation.
*/
class VirtualUnwinder {
public:
  VirtualUnwinder(ContextSampleCounterMap *Counter, const ProfiledBinary *B)
      : CtxCounterMap(Counter), Binary(B) {}

  // Replay one aggregated hybrid sample with weight \p Repeat, recording its
  // counters into CtxCounterMap. Defined out of line.
  bool unwind(const HybridSample *Sample, uint64_t Repeat);

private:
  bool isCallState(UnwindState &State) const {
    // The tail call frame is always missing here in stack sample, we will
    // use a specific tail call tracker to infer it.
    return Binary->addressIsCall(State.getCurrentLBRSource());
  }

  bool isReturnState(UnwindState &State) const {
    // Simply check addressIsReturn, as ret is always reliable, both for
    // regular call and tail call.
    return Binary->addressIsReturn(State.getCurrentLBRSource());
  }

  // Per-LBR-kind unwinding steps; bodies defined out of line.
  void unwindCall(UnwindState &State);
  void unwindLinear(UnwindState &State, uint64_t Repeat);
  void unwindReturn(UnwindState &State);
  void unwindBranchWithinFrame(UnwindState &State);

  template <typename T>
  void collectSamplesFromFrame(UnwindState::ProfiledFrame *Cur, T &Stack);
  // Collect each samples on trie node by DFS traversal
  template <typename T>
  void collectSamplesFromFrameTrie(UnwindState::ProfiledFrame *Cur, T &Stack);
  void collectSamplesFromFrameTrie(UnwindState::ProfiledFrame *Cur);

  void recordRangeCount(uint64_t Start, uint64_t End, UnwindState &State,
                        uint64_t Repeat);
  void recordBranchCount(const LBREntry &Branch, UnwindState &State,
                         uint64_t Repeat);

  // Output counters, not owned by the unwinder (supplied by the caller).
  ContextSampleCounterMap *CtxCounterMap;
  // Profiled binary that current frame address belongs to
  const ProfiledBinary *Binary;
};
// Filename to binary map
using BinaryMap = StringMap<ProfiledBinary>;
// Address to binary map for fast look-up
using AddressBinaryMap = std::map<uint64_t, ProfiledBinary *>;
// Binary to ContextSampleCounters Map to support multiple binary, we may have
// same binary loaded at different addresses, they should share the same sample
// counter
using BinarySampleCounterMap =
    std::unordered_map<ProfiledBinary *, ContextSampleCounterMap>;
// Load binaries and read perf trace to parse the events and samples
class PerfReader {
public:
  PerfReader(cl::list<std::string> &BinaryFilenames,
             cl::list<std::string> &PerfTraceFilenames);

  // A LBR sample is like:
  // 0x5c6313f/0x5c63170/P/-/-/0 0x5c630e7/0x5c63130/P/-/-/0 ...
  // A heuristic for fast detection by checking whether a
  // leading " 0x" and the '/' exist.
  static bool isLBRSample(StringRef Line) {
    if (!Line.startswith(" 0x"))
      return false;
    if (Line.find('/') != StringRef::npos)
      return true;
    return false;
  }

  // The raw hybrid sample is like
  // e.g.
  // 4005dc    # call stack leaf
  // 400634
  // 400684    # call stack root
  // 0x4005c8/0x4005dc/P/-/-/0  0x40062f/0x4005b0/P/-/-/0 ...
  //          ... 0x4005c8/0x4005dc/P/-/-/0    # LBR Entries
  // Determine the perfscript contains hybrid samples(call stack + LBRs) by
  // checking whether there is a non-empty call stack immediately followed by
  // a LBR sample
  static PerfScriptType checkPerfScriptType(StringRef FileName) {
    TraceStream TraceIt(FileName);
    uint64_t FrameAddr = 0;
    while (!TraceIt.isAtEoF()) {
      // Count consecutive lines that parse as a bare hex address
      // (getAsInteger returns true on failure, so the negation means the
      // line is a call stack frame).
      int32_t Count = 0;
      while (!TraceIt.isAtEoF() &&
             !TraceIt.getCurrentLine().ltrim().getAsInteger(16, FrameAddr)) {
        Count++;
        TraceIt.advance();
      }
      if (!TraceIt.isAtEoF()) {
        if (isLBRSample(TraceIt.getCurrentLine())) {
          // LBR line preceded by stack frames => hybrid; otherwise LBR-only.
          if (Count > 0)
            return PERF_LBR_STACK;
          else
            return PERF_LBR;
        }
        TraceIt.advance();
      }
    }
    return PERF_INVALID;
  }

  // The parsed MMap event
  struct MMapEvent {
    uint64_t PID = 0;
    uint64_t BaseAddress = 0;
    uint64_t Size = 0;
    uint64_t Offset = 0;
    // NOTE(review): this is a view into the parsed trace line — confirm the
    // backing storage outlives any stored use of the event.
    StringRef BinaryPath;
  };

  /// Load symbols and disassemble the code of a give binary.
  /// Also register the binary in the binary table.
  ///
  ProfiledBinary &loadBinary(const StringRef BinaryPath,
                             bool AllowNameConflict = true);

  // Update the memory layout of the binary referred to by \p Event.
  void updateBinaryAddress(const MMapEvent &Event);

  PerfScriptType getPerfScriptType() const { return PerfType; }

  // Entry of the reader to parse multiple perf traces
  void parsePerfTraces(cl::list<std::string> &PerfTraceFilenames);

  const BinarySampleCounterMap &getBinarySampleCounters() const {
    return BinarySampleCounters;
  }

private:
  /// Validate the command line input
  void validateCommandLine(cl::list<std::string> &BinaryFilenames,
                           cl::list<std::string> &PerfTraceFilenames);
  /// Parse a single line of a PERF_RECORD_MMAP2 event looking for a
  /// mapping between the binary name and its memory layout.
  ///
  void parseMMap2Event(TraceStream &TraceIt);
  // Parse perf events/samples and do aggregation
  void parseAndAggregateTrace(StringRef Filename);
  // Parse either an MMAP event or a perf sample
  void parseEventOrSample(TraceStream &TraceIt);
  // Parse the hybrid sample including the call and LBR line
  void parseHybridSample(TraceStream &TraceIt);
  // Extract call stack from the perf trace lines
  bool extractCallstack(TraceStream &TraceIt,
                        SmallVectorImpl<uint64_t> &CallStack);
  // Extract LBR stack from one perf trace line
  bool extractLBRStack(TraceStream &TraceIt,
                       SmallVectorImpl<LBREntry> &LBRStack,
                       ProfiledBinary *Binary);
  // Detect the script type across all trace files and record it in PerfType.
  void checkAndSetPerfType(cl::list<std::string> &PerfTraceFilenames);
  // Post process the profile after trace aggregation, we will do simple range
  // overlap computation for AutoFDO, or unwind for CSSPGO(hybrid sample).
  void generateRawProfile();
  // Unwind the hybrid samples after aggregration
  void unwindSamples();
  // Presumably dumps the unwinder results for debugging — defined out of
  // line; confirm in PerfReader.cpp.
  void printUnwinderOutput();
  // Helper function for looking up binary in AddressBinaryMap
  ProfiledBinary *getBinary(uint64_t Address);

  // Binaries registered by loadBinary, keyed by file name.
  BinaryMap BinaryTable;
  AddressBinaryMap AddrToBinaryMap; // Used by address-based lookup.

private:
  // Per-binary counters produced from the parsed samples.
  BinarySampleCounterMap BinarySampleCounters;
  // Samples with the repeating time generated by the perf reader
  AggregatedCounter AggregatedSamples;
  PerfScriptType PerfType = PERF_UNKNOWN;
};
  545. } // end namespace sampleprof
  546. } // end namespace llvm
  547. #endif