//===-- memprof_rawprofile.cpp - MemProf raw profile serialization --------===//
//
// Serializes in-memory MemProf data (segments, MIBs, stack traces) into the
// raw profile binary format described by the section comments below.
//
//===----------------------------------------------------------------------===//
  1. #include <stdint.h>
  2. #include <stdlib.h>
  3. #include <string.h>
  4. #include "memprof_rawprofile.h"
  5. #include "profile/MemProfData.inc"
  6. #include "sanitizer_common/sanitizer_allocator_internal.h"
  7. #include "sanitizer_common/sanitizer_common.h"
  8. #include "sanitizer_common/sanitizer_linux.h"
  9. #include "sanitizer_common/sanitizer_procmaps.h"
  10. #include "sanitizer_common/sanitizer_stackdepot.h"
  11. #include "sanitizer_common/sanitizer_stackdepotbase.h"
  12. #include "sanitizer_common/sanitizer_stacktrace.h"
  13. #include "sanitizer_common/sanitizer_vector.h"
  14. namespace __memprof {
  15. using ::__sanitizer::Vector;
  16. using ::llvm::memprof::MemInfoBlock;
  17. using SegmentEntry = ::llvm::memprof::SegmentEntry;
  18. using Header = ::llvm::memprof::Header;
  19. namespace {
  20. template <class T> char *WriteBytes(T Pod, char *&Buffer) {
  21. *(T *)Buffer = Pod;
  22. return Buffer + sizeof(T);
  23. }
  24. void RecordStackId(const uptr Key, UNUSED LockedMemInfoBlock *const &MIB,
  25. void *Arg) {
  26. // No need to touch the MIB value here since we are only recording the key.
  27. auto *StackIds = reinterpret_cast<Vector<u64> *>(Arg);
  28. StackIds->PushBack(Key);
  29. }
  30. } // namespace
  31. u64 SegmentSizeBytes(MemoryMappingLayoutBase &Layout) {
  32. u64 NumSegmentsToRecord = 0;
  33. MemoryMappedSegment segment;
  34. for (Layout.Reset(); Layout.Next(&segment);)
  35. if (segment.IsReadable() && segment.IsExecutable())
  36. NumSegmentsToRecord++;
  37. return sizeof(u64) // A header which stores the number of records.
  38. + sizeof(SegmentEntry) * NumSegmentsToRecord;
  39. }
  40. // The segment section uses the following format:
  41. // ---------- Segment Info
  42. // Num Entries
  43. // ---------- Segment Entry
  44. // Start
  45. // End
  46. // Offset
  47. // BuildID 32B
  48. // ----------
  49. // ...
  50. void SerializeSegmentsToBuffer(MemoryMappingLayoutBase &Layout,
  51. const u64 ExpectedNumBytes, char *&Buffer) {
  52. char *Ptr = Buffer;
  53. // Reserve space for the final count.
  54. Ptr += sizeof(u64);
  55. u64 NumSegmentsRecorded = 0;
  56. MemoryMappedSegment segment;
  57. for (Layout.Reset(); Layout.Next(&segment);) {
  58. if (segment.IsReadable() && segment.IsExecutable()) {
  59. // TODO: Record segment.uuid when it is implemented for Linux-Elf.
  60. SegmentEntry Entry(segment.start, segment.end, segment.offset);
  61. memcpy(Ptr, &Entry, sizeof(SegmentEntry));
  62. Ptr += sizeof(SegmentEntry);
  63. NumSegmentsRecorded++;
  64. }
  65. }
  66. // Store the number of segments we recorded in the space we reserved.
  67. *((u64 *)Buffer) = NumSegmentsRecorded;
  68. CHECK(ExpectedNumBytes >= static_cast<u64>(Ptr - Buffer) &&
  69. "Expected num bytes != actual bytes written");
  70. }
  71. u64 StackSizeBytes(const Vector<u64> &StackIds) {
  72. u64 NumBytesToWrite = sizeof(u64);
  73. const u64 NumIds = StackIds.Size();
  74. for (unsigned k = 0; k < NumIds; ++k) {
  75. const u64 Id = StackIds[k];
  76. // One entry for the id and then one more for the number of stack pcs.
  77. NumBytesToWrite += 2 * sizeof(u64);
  78. const StackTrace St = StackDepotGet(Id);
  79. CHECK(St.trace != nullptr && St.size > 0 && "Empty stack trace");
  80. for (uptr i = 0; i < St.size && St.trace[i] != 0; i++) {
  81. NumBytesToWrite += sizeof(u64);
  82. }
  83. }
  84. return NumBytesToWrite;
  85. }
  86. // The stack info section uses the following format:
  87. //
  88. // ---------- Stack Info
  89. // Num Entries
  90. // ---------- Stack Entry
  91. // Num Stacks
  92. // PC1
  93. // PC2
  94. // ...
  95. // ----------
  96. void SerializeStackToBuffer(const Vector<u64> &StackIds,
  97. const u64 ExpectedNumBytes, char *&Buffer) {
  98. const u64 NumIds = StackIds.Size();
  99. char *Ptr = Buffer;
  100. Ptr = WriteBytes(static_cast<u64>(NumIds), Ptr);
  101. for (unsigned k = 0; k < NumIds; ++k) {
  102. const u64 Id = StackIds[k];
  103. Ptr = WriteBytes(Id, Ptr);
  104. Ptr += sizeof(u64); // Bump it by u64, we will fill this in later.
  105. u64 Count = 0;
  106. const StackTrace St = StackDepotGet(Id);
  107. for (uptr i = 0; i < St.size && St.trace[i] != 0; i++) {
  108. // PCs in stack traces are actually the return addresses, that is,
  109. // addresses of the next instructions after the call.
  110. uptr pc = StackTrace::GetPreviousInstructionPc(St.trace[i]);
  111. Ptr = WriteBytes(static_cast<u64>(pc), Ptr);
  112. ++Count;
  113. }
  114. // Store the count in the space we reserved earlier.
  115. *(u64 *)(Ptr - (Count + 1) * sizeof(u64)) = Count;
  116. }
  117. CHECK(ExpectedNumBytes >= static_cast<u64>(Ptr - Buffer) &&
  118. "Expected num bytes != actual bytes written");
  119. }
  120. // The MIB section has the following format:
  121. // ---------- MIB Info
  122. // Num Entries
  123. // ---------- MIB Entry 0
  124. // Alloc Count
  125. // ...
  126. // ---------- MIB Entry 1
  127. // Alloc Count
  128. // ...
  129. // ----------
  130. void SerializeMIBInfoToBuffer(MIBMapTy &MIBMap, const Vector<u64> &StackIds,
  131. const u64 ExpectedNumBytes, char *&Buffer) {
  132. char *Ptr = Buffer;
  133. const u64 NumEntries = StackIds.Size();
  134. Ptr = WriteBytes(NumEntries, Ptr);
  135. for (u64 i = 0; i < NumEntries; i++) {
  136. const u64 Key = StackIds[i];
  137. MIBMapTy::Handle h(&MIBMap, Key, /*remove=*/true, /*create=*/false);
  138. CHECK(h.exists());
  139. Ptr = WriteBytes(Key, Ptr);
  140. Ptr = WriteBytes((*h)->mib, Ptr);
  141. }
  142. CHECK(ExpectedNumBytes >= static_cast<u64>(Ptr - Buffer) &&
  143. "Expected num bytes != actual bytes written");
  144. }
  145. // Format
  146. // ---------- Header
  147. // Magic
  148. // Version
  149. // Total Size
  150. // Segment Offset
  151. // MIB Info Offset
  152. // Stack Offset
  153. // ---------- Segment Info
  154. // Num Entries
  155. // ---------- Segment Entry
  156. // Start
  157. // End
  158. // Offset
  159. // BuildID 32B
  160. // ----------
  161. // ...
  162. // ----------
  163. // Optional Padding Bytes
  164. // ---------- MIB Info
  165. // Num Entries
  166. // ---------- MIB Entry
  167. // Alloc Count
  168. // ...
  169. // ----------
  170. // Optional Padding Bytes
  171. // ---------- Stack Info
  172. // Num Entries
  173. // ---------- Stack Entry
  174. // Num Stacks
  175. // PC1
  176. // PC2
  177. // ...
  178. // ----------
  179. // Optional Padding Bytes
  180. // ...
  181. u64 SerializeToRawProfile(MIBMapTy &MIBMap, MemoryMappingLayoutBase &Layout,
  182. char *&Buffer) {
  183. // Each section size is rounded up to 8b since the first entry in each section
  184. // is a u64 which holds the number of entries in the section by convention.
  185. const u64 NumSegmentBytes = RoundUpTo(SegmentSizeBytes(Layout), 8);
  186. Vector<u64> StackIds;
  187. MIBMap.ForEach(RecordStackId, reinterpret_cast<void *>(&StackIds));
  188. // The first 8b are for the total number of MIB records. Each MIB record is
  189. // preceded by a 8b stack id which is associated with stack frames in the next
  190. // section.
  191. const u64 NumMIBInfoBytes = RoundUpTo(
  192. sizeof(u64) + StackIds.Size() * (sizeof(u64) + sizeof(MemInfoBlock)), 8);
  193. const u64 NumStackBytes = RoundUpTo(StackSizeBytes(StackIds), 8);
  194. // Ensure that the profile is 8b aligned. We allow for some optional padding
  195. // at the end so that any subsequent profile serialized to the same file does
  196. // not incur unaligned accesses.
  197. const u64 TotalSizeBytes = RoundUpTo(
  198. sizeof(Header) + NumSegmentBytes + NumStackBytes + NumMIBInfoBytes, 8);
  199. // Allocate the memory for the entire buffer incl. info blocks.
  200. Buffer = (char *)InternalAlloc(TotalSizeBytes);
  201. char *Ptr = Buffer;
  202. Header header{MEMPROF_RAW_MAGIC_64,
  203. MEMPROF_RAW_VERSION,
  204. static_cast<u64>(TotalSizeBytes),
  205. sizeof(Header),
  206. sizeof(Header) + NumSegmentBytes,
  207. sizeof(Header) + NumSegmentBytes + NumMIBInfoBytes};
  208. Ptr = WriteBytes(header, Ptr);
  209. SerializeSegmentsToBuffer(Layout, NumSegmentBytes, Ptr);
  210. Ptr += NumSegmentBytes;
  211. SerializeMIBInfoToBuffer(MIBMap, StackIds, NumMIBInfoBytes, Ptr);
  212. Ptr += NumMIBInfoBytes;
  213. SerializeStackToBuffer(StackIds, NumStackBytes, Ptr);
  214. return TotalSizeBytes;
  215. }
  216. } // namespace __memprof