//===-- stack_depot.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_STACK_DEPOT_H_
#define SCUDO_STACK_DEPOT_H_

#include "atomic_helpers.h"
#include "mutex.h"

namespace scudo {

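// An incremental hash builder using the MurmurHash2 mixing constants (M and R
// below), used to hash stack traces one 32-bit word at a time.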
class MurMur2HashBuilder {
  static const u32 M = 0x5bd1e995;
  static const u32 Seed = 0x9747b28c;
  static const u32 R = 24;
  u32 H;

public:
  explicit MurMur2HashBuilder(u32 Init = 0) { H = Seed ^ Init; }
  void add(u32 K) {
    K *= M;
    K ^= K >> R;
    K *= M;
    H *= M;
    H ^= K;
  }
  u32 get() {
    u32 X = H;
    X ^= X >> 13;
    X *= M;
    X ^= X >> 15;
    return X;
  }
};
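
// Illustrative use of the builder (a sketch; W0 and W1 stand for arbitrary
// input words):
//   MurMur2HashBuilder B;
//   B.add(W0);
//   B.add(W1);
//   u32 Hash = B.get();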

class StackDepot {
  HybridMutex RingEndMu;
  u32 RingEnd = 0;

  // This data structure stores a stack trace for each allocation and
  // deallocation when stack trace recording is enabled, which may be looked up
  // using a hash of the stack trace. The lower bits of the hash are an index
  // into the Tab array, which stores an index into the Ring array where the
  // stack traces are stored. As the name implies, Ring is a ring buffer, so a
  // stack trace may wrap around to the start of the array.
  //
  // Each stack trace in Ring is prefixed by a stack trace marker consisting of
  // a fixed 1 bit in bit 0 (this allows disambiguation between stack frames
  // and stack trace markers in the case where instruction pointers are 4-byte
  // aligned, as they are on arm64), the stack trace hash in bits 1-32, and the
  // size of the stack trace in bits 33-63.
  //
  // The insert() function is potentially racy in its accesses to the Tab and
  // Ring arrays, but find() is resilient to races in the sense that, barring
  // hash collisions, it will either return the correct stack trace or no stack
  // trace at all, even if two instances of insert() raced with one another.
  // This is achieved by re-checking the hash of the stack trace before
  // returning the trace.
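  //
  // For example (illustrative values, not from the original header): a 3-frame
  // trace with hash 0x9e3779b9 is prefixed by the marker
  //   Id = (u64(3) << 33) | (u64(0x9e3779b9) << 1) | 1 = 0x73c6ef373,
  // i.e. tag bit 1 in bit 0, the hash in bits 1-32, and size 3 in bits 33-63.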

#if SCUDO_SMALL_STACK_DEPOT
  static const uptr TabBits = 4;
#else
  static const uptr TabBits = 16;
#endif
  static const uptr TabSize = 1 << TabBits;
  static const uptr TabMask = TabSize - 1;
  atomic_u32 Tab[TabSize] = {};

#if SCUDO_SMALL_STACK_DEPOT
  static const uptr RingBits = 4;
#else
  static const uptr RingBits = 19;
#endif
  static const uptr RingSize = 1 << RingBits;
  static const uptr RingMask = RingSize - 1;
  atomic_u64 Ring[RingSize] = {};

public:
  // Insert hash of the stack trace [Begin, End) into the stack depot, and
  // return the hash.
  u32 insert(uptr *Begin, uptr *End) {
    MurMur2HashBuilder B;
    for (uptr *I = Begin; I != End; ++I)
      B.add(u32(*I) >> 2);
    u32 Hash = B.get();
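    // Fast path: if the Tab slot for this hash already points at a marker
    // equal to Id, the same trace was inserted earlier and its hash can be
    // returned without taking the lock.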
    u32 Pos = Hash & TabMask;
    u32 RingPos = atomic_load_relaxed(&Tab[Pos]);
    u64 Entry = atomic_load_relaxed(&Ring[RingPos]);
    u64 Id = (u64(End - Begin) << 33) | (u64(Hash) << 1) | 1;
    if (Entry == Id)
      return Hash;
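    // Slow path: under the lock, append the marker followed by the frames at
    // RingEnd, then point the Tab slot at the new marker.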
    ScopedLock Lock(RingEndMu);
    RingPos = RingEnd;
    atomic_store_relaxed(&Tab[Pos], RingPos);
    atomic_store_relaxed(&Ring[RingPos], Id);
    for (uptr *I = Begin; I != End; ++I) {
      RingPos = (RingPos + 1) & RingMask;
      atomic_store_relaxed(&Ring[RingPos], *I);
    }
    RingEnd = (RingPos + 1) & RingMask;
    return Hash;
  }

  // Look up a stack trace by hash. Returns true if successful. The trace may
  // be accessed via operator[] passing indexes between *RingPosPtr and
  // *RingPosPtr + *SizePtr.
  bool find(u32 Hash, uptr *RingPosPtr, uptr *SizePtr) const {
    u32 Pos = Hash & TabMask;
    u32 RingPos = atomic_load_relaxed(&Tab[Pos]);
    if (RingPos >= RingSize)
      return false;
    u64 Entry = atomic_load_relaxed(&Ring[RingPos]);
    u64 HashWithTagBit = (u64(Hash) << 1) | 1;
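    // 0x1ffffffff masks the low 33 bits of the marker: the tag bit in bit 0
    // and the 32-bit hash in bits 1-32.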
    if ((Entry & 0x1ffffffff) != HashWithTagBit)
      return false;
    u32 Size = u32(Entry >> 33);
    if (Size >= RingSize)
      return false;
    *RingPosPtr = (RingPos + 1) & RingMask;
    *SizePtr = Size;
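    // Re-hash the stored frames and compare against Hash; this rejects entries
    // that a racing insert() has partially overwritten, as described above.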
    MurMur2HashBuilder B;
    for (uptr I = 0; I != Size; ++I) {
      RingPos = (RingPos + 1) & RingMask;
      B.add(u32(atomic_load_relaxed(&Ring[RingPos])) >> 2);
    }
    return B.get() == Hash;
  }

  u64 operator[](uptr RingPos) const {
    return atomic_load_relaxed(&Ring[RingPos & RingMask]);
  }
};
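
// Illustrative usage (a sketch, not part of the original header; the frame
// values are placeholders):
//   StackDepot Depot;
//   uptr Frames[3] = {0x1000, 0x1004, 0x1008};
//   u32 Hash = Depot.insert(Frames, Frames + 3);
//   uptr RingPos, Size;
//   if (Depot.find(Hash, &RingPos, &Size))
//     for (uptr I = 0; I != Size; ++I)
//       u64 Frame = Depot[RingPos + I];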

} // namespace scudo

#endif // SCUDO_STACK_DEPOT_H_