//===-- sanitizer_stackdepot.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//

#include "sanitizer_stackdepot.h"

#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_hash.h"
#include "sanitizer_mutex.h"
#include "sanitizer_stack_store.h"
#include "sanitizer_stackdepotbase.h"

namespace __sanitizer {
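
// A node of the stack-depot hash table. It holds the 64-bit MurMur2 hash of
// a trace, the link to the next node in the same bucket, and a compact id
// into stackStore where the frames themselves live. Note that eq() compares
// hashes only: the 64-bit hash is treated as collision-free, so lookups
// never have to reload the frames.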
struct StackDepotNode {
  using hash_type = u64;
  hash_type stack_hash;
  u32 link;
  StackStore::Id store_id;

  static const u32 kTabSizeLog = SANITIZER_ANDROID ? 16 : 20;

  typedef StackTrace args_type;
  bool eq(hash_type hash, const args_type &args) const {
    return hash == stack_hash;
  }
  static uptr allocated();
  static hash_type hash(const args_type &args) {
    MurMur2Hash64Builder H(args.size * sizeof(uptr));
    for (uptr i = 0; i < args.size; i++) H.add(args.trace[i]);
    H.add(args.tag);
    return H.get();
  }
  static bool is_valid(const args_type &args) {
    return args.size > 0 && args.trace;
  }
  void store(u32 id, const args_type &args, hash_type hash);
  args_type load(u32 id) const;
  static StackDepotHandle get_handle(u32 id);

  typedef StackDepotHandle handle_type;
};
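
// Frame arrays are kept out of the nodes in a separate StackStore; nodes
// refer to them by StackStore::Id only.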
static StackStore stackStore;

// FIXME(dvyukov): this single reserved bit is used in TSan.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
    StackDepot;
static StackDepot theDepot;

// Keep mutable data out of frequently accessed nodes to improve caching
// efficiency.
static TwoLevelMap<atomic_uint32_t, StackDepot::kNodesSize1,
                   StackDepot::kNodesSize2>
    useCounts;

int StackDepotHandle::use_count() const {
  return atomic_load_relaxed(&useCounts[id_]);
}

void StackDepotHandle::inc_use_count_unsafe() {
  atomic_fetch_add(&useCounts[id_], 1, memory_order_relaxed);
}

uptr StackDepotNode::allocated() {
  return stackStore.Allocated() + useCounts.MemoryUsage();
}
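
// Packs the stack store with the compression algorithm selected by the
// compress_stack_depot flag. Abs() is used because a negative flag value
// selects the same algorithm but runs compression synchronously on the
// calling thread instead of on the background thread (see
// CompressThread::NewWorkNotify below).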
static void CompressStackStore() {
  u64 start = Verbosity() >= 1 ? MonotonicNanoTime() : 0;
  uptr diff = stackStore.Pack(static_cast<StackStore::Compression>(
      Abs(common_flags()->compress_stack_depot)));
  if (!diff)
    return;
  if (Verbosity() >= 1) {
    u64 finish = MonotonicNanoTime();
    uptr total_before = theDepot.GetStats().allocated + diff;
    VPrintf(1, "%s: StackDepot released %zu KiB out of %zu KiB in %llu ms\n",
            SanitizerToolName, diff >> 10, total_before >> 10,
            (finish - start) / 1000000);
  }
}

namespace {
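
// Background thread that compresses the stack store. It is started lazily on
// the first NewWorkNotify() call and then sleeps on a semaphore until more
// work arrives; run_ is the stop flag checked after each wakeup.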
class CompressThread {
 public:
  constexpr CompressThread() = default;
  void NewWorkNotify();
  void Stop();
  void LockAndStop() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
  void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;

 private:
  enum class State {
    NotStarted = 0,
    Started,
    Failed,
    Stopped,
  };

  void Run();

  bool WaitForWork() {
    semaphore_.Wait();
    return atomic_load(&run_, memory_order_acquire);
  }

  Semaphore semaphore_ = {};
  StaticSpinMutex mutex_ = {};
  State state_ SANITIZER_GUARDED_BY(mutex_) = State::NotStarted;
  void *thread_ SANITIZER_GUARDED_BY(mutex_) = nullptr;
  atomic_uint8_t run_ = {};
};

static CompressThread compress_thread;
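
// Called when StackStore::Store signals (via its pack out-parameter) that a
// block is ready for packing. Starts the thread on first use; if the thread
// could not be started, or a non-positive flag value asked for synchronous
// mode, compression runs on the calling thread instead.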
void CompressThread::NewWorkNotify() {
  int compress = common_flags()->compress_stack_depot;
  if (!compress)
    return;
  if (compress > 0 /* for testing or debugging */) {
    SpinMutexLock l(&mutex_);
    if (state_ == State::NotStarted) {
      atomic_store(&run_, 1, memory_order_release);
      CHECK_EQ(nullptr, thread_);
      thread_ = internal_start_thread(
          [](void *arg) -> void * {
            reinterpret_cast<CompressThread *>(arg)->Run();
            return nullptr;
          },
          this);
      state_ = thread_ ? State::Started : State::Failed;
    }
    if (state_ == State::Started) {
      semaphore_.Post();
      return;
    }
  }
  CompressStackStore();
}

void CompressThread::Run() {
  VPrintf(1, "%s: StackDepot compression thread started\n", SanitizerToolName);
  while (WaitForWork()) CompressStackStore();
  VPrintf(1, "%s: StackDepot compression thread stopped\n", SanitizerToolName);
}

void CompressThread::Stop() {
  void *t = nullptr;
  {
    SpinMutexLock l(&mutex_);
    if (state_ != State::Started)
      return;
    state_ = State::Stopped;
    CHECK_NE(nullptr, thread_);
    t = thread_;
    thread_ = nullptr;
  }
  atomic_store(&run_, 0, memory_order_release);
  semaphore_.Post();
  internal_join_thread(t);
}
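
// Called from StackDepotLockAll() (e.g. around fork): joins the thread while
// holding mutex_ so no compression is in flight while the depot is locked,
// and resets state_ to NotStarted so the thread can be started again after
// Unlock().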
void CompressThread::LockAndStop() {
  mutex_.Lock();
  if (state_ != State::Started)
    return;
  CHECK_NE(nullptr, thread_);
  atomic_store(&run_, 0, memory_order_release);
  semaphore_.Post();
  internal_join_thread(thread_);
  // Allow restarting after Unlock() if needed.
  state_ = State::NotStarted;
  thread_ = nullptr;
}

void CompressThread::Unlock() { mutex_.Unlock(); }

}  // namespace
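
// Stores the frames in stackStore and remembers the returned id. Store()
// reports outstanding packing work through its pack out-parameter; when it
// is non-zero, the background compression thread is notified.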
void StackDepotNode::store(u32 id, const args_type &args, hash_type hash) {
  stack_hash = hash;
  uptr pack = 0;
  store_id = stackStore.Store(args, &pack);
  if (LIKELY(!pack))
    return;
  compress_thread.NewWorkNotify();
}

StackDepotNode::args_type StackDepotNode::load(u32 id) const {
  if (!store_id)
    return {};
  return stackStore.Load(store_id);
}
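
// Public entry points. Put() interns a trace and returns a compact u32 id
// that callers can stash (e.g. in shadow memory); Get() recovers the trace
// later, typically when a report is printed. A minimal usage sketch, where
// CaptureCurrentTrace is a hypothetical capture helper, not part of this
// file:
//
//   StackTrace trace = CaptureCurrentTrace();
//   u32 id = StackDepotPut(trace);         // dedup + intern
//   ...
//   StackTrace again = StackDepotGet(id);  // same frames back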
StackDepotStats StackDepotGetStats() { return theDepot.GetStats(); }

u32 StackDepotPut(StackTrace stack) { return theDepot.Put(stack); }

StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
  return StackDepotNode::get_handle(theDepot.Put(stack));
}

StackTrace StackDepotGet(u32 id) {
  return theDepot.Get(id);
}
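
// LockAll/UnlockAll bracket a region where no other thread may mutate the
// depot (e.g. around fork). The compression thread is stopped as well, and
// UnlockAll releases everything in reverse order of acquisition.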
void StackDepotLockAll() {
  theDepot.LockAll();
  compress_thread.LockAndStop();
  stackStore.LockAll();
}

void StackDepotUnlockAll() {
  stackStore.UnlockAll();
  compress_thread.Unlock();
  theDepot.UnlockAll();
}

void StackDepotPrintAll() {
#if !SANITIZER_GO
  theDepot.PrintAll();
#endif
}

void StackDepotStopBackgroundThread() { compress_thread.Stop(); }

StackDepotHandle StackDepotNode::get_handle(u32 id) {
  return StackDepotHandle(&theDepot.nodes[id], id);
}

void StackDepotTestOnlyUnmap() {
  theDepot.TestOnlyUnmap();
  stackStore.TestOnlyUnmap();
}

}  // namespace __sanitizer