//===-- sanitizer_stackdepot.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//

#include "sanitizer_stackdepot.h"

#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_hash.h"
#include "sanitizer_mutex.h"
#include "sanitizer_stack_store.h"
#include "sanitizer_stackdepotbase.h"

namespace __sanitizer {
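
// A node in the depot's hash table. The frames themselves live in the global
// StackStore; a node keeps only the 64-bit hash of the trace, the hash-chain
// link, and the StackStore id needed to load the frames back.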
struct StackDepotNode {
  using hash_type = u64;
  hash_type stack_hash;
  u32 link;
  StackStore::Id store_id;

  // Log2 of the hash table size: 2^16 buckets on Android, 2^20 elsewhere.
  static const u32 kTabSizeLog = SANITIZER_ANDROID ? 16 : 20;

  typedef StackTrace args_type;
  // Traces with equal 64-bit hashes are considered identical; the frames
  // themselves are not compared.
  bool eq(hash_type hash, const args_type &args) const {
    return hash == stack_hash;
  }
  static uptr allocated();
  // Hash the frame PCs and the trace tag with MurMur2.
  static hash_type hash(const args_type &args) {
    MurMur2Hash64Builder H(args.size * sizeof(uptr));
    for (uptr i = 0; i < args.size; i++) H.add(args.trace[i]);
    H.add(args.tag);
    return H.get();
  }
  static bool is_valid(const args_type &args) {
    return args.size > 0 && args.trace;
  }
  void store(u32 id, const args_type &args, hash_type hash);
  args_type load(u32 id) const;
  static StackDepotHandle get_handle(u32 id);

  typedef StackDepotHandle handle_type;
};
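
// All frames from all traces are stored in a single process-global StackStore
// and addressed by StackStore::Id.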
static StackStore stackStore;

// FIXME(dvyukov): this single reserved bit is used in TSan.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
    StackDepot;
static StackDepot theDepot;

// Keep mutable data out of frequently accessed nodes to improve caching
// efficiency.
static TwoLevelMap<atomic_uint32_t, StackDepot::kNodesSize1,
                   StackDepot::kNodesSize2>
    useCounts;

int StackDepotHandle::use_count() const {
  return atomic_load_relaxed(&useCounts[id_]);
}

void StackDepotHandle::inc_use_count_unsafe() {
  atomic_fetch_add(&useCounts[id_], 1, memory_order_relaxed);
}

uptr StackDepotNode::allocated() {
  return stackStore.Allocated() + useCounts.MemoryUsage();
}
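
// Pack the StackStore using the compression mode selected by the
// compress_stack_depot flag (its absolute value picks the mode, see
// StackStore::Compression) and report how much memory was released.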
static void CompressStackStore() {
  u64 start = MonotonicNanoTime();
  uptr diff = stackStore.Pack(static_cast<StackStore::Compression>(
      Abs(common_flags()->compress_stack_depot)));
  if (!diff)
    return;
  u64 finish = MonotonicNanoTime();
  uptr total_before = theDepot.GetStats().allocated + diff;
  VPrintf(1, "%s: StackDepot released %zu KiB out of %zu KiB in %llu ms\n",
          SanitizerToolName, diff >> 10, total_before >> 10,
          (finish - start) / 1000000);
}

namespace {
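
// A lazily started background thread that packs the StackStore off the hot
// path. NewWorkNotify() starts it on first use and wakes it via a semaphore;
// Stop() and LockAndStop() shut it down and join it.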
class CompressThread {
 public:
  constexpr CompressThread() = default;
  void NewWorkNotify();
  void Stop();
  void LockAndStop() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
  void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;

 private:
  enum class State {
    NotStarted = 0,
    Started,
    Failed,
    Stopped,
  };

  void Run();

  // Block until new work is posted; returns false once Stop() clears run_.
  bool WaitForWork() {
    semaphore_.Wait();
    return atomic_load(&run_, memory_order_acquire);
  }

  Semaphore semaphore_ = {};
  StaticSpinMutex mutex_ = {};
  State state_ SANITIZER_GUARDED_BY(mutex_) = State::NotStarted;
  void *thread_ SANITIZER_GUARDED_BY(mutex_) = nullptr;
  atomic_uint8_t run_ = {};
};

static CompressThread compress_thread;
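
// Called when the StackStore signals that packing may be worthwhile. With a
// positive compress_stack_depot flag the work is handed to the background
// thread (started on first notification); with a negative flag, or if the
// thread failed to start, compression runs synchronously on the caller.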
void CompressThread::NewWorkNotify() {
  int compress = common_flags()->compress_stack_depot;
  if (!compress)
    return;
  if (compress > 0) {
    // Positive values: compress in the background thread.
    SpinMutexLock l(&mutex_);
    if (state_ == State::NotStarted) {
      atomic_store(&run_, 1, memory_order_release);
      CHECK_EQ(nullptr, thread_);
      thread_ = internal_start_thread(
          [](void *arg) -> void * {
            reinterpret_cast<CompressThread *>(arg)->Run();
            return nullptr;
          },
          this);
      state_ = thread_ ? State::Started : State::Failed;
    }
    if (state_ == State::Started) {
      semaphore_.Post();
      return;
    }
  }
  // Negative values (for testing or debugging) compress synchronously, as
  // does a failure to start the background thread.
  CompressStackStore();
}

void CompressThread::Run() {
  VPrintf(1, "%s: StackDepot compression thread started\n", SanitizerToolName);
  while (WaitForWork()) CompressStackStore();
  VPrintf(1, "%s: StackDepot compression thread stopped\n", SanitizerToolName);
}

void CompressThread::Stop() {
  void *t = nullptr;
  {
    SpinMutexLock l(&mutex_);
    if (state_ != State::Started)
      return;
    state_ = State::Stopped;
    CHECK_NE(nullptr, thread_);
    t = thread_;
    thread_ = nullptr;
  }
  atomic_store(&run_, 0, memory_order_release);
  semaphore_.Post();
  internal_join_thread(t);
}
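
// Like Stop(), but returns with mutex_ still held so that no new thread can
// be started until the matching Unlock(); the state is reset to NotStarted so
// the thread may be restarted afterwards.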
void CompressThread::LockAndStop() {
  mutex_.Lock();
  if (state_ != State::Started)
    return;
  CHECK_NE(nullptr, thread_);
  atomic_store(&run_, 0, memory_order_release);
  semaphore_.Post();
  internal_join_thread(thread_);
  // Allow restarting after Unlock() if needed.
  state_ = State::NotStarted;
  thread_ = nullptr;
}

void CompressThread::Unlock() { mutex_.Unlock(); }

}  // namespace
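
// Remember the hash and hand the frames to the StackStore. Store() sets
// *pack when a block has filled up and packing may be worthwhile; only then
// is the compression machinery notified.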
void StackDepotNode::store(u32 id, const args_type &args, hash_type hash) {
  stack_hash = hash;
  uptr pack = 0;
  store_id = stackStore.Store(args, &pack);
  if (LIKELY(!pack))
    return;
  compress_thread.NewWorkNotify();
}

StackDepotNode::args_type StackDepotNode::load(u32 id) const {
  if (!store_id)
    return {};
  return stackStore.Load(store_id);
}
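
// Public entry points. Typical (illustrative) usage from a runtime:
//   u32 id = StackDepotPut(StackTrace(pcs, pcs_count));
//   ...
//   StackTrace trace = StackDepotGet(id);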
StackDepotStats StackDepotGetStats() { return theDepot.GetStats(); }

u32 StackDepotPut(StackTrace stack) { return theDepot.Put(stack); }

StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
  return StackDepotNode::get_handle(theDepot.Put(stack));
}

StackTrace StackDepotGet(u32 id) {
  return theDepot.Get(id);
}
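
// Lock everything (e.g., before fork) and unlock in the reverse order
// afterwards. LockAndStop() also joins the background thread so it does not
// keep running while the locks are held.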
void StackDepotLockAll() {
  theDepot.LockAll();
  compress_thread.LockAndStop();
  stackStore.LockAll();
}

void StackDepotUnlockAll() {
  stackStore.UnlockAll();
  compress_thread.Unlock();
  theDepot.UnlockAll();
}

void StackDepotPrintAll() {
#if !SANITIZER_GO
  theDepot.PrintAll();
#endif
}

void StackDepotStopBackgroundThread() { compress_thread.Stop(); }

StackDepotHandle StackDepotNode::get_handle(u32 id) {
  return StackDepotHandle(&theDepot.nodes[id], id);
}

void StackDepotTestOnlyUnmap() {
  theDepot.TestOnlyUnmap();
  stackStore.TestOnlyUnmap();
}

}  // namespace __sanitizer