//===-- hwasan_thread_list.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
//===----------------------------------------------------------------------===//

// HwasanThreadList is a registry for live threads, as well as an allocator for
// HwasanThread objects and their stack history ring buffers. There are
// constraints on the memory layout of the shadow region and CompactRingBuffer
// that are part of the ABI contract between compiler-rt and llvm.
//
// * Start of the shadow memory region is aligned to 2**kShadowBaseAlignment.
// * All stack ring buffers are located within a (2**kShadowBaseAlignment)
//   sized region below and adjacent to the shadow region.
// * Each ring buffer has a size of (2**N)*4096 where N is in [0, 8), and is
//   aligned to twice its size. The value of N can be different for each
//   buffer.
//
// These constraints guarantee that, given an address A of any element of the
// ring buffer,
//     A_next = (A + sizeof(uptr)) & ~(1 << (N + 12))
// is the address of the next element of that ring buffer (with wrap-around):
// the buffer occupies the lower half of a region aligned to twice its size,
// so the increment can only carry into bit (N + 12), and clearing that bit
// wraps back to the start of the buffer.
// And, with K = kShadowBaseAlignment,
//     S = (A | ((1 << K) - 1)) + 1
// (align up to kShadowBaseAlignment) is the start of the shadow region.
//
// These calculations are used in compiler instrumentation to update the ring
// buffer and obtain the base address of shadow using only two inputs: the
// address of the current element of the ring buffer, and N (i.e. the size of
// the ring buffer). Since the value of N is very limited, we pack both inputs
// into a single thread-local word as
//     (1 << (N + 56)) | A
// See the implementation of class CompactRingBuffer, which is what is stored
// in said thread-local word.
//
// Note the unusual way of aligning up the address of the shadow:
//     (A | ((1 << K) - 1)) + 1
// It is only correct because A is never itself the shadow base address (the
// ring buffers lie strictly below the shadow region), but it saves 2
// instructions on AArch64.
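//
// A worked example with purely illustrative numbers (they are not part of the
// ABI): take N = 1, so the ring buffer is 2 * 4096 = 0x2000 bytes and starts
// at a 0x4000-aligned address, say B = 0x40000000. For the last element,
//     A      = 0x40001ff8
//     A + 8  = 0x40002000
//     A_next = (A + 8) & ~(1 << 13) = 0x40000000 = B   (wrap-around)
// and the thread-local word is (1 << 57) | A = 0x0200000040001ff8, whose top
// byte 0x02 is the buffer size in pages. Assuming kShadowBaseAlignment == 32,
// the shadow base recovered from A is
//     S = (A | 0xffffffff) + 1 = 0x100000000
// which is consistent with all ring buffers living in the 2**32-sized region
// directly below the shadow region.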

#ifndef HWASAN_THREAD_LIST_H
#define HWASAN_THREAD_LIST_H

#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_flags.h"
#include "hwasan_thread.h"
#include "sanitizer_common/sanitizer_placement_new.h"

namespace __hwasan {

static uptr RingBufferSize() {
  uptr desired_bytes = flags()->stack_history_size * sizeof(uptr);
  // FIXME: increase the limit to 8 once this bug is fixed:
  // https://bugs.llvm.org/show_bug.cgi?id=39030
  for (int shift = 1; shift < 7; ++shift) {
    uptr size = 4096 * (1ULL << shift);
    if (size >= desired_bytes)
      return size;
  }
  Printf("stack history size too large: %d\n", flags()->stack_history_size);
  CHECK(0);
  return 0;
}
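
// For example (illustrative numbers, not defaults): stack_history_size == 1024
// asks for 1024 * 8 == 8192 bytes, so the loop above returns the smallest
// fitting power-of-two buffer, 8192 bytes (shift == 1). A value too large to
// fit in 4096 * 2**6 bytes trips the CHECK instead.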

struct ThreadStats {
  uptr n_live_threads;
  uptr total_stack_size;
};

class SANITIZER_MUTEX HwasanThreadList {
 public:
  HwasanThreadList(uptr storage, uptr size)
      : free_space_(storage), free_space_end_(storage + size) {
    // [storage, storage + size) is used as a vector of
    // thread_alloc_size_-sized, ring_buffer_size_*2-aligned elements.
    // Each element contains
    // * a ring buffer at offset 0,
    // * a Thread object at offset ring_buffer_size_.
    ring_buffer_size_ = RingBufferSize();
    thread_alloc_size_ =
        RoundUpTo(ring_buffer_size_ + sizeof(Thread), ring_buffer_size_ * 2);
  }
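
  // For example (purely illustrative sizes; the real ones depend on the flags
  // and on sizeof(Thread)): with ring_buffer_size_ == 8192 and sizeof(Thread)
  // well under 8192, thread_alloc_size_ == 16384 and each slot looks like
  //     [+0x0000, +0x2000)                   stack ring buffer
  //     [+0x2000, +0x2000 + sizeof(Thread))  Thread object
  //     ...padding up to +0x4000
  // with every slot starting at a 16384-aligned address.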

  Thread *CreateCurrentThread(const Thread::InitState *state = nullptr)
      SANITIZER_EXCLUDES(free_list_mutex_, live_list_mutex_) {
    Thread *t = nullptr;
    {
      SpinMutexLock l(&free_list_mutex_);
      if (!free_list_.empty()) {
        t = free_list_.back();
        free_list_.pop_back();
      }
    }
    if (t) {
      uptr start = (uptr)t - ring_buffer_size_;
      internal_memset((void *)start, 0, ring_buffer_size_ + sizeof(Thread));
    } else {
      t = AllocThread();
    }
    {
      SpinMutexLock l(&live_list_mutex_);
      live_list_.push_back(t);
    }
    t->Init((uptr)t - ring_buffer_size_, ring_buffer_size_, state);
    AddThreadStats(t);
    return t;
  }
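
  // Returns the pages of a thread's slot (ring buffer + Thread object) to the
  // OS without unmapping them, so the memory can be reclaimed while the slot
  // stays reserved for later reuse from free_list_.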
  void DontNeedThread(Thread *t) {
    uptr start = (uptr)t - ring_buffer_size_;
    ReleaseMemoryPagesToOS(start, start + thread_alloc_size_);
  }

  void RemoveThreadFromLiveList(Thread *t)
      SANITIZER_EXCLUDES(live_list_mutex_) {
    SpinMutexLock l(&live_list_mutex_);
    for (Thread *&t2 : live_list_)
      if (t2 == t) {
        // To remove t2, copy the last element of the list into t2's position,
        // and pop_back(). This works even if t2 is itself the last element.
        t2 = live_list_.back();
        live_list_.pop_back();
        return;
      }
    CHECK(0 && "thread not found in live list");
  }

  void ReleaseThread(Thread *t) SANITIZER_EXCLUDES(free_list_mutex_) {
    RemoveThreadStats(t);
    t->Destroy();
    DontNeedThread(t);
    RemoveThreadFromLiveList(t);
    SpinMutexLock l(&free_list_mutex_);
    free_list_.push_back(t);
  }
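
  // Maps an address p inside a thread's stack ring buffer back to the owning
  // Thread object: every slot is (ring_buffer_size_ * 2)-aligned, with the
  // ring buffer at offset 0 and the Thread immediately after it, so rounding
  // p down to the slot boundary and adding ring_buffer_size_ yields the
  // Thread.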
  Thread *GetThreadByBufferAddress(uptr p) {
    return (Thread *)(RoundDownTo(p, ring_buffer_size_ * 2) +
                      ring_buffer_size_);
  }

  uptr MemoryUsedPerThread() {
    uptr res = sizeof(Thread) + ring_buffer_size_;
    if (auto sz = flags()->heap_history_size)
      res += HeapAllocationsRingBuffer::SizeInBytes(sz);
    return res;
  }

  template <class CB>
  void VisitAllLiveThreads(CB cb) SANITIZER_EXCLUDES(live_list_mutex_) {
    SpinMutexLock l(&live_list_mutex_);
    for (Thread *t : live_list_) cb(t);
  }

  template <class CB>
  Thread *FindThreadLocked(CB cb) SANITIZER_CHECK_LOCKED(live_list_mutex_) {
    CheckLocked();
    for (Thread *t : live_list_)
      if (cb(t))
        return t;
    return nullptr;
  }

  void AddThreadStats(Thread *t) SANITIZER_EXCLUDES(stats_mutex_) {
    SpinMutexLock l(&stats_mutex_);
    stats_.n_live_threads++;
    stats_.total_stack_size += t->stack_size();
  }

  void RemoveThreadStats(Thread *t) SANITIZER_EXCLUDES(stats_mutex_) {
    SpinMutexLock l(&stats_mutex_);
    stats_.n_live_threads--;
    stats_.total_stack_size -= t->stack_size();
  }

  ThreadStats GetThreadStats() SANITIZER_EXCLUDES(stats_mutex_) {
    SpinMutexLock l(&stats_mutex_);
    return stats_;
  }

  uptr GetRingBufferSize() const { return ring_buffer_size_; }
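
  // The live-list lock is exposed so that callers can hold it across
  // FindThreadLocked() or across several operations on the live list;
  // CheckLocked() is what FindThreadLocked() uses to assert this.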
  void Lock() SANITIZER_ACQUIRE(live_list_mutex_) { live_list_mutex_.Lock(); }
  void CheckLocked() const SANITIZER_CHECK_LOCKED(live_list_mutex_) {
    live_list_mutex_.CheckLocked();
  }
  void Unlock() SANITIZER_RELEASE(live_list_mutex_) {
    live_list_mutex_.Unlock();
  }

 private:
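  // Bump-allocates a new slot from the [storage, storage + size) region that
  // was passed to the constructor; free_space_ only grows. Retired threads
  // are never returned to this region: they go to free_list_ (see
  // ReleaseThread) and are reused by CreateCurrentThread.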
  Thread *AllocThread() {
    SpinMutexLock l(&free_space_mutex_);
    uptr align = ring_buffer_size_ * 2;
    CHECK(IsAligned(free_space_, align));
    Thread *t = (Thread *)(free_space_ + ring_buffer_size_);
    free_space_ += thread_alloc_size_;
    CHECK(free_space_ <= free_space_end_ && "out of thread memory");
    return t;
  }

  SpinMutex free_space_mutex_;
  uptr free_space_;
  uptr free_space_end_;
  uptr ring_buffer_size_;
  uptr thread_alloc_size_;

  SpinMutex free_list_mutex_;
  InternalMmapVector<Thread *> free_list_
      SANITIZER_GUARDED_BY(free_list_mutex_);
  SpinMutex live_list_mutex_;
  InternalMmapVector<Thread *> live_list_
      SANITIZER_GUARDED_BY(live_list_mutex_);

  SpinMutex stats_mutex_;
  ThreadStats stats_ SANITIZER_GUARDED_BY(stats_mutex_);
};

void InitThreadList(uptr storage, uptr size);
HwasanThreadList &hwasanThreadList();
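
// A minimal usage sketch (hypothetical call sites; the real ones live in the
// hwasan runtime, and the storage variable names below are made up):
//   InitThreadList(thread_space_start, thread_space_size);
//   Thread *t = hwasanThreadList().CreateCurrentThread();
//   ...
//   hwasanThreadList().ReleaseThread(t);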

} // namespace __hwasan

#endif // HWASAN_THREAD_LIST_H