//===-- hwasan_thread_list.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
//===----------------------------------------------------------------------===//

// HwasanThreadList is a registry for live threads, as well as an allocator for
// HwasanThread objects and their stack history ring buffers. There are
// constraints on memory layout of the shadow region and CompactRingBuffer that
// are part of the ABI contract between compiler-rt and llvm.
//
// * Start of the shadow memory region is aligned to 2**kShadowBaseAlignment.
// * All stack ring buffers are located within (2**kShadowBaseAlignment)
// sized region below and adjacent to the shadow region.
// * Each ring buffer has a size of (2**N)*4096 where N is in [0, 8), and is
// aligned to twice its size. The value of N can be different for each buffer.
//
// These constraints guarantee that, given an address A of any element of the
// ring buffer,
//     A_next = (A + sizeof(uptr)) & ~(1 << (N + 12))
// is the address of the next element of that ring buffer (with wrap-around).
// And, with K = kShadowBaseAlignment,
//     S = (A | ((1 << K) - 1)) + 1
// (align up to kShadowBaseAlignment) is the start of the shadow region.
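//
// As a purely illustrative example (all addresses made up): with N = 1 the
// ring buffer is 8 KiB, aligned to 16 KiB; say it starts at 0x10004000. For
// the last slot A = 0x10005ff8 (sizeof(uptr) == 8 on 64-bit targets),
//     (A + 8) & ~(1 << 13) == 0x10004000,
// i.e. the pointer wraps back to the start of the buffer; for any other slot
// it simply advances by sizeof(uptr). Likewise, if K = 32 and the shadow
// region starts at 0x5500000000, any ring buffer address in the 4 GiB region
// just below it, e.g. A = 0x54fff05ff8, satisfies
//     (A | 0xffffffff) + 1 == 0x5500000000.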
//
// These calculations are used in compiler instrumentation to update the ring
// buffer and obtain the base address of shadow using only two inputs: address
// of the current element of the ring buffer, and N (i.e. size of the ring
// buffer). Since the value of N is very limited, we pack both inputs into a
// single thread-local word as
//     (1 << (N + 56)) | A
// See the implementation of class CompactRingBuffer, which is what is stored
// in said thread-local word.
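//
// For instance (same illustrative values as above), with N = 1 and the
// current slot at A = 0x10004ff8, the thread-local word would be
//     (1 << (1 + 56)) | 0x10004ff8 == 0x0200000010004ff8.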
//
// Note the unusual way of aligning up the address of the shadow:
//     (A | ((1 << K) - 1)) + 1
// It is only correct if A is not already equal to the shadow base address, but
// it saves 2 instructions on AArch64.
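//
// For a small illustrative K = 4: (0x33 | 0xf) + 1 == 0x40, the usual
// round-up to 16; but applied to the already-aligned 0x40 it would give
// 0x50, which is why A must lie strictly below the shadow base.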

#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_flags.h"
#include "hwasan_thread.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_thread_arg_retval.h"

namespace __hwasan {

static uptr RingBufferSize() {
  uptr desired_bytes = flags()->stack_history_size * sizeof(uptr);
  // FIXME: increase the limit to 8 once this bug is fixed:
  // https://bugs.llvm.org/show_bug.cgi?id=39030
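  // For example, with stack_history_size == 1024 (the assumed default),
  // desired_bytes is 8192, so the first iteration below (shift == 1) already
  // satisfies it and an 8 KiB ring buffer is used.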
  for (int shift = 1; shift < 7; ++shift) {
    uptr size = 4096 * (1ULL << shift);
    if (size >= desired_bytes)
      return size;
  }
  Printf("stack history size too large: %d\n", flags()->stack_history_size);
  CHECK(0);
  return 0;
}

struct ThreadStats {
  uptr n_live_threads;
  uptr total_stack_size;
};

class SANITIZER_MUTEX HwasanThreadList {
 public:
  HwasanThreadList(uptr storage, uptr size)
      : free_space_(storage), free_space_end_(storage + size) {
    // [storage, storage + size) is used as a vector of
    // thread_alloc_size_-sized, ring_buffer_size_*2-aligned elements.
    // Each element contains
    // * a ring buffer at offset 0,
    // * a Thread object at offset ring_buffer_size_.
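    //
    // Roughly (assuming, as is typical, that sizeof(Thread) is smaller than
    // ring_buffer_size_):
    //
    //   |<------------ thread_alloc_size_ ------------>|
    //   [ ring buffer ][ Thread object ][   padding    ]
    //   ^ start aligned to ring_buffer_size_ * 2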
    ring_buffer_size_ = RingBufferSize();
    thread_alloc_size_ =
        RoundUpTo(ring_buffer_size_ + sizeof(Thread), ring_buffer_size_ * 2);
  }

  Thread *CreateCurrentThread(const Thread::InitState *state = nullptr)
      SANITIZER_EXCLUDES(free_list_mutex_, live_list_mutex_) {
    Thread *t = nullptr;
    {
      SpinMutexLock l(&free_list_mutex_);
      if (!free_list_.empty()) {
        t = free_list_.back();
        free_list_.pop_back();
      }
    }
    if (t) {
      uptr start = (uptr)t - ring_buffer_size_;
      internal_memset((void *)start, 0, ring_buffer_size_ + sizeof(Thread));
    } else {
      t = AllocThread();
    }
    {
      SpinMutexLock l(&live_list_mutex_);
      live_list_.push_back(t);
    }
    t->Init((uptr)t - ring_buffer_size_, ring_buffer_size_, state);
    AddThreadStats(t);
    return t;
  }
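
  // Return the pages backing a dead thread's ring buffer and Thread object to
  // the OS; the address range itself stays reserved so it can be reused.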
  void DontNeedThread(Thread *t) {
    uptr start = (uptr)t - ring_buffer_size_;
    ReleaseMemoryPagesToOS(start, start + thread_alloc_size_);
  }

  void RemoveThreadFromLiveList(Thread *t)
      SANITIZER_EXCLUDES(live_list_mutex_) {
    SpinMutexLock l(&live_list_mutex_);
    for (Thread *&t2 : live_list_)
      if (t2 == t) {
        // To remove t2, copy the last element of the list in t2's position,
        // and pop_back(). This works even if t2 is itself the last element.
        t2 = live_list_.back();
        live_list_.pop_back();
        return;
      }
    CHECK(0 && "thread not found in live list");
  }

  void ReleaseThread(Thread *t) SANITIZER_EXCLUDES(free_list_mutex_) {
    RemoveThreadStats(t);
    RemoveThreadFromLiveList(t);
    t->Destroy();
    DontNeedThread(t);
    SpinMutexLock l(&free_list_mutex_);
    free_list_.push_back(t);
  }
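
  // p is any address inside some thread's stack ring buffer. Because each
  // per-thread allocation is aligned to ring_buffer_size_ * 2 and the Thread
  // object sits immediately after the ring buffer, rounding p down recovers
  // the owning Thread.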
  Thread *GetThreadByBufferAddress(uptr p) {
    return (Thread *)(RoundDownTo(p, ring_buffer_size_ * 2) +
                      ring_buffer_size_);
  }

  uptr MemoryUsedPerThread() {
    uptr res = sizeof(Thread) + ring_buffer_size_;
    if (auto sz = flags()->heap_history_size)
      res += HeapAllocationsRingBuffer::SizeInBytes(sz);
    return res;
  }

  template <class CB>
  void VisitAllLiveThreads(CB cb) SANITIZER_EXCLUDES(live_list_mutex_) {
    SpinMutexLock l(&live_list_mutex_);
    for (Thread *t : live_list_) cb(t);
  }

  template <class CB>
  Thread *FindThreadLocked(CB cb) SANITIZER_CHECK_LOCKED(live_list_mutex_) {
    CheckLocked();
    for (Thread *t : live_list_)
      if (cb(t))
        return t;
    return nullptr;
  }

  void AddThreadStats(Thread *t) SANITIZER_EXCLUDES(stats_mutex_) {
    SpinMutexLock l(&stats_mutex_);
    stats_.n_live_threads++;
    stats_.total_stack_size += t->stack_size();
  }

  void RemoveThreadStats(Thread *t) SANITIZER_EXCLUDES(stats_mutex_) {
    SpinMutexLock l(&stats_mutex_);
    stats_.n_live_threads--;
    stats_.total_stack_size -= t->stack_size();
  }

  ThreadStats GetThreadStats() SANITIZER_EXCLUDES(stats_mutex_) {
    SpinMutexLock l(&stats_mutex_);
    return stats_;
  }

  uptr GetRingBufferSize() const { return ring_buffer_size_; }
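
  // Lock()/Unlock() expose live_list_mutex_ directly, so a caller can hold it
  // across e.g. a FindThreadLocked() search of the live list.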
  void Lock() SANITIZER_ACQUIRE(live_list_mutex_) { live_list_mutex_.Lock(); }
  void CheckLocked() const SANITIZER_CHECK_LOCKED(live_list_mutex_) {
    live_list_mutex_.CheckLocked();
  }
  void Unlock() SANITIZER_RELEASE(live_list_mutex_) {
    live_list_mutex_.Unlock();
  }

 private:
  Thread *AllocThread() {
    SpinMutexLock l(&free_space_mutex_);
    uptr align = ring_buffer_size_ * 2;
    CHECK(IsAligned(free_space_, align));
    Thread *t = (Thread *)(free_space_ + ring_buffer_size_);
    free_space_ += thread_alloc_size_;
    CHECK_LE(free_space_, free_space_end_);
    return t;
  }

  SpinMutex free_space_mutex_;
  uptr free_space_;
  uptr free_space_end_;
  uptr ring_buffer_size_;
  uptr thread_alloc_size_;

  SpinMutex free_list_mutex_;
  InternalMmapVector<Thread *> free_list_
      SANITIZER_GUARDED_BY(free_list_mutex_);
  SpinMutex live_list_mutex_;
  InternalMmapVector<Thread *> live_list_
      SANITIZER_GUARDED_BY(live_list_mutex_);
  SpinMutex stats_mutex_;
  ThreadStats stats_ SANITIZER_GUARDED_BY(stats_mutex_);
};

void InitThreadList(uptr storage, uptr size);
HwasanThreadList &hwasanThreadList();
ThreadArgRetval &hwasanThreadArgRetval();
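
// A rough usage sketch (illustrative only; the actual call sites live in the
// rest of the runtime, not in this header):
//   InitThreadList(storage, size);                          // once, at startup
//   Thread *t = hwasanThreadList().CreateCurrentThread();   // on thread start
//   ...
//   hwasanThreadList().ReleaseThread(t);                    // on thread exit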

} // namespace __hwasan