//===-- hwasan_thread_list.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
//===----------------------------------------------------------------------===//

// HwasanThreadList is a registry for live threads, as well as an allocator for
// HwasanThread objects and their stack history ring buffers. There are
// constraints on the memory layout of the shadow region and CompactRingBuffer
// that are part of the ABI contract between compiler-rt and llvm.
//
// * Start of the shadow memory region is aligned to 2**kShadowBaseAlignment.
// * All stack ring buffers are located within a (2**kShadowBaseAlignment)-
//   sized region below and adjacent to the shadow region.
// * Each ring buffer has a size of (2**N)*4096 where N is in [0, 8), and is
//   aligned to twice its size. The value of N can be different for each buffer.
//
// These constraints guarantee that, given an address A of any element of the
// ring buffer,
//   A_next = (A + sizeof(uptr)) & ~(1 << (N + 12))
// is the address of the next element of that ring buffer (with wrap-around);
// the cleared bit is exactly the ring buffer size, (2**N)*4096 bytes.
// And, with K = kShadowBaseAlignment,
//   S = (A | ((1 << K) - 1)) + 1
// (align up to kShadowBaseAlignment) is the start of the shadow region.
//
// These calculations are used in compiler instrumentation to update the ring
// buffer and obtain the base address of shadow using only two inputs: address
// of the current element of the ring buffer, and N (i.e. size of the ring
// buffer). Since the value of N is very limited, we pack both inputs into a
// single thread-local word as
//   (1 << (N + 56)) | A
// See the implementation of class CompactRingBuffer, which is what is stored
// in said thread-local word.
//
// Note the unusual way of aligning up the address of the shadow:
//   (A | ((1 << K) - 1)) + 1
// It is only correct if A is not already equal to the shadow base address, but
// it saves 2 instructions on AArch64.
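//
// Worked example (illustrative values, not part of the ABI): with N = 1 the
// ring buffer is 2*4096 = 8192 bytes, aligned to 16384; say it starts at
// 0x4000, so the thread-local word is (1 << 57) | 0x4000. For an element at
// A = 0x4008 the next element is (0x4008 + 8) & ~0x2000 = 0x4010, and for the
// last element at A = 0x5ff8 the sum 0x6000 has the 0x2000 bit set, so masking
// it off wraps back to the buffer start 0x4000.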

#ifndef HWASAN_THREAD_LIST_H
#define HWASAN_THREAD_LIST_H

#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_flags.h"
#include "hwasan_thread.h"
#include "sanitizer_common/sanitizer_placement_new.h"

namespace __hwasan {
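
// Returns the size in bytes of the stack history ring buffer: the smallest
// supported power-of-two multiple of 4096 that can hold
// flags()->stack_history_size records of sizeof(uptr) bytes each.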
static uptr RingBufferSize() {
  uptr desired_bytes = flags()->stack_history_size * sizeof(uptr);
  // FIXME: increase the limit to 8 once this bug is fixed:
  // https://bugs.llvm.org/show_bug.cgi?id=39030
  for (int shift = 1; shift < 7; ++shift) {
    uptr size = 4096 * (1ULL << shift);
    if (size >= desired_bytes)
      return size;
  }
  Printf("stack history size too large: %d\n", flags()->stack_history_size);
  CHECK(0);
  return 0;
}
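
// Aggregate statistics over all live threads.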
struct ThreadStats {
  uptr n_live_threads;
  uptr total_stack_size;
};
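
// Registry of live threads and allocator for Thread objects and their stack
// history ring buffers, carved out of a single preallocated memory region.
// See the file comment above for the layout and alignment constraints.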
class HwasanThreadList {
 public:
  HwasanThreadList(uptr storage, uptr size)
      : free_space_(storage), free_space_end_(storage + size) {
    // [storage, storage + size) is used as a vector of
    // thread_alloc_size_-sized, ring_buffer_size_*2-aligned elements.
    // Each element contains
    // * a ring buffer at offset 0,
    // * a Thread object at offset ring_buffer_size_.
    ring_buffer_size_ = RingBufferSize();
    thread_alloc_size_ =
        RoundUpTo(ring_buffer_size_ + sizeof(Thread), ring_buffer_size_ * 2);
  }
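
  // Creates a Thread object and its stack history ring buffer for the calling
  // thread, reusing a slot from the free list if one is available, and
  // registers it in the live list.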
  Thread *CreateCurrentThread(const Thread::InitState *state = nullptr) {
    Thread *t = nullptr;
    {
      SpinMutexLock l(&free_list_mutex_);
      if (!free_list_.empty()) {
        t = free_list_.back();
        free_list_.pop_back();
      }
    }
    if (t) {
      uptr start = (uptr)t - ring_buffer_size_;
      internal_memset((void *)start, 0, ring_buffer_size_ + sizeof(Thread));
    } else {
      t = AllocThread();
    }
    {
      SpinMutexLock l(&live_list_mutex_);
      live_list_.push_back(t);
    }
    t->Init((uptr)t - ring_buffer_size_, ring_buffer_size_, state);
    AddThreadStats(t);
    return t;
  }
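
  // Returns the pages backing a thread's slot (ring buffer plus Thread object)
  // to the OS; the address range itself stays reserved for later reuse.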
  void DontNeedThread(Thread *t) {
    uptr start = (uptr)t - ring_buffer_size_;
    ReleaseMemoryPagesToOS(start, start + thread_alloc_size_);
  }

  void RemoveThreadFromLiveList(Thread *t) {
    SpinMutexLock l(&live_list_mutex_);
    for (Thread *&t2 : live_list_)
      if (t2 == t) {
        // To remove t2, copy the last element of the list in t2's position,
        // and pop_back(). This works even if t2 is itself the last element.
        t2 = live_list_.back();
        live_list_.pop_back();
        return;
      }
    CHECK(0 && "thread not found in live list");
  }
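
  // Retires a thread: updates the stats, destroys the Thread object, releases
  // its pages to the OS and moves it from the live list to the free list.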
  void ReleaseThread(Thread *t) {
    RemoveThreadStats(t);
    t->Destroy();
    DontNeedThread(t);
    RemoveThreadFromLiveList(t);
    SpinMutexLock l(&free_list_mutex_);
    free_list_.push_back(t);
  }
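
  // Maps an address anywhere inside a thread's stack ring buffer back to the
  // owning Thread object. This relies on each slot being aligned to twice the
  // ring buffer size, with the Thread object placed right after the buffer.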
  Thread *GetThreadByBufferAddress(uptr p) {
    return (Thread *)(RoundDownTo(p, ring_buffer_size_ * 2) +
                      ring_buffer_size_);
  }
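
  // Approximate number of bytes the runtime needs per thread: the Thread
  // object, the stack history ring buffer and, if enabled, the heap
  // allocations ring buffer.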
  uptr MemoryUsedPerThread() {
    uptr res = sizeof(Thread) + ring_buffer_size_;
    if (auto sz = flags()->heap_history_size)
      res += HeapAllocationsRingBuffer::SizeInBytes(sz);
    return res;
  }

  template <class CB>
  void VisitAllLiveThreads(CB cb) {
    SpinMutexLock l(&live_list_mutex_);
    for (Thread *t : live_list_) cb(t);
  }

  void AddThreadStats(Thread *t) {
    SpinMutexLock l(&stats_mutex_);
    stats_.n_live_threads++;
    stats_.total_stack_size += t->stack_size();
  }

  void RemoveThreadStats(Thread *t) {
    SpinMutexLock l(&stats_mutex_);
    stats_.n_live_threads--;
    stats_.total_stack_size -= t->stack_size();
  }

  ThreadStats GetThreadStats() {
    SpinMutexLock l(&stats_mutex_);
    return stats_;
  }

  uptr GetRingBufferSize() const { return ring_buffer_size_; }

 private:
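  // Carves a fresh slot out of the preallocated region; the Thread object
  // lives right after its ring buffer within the slot.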
  Thread *AllocThread() {
    SpinMutexLock l(&free_space_mutex_);
    uptr align = ring_buffer_size_ * 2;
    CHECK(IsAligned(free_space_, align));
    Thread *t = (Thread *)(free_space_ + ring_buffer_size_);
    free_space_ += thread_alloc_size_;
    CHECK(free_space_ <= free_space_end_ && "out of thread memory");
    return t;
  }

  SpinMutex free_space_mutex_;
  uptr free_space_;
  uptr free_space_end_;
  uptr ring_buffer_size_;
  uptr thread_alloc_size_;

  SpinMutex free_list_mutex_;
  InternalMmapVector<Thread *> free_list_;
  SpinMutex live_list_mutex_;
  InternalMmapVector<Thread *> live_list_;

  ThreadStats stats_;
  SpinMutex stats_mutex_;
};
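
// Global thread list; initialized by InitThreadList() with the memory region
// used for per-thread allocations.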
void InitThreadList(uptr storage, uptr size);
HwasanThreadList &hwasanThreadList();

} // namespace __hwasan

#endif  // HWASAN_THREAD_LIST_H