//===-- hwasan_thread.cpp -------------------------------------------------===//
//
// Per-thread state management for HWAddressSanitizer.
//
//===----------------------------------------------------------------------===//
  1. #include "hwasan_thread.h"
  2. #include "hwasan.h"
  3. #include "hwasan_interface_internal.h"
  4. #include "hwasan_mapping.h"
  5. #include "hwasan_poisoning.h"
  6. #include "hwasan_thread_list.h"
  7. #include "sanitizer_common/sanitizer_atomic.h"
  8. #include "sanitizer_common/sanitizer_file.h"
  9. #include "sanitizer_common/sanitizer_placement_new.h"
  10. #include "sanitizer_common/sanitizer_tls_get_addr.h"
  11. namespace __hwasan {
  12. static u32 RandomSeed() {
  13. u32 seed;
  14. do {
  15. if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&seed), sizeof(seed),
  16. /*blocking=*/false))) {
  17. seed = static_cast<u32>(
  18. (NanoTime() >> 12) ^
  19. (reinterpret_cast<uptr>(__builtin_frame_address(0)) >> 4));
  20. }
  21. } while (!seed);
  22. return seed;
  23. }
  24. void Thread::InitRandomState() {
  25. random_state_ = flags()->random_tags ? RandomSeed() : unique_id_;
  26. random_state_inited_ = true;
  27. // Push a random number of zeros onto the ring buffer so that the first stack
  28. // tag base will be random.
  29. for (tag_t i = 0, e = GenerateRandomTag(); i != e; ++i)
  30. stack_allocations_->push(0);
  31. }
  32. void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size,
  33. const InitState *state) {
  34. CHECK_EQ(0, unique_id_); // try to catch bad stack reuse
  35. CHECK_EQ(0, stack_top_);
  36. CHECK_EQ(0, stack_bottom_);
  37. static atomic_uint64_t unique_id;
  38. unique_id_ = atomic_fetch_add(&unique_id, 1, memory_order_relaxed);
  39. if (!IsMainThread())
  40. os_id_ = GetTid();
  41. if (auto sz = flags()->heap_history_size)
  42. heap_allocations_ = HeapAllocationsRingBuffer::New(sz);
  43. #if !SANITIZER_FUCHSIA
  44. // Do not initialize the stack ring buffer just yet on Fuchsia. Threads will
  45. // be initialized before we enter the thread itself, so we will instead call
  46. // this later.
  47. InitStackRingBuffer(stack_buffer_start, stack_buffer_size);
  48. #endif
  49. InitStackAndTls(state);
  50. dtls_ = DTLS_Get();
  51. AllocatorThreadStart(allocator_cache());
  52. if (flags()->verbose_threads) {
  53. if (IsMainThread()) {
  54. Printf("sizeof(Thread): %zd sizeof(HeapRB): %zd sizeof(StackRB): %zd\n",
  55. sizeof(Thread), heap_allocations_->SizeInBytes(),
  56. stack_allocations_->size() * sizeof(uptr));
  57. }
  58. Print("Creating : ");
  59. }
  60. ClearShadowForThreadStackAndTLS();
  61. }
// Construct the stack-allocations ring buffer in the TLS slot returned by
// GetCurrentThreadLongPtr(). The placement-new into that slot is what
// registers (this) as the current thread, so statement order below matters.
void Thread::InitStackRingBuffer(uptr stack_buffer_start,
                                 uptr stack_buffer_size) {
  HwasanTSDThreadInit();  // Only needed with interceptors.
  uptr *ThreadLong = GetCurrentThreadLongPtr();
  // The following implicitly sets (this) as the current thread.
  stack_allocations_ = new (ThreadLong)
      StackAllocationsRingBuffer((void *)stack_buffer_start, stack_buffer_size);
  // Check that it worked.
  CHECK_EQ(GetCurrentThread(), this);

  // ScopedTaggingDisable needs GetCurrentThread to be set up.
  ScopedTaggingDisabler disabler;

  // Sanity-check stack bounds if they are already known: a live stack address
  // must fall inside them and both ends must map to application memory.
  if (stack_bottom_) {
    int local;
    CHECK(AddrIsInStack((uptr)&local));
    CHECK(MemIsApp(stack_bottom_));
    CHECK(MemIsApp(stack_top_ - 1));
  }
}
  80. void Thread::ClearShadowForThreadStackAndTLS() {
  81. if (stack_top_ != stack_bottom_)
  82. TagMemory(UntagAddr(stack_bottom_),
  83. UntagAddr(stack_top_) - UntagAddr(stack_bottom_),
  84. GetTagFromPointer(stack_top_));
  85. if (tls_begin_ != tls_end_)
  86. TagMemory(UntagAddr(tls_begin_),
  87. UntagAddr(tls_end_) - UntagAddr(tls_begin_),
  88. GetTagFromPointer(tls_begin_));
  89. }
// Tear down per-thread hwasan state; runs as the thread dies. The teardown
// order (allocator finish, shadow clear, ring-buffer delete, DTLS destroy,
// then unregistering the current thread) is deliberate — do not reorder.
void Thread::Destroy() {
  if (flags()->verbose_threads)
    Print("Destroying: ");
  AllocatorThreadFinish(allocator_cache());
  ClearShadowForThreadStackAndTLS();
  if (heap_allocations_)
    heap_allocations_->Delete();
  DTLS_Destroy();
  // Unregister this as the current thread.
  // Instrumented code can not run on this thread from this point onwards, but
  // malloc/free can still be served. Glibc may call free() very late, after all
  // TSD destructors are done.
  CHECK_EQ(GetCurrentThread(), this);
  *GetCurrentThreadLongPtr() = 0;
}
  105. void Thread::Print(const char *Prefix) {
  106. Printf("%sT%zd %p stack: [%p,%p) sz: %zd tls: [%p,%p)\n", Prefix, unique_id_,
  107. (void *)this, stack_bottom(), stack_top(),
  108. stack_top() - stack_bottom(), tls_begin(), tls_end());
  109. }
  110. static u32 xorshift(u32 state) {
  111. state ^= state << 13;
  112. state ^= state >> 17;
  113. state ^= state << 5;
  114. return state;
  115. }
// Generate a (pseudo-)random non-zero tag of num_bits bits.
// Returns 0 only while tagging is disabled for this thread.
tag_t Thread::GenerateRandomTag(uptr num_bits) {
  DCHECK_GT(num_bits, 0);
  if (tagging_disabled_)
    return 0;
  tag_t tag;
  // Mask selecting the low num_bits of a candidate tag.
  const uptr tag_mask = (1ULL << num_bits) - 1;
  do {
    if (flags()->random_tags) {
      // Consume num_bits at a time from a buffered xorshift output; the PRNG
      // state advances only when the buffer runs out of bits.
      if (!random_buffer_) {
        EnsureRandomStateInited();
        random_buffer_ = random_state_ = xorshift(random_state_);
      }
      CHECK(random_buffer_);
      tag = random_buffer_ & tag_mask;
      random_buffer_ >>= num_bits;
    } else {
      // Deterministic mode: tags come from a simple incrementing counter.
      EnsureRandomStateInited();
      random_state_ += 1;
      tag = random_state_ & tag_mask;
    }
  } while (!tag);  // Retry so the loop never yields a zero tag.
  return tag;
}
  140. void EnsureMainThreadIDIsCorrect() {
  141. auto *t = __hwasan::GetCurrentThread();
  142. if (t && (t->IsMainThread()))
  143. t->set_os_id(GetTid());
  144. }
  145. } // namespace __hwasan
  146. // --- Implementation of LSan-specific functions --- {{{1
  147. namespace __lsan {
  148. static __hwasan::HwasanThreadList *GetHwasanThreadListLocked() {
  149. auto &tl = __hwasan::hwasanThreadList();
  150. tl.CheckLocked();
  151. return &tl;
  152. }
  153. static __hwasan::Thread *GetThreadByOsIDLocked(tid_t os_id) {
  154. return GetHwasanThreadListLocked()->FindThreadLocked(
  155. [os_id](__hwasan::Thread *t) { return t->os_id() == os_id; });
  156. }
// Acquire both hwasan thread-related locks. Acquisition order here must stay
// the exact reverse of the release order in UnlockThreads().
void LockThreads() {
  __hwasan::hwasanThreadList().Lock();
  __hwasan::hwasanThreadArgRetval().Lock();
}
// Release the locks taken by LockThreads(), in reverse acquisition order.
void UnlockThreads() {
  __hwasan::hwasanThreadArgRetval().Unlock();
  __hwasan::hwasanThreadList().Unlock();
}
  165. void EnsureMainThreadIDIsCorrect() { __hwasan::EnsureMainThreadIDIsCorrect(); }
  166. bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
  167. uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
  168. uptr *cache_end, DTLS **dtls) {
  169. auto *t = GetThreadByOsIDLocked(os_id);
  170. if (!t)
  171. return false;
  172. *stack_begin = t->stack_bottom();
  173. *stack_end = t->stack_top();
  174. *tls_begin = t->tls_begin();
  175. *tls_end = t->tls_end();
  176. // Fixme: is this correct for HWASan.
  177. *cache_begin = 0;
  178. *cache_end = 0;
  179. *dtls = t->dtls();
  180. return true;
  181. }
// Intentionally a no-op: no per-thread allocator caches are reported to LSan.
void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}

// Intentionally a no-op: no extra stack ranges for a specific thread.
void GetThreadExtraStackRangesLocked(tid_t os_id,
                                     InternalMmapVector<Range> *ranges) {}

// Intentionally a no-op: no extra stack ranges across all threads.
void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {}

// Collect the pointers tracked by the thread arg/retval bookkeeping so LSan
// can include them in its scan.
void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
  __hwasan::hwasanThreadArgRetval().GetAllPtrsLocked(ptrs);
}

// Intentionally a no-op: hwasan does not enumerate running threads for LSan.
void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {}
  190. } // namespace __lsan