// tsan_rtl_thread.cpp
//===-- tsan_rtl_thread.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
  12. #include "sanitizer_common/sanitizer_placement_new.h"
  13. #include "tsan_rtl.h"
  14. #include "tsan_mman.h"
  15. #include "tsan_platform.h"
  16. #include "tsan_report.h"
  17. #include "tsan_sync.h"
  18. namespace __tsan {
// ThreadContext implementation.

// thr and sync start null; they are filled in by OnStarted/OnCreated.
ThreadContext::ThreadContext(Tid tid) : ThreadContextBase(tid), thr(), sync() {}
#if !SANITIZER_GO
// Empty out-of-line destructor; only the C++ (non-Go) runtime defines it.
ThreadContext::~ThreadContext() {
}
#endif
  25. void ThreadContext::OnReset() { CHECK(!sync); }
#if !SANITIZER_GO
// One thread-leak report entry: a representative finished-but-unjoined
// thread, plus how many threads share its creation stack.
struct ThreadLeak {
  ThreadContext *tctx;  // representative leaked thread
  int count;            // number of threads with the same creation_stack_id
};
  31. static void CollectThreadLeaks(ThreadContextBase *tctx_base, void *arg) {
  32. auto &leaks = *static_cast<Vector<ThreadLeak> *>(arg);
  33. auto *tctx = static_cast<ThreadContext *>(tctx_base);
  34. if (tctx->detached || tctx->status != ThreadStatusFinished)
  35. return;
  36. for (uptr i = 0; i < leaks.Size(); i++) {
  37. if (leaks[i].tctx->creation_stack_id == tctx->creation_stack_id) {
  38. leaks[i].count++;
  39. return;
  40. }
  41. }
  42. leaks.PushBack({tctx, 1});
  43. }
  44. #endif
  45. // Disabled on Mac because lldb test TestTsanBasic fails:
  46. // https://reviews.llvm.org/D112603#3163158
  47. #if !SANITIZER_GO && !SANITIZER_MAC
  48. static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
  49. if (tctx->tid == kMainTid) {
  50. Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
  51. } else {
  52. Printf("ThreadSanitizer: thread T%d %s finished with ignores enabled,"
  53. " created at:\n", tctx->tid, tctx->name);
  54. PrintStack(SymbolizeStackId(tctx->creation_stack_id));
  55. }
  56. Printf(" One of the following ignores was not ended"
  57. " (in order of probability)\n");
  58. for (uptr i = 0; i < set->Size(); i++) {
  59. Printf(" Ignore was enabled at:\n");
  60. PrintStack(SymbolizeStackId(set->At(i)));
  61. }
  62. Die();
  63. }
// Reports (and dies, via ReportIgnoresEnabled) if the finishing thread still
// has memory-access or sync ignores enabled. Skipped entirely after a
// multi-threaded fork.
static void ThreadCheckIgnore(ThreadState *thr) {
  if (ctx->after_multithreaded_fork)
    return;
  if (thr->ignore_reads_and_writes)
    ReportIgnoresEnabled(thr->tctx, &thr->mop_ignore_set);
  if (thr->ignore_sync)
    ReportIgnoresEnabled(thr->tctx, &thr->sync_ignore_set);
}
#else
// No-op for Go and on Mac (see the comment above ReportIgnoresEnabled).
static void ThreadCheckIgnore(ThreadState *thr) {}
#endif
  75. void ThreadFinalize(ThreadState *thr) {
  76. ThreadCheckIgnore(thr);
  77. #if !SANITIZER_GO
  78. if (!ShouldReport(thr, ReportTypeThreadLeak))
  79. return;
  80. ThreadRegistryLock l(&ctx->thread_registry);
  81. Vector<ThreadLeak> leaks;
  82. ctx->thread_registry.RunCallbackForEachThreadLocked(CollectThreadLeaks,
  83. &leaks);
  84. for (uptr i = 0; i < leaks.Size(); i++) {
  85. ScopedReport rep(ReportTypeThreadLeak);
  86. rep.AddThread(leaks[i].tctx, true);
  87. rep.SetCount(leaks[i].count);
  88. OutputReport(thr, rep);
  89. }
  90. #endif
  91. }
  92. int ThreadCount(ThreadState *thr) {
  93. uptr result;
  94. ctx->thread_registry.GetNumberOfThreads(0, 0, &result);
  95. return (int)result;
  96. }
// Data handed from ThreadCreate (in the parent) to ThreadContext::OnCreated.
struct OnCreatedArgs {
  VectorClock *sync;  // parent's clock released at creation (may be null)
  uptr sync_epoch;    // ctx->global_epoch at which `sync` was captured
  StackID stack;      // creation stack of the new thread
};
// Registers a new thread in the registry and captures the parent's vector
// clock (unless sync is ignored) so the child can acquire it in ThreadStart.
// Returns the new thread's Tid. thr may be null (see below).
Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
  // The main thread and GCD workers don't have a parent thread.
  Tid parent = kInvalidTid;
  OnCreatedArgs arg = {nullptr, 0, kInvalidStackID};
  if (thr) {
    parent = thr->tid;
    arg.stack = CurrentStackId(thr, pc);
    if (!thr->ignore_sync) {
      // Release the parent's clock into arg.sync and stamp it with the
      // current global epoch; ThreadStart checks the epoch before acquiring.
      SlotLocker locker(thr);
      thr->clock.ReleaseStore(&arg.sync);
      arg.sync_epoch = ctx->global_epoch;
      IncrementEpoch(thr);
    }
  }
  Tid tid = ctx->thread_registry.CreateThread(uid, detached, parent, &arg);
  DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent, tid, uid);
  return tid;
}
  120. void ThreadContext::OnCreated(void *arg) {
  121. OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
  122. sync = args->sync;
  123. sync_epoch = args->sync_epoch;
  124. creation_stack_id = args->stack;
  125. }
  126. extern "C" void __tsan_stack_initialization() {}
// NOTE(review): this struct appears unused in this file — OnStarted receives
// a ThreadState* directly and ThreadStart passes thr itself; confirm against
// other TUs before removing.
struct OnStartedArgs {
  ThreadState *thr;
  uptr stk_addr;
  uptr stk_size;
  uptr tls_addr;
  uptr tls_size;
};
// Runs on the newly started thread: registers it as started, attaches a slot
// and acquires the creator's clock captured in ThreadCreate, records the
// stack/TLS ranges, and (for non-main C++ threads) imitates writes over the
// stack and TLS so their shadow is attributed to this thread.
void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
                 ThreadType thread_type) {
  ctx->thread_registry.StartThread(tid, os_id, thread_type, thr);
  if (!thr->ignore_sync) {
    SlotAttachAndLock(thr);
    // The creation-time clock is only valid if the global epoch has not
    // changed since ThreadCreate captured it.
    if (thr->tctx->sync_epoch == ctx->global_epoch)
      thr->clock.Acquire(thr->tctx->sync);
    SlotUnlock(thr);
  }
  Free(thr->tctx->sync);
  uptr stk_addr = 0;
  uptr stk_size = 0;
  uptr tls_addr = 0;
  uptr tls_size = 0;
#if !SANITIZER_GO
  // Fibers share the host thread's stack; only query it for real threads.
  if (thread_type != ThreadType::Fiber)
    GetThreadStackAndTls(tid == kMainTid, &stk_addr, &stk_size, &tls_addr,
                         &tls_size);
#endif
  thr->stk_addr = stk_addr;
  thr->stk_size = stk_size;
  thr->tls_addr = tls_addr;
  thr->tls_size = tls_size;
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork) {
    // NOTE(review): in a post-fork child everything is ignored on this
    // thread (interceptors, accesses, sync) — rationale inferred; confirm.
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, 0);
    ThreadIgnoreSyncBegin(thr, 0);
  }
#endif
#if !SANITIZER_GO
  // Don't imitate stack/TLS writes for the main thread,
  // because its initialization is synchronized with all
  // subsequent threads anyway.
  if (tid != kMainTid) {
    if (stk_addr && stk_size) {
      // Attribute the imitated writes to the __tsan_stack_initialization
      // marker so reports show a recognizable frame.
      const uptr pc = StackTrace::GetNextInstructionPc(
          reinterpret_cast<uptr>(__tsan_stack_initialization));
      MemoryRangeImitateWrite(thr, pc, stk_addr, stk_size);
    }
    if (tls_addr && tls_size)
      ImitateTlsWrite(thr, tls_addr, tls_size);
  }
#endif
}
// Registry callback for ThreadStart: takes ownership of the caller-provided
// ThreadState storage and constructs it in place.
void ThreadContext::OnStarted(void *arg) {
  thr = static_cast<ThreadState *>(arg);
  DPrintf("#%d: ThreadStart\n", tid);
  // Placement-construct the ThreadState in the storage passed via arg.
  new (thr) ThreadState(tid);
  if (common_flags()->detect_deadlocks)
    thr->dd_lt = ctx->dd->CreateLogicalThread(tid);
  thr->tctx = this;
#if !SANITIZER_GO
  thr->is_inited = true;
#endif
}
// Tears down the calling thread: checks leftover ignores, releases shadow for
// its stack/TLS, publishes its clock for a future joiner, frees the shadow
// stack, detaches from its slot, and destroys the ThreadState in place.
void ThreadFinish(ThreadState *thr) {
  DPrintf("#%d: ThreadFinish\n", thr->tid);
  ThreadCheckIgnore(thr);
  if (thr->stk_addr && thr->stk_size)
    DontNeedShadowFor(thr->stk_addr, thr->stk_size);
  if (thr->tls_addr && thr->tls_size)
    DontNeedShadowFor(thr->tls_addr, thr->tls_size);
  thr->is_dead = true;
#if !SANITIZER_GO
  thr->is_inited = false;
  thr->ignore_interceptors++;
  PlatformCleanUpThreadState(thr);
#endif
  if (!thr->ignore_sync) {
    // Lock order: slot first, then the registry.
    SlotLocker locker(thr);
    ThreadRegistryLock lock(&ctx->thread_registry);
    // Note: detached is protected by the thread registry mutex,
    // the thread may be detaching concurrently in another thread.
    if (!thr->tctx->detached) {
      // Publish this thread's clock for ThreadJoin to acquire, stamped with
      // the current global epoch.
      thr->clock.ReleaseStore(&thr->tctx->sync);
      thr->tctx->sync_epoch = ctx->global_epoch;
      IncrementEpoch(thr);
    }
  }
#if !SANITIZER_GO
  UnmapOrDie(thr->shadow_stack, kShadowStackSize * sizeof(uptr));
#else
  Free(thr->shadow_stack);
#endif
  thr->shadow_stack = nullptr;
  thr->shadow_stack_pos = nullptr;
  thr->shadow_stack_end = nullptr;
  if (common_flags()->detect_deadlocks)
    ctx->dd->DestroyLogicalThread(thr->dd_lt);
  SlotDetach(thr);
  ctx->thread_registry.FinishThread(thr->tid);
  thr->~ThreadState();
}
// Registry callback at thread finish: recycles the thread's trace parts into
// the global pool and severs the tctx <-> ThreadState links.
void ThreadContext::OnFinished() {
  Lock lock(&ctx->slot_mtx);
  Lock lock1(&trace.mtx);
  // Queue all trace parts into the global recycle queue.
  auto parts = &trace.parts;
  while (trace.local_head) {
    CHECK(parts->Queued(trace.local_head));
    ctx->trace_part_recycle.PushBack(trace.local_head);
    trace.local_head = parts->Next(trace.local_head);
  }
  ctx->trace_part_recycle_finished += parts->Size();
  // If finished threads hold too many parts (thresholds kFinishedThreadHi/Lo),
  // shrink this thread's parts_allocated budget and account the excess.
  if (ctx->trace_part_recycle_finished > Trace::kFinishedThreadHi) {
    ctx->trace_part_finished_excess += parts->Size();
    trace.parts_allocated = 0;
  } else if (ctx->trace_part_recycle_finished > Trace::kFinishedThreadLo &&
             parts->Size() > 1) {
    ctx->trace_part_finished_excess += parts->Size() - 1;
    trace.parts_allocated = 1;
  }
  // From now on replay will use trace->final_pos.
  trace.final_pos = (Event *)atomic_load_relaxed(&thr->trace_pos);
  atomic_store_relaxed(&thr->trace_pos, 0);
  thr->tctx = nullptr;
  thr = nullptr;
}
// NOTE(review): appears unused in this file — ThreadConsumeTid calls the
// registry directly. Confirm against other TUs before removing.
struct ConsumeThreadContext {
  uptr uid;
  ThreadContextBase *tctx;
};
// Consumes the registry's user-id mapping for `uid` and returns the
// corresponding Tid (thr and pc are unused).
Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid) {
  return ctx->thread_registry.ConsumeThreadUserId(uid);
}
// Transfers a finished thread's release clock from OnJoined to ThreadJoin.
struct JoinArg {
  VectorClock *sync;  // clock released by the finished thread (may be null)
  uptr sync_epoch;    // global epoch at which `sync` was captured
};
  264. void ThreadJoin(ThreadState *thr, uptr pc, Tid tid) {
  265. CHECK_GT(tid, 0);
  266. DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
  267. JoinArg arg = {};
  268. ctx->thread_registry.JoinThread(tid, &arg);
  269. if (!thr->ignore_sync) {
  270. SlotLocker locker(thr);
  271. if (arg.sync_epoch == ctx->global_epoch)
  272. thr->clock.Acquire(arg.sync);
  273. }
  274. Free(arg.sync);
  275. }
  276. void ThreadContext::OnJoined(void *ptr) {
  277. auto arg = static_cast<JoinArg *>(ptr);
  278. arg->sync = sync;
  279. arg->sync_epoch = sync_epoch;
  280. sync = nullptr;
  281. sync_epoch = 0;
  282. }
  283. void ThreadContext::OnDead() { CHECK_EQ(sync, nullptr); }
// Marks thread `tid` as detached in the registry (pc is unused).
void ThreadDetach(ThreadState *thr, uptr pc, Tid tid) {
  CHECK_GT(tid, 0);
  ctx->thread_registry.DetachThread(tid, thr);
}
  288. void ThreadContext::OnDetached(void *arg) { Free(sync); }
// Re-associates user id `uid` with thread `tid` in the registry
// (thr and pc are unused).
void ThreadNotJoined(ThreadState *thr, uptr pc, Tid tid, uptr uid) {
  CHECK_GT(tid, 0);
  ctx->thread_registry.SetThreadUserId(tid, uid);
}
// Records `name` for the current thread in the registry.
void ThreadSetName(ThreadState *thr, const char *name) {
  ctx->thread_registry.SetThreadName(thr->tid, name);
}
  296. #if !SANITIZER_GO
  297. void FiberSwitchImpl(ThreadState *from, ThreadState *to) {
  298. Processor *proc = from->proc();
  299. ProcUnwire(proc, from);
  300. ProcWire(proc, to);
  301. set_cur_thread(to);
  302. }
  303. ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags) {
  304. void *mem = Alloc(sizeof(ThreadState));
  305. ThreadState *fiber = static_cast<ThreadState *>(mem);
  306. internal_memset(fiber, 0, sizeof(*fiber));
  307. Tid tid = ThreadCreate(thr, pc, 0, true);
  308. FiberSwitchImpl(thr, fiber);
  309. ThreadStart(fiber, tid, 0, ThreadType::Fiber);
  310. FiberSwitchImpl(fiber, thr);
  311. return fiber;
  312. }
// Finishes the fiber's thread (switching onto it for the duration of
// ThreadFinish) and frees its ThreadState storage.
void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber) {
  FiberSwitchImpl(thr, fiber);
  ThreadFinish(fiber);
  FiberSwitchImpl(fiber, thr);
  Free(fiber);
}
  319. void FiberSwitch(ThreadState *thr, uptr pc,
  320. ThreadState *fiber, unsigned flags) {
  321. if (!(flags & FiberSwitchFlagNoSync))
  322. Release(thr, pc, (uptr)fiber);
  323. FiberSwitchImpl(thr, fiber);
  324. if (!(flags & FiberSwitchFlagNoSync))
  325. Acquire(fiber, pc, (uptr)fiber);
  326. }
  327. #endif
  328. } // namespace __tsan