//===-- tsan_rtl_report.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"

namespace __tsan {

using namespace __sanitizer;

static ReportStack *SymbolizeStack(StackTrace trace);

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif

SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_on_report(const ReportDesc *rep) {
  (void)rep;
}
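
// Illustrative sketch (not part of the runtime): besides the TSAN_EXTERNAL_HOOKS
// build, any program can supply a strong definition of the weak OnReport hook
// above to observe or veto reports before they are printed, e.g.:
//
//   bool __tsan::OnReport(const ReportDesc *rep, bool suppressed) {
//     // Returning true suppresses the report.
//     return suppressed;
//   }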

static void StackStripMain(SymbolizedStack *frames) {
  SymbolizedStack *last_frame = nullptr;
  SymbolizedStack *last_frame2 = nullptr;
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    last_frame2 = last_frame;
    last_frame = cur;
  }

  if (last_frame2 == 0)
    return;
#if !SANITIZER_GO
  const char *last = last_frame->info.function;
  const char *last2 = last_frame2->info.function;
  // Strip frame above 'main'
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // Strip global ctors init, .preinit_array and main caller.
  } else if (last && (0 == internal_strcmp(last, "__do_global_ctors_aux") ||
                      0 == internal_strcmp(last, "__libc_csu_init") ||
                      0 == internal_strcmp(last, "__libc_start_main"))) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack can
    // actually happen if we do not instrument some code, so it's only a
    // debug print. However, we must try hard not to miss it due to our fault.
    DPrintf("Bottom stack frame is missed\n");
  }
#else
  // The last frame always points into the runtime (gosched0, goexit0,
  // runtime.main).
  last_frame->ClearAll();
  last_frame2->next = nullptr;
#endif
}
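
// Fetches the trace recorded under stack_id from the stack depot and
// symbolizes it. Returns null if the id is 0 or the depot has no trace for it.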
ReportStack *SymbolizeStackId(u32 stack_id) {
  if (stack_id == 0)
    return 0;
  StackTrace stack = StackDepotGet(stack_id);
  if (stack.trace == nullptr)
    return nullptr;
  return SymbolizeStack(stack);
}

static ReportStack *SymbolizeStack(StackTrace trace) {
  if (trace.size == 0)
    return 0;
  SymbolizedStack *top = nullptr;
  for (uptr si = 0; si < trace.size; si++) {
    const uptr pc = trace.trace[si];
    uptr pc1 = pc;
    // We obtain the return address, but we're interested in the previous
    // instruction.
    if ((pc & kExternalPCBit) == 0)
      pc1 = StackTrace::GetPreviousInstructionPc(pc);
    SymbolizedStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
    SymbolizedStack *last = ent;
    while (last->next) {
      last->info.address = pc;  // restore original pc for report
      last = last->next;
    }
    last->info.address = pc;  // restore original pc for report
    last->next = top;
    top = ent;
  }
  StackStripMain(top);

  auto *stack = New<ReportStack>();
  stack->frames = top;
  return stack;
}
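
// Central filter for report emission: verifies no internal locks are held yet
// and consults runtime flags and the per-thread fork-suppression state to
// decide whether a report of the given type should be produced.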
bool ShouldReport(ThreadState *thr, ReportType typ) {
  // We set thr->suppress_reports in the fork context.
  // Taking any locking in the fork context can lead to deadlocks.
  // If any locks are already taken, it's too late to do this check.
  CheckedMutex::CheckNoLocks();
  // For the same reason check we didn't lock thread_registry yet.
  if (SANITIZER_DEBUG)
    ThreadRegistryLock l(&ctx->thread_registry);
  if (!flags()->report_bugs || thr->suppress_reports)
    return false;
  switch (typ) {
    case ReportTypeSignalUnsafe:
      return flags()->report_signal_unsafe;
    case ReportTypeThreadLeak:
#if !SANITIZER_GO
      // It's impossible to join phantom threads
      // in the child after fork.
      if (ctx->after_multithreaded_fork)
        return false;
#endif
      return flags()->report_thread_leaks;
    case ReportTypeMutexDestroyLocked:
      return flags()->report_destroy_locked;
    default:
      return true;
  }
}
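
// ScopedReportBase owns the ReportDesc being assembled and holds
// ctx->report_mtx from construction until destruction, so reports are built
// and printed one at a time.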
ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
  ctx->thread_registry.CheckLocked();
  rep_ = New<ReportDesc>();
  rep_->typ = typ;
  rep_->tag = tag;
  ctx->report_mtx.Lock();
}

ScopedReportBase::~ScopedReportBase() {
  ctx->report_mtx.Unlock();
  DestroyAndFree(rep_);
}

void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(stack);
  (*rs)->suppressable = suppressable;
}
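
// Records one of the racing memory accesses in the report: its address, size,
// read/write and atomicity, the symbolized stack, and the set of mutexes held
// at the time of the access.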
void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
                                       Tid tid, StackTrace stack,
                                       const MutexSet *mset) {
  uptr addr0, size;
  AccessType typ;
  s.GetAccess(&addr0, &size, &typ);
  auto *mop = New<ReportMop>();
  rep_->mops.PushBack(mop);
  mop->tid = tid;
  mop->addr = addr + addr0;
  mop->size = size;
  mop->write = !(typ & kAccessRead);
  mop->atomic = typ & kAccessAtomic;
  mop->stack = SymbolizeStack(stack);
  mop->external_tag = external_tag;
  if (mop->stack)
    mop->stack->suppressable = true;
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    int id = this->AddMutex(d.addr, d.stack_id);
    ReportMopMutex mtx = {id, d.write};
    mop->mset.PushBack(mtx);
  }
}

void ScopedReportBase::AddUniqueTid(Tid unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}

void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  auto *rt = New<ReportThread>();
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->os_id = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = internal_strdup(tctx->name);
  rt->parent_tid = tctx->parent_tid;
  rt->thread_type = tctx->thread_type;
  rt->stack = 0;
  rt->stack = SymbolizeStackId(tctx->creation_stack_id);
  if (rt->stack)
    rt->stack->suppressable = suppressable;
}

#if !SANITIZER_GO
static ThreadContext *FindThreadByTidLocked(Tid tid) {
  ctx->thread_registry.CheckLocked();
  return static_cast<ThreadContext *>(
      ctx->thread_registry.GetThreadLocked(tid));
}

static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext *>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry.CheckLocked();
  ThreadContext *tctx =
      static_cast<ThreadContext *>(ctx->thread_registry.FindThreadContextLocked(
          IsInStackOrTls, (void *)addr));
  if (!tctx)
    return 0;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
#endif

void ScopedReportBase::AddThread(Tid tid, bool suppressable) {
#if !SANITIZER_GO
  if (const ThreadContext *tctx = FindThreadByTidLocked(tid))
    AddThread(tctx, suppressable);
#endif
}

int ScopedReportBase::AddMutex(uptr addr, StackID creation_stack_id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->addr == addr)
      return rep_->mutexes[i]->id;
  }
  auto *rm = New<ReportMutex>();
  rep_->mutexes.PushBack(rm);
  rm->id = rep_->mutexes.Size() - 1;
  rm->addr = addr;
  rm->stack = SymbolizeStackId(creation_stack_id);
  return rm->id;
}
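
// Classifies the racy address and attaches a location description to the
// report: a file descriptor, a heap (or Java heap) block, a thread's stack or
// TLS, or a symbolized global.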
void ScopedReportBase::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#if !SANITIZER_GO
  int fd = -1;
  Tid creat_tid = kInvalidTid;
  StackID creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
    auto *loc = New<ReportLocation>();
    loc->type = ReportLocationFD;
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack = SymbolizeStackId(creat_stack);
    rep_->locs.PushBack(loc);
    ThreadContext *tctx = FindThreadByTidLocked(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  MBlock *b = 0;
  uptr block_begin = 0;
  Allocator *a = allocator();
  if (a->PointerIsMine((void *)addr)) {
    block_begin = (uptr)a->GetBlockBegin((void *)addr);
    if (block_begin)
      b = ctx->metamap.GetBlock(block_begin);
  }
  if (!b)
    b = JavaHeapBlock(addr, &block_begin);
  if (b != 0) {
    auto *loc = New<ReportLocation>();
    loc->type = ReportLocationHeap;
    loc->heap_chunk_start = block_begin;
    loc->heap_chunk_size = b->siz;
    loc->external_tag = b->tag;
    loc->tid = b->tid;
    loc->stack = SymbolizeStackId(b->stk);
    rep_->locs.PushBack(loc);
    if (ThreadContext *tctx = FindThreadByTidLocked(b->tid))
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    auto *loc = New<ReportLocation>();
    loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
    loc->tid = tctx->tid;
    rep_->locs.PushBack(loc);
    AddThread(tctx);
  }
#endif
  if (ReportLocation *loc = SymbolizeData(addr)) {
    loc->suppressable = true;
    rep_->locs.PushBack(loc);
    return;
  }
}

#if !SANITIZER_GO
void ScopedReportBase::AddSleep(StackID stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}
#endif

void ScopedReportBase::SetCount(int count) { rep_->count = count; }

const ReportDesc *ScopedReportBase::GetReport() const { return rep_; }

ScopedReport::ScopedReport(ReportType typ, uptr tag)
    : ScopedReportBase(typ, tag) {}

ScopedReport::~ScopedReport() {}

// Replays the trace up to last_pos position in the last part
// or up to the provided epoch/sid (whichever is earlier)
// and calls the provided function f for each event.
template <typename Func>
void TraceReplay(Trace *trace, TracePart *last, Event *last_pos, Sid sid,
                 Epoch epoch, Func f) {
  TracePart *part = trace->parts.Front();
  Sid ev_sid = kFreeSid;
  Epoch ev_epoch = kEpochOver;
  for (;;) {
    DCHECK_EQ(part->trace, trace);
    // Note: an event can't start in the last element.
    // Since an event can take up to 2 elements,
    // we ensure we have at least 2 before adding an event.
    Event *end = &part->events[TracePart::kSize - 1];
    if (part == last)
      end = last_pos;
    f(kFreeSid, kEpochOver, nullptr);  // notify about part start
    for (Event *evp = &part->events[0]; evp < end; evp++) {
      Event *evp0 = evp;
      if (!evp->is_access && !evp->is_func) {
        switch (evp->type) {
          case EventType::kTime: {
            auto *ev = reinterpret_cast<EventTime *>(evp);
            ev_sid = static_cast<Sid>(ev->sid);
            ev_epoch = static_cast<Epoch>(ev->epoch);
            if (ev_sid == sid && ev_epoch > epoch)
              return;
            break;
          }
          case EventType::kAccessExt:
            FALLTHROUGH;
          case EventType::kAccessRange:
            FALLTHROUGH;
          case EventType::kLock:
            FALLTHROUGH;
          case EventType::kRLock:
            // These take 2 Event elements.
            evp++;
            break;
          case EventType::kUnlock:
            // This takes 1 Event element.
            break;
        }
      }
      CHECK_NE(ev_sid, kFreeSid);
      CHECK_NE(ev_epoch, kEpochOver);
      f(ev_sid, ev_epoch, evp0);
    }
    if (part == last)
      return;
    part = trace->parts.Next(part);
    CHECK(part);
  }
  CHECK(0);
}
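
// Invoked when a replayed event matches the access we are looking for:
// snapshots the current mutex set and the reconstructed call stack (with the
// matching pc appended) into the output parameters.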
static void RestoreStackMatch(VarSizeStackTrace *pstk, MutexSet *pmset,
                              Vector<uptr> *stack, MutexSet *mset, uptr pc,
                              bool *found) {
  DPrintf2(" MATCHED\n");
  *pmset = *mset;
  stack->PushBack(pc);
  pstk->Init(&(*stack)[0], stack->Size());
  stack->PopBack();
  *found = true;
}

// Checks if addr1|size1 is fully contained in addr2|size2.
// We check for full containment instead of just overlap
// because a memory access is always traced once, but can be
// split into multiple accesses in the shadow.
static constexpr bool IsWithinAccess(uptr addr1, uptr size1, uptr addr2,
                                     uptr size2) {
  return addr1 >= addr2 && addr1 + size1 <= addr2 + size2;
}

// Replays the trace of slot sid up to the target event identified
// by epoch/addr/size/typ and restores and returns tid, stack, mutex set
// and tag for that event. If there are multiple such events, it returns
// the last one. Returns false if the event is not present in the trace.
bool RestoreStack(EventType type, Sid sid, Epoch epoch, uptr addr, uptr size,
                  AccessType typ, Tid *ptid, VarSizeStackTrace *pstk,
                  MutexSet *pmset, uptr *ptag) {
  // This function restores stack trace and mutex set for the thread/epoch.
  // It does so by getting stack trace and mutex set at the beginning of
  // trace part, and then replaying the trace till the given epoch.
  DPrintf2("RestoreStack: sid=%u@%u addr=0x%zx/%zu typ=%x\n",
           static_cast<int>(sid), static_cast<int>(epoch), addr, size,
           static_cast<int>(typ));
  ctx->slot_mtx.CheckLocked();  // needed to prevent trace part recycling
  ctx->thread_registry.CheckLocked();
  TidSlot *slot = &ctx->slots[static_cast<uptr>(sid)];
  Tid tid = kInvalidTid;
  // Need to lock the slot mutex as it protects slot->journal.
  slot->mtx.CheckLocked();
  for (uptr i = 0; i < slot->journal.Size(); i++) {
    DPrintf2(" journal: epoch=%d tid=%d\n",
             static_cast<int>(slot->journal[i].epoch), slot->journal[i].tid);
    if (i == slot->journal.Size() - 1 || slot->journal[i + 1].epoch > epoch) {
      tid = slot->journal[i].tid;
      break;
    }
  }
  if (tid == kInvalidTid)
    return false;
  *ptid = tid;
  ThreadContext *tctx =
      static_cast<ThreadContext *>(ctx->thread_registry.GetThreadLocked(tid));
  Trace *trace = &tctx->trace;
  // Snapshot first/last parts and the current position in the last part.
  TracePart *first_part;
  TracePart *last_part;
  Event *last_pos;
  {
    Lock lock(&trace->mtx);
    first_part = trace->parts.Front();
    if (!first_part) {
      DPrintf2("RestoreStack: tid=%d trace=%p no trace parts\n", tid, trace);
      return false;
    }
    last_part = trace->parts.Back();
    last_pos = trace->final_pos;
    if (tctx->thr)
      last_pos = (Event *)atomic_load_relaxed(&tctx->thr->trace_pos);
  }
  DynamicMutexSet mset;
  Vector<uptr> stack;
  uptr prev_pc = 0;
  bool found = false;
  bool is_read = typ & kAccessRead;
  bool is_atomic = typ & kAccessAtomic;
  bool is_free = typ & kAccessFree;
  DPrintf2("RestoreStack: tid=%d parts=[%p-%p] last_pos=%p\n", tid,
           trace->parts.Front(), last_part, last_pos);
  TraceReplay(
      trace, last_part, last_pos, sid, epoch,
      [&](Sid ev_sid, Epoch ev_epoch, Event *evp) {
        if (evp == nullptr) {
          // Each trace part is self-consistent, so we reset state.
          stack.Resize(0);
          mset->Reset();
          prev_pc = 0;
          return;
        }
        bool match = ev_sid == sid && ev_epoch == epoch;
        if (evp->is_access) {
          if (evp->is_func == 0 && evp->type == EventType::kAccessExt &&
              evp->_ == 0)  // NopEvent
            return;
          auto *ev = reinterpret_cast<EventAccess *>(evp);
          uptr ev_addr = RestoreAddr(ev->addr);
          uptr ev_size = 1 << ev->size_log;
          uptr ev_pc =
              prev_pc + ev->pc_delta - (1 << (EventAccess::kPCBits - 1));
          prev_pc = ev_pc;
          DPrintf2(" Access: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
                   ev_addr, ev_size, ev->is_read, ev->is_atomic);
          if (match && type == EventType::kAccessExt &&
              IsWithinAccess(addr, size, ev_addr, ev_size) &&
              is_read == ev->is_read && is_atomic == ev->is_atomic && !is_free)
            RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
          return;
        }
        if (evp->is_func) {
          auto *ev = reinterpret_cast<EventFunc *>(evp);
          if (ev->pc) {
            DPrintf2(" FuncEnter: pc=0x%llx\n", ev->pc);
            stack.PushBack(ev->pc);
          } else {
            DPrintf2(" FuncExit\n");
            // We don't log pathologically large stacks in each part,
            // if the stack was truncated we can have more func exits than
            // entries.
            if (stack.Size())
              stack.PopBack();
          }
          return;
        }
        switch (evp->type) {
          case EventType::kAccessExt: {
            auto *ev = reinterpret_cast<EventAccessExt *>(evp);
            uptr ev_addr = RestoreAddr(ev->addr);
            uptr ev_size = 1 << ev->size_log;
            prev_pc = ev->pc;
            DPrintf2(" AccessExt: pc=0x%llx addr=0x%zx/%zu type=%u/%u\n",
                     ev->pc, ev_addr, ev_size, ev->is_read, ev->is_atomic);
            if (match && type == EventType::kAccessExt &&
                IsWithinAccess(addr, size, ev_addr, ev_size) &&
                is_read == ev->is_read && is_atomic == ev->is_atomic &&
                !is_free)
              RestoreStackMatch(pstk, pmset, &stack, mset, ev->pc, &found);
            break;
          }
          case EventType::kAccessRange: {
            auto *ev = reinterpret_cast<EventAccessRange *>(evp);
            uptr ev_addr = RestoreAddr(ev->addr);
            uptr ev_size =
                (ev->size_hi << EventAccessRange::kSizeLoBits) + ev->size_lo;
            uptr ev_pc = RestoreAddr(ev->pc);
            prev_pc = ev_pc;
            DPrintf2(" Range: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
                     ev_addr, ev_size, ev->is_read, ev->is_free);
            if (match && type == EventType::kAccessExt &&
                IsWithinAccess(addr, size, ev_addr, ev_size) &&
                is_read == ev->is_read && !is_atomic && is_free == ev->is_free)
              RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
            break;
          }
          case EventType::kLock:
            FALLTHROUGH;
          case EventType::kRLock: {
            auto *ev = reinterpret_cast<EventLock *>(evp);
            bool is_write = ev->type == EventType::kLock;
            uptr ev_addr = RestoreAddr(ev->addr);
            uptr ev_pc = RestoreAddr(ev->pc);
            StackID stack_id =
                (ev->stack_hi << EventLock::kStackIDLoBits) + ev->stack_lo;
            DPrintf2(" Lock: pc=0x%zx addr=0x%zx stack=%u write=%d\n", ev_pc,
                     ev_addr, stack_id, is_write);
            mset->AddAddr(ev_addr, stack_id, is_write);
            // Events with ev_pc == 0 are written to the beginning of trace
            // part as initial mutex set (are not real).
            if (match && type == EventType::kLock && addr == ev_addr && ev_pc)
              RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
            break;
          }
          case EventType::kUnlock: {
            auto *ev = reinterpret_cast<EventUnlock *>(evp);
            uptr ev_addr = RestoreAddr(ev->addr);
            DPrintf2(" Unlock: addr=0x%zx\n", ev_addr);
            mset->DelAddr(ev_addr);
            break;
          }
          case EventType::kTime:
            // TraceReplay already extracted sid/epoch from it,
            // nothing else to do here.
            break;
        }
      });
  ExtractTagFromStack(pstk, ptag);
  return found;
}
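
// Two RacyStacks entries are considered equal if they contain the same pair of
// stack hashes in either order, so a race recorded once as (A, B) is also
// deduplicated when it reappears as (B, A).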
bool RacyStacks::operator==(const RacyStacks &other) const {
  if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
    return true;
  if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
    return true;
  return false;
}

static bool FindRacyStacks(const RacyStacks &hash) {
  for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
    if (hash == ctx->racy_stacks[i]) {
      VPrintf(2, "ThreadSanitizer: suppressing report as doubled (stack)\n");
      return true;
    }
  }
  return false;
}
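
// Suppresses duplicate reports that involve the same pair of stacks.
// The stacks are fingerprinted with md5 and looked up first under a read lock;
// only on a miss do we retake racy_mtx exclusively, re-check and insert.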
static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2]) {
  if (!flags()->suppress_equal_stacks)
    return false;
  RacyStacks hash;
  hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
  hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
  {
    ReadLock lock(&ctx->racy_mtx);
    if (FindRacyStacks(hash))
      return true;
  }
  Lock lock(&ctx->racy_mtx);
  if (FindRacyStacks(hash))
    return true;
  ctx->racy_stacks.PushBack(hash);
  return false;
}
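
// The same double-checked scheme is used for address-based deduplication:
// a report is dropped if its address range overlaps one already reported.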
static bool FindRacyAddress(const RacyAddress &ra0) {
  for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
    RacyAddress ra2 = ctx->racy_addresses[i];
    uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
    uptr minend = min(ra0.addr_max, ra2.addr_max);
    if (maxbeg < minend) {
      VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
      return true;
    }
  }
  return false;
}

static bool HandleRacyAddress(ThreadState *thr, uptr addr_min, uptr addr_max) {
  if (!flags()->suppress_equal_addresses)
    return false;
  RacyAddress ra0 = {addr_min, addr_max};
  {
    ReadLock lock(&ctx->racy_mtx);
    if (FindRacyAddress(ra0))
      return true;
  }
  Lock lock(&ctx->racy_mtx);
  if (FindRacyAddress(ra0))
    return true;
  ctx->racy_addresses.PushBack(ra0);
  return false;
}
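
// Finalizes a report: runs the suppression matcher over its stacks and
// locations, lets the OnReport hook veto it, prints it, notifies
// __tsan_on_report, and dies if halt_on_error is set.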
bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
  // These should have been checked in ShouldReport.
  // It's too late to check them here, we have already taken locks.
  CHECK(flags()->report_bugs);
  CHECK(!thr->suppress_reports);
  atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
  const ReportDesc *rep = srep.GetReport();
  CHECK_EQ(thr->current_report, nullptr);
  thr->current_report = rep;
  Suppression *supp = 0;
  uptr pc_or_addr = 0;
  for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
  if (pc_or_addr != 0) {
    Lock lock(&ctx->fired_suppressions_mtx);
    FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
    ctx->fired_suppressions.push_back(s);
  }
  {
    bool suppressed = OnReport(rep, pc_or_addr != 0);
    if (suppressed) {
      thr->current_report = nullptr;
      return false;
    }
  }
  PrintReport(rep);
  __tsan_on_report(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    Die();
  thr->current_report = nullptr;
  return true;
}

bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    for (uptr j = 0; j < trace.size; j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.trace[j] == s->pc_or_addr) {
        if (s->supp)
          atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
        return true;
      }
    }
  }
  return false;
}

static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc_or_addr) {
      if (s->supp)
        atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
      return true;
    }
  }
  return false;
}
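
// Entry point for data-race reporting. `cur` describes the access that just
// happened on the current thread, `old` the conflicting shadow value; the
// second thread's stack and mutex set are reconstructed from its trace via
// RestoreStack before the report is assembled and emitted.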
void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
                AccessType typ0) {
  CheckedMutex::CheckNoLocks();

  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  uptr addr = ShadowToMem(shadow_mem);
  DPrintf("#%d: ReportRace %p\n", thr->tid, (void *)addr);
  if (!ShouldReport(thr, ReportTypeRace))
    return;
  uptr addr_off0, size0;
  cur.GetAccess(&addr_off0, &size0, nullptr);
  uptr addr_off1, size1, typ1;
  old.GetAccess(&addr_off1, &size1, &typ1);
  if (!flags()->report_atomic_races &&
      ((typ0 & kAccessAtomic) || (typ1 & kAccessAtomic)) &&
      !(typ0 & kAccessFree) && !(typ1 & kAccessFree))
    return;

  const uptr kMop = 2;
  Shadow s[kMop] = {cur, old};
  uptr addr0 = addr + addr_off0;
  uptr addr1 = addr + addr_off1;
  uptr end0 = addr0 + size0;
  uptr end1 = addr1 + size1;
  uptr addr_min = min(addr0, addr1);
  uptr addr_max = max(end0, end1);
  if (IsExpectedReport(addr_min, addr_max - addr_min))
    return;
  if (HandleRacyAddress(thr, addr_min, addr_max))
    return;

  ReportType rep_typ = ReportTypeRace;
  if ((typ0 & kAccessVptr) && (typ1 & kAccessFree))
    rep_typ = ReportTypeVptrUseAfterFree;
  else if (typ0 & kAccessVptr)
    rep_typ = ReportTypeVptrRace;
  else if (typ1 & kAccessFree)
    rep_typ = ReportTypeUseAfterFree;

  if (IsFiredSuppression(ctx, rep_typ, addr))
    return;

  VarSizeStackTrace traces[kMop];
  Tid tids[kMop] = {thr->tid, kInvalidTid};
  uptr tags[kMop] = {kExternalTagNone, kExternalTagNone};

  ObtainCurrentStack(thr, thr->trace_prev_pc, &traces[0], &tags[0]);
  if (IsFiredSuppression(ctx, rep_typ, traces[0]))
    return;

  DynamicMutexSet mset1;
  MutexSet *mset[kMop] = {&thr->mset, mset1};

  // We need to lock the slot during RestoreStack because it protects
  // the slot journal.
  Lock slot_lock(&ctx->slots[static_cast<uptr>(s[1].sid())].mtx);
  ThreadRegistryLock l0(&ctx->thread_registry);
  Lock slots_lock(&ctx->slot_mtx);
  if (!RestoreStack(EventType::kAccessExt, s[1].sid(), s[1].epoch(), addr1,
                    size1, typ1, &tids[1], &traces[1], mset[1], &tags[1]))
    return;

  if (IsFiredSuppression(ctx, rep_typ, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces))
    return;

  // If any of the accesses has a tag, treat this as an "external" race.
  uptr tag = kExternalTagNone;
  for (uptr i = 0; i < kMop; i++) {
    if (tags[i] != kExternalTagNone) {
      rep_typ = ReportTypeExternalRace;
      tag = tags[i];
      break;
    }
  }

  ScopedReport rep(rep_typ, tag);
  for (uptr i = 0; i < kMop; i++)
    rep.AddMemoryAccess(addr, tags[i], s[i], tids[i], traces[i], mset[i]);

  for (uptr i = 0; i < kMop; i++) {
    ThreadContext *tctx = static_cast<ThreadContext *>(
        ctx->thread_registry.GetThreadLocked(tids[i]));
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#if !SANITIZER_GO
  if (!((typ0 | typ1) & kAccessFree) &&
      s[1].epoch() <= thr->last_sleep_clock.Get(s[1].sid()))
    rep.AddSleep(thr->last_sleep_stack_id);
#endif

  OutputReport(thr, rep);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  PrintStack(SymbolizeStack(trace));
}

// Always inlining PrintCurrentStackSlow, because LocatePcInTrace assumes
// __sanitizer_print_stack_trace exists in the actual unwound stack, but a
// tail call to PrintCurrentStackSlow breaks this assumption because
// __sanitizer_print_stack_trace disappears after the tail call.
// However, this solution is not reliable enough, please see dvyukov's comment
// http://reviews.llvm.org/D19148#406208
// Also see PR27280 comment 2 and 3 for breaking examples and analysis.
ALWAYS_INLINE USED void PrintCurrentStackSlow(uptr pc) {
#if !SANITIZER_GO
  uptr bp = GET_CURRENT_FRAME();
  auto *ptrace = New<BufferedStackTrace>();
  ptrace->Unwind(pc, bp, nullptr, false);
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace_buffer[i];
    ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
    ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
  }
  PrintStack(SymbolizeStack(*ptrace));
#endif
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
}  // extern "C"