//===-- tsan_rtl_mutex.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
#include <sanitizer_common/sanitizer_stackdepot.h>

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"

namespace __tsan {

void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
void ReportDestroyLocked(ThreadState *thr, uptr pc, uptr addr,
                         FastState last_lock, StackID creation_stack_id);

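// Glue between TSan and the generic sanitizer deadlock detector: binds the
// detector's per-physical-thread (pt) and per-logical-thread (lt) state to
// the current thread and lets the detector unwind a stack on demand.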
struct Callback final : public DDCallback {
  ThreadState *thr;
  uptr pc;

  Callback(ThreadState *thr, uptr pc)
      : thr(thr)
      , pc(pc) {
    DDCallback::pt = thr->proc()->dd_pt;
    DDCallback::lt = thr->dd_lt;
  }

  StackID Unwind() override { return CurrentStackId(thr, pc); }
  int UniqueTid() override { return thr->tid; }
};

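// Registers a fresh sync object with the deadlock detector; the mutex
// address doubles as the detector's context id, which later surfaces as
// mtx_ctx0 in deadlock reports.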
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
  Callback cb(thr, pc);
  ctx->dd->MutexInit(&cb, &s->dd);
  s->dd.ctx = s->addr;
}

static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
                              uptr addr, StackID creation_stack_id) {
  // In Go, these misuses are either impossible, or detected by std lib,
  // or false positives (e.g. unlock in a different thread).
  if (SANITIZER_GO)
    return;
  if (!ShouldReport(thr, typ))
    return;
  ThreadRegistryLock l(&ctx->thread_registry);
  ScopedReport rep(typ);
  rep.AddMutex(addr, creation_stack_id);
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  rep.AddStack(trace, true);
  rep.AddLocation(addr, 1);
  OutputReport(thr, rep);
}

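// RecordMutexLock/RecordMutexUnlock keep two views of held mutexes in sync:
// the event trace (used to restore stacks for reports) and the thread's
// current mutex set (attached to memory accesses in race reports).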
static void RecordMutexLock(ThreadState *thr, uptr pc, uptr addr,
                            StackID stack_id, bool write) {
  auto typ = write ? EventType::kLock : EventType::kRLock;
  // Note: it's important to trace before modifying mutex set
  // because tracing can switch trace part and we write the current
  // mutex set in the beginning of each part.
  // If we do it in the opposite order, we will write already reduced
  // mutex set in the beginning of the part and then trace unlock again.
  TraceMutexLock(thr, typ, pc, addr, stack_id);
  thr->mset.AddAddr(addr, stack_id, write);
}

static void RecordMutexUnlock(ThreadState *thr, uptr addr) {
  // See the comment in RecordMutexLock re order of operations.
  TraceMutexUnlock(thr, addr);
  thr->mset.DelAddr(addr);
}

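// The Mutex* entry points below are driven by the interceptors and by the
// annotation API. A plain write lock, for example, arrives as a pre/post
// pair around the real call, roughly like this (a simplified sketch, not
// verbatim from the interceptors):
//
//   MutexPreLock(thr, pc, (uptr)m);          // may flag a lock-order cycle
//   int res = REAL(pthread_mutex_lock)(m);   // real, possibly blocking call
//   if (res == 0)
//     MutexPostLock(thr, pc, (uptr)m);       // acquire clocks, record lock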
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagLinkerInit) && pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessWrite);
  SlotLocker locker(thr);
  auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
  s->SetFlags(flagz & MutexCreationFlagMask);
  // Save stack in the case the sync object was created before as atomic.
  if (!SANITIZER_GO && s->creation_stack_id == kInvalidStackID)
    s->creation_stack_id = CurrentStackId(thr, pc);
}

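// Destroy is downgraded to a no-op for linker-initialized (static) mutexes,
// which are commonly locked again after pthread_mutex_destroy, so their
// state must survive destruction; see the flag checks below.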
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  bool unlock_locked = false;
  StackID creation_stack_id;
  FastState last_lock;
  {
    auto s = ctx->metamap.GetSyncIfExists(addr);
    if (!s)
      return;
    SlotLocker locker(thr);
    {
      Lock lock(&s->mtx);
      creation_stack_id = s->creation_stack_id;
      last_lock = s->last_lock;
      if ((flagz & MutexFlagLinkerInit) || s->IsFlagSet(MutexFlagLinkerInit) ||
          ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
        // Destroy is no-op for linker-initialized mutexes.
        return;
      }
      if (common_flags()->detect_deadlocks) {
        Callback cb(thr, pc);
        ctx->dd->MutexDestroy(&cb, &s->dd);
        ctx->dd->MutexInit(&cb, &s->dd);
      }
      if (flags()->report_destroy_locked && s->owner_tid != kInvalidTid &&
          !s->IsFlagSet(MutexFlagBroken)) {
        s->SetFlags(MutexFlagBroken);
        unlock_locked = true;
      }
      s->Reset();
    }
    // Imitate a memory write to catch unlock-destroy races.
    if (pc && IsAppMem(addr))
      MemoryAccess(thr, pc, addr, 1, kAccessWrite | kAccessFree);
  }
  if (unlock_locked && ShouldReport(thr, ReportTypeMutexDestroyLocked))
    ReportDestroyLocked(thr, pc, addr, last_lock, creation_stack_id);
  thr->mset.DelAddr(addr, true);
  // s will be destroyed and freed in MetaMap::FreeBlock.
}

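// Runs before a potentially blocking write-lock attempt. Try-locks are
// skipped: they cannot deadlock, so the detector is only told about
// acquisitions that can actually block. Note that the deadlock report, if
// any, is produced only after s->mtx has been released.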
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (flagz & MutexFlagTryLock)
    return;
  if (!common_flags()->detect_deadlocks)
    return;
  Callback cb(thr, pc);
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    ReadLock lock(&s->mtx);
    s->UpdateFlags(flagz);
    if (s->owner_tid != thr->tid)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
  }
  ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}

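// Runs after a write lock has been acquired (or a try-lock succeeded). On
// the first, non-recursive acquisition the thread acquires both the write
// and the read release clocks of the mutex, establishing happens-before
// with all preceding critical sections.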
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
  DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
      thr->tid, addr, flagz, rec);
  if (flagz & MutexFlagRecursiveLock)
    CHECK_GT(rec, 0);
  else
    rec = 1;
  if (pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  bool report_double_lock = false;
  bool pre_lock = false;
  bool first = false;
  StackID creation_stack_id = kInvalidStackID;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    creation_stack_id = s->creation_stack_id;
    RecordMutexLock(thr, pc, addr, creation_stack_id, true);
    {
      Lock lock(&s->mtx);
      first = s->recursion == 0;
      s->UpdateFlags(flagz);
      if (s->owner_tid == kInvalidTid) {
        CHECK_EQ(s->recursion, 0);
        s->owner_tid = thr->tid;
        s->last_lock = thr->fast_state;
      } else if (s->owner_tid == thr->tid) {
        CHECK_GT(s->recursion, 0);
      } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
        s->SetFlags(MutexFlagBroken);
        report_double_lock = true;
      }
      s->recursion += rec;
      if (first) {
        if (!thr->ignore_sync) {
          thr->clock.Acquire(s->clock);
          thr->clock.Acquire(s->read_clock);
        }
      }
      if (first && common_flags()->detect_deadlocks) {
        pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
            !(flagz & MutexFlagTryLock);
        Callback cb(thr, pc);
        if (pre_lock)
          ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
        ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
      }
    }
  }
  if (report_double_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr,
                      creation_stack_id);
  if (first && pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

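// Returns the number of recursion levels released. With
// MutexFlagRecursiveUnlock the mutex is released completely, and the caller
// can later restore the same recursion depth by passing the returned count
// back to MutexPostLock together with MutexFlagRecursiveLock.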
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  StackID creation_stack_id;
  RecordMutexUnlock(thr, addr);
  bool report_bad_unlock = false;
  int rec = 0;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    bool released = false;
    {
      Lock lock(&s->mtx);
      creation_stack_id = s->creation_stack_id;
      if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
        if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
          s->SetFlags(MutexFlagBroken);
          report_bad_unlock = true;
        }
      } else {
        rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
        s->recursion -= rec;
        if (s->recursion == 0) {
          s->owner_tid = kInvalidTid;
          if (!thr->ignore_sync) {
            thr->clock.ReleaseStore(&s->clock);
            released = true;
          }
        }
      }
      if (common_flags()->detect_deadlocks && s->recursion == 0 &&
          !report_bad_unlock) {
        Callback cb(thr, pc);
        ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
      }
    }
    if (released)
      IncrementEpoch(thr);
  }
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr,
                      creation_stack_id);
  if (common_flags()->detect_deadlocks && !report_bad_unlock) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
  return rec;
}

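// Read-lock counterpart of MutexPreLock; the last argument of
// MutexBeforeLock tells the detector whether this is a writer (true) or a
// reader (false).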
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if ((flagz & MutexFlagTryLock) || !common_flags()->detect_deadlocks)
    return;
  Callback cb(thr, pc);
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    ReadLock lock(&s->mtx);
    s->UpdateFlags(flagz);
    ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
  }
  ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}

void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  bool report_bad_lock = false;
  bool pre_lock = false;
  StackID creation_stack_id = kInvalidStackID;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    creation_stack_id = s->creation_stack_id;
    RecordMutexLock(thr, pc, addr, creation_stack_id, false);
    {
      ReadLock lock(&s->mtx);
      s->UpdateFlags(flagz);
      if (s->owner_tid != kInvalidTid) {
        if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
          s->SetFlags(MutexFlagBroken);
          report_bad_lock = true;
        }
      }
      if (!thr->ignore_sync)
        thr->clock.Acquire(s->clock);
      s->last_lock = thr->fast_state;
      if (common_flags()->detect_deadlocks) {
        pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
            !(flagz & MutexFlagTryLock);
        Callback cb(thr, pc);
        if (pre_lock)
          ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
        ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
      }
    }
  }
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr,
                      creation_stack_id);
  if (pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

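// Read unlocks release into the separate read_clock. A later read lock
// acquires only the write clock, so readers do not synchronize with each
// other; a later write lock acquires both clocks and synchronizes with
// everyone.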
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  if (pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  RecordMutexUnlock(thr, addr);
  StackID creation_stack_id;
  bool report_bad_unlock = false;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    bool released = false;
    {
      Lock lock(&s->mtx);
      creation_stack_id = s->creation_stack_id;
      if (s->owner_tid != kInvalidTid) {
        if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
          s->SetFlags(MutexFlagBroken);
          report_bad_unlock = true;
        }
      }
      if (!thr->ignore_sync) {
        thr->clock.Release(&s->read_clock);
        released = true;
      }
      if (common_flags()->detect_deadlocks && s->recursion == 0) {
        Callback cb(thr, pc);
        ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
      }
    }
    if (released)
      IncrementEpoch(thr);
  }
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr,
                      creation_stack_id);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

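// Used when the caller does not know whether it holds the mutex for reading
// or for writing (e.g. pthread_rwlock_unlock): a recorded owner_tid means a
// write lock, otherwise the unlock is treated as a read unlock.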
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  RecordMutexUnlock(thr, addr);
  StackID creation_stack_id;
  bool report_bad_unlock = false;
  bool write = true;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    bool released = false;
    {
      Lock lock(&s->mtx);
      creation_stack_id = s->creation_stack_id;
      if (s->owner_tid == kInvalidTid) {
        // Seems to be read unlock.
        write = false;
        if (!thr->ignore_sync) {
          thr->clock.Release(&s->read_clock);
          released = true;
        }
      } else if (s->owner_tid == thr->tid) {
        // Seems to be write unlock.
        CHECK_GT(s->recursion, 0);
        s->recursion--;
        if (s->recursion == 0) {
          s->owner_tid = kInvalidTid;
          if (!thr->ignore_sync) {
            thr->clock.ReleaseStore(&s->clock);
            released = true;
          }
        }
      } else if (!s->IsFlagSet(MutexFlagBroken)) {
        s->SetFlags(MutexFlagBroken);
        report_bad_unlock = true;
      }
      if (common_flags()->detect_deadlocks && s->recursion == 0) {
        Callback cb(thr, pc);
        ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
      }
    }
    if (released)
      IncrementEpoch(thr);
  }
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr,
                      creation_stack_id);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

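// Forcibly resets ownership and recursion without reporting anything,
// e.g. when a robust mutex is acquired with EOWNERDEAD and the dead
// owner's lock state has to be discarded.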
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
  SlotLocker locker(thr);
  auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
  Lock lock(&s->mtx);
  s->owner_tid = kInvalidTid;
  s->recursion = 0;
}

void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
  StackID creation_stack_id = kInvalidStackID;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    if (s)
      creation_stack_id = s->creation_stack_id;
  }
  ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr,
                    creation_stack_id);
}

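// The functions below are the annotation-level happens-before primitives on
// an arbitrary address: Acquire imports the address' release clock into the
// current thread, and the Release* family exports the thread's clock into
// the address' sync object.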
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  auto s = ctx->metamap.GetSyncIfExists(addr);
  if (!s)
    return;
  SlotLocker locker(thr);
  if (!s->clock)
    return;
  ReadLock lock(&s->mtx);
  thr->clock.Acquire(s->clock);
}

void AcquireGlobal(ThreadState *thr) {
  DPrintf("#%d: AcquireGlobal\n", thr->tid);
  if (thr->ignore_sync)
    return;
  SlotLocker locker(thr);
  for (auto &slot : ctx->slots) thr->clock.Set(slot.sid, slot.epoch());
}

void Release(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SlotLocker locker(thr);
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
    Lock lock(&s->mtx);
    thr->clock.Release(&s->clock);
  }
  IncrementEpoch(thr);
}

void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SlotLocker locker(thr);
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
    Lock lock(&s->mtx);
    thr->clock.ReleaseStore(&s->clock);
  }
  IncrementEpoch(thr);
}

void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStoreAcquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SlotLocker locker(thr);
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
    Lock lock(&s->mtx);
    thr->clock.ReleaseStoreAcquire(&s->clock);
  }
  IncrementEpoch(thr);
}

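// Advances the thread's epoch in its time slot after a release operation so
// that subsequent events are ordered after the released state. If the epoch
// would overflow, the increment is skipped here and the thread later moves
// to a fresh slot (handled by the slot machinery elsewhere in the runtime).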
void IncrementEpoch(ThreadState *thr) {
  DCHECK(!thr->ignore_sync);
  DCHECK(thr->slot_locked);
  Epoch epoch = EpochInc(thr->fast_state.epoch());
  if (!EpochOverflow(epoch)) {
    Sid sid = thr->fast_state.sid();
    thr->clock.Set(sid, epoch);
    thr->fast_state.SetEpoch(epoch);
    thr->slot->SetEpoch(epoch);
    TraceTime(thr);
  }
}

#if !SANITIZER_GO
void AfterSleep(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AfterSleep\n", thr->tid);
  if (thr->ignore_sync)
    return;
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  thr->last_sleep_clock.Reset();
  SlotLocker locker(thr);
  for (auto &slot : ctx->slots)
    thr->last_sleep_clock.Set(slot.sid, slot.epoch());
}
#endif

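// Turns a DDReport (a cycle in the lock-order graph) into a TSan report:
// one mutex and one thread per edge of the cycle, plus one or two stacks
// per edge depending on the second_deadlock_stack flag.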
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
  if (r == 0 || !ShouldReport(thr, ReportTypeDeadlock))
    return;
  ThreadRegistryLock l(&ctx->thread_registry);
  ScopedReport rep(ReportTypeDeadlock);
  for (int i = 0; i < r->n; i++) {
    rep.AddMutex(r->loop[i].mtx_ctx0, r->loop[i].stk[0]);
    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
    rep.AddThread((int)r->loop[i].thr_ctx);
  }
  uptr dummy_pc = 0x42;
  for (int i = 0; i < r->n; i++) {
    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
      u32 stk = r->loop[i].stk[j];
      if (stk && stk != kInvalidStackID) {
        rep.AddStack(StackDepotGet(stk), true);
      } else {
        // Sometimes we fail to extract the stack trace (FIXME: investigate),
        // but we should still produce some stack trace in the report.
        rep.AddStack(StackTrace(&dummy_pc, 1), true);
      }
    }
  }
  OutputReport(thr, rep);
}

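// Reports destruction of a still-locked mutex. The stack of the last lock
// is recovered from the event trace via RestoreStack, keyed by the
// FastState (slot id + epoch) captured in s->last_lock at lock time.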
void ReportDestroyLocked(ThreadState *thr, uptr pc, uptr addr,
                         FastState last_lock, StackID creation_stack_id) {
  // We need to lock the slot during RestoreStack because it protects
  // the slot journal.
  Lock slot_lock(&ctx->slots[static_cast<uptr>(last_lock.sid())].mtx);
  ThreadRegistryLock l0(&ctx->thread_registry);
  Lock slots_lock(&ctx->slot_mtx);
  ScopedReport rep(ReportTypeMutexDestroyLocked);
  rep.AddMutex(addr, creation_stack_id);
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  rep.AddStack(trace, true);

  Tid tid;
  DynamicMutexSet mset;
  uptr tag;
  if (!RestoreStack(EventType::kLock, last_lock.sid(), last_lock.epoch(), addr,
                    0, kAccessWrite, &tid, &trace, mset, &tag))
    return;
  rep.AddStack(trace, true);
  rep.AddLocation(addr, 1);
  OutputReport(thr, rep);
}

}  // namespace __tsan