// tsan_rtl_mutex.cpp
  1. //===-- tsan_rtl_mutex.cpp ------------------------------------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file is a part of ThreadSanitizer (TSan), a race detector.
  10. //
  11. //===----------------------------------------------------------------------===//
  12. #include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
  13. #include <sanitizer_common/sanitizer_stackdepot.h>
  14. #include "tsan_rtl.h"
  15. #include "tsan_flags.h"
  16. #include "tsan_sync.h"
  17. #include "tsan_report.h"
  18. #include "tsan_symbolize.h"
  19. #include "tsan_platform.h"
  20. namespace __tsan {
  21. void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
  22. void ReportDestroyLocked(ThreadState *thr, uptr pc, uptr addr,
  23. FastState last_lock, StackID creation_stack_id);
  24. struct Callback final : public DDCallback {
  25. ThreadState *thr;
  26. uptr pc;
  27. Callback(ThreadState *thr, uptr pc)
  28. : thr(thr)
  29. , pc(pc) {
  30. DDCallback::pt = thr->proc()->dd_pt;
  31. DDCallback::lt = thr->dd_lt;
  32. }
  33. StackID Unwind() override { return CurrentStackId(thr, pc); }
  34. int UniqueTid() override { return thr->tid; }
  35. };
  36. void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
  37. Callback cb(thr, pc);
  38. ctx->dd->MutexInit(&cb, &s->dd);
  39. s->dd.ctx = s->addr;
  40. }
  41. static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
  42. uptr addr, StackID creation_stack_id) {
  43. // In Go, these misuses are either impossible, or detected by std lib,
  44. // or false positives (e.g. unlock in a different thread).
  45. if (SANITIZER_GO)
  46. return;
  47. if (!ShouldReport(thr, typ))
  48. return;
  49. ThreadRegistryLock l(&ctx->thread_registry);
  50. ScopedReport rep(typ);
  51. rep.AddMutex(addr, creation_stack_id);
  52. VarSizeStackTrace trace;
  53. ObtainCurrentStack(thr, pc, &trace);
  54. rep.AddStack(trace, true);
  55. rep.AddLocation(addr, 1);
  56. OutputReport(thr, rep);
  57. }
  58. static void RecordMutexLock(ThreadState *thr, uptr pc, uptr addr,
  59. StackID stack_id, bool write) {
  60. auto typ = write ? EventType::kLock : EventType::kRLock;
  61. // Note: it's important to trace before modifying mutex set
  62. // because tracing can switch trace part and we write the current
  63. // mutex set in the beginning of each part.
  64. // If we do it in the opposite order, we will write already reduced
  65. // mutex set in the beginning of the part and then trace unlock again.
  66. TraceMutexLock(thr, typ, pc, addr, stack_id);
  67. thr->mset.AddAddr(addr, stack_id, write);
  68. }
// Traces a lock release and then drops the mutex from the thread's
// current mutex set (trace first -- see RecordMutexLock).
static void RecordMutexUnlock(ThreadState *thr, uptr addr) {
  // See the comment in RecordMutexLock re order of operations.
  TraceMutexUnlock(thr, addr);
  thr->mset.DelAddr(addr);
}
  74. void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  75. DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
  76. if (!(flagz & MutexFlagLinkerInit) && pc && IsAppMem(addr))
  77. MemoryAccess(thr, pc, addr, 1, kAccessWrite);
  78. SlotLocker locker(thr);
  79. auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
  80. s->SetFlags(flagz & MutexCreationFlagMask);
  81. // Save stack in the case the sync object was created before as atomic.
  82. if (!SANITIZER_GO && s->creation_stack_id == kInvalidStackID)
  83. s->creation_stack_id = CurrentStackId(thr, pc);
  84. }
// Handles mutex destruction: resets the sync object, notifies the deadlock
// detector, imitates a write to catch unlock-destroy races, and optionally
// reports destruction of a still-locked mutex.
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  bool unlock_locked = false;
  StackID creation_stack_id;
  FastState last_lock;
  {
    auto s = ctx->metamap.GetSyncIfExists(addr);
    // Nothing to do if we never saw this mutex.
    if (!s)
      return;
    SlotLocker locker(thr);
    {
      Lock lock(&s->mtx);
      // Snapshot report data before Reset() wipes the sync object.
      creation_stack_id = s->creation_stack_id;
      last_lock = s->last_lock;
      if ((flagz & MutexFlagLinkerInit) || s->IsFlagSet(MutexFlagLinkerInit) ||
          ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
        // Destroy is no-op for linker-initialized mutexes.
        return;
      }
      if (common_flags()->detect_deadlocks) {
        Callback cb(thr, pc);
        // Destroy and immediately re-init detector state so a later reuse of
        // the same sync object starts from a clean slate.
        ctx->dd->MutexDestroy(&cb, &s->dd);
        ctx->dd->MutexInit(&cb, &s->dd);
      }
      // Destroying a mutex that still has an owner is a (reportable) bug;
      // mark it broken so we do not pile up further reports on it.
      if (flags()->report_destroy_locked && s->owner_tid != kInvalidTid &&
          !s->IsFlagSet(MutexFlagBroken)) {
        s->SetFlags(MutexFlagBroken);
        unlock_locked = true;
      }
      s->Reset();
    }
    // Imitate a memory write to catch unlock-destroy races.
    if (pc && IsAppMem(addr))
      MemoryAccess(thr, pc, addr, 1,
                   kAccessWrite | kAccessFree | kAccessSlotLocked);
  }
  // Report outside of the slot/sync locks.
  if (unlock_locked && ShouldReport(thr, ReportTypeMutexDestroyLocked))
    ReportDestroyLocked(thr, pc, addr, last_lock, creation_stack_id);
  thr->mset.DelAddr(addr, true);
  // s will be destroyed and freed in MetaMap::FreeBlock.
}
  126. void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  127. DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  128. if (flagz & MutexFlagTryLock)
  129. return;
  130. if (!common_flags()->detect_deadlocks)
  131. return;
  132. Callback cb(thr, pc);
  133. {
  134. SlotLocker locker(thr);
  135. auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
  136. ReadLock lock(&s->mtx);
  137. s->UpdateFlags(flagz);
  138. if (s->owner_tid != thr->tid)
  139. ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
  140. }
  141. ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  142. }
// Called after a write-lock succeeded: updates ownership/recursion state,
// acquires the mutex clocks into the thread on first acquisition, traces
// the event, and reports double-lock / deadlocks as needed.
// `rec` is the recursion count to add (must be > 0 with
// MutexFlagRecursiveLock, otherwise forced to 1).
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
  DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
      thr->tid, addr, flagz, rec);
  if (flagz & MutexFlagRecursiveLock)
    CHECK_GT(rec, 0);
  else
    rec = 1;
  // Imitate an atomic read of the mutex memory to catch lock-destroy races.
  if (pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  bool report_double_lock = false;
  bool pre_lock = false;
  bool first = false;
  StackID creation_stack_id = kInvalidStackID;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    creation_stack_id = s->creation_stack_id;
    RecordMutexLock(thr, pc, addr, creation_stack_id, true);
    {
      Lock lock(&s->mtx);
      // First (non-recursive) acquisition by any thread?
      first = s->recursion == 0;
      s->UpdateFlags(flagz);
      if (s->owner_tid == kInvalidTid) {
        // Fresh acquisition: take ownership.
        CHECK_EQ(s->recursion, 0);
        s->owner_tid = thr->tid;
        s->last_lock = thr->fast_state;
      } else if (s->owner_tid == thr->tid) {
        // Recursive acquisition by the current owner.
        CHECK_GT(s->recursion, 0);
      } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
        // Locked while owned by another thread: double lock.
        s->SetFlags(MutexFlagBroken);
        report_double_lock = true;
      }
      s->recursion += rec;
      if (first) {
        // Synchronize with prior critical sections (both writers and
        // readers) of this mutex.
        if (!thr->ignore_sync) {
          thr->clock.Acquire(s->clock);
          thr->clock.Acquire(s->read_clock);
        }
      }
      if (first && common_flags()->detect_deadlocks) {
        // For try-locks and annotated pre-locked mutexes MutexPreLock was
        // not called, so do the "before" notification here.
        pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
            !(flagz & MutexFlagTryLock);
        Callback cb(thr, pc);
        if (pre_lock)
          ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
        ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
      }
    }
  }
  // Reporting happens after all internal locks are released.
  if (report_double_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr,
                      creation_stack_id);
  if (first && pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
// Handles a write-unlock: decrements recursion (or drops it fully with
// MutexFlagRecursiveUnlock), releases the thread clock into the mutex on
// full release, and reports unlock of an unowned mutex.
// Returns the recursion count that was removed (0 on a bad unlock), which
// callers can pass back to MutexPostLock to re-lock recursively.
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  // Imitate an atomic read to catch unlock-destroy races.
  if (pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  StackID creation_stack_id;
  RecordMutexUnlock(thr, addr);
  bool report_bad_unlock = false;
  int rec = 0;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    bool released = false;
    {
      Lock lock(&s->mtx);
      creation_stack_id = s->creation_stack_id;
      if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
        // Unlock of a not-locked mutex, or by a non-owner thread.
        if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
          s->SetFlags(MutexFlagBroken);
          report_bad_unlock = true;
        }
      } else {
        // Recursive unlock drops the whole recursion count at once.
        rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
        s->recursion -= rec;
        if (s->recursion == 0) {
          s->owner_tid = kInvalidTid;
          if (!thr->ignore_sync) {
            // Publish this thread's clock for the next acquirer.
            thr->clock.ReleaseStore(&s->clock);
            released = true;
          }
        }
      }
      if (common_flags()->detect_deadlocks && s->recursion == 0 &&
          !report_bad_unlock) {
        Callback cb(thr, pc);
        ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
      }
    }
    // Epoch increment must happen after the sync mutex is released but
    // still under the slot lock.
    if (released)
      IncrementEpoch(thr);
  }
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr,
                      creation_stack_id);
  if (common_flags()->detect_deadlocks && !report_bad_unlock) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
  return rec;
}
  249. void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  250. DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  251. if ((flagz & MutexFlagTryLock) || !common_flags()->detect_deadlocks)
  252. return;
  253. Callback cb(thr, pc);
  254. {
  255. SlotLocker locker(thr);
  256. auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
  257. ReadLock lock(&s->mtx);
  258. s->UpdateFlags(flagz);
  259. ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
  260. }
  261. ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  262. }
// Called after a read-lock succeeded: acquires the mutex write clock into
// the thread, traces the event, and reports a read lock taken while the
// mutex is write-owned.
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  // Imitate an atomic read to catch lock-destroy races.
  if (pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  bool report_bad_lock = false;
  bool pre_lock = false;
  StackID creation_stack_id = kInvalidStackID;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    creation_stack_id = s->creation_stack_id;
    RecordMutexLock(thr, pc, addr, creation_stack_id, false);
    {
      // Readers only need the read lock on the sync object.
      ReadLock lock(&s->mtx);
      s->UpdateFlags(flagz);
      if (s->owner_tid != kInvalidTid) {
        // Read lock while the mutex is write-locked.
        if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
          s->SetFlags(MutexFlagBroken);
          report_bad_lock = true;
        }
      }
      // Synchronize with prior write critical sections only.
      if (!thr->ignore_sync)
        thr->clock.Acquire(s->clock);
      s->last_lock = thr->fast_state;
      if (common_flags()->detect_deadlocks) {
        // For try-locks and annotated pre-locked mutexes MutexPreReadLock
        // was not called, so do the "before" notification here.
        pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
            !(flagz & MutexFlagTryLock);
        Callback cb(thr, pc);
        if (pre_lock)
          ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
        ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
      }
    }
  }
  // Reporting happens after all internal locks are released.
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr,
                      creation_stack_id);
  if (pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
// Handles a read-unlock: releases the thread clock into the mutex read
// clock and reports a read unlock of a write-locked mutex.
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  // Imitate an atomic read to catch unlock-destroy races.
  if (pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  RecordMutexUnlock(thr, addr);
  StackID creation_stack_id;
  bool report_bad_unlock = false;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    bool released = false;
    {
      Lock lock(&s->mtx);
      creation_stack_id = s->creation_stack_id;
      if (s->owner_tid != kInvalidTid) {
        // Read unlock of a mutex that is currently write-owned.
        if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
          s->SetFlags(MutexFlagBroken);
          report_bad_unlock = true;
        }
      }
      if (!thr->ignore_sync) {
        // Readers merge into the shared read_clock (do not overwrite it).
        thr->clock.Release(&s->read_clock);
        released = true;
      }
      if (common_flags()->detect_deadlocks && s->recursion == 0) {
        Callback cb(thr, pc);
        ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
      }
    }
    // Epoch increment must happen after the sync mutex is released but
    // still under the slot lock.
    if (released)
      IncrementEpoch(thr);
  }
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr,
                      creation_stack_id);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
// Handles an unlock when the caller does not know whether the mutex was
// read- or write-locked (e.g. pthread_rwlock_unlock): infers the mode from
// the recorded owner and releases the corresponding clock.
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  // Imitate an atomic read to catch unlock-destroy races.
  if (pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  RecordMutexUnlock(thr, addr);
  StackID creation_stack_id;
  bool report_bad_unlock = false;
  bool write = true;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    bool released = false;
    {
      Lock lock(&s->mtx);
      creation_stack_id = s->creation_stack_id;
      if (s->owner_tid == kInvalidTid) {
        // Seems to be read unlock.
        write = false;
        if (!thr->ignore_sync) {
          // Merge into the shared read_clock (do not overwrite it).
          thr->clock.Release(&s->read_clock);
          released = true;
        }
      } else if (s->owner_tid == thr->tid) {
        // Seems to be write unlock.
        CHECK_GT(s->recursion, 0);
        s->recursion--;
        if (s->recursion == 0) {
          s->owner_tid = kInvalidTid;
          if (!thr->ignore_sync) {
            // Full release: publish this thread's clock for next acquirer.
            thr->clock.ReleaseStore(&s->clock);
            released = true;
          }
        }
      } else if (!s->IsFlagSet(MutexFlagBroken)) {
        // Owned by some other thread: bad unlock.
        s->SetFlags(MutexFlagBroken);
        report_bad_unlock = true;
      }
      if (common_flags()->detect_deadlocks && s->recursion == 0) {
        Callback cb(thr, pc);
        ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
      }
    }
    // Epoch increment must happen after the sync mutex is released but
    // still under the slot lock.
    if (released)
      IncrementEpoch(thr);
  }
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr,
                      creation_stack_id);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
  398. void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
  399. DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
  400. SlotLocker locker(thr);
  401. auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
  402. Lock lock(&s->mtx);
  403. s->owner_tid = kInvalidTid;
  404. s->recursion = 0;
  405. }
  406. void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
  407. DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
  408. StackID creation_stack_id = kInvalidStackID;
  409. {
  410. SlotLocker locker(thr);
  411. auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
  412. if (s)
  413. creation_stack_id = s->creation_stack_id;
  414. }
  415. ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr,
  416. creation_stack_id);
  417. }
// Acquire semantics on the sync object at `addr`: pulls the object's clock
// into the current thread's clock. No-op if the object does not exist or
// carries no clock yet.
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  auto s = ctx->metamap.GetSyncIfExists(addr);
  if (!s)
    return;
  SlotLocker locker(thr);
  // Fast-path check of s->clock before taking s->mtx.
  // NOTE(review): this read happens without holding s->mtx -- presumably an
  // intentional benign race on the pointer; confirm against SyncVar docs.
  if (!s->clock)
    return;
  ReadLock lock(&s->mtx);
  thr->clock.Acquire(s->clock);
}
  431. void AcquireGlobal(ThreadState *thr) {
  432. DPrintf("#%d: AcquireGlobal\n", thr->tid);
  433. if (thr->ignore_sync)
  434. return;
  435. SlotLocker locker(thr);
  436. for (auto &slot : ctx->slots) thr->clock.Set(slot.sid, slot.epoch());
  437. }
  438. void Release(ThreadState *thr, uptr pc, uptr addr) {
  439. DPrintf("#%d: Release %zx\n", thr->tid, addr);
  440. if (thr->ignore_sync)
  441. return;
  442. SlotLocker locker(thr);
  443. {
  444. auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
  445. Lock lock(&s->mtx);
  446. thr->clock.Release(&s->clock);
  447. }
  448. IncrementEpoch(thr);
  449. }
  450. void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  451. DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  452. if (thr->ignore_sync)
  453. return;
  454. SlotLocker locker(thr);
  455. {
  456. auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
  457. Lock lock(&s->mtx);
  458. thr->clock.ReleaseStore(&s->clock);
  459. }
  460. IncrementEpoch(thr);
  461. }
  462. void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) {
  463. DPrintf("#%d: ReleaseStoreAcquire %zx\n", thr->tid, addr);
  464. if (thr->ignore_sync)
  465. return;
  466. SlotLocker locker(thr);
  467. {
  468. auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
  469. Lock lock(&s->mtx);
  470. thr->clock.ReleaseStoreAcquire(&s->clock);
  471. }
  472. IncrementEpoch(thr);
  473. }
  474. void IncrementEpoch(ThreadState *thr) {
  475. DCHECK(!thr->ignore_sync);
  476. DCHECK(thr->slot_locked);
  477. Epoch epoch = EpochInc(thr->fast_state.epoch());
  478. if (!EpochOverflow(epoch)) {
  479. Sid sid = thr->fast_state.sid();
  480. thr->clock.Set(sid, epoch);
  481. thr->fast_state.SetEpoch(epoch);
  482. thr->slot->SetEpoch(epoch);
  483. TraceTime(thr);
  484. }
  485. }
#if !SANITIZER_GO
// Called after the thread returns from sleep-like functions: snapshots the
// current stack and all slot epochs so later reports can mention "as if
// synchronized via sleep".
void AfterSleep(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AfterSleep\n", thr->tid);
  if (thr->ignore_sync)
    return;
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  thr->last_sleep_clock.Reset();
  SlotLocker locker(thr);
  for (auto &slot : ctx->slots)
    thr->last_sleep_clock.Set(slot.sid, slot.epoch());
}
#endif
// Builds and emits a deadlock report from the detector's cycle description
// `r`: one mutex/thread pair per edge of the lock-order cycle, plus the
// stacks at which each edge was created.
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
  if (r == 0 || !ShouldReport(thr, ReportTypeDeadlock))
    return;
  ThreadRegistryLock l(&ctx->thread_registry);
  ScopedReport rep(ReportTypeDeadlock);
  for (int i = 0; i < r->n; i++) {
    rep.AddMutex(r->loop[i].mtx_ctx0, r->loop[i].stk[0]);
    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
    rep.AddThread((int)r->loop[i].thr_ctx);
  }
  // Placeholder PC for edges whose stack could not be recovered.
  uptr dummy_pc = 0x42;
  for (int i = 0; i < r->n; i++) {
    // Optionally include the second stack of each edge (the lock already
    // held when the new one was acquired).
    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
      u32 stk = r->loop[i].stk[j];
      if (stk && stk != kInvalidStackID) {
        rep.AddStack(StackDepotGet(stk), true);
      } else {
        // Sometimes we fail to extract the stack trace (FIXME: investigate),
        // but we should still produce some stack trace in the report.
        rep.AddStack(StackTrace(&dummy_pc, 1), true);
      }
    }
  }
  OutputReport(thr, rep);
}
// Emits a "mutex destroyed while locked" report: attaches the destruction
// stack and attempts to restore the stack of the last lock acquisition
// from the trace (identified by `last_lock`'s sid/epoch).
void ReportDestroyLocked(ThreadState *thr, uptr pc, uptr addr,
                         FastState last_lock, StackID creation_stack_id) {
  // We need to lock the slot during RestoreStack because it protects
  // the slot journal.
  Lock slot_lock(&ctx->slots[static_cast<uptr>(last_lock.sid())].mtx);
  ThreadRegistryLock l0(&ctx->thread_registry);
  Lock slots_lock(&ctx->slot_mtx);
  ScopedReport rep(ReportTypeMutexDestroyLocked);
  rep.AddMutex(addr, creation_stack_id);
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  rep.AddStack(trace, true);
  Tid tid;
  DynamicMutexSet mset;
  uptr tag;
  // Replay the trace to find where the mutex was last locked; bail out
  // (without a report) if the trace no longer covers that event.
  if (!RestoreStack(EventType::kLock, last_lock.sid(), last_lock.epoch(), addr,
                    0, kAccessWrite, &tid, &trace, mset, &tag))
    return;
  rep.AddStack(trace, true);
  rep.AddLocation(addr, 1);
  OutputReport(thr, rep);
}
  545. } // namespace __tsan