//===-- sanitizer_deadlock_detector2.cpp ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Deadlock detector implementation based on adjacency lists.
//
//===----------------------------------------------------------------------===//
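
// Overview (summary of the code below): each mutex is assigned a small
// MutexState holding up to kMaxLink outgoing "acquired-after" edges.
// MutexBeforeLock adds an edge from every mutex the thread already holds to
// the mutex being acquired, then CycleCheck runs an iterative DFS over these
// adjacency lists; a cycle through the new mutex is reported as a potential
// deadlock via DDReport.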

#include "sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_mutex.h"

#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2

namespace __sanitizer {
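
// Capacity limits: a thread may hold at most kMaxNesting mutexes, a mutex may
// keep at most kMaxLink outgoing edges, and mutex states live in a two-level
// table of kL1Size x kL2Size entries (kMaxMutex ids total). kNoId marks an
// unassigned mutex id and kEndId is a sentinel used by the DFS in CycleCheck.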
const int kMaxNesting = 64;
const u32 kNoId = -1;
const u32 kEndId = -2;
const int kMaxLink = 8;
const int kL1Size = 1024;
const int kL2Size = 1024;
const int kMaxMutex = kL1Size * kL2Size;

struct Id {
  u32 id;
  u32 seq;

  explicit Id(u32 id = 0, u32 seq = 0)
      : id(id)
      , seq(seq) {
  }
};
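
// A directed edge in the lock-order graph: thread tid acquired mutex id while
// holding the mutex that owns this Link. seq snapshots the target mutex's
// generation so that edges to a destroyed (and possibly reused) id can be
// recognized as stale; stk0/stk1 are the two acquisition stacks.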
struct Link {
  u32 id;
  u32 seq;
  u32 tid;
  u32 stk0;
  u32 stk1;

  explicit Link(u32 id = 0, u32 seq = 0, u32 tid = 0, u32 s0 = 0, u32 s1 = 0)
      : id(id)
      , seq(seq)
      , tid(tid)
      , stk0(s0)
      , stk1(s1) {
  }
};
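
// Per-OS-thread scratch state for the cycle search (visited set, DFS stack,
// current path) plus the deadlock report handed back through GetReport.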
struct DDPhysicalThread {
  DDReport rep;
  bool report_pending;
  bool visited[kMaxMutex];
  Link pending[kMaxMutex];
  Link path[kMaxMutex];
};

struct ThreadMutex {
  u32 id;
  u32 stk;
};
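
// Per-user-thread state: ctx identifies the thread to the tool, locked holds
// the ids (and optional acquisition stacks) of all currently held mutexes.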
struct DDLogicalThread {
  u64 ctx;
  ThreadMutex locked[kMaxNesting];
  int nlocked;
};
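
// Global per-mutex state, protected by mtx: seq is a generation counter
// bumped in MutexDestroy, link is the adjacency list of outgoing edges.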
struct MutexState {
  StaticSpinMutex mtx;
  u32 seq;
  int nlink;
  Link link[kMaxLink];
};
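
// The detector itself: owns the two-level mutex table and the id allocator;
// everything else lives in the per-thread structures above.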
struct DD final : public DDetector {
  explicit DD(const DDFlags *flags);

  DDPhysicalThread *CreatePhysicalThread();
  void DestroyPhysicalThread(DDPhysicalThread *pt);

  DDLogicalThread *CreateLogicalThread(u64 ctx);
  void DestroyLogicalThread(DDLogicalThread *lt);

  void MutexInit(DDCallback *cb, DDMutex *m);
  void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock);
  void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexDestroy(DDCallback *cb, DDMutex *m);

  DDReport *GetReport(DDCallback *cb);

  void CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt, DDMutex *mtx);
  void Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath);
  u32 allocateId(DDCallback *cb);
  MutexState *getMutex(u32 id);
  u32 getMutexId(MutexState *m);

  DDFlags flags;

  MutexState *mutex[kL1Size];

  SpinMutex mtx;
  InternalMmapVector<u32> free_id;
  int id_gen = 0;
};

DDetector *DDetector::Create(const DDFlags *flags) {
  (void)flags;
  void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
  return new(mem) DD(flags);
}

DD::DD(const DDFlags *flags) : flags(*flags) { free_id.reserve(1024); }

DDPhysicalThread* DD::CreatePhysicalThread() {
  DDPhysicalThread *pt = (DDPhysicalThread*)MmapOrDie(sizeof(DDPhysicalThread),
      "deadlock detector (physical thread)");
  return pt;
}

void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
  pt->~DDPhysicalThread();
  UnmapOrDie(pt, sizeof(DDPhysicalThread));
}

DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
  DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(
      sizeof(DDLogicalThread));
  lt->ctx = ctx;
  lt->nlocked = 0;
  return lt;
}

void DD::DestroyLogicalThread(DDLogicalThread *lt) {
  lt->~DDLogicalThread();
  InternalFree(lt);
}

void DD::MutexInit(DDCallback *cb, DDMutex *m) {
  VPrintf(2, "#%llu: DD::MutexInit(%p)\n", cb->lt->ctx, m);
  m->id = kNoId;
  m->recursion = 0;
  atomic_store(&m->owner, 0, memory_order_relaxed);
}
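
// A mutex id encodes its slot in the two-level table: mutex[id / kL2Size] is
// the level-2 array, id % kL2Size the index within it.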
MutexState *DD::getMutex(u32 id) { return &mutex[id / kL2Size][id % kL2Size]; }

u32 DD::getMutexId(MutexState *m) {
  for (int i = 0; i < kL1Size; i++) {
    MutexState *tab = mutex[i];
    if (tab == 0)
      break;
    if (m >= tab && m < tab + kL2Size)
      return i * kL2Size + (m - tab);
  }
  return -1;
}
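
// Ids of destroyed mutexes are recycled through free_id; otherwise a fresh id
// is taken from id_gen, mapping a new level-2 table at each kL2Size boundary.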
u32 DD::allocateId(DDCallback *cb) {
  u32 id = -1;
  SpinMutexLock l(&mtx);
  if (free_id.size() > 0) {
    id = free_id.back();
    free_id.pop_back();
  } else {
    CHECK_LT(id_gen, kMaxMutex);
    if ((id_gen % kL2Size) == 0) {
      mutex[id_gen / kL2Size] = (MutexState *)MmapOrDie(
          kL2Size * sizeof(MutexState), "deadlock detector (mutex table)");
    }
    id = id_gen++;
  }
  CHECK_LE(id, kMaxMutex);
  VPrintf(3, "#%llu: DD::allocateId assign id %d\n", cb->lt->ctx, id);
  return id;
}
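
// Called before a blocking lock: records an edge from every currently held
// mutex to m (tagged with m's current generation), then runs CycleCheck if
// any new edge was added. Recursive acquisition is ignored, and a thread's
// first mutex cannot create an edge, so no check is needed for it.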
void DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {
  VPrintf(2, "#%llu: DD::MutexBeforeLock(%p, wlock=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, cb->lt->nlocked);
  DDPhysicalThread *pt = cb->pt;
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock recursive\n",
        cb->lt->ctx);
    return;
  }

  CHECK_LE(lt->nlocked, kMaxNesting);

  // FIXME(dvyukov): don't allocate id if lt->nlocked == 0?
  if (m->id == kNoId)
    m->id = allocateId(cb);

  ThreadMutex *tm = &lt->locked[lt->nlocked++];
  tm->id = m->id;
  if (flags.second_deadlock_stack)
    tm->stk = cb->Unwind();
  if (lt->nlocked == 1) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock first mutex\n",
        cb->lt->ctx);
    return;
  }

  bool added = false;
  MutexState *mtx = getMutex(m->id);
  for (int i = 0; i < lt->nlocked - 1; i++) {
    u32 id1 = lt->locked[i].id;
    u32 stk1 = lt->locked[i].stk;
    MutexState *mtx1 = getMutex(id1);
    SpinMutexLock l(&mtx1->mtx);
    if (mtx1->nlink == kMaxLink) {
      // FIXME(dvyukov): check stale links
      continue;
    }
    int li = 0;
    for (; li < mtx1->nlink; li++) {
      Link *link = &mtx1->link[li];
      if (link->id == m->id) {
        if (link->seq != mtx->seq) {
          link->seq = mtx->seq;
          link->tid = lt->ctx;
          link->stk0 = stk1;
          link->stk1 = cb->Unwind();
          added = true;
          VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
              cb->lt->ctx, getMutexId(mtx1), m->id);
        }
        break;
      }
    }
    if (li == mtx1->nlink) {
      // FIXME(dvyukov): check stale links
      Link *link = &mtx1->link[mtx1->nlink++];
      link->id = m->id;
      link->seq = mtx->seq;
      link->tid = lt->ctx;
      link->stk0 = stk1;
      link->stk1 = cb->Unwind();
      added = true;
      VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
          cb->lt->ctx, getMutexId(mtx1), m->id);
    }
  }

  if (!added || mtx->nlink == 0) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock don't check\n",
        cb->lt->ctx);
    return;
  }

  CycleCheck(pt, lt, m);
}
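
// Records ownership for write locks. For try-locks the held-set is updated
// here rather than in MutexBeforeLock: a successful try-lock cannot have
// blocked, so no edges are added and no cycle check is performed for it.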
void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock) {
  VPrintf(2, "#%llu: DD::MutexAfterLock(%p, wlock=%d, try=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, trylock, cb->lt->nlocked);
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexAfterLock recursive\n", cb->lt->ctx);
    CHECK(wlock);
    m->recursion++;
    return;
  }
  CHECK_EQ(owner, 0);
  if (wlock) {
    VPrintf(3, "#%llu: DD::MutexAfterLock set owner\n", cb->lt->ctx);
    CHECK_EQ(m->recursion, 0);
    m->recursion = 1;
    atomic_store(&m->owner, (uptr)cb->lt, memory_order_relaxed);
  }

  if (!trylock)
    return;

  CHECK_LE(lt->nlocked, kMaxNesting);
  if (m->id == kNoId)
    m->id = allocateId(cb);
  ThreadMutex *tm = &lt->locked[lt->nlocked++];
  tm->id = m->id;
  if (flags.second_deadlock_stack)
    tm->stk = cb->Unwind();
}
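
// Releases recursion/ownership bookkeeping and removes m from the thread's
// held-set by swapping in the last entry (the order of lt->locked is not
// significant here).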
void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
  VPrintf(2, "#%llu: DD::MutexBeforeUnlock(%p, wlock=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, cb->lt->nlocked);
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexBeforeUnlock recursive\n", cb->lt->ctx);
    if (--m->recursion > 0)
      return;
    VPrintf(3, "#%llu: DD::MutexBeforeUnlock reset owner\n", cb->lt->ctx);
    atomic_store(&m->owner, 0, memory_order_relaxed);
  }
  CHECK_NE(m->id, kNoId);
  int last = lt->nlocked - 1;
  for (int i = last; i >= 0; i--) {
    if (lt->locked[i].id == m->id) {
      lt->locked[i] = lt->locked[last];
      lt->nlocked--;
      break;
    }
  }
}

void DD::MutexDestroy(DDCallback *cb, DDMutex *m) {
  VPrintf(2, "#%llu: DD::MutexDestroy(%p)\n",
      cb->lt->ctx, m);
  DDLogicalThread *lt = cb->lt;

  if (m->id == kNoId)
    return;

  // Remove the mutex from lt->locked if there.
  int last = lt->nlocked - 1;
  for (int i = last; i >= 0; i--) {
    if (lt->locked[i].id == m->id) {
      lt->locked[i] = lt->locked[last];
      lt->nlocked--;
      break;
    }
  }

  // Clear and invalidate the mutex descriptor.
  {
    MutexState *mtx = getMutex(m->id);
    SpinMutexLock l(&mtx->mtx);
    mtx->seq++;
    mtx->nlink = 0;
  }

  // Return id to cache.
  {
    SpinMutexLock l(&mtx);
    free_id.push_back(m->id);
  }
}
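
// Iterative DFS for a cycle through m. pt->pending is the explicit work
// stack; pushing Link(kEndId) marks where the current path element must be
// popped on backtracking. Edges whose seq does not match the target mutex's
// current generation are stale and skipped.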
void DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt, DDMutex *m) {
  internal_memset(pt->visited, 0, sizeof(pt->visited));
  int npath = 0;
  int npending = 0;
  {
    MutexState *mtx = getMutex(m->id);
    SpinMutexLock l(&mtx->mtx);
    for (int li = 0; li < mtx->nlink; li++)
      pt->pending[npending++] = mtx->link[li];
  }
  while (npending > 0) {
    Link link = pt->pending[--npending];
    if (link.id == kEndId) {
      npath--;
      continue;
    }
    if (pt->visited[link.id])
      continue;
    MutexState *mtx1 = getMutex(link.id);
    SpinMutexLock l(&mtx1->mtx);
    if (mtx1->seq != link.seq)
      continue;
    pt->visited[link.id] = true;
    if (mtx1->nlink == 0)
      continue;
    pt->path[npath++] = link;
    pt->pending[npending++] = Link(kEndId);
    if (link.id == m->id)
      return Report(pt, lt, npath);  // Bingo!
    for (int li = 0; li < mtx1->nlink; li++) {
      Link *link1 = &mtx1->link[li];
      // MutexState *mtx2 = getMutex(link->id);
      // FIXME(dvyukov): fast seq check
      // FIXME(dvyukov): fast nlink != 0 check
      // FIXME(dvyukov): fast pending check?
      // FIXME(dvyukov): npending can be larger than kMaxMutex
      pt->pending[npending++] = *link1;
    }
  }
}
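
// Copies the cycle found in pt->path into the per-physical-thread report;
// the tool later collects it via GetReport.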
void DD::Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath) {
  DDReport *rep = &pt->rep;
  rep->n = npath;
  for (int i = 0; i < npath; i++) {
    Link *link = &pt->path[i];
    Link *link0 = &pt->path[i ? i - 1 : npath - 1];
    rep->loop[i].thr_ctx = link->tid;
    rep->loop[i].mtx_ctx0 = link0->id;
    rep->loop[i].mtx_ctx1 = link->id;
    rep->loop[i].stk[0] = flags.second_deadlock_stack ? link->stk0 : 0;
    rep->loop[i].stk[1] = link->stk1;
  }
  pt->report_pending = true;
}

DDReport *DD::GetReport(DDCallback *cb) {
  if (!cb->pt->report_pending)
    return 0;
  cb->pt->report_pending = false;
  return &cb->pt->rep;
}

}  // namespace __sanitizer
#endif  // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2