//===-- sanitizer_deadlock_detector1.cpp ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Deadlock detector implementation based on NxN adjacency bit matrix.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_deadlock_detector_interface.h"
#include "sanitizer_deadlock_detector.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_mutex.h"

#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1

namespace __sanitizer {

typedef TwoLevelBitVector<> DDBV;  // DeadlockDetector's bit vector.
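
// Per-thread state. This version of the detector keeps no per-physical-thread
// state (CreatePhysicalThread() below returns nullptr); all per-thread
// bookkeeping lives in DDLogicalThread.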
struct DDPhysicalThread {
};

struct DDLogicalThread {
  u64 ctx;
  DeadlockDetectorTLS<DDBV> dd;
  DDReport rep;
  bool report_pending;
};
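
// The detector itself: the global lock-order graph (dd), the spin mutex (mtx)
// that guards it on the slow paths, and the flags it was created with.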
struct DD final : public DDetector {
  SpinMutex mtx;
  DeadlockDetector<DDBV> dd;
  DDFlags flags;

  explicit DD(const DDFlags *flags);

  DDPhysicalThread *CreatePhysicalThread() override;
  void DestroyPhysicalThread(DDPhysicalThread *pt) override;

  DDLogicalThread *CreateLogicalThread(u64 ctx) override;
  void DestroyLogicalThread(DDLogicalThread *lt) override;

  void MutexInit(DDCallback *cb, DDMutex *m) override;
  void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) override;
  void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
                      bool trylock) override;
  void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) override;
  void MutexDestroy(DDCallback *cb, DDMutex *m) override;

  DDReport *GetReport(DDCallback *cb) override;

  void MutexEnsureID(DDLogicalThread *lt, DDMutex *m);
  void ReportDeadlock(DDCallback *cb, DDMutex *m);
};

DDetector *DDetector::Create(const DDFlags *flags) {
  (void)flags;
  void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
  return new(mem) DD(flags);
}

DD::DD(const DDFlags *flags)
    : flags(*flags) {
  dd.clear();
}

DDPhysicalThread* DD::CreatePhysicalThread() {
  return nullptr;
}

void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
}

DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
  DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(sizeof(*lt));
  lt->ctx = ctx;
  lt->dd.clear();
  lt->report_pending = false;
  return lt;
}

void DD::DestroyLogicalThread(DDLogicalThread *lt) {
  lt->~DDLogicalThread();
  InternalFree(lt);
}

void DD::MutexInit(DDCallback *cb, DDMutex *m) {
  m->id = 0;
  m->stk = cb->Unwind();
}
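
// Assigns m a node in the current epoch of the graph if it does not have one
// yet, and brings lt's thread-local state up to the current epoch.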
void DD::MutexEnsureID(DDLogicalThread *lt, DDMutex *m) {
  if (!dd.nodeBelongsToCurrentEpoch(m->id))
    m->id = dd.newNode(reinterpret_cast<uptr>(m));
  dd.ensureCurrentEpoch(&lt->dd);
}
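
// Called before lt acquires m. If acquiring m would close a cycle in the
// lock-order graph, add the offending edges now (so the report has their
// stack traces) and report the deadlock.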
void DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {
  DDLogicalThread *lt = cb->lt;
  if (lt->dd.empty()) return;  // This will be the first lock held by lt.
  if (dd.hasAllEdges(&lt->dd, m->id)) return;  // We already have all edges.
  SpinMutexLock lk(&mtx);
  MutexEnsureID(lt, m);
  if (dd.isHeld(&lt->dd, m->id))
    return;  // FIXME: allow this only for recursive locks.
  if (dd.onLockBefore(&lt->dd, m->id)) {
    // Actually add this edge now so that we have all the stack traces.
    dd.addEdges(&lt->dd, m->id, cb->Unwind(), cb->UniqueTid());
    ReportDeadlock(cb, m);
  }
}
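
// Finds the cycle through m in the lock-order graph and records it, edge by
// edge, in the thread-local DDReport; the report is handed out via GetReport().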
void DD::ReportDeadlock(DDCallback *cb, DDMutex *m) {
  DDLogicalThread *lt = cb->lt;
  uptr path[20];
  uptr len = dd.findPathToLock(&lt->dd, m->id, path, ARRAY_SIZE(path));
  if (len == 0U) {
    // A cycle of 20+ locks? Well, that's a bit odd...
    Printf("WARNING: too long mutex cycle found\n");
    return;
  }
  CHECK_EQ(m->id, path[0]);
  lt->report_pending = true;
  len = Min<uptr>(len, DDReport::kMaxLoopSize);
  DDReport *rep = &lt->rep;
  rep->n = len;
  for (uptr i = 0; i < len; i++) {
    uptr from = path[i];
    uptr to = path[(i + 1) % len];
    DDMutex *m0 = (DDMutex*)dd.getData(from);
    DDMutex *m1 = (DDMutex*)dd.getData(to);

    u32 stk_from = 0, stk_to = 0;
    int unique_tid = 0;
    dd.findEdge(from, to, &stk_from, &stk_to, &unique_tid);
    // Printf("Edge: %zd=>%zd: %u/%u T%d\n", from, to, stk_from, stk_to,
    //        unique_tid);
    rep->loop[i].thr_ctx = unique_tid;
    rep->loop[i].mtx_ctx0 = m0->ctx;
    rep->loop[i].mtx_ctx1 = m1->ctx;
    rep->loop[i].stk[0] = stk_to;
    rep->loop[i].stk[1] = stk_from;
  }
}
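
// Called after lt acquired m. onFirstLock/onLockFast handle the common cases
// without taking the global mutex; otherwise new lock-order edges are added
// under mtx (unless this was a trylock, which cannot create a deadlock).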
void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock) {
  DDLogicalThread *lt = cb->lt;
  u32 stk = 0;
  if (flags.second_deadlock_stack)
    stk = cb->Unwind();
  // Printf("T%p MutexLock: %zx stk %u\n", lt, m->id, stk);
  if (dd.onFirstLock(&lt->dd, m->id, stk))
    return;
  if (dd.onLockFast(&lt->dd, m->id, stk))
    return;

  SpinMutexLock lk(&mtx);
  MutexEnsureID(lt, m);
  if (wlock)  // Only a recursive rlock may be held.
    CHECK(!dd.isHeld(&lt->dd, m->id));
  if (!trylock)
    dd.addEdges(&lt->dd, m->id, stk ? stk : cb->Unwind(), cb->UniqueTid());
  dd.onLockAfter(&lt->dd, m->id, stk);
}

void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
  // Printf("T%p MutexUnLock: %zx\n", cb->lt, m->id);
  dd.onUnlock(&cb->lt->dd, m->id);
}
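
// Forgets a destroyed mutex: if it has a node in the current epoch, remove it
// from the graph and clear its id.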
void DD::MutexDestroy(DDCallback *cb, DDMutex *m) {
  if (!m->id) return;
  SpinMutexLock lk(&mtx);
  if (dd.nodeBelongsToCurrentEpoch(m->id))
    dd.removeNode(m->id);
  m->id = 0;
}
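
// Returns the pending deadlock report for this thread, if any, and resets the
// pending flag.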
DDReport *DD::GetReport(DDCallback *cb) {
  if (!cb->lt->report_pending)
    return nullptr;
  cb->lt->report_pending = false;
  return &cb->lt->rep;
}

}  // namespace __sanitizer

#endif  // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1