//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_MUTEX_H
#define SANITIZER_MUTEX_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_thread_safety.h"

namespace __sanitizer {
class SANITIZER_MUTEX StaticSpinMutex {
 public:
  void Init() { atomic_store(&state_, 0, memory_order_relaxed); }

  void Lock() SANITIZER_ACQUIRE() {
    if (LIKELY(TryLock()))
      return;
    LockSlow();
  }

  bool TryLock() SANITIZER_TRY_ACQUIRE(true) {
    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
  }

  void Unlock() SANITIZER_RELEASE() {
    atomic_store(&state_, 0, memory_order_release);
  }

  void CheckLocked() const SANITIZER_CHECK_LOCKED() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
  }

 private:
  atomic_uint8_t state_;

  void LockSlow();
};
class SANITIZER_MUTEX SpinMutex : public StaticSpinMutex {
 public:
  SpinMutex() { Init(); }

  SpinMutex(const SpinMutex &) = delete;
  void operator=(const SpinMutex &) = delete;
};
// Semaphore provides an OS-dependent way to park/unpark threads.
// The last thread returned from Wait can destroy the object
// (destruction-safety).
class Semaphore {
 public:
  constexpr Semaphore() {}
  Semaphore(const Semaphore &) = delete;
  void operator=(const Semaphore &) = delete;

  void Wait();
  void Post(u32 count = 1);

 private:
  atomic_uint32_t state_ = {0};
};
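// A sketch of the intended contract (assuming the usual counting-semaphore
// semantics, with state_ holding the counter):
//   sem.Post(3);  // add 3 permits, unparking up to 3 waiting threads
//   sem.Wait();   // block until a permit is available, then consume it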
typedef int MutexType;

enum {
  // Used as a sentinel and to catch unassigned types
  // (should not be used as a real mutex type).
  MutexInvalid = 0,
  MutexThreadRegistry,
  // Each tool's own mutexes must start at this number.
  MutexLastCommon,
  // Type for legacy mutexes that are not checked for deadlocks.
  MutexUnchecked = -1,
  // Special marks that can be used in the MutexMeta::can_lock table.
  // Leaf mutexes can be locked under any other non-leaf mutex,
  // but no other mutex can be locked while under a leaf mutex.
  MutexLeaf = -1,
  // Multiple mutexes of this type can be locked at the same time.
  MutexMulti = -3,
};
// Go linker does not support THREADLOCAL variables,
// so we can't use per-thread state.
// Disable checked locks on Darwin. Although Darwin platforms support
// THREADLOCAL variables they are not usable early on during process init when
// `__sanitizer::Mutex` is used.
#define SANITIZER_CHECK_DEADLOCKS \
  (SANITIZER_DEBUG && !SANITIZER_GO && SANITIZER_SUPPORTS_THREADLOCAL && !SANITIZER_MAC)

#if SANITIZER_CHECK_DEADLOCKS
struct MutexMeta {
  MutexType type;
  const char *name;
  // The table defines which mutexes can be locked under which mutexes.
  // If the entry for MutexTypeFoo contains MutexTypeBar,
  // then the Bar mutex can be locked while under the Foo mutex.
  // It can also contain the special MutexLeaf/MutexMulti marks.
  MutexType can_lock[10];
};
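// An illustrative (hypothetical) entry for such a table:
//   {MutexThreadRegistry, "ThreadRegistry", {MutexLeaf}},
// i.e. ThreadRegistry is a leaf: it can be taken under any non-leaf mutex,
// but nothing else may be locked while it is held.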
#endif

class CheckedMutex {
 public:
  explicit constexpr CheckedMutex(MutexType type)
#if SANITIZER_CHECK_DEADLOCKS
      : type_(type)
#endif
  {
  }

  ALWAYS_INLINE void Lock() {
#if SANITIZER_CHECK_DEADLOCKS
    LockImpl(GET_CALLER_PC());
#endif
  }

  ALWAYS_INLINE void Unlock() {
#if SANITIZER_CHECK_DEADLOCKS
    UnlockImpl();
#endif
  }

  // Checks that the current thread does not hold any mutexes
  // (e.g. when returning from a runtime function to user code).
  static void CheckNoLocks() {
#if SANITIZER_CHECK_DEADLOCKS
    CheckNoLocksImpl();
#endif
  }

 private:
#if SANITIZER_CHECK_DEADLOCKS
  const MutexType type_;

  void LockImpl(uptr pc);
  void UnlockImpl();
  static void CheckNoLocksImpl();
#endif
};
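// Illustrative use of the no-locks assertion, e.g. at the boundary where the
// runtime returns to user code:
//   void RuntimeFunctionEpilogue() {  // hypothetical function
//     CheckedMutex::CheckNoLocks();   // no-op unless SANITIZER_CHECK_DEADLOCKS
//   }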
// Reader-writer mutex.
// Derive from CheckedMutex for the purposes of EBO (empty base optimization).
// We could make it a field marked with [[no_unique_address]],
// but this attribute is not supported by some older compilers.
class SANITIZER_MUTEX Mutex : CheckedMutex {
 public:
  explicit constexpr Mutex(MutexType type = MutexUnchecked)
      : CheckedMutex(type) {}

  void Lock() SANITIZER_ACQUIRE() {
    CheckedMutex::Lock();
    u64 reset_mask = ~0ull;
    u64 state = atomic_load_relaxed(&state_);
    for (uptr spin_iters = 0;; spin_iters++) {
      u64 new_state;
      bool locked = (state & (kWriterLock | kReaderLockMask)) != 0;
      if (LIKELY(!locked)) {
        // The mutex is not read-/write-locked, try to lock.
        new_state = (state | kWriterLock) & reset_mask;
      } else if (spin_iters > kMaxSpinIters) {
        // We've spun enough, increment waiting writers count and block.
        // The counter will be decremented by whoever wakes us.
        new_state = (state + kWaitingWriterInc) & reset_mask;
      } else if ((state & kWriterSpinWait) == 0) {
        // Active spinning, but denote our presence so that the unlocking
        // thread does not wake up other threads.
        new_state = state | kWriterSpinWait;
      } else {
        // Active spinning.
        state = atomic_load(&state_, memory_order_relaxed);
        continue;
      }
      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                 memory_order_acquire)))
        continue;
      if (LIKELY(!locked))
        return;  // We've locked the mutex.
      if (spin_iters > kMaxSpinIters) {
        // We've incremented waiting writers, so now block.
        writers_.Wait();
        spin_iters = 0;
      } else {
        // We've set kWriterSpinWait, but we are still in active spinning.
      }
      // We either blocked and were unblocked,
      // or we just spun but set kWriterSpinWait.
      // Either way we need to reset kWriterSpinWait
      // the next time we take the lock or block again.
      reset_mask = ~kWriterSpinWait;
      state = atomic_load(&state_, memory_order_relaxed);
      DCHECK_NE(state & kWriterSpinWait, 0);
    }
  }

  void Unlock() SANITIZER_RELEASE() {
    CheckedMutex::Unlock();
    bool wake_writer;
    u64 wake_readers;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kWriterLock, 0);
      DCHECK_EQ(state & kReaderLockMask, 0);
      new_state = state & ~kWriterLock;
      wake_writer = (state & (kWriterSpinWait | kReaderSpinWait)) == 0 &&
                    (state & kWaitingWriterMask) != 0;
      if (wake_writer)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
      wake_readers =
          wake_writer || (state & kWriterSpinWait) != 0
              ? 0
              : ((state & kWaitingReaderMask) >> kWaitingReaderShift);
      if (wake_readers)
        new_state = (new_state & ~kWaitingReaderMask) | kReaderSpinWait;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake_writer))
      writers_.Post();
    else if (UNLIKELY(wake_readers))
      readers_.Post(wake_readers);
  }

  void ReadLock() SANITIZER_ACQUIRE_SHARED() {
    CheckedMutex::Lock();
    u64 reset_mask = ~0ull;
    u64 state = atomic_load_relaxed(&state_);
    for (uptr spin_iters = 0;; spin_iters++) {
      bool locked = (state & kWriterLock) != 0;
      u64 new_state;
      if (LIKELY(!locked)) {
        new_state = (state + kReaderLockInc) & reset_mask;
      } else if (spin_iters > kMaxSpinIters) {
        new_state = (state + kWaitingReaderInc) & reset_mask;
      } else if ((state & kReaderSpinWait) == 0) {
        // Active spinning, but denote our presence so that the unlocking
        // thread does not wake up other threads.
        new_state = state | kReaderSpinWait;
      } else {
        // Active spinning.
        state = atomic_load(&state_, memory_order_relaxed);
        continue;
      }
      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                 memory_order_acquire)))
        continue;
      if (LIKELY(!locked))
        return;  // We've locked the mutex.
      if (spin_iters > kMaxSpinIters) {
        // We've incremented waiting readers, so now block.
        readers_.Wait();
        spin_iters = 0;
      } else {
        // We've set kReaderSpinWait, but we are still in active spinning.
      }
      reset_mask = ~kReaderSpinWait;
      state = atomic_load(&state_, memory_order_relaxed);
    }
  }

  void ReadUnlock() SANITIZER_RELEASE_SHARED() {
    CheckedMutex::Unlock();
    bool wake;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kReaderLockMask, 0);
      DCHECK_EQ(state & kWriterLock, 0);
      new_state = state - kReaderLockInc;
      wake = (new_state &
              (kReaderLockMask | kWriterSpinWait | kReaderSpinWait)) == 0 &&
             (new_state & kWaitingWriterMask) != 0;
      if (wake)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake))
      writers_.Post();
  }

  // This function does not check that the calling thread is the thread that
  // owns the mutex. Such an ownership check, while more strictly correct,
  // causes problems in cases like StopTheWorld, where a parent thread owns
  // the mutex but a child checks that it is locked. Rather than maintaining
  // complex state to work around those situations, we only verify that the
  // mutex is locked at all.
  void CheckWriteLocked() const SANITIZER_CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
  }

  void CheckLocked() const SANITIZER_CHECK_LOCKED() { CheckWriteLocked(); }

  void CheckReadLocked() const SANITIZER_CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
  }

 private:
  atomic_uint64_t state_ = {0};
  Semaphore writers_;
  Semaphore readers_;

  // The state has 3 counters:
  //  - number of readers holding the lock,
  //    if non-zero, the mutex is read-locked
  //  - number of waiting readers,
  //    if non-zero, the mutex is write-locked
  //  - number of waiting writers,
  //    if non-zero, the mutex is read- or write-locked
  // And 3 flags:
  //  - writer lock
  //    if set, the mutex is write-locked
  //  - a writer is awake and spin-waiting
  //    the flag is used to prevent the thundering herd problem
  //    (new writers are not woken if this flag is set)
  //  - a reader is awake and spin-waiting
  //
  // Both writers and readers use active spinning before blocking.
  // But readers are more aggressive and always take the mutex
  // if there are any other readers.
  // After wakeup both writers and readers compete to lock the
  // mutex again. This is needed to allow repeated locks even in the presence
  // of other blocked threads.
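  //
  // Resulting bit layout of state_ (derived from the constants below):
  //   bits  0..19: readers holding the lock   (kReaderLockMask)
  //   bits 20..39: waiting readers            (kWaitingReaderMask)
  //   bits 40..59: waiting writers            (kWaitingWriterMask)
  //   bit  60:     kWriterLock
  //   bit  61:     kWriterSpinWait
  //   bit  62:     kReaderSpinWait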
  static constexpr u64 kCounterWidth = 20;
  static constexpr u64 kReaderLockShift = 0;
  static constexpr u64 kReaderLockInc = 1ull << kReaderLockShift;
  static constexpr u64 kReaderLockMask = ((1ull << kCounterWidth) - 1)
                                         << kReaderLockShift;
  static constexpr u64 kWaitingReaderShift = kCounterWidth;
  static constexpr u64 kWaitingReaderInc = 1ull << kWaitingReaderShift;
  static constexpr u64 kWaitingReaderMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingReaderShift;
  static constexpr u64 kWaitingWriterShift = 2 * kCounterWidth;
  static constexpr u64 kWaitingWriterInc = 1ull << kWaitingWriterShift;
  static constexpr u64 kWaitingWriterMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingWriterShift;
  static constexpr u64 kWriterLock = 1ull << (3 * kCounterWidth);
  static constexpr u64 kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);
  static constexpr u64 kReaderSpinWait = 1ull << (3 * kCounterWidth + 2);

  static constexpr uptr kMaxSpinIters = 1500;

  Mutex(LinkerInitialized) = delete;
  Mutex(const Mutex &) = delete;
  void operator=(const Mutex &) = delete;
};
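// OS-dependent park/unpark primitives, declared here and presumably
// implemented once per platform. Assuming the usual futex contract:
// FutexWait blocks while *p == cmp; FutexWake wakes up to count waiters.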
void FutexWait(atomic_uint32_t *p, u32 cmp);
void FutexWake(atomic_uint32_t *p, u32 count);

template <typename MutexType>
class SANITIZER_SCOPED_LOCK GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu) SANITIZER_ACQUIRE(mu) : mu_(mu) {
    mu_->Lock();
  }

  ~GenericScopedLock() SANITIZER_RELEASE() { mu_->Unlock(); }

 private:
  MutexType *mu_;

  GenericScopedLock(const GenericScopedLock &) = delete;
  void operator=(const GenericScopedLock &) = delete;
};
template <typename MutexType>
class SANITIZER_SCOPED_LOCK GenericScopedReadLock {
 public:
  explicit GenericScopedReadLock(MutexType *mu) SANITIZER_ACQUIRE(mu)
      : mu_(mu) {
    mu_->ReadLock();
  }

  ~GenericScopedReadLock() SANITIZER_RELEASE() { mu_->ReadUnlock(); }

 private:
  MutexType *mu_;

  GenericScopedReadLock(const GenericScopedReadLock &) = delete;
  void operator=(const GenericScopedReadLock &) = delete;
};
template <typename MutexType>
class SANITIZER_SCOPED_LOCK GenericScopedRWLock {
 public:
  ALWAYS_INLINE explicit GenericScopedRWLock(MutexType *mu, bool write)
      SANITIZER_ACQUIRE(mu)
      : mu_(mu), write_(write) {
    if (write_)
      mu_->Lock();
    else
      mu_->ReadLock();
  }

  ALWAYS_INLINE ~GenericScopedRWLock() SANITIZER_RELEASE() {
    if (write_)
      mu_->Unlock();
    else
      mu_->ReadUnlock();
  }

 private:
  MutexType *mu_;
  bool write_;

  GenericScopedRWLock(const GenericScopedRWLock &) = delete;
  void operator=(const GenericScopedRWLock &) = delete;
};
typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<Mutex> Lock;
typedef GenericScopedReadLock<Mutex> ReadLock;
typedef GenericScopedRWLock<Mutex> RWLock;
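// Typical RAII usage (an illustrative sketch built from the types above):
//   Mutex mu;
//   {
//     Lock l(&mu);       // exclusive; Unlock() runs at scope exit
//   }
//   {
//     ReadLock l(&mu);   // shared; ReadUnlock() runs at scope exit
//   }
//   {
//     RWLock l(&mu, /*write=*/false);  // mode chosen at runtime
//   }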
}  // namespace __sanitizer

#endif  // SANITIZER_MUTEX_H