//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_MUTEX_H
#define SANITIZER_MUTEX_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_thread_safety.h"

namespace __sanitizer {

class SANITIZER_MUTEX StaticSpinMutex {
 public:
  void Init() {
    atomic_store(&state_, 0, memory_order_relaxed);
  }

  void Lock() SANITIZER_ACQUIRE() {
    if (LIKELY(TryLock()))
      return;
    LockSlow();
  }

  bool TryLock() SANITIZER_TRY_ACQUIRE(true) {
    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
  }

  void Unlock() SANITIZER_RELEASE() {
    atomic_store(&state_, 0, memory_order_release);
  }

  void CheckLocked() const SANITIZER_CHECK_LOCKED() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
  }

 private:
  atomic_uint8_t state_;

  void LockSlow();
};

class SANITIZER_MUTEX SpinMutex : public StaticSpinMutex {
 public:
  SpinMutex() {
    Init();
  }

  SpinMutex(const SpinMutex &) = delete;
  void operator=(const SpinMutex &) = delete;
};
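
// Usage sketch (illustrative only, not part of the upstream header; the
// function and its parameters are hypothetical): StaticSpinMutex relies on
// zero-initialized storage or an explicit Init() call, while SpinMutex adds a
// constructor for ordinary objects.
inline void ExampleIncrementUnderSpinLock(SpinMutex *mu, u64 *counter) {
  mu->Lock();  // Spins until acquired; TryLock() is the non-blocking variant.
  ++*counter;
  mu->Unlock();
}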

// Semaphore provides an OS-dependent way to park/unpark threads.
// The last thread returned from Wait can destroy the object
// (destruction-safety).
class Semaphore {
 public:
  constexpr Semaphore() {}
  Semaphore(const Semaphore &) = delete;
  void operator=(const Semaphore &) = delete;

  void Wait();
  void Post(u32 count = 1);

 private:
  atomic_uint32_t state_ = {0};
};
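
// Usage sketch (illustrative only; the function and its `waiter` flag are
// hypothetical): a minimal one-shot handoff. Per the destruction-safety note
// above, the waiting side may destroy the semaphore as soon as Wait() returns.
inline void ExampleSemaphoreHandoff(Semaphore *sem, bool waiter) {
  if (waiter)
    sem->Wait();  // Parks until a matching Post() arrives.
  else
    sem->Post();  // Unparks one waiter (count defaults to 1).
}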

typedef int MutexType;

enum {
  // Used as sentinel and to catch unassigned types
  // (should not be used as a real mutex type).
  MutexInvalid = 0,
  MutexThreadRegistry,
  // Each tool's own mutex types must start at this number.
  MutexLastCommon,
  // Type for legacy mutexes that are not checked for deadlocks.
  MutexUnchecked = -1,
  // Special marks that can be used in the MutexMeta::can_lock table.
  // The leaf mutexes can be locked under any other non-leaf mutex,
  // but no other mutex can be locked while under a leaf mutex.
  MutexLeaf = -1,
  // Multiple mutexes of this type can be locked at the same time.
  MutexMulti = -3,
};
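
// Sketch of how a tool extends the common types (hypothetical enumerator
// names; real tools declare these in their own headers): numbering starts at
// MutexLastCommon so the common and tool-specific ranges never collide.
enum {
  MutexTypeExampleTrace = MutexLastCommon,  // First tool-specific type.
  MutexTypeExampleReport,
};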

// Go linker does not support THREADLOCAL variables,
// so we can't use per-thread state.
// Disable checked locks on Darwin. Although Darwin platforms support
// THREADLOCAL variables, they are not usable early on during process init
// when `__sanitizer::Mutex` is used.
#define SANITIZER_CHECK_DEADLOCKS \
  (SANITIZER_DEBUG && !SANITIZER_GO && SANITIZER_SUPPORTS_THREADLOCAL && !SANITIZER_APPLE)

#if SANITIZER_CHECK_DEADLOCKS
struct MutexMeta {
  MutexType type;
  const char *name;
  // The table defines which mutexes can be locked under which other mutexes.
  // If the entry for MutexTypeFoo contains MutexTypeBar,
  // then Bar mutex can be locked while under Foo mutex.
  // Can also contain the special MutexLeaf/MutexMulti marks.
  MutexType can_lock[10];
};
#endif
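
#if SANITIZER_CHECK_DEADLOCKS
// Illustrative sketch (hypothetical rows; the real table lives in the
// runtime's .cpp file): the first row allows the report mutex to be taken
// while the trace mutex is held; the MutexLeaf mark says nothing may be
// taken while the report mutex is held.
inline constexpr MutexMeta kExampleMutexMeta[] = {
    {MutexTypeExampleTrace, "ExampleTrace", {MutexTypeExampleReport}},
    {MutexTypeExampleReport, "ExampleReport", {MutexLeaf}},
};
#endif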

class CheckedMutex {
 public:
  explicit constexpr CheckedMutex(MutexType type)
#if SANITIZER_CHECK_DEADLOCKS
      : type_(type)
#endif
  {
  }

  ALWAYS_INLINE void Lock() {
#if SANITIZER_CHECK_DEADLOCKS
    LockImpl(GET_CALLER_PC());
#endif
  }

  ALWAYS_INLINE void Unlock() {
#if SANITIZER_CHECK_DEADLOCKS
    UnlockImpl();
#endif
  }

  // Checks that the current thread does not hold any mutexes
  // (e.g. when returning from a runtime function to user code).
  static void CheckNoLocks() {
#if SANITIZER_CHECK_DEADLOCKS
    CheckNoLocksImpl();
#endif
  }

 private:
#if SANITIZER_CHECK_DEADLOCKS
  const MutexType type_;

  void LockImpl(uptr pc);
  void UnlockImpl();
  static void CheckNoLocksImpl();
#endif
};
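
// Illustrative sketch (not part of the upstream header; the function is
// hypothetical): a runtime entry point asserts on the way back to user code
// that it released everything it locked. In release builds this compiles to
// nothing, since SANITIZER_CHECK_DEADLOCKS requires SANITIZER_DEBUG.
inline void ExampleRuntimeToUserBoundary() {
  // ... runtime work that locks and unlocks internal mutexes ...
  CheckedMutex::CheckNoLocks();  // No-op unless SANITIZER_CHECK_DEADLOCKS.
}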

// Reader-writer mutex.
// Derive from CheckedMutex for the purposes of EBO.
// We could make it a field marked with [[no_unique_address]],
// but this attribute is not supported by some older compilers.
class SANITIZER_MUTEX Mutex : CheckedMutex {
 public:
  explicit constexpr Mutex(MutexType type = MutexUnchecked)
      : CheckedMutex(type) {}

  void Lock() SANITIZER_ACQUIRE() {
    CheckedMutex::Lock();
    u64 reset_mask = ~0ull;
    u64 state = atomic_load_relaxed(&state_);
    for (uptr spin_iters = 0;; spin_iters++) {
      u64 new_state;
      bool locked = (state & (kWriterLock | kReaderLockMask)) != 0;
      if (LIKELY(!locked)) {
        // The mutex is not read-/write-locked, try to lock.
        new_state = (state | kWriterLock) & reset_mask;
      } else if (spin_iters > kMaxSpinIters) {
        // We've spun enough; increment the waiting-writer count and block.
        // The counter will be decremented by whoever wakes us.
        new_state = (state + kWaitingWriterInc) & reset_mask;
      } else if ((state & kWriterSpinWait) == 0) {
        // Active spinning, but denote our presence so that the unlocking
        // thread does not wake up other threads.
        new_state = state | kWriterSpinWait;
      } else {
        // Active spinning.
        state = atomic_load(&state_, memory_order_relaxed);
        continue;
      }
      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                 memory_order_acquire)))
        continue;
      if (LIKELY(!locked))
        return;  // We've locked the mutex.
      if (spin_iters > kMaxSpinIters) {
        // We've incremented waiting writers, so now block.
        writers_.Wait();
        spin_iters = 0;
      } else {
        // We've set kWriterSpinWait, but we are still in active spinning.
      }
      // We either blocked and were unblocked,
      // or we just spun but set kWriterSpinWait.
      // Either way we need to reset kWriterSpinWait
      // next time we take the lock or block again.
      reset_mask = ~kWriterSpinWait;
      state = atomic_load(&state_, memory_order_relaxed);
      DCHECK_NE(state & kWriterSpinWait, 0);
    }
  }

  bool TryLock() SANITIZER_TRY_ACQUIRE(true) {
    u64 state = atomic_load_relaxed(&state_);
    for (;;) {
      if (UNLIKELY(state & (kWriterLock | kReaderLockMask)))
        return false;
      // The mutex is not read-/write-locked, try to lock.
      if (LIKELY(atomic_compare_exchange_weak(
              &state_, &state, state | kWriterLock, memory_order_acquire))) {
        CheckedMutex::Lock();
        return true;
      }
    }
  }

  void Unlock() SANITIZER_RELEASE() {
    CheckedMutex::Unlock();
    bool wake_writer;
    u64 wake_readers;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kWriterLock, 0);
      DCHECK_EQ(state & kReaderLockMask, 0);
      new_state = state & ~kWriterLock;
      wake_writer = (state & (kWriterSpinWait | kReaderSpinWait)) == 0 &&
                    (state & kWaitingWriterMask) != 0;
      if (wake_writer)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
      wake_readers =
          wake_writer || (state & kWriterSpinWait) != 0
              ? 0
              : ((state & kWaitingReaderMask) >> kWaitingReaderShift);
      if (wake_readers)
        new_state = (new_state & ~kWaitingReaderMask) | kReaderSpinWait;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake_writer))
      writers_.Post();
    else if (UNLIKELY(wake_readers))
      readers_.Post(wake_readers);
  }

  void ReadLock() SANITIZER_ACQUIRE_SHARED() {
    CheckedMutex::Lock();
    u64 reset_mask = ~0ull;
    u64 state = atomic_load_relaxed(&state_);
    for (uptr spin_iters = 0;; spin_iters++) {
      bool locked = (state & kWriterLock) != 0;
      u64 new_state;
      if (LIKELY(!locked)) {
        new_state = (state + kReaderLockInc) & reset_mask;
      } else if (spin_iters > kMaxSpinIters) {
        new_state = (state + kWaitingReaderInc) & reset_mask;
      } else if ((state & kReaderSpinWait) == 0) {
        // Active spinning, but denote our presence so that the unlocking
        // thread does not wake up other threads.
        new_state = state | kReaderSpinWait;
      } else {
        // Active spinning.
        state = atomic_load(&state_, memory_order_relaxed);
        continue;
      }
      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                 memory_order_acquire)))
        continue;
      if (LIKELY(!locked))
        return;  // We've locked the mutex.
      if (spin_iters > kMaxSpinIters) {
        // We've incremented waiting readers, so now block.
        readers_.Wait();
        spin_iters = 0;
      } else {
        // We've set kReaderSpinWait, but we are still in active spinning.
      }
      reset_mask = ~kReaderSpinWait;
      state = atomic_load(&state_, memory_order_relaxed);
    }
  }

  void ReadUnlock() SANITIZER_RELEASE_SHARED() {
    CheckedMutex::Unlock();
    bool wake;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kReaderLockMask, 0);
      DCHECK_EQ(state & kWriterLock, 0);
      new_state = state - kReaderLockInc;
      wake = (new_state &
              (kReaderLockMask | kWriterSpinWait | kReaderSpinWait)) == 0 &&
             (new_state & kWaitingWriterMask) != 0;
      if (wake)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake))
      writers_.Post();
  }
  // This function does not check that the calling thread is the one that
  // holds the mutex. Such a check, while more strictly correct, causes
  // problems in cases like StopTheWorld, where a parent thread owns the
  // mutex but a child checks that it is locked. Rather than maintaining
  // complex state to work around those situations, the check only verifies
  // that the mutex is write-locked by somebody.
  void CheckWriteLocked() const SANITIZER_CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
  }

  void CheckLocked() const SANITIZER_CHECK_LOCKED() { CheckWriteLocked(); }

  void CheckReadLocked() const SANITIZER_CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
  }
 private:
  atomic_uint64_t state_ = {0};
  Semaphore writers_;
  Semaphore readers_;

  // The state has 3 counters:
  //  - number of readers holding the lock,
  //    if non zero, the mutex is read-locked
  //  - number of waiting readers,
  //    if non zero, the mutex is write-locked
  //  - number of waiting writers,
  //    if non zero, the mutex is read- or write-locked
  // And 3 flags:
  //  - writer lock
  //    if set, the mutex is write-locked
  //  - a writer is awake and spin-waiting
  //    the flag is used to prevent the thundering herd problem
  //    (new writers are not woken if this flag is set)
  //  - a reader is awake and spin-waiting
  //
  // Both writers and readers use active spinning before blocking.
  // But readers are more aggressive and always take the mutex
  // if there are any other readers.
  // After wake-up both writers and readers compete to lock the
  // mutex again. This is needed to allow repeated locks even in the
  // presence of other blocked threads.
  static constexpr u64 kCounterWidth = 20;
  static constexpr u64 kReaderLockShift = 0;
  static constexpr u64 kReaderLockInc = 1ull << kReaderLockShift;
  static constexpr u64 kReaderLockMask = ((1ull << kCounterWidth) - 1)
                                         << kReaderLockShift;
  static constexpr u64 kWaitingReaderShift = kCounterWidth;
  static constexpr u64 kWaitingReaderInc = 1ull << kWaitingReaderShift;
  static constexpr u64 kWaitingReaderMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingReaderShift;
  static constexpr u64 kWaitingWriterShift = 2 * kCounterWidth;
  static constexpr u64 kWaitingWriterInc = 1ull << kWaitingWriterShift;
  static constexpr u64 kWaitingWriterMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingWriterShift;
  static constexpr u64 kWriterLock = 1ull << (3 * kCounterWidth);
  static constexpr u64 kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);
  static constexpr u64 kReaderSpinWait = 1ull << (3 * kCounterWidth + 2);

  static constexpr uptr kMaxSpinIters = 1500;
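
  // Layout sanity checks (illustrative additions, not in the upstream
  // header): three 20-bit counters plus three flag bits occupy 63 of the
  // 64 state bits, and no field overlaps another.
  static_assert(3 * kCounterWidth + 3 <= 64, "state layout overflows u64");
  static_assert((kReaderLockMask & kWaitingReaderMask) == 0 &&
                    (kWaitingReaderMask & kWaitingWriterMask) == 0,
                "counters overlap");
  static_assert(((kWriterLock | kWriterSpinWait | kReaderSpinWait) &
                 (kReaderLockMask | kWaitingReaderMask |
                  kWaitingWriterMask)) == 0,
                "flags overlap counters");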

  Mutex(LinkerInitialized) = delete;
  Mutex(const Mutex &) = delete;
  void operator=(const Mutex &) = delete;
};
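
// Usage sketch (illustrative only; the functions and their parameters are
// hypothetical): writers pair Lock/Unlock, readers pair ReadLock/ReadUnlock.
// The scoped wrappers below are normally preferred over these raw calls.
inline u64 ExampleReadValue(Mutex *mu, const u64 *value) {
  mu->ReadLock();  // Shared: other readers may hold the mutex concurrently.
  u64 res = *value;
  mu->ReadUnlock();
  return res;
}

inline void ExampleWriteValue(Mutex *mu, u64 *value, u64 v) {
  mu->Lock();  // Exclusive: blocks out both readers and writers.
  *value = v;
  mu->Unlock();
}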

void FutexWait(atomic_uint32_t *p, u32 cmp);
void FutexWake(atomic_uint32_t *p, u32 count);
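
// Illustrative sketch (not part of the upstream header; function names are
// hypothetical): the canonical pattern these primitives support. The waiter
// re-checks the value in a loop because FutexWait may return spuriously; it
// blocks only while *w still equals the compared value.
inline void ExampleFutexWaitUntilNonZero(atomic_uint32_t *w) {
  while (atomic_load(w, memory_order_acquire) == 0)
    FutexWait(w, 0);  // Sleeps if *w == 0; woken by a later FutexWake.
}

inline void ExampleFutexSignal(atomic_uint32_t *w) {
  atomic_store(w, 1, memory_order_release);
  FutexWake(w, 1);  // Wakes one waiter; pass a large count to wake all.
}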

template <typename MutexType>
class SANITIZER_SCOPED_LOCK GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu) SANITIZER_ACQUIRE(mu) : mu_(mu) {
    mu_->Lock();
  }

  ~GenericScopedLock() SANITIZER_RELEASE() { mu_->Unlock(); }

 private:
  MutexType *mu_;

  GenericScopedLock(const GenericScopedLock &) = delete;
  void operator=(const GenericScopedLock &) = delete;
};

template <typename MutexType>
class SANITIZER_SCOPED_LOCK GenericScopedReadLock {
 public:
  explicit GenericScopedReadLock(MutexType *mu) SANITIZER_ACQUIRE(mu)
      : mu_(mu) {
    mu_->ReadLock();
  }

  ~GenericScopedReadLock() SANITIZER_RELEASE() { mu_->ReadUnlock(); }

 private:
  MutexType *mu_;

  GenericScopedReadLock(const GenericScopedReadLock &) = delete;
  void operator=(const GenericScopedReadLock &) = delete;
};

template <typename MutexType>
class SANITIZER_SCOPED_LOCK GenericScopedRWLock {
 public:
  ALWAYS_INLINE explicit GenericScopedRWLock(MutexType *mu, bool write)
      SANITIZER_ACQUIRE(mu)
      : mu_(mu), write_(write) {
    if (write_)
      mu_->Lock();
    else
      mu_->ReadLock();
  }

  ALWAYS_INLINE ~GenericScopedRWLock() SANITIZER_RELEASE() {
    if (write_)
      mu_->Unlock();
    else
      mu_->ReadUnlock();
  }

 private:
  MutexType *mu_;
  bool write_;

  GenericScopedRWLock(const GenericScopedRWLock &) = delete;
  void operator=(const GenericScopedRWLock &) = delete;
};

typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<Mutex> Lock;
typedef GenericScopedReadLock<Mutex> ReadLock;
typedef GenericScopedRWLock<Mutex> RWLock;
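
// Usage sketch (illustrative only; the function is hypothetical): the scoped
// typedefs are the idiomatic way to take these mutexes, since the destructor
// releases the lock on every path out of the scope.
inline void ExampleScopedUsage(Mutex *mu, bool write) {
  RWLock l(mu, write);  // Lock() or ReadLock() now; matching unlock at exit.
  // ... the critical section goes here ...
}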

}  // namespace __sanitizer

#endif  // SANITIZER_MUTEX_H