  1. // Copyright 2017 The Abseil Authors.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // https://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. //
  15. // -----------------------------------------------------------------------------
  16. // mutex.h
  17. // -----------------------------------------------------------------------------
  18. //
  19. // This header file defines a `Mutex` -- a mutually exclusive lock -- and the
  20. // most common type of synchronization primitive for facilitating locks on
  21. // shared resources. A mutex is used to prevent multiple threads from accessing
  22. // and/or writing to a shared resource concurrently.
  23. //
  24. // Unlike a `std::mutex`, the Abseil `Mutex` provides the following additional
  25. // features:
  26. // * Conditional predicates intrinsic to the `Mutex` object
  27. // * Shared/reader locks, in addition to standard exclusive/writer locks
  28. // * Deadlock detection and debug support.
  29. //
  30. // The following helper classes are also defined within this file:
  31. //
  32. // MutexLock - An RAII wrapper to acquire and release a `Mutex` for exclusive/
  33. // write access within the current scope.
  34. //
  35. // ReaderMutexLock
  36. // - An RAII wrapper to acquire and release a `Mutex` for shared/read
  37. // access within the current scope.
  38. //
  39. // WriterMutexLock
  40. // - Effectively an alias for `MutexLock` above, designed for use in
  41. // distinguishing reader and writer locks within code.
  42. //
  43. // In addition to simple mutex locks, this file also defines ways to perform
  44. // locking under certain conditions.
  45. //
  46. // Condition - (Preferred) Used to wait for a particular predicate that
  47. // depends on state protected by the `Mutex` to become true.
  48. // CondVar - A lower-level variant of `Condition` that relies on
  49. // application code to explicitly signal the `CondVar` when
  50. // a condition has been met.
  51. //
  52. // See below for more information on using `Condition` or `CondVar`.
  53. //
  54. // Mutexes and mutex behavior can be quite complicated. The information within
  55. // this header file is limited, as a result. Please consult the Mutex guide for
  56. // more complete information and examples.
  57. #ifndef Y_ABSL_SYNCHRONIZATION_MUTEX_H_
  58. #define Y_ABSL_SYNCHRONIZATION_MUTEX_H_
  59. #include <atomic>
  60. #include <cstdint>
  61. #include <cstring>
  62. #include <iterator>
  63. #include <util/generic/string.h>
  64. #include "y_absl/base/attributes.h"
  65. #include "y_absl/base/const_init.h"
  66. #include "y_absl/base/internal/identity.h"
  67. #include "y_absl/base/internal/low_level_alloc.h"
  68. #include "y_absl/base/internal/thread_identity.h"
  69. #include "y_absl/base/internal/tsan_mutex_interface.h"
  70. #include "y_absl/base/port.h"
  71. #include "y_absl/base/thread_annotations.h"
  72. #include "y_absl/synchronization/internal/kernel_timeout.h"
  73. #include "y_absl/synchronization/internal/per_thread_sem.h"
  74. #include "y_absl/time/time.h"
  75. namespace y_absl {
  76. Y_ABSL_NAMESPACE_BEGIN
  77. class Condition;
  78. struct SynchWaitParams;
  79. // -----------------------------------------------------------------------------
  80. // Mutex
  81. // -----------------------------------------------------------------------------
  82. //
  83. // A `Mutex` is a non-reentrant (aka non-recursive) Mutually Exclusive lock
  84. // on some resource, typically a variable or data structure with associated
  85. // invariants. Proper usage of mutexes prevents concurrent access by different
  86. // threads to the same resource.
  87. //
  88. // A `Mutex` has two basic operations: `Mutex::Lock()` and `Mutex::Unlock()`.
  89. // The `Lock()` operation *acquires* a `Mutex` (in a state known as an
  90. // *exclusive* -- or *write* -- lock), and the `Unlock()` operation *releases* a
  91. // Mutex. During the span of time between the Lock() and Unlock() operations,
  92. // a mutex is said to be *held*. By design, all mutexes support exclusive/write
  93. // locks, as this is the most common way to use a mutex.
  94. //
  95. // Mutex operations are only allowed under certain conditions; otherwise an
  96. // operation is "invalid", and disallowed by the API. The conditions concern
  97. // both the current state of the mutex and the identity of the threads that
  98. // are performing the operations.
  99. //
  100. // The `Mutex` state machine for basic lock/unlock operations is quite simple:
  101. //
  102. // | | Lock() | Unlock() |
  103. // |----------------+------------------------+----------|
  104. // | Free | Exclusive | invalid |
  105. // | Exclusive | blocks, then exclusive | Free |
  106. //
  107. // The full conditions are as follows.
  108. //
  109. // * Calls to `Unlock()` require that the mutex be held, and must be made in the
  110. // same thread that performed the corresponding `Lock()` operation which
  111. // acquired the mutex; otherwise the call is invalid.
  112. //
  113. // * The mutex being non-reentrant (or non-recursive) means that a call to
  114. // `Lock()` or `TryLock()` must not be made in a thread that already holds the
  115. // mutex; such a call is invalid.
  116. //
  117. // * In other words, the state of being "held" has both a temporal component
  118. // (from `Lock()` until `Unlock()`) as well as a thread identity component:
  119. // the mutex is held *by a particular thread*.
  120. //
  121. // An "invalid" operation has undefined behavior. The `Mutex` implementation
  122. // is allowed to do anything on an invalid call, including, but not limited to,
  123. // crashing with a useful error message, silently succeeding, or corrupting
  124. // data structures. In debug mode, the implementation may crash with a useful
  125. // error message.
  126. //
  127. // `Mutex` is not guaranteed to be "fair" in prioritizing waiting threads; it
  128. // is, however, approximately fair over long periods, and starvation-free for
  129. // threads at the same priority.
  130. //
  131. // The lock/unlock primitives are now annotated with lock annotations
  132. // defined in (base/thread_annotations.h). When writing multi-threaded code,
  133. // you should use lock annotations whenever possible to document your lock
  134. // synchronization policy. Besides acting as documentation, these annotations
  135. // also help compilers or static analysis tools to identify and warn about
  136. // issues that could potentially result in race conditions and deadlocks.
  137. //
  138. // For more information about the lock annotations, please see
  139. // [Thread Safety
  140. // Analysis](http://clang.llvm.org/docs/ThreadSafetyAnalysis.html) in the Clang
  141. // documentation.
  142. //
  143. // See also `MutexLock`, below, for scoped `Mutex` acquisition.
  144. class Y_ABSL_LOCKABLE Y_ABSL_ATTRIBUTE_WARN_UNUSED Mutex {
  145. public:
  146. // Creates a `Mutex` that is not held by anyone. This constructor is
  147. // typically used for Mutexes allocated on the heap or the stack.
  148. //
  149. // To create `Mutex` instances with static storage duration
  150. // (e.g. a namespace-scoped or global variable), see
  151. // `Mutex::Mutex(y_absl::kConstInit)` below instead.
  152. Mutex();
  153. // Creates a mutex with static storage duration. A global variable
  154. // constructed this way avoids the lifetime issues that can occur on program
  155. // startup and shutdown. (See y_absl/base/const_init.h.)
  156. //
  157. // For Mutexes allocated on the heap and stack, instead use the default
  158. // constructor, which can interact more fully with the thread sanitizer.
  159. //
  160. // Example usage:
  161. // namespace foo {
  162. // Y_ABSL_CONST_INIT y_absl::Mutex mu(y_absl::kConstInit);
  163. // }
  164. explicit constexpr Mutex(y_absl::ConstInitType);
  165. ~Mutex();
  166. // Mutex::Lock()
  167. //
  168. // Blocks the calling thread, if necessary, until this `Mutex` is free, and
  169. // then acquires it exclusively. (This lock is also known as a "write lock.")
  170. void Lock() Y_ABSL_EXCLUSIVE_LOCK_FUNCTION();
  171. // Mutex::Unlock()
  172. //
  173. // Releases this `Mutex` and returns it from the exclusive/write state to the
  174. // free state. Calling thread must hold the `Mutex` exclusively.
  175. void Unlock() Y_ABSL_UNLOCK_FUNCTION();
  176. // Mutex::TryLock()
  177. //
  178. // If the mutex can be acquired without blocking, does so exclusively and
  179. // returns `true`. Otherwise, returns `false`. Returns `true` with high
  180. // probability if the `Mutex` was free.
  181. Y_ABSL_MUST_USE_RESULT bool TryLock() Y_ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true);
  182. // Mutex::AssertHeld()
  183. //
  184. // Require that the mutex be held exclusively (write mode) by this thread.
  185. //
  186. // If the mutex is not currently held by this thread, this function may report
  187. // an error (typically by crashing with a diagnostic) or it may do nothing.
  188. // This function is intended only as a tool to assist debugging; it doesn't
  189. // guarantee correctness.
  190. void AssertHeld() const Y_ABSL_ASSERT_EXCLUSIVE_LOCK();
  191. // ---------------------------------------------------------------------------
  192. // Reader-Writer Locking
  193. // ---------------------------------------------------------------------------
  194. // A Mutex can also be used as a starvation-free reader-writer lock.
  195. // Neither read-locks nor write-locks are reentrant/recursive to avoid
  196. // potential client programming errors.
  197. //
  198. // The Mutex API provides `Writer*()` aliases for the existing `Lock()`,
  199. // `Unlock()` and `TryLock()` methods for use within applications mixing
  200. // reader/writer locks. Using `Reader*()` and `Writer*()` operations in this
  201. // manner can make locking behavior clearer when mixing read and write modes.
  202. //
  203. // Introducing reader locks necessarily complicates the `Mutex` state
  204. // machine somewhat. The table below illustrates the allowed state transitions
  205. // of a mutex in such cases. Note that ReaderLock() may block even if the lock
  206. // is held in shared mode; this occurs when another thread is blocked on a
  207. // call to WriterLock().
  208. //
  209. // ---------------------------------------------------------------------------
  210. // Operation: WriterLock() Unlock() ReaderLock() ReaderUnlock()
  211. // ---------------------------------------------------------------------------
  212. // State
  213. // ---------------------------------------------------------------------------
  214. // Free Exclusive invalid Shared(1) invalid
  215. // Shared(1) blocks invalid Shared(2) or blocks Free
  216. // Shared(n) n>1 blocks invalid Shared(n+1) or blocks Shared(n-1)
  217. // Exclusive blocks Free blocks invalid
  218. // ---------------------------------------------------------------------------
  219. //
  220. // In comments below, "shared" refers to a state of Shared(n) for any n > 0.
  221. // Mutex::ReaderLock()
  222. //
  223. // Blocks the calling thread, if necessary, until this `Mutex` is either free,
  224. // or in shared mode, and then acquires a share of it. Note that
  225. // `ReaderLock()` will block if some other thread has an exclusive/writer lock
  226. // on the mutex.
  227. void ReaderLock() Y_ABSL_SHARED_LOCK_FUNCTION();
  228. // Mutex::ReaderUnlock()
  229. //
  230. // Releases a read share of this `Mutex`. `ReaderUnlock` may return a mutex to
  231. // the free state if this thread holds the last reader lock on the mutex. Note
  232. // that you cannot call `ReaderUnlock()` on a mutex held in write mode.
  233. void ReaderUnlock() Y_ABSL_UNLOCK_FUNCTION();
  234. // Mutex::ReaderTryLock()
  235. //
  236. // If the mutex can be acquired without blocking, acquires this mutex for
  237. // shared access and returns `true`. Otherwise, returns `false`. Returns
  238. // `true` with high probability if the `Mutex` was free or shared.
  239. Y_ABSL_MUST_USE_RESULT bool ReaderTryLock() Y_ABSL_SHARED_TRYLOCK_FUNCTION(true);
  240. // Mutex::AssertReaderHeld()
  241. //
  242. // Require that the mutex be held at least in shared mode (read mode) by this
  243. // thread.
  244. //
  245. // If the mutex is not currently held by this thread, this function may report
  246. // an error (typically by crashing with a diagnostic) or it may do nothing.
  247. // This function is intended only as a tool to assist debugging; it doesn't
  248. // guarantee correctness.
  249. void AssertReaderHeld() const Y_ABSL_ASSERT_SHARED_LOCK();
  250. // Mutex::WriterLock()
  251. // Mutex::WriterUnlock()
  252. // Mutex::WriterTryLock()
  253. //
  254. // Aliases for `Mutex::Lock()`, `Mutex::Unlock()`, and `Mutex::TryLock()`.
  255. //
  256. // These methods may be used (along with the complementary `Reader*()`
  257. // methods) to distinguish simple exclusive `Mutex` usage (`Lock()`,
  258. // etc.) from reader/writer lock usage.
  259. void WriterLock() Y_ABSL_EXCLUSIVE_LOCK_FUNCTION() { this->Lock(); }
  260. void WriterUnlock() Y_ABSL_UNLOCK_FUNCTION() { this->Unlock(); }
  261. Y_ABSL_MUST_USE_RESULT bool WriterTryLock()
  262. Y_ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
  263. return this->TryLock();
  264. }
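// A minimal usage sketch of the Reader*()/Writer*() methods above. The
// `Cache` class and its `map_`, `Lookup()` and `Insert()` names are
// hypothetical, not part of this header; they only illustrate mixing shared
// and exclusive acquisition of one `Mutex`:
//
//   class Cache {
//    public:
//     bool Lookup(const std::string& key, std::string* value) {
//       mu_.ReaderLock();                 // many readers may hold the lock
//       auto it = map_.find(key);
//       const bool found = (it != map_.end());
//       if (found) *value = it->second;
//       mu_.ReaderUnlock();
//       return found;
//     }
//     void Insert(const std::string& key, const std::string& value) {
//       mu_.WriterLock();                 // exclusive: excludes readers too
//       map_[key] = value;
//       mu_.WriterUnlock();
//     }
//    private:
//     y_absl::Mutex mu_;
//     std::map<std::string, std::string> map_ Y_ABSL_GUARDED_BY(mu_);
//   };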
  265. // ---------------------------------------------------------------------------
  266. // Conditional Critical Regions
  267. // ---------------------------------------------------------------------------
  268. // Conditional usage of a `Mutex` can occur using two distinct paradigms:
  269. //
  270. // * Use of `Mutex` member functions with `Condition` objects.
  271. // * Use of the separate `CondVar` abstraction.
  272. //
  273. // In general, prefer use of `Condition` and the `Mutex` member functions
  274. // listed below over `CondVar`. When there are multiple threads waiting on
  275. // distinctly different conditions, however, a battery of `CondVar`s may be
  276. // more efficient. This section discusses use of `Condition` objects.
  277. //
  278. // `Mutex` contains member functions for performing lock operations only under
  279. // certain conditions, of class `Condition`. For correctness, the `Condition`
  280. // must return a boolean that is a pure function, only of state protected by
  281. // the `Mutex`. The condition must be invariant w.r.t. environmental state
  282. // such as thread, cpu id, or time, and must be `noexcept`. The condition will
  283. // always be invoked with the mutex held in at least read mode, so you should
  284. // not block it for long periods or sleep it on a timer.
  285. //
  286. // Since a condition must not depend directly on the current time, use
  287. // `*WithTimeout()` member function variants to make your condition
  288. // effectively true after a given duration, or `*WithDeadline()` variants to
  289. // make your condition effectively true after a given time.
  290. //
  291. // The condition function should have no side-effects aside from debug
  292. // logging; as a special exception, the function may acquire other mutexes
  293. // provided it releases all those that it acquires. (This exception was
  294. // required to allow logging.)
  295. // Mutex::Await()
  296. //
  297. // Unlocks this `Mutex` and blocks until simultaneously both `cond` is `true`
  298. // and this `Mutex` can be reacquired, then reacquires this `Mutex` in the
  299. // same mode in which it was previously held. If the condition is initially
  300. // `true`, `Await()` *may* skip the release/re-acquire step.
  301. //
  302. // `Await()` requires that this thread holds this `Mutex` in some mode.
  303. void Await(const Condition& cond) {
  304. AwaitCommon(cond, synchronization_internal::KernelTimeout::Never());
  305. }
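// A minimal sketch of `Await()`. The `pending_` counter and `DrainDone()`
// function are hypothetical; the lambda reads only state protected by `mu_`,
// as a `Condition` must:
//
//   int pending_ Y_ABSL_GUARDED_BY(mu_);
//
//   void DrainDone() {
//     mu_.Lock();
//     // Releases and reacquires mu_ as needed until pending_ == 0.
//     mu_.Await(y_absl::Condition(
//         +[](int* pending) { return *pending == 0; }, &pending_));
//     // pending_ == 0 and mu_ is held again here.
//     mu_.Unlock();
//   }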
  306. // Mutex::LockWhen()
  307. // Mutex::ReaderLockWhen()
  308. // Mutex::WriterLockWhen()
  309. //
  310. // Blocks until simultaneously both `cond` is `true` and this `Mutex` can
  311. // be acquired, then atomically acquires this `Mutex`. `LockWhen()` is
  312. // logically equivalent to `*Lock(); Await();` though they may have different
  313. // performance characteristics.
  314. void LockWhen(const Condition& cond) Y_ABSL_EXCLUSIVE_LOCK_FUNCTION() {
  315. LockWhenCommon(cond, synchronization_internal::KernelTimeout::Never(),
  316. true);
  317. }
  318. void ReaderLockWhen(const Condition& cond) Y_ABSL_SHARED_LOCK_FUNCTION() {
  319. LockWhenCommon(cond, synchronization_internal::KernelTimeout::Never(),
  320. false);
  321. }
  322. void WriterLockWhen(const Condition& cond) Y_ABSL_EXCLUSIVE_LOCK_FUNCTION() {
  323. this->LockWhen(cond);
  324. }
  325. // ---------------------------------------------------------------------------
  326. // Mutex Variants with Timeouts/Deadlines
  327. // ---------------------------------------------------------------------------
  328. // Mutex::AwaitWithTimeout()
  329. // Mutex::AwaitWithDeadline()
  330. //
  331. // Unlocks this `Mutex` and blocks until simultaneously:
  332. // - either `cond` is true or the {timeout has expired, deadline has passed}
  333. // and
  334. // - this `Mutex` can be reacquired,
  335. // then reacquire this `Mutex` in the same mode in which it was previously
  336. // held, returning `true` iff `cond` is `true` on return.
  337. //
  338. // If the condition is initially `true`, the implementation *may* skip the
  339. // release/re-acquire step and return immediately.
  340. //
  341. // Deadlines in the past are equivalent to an immediate deadline.
  342. // Negative timeouts are equivalent to a zero timeout.
  343. //
  344. // This method requires that this thread holds this `Mutex` in some mode.
  345. bool AwaitWithTimeout(const Condition& cond, y_absl::Duration timeout) {
  346. return AwaitCommon(cond, synchronization_internal::KernelTimeout{timeout});
  347. }
  348. bool AwaitWithDeadline(const Condition& cond, y_absl::Time deadline) {
  349. return AwaitCommon(cond, synchronization_internal::KernelTimeout{deadline});
  350. }
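// A minimal sketch of a bounded wait with `AwaitWithTimeout()`. The `done_`
// flag and `WaitForDone()` are hypothetical:
//
//   bool done_ Y_ABSL_GUARDED_BY(mu_) = false;
//
//   bool WaitForDone() {
//     y_absl::MutexLock lock(&mu_);
//     // Returns true iff done_ is true on return; false means the timeout
//     // expired first. Either way mu_ is held again when the call returns.
//     return mu_.AwaitWithTimeout(y_absl::Condition(&done_),
//                                 y_absl::Seconds(2));
//   }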
  351. // Mutex::LockWhenWithTimeout()
  352. // Mutex::ReaderLockWhenWithTimeout()
  353. // Mutex::WriterLockWhenWithTimeout()
  354. //
  355. // Blocks until simultaneously both:
  356. // - either `cond` is `true` or the timeout has expired, and
  357. // - this `Mutex` can be acquired,
  358. // then atomically acquires this `Mutex`, returning `true` iff `cond` is
  359. // `true` on return.
  360. //
  361. // Negative timeouts are equivalent to a zero timeout.
  362. bool LockWhenWithTimeout(const Condition& cond, y_absl::Duration timeout)
  363. Y_ABSL_EXCLUSIVE_LOCK_FUNCTION() {
  364. return LockWhenCommon(
  365. cond, synchronization_internal::KernelTimeout{timeout}, true);
  366. }
  367. bool ReaderLockWhenWithTimeout(const Condition& cond, y_absl::Duration timeout)
  368. Y_ABSL_SHARED_LOCK_FUNCTION() {
  369. return LockWhenCommon(
  370. cond, synchronization_internal::KernelTimeout{timeout}, false);
  371. }
  372. bool WriterLockWhenWithTimeout(const Condition& cond, y_absl::Duration timeout)
  373. Y_ABSL_EXCLUSIVE_LOCK_FUNCTION() {
  374. return this->LockWhenWithTimeout(cond, timeout);
  375. }
  376. // Mutex::LockWhenWithDeadline()
  377. // Mutex::ReaderLockWhenWithDeadline()
  378. // Mutex::WriterLockWhenWithDeadline()
  379. //
  380. // Blocks until simultaneously both:
  381. // - either `cond` is `true` or the deadline has been passed, and
  382. // - this `Mutex` can be acquired,
  383. // then atomically acquires this Mutex, returning `true` iff `cond` is `true`
  384. // on return.
  385. //
  386. // Deadlines in the past are equivalent to an immediate deadline.
  387. bool LockWhenWithDeadline(const Condition& cond, y_absl::Time deadline)
  388. Y_ABSL_EXCLUSIVE_LOCK_FUNCTION() {
  389. return LockWhenCommon(
  390. cond, synchronization_internal::KernelTimeout{deadline}, true);
  391. }
  392. bool ReaderLockWhenWithDeadline(const Condition& cond, y_absl::Time deadline)
  393. Y_ABSL_SHARED_LOCK_FUNCTION() {
  394. return LockWhenCommon(
  395. cond, synchronization_internal::KernelTimeout{deadline}, false);
  396. }
  397. bool WriterLockWhenWithDeadline(const Condition& cond, y_absl::Time deadline)
  398. Y_ABSL_EXCLUSIVE_LOCK_FUNCTION() {
  399. return this->LockWhenWithDeadline(cond, deadline);
  400. }
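// A minimal sketch of `LockWhenWithDeadline()`. The `has_items_` flag,
// `MaybeConsume()` and the 500ms budget are hypothetical:
//
//   bool has_items_ Y_ABSL_GUARDED_BY(mu_) = false;
//
//   void MaybeConsume() {
//     if (mu_.LockWhenWithDeadline(y_absl::Condition(&has_items_),
//                                  y_absl::Now() + y_absl::Milliseconds(500))) {
//       // has_items_ was true on return; consume protected state here.
//     } else {
//       // Deadline passed; mu_ is still held, just without the condition.
//     }
//     mu_.Unlock();
//   }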
  401. // ---------------------------------------------------------------------------
  402. // Debug Support: Invariant Checking, Deadlock Detection, Logging.
  403. // ---------------------------------------------------------------------------
  404. // Mutex::EnableInvariantDebugging()
  405. //
  406. // If `invariant`!=null and if invariant debugging has been enabled globally,
  407. // cause `(*invariant)(arg)` to be called at moments when the invariant for
  408. // this `Mutex` should hold (for example: just after acquire, just before
  409. // release).
  410. //
  411. // The routine `invariant` should have no side-effects since it is not
  412. // guaranteed how many times it will be called; it should check the invariant
  413. // and crash if it does not hold. Enabling global invariant debugging may
  414. // substantially reduce `Mutex` performance; it should be set only for
  415. // non-production runs. Optimization options may also disable invariant
  416. // checks.
  417. void EnableInvariantDebugging(void (*invariant)(void*), void* arg);
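// A minimal sketch of an invariant routine suitable for
// `EnableInvariantDebugging()`. The `Account` struct and its fields are
// hypothetical; the routine only checks and aborts, with no other
// side-effects:
//
//   struct Account {
//     y_absl::Mutex mu;
//     int64_t credits Y_ABSL_GUARDED_BY(mu) = 0;
//     int64_t debits Y_ABSL_GUARDED_BY(mu) = 0;
//   };
//
//   void CheckBalanced(void* arg) {
//     auto* a = static_cast<Account*>(arg);
//     if (a->debits > a->credits) std::abort();  // invariant violated
//   }
//
//   account.mu.EnableInvariantDebugging(&CheckBalanced, &account);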
  418. // Mutex::EnableDebugLog()
  419. //
  420. // Cause all subsequent uses of this `Mutex` to be logged via
  421. // `Y_ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if no previous
  422. // call to `EnableInvariantDebugging()` or `EnableDebugLog()` has been made.
  423. //
  424. // Note: This method substantially reduces `Mutex` performance.
  425. void EnableDebugLog(const char* name);
  426. // Deadlock detection
  427. // Mutex::ForgetDeadlockInfo()
  428. //
  429. // Forget any deadlock-detection information previously gathered
  430. // about this `Mutex`. Call this method in debug mode when the lock ordering
  431. // of a `Mutex` changes.
  432. void ForgetDeadlockInfo();
  433. // Mutex::AssertNotHeld()
  434. //
  435. // Return immediately if this thread does not hold this `Mutex` in any
  436. // mode; otherwise, may report an error (typically by crashing with a
  437. // diagnostic), or may return immediately.
  438. //
  439. // Currently this check is performed only if all of:
  440. // - in debug mode
  441. // - SetMutexDeadlockDetectionMode() has been set to kReport or kAbort
  442. // - number of locks concurrently held by this thread is not large.
  443. // are true.
  444. void AssertNotHeld() const;
  445. // Special cases.
  446. // A `MuHow` is a constant that indicates how a lock should be acquired.
  447. // Internal implementation detail. Clients should ignore.
  448. typedef const struct MuHowS* MuHow;
  449. // Mutex::InternalAttemptToUseMutexInFatalSignalHandler()
  450. //
  451. // Causes the `Mutex` implementation to prepare itself for re-entry caused by
  452. // future use of `Mutex` within a fatal signal handler. This method is
  453. // intended for use only for last-ditch attempts to log crash information.
  454. // It does not guarantee that attempts to use Mutexes within the handler will
  455. // not deadlock; it merely makes other faults less likely.
  456. //
  457. // WARNING: This routine must be invoked from a signal handler, and the
  458. // signal handler must either loop forever or terminate the process.
  459. // Attempts to return from (or `longjmp` out of) the signal handler once this
  460. // call has been made may cause arbitrary program behaviour including
  461. // crashes and deadlocks.
  462. static void InternalAttemptToUseMutexInFatalSignalHandler();
  463. private:
  464. std::atomic<intptr_t> mu_; // The Mutex state.
  465. // Post()/Wait() versus associated PerThreadSem; in class for required
  466. // friendship with PerThreadSem.
  467. static void IncrementSynchSem(Mutex* mu, base_internal::PerThreadSynch* w);
  468. static bool DecrementSynchSem(Mutex* mu, base_internal::PerThreadSynch* w,
  469. synchronization_internal::KernelTimeout t);
  470. // slow path acquire
  471. void LockSlowLoop(SynchWaitParams* waitp, int flags);
  472. // wrappers around LockSlowLoop()
  473. bool LockSlowWithDeadline(MuHow how, const Condition* cond,
  474. synchronization_internal::KernelTimeout t,
  475. int flags);
  476. void LockSlow(MuHow how, const Condition* cond,
  477. int flags) Y_ABSL_ATTRIBUTE_COLD;
  478. // slow path release
  479. void UnlockSlow(SynchWaitParams* waitp) Y_ABSL_ATTRIBUTE_COLD;
  480. // TryLock slow path.
  481. bool TryLockSlow();
  482. // ReaderTryLock slow path.
  483. bool ReaderTryLockSlow();
  484. // Common code between Await() and AwaitWithTimeout/Deadline()
  485. bool AwaitCommon(const Condition& cond,
  486. synchronization_internal::KernelTimeout t);
  487. bool LockWhenCommon(const Condition& cond,
  488. synchronization_internal::KernelTimeout t, bool write);
  489. // Attempt to remove thread s from queue.
  490. void TryRemove(base_internal::PerThreadSynch* s);
  491. // Block a thread on mutex.
  492. void Block(base_internal::PerThreadSynch* s);
  493. // Wake a thread; return successor.
  494. base_internal::PerThreadSynch* Wakeup(base_internal::PerThreadSynch* w);
  495. void Dtor();
  496. friend class CondVar; // for access to Trans()/Fer().
  497. void Trans(MuHow how); // used for CondVar->Mutex transfer
  498. void Fer(
  499. base_internal::PerThreadSynch* w); // used for CondVar->Mutex transfer
  500. // Catch the error of writing Mutex when intending MutexLock.
  501. explicit Mutex(const volatile Mutex* /*ignored*/) {}
  502. Mutex(const Mutex&) = delete;
  503. Mutex& operator=(const Mutex&) = delete;
  504. };
  505. // -----------------------------------------------------------------------------
  506. // Mutex RAII Wrappers
  507. // -----------------------------------------------------------------------------
  508. // MutexLock
  509. //
  510. // `MutexLock` is a helper class, which acquires and releases a `Mutex` via
  511. // RAII.
  512. //
  513. // Example:
  514. //
  515. // class Foo {
  516. // public:
  517. // Foo::Bar* Baz() {
  518. // MutexLock lock(&mu_);
  519. // ...
  520. // return bar;
  521. // }
  522. //
  523. // private:
  524. // Mutex mu_;
  525. // };
  526. class Y_ABSL_SCOPED_LOCKABLE MutexLock {
  527. public:
  528. // Constructors
  529. // Calls `mu->Lock()` and returns when that call returns. That is, `*mu` is
  530. // guaranteed to be locked when this object is constructed. Requires that
  531. // `mu` be dereferenceable.
  532. explicit MutexLock(Mutex* mu) Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
  533. this->mu_->Lock();
  534. }
  535. // Like above, but calls `mu->LockWhen(cond)` instead. That is, in addition to
  536. // the above, the condition given by `cond` is also guaranteed to hold when
  537. // this object is constructed.
  538. explicit MutexLock(Mutex* mu, const Condition& cond)
  539. Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
  540. : mu_(mu) {
  541. this->mu_->LockWhen(cond);
  542. }
  543. MutexLock(const MutexLock&) = delete; // NOLINT(runtime/mutex)
  544. MutexLock(MutexLock&&) = delete; // NOLINT(runtime/mutex)
  545. MutexLock& operator=(const MutexLock&) = delete;
  546. MutexLock& operator=(MutexLock&&) = delete;
  547. ~MutexLock() Y_ABSL_UNLOCK_FUNCTION() { this->mu_->Unlock(); }
  548. private:
  549. Mutex* const mu_;
  550. };
  551. // ReaderMutexLock
  552. //
  553. // The `ReaderMutexLock` is a helper class, like `MutexLock`, which acquires and
  554. // releases a shared lock on a `Mutex` via RAII.
  555. class Y_ABSL_SCOPED_LOCKABLE ReaderMutexLock {
  556. public:
  557. explicit ReaderMutexLock(Mutex* mu) Y_ABSL_SHARED_LOCK_FUNCTION(mu) : mu_(mu) {
  558. mu->ReaderLock();
  559. }
  560. explicit ReaderMutexLock(Mutex* mu, const Condition& cond)
  561. Y_ABSL_SHARED_LOCK_FUNCTION(mu)
  562. : mu_(mu) {
  563. mu->ReaderLockWhen(cond);
  564. }
  565. ReaderMutexLock(const ReaderMutexLock&) = delete;
  566. ReaderMutexLock(ReaderMutexLock&&) = delete;
  567. ReaderMutexLock& operator=(const ReaderMutexLock&) = delete;
  568. ReaderMutexLock& operator=(ReaderMutexLock&&) = delete;
  569. ~ReaderMutexLock() Y_ABSL_UNLOCK_FUNCTION() { this->mu_->ReaderUnlock(); }
  570. private:
  571. Mutex* const mu_;
  572. };
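// Example (mirroring the `MutexLock` example above; `counts_` is a
// hypothetical map guarded by `mu_`):
//
//   int GetCount(const std::string& key) {
//     ReaderMutexLock lock(&mu_);   // shared access suffices for reading
//     auto it = counts_.find(key);
//     return it == counts_.end() ? 0 : it->second;
//   }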
  573. // WriterMutexLock
  574. //
  575. // The `WriterMutexLock` is a helper class, like `MutexLock`, which acquires and
  576. // releases a write (exclusive) lock on a `Mutex` via RAII.
  577. class Y_ABSL_SCOPED_LOCKABLE WriterMutexLock {
  578. public:
  579. explicit WriterMutexLock(Mutex* mu) Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
  580. : mu_(mu) {
  581. mu->WriterLock();
  582. }
  583. explicit WriterMutexLock(Mutex* mu, const Condition& cond)
  584. Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
  585. : mu_(mu) {
  586. mu->WriterLockWhen(cond);
  587. }
  588. WriterMutexLock(const WriterMutexLock&) = delete;
  589. WriterMutexLock(WriterMutexLock&&) = delete;
  590. WriterMutexLock& operator=(const WriterMutexLock&) = delete;
  591. WriterMutexLock& operator=(WriterMutexLock&&) = delete;
  592. ~WriterMutexLock() Y_ABSL_UNLOCK_FUNCTION() { this->mu_->WriterUnlock(); }
  593. private:
  594. Mutex* const mu_;
  595. };
  596. // -----------------------------------------------------------------------------
  597. // Condition
  598. // -----------------------------------------------------------------------------
  599. //
  600. // `Mutex` contains a number of member functions which take a `Condition` as an
  601. // argument; clients can wait for conditions to become `true` before attempting
  602. // to acquire the mutex. These sections are known as "condition critical"
  603. // sections. To use a `Condition`, you simply need to construct it, and use
  604. // within an appropriate `Mutex` member function; everything else in the
  605. // `Condition` class is an implementation detail.
  606. //
  607. // A `Condition` is specified as a function pointer which returns a boolean.
  608. // `Condition` functions should be pure functions -- their results should depend
  609. // only on passed arguments, should not consult any external state (such as
  610. // clocks), and should have no side-effects, aside from debug logging. Any
  611. // objects that the function may access should be limited to those which are
  612. // constant while the mutex is blocked on the condition (e.g. a stack variable),
  613. // or objects of state protected explicitly by the mutex.
  614. //
  615. // No matter which construction is used for `Condition`, the underlying
  616. // function pointer / functor / callable must not throw any
  617. // exceptions. Correctness of `Mutex` / `Condition` is not guaranteed in
  618. // the face of a throwing `Condition`. (When Abseil is allowed to depend
  619. // on C++17, these function pointers will be explicitly marked
  620. // `noexcept`; until then this requirement cannot be enforced in the
  621. // type system.)
  622. //
  623. // Note: to use a `Condition`, you need only construct it and pass it to a
  624. // suitable `Mutex` member function, such as `Mutex::Await()`, or to the
  625. // constructor of one of the scope guard classes.
  626. //
  627. // Example using LockWhen/Unlock:
  628. //
  629. // // assume count_ is not an internal reference count
  630. // int count_ Y_ABSL_GUARDED_BY(mu_);
  631. // Condition count_is_zero(+[](int *count) { return *count == 0; }, &count_);
  632. //
  633. // mu_.LockWhen(count_is_zero);
  634. // // ...
  635. // mu_.Unlock();
  636. //
  637. // Example using a scope guard:
  638. //
  639. // {
  640. // MutexLock lock(&mu_, count_is_zero);
  641. // // ...
  642. // }
  643. //
  644. // When multiple threads are waiting on exactly the same condition, make sure
  645. // that they are constructed with the same parameters (same pointer to function
  646. // + arg, or same pointer to object + method), so that the mutex implementation
  647. // can avoid redundantly evaluating the same condition for each thread.
  648. class Condition {
  649. public:
  650. // A Condition that returns the result of "(*func)(arg)"
  651. Condition(bool (*func)(void*), void* arg);
  652. // Templated version for people who are averse to casts.
  653. //
  654. // To use a lambda, prepend it with unary plus, which converts the lambda
  655. // into a function pointer:
  656. // Condition(+[](T* t) { return ...; }, arg).
  657. //
  658. // Note: lambdas in this case must contain no bound variables.
  659. //
  660. // See class comment for performance advice.
  661. template <typename T>
  662. Condition(bool (*func)(T*), T* arg);
  663. // Same as above, but allows for cases where `arg` comes from a pointer that
  664. // is convertible to the function parameter type `T*` but not an exact match.
  665. //
  666. // For example, the argument might be `X*` but the function takes `const X*`,
  667. // or the argument might be `Derived*` while the function takes `Base*`, and
  668. // so on for cases where the argument pointer can be implicitly converted.
  669. //
  670. // Implementation notes: This constructor overload is required in addition to
  671. // the one above to allow deduction of `T` from `arg` for cases such as where
  672. // a function template is passed as `func`. Also, the dummy `typename = void`
  673. // template parameter exists just to work around a MSVC mangling bug.
  674. template <typename T, typename = void>
  675. Condition(bool (*func)(T*),
  676. typename y_absl::internal::type_identity<T>::type* arg);
  677. // Templated version for invoking a method that returns a `bool`.
  678. //
  679. // `Condition(object, &Class::Method)` constructs a `Condition` that evaluates
  680. // `object->Method()`.
  681. //
  682. // Implementation Note: `y_absl::internal::type_identity` is used to allow
  683. // methods to come from base classes. A simpler signature like
  684. // `Condition(T*, bool (T::*)())` does not suffice.
  685. template <typename T>
  686. Condition(T* object,
  687. bool (y_absl::internal::type_identity<T>::type::*method)());
  688. // Same as above, for const members
  689. template <typename T>
  690. Condition(const T* object,
  691. bool (y_absl::internal::type_identity<T>::type::*method)() const);
  692. // A Condition that returns the value of `*cond`
  693. explicit Condition(const bool* cond);
  694. // Templated version for invoking a functor that returns a `bool`.
  695. // This approach accepts pointers to non-mutable lambdas, `std::function`,
  696. // the result of `std::bind` and user-defined functors that define
  697. // `bool F::operator()() const`.
  698. //
  699. // Example:
  700. //
  701. // auto reached = [this, current]() {
  702. // mu_.AssertReaderHeld(); // For annotalysis.
  703. // return processed_ >= current;
  704. // };
  705. // mu_.Await(Condition(&reached));
  706. //
  707. // NOTE: never use "mu_.AssertHeld()" instead of "mu_.AssertReaderHeld()" in
  708. // the lambda as it may be called when the mutex is being unlocked from a
  709. // scope holding only a reader lock, which will make the assertion not
  710. // fulfilled and crash the binary.
  711. // See class comment for performance advice. In particular, if there
  712. // might be more than one waiter for the same condition, make sure
  713. // that all waiters construct the condition with the same pointers.
  714. // Implementation note: The second template parameter ensures that this
  715. // constructor doesn't participate in overload resolution if T doesn't have
  716. // `bool operator() const`.
  717. template <typename T, typename E = decltype(static_cast<bool (T::*)() const>(
  718. &T::operator()))>
  719. explicit Condition(const T* obj)
  720. : Condition(obj, static_cast<bool (T::*)() const>(&T::operator())) {}
  721. // A Condition that always returns `true`.
  722. // kTrue is only useful in a narrow set of circumstances, mostly when
  723. // it's passed conditionally. For example:
  724. //
  725. // mu.LockWhen(some_flag ? kTrue : SomeOtherCondition);
  726. //
  727. // Note: {LockWhen,Await}With{Deadline,Timeout} methods with kTrue condition
  728. // don't return immediately when the timeout happens, they still block until
  729. // the Mutex becomes available. The return value of these methods does
  730. // not indicate if the timeout was reached; rather it indicates whether or
  731. // not the condition is true.
  732. Y_ABSL_CONST_INIT static const Condition kTrue;
  733. // Evaluates the condition.
  734. bool Eval() const;
  735. // Returns `true` if the two conditions are guaranteed to return the same
  736. // value if evaluated at the same time, `false` if the evaluation *may* return
  737. // different results.
  738. //
  739. // Two `Condition` values are guaranteed equal if both their `func` and `arg`
  740. // components are the same. A null pointer is equivalent to a `true`
  741. // condition.
  742. static bool GuaranteedEqual(const Condition* a, const Condition* b);
  743. private:
  744. // Sizing an allocation for a method pointer can be subtle. In the Itanium
  745. // specifications, a method pointer has a predictable, uniform size. On the
  746. // other hand, in the MSVC ABI, method pointer sizes vary based on the
  747. // inheritance of the class. Specifically, method pointers from classes with
  748. // multiple inheritance are bigger than those of classes with single
  749. // inheritance. Other variations also exist.
  750. #ifndef _MSC_VER
  751. // Allocation for a function pointer or method pointer.
  752. // The {0} initializer ensures that all unused bytes of this buffer are
  753. // always zeroed out. This is necessary, because GuaranteedEqual() compares
  754. // all of the bytes, unaware of which bytes are relevant to a given `eval_`.
  755. using MethodPtr = bool (Condition::*)();
  756. char callback_[sizeof(MethodPtr)] = {0};
  757. #else
  758. // It is well known that the largest MSVC pointer-to-member is 24 bytes. This
  759. // may be the largest known pointer-to-member of any platform. For this
  760. // reason we will allocate 24 bytes for MSVC platform toolchains.
  761. char callback_[24] = {0};
  762. #endif
  763. // Function with which to evaluate callbacks and/or arguments.
  764. bool (*eval_)(const Condition*) = nullptr;
  765. // Either an argument for a function call or an object for a method call.
  766. void* arg_ = nullptr;
  767. // Various functions eval_ can point to:
  768. static bool CallVoidPtrFunction(const Condition*);
  769. template <typename T>
  770. static bool CastAndCallFunction(const Condition* c);
  771. template <typename T, typename ConditionMethodPtr>
  772. static bool CastAndCallMethod(const Condition* c);
  773. // Helper methods for storing, validating, and reading callback arguments.
  774. template <typename T>
  775. inline void StoreCallback(T callback) {
  776. static_assert(
  777. sizeof(callback) <= sizeof(callback_),
  778. "An overlarge pointer was passed as a callback to Condition.");
  779. std::memcpy(callback_, &callback, sizeof(callback));
  780. }
  781. template <typename T>
  782. inline void ReadCallback(T* callback) const {
  783. std::memcpy(callback, callback_, sizeof(*callback));
  784. }
  785. static bool AlwaysTrue(const Condition*) { return true; }
  786. // Used only to create kTrue.
  787. constexpr Condition() : eval_(AlwaysTrue), arg_(nullptr) {}
  788. };
  789. // -----------------------------------------------------------------------------
  790. // CondVar
  791. // -----------------------------------------------------------------------------
  792. //
  793. // A condition variable, reflecting state evaluated separately outside of the
  794. // `Mutex` object, which can be signaled to wake callers.
  795. // This class is not normally needed; use `Mutex` member functions such as
  796. // `Mutex::Await()` and intrinsic `Condition` abstractions. In rare cases
  797. // with many threads and many conditions, `CondVar` may be faster.
  798. //
  799. // The implementation may deliver signals to any condition variable at
  800. // any time, even when no call to `Signal()` or `SignalAll()` is made; as a
  801. // result, upon being awoken, you must check the logical condition you have
  802. // been waiting upon.
  803. //
  804. // Examples:
  805. //
  806. // Usage for a thread waiting for some condition C protected by mutex mu:
  807. // mu.Lock();
  808. // while (!C) { cv->Wait(&mu); } // releases and reacquires mu
  809. // // C holds; process data
  810. // mu.Unlock();
  811. //
  812. // Usage to wake T is:
  813. // mu.Lock();
  814. // // process data, possibly establishing C
  815. // if (C) { cv->Signal(); }
  816. // mu.Unlock();
  817. //
  818. // If C may be useful to more than one waiter, use `SignalAll()` instead of
  819. // `Signal()`.
  820. //
  821. // With this implementation it is efficient to use `Signal()/SignalAll()` inside
  822. // the locked region; this usage can make reasoning about your program easier.
  823. //
  824. class CondVar {
  825. public:
  826. // A `CondVar` allocated on the heap or on the stack can use this
  827. // constructor.
  828. CondVar();
  829. // CondVar::Wait()
  830. //
  831. // Atomically releases a `Mutex` and blocks on this condition variable.
  832. // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
  833. // spurious wakeup), then reacquires the `Mutex` and returns.
  834. //
  835. // Requires and ensures that the current thread holds the `Mutex`.
  836. void Wait(Mutex* mu) {
  837. WaitCommon(mu, synchronization_internal::KernelTimeout::Never());
  838. }
  839. // CondVar::WaitWithTimeout()
  840. //
  841. // Atomically releases a `Mutex` and blocks on this condition variable.
  842. // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
  843. // spurious wakeup), or until the timeout has expired, then reacquires
  844. // the `Mutex` and returns.
  845. //
  846. // Returns true if the timeout has expired without this `CondVar`
  847. // being signalled in any manner. If both the timeout has expired
  848. // and this `CondVar` has been signalled, the implementation is free
  849. // to return `true` or `false`.
  850. //
  851. // Requires and ensures that the current thread holds the `Mutex`.
  852. bool WaitWithTimeout(Mutex* mu, y_absl::Duration timeout) {
  853. return WaitCommon(mu, synchronization_internal::KernelTimeout(timeout));
  854. }
  855. // CondVar::WaitWithDeadline()
  856. //
  857. // Atomically releases a `Mutex` and blocks on this condition variable.
  858. // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
  859. // spurious wakeup), or until the deadline has passed, then reacquires
  860. // the `Mutex` and returns.
  861. //
  862. // Deadlines in the past are equivalent to an immediate deadline.
  863. //
  864. // Returns true if the deadline has passed without this `CondVar`
  865. // being signalled in any manner. If both the deadline has passed
  866. // and this `CondVar` has been signalled, the implementation is free
  867. // to return `true` or `false`.
  868. //
  869. // Requires and ensures that the current thread holds the `Mutex`.
  870. bool WaitWithDeadline(Mutex* mu, y_absl::Time deadline) {
  871. return WaitCommon(mu, synchronization_internal::KernelTimeout(deadline));
  872. }
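// A minimal sketch of a bounded wait built from `WaitWithDeadline()`. The
// `queue_` and `cv_` members are hypothetical, with `queue_` protected by
// `mu_`:
//
//   bool PopWithTimeout(Item* out, y_absl::Duration d) {
//     y_absl::MutexLock lock(&mu_);
//     const y_absl::Time deadline = y_absl::Now() + d;
//     while (queue_.empty()) {
//       if (cv_.WaitWithDeadline(&mu_, deadline)) break;  // deadline passed
//     }
//     if (queue_.empty()) return false;   // re-check: wakeups may be spurious
//     *out = queue_.front();
//     queue_.pop_front();
//     return true;
//   }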
  873. // CondVar::Signal()
  874. //
  875. // Signal this `CondVar`; wake at least one waiter if one exists.
  876. void Signal();
  877. // CondVar::SignalAll()
  878. //
  879. // Signal this `CondVar`; wake all waiters.
  880. void SignalAll();
  881. // CondVar::EnableDebugLog()
  882. //
  883. // Causes all subsequent uses of this `CondVar` to be logged via
  884. // `Y_ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if `name != 0`.
  885. // Note: this method substantially reduces `CondVar` performance.
  886. void EnableDebugLog(const char* name);
  887. private:
  888. bool WaitCommon(Mutex* mutex, synchronization_internal::KernelTimeout t);
  889. void Remove(base_internal::PerThreadSynch* s);
  890. std::atomic<intptr_t> cv_; // Condition variable state.
  891. CondVar(const CondVar&) = delete;
  892. CondVar& operator=(const CondVar&) = delete;
  893. };
  894. // Variants of MutexLock.
  895. //
  896. // If you find yourself using one of these, consider instead using
  897. // Mutex::Unlock() and/or if-statements for clarity.
  898. // MutexLockMaybe
  899. //
  900. // MutexLockMaybe is like MutexLock, but is a no-op when mu is null.
  901. class Y_ABSL_SCOPED_LOCKABLE MutexLockMaybe {
  902. public:
  903. explicit MutexLockMaybe(Mutex* mu) Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
  904. : mu_(mu) {
  905. if (this->mu_ != nullptr) {
  906. this->mu_->Lock();
  907. }
  908. }
  909. explicit MutexLockMaybe(Mutex* mu, const Condition& cond)
  910. Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
  911. : mu_(mu) {
  912. if (this->mu_ != nullptr) {
  913. this->mu_->LockWhen(cond);
  914. }
  915. }
  916. ~MutexLockMaybe() Y_ABSL_UNLOCK_FUNCTION() {
  917. if (this->mu_ != nullptr) {
  918. this->mu_->Unlock();
  919. }
  920. }
  921. private:
  922. Mutex* const mu_;
  923. MutexLockMaybe(const MutexLockMaybe&) = delete;
  924. MutexLockMaybe(MutexLockMaybe&&) = delete;
  925. MutexLockMaybe& operator=(const MutexLockMaybe&) = delete;
  926. MutexLockMaybe& operator=(MutexLockMaybe&&) = delete;
  927. };
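// A minimal sketch of `MutexLockMaybe` with an optional mutex; `Frame`,
// `Draw()` and the calling convention are hypothetical:
//
//   void Render(Frame* frame, y_absl::Mutex* mu /* may be null */) {
//     y_absl::MutexLockMaybe lock(mu);   // no-op when mu == nullptr
//     Draw(frame);
//   }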
  928. // ReleasableMutexLock
  929. //
  930. // ReleasableMutexLock is like MutexLock, but permits `Release()` of its
  931. // mutex before destruction. `Release()` may be called at most once.
  932. class Y_ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
  933. public:
  934. explicit ReleasableMutexLock(Mutex* mu) Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
  935. : mu_(mu) {
  936. this->mu_->Lock();
  937. }
  938. explicit ReleasableMutexLock(Mutex* mu, const Condition& cond)
  939. Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
  940. : mu_(mu) {
  941. this->mu_->LockWhen(cond);
  942. }
  943. ~ReleasableMutexLock() Y_ABSL_UNLOCK_FUNCTION() {
  944. if (this->mu_ != nullptr) {
  945. this->mu_->Unlock();
  946. }
  947. }
  948. void Release() Y_ABSL_UNLOCK_FUNCTION();
  949. private:
  950. Mutex* mu_;
  951. ReleasableMutexLock(const ReleasableMutexLock&) = delete;
  952. ReleasableMutexLock(ReleasableMutexLock&&) = delete;
  953. ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete;
  954. ReleasableMutexLock& operator=(ReleasableMutexLock&&) = delete;
  955. };
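// A minimal sketch of `ReleasableMutexLock`: the lock can be dropped early on
// one path and is otherwise released by the destructor (names hypothetical):
//
//   void Process(Item* item) {
//     y_absl::ReleasableMutexLock lock(&mu_);
//     if (item->cached) {
//       lock.Release();            // unlock now; at most one Release() call
//       UseUnlocked(item);
//       return;
//     }
//     UpdateProtectedState(item);  // still holding mu_ here
//   }                              // destructor unlocks unless released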
  956. inline Mutex::Mutex() : mu_(0) {
  957. Y_ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
  958. }
  959. inline constexpr Mutex::Mutex(y_absl::ConstInitType) : mu_(0) {}
  960. #if !defined(__APPLE__) && !defined(Y_ABSL_BUILD_DLL)
  961. Y_ABSL_ATTRIBUTE_ALWAYS_INLINE
  962. inline Mutex::~Mutex() { Dtor(); }
  963. #endif
  964. #if defined(NDEBUG) && !defined(Y_ABSL_HAVE_THREAD_SANITIZER)
  965. // Use default (empty) destructor in release build for performance reasons.
  966. // We need to mark both Dtor and ~Mutex as always inline for inconsistent
  967. // builds that use both NDEBUG and !NDEBUG with dynamic libraries. In these
  968. // cases we want the empty functions to dissolve entirely rather than being
  969. // exported from dynamic libraries and potentially override the non-empty ones.
  970. Y_ABSL_ATTRIBUTE_ALWAYS_INLINE
  971. inline void Mutex::Dtor() {}
  972. #endif
  973. inline CondVar::CondVar() : cv_(0) {}
  974. // static
  975. template <typename T, typename ConditionMethodPtr>
  976. bool Condition::CastAndCallMethod(const Condition* c) {
  977. T* object = static_cast<T*>(c->arg_);
  978. ConditionMethodPtr condition_method_pointer;
  979. c->ReadCallback(&condition_method_pointer);
  980. return (object->*condition_method_pointer)();
  981. }
  982. // static
  983. template <typename T>
  984. bool Condition::CastAndCallFunction(const Condition* c) {
  985. bool (*function)(T*);
  986. c->ReadCallback(&function);
  987. T* argument = static_cast<T*>(c->arg_);
  988. return (*function)(argument);
  989. }
  990. template <typename T>
  991. inline Condition::Condition(bool (*func)(T*), T* arg)
  992. : eval_(&CastAndCallFunction<T>),
  993. arg_(const_cast<void*>(static_cast<const void*>(arg))) {
  994. static_assert(sizeof(&func) <= sizeof(callback_),
  995. "An overlarge function pointer was passed to Condition.");
  996. StoreCallback(func);
  997. }
  998. template <typename T, typename>
  999. inline Condition::Condition(
  1000. bool (*func)(T*), typename y_absl::internal::type_identity<T>::type* arg)
  1001. // Just delegate to the overload above.
  1002. : Condition(func, arg) {}
  1003. template <typename T>
  1004. inline Condition::Condition(
  1005. T* object, bool (y_absl::internal::type_identity<T>::type::*method)())
  1006. : eval_(&CastAndCallMethod<T, decltype(method)>), arg_(object) {
  1007. static_assert(sizeof(&method) <= sizeof(callback_),
  1008. "An overlarge method pointer was passed to Condition.");
  1009. StoreCallback(method);
  1010. }
  1011. template <typename T>
  1012. inline Condition::Condition(
  1013. const T* object,
  1014. bool (y_absl::internal::type_identity<T>::type::*method)() const)
  1015. : eval_(&CastAndCallMethod<const T, decltype(method)>),
  1016. arg_(reinterpret_cast<void*>(const_cast<T*>(object))) {
  1017. StoreCallback(method);
  1018. }
  1019. // Register hooks for profiling support.
  1020. //
  1021. // The function pointer registered here will be called whenever a mutex is
  1022. // contended. The callback is given the cycles for which waiting happened (as
  1023. // measured by //y_absl/base/internal/cycleclock.h, and which may not
  1024. // be real "cycle" counts.)
  1025. //
  1026. // There is no ordering guarantee between when the hook is registered and when
  1027. // callbacks will begin. Only a single profiler can be installed in a running
  1028. // binary; if this function is called a second time with a different function
  1029. // pointer, the value is ignored (and will cause an assertion failure in debug
  1030. // mode.)
  1031. void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles));
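// A minimal sketch of a profiler hook; the counter and its name are
// hypothetical, and the callback deliberately takes no locks:
//
//   std::atomic<int64_t> total_contention_cycles{0};
//
//   void MyMutexProfiler(int64_t wait_cycles) {
//     total_contention_cycles.fetch_add(wait_cycles, std::memory_order_relaxed);
//   }
//
//   // Typically registered once, early in main():
//   y_absl::RegisterMutexProfiler(&MyMutexProfiler);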
  1032. // Register a hook for Mutex tracing.
  1033. //
  1034. // The function pointer registered here will be called whenever a mutex is
  1035. // contended. The callback is given an opaque handle to the contended mutex,
  1036. // an event name, and the number of wait cycles (as measured by
  1037. // //y_absl/base/internal/cycleclock.h, and which may not be real
  1038. // "cycle" counts.)
  1039. //
  1040. // The only event name currently sent is "slow release".
  1041. //
  1042. // This has the same ordering and single-use limitations as
  1043. // RegisterMutexProfiler() above.
  1044. void RegisterMutexTracer(void (*fn)(const char* msg, const void* obj,
  1045. int64_t wait_cycles));
  1046. // Register a hook for CondVar tracing.
  1047. //
  1048. // The function pointer registered here will be called here on various CondVar
  1049. // events. The callback is given an opaque handle to the CondVar object and
  1050. // a string identifying the event. This is thread-safe, but only a single
  1051. // tracer can be registered.
  1052. //
  1053. // Events that can be sent are "Wait", "Unwait", "Signal wakeup", and
  1054. // "SignalAll wakeup".
  1055. //
  1056. // This has the same ordering and single-use limitations as
  1057. // RegisterMutexProfiler() above.
  1058. void RegisterCondVarTracer(void (*fn)(const char* msg, const void* cv));
  1059. void ResetDeadlockGraphMu();
  1060. // EnableMutexInvariantDebugging()
  1061. //
  1062. // Enable or disable global support for Mutex invariant debugging. If enabled,
  1063. // then invariant predicates can be registered per-Mutex for debug checking.
  1064. // See Mutex::EnableInvariantDebugging().
  1065. void EnableMutexInvariantDebugging(bool enabled);
  1066. // When in debug mode, and when the feature has been enabled globally, the
  1067. // implementation will keep track of lock ordering and complain (or optionally
  1068. // crash) if a cycle is detected in the acquired-before graph.
  1069. // Possible modes of operation for the deadlock detector in debug mode.
  1070. enum class OnDeadlockCycle {
  1071. kIgnore, // Neither report on nor attempt to track cycles in lock ordering
  1072. kReport, // Report lock cycles to stderr when detected
  1073. kAbort, // Report lock cycles to stderr when detected, then abort
  1074. };
  1075. // SetMutexDeadlockDetectionMode()
  1076. //
  1077. // Enable or disable global support for detection of potential deadlocks
  1078. // due to Mutex lock ordering inversions. When set to 'kIgnore', tracking of
  1079. // lock ordering is disabled. Otherwise, in debug builds, a lock ordering graph
  1080. // will be maintained internally, and detected cycles will be reported in
  1081. // the manner chosen here.
  1082. void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode);
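// A minimal sketch of enabling the detector, typically early in main() of a
// debug binary, before lock-taking threads are started:
//
//   y_absl::SetMutexDeadlockDetectionMode(y_absl::OnDeadlockCycle::kAbort);
//   // From here on, debug builds report (and abort on) any cycle detected in
//   // the acquired-before graph of y_absl::Mutex objects.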
  1083. Y_ABSL_NAMESPACE_END
  1084. } // namespace y_absl
  1085. // In some build configurations we pass --detect-odr-violations to the
  1086. // gold linker. This causes it to flag weak symbol overrides as ODR
  1087. // violations. Because ODR only applies to C++ and not C,
  1088. // --detect-odr-violations ignores symbols not mangled with C++ names.
  1089. // By changing our extension points to be extern "C", we dodge this
  1090. // check.
  1091. extern "C" {
  1092. void Y_ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
  1093. } // extern "C"
  1094. #endif // Y_ABSL_SYNCHRONIZATION_MUTEX_H_