  1. // Copyright 2017 The Abseil Authors.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // https://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. //
  15. // -----------------------------------------------------------------------------
  16. // mutex.h
  17. // -----------------------------------------------------------------------------
  18. //
  19. // This header file defines a `Mutex` -- a mutually exclusive lock -- the
  20. // most common type of synchronization primitive for facilitating locks on
  21. // shared resources. A mutex is used to prevent multiple threads from accessing
  22. // and/or writing to a shared resource concurrently.
  23. //
  24. // Unlike a `std::mutex`, the Abseil `Mutex` provides the following additional
  25. // features:
  26. // * Conditional predicates intrinsic to the `Mutex` object
  27. // * Shared/reader locks, in addition to standard exclusive/writer locks
  28. // * Deadlock detection and debug support.
  29. //
  30. // The following helper classes are also defined within this file:
  31. //
  32. // MutexLock - An RAII wrapper to acquire and release a `Mutex` for exclusive/
  33. // write access within the current scope.
  34. //
  35. // ReaderMutexLock
  36. // - An RAII wrapper to acquire and release a `Mutex` for shared/read
  37. // access within the current scope.
  38. //
  39. // WriterMutexLock
  40. // - Effectively an alias for `MutexLock` above, designed for use in
  41. // distinguishing reader and writer locks within code.
  42. //
  43. // In addition to simple mutex locks, this file also defines ways to perform
  44. // locking under certain conditions.
  45. //
  46. // Condition - (Preferred) Used to wait for a particular predicate that
  47. // depends on state protected by the `Mutex` to become true.
  48. // CondVar - A lower-level variant of `Condition` that relies on
  49. // application code to explicitly signal the `CondVar` when
  50. // a condition has been met.
  51. //
  52. // See below for more information on using `Condition` or `CondVar`.
  53. //
  54. // Mutexes and mutex behavior can be quite complicated. The information within
  55. // this header file is limited, as a result. Please consult the Mutex guide for
  56. // more complete information and examples.
  57. #ifndef Y_ABSL_SYNCHRONIZATION_MUTEX_H_
  58. #define Y_ABSL_SYNCHRONIZATION_MUTEX_H_
  59. #include <atomic>
  60. #include <cstdint>
  61. #include <cstring>
  62. #include <iterator>
  63. #include <util/generic/string.h>
  64. #include "y_absl/base/attributes.h"
  65. #include "y_absl/base/const_init.h"
  66. #include "y_absl/base/internal/identity.h"
  67. #include "y_absl/base/internal/low_level_alloc.h"
  68. #include "y_absl/base/internal/thread_identity.h"
  69. #include "y_absl/base/internal/tsan_mutex_interface.h"
  70. #include "y_absl/base/port.h"
  71. #include "y_absl/base/thread_annotations.h"
  72. #include "y_absl/synchronization/internal/kernel_timeout.h"
  73. #include "y_absl/synchronization/internal/per_thread_sem.h"
  74. #include "y_absl/time/time.h"
  75. namespace y_absl {
  76. Y_ABSL_NAMESPACE_BEGIN
  77. class Condition;
  78. struct SynchWaitParams;
  79. // -----------------------------------------------------------------------------
  80. // Mutex
  81. // -----------------------------------------------------------------------------
  82. //
  83. // A `Mutex` is a non-reentrant (aka non-recursive) Mutually Exclusive lock
  84. // on some resource, typically a variable or data structure with associated
  85. // invariants. Proper usage of mutexes prevents concurrent access by different
  86. // threads to the same resource.
  87. //
  88. // A `Mutex` has two basic operations: `Mutex::Lock()` and `Mutex::Unlock()`.
  89. // The `Lock()` operation *acquires* a `Mutex` (in a state known as an
  90. // *exclusive* -- or *write* -- lock), and the `Unlock()` operation *releases* a
  91. // Mutex. During the span of time between the Lock() and Unlock() operations,
  92. // a mutex is said to be *held*. By design, all mutexes support exclusive/write
  93. // locks, as this is the most common way to use a mutex.
  94. //
  95. // Mutex operations are only allowed under certain conditions; otherwise an
  96. // operation is "invalid", and disallowed by the API. The conditions concern
  97. // both the current state of the mutex and the identity of the threads that
  98. // are performing the operations.
  99. //
  100. // The `Mutex` state machine for basic lock/unlock operations is quite simple:
  101. //
  102. //   |                | Lock()                 | Unlock() |
  103. //   |----------------+------------------------+----------|
  104. //   | Free           | Exclusive              | invalid  |
  105. //   | Exclusive      | blocks, then exclusive | Free     |
  106. //
  107. // The full conditions are as follows.
  108. //
  109. // * Calls to `Unlock()` require that the mutex be held, and must be made in the
  110. // same thread that performed the corresponding `Lock()` operation which
  111. // acquired the mutex; otherwise the call is invalid.
  112. //
  113. // * The mutex being non-reentrant (or non-recursive) means that a call to
  114. // `Lock()` or `TryLock()` must not be made in a thread that already holds the
  115. // mutex; such a call is invalid.
  116. //
  117. // * In other words, the state of being "held" has both a temporal component
  118. // (from `Lock()` until `Unlock()`) as well as a thread identity component:
  119. // the mutex is held *by a particular thread*.
  120. //
  121. // An "invalid" operation has undefined behavior. The `Mutex` implementation
  122. // is allowed to do anything on an invalid call, including, but not limited to,
  123. // crashing with a useful error message, silently succeeding, or corrupting
  124. // data structures. In debug mode, the implementation may crash with a useful
  125. // error message.
  126. //
  127. // `Mutex` is not guaranteed to be "fair" in prioritizing waiting threads; it
  128. // is, however, approximately fair over long periods, and starvation-free for
  129. // threads at the same priority.
  130. //
  131. // The lock/unlock primitives are annotated with the lock annotations
  132. // defined in `y_absl/base/thread_annotations.h`. When writing multi-threaded code,
  133. // you should use lock annotations whenever possible to document your lock
  134. // synchronization policy. Besides acting as documentation, these annotations
  135. // also help compilers or static analysis tools to identify and warn about
  136. // issues that could potentially result in race conditions and deadlocks.
  137. //
  138. // For more information about the lock annotations, please see
  139. // [Thread Safety
  140. // Analysis](http://clang.llvm.org/docs/ThreadSafetyAnalysis.html) in the Clang
  141. // documentation.
  142. //
  143. // See also `MutexLock`, below, for scoped `Mutex` acquisition.
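//
// Example (an illustrative sketch; `Counter` and its members are hypothetical,
// not part of this header): a class whose state is protected by a `Mutex`
// using explicit `Lock()`/`Unlock()` calls.
//
//   class Counter {
//    public:
//     void Increment() {
//       mu_.Lock();
//       ++value_;
//       mu_.Unlock();
//     }
//     int Get() {
//       mu_.Lock();
//       int v = value_;
//       mu_.Unlock();
//       return v;
//     }
//    private:
//     y_absl::Mutex mu_;
//     int value_ Y_ABSL_GUARDED_BY(mu_) = 0;
//   };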
  144. class Y_ABSL_LOCKABLE Mutex {
  145. public:
  146. // Creates a `Mutex` that is not held by anyone. This constructor is
  147. // typically used for Mutexes allocated on the heap or the stack.
  148. //
  149. // To create `Mutex` instances with static storage duration
  150. // (e.g. a namespace-scoped or global variable), see
  151. // `Mutex::Mutex(y_absl::kConstInit)` below instead.
  152. Mutex();
  153. // Creates a mutex with static storage duration. A global variable
  154. // constructed this way avoids the lifetime issues that can occur on program
  155. // startup and shutdown. (See y_absl/base/const_init.h.)
  156. //
  157. // For Mutexes allocated on the heap and stack, instead use the default
  158. // constructor, which can interact more fully with the thread sanitizer.
  159. //
  160. // Example usage:
  161. // namespace foo {
  162. // Y_ABSL_CONST_INIT y_absl::Mutex mu(y_absl::kConstInit);
  163. // }
  164. explicit constexpr Mutex(y_absl::ConstInitType);
  165. ~Mutex();
  166. // Mutex::Lock()
  167. //
  168. // Blocks the calling thread, if necessary, until this `Mutex` is free, and
  169. // then acquires it exclusively. (This lock is also known as a "write lock.")
  170. void Lock() Y_ABSL_EXCLUSIVE_LOCK_FUNCTION();
  171. // Mutex::Unlock()
  172. //
  173. // Releases this `Mutex` and returns it from the exclusive/write state to the
  174. // free state. Calling thread must hold the `Mutex` exclusively.
  175. void Unlock() Y_ABSL_UNLOCK_FUNCTION();
  176. // Mutex::TryLock()
  177. //
  178. // If the mutex can be acquired without blocking, does so exclusively and
  179. // returns `true`. Otherwise, returns `false`. Returns `true` with high
  180. // probability if the `Mutex` was free.
  181. bool TryLock() Y_ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true);
  182. // Mutex::AssertHeld()
  183. //
  184. // Require that the mutex be held exclusively (write mode) by this thread.
  185. //
  186. // If the mutex is not currently held by this thread, this function may report
  187. // an error (typically by crashing with a diagnostic) or it may do nothing.
  188. // This function is intended only as a tool to assist debugging; it doesn't
  189. // guarantee correctness.
  190. void AssertHeld() const Y_ABSL_ASSERT_EXCLUSIVE_LOCK();
  191. // ---------------------------------------------------------------------------
  192. // Reader-Writer Locking
  193. // ---------------------------------------------------------------------------
  194. // A Mutex can also be used as a starvation-free reader-writer lock.
  195. // Neither read-locks nor write-locks are reentrant/recursive to avoid
  196. // potential client programming errors.
  197. //
  198. // The Mutex API provides `Writer*()` aliases for the existing `Lock()`,
  199. // `Unlock()` and `TryLock()` methods for use within applications mixing
  200. // reader/writer locks. Using `Reader*()` and `Writer*()` operations in this
  201. // manner can make locking behavior clearer when mixing read and write modes.
  202. //
  203. // Introducing reader locks necessarily complicates the `Mutex` state
  204. // machine somewhat. The table below illustrates the allowed state transitions
  205. // of a mutex in such cases. Note that ReaderLock() may block even if the lock
  206. // is held in shared mode; this occurs when another thread is blocked on a
  207. // call to WriterLock().
  208. //
  209. // ---------------------------------------------------------------------------
  210. //    Operation: WriterLock() Unlock()  ReaderLock()           ReaderUnlock()
  211. // ---------------------------------------------------------------------------
  212. // State
  213. // ---------------------------------------------------------------------------
  214. // Free           Exclusive    invalid   Shared(1)              invalid
  215. // Shared(1)      blocks       invalid   Shared(2) or blocks    Free
  216. // Shared(n) n>1  blocks       invalid   Shared(n+1) or blocks  Shared(n-1)
  217. // Exclusive      blocks       Free      blocks                 invalid
  218. // ---------------------------------------------------------------------------
  219. //
  220. // In comments below, "shared" refers to a state of Shared(n) for any n > 0.
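//
// Example (an illustrative sketch; `Registry` and its members are
// hypothetical): concurrent lookups take a shared lock, while mutations take
// the exclusive lock.
//
//   class Registry {
//    public:
//     bool Contains(const std::string& key) const {
//       mu_.ReaderLock();
//       bool found = keys_.count(key) != 0;
//       mu_.ReaderUnlock();
//       return found;
//     }
//     void Insert(const std::string& key) {
//       mu_.WriterLock();
//       keys_.insert(key);
//       mu_.WriterUnlock();
//     }
//    private:
//     mutable y_absl::Mutex mu_;
//     std::set<std::string> keys_ Y_ABSL_GUARDED_BY(mu_);
//   };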
  221. // Mutex::ReaderLock()
  222. //
  223. // Blocks the calling thread, if necessary, until this `Mutex` is either free,
  224. // or in shared mode, and then acquires a share of it. Note that
  225. // `ReaderLock()` will block if some other thread has an exclusive/writer lock
  226. // on the mutex.
  227. void ReaderLock() Y_ABSL_SHARED_LOCK_FUNCTION();
  228. // Mutex::ReaderUnlock()
  229. //
  230. // Releases a read share of this `Mutex`. `ReaderUnlock` may return a mutex to
  231. // the free state if this thread holds the last reader lock on the mutex. Note
  232. // that you cannot call `ReaderUnlock()` on a mutex held in write mode.
  233. void ReaderUnlock() Y_ABSL_UNLOCK_FUNCTION();
  234. // Mutex::ReaderTryLock()
  235. //
  236. // If the mutex can be acquired without blocking, acquires this mutex for
  237. // shared access and returns `true`. Otherwise, returns `false`. Returns
  238. // `true` with high probability if the `Mutex` was free or shared.
  239. bool ReaderTryLock() Y_ABSL_SHARED_TRYLOCK_FUNCTION(true);
  240. // Mutex::AssertReaderHeld()
  241. //
  242. // Require that the mutex be held at least in shared mode (read mode) by this
  243. // thread.
  244. //
  245. // If the mutex is not currently held by this thread, this function may report
  246. // an error (typically by crashing with a diagnostic) or it may do nothing.
  247. // This function is intended only as a tool to assist debugging; it doesn't
  248. // guarantee correctness.
  249. void AssertReaderHeld() const Y_ABSL_ASSERT_SHARED_LOCK();
  250. // Mutex::WriterLock()
  251. // Mutex::WriterUnlock()
  252. // Mutex::WriterTryLock()
  253. //
  254. // Aliases for `Mutex::Lock()`, `Mutex::Unlock()`, and `Mutex::TryLock()`.
  255. //
  256. // These methods may be used (along with the complementary `Reader*()`
  257. // methods) to distinguish simple exclusive `Mutex` usage (`Lock()`,
  258. // etc.) from reader/writer lock usage.
  259. void WriterLock() Y_ABSL_EXCLUSIVE_LOCK_FUNCTION() { this->Lock(); }
  260. void WriterUnlock() Y_ABSL_UNLOCK_FUNCTION() { this->Unlock(); }
  261. bool WriterTryLock() Y_ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
  262. return this->TryLock();
  263. }
  264. // ---------------------------------------------------------------------------
  265. // Conditional Critical Regions
  266. // ---------------------------------------------------------------------------
  267. // Conditional usage of a `Mutex` can occur using two distinct paradigms:
  268. //
  269. // * Use of `Mutex` member functions with `Condition` objects.
  270. // * Use of the separate `CondVar` abstraction.
  271. //
  272. // In general, prefer use of `Condition` and the `Mutex` member functions
  273. // listed below over `CondVar`. When there are multiple threads waiting on
  274. // distinctly different conditions, however, a battery of `CondVar`s may be
  275. // more efficient. This section discusses use of `Condition` objects.
  276. //
  277. // `Mutex` contains member functions for performing lock operations only under
  278. // certain conditions, expressed as objects of class `Condition`. For
  279. // correctness, the `Condition` must return a boolean that is a pure function,
  280. // only of state protected by the `Mutex`. The condition must be invariant
  281. // w.r.t. environmental state such as thread, cpu id, or time, and must be
  282. // `noexcept`. The condition will always be invoked with the mutex held in at
  283. // least read mode, so it should not block for long periods or sleep on a timer.
  284. //
  285. // Since a condition must not depend directly on the current time, use
  286. // `*WithTimeout()` member function variants to make your condition
  287. // effectively true after a given duration, or `*WithDeadline()` variants to
  288. // make your condition effectively true after a given time.
  289. //
  290. // The condition function should have no side-effects aside from debug
  291. // logging; as a special exception, the function may acquire other mutexes
  292. // provided it releases all those that it acquires. (This exception was
  293. // required to allow logging.)
  294. // Mutex::Await()
  295. //
  296. // Unlocks this `Mutex` and blocks until simultaneously both `cond` is `true`
  297. // and this `Mutex` can be reacquired, then reacquires this `Mutex` in the
  298. // same mode in which it was previously held. If the condition is initially
  299. // `true`, `Await()` *may* skip the release/re-acquire step.
  300. //
  301. // `Await()` requires that this thread holds this `Mutex` in some mode.
  302. void Await(const Condition& cond) {
  303. AwaitCommon(cond, synchronization_internal::KernelTimeout::Never());
  304. }
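//
// Example (an illustrative sketch; `JobQueue` and `Job` are hypothetical):
// `TakeOne()` sleeps until the queue is non-empty, releasing `mu_` while it
// waits, using the lambda-to-function-pointer `Condition` form documented
// below.
//
//   class JobQueue {
//    public:
//     void Add(Job j) {
//       y_absl::MutexLock l(&mu_);
//       jobs_.push_back(std::move(j));
//     }
//     Job TakeOne() {
//       y_absl::MutexLock l(&mu_);
//       mu_.Await(y_absl::Condition(
//           +[](std::deque<Job>* jobs) { return !jobs->empty(); }, &jobs_));
//       Job j = std::move(jobs_.front());
//       jobs_.pop_front();
//       return j;
//     }
//    private:
//     y_absl::Mutex mu_;
//     std::deque<Job> jobs_ Y_ABSL_GUARDED_BY(mu_);
//   };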
  305. // Mutex::LockWhen()
  306. // Mutex::ReaderLockWhen()
  307. // Mutex::WriterLockWhen()
  308. //
  309. // Blocks until simultaneously both `cond` is `true` and this `Mutex` can
  310. // be acquired, then atomically acquires this `Mutex`. `LockWhen()` is
  311. // logically equivalent to `*Lock(); Await();` though they may have different
  312. // performance characteristics.
  313. void LockWhen(const Condition& cond) Y_ABSL_EXCLUSIVE_LOCK_FUNCTION() {
  314. LockWhenCommon(cond, synchronization_internal::KernelTimeout::Never(),
  315. true);
  316. }
  317. void ReaderLockWhen(const Condition& cond) Y_ABSL_SHARED_LOCK_FUNCTION() {
  318. LockWhenCommon(cond, synchronization_internal::KernelTimeout::Never(),
  319. false);
  320. }
  321. void WriterLockWhen(const Condition& cond) Y_ABSL_EXCLUSIVE_LOCK_FUNCTION() {
  322. this->LockWhen(cond);
  323. }
  324. // ---------------------------------------------------------------------------
  325. // Mutex Variants with Timeouts/Deadlines
  326. // ---------------------------------------------------------------------------
  327. // Mutex::AwaitWithTimeout()
  328. // Mutex::AwaitWithDeadline()
  329. //
  330. // Unlocks this `Mutex` and blocks until simultaneously:
  331. // - either `cond` is true or the {timeout has expired, deadline has passed}
  332. // and
  333. // - this `Mutex` can be reacquired,
  334. // then reacquire this `Mutex` in the same mode in which it was previously
  335. // held, returning `true` iff `cond` is `true` on return.
  336. //
  337. // If the condition is initially `true`, the implementation *may* skip the
  338. // release/re-acquire step and return immediately.
  339. //
  340. // Deadlines in the past are equivalent to an immediate deadline.
  341. // Negative timeouts are equivalent to a zero timeout.
  342. //
  343. // This method requires that this thread holds this `Mutex` in some mode.
  344. bool AwaitWithTimeout(const Condition& cond, y_absl::Duration timeout) {
  345. return AwaitCommon(cond, synchronization_internal::KernelTimeout{timeout});
  346. }
  347. bool AwaitWithDeadline(const Condition& cond, y_absl::Time deadline) {
  348. return AwaitCommon(cond, synchronization_internal::KernelTimeout{deadline});
  349. }
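//
// Example (an illustrative sketch, continuing the hypothetical `JobQueue`
// above): wait up to one second for work before giving up.
//
//   bool JobQueue::TryTakeOne(Job* out) {
//     y_absl::MutexLock l(&mu_);
//     bool got_work = mu_.AwaitWithTimeout(
//         y_absl::Condition(
//             +[](std::deque<Job>* jobs) { return !jobs->empty(); }, &jobs_),
//         y_absl::Seconds(1));
//     if (!got_work) return false;  // Timed out; the queue is still empty.
//     *out = std::move(jobs_.front());
//     jobs_.pop_front();
//     return true;
//   }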
  350. // Mutex::LockWhenWithTimeout()
  351. // Mutex::ReaderLockWhenWithTimeout()
  352. // Mutex::WriterLockWhenWithTimeout()
  353. //
  354. // Blocks until simultaneously both:
  355. // - either `cond` is `true` or the timeout has expired, and
  356. // - this `Mutex` can be acquired,
  357. // then atomically acquires this `Mutex`, returning `true` iff `cond` is
  358. // `true` on return.
  359. //
  360. // Negative timeouts are equivalent to a zero timeout.
  361. bool LockWhenWithTimeout(const Condition& cond, y_absl::Duration timeout)
  362. Y_ABSL_EXCLUSIVE_LOCK_FUNCTION() {
  363. return LockWhenCommon(
  364. cond, synchronization_internal::KernelTimeout{timeout}, true);
  365. }
  366. bool ReaderLockWhenWithTimeout(const Condition& cond, y_absl::Duration timeout)
  367. Y_ABSL_SHARED_LOCK_FUNCTION() {
  368. return LockWhenCommon(
  369. cond, synchronization_internal::KernelTimeout{timeout}, false);
  370. }
  371. bool WriterLockWhenWithTimeout(const Condition& cond, y_absl::Duration timeout)
  372. Y_ABSL_EXCLUSIVE_LOCK_FUNCTION() {
  373. return this->LockWhenWithTimeout(cond, timeout);
  374. }
  375. // Mutex::LockWhenWithDeadline()
  376. // Mutex::ReaderLockWhenWithDeadline()
  377. // Mutex::WriterLockWhenWithDeadline()
  378. //
  379. // Blocks until simultaneously both:
  380. // - either `cond` is `true` or the deadline has been passed, and
  381. // - this `Mutex` can be acquired,
  382. // then atomically acquires this Mutex, returning `true` iff `cond` is `true`
  383. // on return.
  384. //
  385. // Deadlines in the past are equivalent to an immediate deadline.
  386. bool LockWhenWithDeadline(const Condition& cond, y_absl::Time deadline)
  387. Y_ABSL_EXCLUSIVE_LOCK_FUNCTION() {
  388. return LockWhenCommon(
  389. cond, synchronization_internal::KernelTimeout{deadline}, true);
  390. }
  391. bool ReaderLockWhenWithDeadline(const Condition& cond, y_absl::Time deadline)
  392. Y_ABSL_SHARED_LOCK_FUNCTION() {
  393. return LockWhenCommon(
  394. cond, synchronization_internal::KernelTimeout{deadline}, false);
  395. }
  396. bool WriterLockWhenWithDeadline(const Condition& cond, y_absl::Time deadline)
  397. Y_ABSL_EXCLUSIVE_LOCK_FUNCTION() {
  398. return this->LockWhenWithDeadline(cond, deadline);
  399. }
  400. // ---------------------------------------------------------------------------
  401. // Debug Support: Invariant Checking, Deadlock Detection, Logging.
  402. // ---------------------------------------------------------------------------
  403. // Mutex::EnableInvariantDebugging()
  404. //
  405. // If `invariant` != nullptr and invariant debugging has been enabled globally,
  406. // cause `(*invariant)(arg)` to be called at moments when the invariant for
  407. // this `Mutex` should hold (for example: just after acquire, just before
  408. // release).
  409. //
  410. // The routine `invariant` should have no side-effects since it is not
  411. // guaranteed how many times it will be called; it should check the invariant
  412. // and crash if it does not hold. Enabling global invariant debugging may
  413. // substantially reduce `Mutex` performance; it should be set only for
  414. // non-production runs. Optimization options may also disable invariant
  415. // checks.
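//
// Example (an illustrative sketch; `Ledger`, `debits_`, and `credits_` are
// hypothetical, and `assert()` stands in for whatever check/crash mechanism
// you prefer): register an invariant that is checked around lock operations
// when invariant debugging is enabled globally.
//
//   // Inside class Ledger:
//   static void CheckBalanced(void* arg) {
//     auto* ledger = static_cast<Ledger*>(arg);
//     assert(ledger->debits_ == ledger->credits_);
//   }
//
//   Ledger::Ledger() {
//     mu_.EnableInvariantDebugging(&Ledger::CheckBalanced, this);
//   }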
  416. void EnableInvariantDebugging(void (*invariant)(void*), void* arg);
  417. // Mutex::EnableDebugLog()
  418. //
  419. // Cause all subsequent uses of this `Mutex` to be logged via
  420. // `Y_ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if no previous
  421. // call to `EnableInvariantDebugging()` or `EnableDebugLog()` has been made.
  422. //
  423. // Note: This method substantially reduces `Mutex` performance.
  424. void EnableDebugLog(const char* name);
  425. // Deadlock detection
  426. // Mutex::ForgetDeadlockInfo()
  427. //
  428. // Forget any deadlock-detection information previously gathered
  429. // about this `Mutex`. Call this method in debug mode when the lock ordering
  430. // of a `Mutex` changes.
  431. void ForgetDeadlockInfo();
  432. // Mutex::AssertNotHeld()
  433. //
  434. // Return immediately if this thread does not hold this `Mutex` in any
  435. // mode; otherwise, may report an error (typically by crashing with a
  436. // diagnostic), or may return immediately.
  437. //
  438. // Currently this check is performed only if all of the following
  439. // conditions are true:
  440. //    - the build is in debug mode
  441. //    - SetMutexDeadlockDetectionMode() has been set to kReport or kAbort
  442. //    - the number of locks concurrently held by this thread is not large.
  443. void AssertNotHeld() const;
  444. // Special cases.
  445. // A `MuHow` is a constant that indicates how a lock should be acquired.
  446. // Internal implementation detail. Clients should ignore.
  447. typedef const struct MuHowS* MuHow;
  448. // Mutex::InternalAttemptToUseMutexInFatalSignalHandler()
  449. //
  450. // Causes the `Mutex` implementation to prepare itself for re-entry caused by
  451. // future use of `Mutex` within a fatal signal handler. This method is
  452. // intended for use only for last-ditch attempts to log crash information.
  453. // It does not guarantee that attempts to use Mutexes within the handler will
  454. // not deadlock; it merely makes other faults less likely.
  455. //
  456. // WARNING: This routine must be invoked from a signal handler, and the
  457. // signal handler must either loop forever or terminate the process.
  458. // Attempts to return from (or `longjmp` out of) the signal handler once this
  459. // call has been made may cause arbitrary program behaviour including
  460. // crashes and deadlocks.
  461. static void InternalAttemptToUseMutexInFatalSignalHandler();
  462. private:
  463. std::atomic<intptr_t> mu_; // The Mutex state.
  464. // Post()/Wait() versus associated PerThreadSem; in class for required
  465. // friendship with PerThreadSem.
  466. static void IncrementSynchSem(Mutex* mu, base_internal::PerThreadSynch* w);
  467. static bool DecrementSynchSem(Mutex* mu, base_internal::PerThreadSynch* w,
  468. synchronization_internal::KernelTimeout t);
  469. // slow path acquire
  470. void LockSlowLoop(SynchWaitParams* waitp, int flags);
  471. // wrappers around LockSlowLoop()
  472. bool LockSlowWithDeadline(MuHow how, const Condition* cond,
  473. synchronization_internal::KernelTimeout t,
  474. int flags);
  475. void LockSlow(MuHow how, const Condition* cond,
  476. int flags) Y_ABSL_ATTRIBUTE_COLD;
  477. // slow path release
  478. void UnlockSlow(SynchWaitParams* waitp) Y_ABSL_ATTRIBUTE_COLD;
  479. // TryLock slow path.
  480. bool TryLockSlow();
  481. // ReaderTryLock slow path.
  482. bool ReaderTryLockSlow();
  483. // Common code between Await() and AwaitWithTimeout/Deadline()
  484. bool AwaitCommon(const Condition& cond,
  485. synchronization_internal::KernelTimeout t);
  486. bool LockWhenCommon(const Condition& cond,
  487. synchronization_internal::KernelTimeout t, bool write);
  488. // Attempt to remove thread s from queue.
  489. void TryRemove(base_internal::PerThreadSynch* s);
  490. // Block a thread on mutex.
  491. void Block(base_internal::PerThreadSynch* s);
  492. // Wake a thread; return successor.
  493. base_internal::PerThreadSynch* Wakeup(base_internal::PerThreadSynch* w);
  494. void Dtor();
  495. friend class CondVar; // for access to Trans()/Fer().
  496. void Trans(MuHow how); // used for CondVar->Mutex transfer
  497. void Fer(
  498. base_internal::PerThreadSynch* w); // used for CondVar->Mutex transfer
  499. // Catch the error of writing Mutex when intending MutexLock.
  500. explicit Mutex(const volatile Mutex* /*ignored*/) {}
  501. Mutex(const Mutex&) = delete;
  502. Mutex& operator=(const Mutex&) = delete;
  503. };
  504. // -----------------------------------------------------------------------------
  505. // Mutex RAII Wrappers
  506. // -----------------------------------------------------------------------------
  507. // MutexLock
  508. //
  509. // `MutexLock` is a helper class, which acquires and releases a `Mutex` via
  510. // RAII.
  511. //
  512. // Example:
  513. //
  514. // class Foo {
  515. // public:
  516. // Foo::Bar* Baz() {
  517. // MutexLock lock(&mu_);
  518. // ...
  519. // return bar;
  520. // }
  521. //
  522. // private:
  523. // Mutex mu_;
  524. // };
  525. class Y_ABSL_SCOPED_LOCKABLE MutexLock {
  526. public:
  527. // Constructors
  528. // Calls `mu->Lock()` and returns when that call returns. That is, `*mu` is
  529. // guaranteed to be locked when this object is constructed. Requires that
  530. // `mu` be dereferenceable.
  531. explicit MutexLock(Mutex* mu) Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
  532. this->mu_->Lock();
  533. }
  534. // Like above, but calls `mu->LockWhen(cond)` instead. That is, in addition to
  535. // the above, the condition given by `cond` is also guaranteed to hold when
  536. // this object is constructed.
  537. explicit MutexLock(Mutex* mu, const Condition& cond)
  538. Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
  539. : mu_(mu) {
  540. this->mu_->LockWhen(cond);
  541. }
  542. MutexLock(const MutexLock&) = delete; // NOLINT(runtime/mutex)
  543. MutexLock(MutexLock&&) = delete; // NOLINT(runtime/mutex)
  544. MutexLock& operator=(const MutexLock&) = delete;
  545. MutexLock& operator=(MutexLock&&) = delete;
  546. ~MutexLock() Y_ABSL_UNLOCK_FUNCTION() { this->mu_->Unlock(); }
  547. private:
  548. Mutex* const mu_;
  549. };
  550. // ReaderMutexLock
  551. //
  552. // The `ReaderMutexLock` is a helper class, like `MutexLock`, which acquires and
  553. // releases a shared lock on a `Mutex` via RAII.
  554. class Y_ABSL_SCOPED_LOCKABLE ReaderMutexLock {
  555. public:
  556. explicit ReaderMutexLock(Mutex* mu) Y_ABSL_SHARED_LOCK_FUNCTION(mu) : mu_(mu) {
  557. mu->ReaderLock();
  558. }
  559. explicit ReaderMutexLock(Mutex* mu, const Condition& cond)
  560. Y_ABSL_SHARED_LOCK_FUNCTION(mu)
  561. : mu_(mu) {
  562. mu->ReaderLockWhen(cond);
  563. }
  564. ReaderMutexLock(const ReaderMutexLock&) = delete;
  565. ReaderMutexLock(ReaderMutexLock&&) = delete;
  566. ReaderMutexLock& operator=(const ReaderMutexLock&) = delete;
  567. ReaderMutexLock& operator=(ReaderMutexLock&&) = delete;
  568. ~ReaderMutexLock() Y_ABSL_UNLOCK_FUNCTION() { this->mu_->ReaderUnlock(); }
  569. private:
  570. Mutex* const mu_;
  571. };
  572. // WriterMutexLock
  573. //
  574. // The `WriterMutexLock` is a helper class, like `MutexLock`, which acquires and
  575. // releases a write (exclusive) lock on a `Mutex` via RAII.
  576. class Y_ABSL_SCOPED_LOCKABLE WriterMutexLock {
  577. public:
  578. explicit WriterMutexLock(Mutex* mu) Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
  579. : mu_(mu) {
  580. mu->WriterLock();
  581. }
  582. explicit WriterMutexLock(Mutex* mu, const Condition& cond)
  583. Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
  584. : mu_(mu) {
  585. mu->WriterLockWhen(cond);
  586. }
  587. WriterMutexLock(const WriterMutexLock&) = delete;
  588. WriterMutexLock(WriterMutexLock&&) = delete;
  589. WriterMutexLock& operator=(const WriterMutexLock&) = delete;
  590. WriterMutexLock& operator=(WriterMutexLock&&) = delete;
  591. ~WriterMutexLock() Y_ABSL_UNLOCK_FUNCTION() { this->mu_->WriterUnlock(); }
  592. private:
  593. Mutex* const mu_;
  594. };
  595. // -----------------------------------------------------------------------------
  596. // Condition
  597. // -----------------------------------------------------------------------------
  598. //
  599. // `Mutex` contains a number of member functions which take a `Condition` as an
  600. // argument; clients can wait for conditions to become `true` before attempting
  601. // to acquire the mutex. These sections are known as "condition critical"
  602. // sections. To use a `Condition`, you simply need to construct it and use it
  603. // within an appropriate `Mutex` member function; everything else in the
  604. // `Condition` class is an implementation detail.
  605. //
  606. // A `Condition` is specified as a function pointer which returns a boolean.
  607. // `Condition` functions should be pure functions -- their results should depend
  608. // only on passed arguments, should not consult any external state (such as
  609. // clocks), and should have no side-effects, aside from debug logging. Any
  610. // objects that the function may access should be limited to those which are
  611. // constant while the mutex is blocked on the condition (e.g. a stack variable),
  612. // or objects of state protected explicitly by the mutex.
  613. //
  614. // No matter which construction is used for `Condition`, the underlying
  615. // function pointer / functor / callable must not throw any
  616. // exceptions. Correctness of `Mutex` / `Condition` is not guaranteed in
  617. // the face of a throwing `Condition`. (When Abseil is allowed to depend
  618. // on C++17, these function pointers will be explicitly marked
  619. // `noexcept`; until then this requirement cannot be enforced in the
  620. // type system.)
  621. //
  622. // Note: to use a `Condition`, you need only construct it and pass it to a
  623. // suitable `Mutex` member function, such as `Mutex::Await()`, or to the
  624. // constructor of one of the scope guard classes.
  625. //
  626. // Example using LockWhen/Unlock:
  627. //
  628. // // assume count_ is not an internal reference count
  629. // int count_ Y_ABSL_GUARDED_BY(mu_);
  630. // Condition count_is_zero(+[](int *count) { return *count == 0; }, &count_);
  631. //
  632. // mu_.LockWhen(count_is_zero);
  633. // // ...
  634. // mu_.Unlock();
  635. //
  636. // Example using a scope guard:
  637. //
  638. // {
  639. // MutexLock lock(&mu_, count_is_zero);
  640. // // ...
  641. // }
  642. //
  643. // When multiple threads are waiting on exactly the same condition, make sure
  644. // that they are constructed with the same parameters (same pointer to function
  645. // + arg, or same pointer to object + method), so that the mutex implementation
  646. // can avoid redundantly evaluating the same condition for each thread.
  647. class Condition {
  648. public:
  649. // A Condition that returns the result of "(*func)(arg)"
  650. Condition(bool (*func)(void*), void* arg);
  651. // Templated version for people who are averse to casts.
  652. //
  653. // To use a lambda, prepend it with unary plus, which converts the lambda
  654. // into a function pointer:
  655. // Condition(+[](T* t) { return ...; }, arg).
  656. //
  657. // Note: lambdas in this case must contain no bound variables.
  658. //
  659. // See class comment for performance advice.
  660. template <typename T>
  661. Condition(bool (*func)(T*), T* arg);
  662. // Same as above, but allows for cases where `arg` comes from a pointer that
  663. // is convertible to the function parameter type `T*` but not an exact match.
  664. //
  665. // For example, the argument might be `X*` but the function takes `const X*`,
  666. // or the argument might be `Derived*` while the function takes `Base*`, and
  667. // so on for cases where the argument pointer can be implicitly converted.
  668. //
  669. // Implementation notes: This constructor overload is required in addition to
  670. // the one above to allow deduction of `T` from `arg` for cases such as where
  671. // a function template is passed as `func`. Also, the dummy `typename = void`
  672. // template parameter exists just to work around a MSVC mangling bug.
  673. template <typename T, typename = void>
  674. Condition(bool (*func)(T*),
  675. typename y_absl::internal::type_identity<T>::type* arg);
  676. // Templated version for invoking a method that returns a `bool`.
  677. //
  678. // `Condition(object, &Class::Method)` constructs a `Condition` that evaluates
  679. // `object->Method()`.
  680. //
  681. // Implementation Note: `y_absl::internal::type_identity` is used to allow
  682. // methods to come from base classes. A simpler signature like
  683. // `Condition(T*, bool (T::*)())` does not suffice.
  684. template <typename T>
  685. Condition(T* object,
  686. bool (y_absl::internal::type_identity<T>::type::*method)());
  687. // Same as above, for const members
  688. template <typename T>
  689. Condition(const T* object,
  690. bool (y_absl::internal::type_identity<T>::type::*method)() const);
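//
// Example (an illustrative sketch; `Buffer` and its members are hypothetical):
// wait until `this->Empty()` returns true, using the method-pointer form of
// `Condition`.
//
//   class Buffer {
//    public:
//     bool Empty() const {
//       mu_.AssertReaderHeld();  // For annotalysis.
//       return size_ == 0;
//     }
//     void WaitUntilEmpty() {
//       y_absl::MutexLock l(&mu_);
//       mu_.Await(y_absl::Condition(this, &Buffer::Empty));
//     }
//    private:
//     y_absl::Mutex mu_;
//     int size_ Y_ABSL_GUARDED_BY(mu_) = 0;
//   };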
  691. // A Condition that returns the value of `*cond`
  692. explicit Condition(const bool* cond);
  693. // Templated version for invoking a functor that returns a `bool`.
  694. // This approach accepts pointers to non-mutable lambdas, `std::function`,
  695. // the result of `std::bind`, and user-defined functors that define
  696. // `bool F::operator()() const`.
  697. //
  698. // Example:
  699. //
  700. // auto reached = [this, current]() {
  701. // mu_.AssertReaderHeld(); // For annotalysis.
  702. // return processed_ >= current;
  703. // };
  704. // mu_.Await(Condition(&reached));
  705. //
  706. // NOTE: never use "mu_.AssertHeld()" instead of "mu_.AssertReaderHeld()" in
  707. // the lambda as it may be called when the mutex is being unlocked from a
  708. // scope holding only a reader lock, which will cause the assertion to fail
  709. // and crash the binary.
  710. // See class comment for performance advice. In particular, if there
  711. // might be more than one waiter for the same condition, make sure
  712. // that all waiters construct the condition with the same pointers.
  713. // Implementation note: The second template parameter ensures that this
  714. // constructor doesn't participate in overload resolution if T doesn't have
  715. // `bool operator() const`.
  716. template <typename T, typename E = decltype(static_cast<bool (T::*)() const>(
  717. &T::operator()))>
  718. explicit Condition(const T* obj)
  719. : Condition(obj, static_cast<bool (T::*)() const>(&T::operator())) {}
  720. // A Condition that always returns `true`.
  721. // kTrue is only useful in a narrow set of circumstances, mostly when
  722. // it's passed conditionally. For example:
  723. //
  724. // mu.LockWhen(some_flag ? kTrue : SomeOtherCondition);
  725. //
  726. // Note: {LockWhen,Await}With{Deadline,Timeout} methods with the kTrue condition
  727. // don't return immediately when the timeout happens; they still block until
  728. // the Mutex becomes available. The return value of these methods does
  729. // not indicate if the timeout was reached; rather it indicates whether or
  730. // not the condition is true.
  731. Y_ABSL_CONST_INIT static const Condition kTrue;
  732. // Evaluates the condition.
  733. bool Eval() const;
  734. // Returns `true` if the two conditions are guaranteed to return the same
  735. // value if evaluated at the same time, `false` if the evaluation *may* return
  736. // different results.
  737. //
  738. // Two `Condition` values are guaranteed equal if both their `func` and `arg`
  739. // components are the same. A null pointer is equivalent to a `true`
  740. // condition.
  741. static bool GuaranteedEqual(const Condition* a, const Condition* b);
  742. private:
  743. // Sizing an allocation for a method pointer can be subtle. Under the Itanium
  744. // ABI, a method pointer has a predictable, uniform size. Under the MSVC ABI,
  745. // on the other hand, method pointer sizes vary based on the inheritance
  746. // structure of the class. Specifically, method pointers from classes with
  747. // multiple inheritance are bigger than those of classes with single
  748. // inheritance. Other variations also exist.
  749. #ifndef _MSC_VER
  750. // Allocation for a function pointer or method pointer.
  751. // The {0} initializer ensures that all unused bytes of this buffer are
  752. // always zeroed out. This is necessary, because GuaranteedEqual() compares
  753. // all of the bytes, unaware of which bytes are relevant to a given `eval_`.
  754. using MethodPtr = bool (Condition::*)();
  755. char callback_[sizeof(MethodPtr)] = {0};
  756. #else
  757. // It is well known that the largest MSVC pointer-to-member is 24 bytes. This
  758. // may be the largest known pointer-to-member of any platform. For this
  759. // reason we will allocate 24 bytes for MSVC platform toolchains.
  760. char callback_[24] = {0};
  761. #endif
  762. // Function with which to evaluate callbacks and/or arguments.
  763. bool (*eval_)(const Condition*) = nullptr;
  764. // Either an argument for a function call or an object for a method call.
  765. void* arg_ = nullptr;
  766. // Various functions eval_ can point to:
  767. static bool CallVoidPtrFunction(const Condition*);
  768. template <typename T>
  769. static bool CastAndCallFunction(const Condition* c);
  770. template <typename T, typename ConditionMethodPtr>
  771. static bool CastAndCallMethod(const Condition* c);
  772. // Helper methods for storing, validating, and reading callback arguments.
  773. template <typename T>
  774. inline void StoreCallback(T callback) {
  775. static_assert(
  776. sizeof(callback) <= sizeof(callback_),
  777. "An overlarge pointer was passed as a callback to Condition.");
  778. std::memcpy(callback_, &callback, sizeof(callback));
  779. }
  780. template <typename T>
  781. inline void ReadCallback(T* callback) const {
  782. std::memcpy(callback, callback_, sizeof(*callback));
  783. }
  784. static bool AlwaysTrue(const Condition*) { return true; }
  785. // Used only to create kTrue.
  786. constexpr Condition() : eval_(AlwaysTrue), arg_(nullptr) {}
  787. };
  788. // -----------------------------------------------------------------------------
  789. // CondVar
  790. // -----------------------------------------------------------------------------
  791. //
  792. // A condition variable, reflecting state evaluated separately outside of the
  793. // `Mutex` object, which can be signaled to wake callers.
  794. // This class is not normally needed; use `Mutex` member functions such as
  795. // `Mutex::Await()` and intrinsic `Condition` abstractions. In rare cases
  796. // with many threads and many conditions, `CondVar` may be faster.
  797. //
  798. // The implementation may deliver signals to any condition variable at
  799. // any time, even when no call to `Signal()` or `SignalAll()` is made; as a
  800. // result, upon being awoken, you must check the logical condition you have
  801. // been waiting upon.
  802. //
  803. // Examples:
  804. //
  805. // Usage for a thread T waiting for some condition C protected by mutex mu:
  806. // mu.Lock();
  807. // while (!C) { cv->Wait(&mu); } // releases and reacquires mu
  808. // // C holds; process data
  809. // mu.Unlock();
  810. //
  811. // Usage to wake T is:
  812. // mu.Lock();
  813. // // process data, possibly establishing C
  814. // if (C) { cv->Signal(); }
  815. // mu.Unlock();
  816. //
  817. // If C may be useful to more than one waiter, use `SignalAll()` instead of
  818. // `Signal()`.
  819. //
  820. // With this implementation it is efficient to use `Signal()/SignalAll()` inside
  821. // the locked region; this usage can make reasoning about your program easier.
  822. //
  823. class CondVar {
  824. public:
  825. // A `CondVar` allocated on the heap or on the stack can use this
  826. // constructor.
  827. CondVar();
  828. // CondVar::Wait()
  829. //
  830. // Atomically releases a `Mutex` and blocks on this condition variable.
  831. // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
  832. // spurious wakeup), then reacquires the `Mutex` and returns.
  833. //
  834. // Requires and ensures that the current thread holds the `Mutex`.
  835. void Wait(Mutex* mu) {
  836. WaitCommon(mu, synchronization_internal::KernelTimeout::Never());
  837. }
  838. // CondVar::WaitWithTimeout()
  839. //
  840. // Atomically releases a `Mutex` and blocks on this condition variable.
  841. // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
  842. // spurious wakeup), or until the timeout has expired, then reacquires
  843. // the `Mutex` and returns.
  844. //
  845. // Returns true if the timeout has expired without this `CondVar`
  846. // being signalled in any manner. If both the timeout has expired
  847. // and this `CondVar` has been signalled, the implementation is free
  848. // to return `true` or `false`.
  849. //
  850. // Requires and ensures that the current thread holds the `Mutex`.
  851. bool WaitWithTimeout(Mutex* mu, y_absl::Duration timeout) {
  852. return WaitCommon(mu, synchronization_internal::KernelTimeout(timeout));
  853. }
  854. // CondVar::WaitWithDeadline()
  855. //
  856. // Atomically releases a `Mutex` and blocks on this condition variable.
  857. // Waits until awakened by a call to `Signal()` or `SignalAll()` (or a
  858. // spurious wakeup), or until the deadline has passed, then reacquires
  859. // the `Mutex` and returns.
  860. //
  861. // Deadlines in the past are equivalent to an immediate deadline.
  862. //
  863. // Returns true if the deadline has passed without this `CondVar`
  864. // being signalled in any manner. If both the deadline has passed
  865. // and this `CondVar` has been signalled, the implementation is free
  866. // to return `true` or `false`.
  867. //
  868. // Requires and ensures that the current thread holds the `Mutex`.
  869. bool WaitWithDeadline(Mutex* mu, y_absl::Time deadline) {
  870. return WaitCommon(mu, synchronization_internal::KernelTimeout(deadline));
  871. }
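//
// Example (an illustrative sketch; `mu_`, `cv_`, and `ready_` are hypothetical
// members): a timed wait that rechecks the predicate after every wakeup,
// since wakeups may be spurious.
//
//   bool WaitUntilReady(y_absl::Time deadline) {
//     y_absl::MutexLock l(&mu_);
//     while (!ready_) {
//       if (cv_.WaitWithDeadline(&mu_, deadline)) {
//         break;  // Deadline passed without a signal.
//       }
//     }
//     return ready_;
//   }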
  872. // CondVar::Signal()
  873. //
  874. // Signal this `CondVar`; wake at least one waiter if one exists.
  875. void Signal();
  876. // CondVar::SignalAll()
  877. //
  878. // Signal this `CondVar`; wake all waiters.
  879. void SignalAll();
  880. // CondVar::EnableDebugLog()
  881. //
  882. // Causes all subsequent uses of this `CondVar` to be logged via
  883. // `Y_ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if `name != 0`.
  884. // Note: this method substantially reduces `CondVar` performance.
  885. void EnableDebugLog(const char* name);
  886. private:
  887. bool WaitCommon(Mutex* mutex, synchronization_internal::KernelTimeout t);
  888. void Remove(base_internal::PerThreadSynch* s);
  889. std::atomic<intptr_t> cv_; // Condition variable state.
  890. CondVar(const CondVar&) = delete;
  891. CondVar& operator=(const CondVar&) = delete;
  892. };
  893. // Variants of MutexLock.
  894. //
  895. // If you find yourself using one of these, consider instead using
  896. // Mutex::Unlock() and/or if-statements for clarity.
  897. // MutexLockMaybe
  898. //
  899. // MutexLockMaybe is like MutexLock, but is a no-op when mu is null.
  900. class Y_ABSL_SCOPED_LOCKABLE MutexLockMaybe {
  901. public:
  902. explicit MutexLockMaybe(Mutex* mu) Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
  903. : mu_(mu) {
  904. if (this->mu_ != nullptr) {
  905. this->mu_->Lock();
  906. }
  907. }
  908. explicit MutexLockMaybe(Mutex* mu, const Condition& cond)
  909. Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
  910. : mu_(mu) {
  911. if (this->mu_ != nullptr) {
  912. this->mu_->LockWhen(cond);
  913. }
  914. }
  915. ~MutexLockMaybe() Y_ABSL_UNLOCK_FUNCTION() {
  916. if (this->mu_ != nullptr) {
  917. this->mu_->Unlock();
  918. }
  919. }
  920. private:
  921. Mutex* const mu_;
  922. MutexLockMaybe(const MutexLockMaybe&) = delete;
  923. MutexLockMaybe(MutexLockMaybe&&) = delete;
  924. MutexLockMaybe& operator=(const MutexLockMaybe&) = delete;
  925. MutexLockMaybe& operator=(MutexLockMaybe&&) = delete;
  926. };
  927. // ReleasableMutexLock
  928. //
  929. // ReleasableMutexLock is like MutexLock, but permits `Release()` of its
  930. // mutex before destruction. `Release()` may be called at most once.
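//
// Example (an illustrative sketch; `cache_` and `ComputeSlowly()` are
// hypothetical): release the lock early once the protected lookup is done.
//
//   std::string GetOrCompute(const std::string& key) {
//     y_absl::ReleasableMutexLock l(&mu_);
//     auto it = cache_.find(key);
//     if (it != cache_.end()) return it->second;
//     l.Release();  // The slow computation does not need the lock.
//     return ComputeSlowly(key);
//   }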
  931. class Y_ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
  932. public:
  933. explicit ReleasableMutexLock(Mutex* mu) Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
  934. : mu_(mu) {
  935. this->mu_->Lock();
  936. }
  937. explicit ReleasableMutexLock(Mutex* mu, const Condition& cond)
  938. Y_ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
  939. : mu_(mu) {
  940. this->mu_->LockWhen(cond);
  941. }
  942. ~ReleasableMutexLock() Y_ABSL_UNLOCK_FUNCTION() {
  943. if (this->mu_ != nullptr) {
  944. this->mu_->Unlock();
  945. }
  946. }
  947. void Release() Y_ABSL_UNLOCK_FUNCTION();
  948. private:
  949. Mutex* mu_;
  950. ReleasableMutexLock(const ReleasableMutexLock&) = delete;
  951. ReleasableMutexLock(ReleasableMutexLock&&) = delete;
  952. ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete;
  953. ReleasableMutexLock& operator=(ReleasableMutexLock&&) = delete;
  954. };
  955. inline Mutex::Mutex() : mu_(0) {
  956. Y_ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
  957. }
  958. inline constexpr Mutex::Mutex(y_absl::ConstInitType) : mu_(0) {}
  959. #if !defined(__APPLE__) && !defined(Y_ABSL_BUILD_DLL)
  960. Y_ABSL_ATTRIBUTE_ALWAYS_INLINE
  961. inline Mutex::~Mutex() { Dtor(); }
  962. #endif
  963. #if defined(NDEBUG) && !defined(Y_ABSL_HAVE_THREAD_SANITIZER)
  964. // Use default (empty) destructor in release build for performance reasons.
  965. // We need to mark both Dtor and ~Mutex as always inline for inconsistent
  966. // builds that use both NDEBUG and !NDEBUG with dynamic libraries. In these
  967. // cases we want the empty functions to dissolve entirely rather than being
  968. // exported from dynamic libraries and potentially override the non-empty ones.
  969. Y_ABSL_ATTRIBUTE_ALWAYS_INLINE
  970. inline void Mutex::Dtor() {}
  971. #endif
  972. inline CondVar::CondVar() : cv_(0) {}
  973. // static
  974. template <typename T, typename ConditionMethodPtr>
  975. bool Condition::CastAndCallMethod(const Condition* c) {
  976. T* object = static_cast<T*>(c->arg_);
  977. ConditionMethodPtr condition_method_pointer;
  978. c->ReadCallback(&condition_method_pointer);
  979. return (object->*condition_method_pointer)();
  980. }
  981. // static
  982. template <typename T>
  983. bool Condition::CastAndCallFunction(const Condition* c) {
  984. bool (*function)(T*);
  985. c->ReadCallback(&function);
  986. T* argument = static_cast<T*>(c->arg_);
  987. return (*function)(argument);
  988. }
  989. template <typename T>
  990. inline Condition::Condition(bool (*func)(T*), T* arg)
  991. : eval_(&CastAndCallFunction<T>),
  992. arg_(const_cast<void*>(static_cast<const void*>(arg))) {
  993. static_assert(sizeof(&func) <= sizeof(callback_),
  994. "An overlarge function pointer was passed to Condition.");
  995. StoreCallback(func);
  996. }
  997. template <typename T, typename>
  998. inline Condition::Condition(
  999. bool (*func)(T*), typename y_absl::internal::type_identity<T>::type* arg)
  1000. // Just delegate to the overload above.
  1001. : Condition(func, arg) {}
  1002. template <typename T>
  1003. inline Condition::Condition(
  1004. T* object, bool (y_absl::internal::type_identity<T>::type::*method)())
  1005. : eval_(&CastAndCallMethod<T, decltype(method)>), arg_(object) {
  1006. static_assert(sizeof(&method) <= sizeof(callback_),
  1007. "An overlarge method pointer was passed to Condition.");
  1008. StoreCallback(method);
  1009. }
  1010. template <typename T>
  1011. inline Condition::Condition(
  1012. const T* object,
  1013. bool (y_absl::internal::type_identity<T>::type::*method)() const)
  1014. : eval_(&CastAndCallMethod<const T, decltype(method)>),
  1015. arg_(reinterpret_cast<void*>(const_cast<T*>(object))) {
  1016. StoreCallback(method);
  1017. }
  1018. // Register hooks for profiling support.
  1019. //
  1020. // The function pointer registered here will be called whenever a mutex is
  1021. // contended. The callback is given the cycles for which waiting happened (as
  1022. // measured by //y_absl/base/internal/cycleclock.h, and which may not
  1023. // be real "cycle" counts.)
  1024. //
  1025. // There is no ordering guarantee between when the hook is registered and when
  1026. // callbacks will begin. Only a single profiler can be installed in a running
  1027. // binary; if this function is called a second time with a different function
  1028. // pointer, the value is ignored (and will cause an assertion failure in debug
  1029. // mode.)
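//
// Example (an illustrative sketch; `RecordContentionCycles()` is a
// hypothetical metrics function): export how long threads spend blocked on
// contended mutexes.
//
//   void MutexContentionHook(int64_t wait_cycles) {
//     RecordContentionCycles(wait_cycles);
//   }
//
//   // Early in main():
//   y_absl::RegisterMutexProfiler(&MutexContentionHook);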
  1030. void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles));
  1031. // Register a hook for Mutex tracing.
  1032. //
  1033. // The function pointer registered here will be called whenever a mutex is
  1034. // contended. The callback is given an opaque handle to the contended mutex,
  1035. // an event name, and the number of wait cycles (as measured by
  1036. // //y_absl/base/internal/cycleclock.h, and which may not be real
  1037. // "cycle" counts.)
  1038. //
  1039. // The only event name currently sent is "slow release".
  1040. //
  1041. // This has the same ordering and single-use limitations as
  1042. // RegisterMutexProfiler() above.
  1043. void RegisterMutexTracer(void (*fn)(const char* msg, const void* obj,
  1044. int64_t wait_cycles));
  1045. // Register a hook for CondVar tracing.
  1046. //
  1047. // The function pointer registered here will be called on various CondVar
  1048. // events. The callback is given an opaque handle to the CondVar object and
  1049. // a string identifying the event. This is thread-safe, but only a single
  1050. // tracer can be registered.
  1051. //
  1052. // Events that can be sent are "Wait", "Unwait", "Signal wakeup", and
  1053. // "SignalAll wakeup".
  1054. //
  1055. // This has the same ordering and single-use limitations as
  1056. // RegisterMutexProfiler() above.
  1057. void RegisterCondVarTracer(void (*fn)(const char* msg, const void* cv));
  1058. void ResetDeadlockGraphMu();
  1059. // EnableMutexInvariantDebugging()
  1060. //
  1061. // Enable or disable global support for Mutex invariant debugging. If enabled,
  1062. // then invariant predicates can be registered per-Mutex for debug checking.
  1063. // See Mutex::EnableInvariantDebugging().
  1064. void EnableMutexInvariantDebugging(bool enabled);
  1065. // When in debug mode, and when the feature has been enabled globally, the
  1066. // implementation will keep track of lock ordering and complain (or optionally
  1067. // crash) if a cycle is detected in the acquired-before graph.
  1068. // Possible modes of operation for the deadlock detector in debug mode.
  1069. enum class OnDeadlockCycle {
  1070. kIgnore, // Neither report on nor attempt to track cycles in lock ordering
  1071. kReport, // Report lock cycles to stderr when detected
  1072. kAbort, // Report lock cycles to stderr when detected, then abort
  1073. };
  1074. // SetMutexDeadlockDetectionMode()
  1075. //
  1076. // Enable or disable global support for detection of potential deadlocks
  1077. // due to Mutex lock ordering inversions. When set to 'kIgnore', tracking of
  1078. // lock ordering is disabled. Otherwise, in debug builds, a lock ordering graph
  1079. // will be maintained internally, and detected cycles will be reported in
  1080. // the manner chosen here.
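//
// Example (an illustrative sketch): in debug builds, abort the process when a
// potential deadlock cycle is detected; typically done once during startup.
//
//   y_absl::SetMutexDeadlockDetectionMode(y_absl::OnDeadlockCycle::kAbort);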
  1081. void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode);
  1082. Y_ABSL_NAMESPACE_END
  1083. } // namespace y_absl
  1084. // In some build configurations we pass --detect-odr-violations to the
  1085. // gold linker. This causes it to flag weak symbol overrides as ODR
  1086. // violations. Because ODR only applies to C++ and not C,
  1087. // --detect-odr-violations ignores symbols not mangled with C++ names.
  1088. // By changing our extension points to be extern "C", we dodge this
  1089. // check.
  1090. extern "C" {
  1091. void Y_ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
  1092. } // extern "C"
  1093. #endif // Y_ABSL_SYNCHRONIZATION_MUTEX_H_