//===-- mutex.h -------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_MUTEX_H_
#define SCUDO_MUTEX_H_

#include "atomic_helpers.h"
#include "common.h"
#include "thread_annotations.h"

#include <string.h>

#if SCUDO_FUCHSIA
#include <lib/sync/mutex.h> // for sync_mutex_t
#endif

namespace scudo {
  18. class CAPABILITY("mutex") HybridMutex {
  19. public:
  20. bool tryLock() TRY_ACQUIRE(true);
  21. NOINLINE void lock() ACQUIRE() {
  22. if (LIKELY(tryLock()))
  23. return;
  24. // The compiler may try to fully unroll the loop, ending up in a
  25. // NumberOfTries*NumberOfYields block of pauses mixed with tryLocks. This
  26. // is large, ugly and unneeded, a compact loop is better for our purpose
  27. // here. Use a pragma to tell the compiler not to unroll the loop.
  28. #ifdef __clang__
  29. #pragma nounroll
  30. #endif
  31. for (u8 I = 0U; I < NumberOfTries; I++) {
  32. delayLoop();
  33. if (tryLock())
  34. return;
  35. }
  36. lockSlow();
  37. }
  38. void unlock() RELEASE();
  39. // TODO(chiahungduan): In general, we may want to assert the owner of lock as
  40. // well. Given the current uses of HybridMutex, it's acceptable without
  41. // asserting the owner. Re-evaluate this when we have certain scenarios which
  42. // requires a more fine-grained lock granularity.
  43. ALWAYS_INLINE void assertHeld() ASSERT_CAPABILITY(this) {
  44. if (SCUDO_DEBUG)
  45. assertHeldImpl();
  46. }
  47. private:
  48. void delayLoop() {
  49. // The value comes from the average time spent in accessing caches (which
  50. // are the fastest operations) so that we are unlikely to wait too long for
  51. // fast operations.
  52. constexpr u32 SpinTimes = 16;
  53. volatile u32 V = 0;
  54. for (u32 I = 0; I < SpinTimes; ++I) {
  55. u32 Tmp = V + 1;
  56. V = Tmp;
  57. }
  58. }
  59. void assertHeldImpl();
  60. // TODO(chiahungduan): Adapt this value based on scenarios. E.g., primary and
  61. // secondary allocator have different allocation times.
  62. static constexpr u8 NumberOfTries = 32U;
  63. #if SCUDO_LINUX
  64. atomic_u32 M = {};
  65. #elif SCUDO_FUCHSIA
  66. sync_mutex_t M = {};
  67. #endif
  68. void lockSlow() ACQUIRE();
  69. };
  70. class SCOPED_CAPABILITY ScopedLock {
  71. public:
  72. explicit ScopedLock(HybridMutex &M) ACQUIRE(M) : Mutex(M) { Mutex.lock(); }
  73. ~ScopedLock() RELEASE() { Mutex.unlock(); }
  74. private:
  75. HybridMutex &Mutex;
  76. ScopedLock(const ScopedLock &) = delete;
  77. void operator=(const ScopedLock &) = delete;
  78. };
} // namespace scudo

#endif // SCUDO_MUTEX_H_