// spinlock.h
#pragma once

#include "platform.h"
#include "spin_wait.h"

#include <atomic>
  5. class TSpinLockBase {
  6. protected:
  7. TSpinLockBase() = default;
  8. // These were unearthed in IGNIETFERRO-1105
  9. // Need to get rid of them separately
  10. TSpinLockBase(const TSpinLockBase& other)
  11. : Val_(other.Val_.load())
  12. {
  13. }
  14. TSpinLockBase& operator=(const TSpinLockBase& other)
  15. {
  16. Val_.store(other.Val_);
  17. return *this;
  18. }
  19. public:
  20. inline bool IsLocked() const noexcept {
  21. return Val_.load();
  22. }
  23. inline bool TryAcquire() noexcept {
  24. intptr_t zero = 0;
  25. return Val_.compare_exchange_strong(zero, 1);
  26. }
  27. inline bool try_lock() noexcept {
  28. return TryAcquire();
  29. }
  30. protected:
  31. std::atomic<intptr_t> Val_{0};
  32. };
// Hints the CPU that the caller is busy-waiting, letting the core relax
// (save power / yield pipeline resources to a sibling hyperthread).
// Compiles to nothing on compilers/architectures not covered below.
// NOTE(review): _i386_/_x86_64_/_arm64_ appear to be macros supplied by
// platform.h — confirm they are defined for the relevant targets.
static inline void SpinLockPause() {
#if defined(__GNUC__)
    #if defined(_i386_) || defined(_x86_64_)
        // x86 PAUSE: spin-wait hint instruction.
        __asm __volatile("pause");
    #elif defined(_arm64_)
        // AArch64 YIELD; the "memory" clobber stops the compiler from caching
        // the lock word across iterations of the surrounding spin loop.
        __asm __volatile("yield" ::
                             : "memory");
    #endif
#endif
}
  43. /*
  44. * You should almost always use TAdaptiveLock instead of TSpinLock
  45. */
  46. class TSpinLock: public TSpinLockBase {
  47. public:
  48. using TSpinLockBase::TSpinLockBase;
  49. inline void Release() noexcept {
  50. Val_.store(0, std::memory_order_release);
  51. }
  52. inline void Acquire() noexcept {
  53. intptr_t zero = 0;
  54. if (Val_.compare_exchange_strong(zero, 1)) {
  55. return;
  56. }
  57. do {
  58. SpinLockPause();
  59. zero = 0;
  60. } while (Val_.load(std::memory_order_acquire) != 0 ||
  61. !Val_.compare_exchange_strong(zero, 1));
  62. }
  63. inline void unlock() noexcept {
  64. Release();
  65. }
  66. inline void lock() noexcept {
  67. Acquire();
  68. }
  69. };
  70. /**
  71. * TAdaptiveLock almost always should be used instead of TSpinLock.
  72. * It also should be used instead of TMutex for short-term locks.
  73. * This usually means that the locked code should not use syscalls,
  74. * since almost every syscall:
  75. * - might run unpredictably long and the waiting thread will waste a lot of CPU
  76. * - takes considerable amount of time, so you should not care about the mutex performance
  77. */
  78. class TAdaptiveLock: public TSpinLockBase {
  79. public:
  80. using TSpinLockBase::TSpinLockBase;
  81. void Release() noexcept {
  82. Val_.store(0, std::memory_order_release);
  83. }
  84. void Acquire() noexcept {
  85. intptr_t zero = 0;
  86. if (Val_.compare_exchange_strong(zero, 1)) {
  87. return;
  88. }
  89. TSpinWait sw;
  90. for (;;) {
  91. zero = 0;
  92. if (Val_.load(std::memory_order_acquire) == 0 &&
  93. Val_.compare_exchange_strong(zero, 1)) {
  94. break;
  95. }
  96. sw.Sleep();
  97. }
  98. }
  99. inline void unlock() noexcept {
  100. Release();
  101. }
  102. inline void lock() noexcept {
  103. Acquire();
  104. }
  105. };