singleton.cpp

#include "singleton.h"

#include <util/system/spinlock.h>
#include <util/system/thread.h>
#include <util/system/sanitizers.h>

#include <cstring>

namespace {
    // Try to take the lock in one shot: CAS the unlocked state (0) to v,
    // the caller's thread id.
    static inline bool MyAtomicTryLock(std::atomic<size_t>& a, size_t v) noexcept {
        size_t zero = 0;
        return a.compare_exchange_strong(zero, v);
    }

    // Test-and-test-and-set: spin on a cheap load and only attempt the CAS
    // once the lock looks free, to avoid hammering the cache line.
    static inline bool MyAtomicTryAndTryLock(std::atomic<size_t>& a, size_t v) noexcept {
        return a.load(std::memory_order_acquire) == 0 && MyAtomicTryLock(a, v);
    }
    static inline size_t MyThreadId() noexcept {
        const size_t ret = TThread::CurrentThreadId();

        if (ret) {
            return ret;
        }

        // 0 is reserved as the unlocked state, so remap a zero thread id to 1.
        // A clash is only possible if threads with ids 0 and 1 coexist.
        return 1;
    }
}
void NPrivate::FillWithTrash(void* ptr, size_t len) {
#if defined(NDEBUG)
    Y_UNUSED(ptr);
    Y_UNUSED(len);
#else
    // In debug builds, poison the destroyed singleton's storage with 0xBA
    // so use-after-destruction is easier to spot; the poisoning is skipped
    // when ThreadSanitizer is enabled.
    if constexpr (NSan::TSanIsOn()) {
        Y_UNUSED(ptr);
        Y_UNUSED(len);
    } else {
        memset(ptr, 0xBA, len);
    }
#endif
}
void NPrivate::LockRecursive(std::atomic<size_t>& lock) noexcept {
    const size_t id = MyThreadId();

    // The lock word holds the owner's thread id, so a second initialization
    // attempt from the same thread is detected here instead of deadlocking.
    Y_ABORT_UNLESS(lock.load(std::memory_order_acquire) != id, "recursive singleton initialization");

    if (!MyAtomicTryLock(lock, id)) {
        TSpinWait sw;

        do {
            sw.Sleep();
        } while (!MyAtomicTryAndTryLock(lock, id));
    }
}
void NPrivate::UnlockRecursive(std::atomic<size_t>& lock) noexcept {
    // Only the owning thread may release the lock; storing 0 marks it free.
    Y_ABORT_UNLESS(lock.load(std::memory_order_acquire) == MyThreadId(), "unlock from another thread?!?!");

    lock.store(0);
}
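
A minimal sketch of how these helpers fit together, assuming a hypothetical
double-checked singleton initializer. It is not part of this file or of
singleton.h: the names InitSketch and slot are illustrative, and the sketch
presumes singleton.h is included for the NPrivate declarations.

// sketch.cpp -- hypothetical usage of NPrivate::LockRecursive/UnlockRecursive
#include "singleton.h"

#include <atomic>

template <class T>
T* InitSketch(std::atomic<size_t>& lock, std::atomic<T*>& slot) {
    T* ret = slot.load(std::memory_order_acquire);
    if (!ret) {
        NPrivate::LockRecursive(lock); // aborts on recursive initialization
        ret = slot.load(std::memory_order_acquire);
        if (!ret) {
            ret = new T(); // never destroyed in this sketch
            slot.store(ret, std::memory_order_release);
        }
        NPrivate::UnlockRecursive(lock);
    }
    return ret;
}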