//===-- tsd_exclusive.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TSD_EXCLUSIVE_H_
#define SCUDO_TSD_EXCLUSIVE_H_

#include "tsd.h"

namespace scudo {
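
// Per-thread bookkeeping for the exclusive registry: DisableMemInit holds the
// thread-local override set via Option::ThreadDisableMemInit, and InitState
// records whether this thread's exclusive TSD has not been set up yet, is
// live, or has already been torn down.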
struct ThreadState {
  bool DisableMemInit : 1;
  enum : unsigned {
    NotInitialized = 0,
    Initialized,
    TornDown,
  } InitState : 2;
};
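
// Registered below (in init()) as the pthread key destructor; it runs when a
// thread owning an exclusive TSD exits.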
template <class Allocator> void teardownThread(void *Ptr);
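
// Exclusive (per-thread) TSD registry: each thread gets its own TSD, reached
// without locking on the fast path, while a single mutex-protected fallback
// TSD serves threads that are not fully initialized, as well as all threads
// while the registry is disabled.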
template <class Allocator> struct TSDRegistryExT {
  void init(Allocator *Instance) {
    DCHECK(!Initialized);
    Instance->init();
    CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
    FallbackTSD.init(Instance);
    Initialized = true;
  }

  void initOnceMaybe(Allocator *Instance) {
    ScopedLock L(Mutex);
    if (LIKELY(Initialized))
      return;
    init(Instance); // Sets Initialized.
  }

  void unmapTestOnly(Allocator *Instance) {
    DCHECK(Instance);
    if (reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey))) {
      DCHECK_EQ(reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey)),
                Instance);
      ThreadTSD.commitBack(Instance);
      ThreadTSD = {};
    }
    CHECK_EQ(pthread_key_delete(PThreadKey), 0);
    PThreadKey = {};
    FallbackTSD.commitBack(Instance);
    FallbackTSD = {};
    State = {};
    Initialized = false;
  }
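
  // Fast path: any state other than NotInitialized (i.e. Initialized or
  // TornDown) means there is nothing to do, so only a branch is paid here.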
  ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
    if (LIKELY(State.InitState != ThreadState::NotInitialized))
      return;
    initThread(Instance, MinimalInit);
  }
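
  // Returns the thread's exclusive TSD without locking when the thread is
  // initialized and the registry is enabled; otherwise falls back to the
  // shared TSD, which is returned locked. *UnlockRequired tells the caller
  // whether it must unlock() the returned TSD when done.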
  ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
    if (LIKELY(State.InitState == ThreadState::Initialized &&
               !atomic_load(&Disabled, memory_order_acquire))) {
      *UnlockRequired = false;
      return &ThreadTSD;
    }
    FallbackTSD.lock();
    *UnlockRequired = true;
    return &FallbackTSD;
  }
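
  // Typical caller pattern, as a sketch (`Registry` stands for an instance of
  // this class; the real call sites live in the allocator code):
  //   bool UnlockRequired;
  //   TSD<Allocator> *TSD = Registry.getTSDAndLock(&UnlockRequired);
  //   // ... use the TSD-local cache ...
  //   if (UnlockRequired)
  //     TSD->unlock();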

  // To disable the exclusive TSD registry, we effectively lock the fallback
  // TSD and force all threads to attempt to use it instead of their local one.
  void disable() {
    Mutex.lock();
    FallbackTSD.lock();
    atomic_store(&Disabled, 1U, memory_order_release);
  }

  void enable() {
    atomic_store(&Disabled, 0U, memory_order_release);
    FallbackTSD.unlock();
    Mutex.unlock();
  }
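
  // ThreadDisableMemInit only affects the calling thread. MaxTSDsCount is
  // meaningless for an exclusive registry (there is exactly one TSD per
  // thread), so setting it reports failure.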
  bool setOption(Option O, sptr Value) {
    if (O == Option::ThreadDisableMemInit)
      State.DisableMemInit = Value;
    if (O == Option::MaxTSDsCount)
      return false;
    return true;
  }

  bool getDisableMemInit() { return State.DisableMemInit; }

private:
  // Minimal initialization performs only the global initialization, leaving
  // the thread-specific structure untouched; such a thread keeps using the
  // fallback TSD.
  NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
    initOnceMaybe(Instance);
    if (UNLIKELY(MinimalInit))
      return;
    CHECK_EQ(
        pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
    ThreadTSD.init(Instance);
    State.InitState = ThreadState::Initialized;
    Instance->callPostInitCallback();
  }

  pthread_key_t PThreadKey = {};
  bool Initialized = false;
  atomic_u8 Disabled = {};
  TSD<Allocator> FallbackTSD;
  HybridMutex Mutex;
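  // One State and one TSD per thread: both are static thread_local, with
  // out-of-line definitions (below) per template instantiation.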
  static thread_local ThreadState State;
  static thread_local TSD<Allocator> ThreadTSD;

  friend void teardownThread<Allocator>(void *Ptr);
};

template <class Allocator>
thread_local TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
template <class Allocator>
thread_local ThreadState TSDRegistryExT<Allocator>::State;

template <class Allocator> void teardownThread(void *Ptr) {
  typedef TSDRegistryExT<Allocator> TSDRegistryT;
  Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
  // The glibc POSIX thread-local-storage deallocation routine calls
  // user-provided destructors in a loop of up to
  // PTHREAD_DESTRUCTOR_ITERATIONS passes. We want to be called last, since
  // other destructors might call free and the like, so we re-register the key
  // to postpone draining the quarantine and swallowing the cache until the
  // last iteration.
  if (TSDRegistryT::ThreadTSD.DestructorIterations > 1) {
    TSDRegistryT::ThreadTSD.DestructorIterations--;
    // If pthread_setspecific fails, we will go ahead with the teardown.
    if (LIKELY(pthread_setspecific(Instance->getTSDRegistry()->PThreadKey,
                                   Ptr) == 0))
      return;
  }
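  // Last iteration (or the re-registration failed): release the local cache
  // and quarantine back to the allocator, and mark the thread as torn down so
  // later allocations on it route through the fallback TSD.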
  TSDRegistryT::ThreadTSD.commitBack(Instance);
  TSDRegistryT::State.InitState = ThreadState::TornDown;
}

} // namespace scudo

#endif // SCUDO_TSD_EXCLUSIVE_H_