//===-- tsd_exclusive.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TSD_EXCLUSIVE_H_
#define SCUDO_TSD_EXCLUSIVE_H_

#include "tsd.h"
#include "string_utils.h"

namespace scudo {
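
// Per-thread state for the exclusive registry: a 1-bit flag controlling the
// memory-init behavior and a 2-bit init state machine (teardownThread() is
// what moves it to TornDown).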
struct ThreadState {
  bool DisableMemInit : 1;
  enum : unsigned {
    NotInitialized = 0,
    Initialized,
    TornDown,
  } InitState : 2;
};
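
// Thread-specific-data destructor, registered with pthread_key_create() in
// init() below and defined at the bottom of this file.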
template <class Allocator> void teardownThread(void *Ptr);

template <class Allocator> struct TSDRegistryExT {
  void init(Allocator *Instance) REQUIRES(Mutex) {
    DCHECK(!Initialized);
    Instance->init();
    CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
    FallbackTSD.init(Instance);
    Initialized = true;
  }

  void initOnceMaybe(Allocator *Instance) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    if (LIKELY(Initialized))
      return;
    init(Instance); // Sets Initialized.
  }

  void unmapTestOnly(Allocator *Instance) EXCLUDES(Mutex) {
    DCHECK(Instance);
    if (reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey))) {
      DCHECK_EQ(reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey)),
                Instance);
      ThreadTSD.commitBack(Instance);
      ThreadTSD = {};
    }
    CHECK_EQ(pthread_key_delete(PThreadKey), 0);
    PThreadKey = {};
    FallbackTSD.commitBack(Instance);
    FallbackTSD = {};
    State = {};
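    // `Initialized` is GUARDED_BY(Mutex), so take the lock just to clear it.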
    ScopedLock L(Mutex);
    Initialized = false;
  }

  void drainCaches(Allocator *Instance) {
    // We don't have a way to iterate over all thread-local `ThreadTSD`s, so we
    // only drain the current thread's `ThreadTSD` and the `FallbackTSD`.
    Instance->drainCache(&ThreadTSD);
    FallbackTSD.lock();
    Instance->drainCache(&FallbackTSD);
    FallbackTSD.unlock();
  }
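
  // Fast-path entry point: after the first call on a given thread, this is a
  // single predictable branch. `MinimalInit` performs only the global
  // initialization and leaves the thread-specific structure untouched (see
  // initThread() below).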
  ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
    if (LIKELY(State.InitState != ThreadState::NotInitialized))
      return;
    initThread(Instance, MinimalInit);
  }

  // TODO(chiahungduan): Consider removing the `UnlockRequired` argument by
  // embedding the logic into TSD or by always locking the TSD. That would let
  // us properly mark the thread annotations here and add proper runtime
  // assertions in the member functions of TSD, for example, asserting that
  // the lock is acquired before calling TSD::commitBack().
  ALWAYS_INLINE TSD<Allocator> *
  getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
    if (LIKELY(State.InitState == ThreadState::Initialized &&
               !atomic_load(&Disabled, memory_order_acquire))) {
      *UnlockRequired = false;
      return &ThreadTSD;
    }
    FallbackTSD.lock();
    *UnlockRequired = true;
    return &FallbackTSD;
  }
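
  // A sketch of the typical caller pattern (`Registry` and the cache access
  // are illustrative, not part of this header):
  //
  //   bool UnlockRequired;
  //   TSD<Allocator> *TSD = Registry.getTSDAndLock(&UnlockRequired);
  //   // ... allocate or deallocate through the TSD's local cache ...
  //   if (UnlockRequired)
  //     TSD->unlock();
  //
  // The exclusive path hands back the thread-local TSD without locking; only
  // the shared fallback must be unlocked by the caller.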

  // To disable the exclusive TSD registry, we effectively lock the fallback
  // TSD and force all threads to attempt to use it instead of their local
  // one.
  void disable() NO_THREAD_SAFETY_ANALYSIS {
    Mutex.lock();
    FallbackTSD.lock();
    atomic_store(&Disabled, 1U, memory_order_release);
  }
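
  // enable() undoes disable() in reverse order: clear the flag first, then
  // release the fallback TSD and the registry mutex. The pair is typically
  // driven by the allocator's own disable()/enable(), e.g. around fork().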
  void enable() NO_THREAD_SAFETY_ANALYSIS {
    atomic_store(&Disabled, 0U, memory_order_release);
    FallbackTSD.unlock();
    Mutex.unlock();
  }
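
  // `MaxTSDsCount` only applies to the shared registry; returning false
  // reports that the option was not honored here.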
  bool setOption(Option O, sptr Value) {
    if (O == Option::ThreadDisableMemInit)
      State.DisableMemInit = Value;
    if (O == Option::MaxTSDsCount)
      return false;
    return true;
  }

  bool getDisableMemInit() { return State.DisableMemInit; }

  void getStats(ScopedString *Str) {
    // We don't have a way to iterate over all thread-local `ThreadTSD`s.
    // Printing only the current thread's `ThreadTSD` could be misleading, so
    // we skip it entirely.
    Str->append("Exclusive TSD doesn't support iterating each TSD\n");
  }

private:
  // Using minimal initialization allows for global initialization while
  // keeping the thread specific structure untouched. The fallback structure
  // will be used instead.
  NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
    initOnceMaybe(Instance);
    if (UNLIKELY(MinimalInit))
      return;
    CHECK_EQ(
        pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)),
        0);
    ThreadTSD.init(Instance);
    State.InitState = ThreadState::Initialized;
    Instance->callPostInitCallback();
  }
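
  // `State` and `ThreadTSD` are thread_local, so every thread gets its own
  // exclusive copy; `FallbackTSD` is shared and guarded by its own lock.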
  pthread_key_t PThreadKey = {};
  bool Initialized GUARDED_BY(Mutex) = false;
  atomic_u8 Disabled = {};
  TSD<Allocator> FallbackTSD;
  HybridMutex Mutex;
  static thread_local ThreadState State;
  static thread_local TSD<Allocator> ThreadTSD;

  friend void teardownThread<Allocator>(void *Ptr);
};
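
// Out-of-line definitions of the static thread_local members declared above.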
template <class Allocator>
thread_local TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
template <class Allocator>
thread_local ThreadState TSDRegistryExT<Allocator>::State;

template <class Allocator>
void teardownThread(void *Ptr) NO_THREAD_SAFETY_ANALYSIS {
  typedef TSDRegistryExT<Allocator> TSDRegistryT;
  Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
  // The glibc POSIX thread-local-storage deallocation routine calls user
  // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
  // We want to be called last since other destructors might call free and the
  // like, so we wait until the last of the PTHREAD_DESTRUCTOR_ITERATIONS
  // before draining the quarantine and swallowing the cache.
  if (TSDRegistryT::ThreadTSD.DestructorIterations > 1) {
    TSDRegistryT::ThreadTSD.DestructorIterations--;
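    // Re-install a non-null value for the key so that the destructor loop
    // invokes us again on a later iteration.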
    // If pthread_setspecific fails, we will go ahead with the teardown.
    if (LIKELY(pthread_setspecific(Instance->getTSDRegistry()->PThreadKey,
                                   Ptr) == 0))
      return;
  }
  TSDRegistryT::ThreadTSD.commitBack(Instance);
  TSDRegistryT::State.InitState = ThreadState::TornDown;
}

} // namespace scudo

#endif // SCUDO_TSD_EXCLUSIVE_H_