//===-- tsd.h ---------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TSD_H_
#define SCUDO_TSD_H_

#include "atomic_helpers.h"
#include "common.h"
#include "mutex.h"
#include "thread_annotations.h"

#include <limits.h> // for PTHREAD_DESTRUCTOR_ITERATIONS
#include <pthread.h>

// With some build setups, this might still not be defined.
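// The fallback value of 4 matches _POSIX_THREAD_DESTRUCTOR_ITERATIONS, the
// minimum number of destructor passes POSIX requires an implementation to
// support.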
#ifndef PTHREAD_DESTRUCTOR_ITERATIONS
#define PTHREAD_DESTRUCTOR_ITERATIONS 4
#endif

namespace scudo {

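// A TSD (Thread Specific Data) object holds the per-thread state of the
// allocator: a local allocation cache and a quarantine cache, both guarded by
// a hybrid mutex. TSDs are owned and handed out by a TSDRegistry (see
// tsd_exclusive.h and tsd_shared.h).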
template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
  using ThisT = TSD<Allocator>;
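  // Remaining number of pthread key destructor passes allowed for this
  // thread. init() arms it with PTHREAD_DESTRUCTOR_ITERATIONS; the registry's
  // thread-exit destructor counts it down before finally committing the
  // caches back.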
  u8 DestructorIterations = 0;

  void init(Allocator *Instance) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(DestructorIterations, 0U);
    DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
    Instance->initCache(&Cache);
    DestructorIterations = PTHREAD_DESTRUCTOR_ITERATIONS;
  }
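
  // tryLock() doubles as a contention hint: on failure, it records a coarse
  // timestamp in `Precedence` (only while the hint is still 0). A shared TSD
  // registry can compare the values returned by getPrecedence() to pick the
  // TSD that has been contended the longest. lock() and a successful
  // tryLock() clear the hint.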
  inline bool tryLock() NO_THREAD_SAFETY_ANALYSIS {
    if (Mutex.tryLock()) {
      atomic_store_relaxed(&Precedence, 0);
      return true;
    }
    if (atomic_load_relaxed(&Precedence) == 0)
      atomic_store_relaxed(&Precedence,
                           static_cast<uptr>(getMonotonicTimeFast() >>
                                             FIRST_32_SECOND_64(16, 0)));
    return false;
  }
  inline void lock() NO_THREAD_SAFETY_ANALYSIS {
    atomic_store_relaxed(&Precedence, 0);
    Mutex.lock();
  }
  inline void unlock() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }
  inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
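
  // Hands the thread-local state back to the allocator instance, typically on
  // thread teardown, so that cached blocks and quarantined chunks are not
  // stranded with a dead thread.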
  void commitBack(Allocator *Instance) { Instance->commitBack(this); }

  // As noted in the comments attached to `getCache()`, the TSD doesn't always
  // need to be locked. In that case, `BypassCheck` skips the assertion; the
  // bypass should only remain until the lock is acquired on all paths.
  void assertLocked(bool BypassCheck) ASSERT_CAPABILITY(Mutex) {
    if (SCUDO_DEBUG && !BypassCheck)
      Mutex.assertHeld();
  }

  // Ideally, we would assert that all operations on Cache/QuarantineCache are
  // done while holding the `Mutex`. However, the current architecture of
  // accessing TSD doesn't cooperate well with the thread-safety analysis
  // because of pointer aliasing. For now, we only assert on the getters of
  // Cache/QuarantineCache.
  //
  // TODO(chiahungduan): Ideally, we want to do `Mutex.assertHeld`, but
  // acquiring a TSD doesn't always require holding the lock. Add this
  // assertion once the lock is always acquired.
  typename Allocator::CacheT &getCache() REQUIRES(Mutex) { return Cache; }
  typename Allocator::QuarantineCacheT &getQuarantineCache() REQUIRES(Mutex) {
    return QuarantineCache;
  }

private:
  HybridMutex Mutex;
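  // Coarse timestamp of the first failed tryLock() since the last successful
  // acquisition; 0 while the TSD is uncontended. See tryLock().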
  atomic_uptr Precedence = {};

  typename Allocator::CacheT Cache GUARDED_BY(Mutex);
  typename Allocator::QuarantineCacheT QuarantineCache GUARDED_BY(Mutex);
};
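
// A minimal usage sketch (illustrative only; the real call sites are the
// TSDRegistry implementations in tsd_exclusive.h and tsd_shared.h).
// `SomeAllocator` stands in for a combined allocator type providing
// CacheT/QuarantineCacheT, and `ClassId` for a valid size class:
//
//   TSD<SomeAllocator> *CandidateTSD = ...; // selected by the registry
//   if (!CandidateTSD->tryLock())
//     CandidateTSD->lock(); // fall back to a blocking acquisition
//   CandidateTSD->assertLocked(/*BypassCheck=*/false);
//   void *P = CandidateTSD->getCache().allocate(ClassId);
//   CandidateTSD->unlock();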

} // namespace scudo

#endif // SCUDO_TSD_H_