//===-- tsan_dense_alloc.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// A DenseSlabAlloc is a freelist-based allocator of fixed-size objects.
// DenseSlabAllocCache is a thread-local cache for DenseSlabAlloc.
// The only difference from traditional slab allocators is that DenseSlabAlloc
// allocates/frees indices of objects and provides functionality to map an
// index onto the real pointer. The index is u32, that is, 2 times smaller
// than uptr (hence the Dense prefix).
//===----------------------------------------------------------------------===//
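//
// Usage sketch (illustrative only; ExampleNode and the template parameters
// below are hypothetical, not part of this header):
//
//   struct ExampleNode { u64 a, b; };  // sizeof(T) must exceed sizeof(IndexT)
//   typedef DenseSlabAlloc<ExampleNode, /*kL1Size=*/4096, /*kL2Size=*/4096>
//       ExampleAlloc;
//
//   ExampleAlloc alloc("example");    // or (LINKER_INITIALIZED, "example")
//   DenseSlabAllocCache cache;
//   alloc.InitCache(&cache);
//   u32 idx = alloc.Alloc(&cache);    // compact u32 handle, never 0
//   ExampleNode *n = alloc.Map(idx);  // resolve the handle to a pointer
//   alloc.Free(&cache, idx);
//   alloc.FlushCache(&cache);         // return cached indices to the freelist
//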
#ifndef TSAN_DENSE_ALLOC_H
#define TSAN_DENSE_ALLOC_H

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_mutex.h"  // SpinMutex, SpinMutexLock
#include "tsan_defs.h"

namespace __tsan {
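
// Small per-thread stack of free indices. It is filled and drained in
// batches of kSize/2 by DenseSlabAlloc to amortize locking of the shared
// freelist.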
class DenseSlabAllocCache {
  static const uptr kSize = 128;
  typedef u32 IndexT;
  uptr pos;
  IndexT cache[kSize];
  template <typename, uptr, uptr, u64>
  friend class DenseSlabAlloc;
};
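
// Two-level table of objects: map_ holds up to kL1Size pointers to lazily
// mmap-ed slabs of kL2Size objects each. Index 0 is reserved as the invalid
// index. While an object is free, its first sizeof(IndexT) bytes store the
// index of the next free object.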
template <typename T, uptr kL1Size, uptr kL2Size, u64 kReserved = 0>
class DenseSlabAlloc {
 public:
  typedef DenseSlabAllocCache Cache;
  typedef typename Cache::IndexT IndexT;

  static_assert((kL1Size & (kL1Size - 1)) == 0,
                "kL1Size must be a power-of-two");
  static_assert((kL2Size & (kL2Size - 1)) == 0,
                "kL2Size must be a power-of-two");
  static_assert((kL1Size * kL2Size) <= (1ull << (sizeof(IndexT) * 8)),
                "kL1Size/kL2Size are too large");
  static_assert(((kL1Size * kL2Size - 1) & kReserved) == 0,
                "reserved bits don't fit");
  static_assert(sizeof(T) > sizeof(IndexT),
                "it doesn't make sense to use dense alloc");
  DenseSlabAlloc(LinkerInitialized, const char *name) : name_(name) {}

  explicit DenseSlabAlloc(const char *name)
      : DenseSlabAlloc(LINKER_INITIALIZED, name) {
    // It can be very large.
    // Don't page it in for linker initialized objects.
    internal_memset(map_, 0, sizeof(map_));
  }

  ~DenseSlabAlloc() {
    for (uptr i = 0; i < kL1Size; i++) {
      if (map_[i] != 0)
        UnmapOrDie(map_[i], kL2Size * sizeof(T));
    }
  }
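
  // Pops a free index off the thread-local cache, refilling it from the
  // shared freelist (and growing the slab table if needed) when empty.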
  IndexT Alloc(Cache *c) {
    if (c->pos == 0)
      Refill(c);
    return c->cache[--c->pos];
  }
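
  // Pushes idx back onto the cache; when the cache is full, half of it is
  // drained back to the shared freelist under the mutex.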
  void Free(Cache *c, IndexT idx) {
    DCHECK_NE(idx, 0);
    if (c->pos == Cache::kSize)
      Drain(c);
    c->cache[c->pos++] = idx;
  }
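
  // Translates an index into a pointer to the corresponding object.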
  T *Map(IndexT idx) {
    DCHECK_NE(idx, 0);
    DCHECK_LE(idx, kL1Size * kL2Size);
    return &map_[idx / kL2Size][idx % kL2Size];
  }
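
  // Returns all cached indices to the shared freelist.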
  void FlushCache(Cache *c) {
    if (!c->pos)
      return;
    SpinMutexLock lock(&mtx_);
    while (c->pos) {
      IndexT idx = c->cache[--c->pos];
      *(IndexT*)Map(idx) = freelist_;
      freelist_ = idx;
    }
  }
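
  // Resets the cache to the empty state.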
  void InitCache(Cache *c) {
    c->pos = 0;
    internal_memset(c->cache, 0, sizeof(c->cache));
  }
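
  // Memory currently mmap-ed for object slabs.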
  uptr AllocatedMemory() const {
    return atomic_load_relaxed(&fillpos_) * kL2Size * sizeof(T);
  }
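
  // Calls func on every object slot in every mapped slab, both allocated and
  // free, skipping only the reserved slot 0.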
  template <typename Func>
  void ForEach(Func func) {
    SpinMutexLock lock(&mtx_);
    uptr fillpos = atomic_load_relaxed(&fillpos_);
    for (uptr l1 = 0; l1 < fillpos; l1++) {
      for (IndexT l2 = l1 == 0 ? 1 : 0; l2 < kL2Size; l2++) func(&map_[l1][l2]);
    }
  }

 private:
  T *map_[kL1Size];
  SpinMutex mtx_;
  IndexT freelist_ = {0};
  atomic_uintptr_t fillpos_ = {0};
  const char *const name_;
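
  // Moves up to kSize/2 free indices from the shared freelist into the cache,
  // first mmap-ing and threading a fresh slab into the freelist if it is empty.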
  void Refill(Cache *c) {
    SpinMutexLock lock(&mtx_);
    if (freelist_ == 0) {
      uptr fillpos = atomic_load_relaxed(&fillpos_);
      if (fillpos == kL1Size) {
        Printf("ThreadSanitizer: %s overflow (%zu*%zu). Dying.\n",
               name_, kL1Size, kL2Size);
        Die();
      }
      VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n", name_,
              fillpos, kL1Size, kL2Size);
      T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), name_);
      // Reserve 0 as invalid index.
      IndexT start = fillpos == 0 ? 1 : 0;
      for (IndexT i = start; i < kL2Size; i++) {
        new(batch + i) T;
        *(IndexT *)(batch + i) = i + 1 + fillpos * kL2Size;
      }
      *(IndexT*)(batch + kL2Size - 1) = 0;
      freelist_ = fillpos * kL2Size + start;
      map_[fillpos] = batch;
      atomic_store_relaxed(&fillpos_, fillpos + 1);
    }
    for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) {
      IndexT idx = freelist_;
      c->cache[c->pos++] = idx;
      freelist_ = *(IndexT*)Map(idx);
    }
  }
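
  // Returns half of the cache back to the shared freelist.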
  void Drain(Cache *c) {
    SpinMutexLock lock(&mtx_);
    for (uptr i = 0; i < Cache::kSize / 2; i++) {
      IndexT idx = c->cache[--c->pos];
      *(IndexT*)Map(idx) = freelist_;
      freelist_ = idx;
    }
  }
};

} // namespace __tsan

#endif // TSAN_DENSE_ALLOC_H