//===-- quarantine.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_QUARANTINE_H_
#define SCUDO_QUARANTINE_H_

#include "list.h"
#include "mutex.h"
#include "string_utils.h"
#include "thread_annotations.h"

namespace scudo {

struct QuarantineBatch {
  // With the following count, a batch (and the header that protects it) occupy
  // 4096 bytes on 32-bit platforms, and 8192 bytes on 64-bit.
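  // A quick sanity check of that claim on 64-bit, assuming a 16-byte Scudo
  // chunk header: 8 (Next) + 8 (Size) + 4 (Count) + 4 (padding) +
  // 1019 * 8 (Batch) = 8176 bytes, which plus the header is exactly 8192.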
  static const u32 MaxCount = 1019;
  QuarantineBatch *Next;
  uptr Size;
  u32 Count;
  void *Batch[MaxCount];

  void init(void *Ptr, uptr Size) {
    Count = 1;
    Batch[0] = Ptr;
    this->Size = Size + sizeof(QuarantineBatch); // Account for the batch itself.
  }

  // The total size of quarantined nodes recorded in this batch.
  uptr getQuarantinedSize() const { return Size - sizeof(QuarantineBatch); }

  void push_back(void *Ptr, uptr Size) {
    DCHECK_LT(Count, MaxCount);
    Batch[Count++] = Ptr;
    this->Size += Size;
  }

  bool canMerge(const QuarantineBatch *const From) const {
    return Count + From->Count <= MaxCount;
  }

  void merge(QuarantineBatch *const From) {
    DCHECK_LE(Count + From->Count, MaxCount);
    DCHECK_GE(Size, sizeof(QuarantineBatch));
    for (uptr I = 0; I < From->Count; ++I)
      Batch[Count + I] = From->Batch[I];
    Count += From->Count;
    Size += From->getQuarantinedSize();
    From->Count = 0;
    From->Size = sizeof(QuarantineBatch);
  }

  void shuffle(u32 State) { ::scudo::shuffle(Batch, Count, &State); }
};

static_assert(sizeof(QuarantineBatch) <= (1U << 13), ""); // 8 KiB.

// Per-thread cache of memory blocks.
template <typename Callback> class QuarantineCache {
public:
  void init() { DCHECK_EQ(atomic_load_relaxed(&Size), 0U); }

  // Total memory used, including internal accounting.
  uptr getSize() const { return atomic_load_relaxed(&Size); }
  // Memory used for internal accounting.
  uptr getOverheadSize() const { return List.size() * sizeof(QuarantineBatch); }
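  // In other words, for a cache holding N batches with U user bytes in total,
  // getSize() should return U + N * sizeof(QuarantineBatch), of which
  // getOverheadSize() is the N * sizeof(QuarantineBatch) part.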

  void enqueue(Callback Cb, void *Ptr, uptr Size) {
    if (List.empty() || List.back()->Count == QuarantineBatch::MaxCount) {
      QuarantineBatch *B =
          reinterpret_cast<QuarantineBatch *>(Cb.allocate(sizeof(*B)));
      DCHECK(B);
      B->init(Ptr, Size);
      enqueueBatch(B);
    } else {
      List.back()->push_back(Ptr, Size);
      addToSize(Size);
    }
  }
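
  // Moves all batches (and their accounted bytes) from From into this cache,
  // leaving From empty.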
  void transfer(QuarantineCache *From) {
    List.append_back(&From->List);
    addToSize(From->getSize());
    atomic_store_relaxed(&From->Size, 0);
  }

  void enqueueBatch(QuarantineBatch *B) {
    List.push_back(B);
    addToSize(B->Size);
  }

  QuarantineBatch *dequeueBatch() {
    if (List.empty())
      return nullptr;
    QuarantineBatch *B = List.front();
    List.pop_front();
    subFromSize(B->Size);
    return B;
  }

  void mergeBatches(QuarantineCache *ToDeallocate) {
    uptr ExtractedSize = 0;
    QuarantineBatch *Current = List.front();
    while (Current && Current->Next) {
      if (Current->canMerge(Current->Next)) {
        QuarantineBatch *Extracted = Current->Next;
        // Move all the chunks into the current batch.
        Current->merge(Extracted);
        DCHECK_EQ(Extracted->Count, 0);
        DCHECK_EQ(Extracted->Size, sizeof(QuarantineBatch));
        // Remove the next batch from the list and account for its size.
        List.extract(Current, Extracted);
        ExtractedSize += Extracted->Size;
        // Add it to the deallocation list.
        ToDeallocate->enqueueBatch(Extracted);
      } else {
        Current = Current->Next;
      }
    }
    subFromSize(ExtractedSize);
  }

  void getStats(ScopedString *Str) const {
    uptr BatchCount = 0;
    uptr TotalOverheadBytes = 0;
    uptr TotalBytes = 0;
    uptr TotalQuarantineChunks = 0;
    for (const QuarantineBatch &Batch : List) {
      BatchCount++;
      TotalBytes += Batch.Size;
      TotalOverheadBytes += Batch.Size - Batch.getQuarantinedSize();
      TotalQuarantineChunks += Batch.Count;
    }
    const uptr QuarantineChunksCapacity =
        BatchCount * QuarantineBatch::MaxCount;
    const uptr ChunksUsagePercent =
        (QuarantineChunksCapacity == 0)
            ? 0
            : TotalQuarantineChunks * 100 / QuarantineChunksCapacity;
    const uptr TotalQuarantinedBytes = TotalBytes - TotalOverheadBytes;
    const uptr MemoryOverheadPercent =
        (TotalQuarantinedBytes == 0)
            ? 0
            : TotalOverheadBytes * 100 / TotalQuarantinedBytes;
    Str->append(
        "Stats: Quarantine: batches: %zu; bytes: %zu (user: %zu); chunks: %zu "
        "(capacity: %zu); %zu%% chunks used; %zu%% memory overhead\n",
        BatchCount, TotalBytes, TotalQuarantinedBytes, TotalQuarantineChunks,
        QuarantineChunksCapacity, ChunksUsagePercent, MemoryOverheadPercent);
  }

private:
  SinglyLinkedList<QuarantineBatch> List;
  atomic_uptr Size = {};
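
  // Note that these are not atomic read-modify-write operations: the atomics
  // only make getSize() safe to read concurrently; mutation is expected to be
  // serialized by the owning thread or by GlobalQuarantine's CacheMutex.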
  void addToSize(uptr add) { atomic_store_relaxed(&Size, getSize() + add); }
  void subFromSize(uptr sub) { atomic_store_relaxed(&Size, getSize() - sub); }
};

// The callback interface is:
//   void Callback::recycle(Node *Ptr);
//   void *Callback::allocate(uptr Size);
//   void Callback::deallocate(void *Ptr);
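//
// For illustration only, a hypothetical callback satisfying this interface
// could look like the sketch below; the real callback in Scudo wraps the
// combined allocator and is not part of this header:
//
//   struct ExampleCallback {
//     void recycle(Node *Ptr) { /* Return the chunk to the backend. */ }
//     void *allocate(uptr Size) { /* Grab memory for a batch. */ }
//     void deallocate(void *Ptr) { /* Release a drained batch. */ }
//   };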
template <typename Callback, typename Node> class GlobalQuarantine {
public:
  typedef QuarantineCache<Callback> CacheT;
  using ThisT = GlobalQuarantine<Callback, Node>;

  void init(uptr Size, uptr CacheSize) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
    DCHECK_EQ(atomic_load_relaxed(&MaxSize), 0U);
    DCHECK_EQ(atomic_load_relaxed(&MinSize), 0U);
    DCHECK_EQ(atomic_load_relaxed(&MaxCacheSize), 0U);
    // Thread local quarantine size can be zero only when global quarantine
    // size is zero (it allows us to perform just one atomic read per put()
    // call).
    CHECK((Size == 0 && CacheSize == 0) || CacheSize != 0);
    atomic_store_relaxed(&MaxSize, Size);
    atomic_store_relaxed(&MinSize, Size / 10 * 9); // 90% of max size.
    atomic_store_relaxed(&MaxCacheSize, CacheSize);
    Cache.init();
  }

  uptr getMaxSize() const { return atomic_load_relaxed(&MaxSize); }
  uptr getCacheSize() const { return atomic_load_relaxed(&MaxCacheSize); }

  // This is supposed to be used in tests only.
  bool isEmpty() {
    ScopedLock L(CacheMutex);
    return Cache.getSize() == 0U;
  }

  void put(CacheT *C, Callback Cb, Node *Ptr, uptr Size) {
    C->enqueue(Cb, Ptr, Size);
    if (C->getSize() > getCacheSize())
      drain(C, Cb);
  }

  void NOINLINE drain(CacheT *C, Callback Cb) EXCLUDES(CacheMutex) {
    bool needRecycle = false;
    {
      ScopedLock L(CacheMutex);
      Cache.transfer(C);
      needRecycle = Cache.getSize() > getMaxSize();
    }
    if (needRecycle && RecycleMutex.tryLock())
      recycle(atomic_load_relaxed(&MinSize), Cb);
  }

  void NOINLINE drainAndRecycle(CacheT *C, Callback Cb) EXCLUDES(CacheMutex) {
    {
      ScopedLock L(CacheMutex);
      Cache.transfer(C);
    }
    RecycleMutex.lock();
    recycle(0, Cb);
  }

  void getStats(ScopedString *Str) EXCLUDES(CacheMutex) {
    ScopedLock L(CacheMutex);
    // This assumes that the world is stopped, just as in the allocator's
    // printStats.
    Cache.getStats(Str);
    Str->append("Quarantine limits: global: %zuK; thread local: %zuK\n",
                getMaxSize() >> 10, getCacheSize() >> 10);
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS {
    // RecycleMutex must be locked first since we grab CacheMutex within
    // recycle.
    RecycleMutex.lock();
    CacheMutex.lock();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    CacheMutex.unlock();
    RecycleMutex.unlock();
  }

private:
  // Read-only data.
  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex CacheMutex;
  CacheT Cache GUARDED_BY(CacheMutex);
  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecycleMutex;
  atomic_uptr MinSize = {};
  atomic_uptr MaxSize = {};
  alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize = {};

  void NOINLINE recycle(uptr MinSize, Callback Cb) RELEASE(RecycleMutex)
      EXCLUDES(CacheMutex) {
    CacheT Tmp;
    Tmp.init();
    {
      ScopedLock L(CacheMutex);
      // Go over the batches and merge partially filled ones to save some
      // memory; otherwise the batches themselves (whose memory is counted
      // against the quarantine limit) can outweigh the user's actual
      // quarantined chunks, which defeats the purpose of the quarantine.
      const uptr CacheSize = Cache.getSize();
      const uptr OverheadSize = Cache.getOverheadSize();
      DCHECK_GE(CacheSize, OverheadSize);
      // Do the merge only when the overhead exceeds this predefined limit
      // (which might require some tuning). It saves us a merge attempt when
      // the batch list quarantine is unlikely to contain batches suitable
      // for merging.
      constexpr uptr OverheadThresholdPercents = 100;
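      // With the value of 100, the condition below simplifies to
      // 2 * OverheadSize > CacheSize, i.e. merging kicks in once batch
      // overhead accounts for more than half of the quarantined bytes.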
      if (CacheSize > OverheadSize &&
          OverheadSize * (100 + OverheadThresholdPercents) >
              CacheSize * OverheadThresholdPercents) {
        Cache.mergeBatches(&Tmp);
      }
      // Extract enough chunks from the quarantine to get below the max
      // quarantine size and leave some leeway for the newly quarantined
      // chunks.
      while (Cache.getSize() > MinSize)
        Tmp.enqueueBatch(Cache.dequeueBatch());
    }
    RecycleMutex.unlock();
    doRecycle(&Tmp, Cb);
  }

  void NOINLINE doRecycle(CacheT *C, Callback Cb) {
    while (QuarantineBatch *B = C->dequeueBatch()) {
      const u32 Seed = static_cast<u32>(
          (reinterpret_cast<uptr>(B) ^ reinterpret_cast<uptr>(C)) >> 4);
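      // Shuffling the batch before recycling presumably randomizes the order
      // in which chunks become available for reuse, making reuse patterns
      // harder to predict.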
      B->shuffle(Seed);
      constexpr uptr NumberOfPrefetch = 8UL;
      CHECK(NumberOfPrefetch <= ARRAY_SIZE(B->Batch));
      for (uptr I = 0; I < NumberOfPrefetch; I++)
        PREFETCH(B->Batch[I]);
      for (uptr I = 0, Count = B->Count; I < Count; I++) {
        if (I + NumberOfPrefetch < Count)
          PREFETCH(B->Batch[I + NumberOfPrefetch]);
        Cb.recycle(reinterpret_cast<Node *>(B->Batch[I]));
      }
      Cb.deallocate(B);
    }
  }
};

} // namespace scudo

#endif // SCUDO_QUARANTINE_H_