// quarantine.h
  1. //===-- quarantine.h --------------------------------------------*- C++ -*-===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. #ifndef SCUDO_QUARANTINE_H_
  9. #define SCUDO_QUARANTINE_H_
  10. #include "list.h"
  11. #include "mutex.h"
  12. #include "string_utils.h"
  13. namespace scudo {
  14. struct QuarantineBatch {
  15. // With the following count, a batch (and the header that protects it) occupy
  16. // 4096 bytes on 32-bit platforms, and 8192 bytes on 64-bit.
  17. static const u32 MaxCount = 1019;
  18. QuarantineBatch *Next;
  19. uptr Size;
  20. u32 Count;
  21. void *Batch[MaxCount];
  22. void init(void *Ptr, uptr Size) {
  23. Count = 1;
  24. Batch[0] = Ptr;
  25. this->Size = Size + sizeof(QuarantineBatch); // Account for the Batch Size.
  26. }
  27. // The total size of quarantined nodes recorded in this batch.
  28. uptr getQuarantinedSize() const { return Size - sizeof(QuarantineBatch); }
  29. void push_back(void *Ptr, uptr Size) {
  30. DCHECK_LT(Count, MaxCount);
  31. Batch[Count++] = Ptr;
  32. this->Size += Size;
  33. }
  34. bool canMerge(const QuarantineBatch *const From) const {
  35. return Count + From->Count <= MaxCount;
  36. }
  37. void merge(QuarantineBatch *const From) {
  38. DCHECK_LE(Count + From->Count, MaxCount);
  39. DCHECK_GE(Size, sizeof(QuarantineBatch));
  40. for (uptr I = 0; I < From->Count; ++I)
  41. Batch[Count + I] = From->Batch[I];
  42. Count += From->Count;
  43. Size += From->getQuarantinedSize();
  44. From->Count = 0;
  45. From->Size = sizeof(QuarantineBatch);
  46. }
  47. void shuffle(u32 State) { ::scudo::shuffle(Batch, Count, &State); }
  48. };
  49. static_assert(sizeof(QuarantineBatch) <= (1U << 13), ""); // 8Kb.
// Per-thread cache of memory blocks.
template <typename Callback> class QuarantineCache {
public:
  // The cache is expected to live in zero-initialized storage; init() only
  // checks that invariant rather than clearing anything itself.
  void init() { DCHECK_EQ(atomic_load_relaxed(&Size), 0U); }

  // Total memory used, including internal accounting.
  uptr getSize() const { return atomic_load_relaxed(&Size); }
  // Memory used for internal accounting.
  uptr getOverheadSize() const { return List.size() * sizeof(QuarantineBatch); }

  // Quarantine Ptr: append it to the tail batch, or allocate a fresh batch
  // through the callback when the list is empty or the tail batch is full.
  void enqueue(Callback Cb, void *Ptr, uptr Size) {
    if (List.empty() || List.back()->Count == QuarantineBatch::MaxCount) {
      QuarantineBatch *B =
          reinterpret_cast<QuarantineBatch *>(Cb.allocate(sizeof(*B)));
      DCHECK(B);
      B->init(Ptr, Size);
      // enqueueBatch() accounts for both the batch header and Size.
      enqueueBatch(B);
    } else {
      List.back()->push_back(Ptr, Size);
      addToSize(Size);
    }
  }

  // Move every batch (and its accounted size) out of From into this cache.
  // Callers are responsible for synchronizing access to both caches.
  void transfer(QuarantineCache *From) {
    List.append_back(&From->List);
    addToSize(From->getSize());
    atomic_store_relaxed(&From->Size, 0);
  }

  void enqueueBatch(QuarantineBatch *B) {
    List.push_back(B);
    addToSize(B->Size);
  }

  // Pop the oldest batch, or return nullptr when the cache is empty.
  QuarantineBatch *dequeueBatch() {
    if (List.empty())
      return nullptr;
    QuarantineBatch *B = List.front();
    List.pop_front();
    subFromSize(B->Size);
    return B;
  }

  // Fold each batch into its predecessor whenever the two fit into one,
  // moving the emptied batches into *ToDeallocate. This trims the per-batch
  // header overhead that partially filled batches keep counting against the
  // quarantine limit.
  void mergeBatches(QuarantineCache *ToDeallocate) {
    uptr ExtractedSize = 0;
    QuarantineBatch *Current = List.front();
    while (Current && Current->Next) {
      if (Current->canMerge(Current->Next)) {
        QuarantineBatch *Extracted = Current->Next;
        // Move all the chunks into the current batch.
        Current->merge(Extracted);
        DCHECK_EQ(Extracted->Count, 0);
        DCHECK_EQ(Extracted->Size, sizeof(QuarantineBatch));
        // Remove the next batch From the list and account for its Size.
        List.extract(Current, Extracted);
        // Only the header size remains in Extracted at this point; the
        // quarantined bytes were transferred to Current by merge().
        ExtractedSize += Extracted->Size;
        // Add it to deallocation list.
        ToDeallocate->enqueueBatch(Extracted);
      } else {
        // No room to merge: advance and try the next adjacent pair.
        Current = Current->Next;
      }
    }
    subFromSize(ExtractedSize);
  }

  // Append usage statistics (batch count, byte totals, utilization and
  // overhead percentages) to Str.
  void getStats(ScopedString *Str) const {
    uptr BatchCount = 0;
    uptr TotalOverheadBytes = 0;
    uptr TotalBytes = 0;
    uptr TotalQuarantineChunks = 0;
    for (const QuarantineBatch &Batch : List) {
      BatchCount++;
      TotalBytes += Batch.Size;
      TotalOverheadBytes += Batch.Size - Batch.getQuarantinedSize();
      TotalQuarantineChunks += Batch.Count;
    }
    const uptr QuarantineChunksCapacity =
        BatchCount * QuarantineBatch::MaxCount;
    // Guard the percentage divisions against an empty quarantine.
    const uptr ChunksUsagePercent =
        (QuarantineChunksCapacity == 0)
            ? 0
            : TotalQuarantineChunks * 100 / QuarantineChunksCapacity;
    const uptr TotalQuarantinedBytes = TotalBytes - TotalOverheadBytes;
    const uptr MemoryOverheadPercent =
        (TotalQuarantinedBytes == 0)
            ? 0
            : TotalOverheadBytes * 100 / TotalQuarantinedBytes;
    Str->append(
        "Stats: Quarantine: batches: %zu; bytes: %zu (user: %zu); chunks: %zu "
        "(capacity: %zu); %zu%% chunks used; %zu%% memory overhead\n",
        BatchCount, TotalBytes, TotalQuarantinedBytes, TotalQuarantineChunks,
        QuarantineChunksCapacity, ChunksUsagePercent, MemoryOverheadPercent);
  }

private:
  SinglyLinkedList<QuarantineBatch> List;
  atomic_uptr Size = {};

  // NOTE(review): Size is updated with a relaxed load+store pair, not an
  // atomic RMW — presumably safe because each cache is either thread-local
  // or externally locked, with only getSize() racing; confirm against users.
  void addToSize(uptr add) { atomic_store_relaxed(&Size, getSize() + add); }
  void subFromSize(uptr sub) { atomic_store_relaxed(&Size, getSize() - sub); }
};
// The callback interface is:
// void Callback::recycle(Node *Ptr);
// void *Callback::allocate(uptr Size);
// void Callback::deallocate(void *Ptr);
template <typename Callback, typename Node> class GlobalQuarantine {
public:
  typedef QuarantineCache<Callback> CacheT;
  using ThisT = GlobalQuarantine<Callback, Node>;

  // Sets the global quarantine limit (Size) and the per-thread drain
  // threshold (CacheSize). Expects zero-initialized storage, as the DCHECKs
  // below verify.
  void init(uptr Size, uptr CacheSize) {
    DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
    DCHECK_EQ(atomic_load_relaxed(&MaxSize), 0U);
    DCHECK_EQ(atomic_load_relaxed(&MinSize), 0U);
    DCHECK_EQ(atomic_load_relaxed(&MaxCacheSize), 0U);
    // Thread local quarantine size can be zero only when global quarantine size
    // is zero (it allows us to perform just one atomic read per put() call).
    CHECK((Size == 0 && CacheSize == 0) || CacheSize != 0);
    atomic_store_relaxed(&MaxSize, Size);
    atomic_store_relaxed(&MinSize, Size / 10 * 9); // 90% of max size.
    atomic_store_relaxed(&MaxCacheSize, CacheSize);
    Cache.init();
  }

  uptr getMaxSize() const { return atomic_load_relaxed(&MaxSize); }
  uptr getCacheSize() const { return atomic_load_relaxed(&MaxCacheSize); }

  // Quarantine Ptr in the local cache C, draining C into the global cache
  // once it exceeds the per-thread threshold.
  void put(CacheT *C, Callback Cb, Node *Ptr, uptr Size) {
    C->enqueue(Cb, Ptr, Size);
    if (C->getSize() > getCacheSize())
      drain(C, Cb);
  }

  // Moves C's batches into the global cache; if the global limit is now
  // exceeded, tries to recycle. tryLock means a thread already recycling
  // makes us skip rather than block; recycle() releases RecycleMutex itself.
  void NOINLINE drain(CacheT *C, Callback Cb) {
    {
      ScopedLock L(CacheMutex);
      Cache.transfer(C);
    }
    if (Cache.getSize() > getMaxSize() && RecycleMutex.tryLock())
      recycle(atomic_load_relaxed(&MinSize), Cb);
  }

  // Unconditionally drains C and recycles the entire global cache (target
  // size 0), blocking on RecycleMutex if another recycle is in flight.
  void NOINLINE drainAndRecycle(CacheT *C, Callback Cb) {
    {
      ScopedLock L(CacheMutex);
      Cache.transfer(C);
    }
    RecycleMutex.lock();
    recycle(0, Cb);
  }

  void getStats(ScopedString *Str) const {
    // It assumes that the world is stopped, just as the allocator's printStats.
    Cache.getStats(Str);
    Str->append("Quarantine limits: global: %zuK; thread local: %zuK\n",
                getMaxSize() >> 10, getCacheSize() >> 10);
  }

  void disable() {
    // RecycleMutex must be locked 1st since we grab CacheMutex within recycle.
    RecycleMutex.lock();
    CacheMutex.lock();
  }

  // Releases the locks in the reverse order of disable().
  void enable() {
    CacheMutex.unlock();
    RecycleMutex.unlock();
  }

private:
  // Read-only data.
  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex CacheMutex;
  CacheT Cache;
  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecycleMutex;
  atomic_uptr MinSize = {};
  atomic_uptr MaxSize = {};
  alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize = {};

  // Shrinks the global cache down to MinSize bytes (the parameter shadows
  // the member of the same name) and recycles everything extracted. Must be
  // entered with RecycleMutex held; it is released before running the
  // callbacks so other threads do not wait on the potentially slow recycle.
  void NOINLINE recycle(uptr MinSize, Callback Cb) {
    CacheT Tmp;
    Tmp.init();
    {
      ScopedLock L(CacheMutex);
      // Go over the batches and merge partially filled ones to
      // save some memory, otherwise batches themselves (since the memory used
      // by them is counted against quarantine limit) can overcome the actual
      // user's quarantined chunks, which diminishes the purpose of the
      // quarantine.
      const uptr CacheSize = Cache.getSize();
      const uptr OverheadSize = Cache.getOverheadSize();
      DCHECK_GE(CacheSize, OverheadSize);
      // Do the merge only when overhead exceeds this predefined limit (might
      // require some tuning). It saves us merge attempt when the batch list
      // quarantine is unlikely to contain batches suitable for merge.
      constexpr uptr OverheadThresholdPercents = 100;
      if (CacheSize > OverheadSize &&
          OverheadSize * (100 + OverheadThresholdPercents) >
              CacheSize * OverheadThresholdPercents) {
        Cache.mergeBatches(&Tmp);
      }
      // Extract enough chunks from the quarantine to get below the max
      // quarantine size and leave some leeway for the newly quarantined chunks.
      while (Cache.getSize() > MinSize)
        Tmp.enqueueBatch(Cache.dequeueBatch());
    }
    RecycleMutex.unlock();
    doRecycle(&Tmp, Cb);
  }

  // Shuffles each batch (so reuse order is not predictable), recycles every
  // chunk through the callback, then deallocates the batch itself.
  void NOINLINE doRecycle(CacheT *C, Callback Cb) {
    while (QuarantineBatch *B = C->dequeueBatch()) {
      const u32 Seed = static_cast<u32>(
          (reinterpret_cast<uptr>(B) ^ reinterpret_cast<uptr>(C)) >> 4);
      B->shuffle(Seed);
      constexpr uptr NumberOfPrefetch = 8UL;
      CHECK(NumberOfPrefetch <= ARRAY_SIZE(B->Batch));
      // Prefetch ahead of the recycle loop to hide the cache-miss latency of
      // touching each quarantined pointer.
      for (uptr I = 0; I < NumberOfPrefetch; I++)
        PREFETCH(B->Batch[I]);
      for (uptr I = 0, Count = B->Count; I < Count; I++) {
        if (I + NumberOfPrefetch < Count)
          PREFETCH(B->Batch[I + NumberOfPrefetch]);
        Cb.recycle(reinterpret_cast<Node *>(B->Batch[I]));
      }
      Cb.deallocate(B);
    }
  }
};
  257. } // namespace scudo
  258. #endif // SCUDO_QUARANTINE_H_