//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// The quarantine caches a specified amount of memory in per-thread caches and
// then evicts it to a global FIFO queue. When the queue exceeds the specified
// threshold, the oldest memory is recycled.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {
template<typename Node> class QuarantineCache;

struct QuarantineBatch {
  static const uptr kSize = 1021;
  QuarantineBatch *next;
  uptr size;
  uptr count;
  void *batch[kSize];

  void init(void *ptr, uptr size) {
    count = 1;
    batch[0] = ptr;
    this->size = size + sizeof(QuarantineBatch);  // Account for the batch size.
  }

  // The total size of quarantined nodes recorded in this batch.
  uptr quarantined_size() const {
    return size - sizeof(QuarantineBatch);
  }

  void push_back(void *ptr, uptr size) {
    CHECK_LT(count, kSize);
    batch[count++] = ptr;
    this->size += size;
  }

  bool can_merge(const QuarantineBatch* const from) const {
    return count + from->count <= kSize;
  }

  void merge(QuarantineBatch* const from) {
    CHECK_LE(count + from->count, kSize);
    CHECK_GE(size, sizeof(QuarantineBatch));

    for (uptr i = 0; i < from->count; ++i)
      batch[count + i] = from->batch[i];
    count += from->count;
    size += from->quarantined_size();

    from->count = 0;
    from->size = sizeof(QuarantineBatch);
  }
};

COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8Kb.
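
// Accounting sketch (illustrative only; p0/p1 and the byte sizes are
// hypothetical, not part of this header):
//
//   QuarantineBatch b;
//   b.init(p0, 128);       // size == 128 + sizeof(QuarantineBatch)
//   b.push_back(p1, 64);   // size == 192 + sizeof(QuarantineBatch)
//   uptr user = b.quarantined_size();  // == 192
//
// merge() moves the donor's chunks over, adds only its quarantined_size(),
// and leaves the donor empty (count == 0, size == sizeof(QuarantineBatch)).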
// The callback interface is:
//   void cb.Recycle(Node *ptr);
//   void *cb.Allocate(uptr size);
//   void cb.Deallocate(void *ptr);
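//
// A minimal callback sketch (illustrative only; ToolQuarantineCallback,
// ChunkHeader and ReturnChunkToAllocator are assumptions, not part of this
// header; a real tool typically routes Recycle() back into its allocator):
//
//   struct ToolQuarantineCallback {
//     void *Allocate(uptr size) { return InternalAlloc(size); }
//     void Deallocate(void *ptr) { InternalFree(ptr); }
//     void Recycle(ChunkHeader *ptr) { ReturnChunkToAllocator(ptr); }
//   };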
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    // The thread-local quarantine size can be zero only when the global
    // quarantine size is zero (this allows us to perform just one atomic read
    // per Put() call).
    CHECK((size == 0 && cache_size == 0) || cache_size != 0);

    atomic_store_relaxed(&max_size_, size);
    atomic_store_relaxed(&min_size_, size / 10 * 9);  // 90% of max size.
    atomic_store_relaxed(&max_cache_size_, cache_size);

    cache_mutex_.Init();
    recycle_mutex_.Init();
  }
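
  // Illustrative numbers (an assumption, not tool-specific defaults): after
  // Init(100 << 20, 1 << 20) the global limit max_size_ is 100 MB, recycling
  // trims the global cache back down to min_size_ == 90 MB, and each
  // per-thread cache is drained into the global cache once it exceeds 1 MB.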
  uptr GetSize() const { return atomic_load_relaxed(&max_size_); }
  uptr GetCacheSize() const {
    return atomic_load_relaxed(&max_cache_size_);
  }

  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    uptr cache_size = GetCacheSize();
    if (cache_size) {
      c->Enqueue(cb, ptr, size);
    } else {
      // GetCacheSize() == 0 only when GetSize() == 0 (see Init).
      cb.Recycle(ptr);
    }
    // Check the cache size anyway to accommodate a runtime cache_size change.
    if (c->Size() > cache_size)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
      Recycle(atomic_load_relaxed(&min_size_), cb);
  }

  void NOINLINE DrainAndRecycle(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    recycle_mutex_.Lock();
    Recycle(0, cb);
  }

  void PrintStats() const {
    // This assumes that the world is stopped, just like the allocator's
    // PrintStats.
    Printf("Quarantine limits: global: %zdMb; thread local: %zdKb\n",
           GetSize() >> 20, GetCacheSize() >> 10);
    cache_.PrintStats();
  }

 private:
  // Read-only data.
  char pad0_[kCacheLineSize];
  atomic_uintptr_t max_size_;
  atomic_uintptr_t min_size_;
  atomic_uintptr_t max_cache_size_;
  char pad1_[kCacheLineSize];
  StaticSpinMutex cache_mutex_;
  StaticSpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

  void NOINLINE Recycle(uptr min_size, Callback cb)
      SANITIZER_REQUIRES(recycle_mutex_) SANITIZER_RELEASE(recycle_mutex_) {
    Cache tmp;
    {
      SpinMutexLock l(&cache_mutex_);
      // Go over the batches and merge partially filled ones to save some
      // memory; otherwise the batches themselves (whose memory is counted
      // against the quarantine limit) can outweigh the user's actual
      // quarantined chunks, which defeats the purpose of the quarantine.
      uptr cache_size = cache_.Size();
      uptr overhead_size = cache_.OverheadSize();
      CHECK_GE(cache_size, overhead_size);

      // Do the merge only when the overhead exceeds this predefined limit
      // (which might require some tuning). With 100%, merging happens once the
      // batch overhead exceeds the user's quarantined bytes, i.e. more than
      // half of the total cache size. This saves us a merge attempt when the
      // quarantine is unlikely to contain batches suitable for merging.
      const uptr kOverheadThresholdPercents = 100;
      if (cache_size > overhead_size &&
          overhead_size * (100 + kOverheadThresholdPercents) >
              cache_size * kOverheadThresholdPercents) {
        cache_.MergeBatches(&tmp);
      }

      // Extract enough chunks from the quarantine to get below the max
      // quarantine size and leave some leeway for the newly quarantined
      // chunks.
      while (cache_.Size() > min_size) {
        tmp.EnqueueBatch(cache_.DequeueBatch());
      }
    }
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }

  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
      const uptr kPrefetch = 16;
      CHECK(kPrefetch <= ARRAY_SIZE(b->batch));
      for (uptr i = 0; i < kPrefetch; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0, count = b->count; i < count; i++) {
        if (i + kPrefetch < count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};
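
// Typical wiring sketch (illustrative only; ToolQuarantineCallback,
// ChunkHeader, quarantine and thread_local_cache are assumptions, not part of
// this header):
//
//   typedef Quarantine<ToolQuarantineCallback, ChunkHeader> ToolQuarantine;
//   static ToolQuarantine quarantine(LINKER_INITIALIZED);
//
//   // At startup:
//   quarantine.Init(/*size=*/100 << 20, /*cache_size=*/1 << 20);
//
//   // On every deallocation, using the current thread's cache:
//   quarantine.Put(&thread_local_cache, ToolQuarantineCallback(),
//                  chunk, chunk_size);
//
//   // On thread teardown, return the thread's cache to the global queue:
//   quarantine.Drain(&thread_local_cache, ToolQuarantineCallback());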
// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  // Total memory used, including internal accounting.
  uptr Size() const {
    return atomic_load_relaxed(&size_);
  }

  // Memory used for internal accounting.
  uptr OverheadSize() const {
    return list_.size() * sizeof(QuarantineBatch);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
      CHECK(b);
      b->init(ptr, size);
      EnqueueBatch(b);
    } else {
      list_.back()->push_back(ptr, size);
      SizeAdd(size);
    }
  }

  void Transfer(QuarantineCache *from_cache) {
    list_.append_back(&from_cache->list_);
    SizeAdd(from_cache->Size());

    atomic_store_relaxed(&from_cache->size_, 0);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return nullptr;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

  void MergeBatches(QuarantineCache *to_deallocate) {
    uptr extracted_size = 0;
    QuarantineBatch *current = list_.front();
    while (current && current->next) {
      if (current->can_merge(current->next)) {
        QuarantineBatch *extracted = current->next;

        // Move all the chunks into the current batch.
        current->merge(extracted);

        CHECK_EQ(extracted->count, 0);
        CHECK_EQ(extracted->size, sizeof(QuarantineBatch));

        // Remove the next batch from the list and account for its size.
        list_.extract(current, extracted);
        extracted_size += extracted->size;

        // Add it to the deallocation list.
        to_deallocate->EnqueueBatch(extracted);
      } else {
        current = current->next;
      }
    }
    SizeSub(extracted_size);
  }

  void PrintStats() const {
    uptr batch_count = 0;
    uptr total_overhead_bytes = 0;
    uptr total_bytes = 0;
    uptr total_quarantine_chunks = 0;
    for (List::ConstIterator it = list_.begin(); it != list_.end(); ++it) {
      batch_count++;
      total_bytes += (*it).size;
      total_overhead_bytes += (*it).size - (*it).quarantined_size();
      total_quarantine_chunks += (*it).count;
    }
    uptr quarantine_chunks_capacity = batch_count * QuarantineBatch::kSize;
    int chunks_usage_percent = quarantine_chunks_capacity == 0 ?
        0 : total_quarantine_chunks * 100 / quarantine_chunks_capacity;
    uptr total_quarantined_bytes = total_bytes - total_overhead_bytes;
    int memory_overhead_percent = total_quarantined_bytes == 0 ?
        0 : total_overhead_bytes * 100 / total_quarantined_bytes;
    Printf("Global quarantine stats: batches: %zd; bytes: %zd (user: %zd); "
           "chunks: %zd (capacity: %zd); %d%% chunks used; %d%% memory overhead"
           "\n",
           batch_count, total_bytes, total_quarantined_bytes,
           total_quarantine_chunks, quarantine_chunks_capacity,
           chunks_usage_percent, memory_overhead_percent);
  }

 private:
  typedef IntrusiveList<QuarantineBatch> List;

  List list_;
  atomic_uintptr_t size_;
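
  // Note: SizeAdd/SizeSub below use a plain load followed by a relaxed store
  // rather than an atomic read-modify-write. This appears safe because each
  // cache is only mutated by its owning thread (per-thread caches) or under
  // cache_mutex_ (the global cache); the atomics only keep concurrent Size()
  // readers well-defined.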
  void SizeAdd(uptr add) {
    atomic_store_relaxed(&size_, Size() + add);
  }
  void SizeSub(uptr sub) {
    atomic_store_relaxed(&size_, Size() - sub);
  }
};

} // namespace __sanitizer

#endif  // SANITIZER_QUARANTINE_H