//===-- local_cache.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_LOCAL_CACHE_H_
#define SCUDO_LOCAL_CACHE_H_

#include "internal_defs.h"
#include "list.h"
#include "platform.h"
#include "report.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {
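
// A local front-end cache for the primary allocator, typically one per
// thread: blocks are served from per-class arrays of compact pointers and
// only exchanged with the SizeClassAllocator in batches via refill()/drain().
//
// A minimal usage sketch (illustrative only; `Primary` stands in for a
// concrete SizeClassAllocator instantiation, and `GStats`, `PrimaryInstance`
// and `ClassId` are assumed to be set up by the caller):
//
//   SizeClassAllocatorLocalCache<Primary> Cache;
//   Cache.init(&GStats, &PrimaryInstance); // Link stats, bind the allocator.
//   void *P = Cache.allocate(ClassId);     // May refill from the primary.
//   if (P)
//     Cache.deallocate(ClassId, P);        // May drain back to the primary.
//   Cache.destroy(&GStats);                // Drains everything, unlinks stats.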
template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
  typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
  typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;
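
  // Links the local stats into the global ones (when provided) and binds the
  // primary allocator instance; the cache is expected to be empty on entry.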
  void init(GlobalStats *S, SizeClassAllocator *A) {
    DCHECK(isEmpty());
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
    Allocator = A;
    initCache();
  }
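
  // Drains any cached blocks back to the allocator, then unlinks the local
  // stats from the global ones.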
  void destroy(GlobalStats *S) {
    drain();
    if (LIKELY(S))
      S->unlink(&Stats);
  }
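
  // Returns a block of the given class, refilling the per-class cache from
  // the allocator when it runs empty. Returns nullptr if the refill fails.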
  void *allocate(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    PerClass *C = &PerClassArray[ClassId];
    if (C->Count == 0) {
      // Refill with half of the maximum number of cached blocks.
      DCHECK_GT(C->MaxCount / 2, 0U);
      if (UNLIKELY(!refill(C, ClassId, C->MaxCount / 2)))
        return nullptr;
      DCHECK_GT(C->Count, 0);
    }

    // We read ClassSize first before accessing Chunks because it's adjacent
    // to Count, while Chunks might be further off (depending on Count). That
    // keeps the memory accesses in close quarters.
    const uptr ClassSize = C->ClassSize;
    CompactPtrT CompactP = C->Chunks[--C->Count];
    Stats.add(StatAllocated, ClassSize);
    Stats.sub(StatFree, ClassSize);

    return Allocator->decompactPtr(ClassId, CompactP);
  }
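
  // Returns the block to the cache; if the cache is full, half of its blocks
  // are drained back to the allocator first. The return value tells the
  // caller whether a drain took place.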
  bool deallocate(uptr ClassId, void *P) {
    CHECK_LT(ClassId, NumClasses);
    PerClass *C = &PerClassArray[ClassId];

    // If the cache is full, drain half of the blocks back to the main
    // allocator.
    const bool NeedToDrainCache = C->Count == C->MaxCount;
    if (NeedToDrainCache)
      drain(C, ClassId);
    // See comment in allocate() about memory accesses.
    const uptr ClassSize = C->ClassSize;
    C->Chunks[C->Count++] =
        Allocator->compactPtr(ClassId, reinterpret_cast<uptr>(P));
    Stats.sub(StatAllocated, ClassSize);
    Stats.add(StatFree, ClassSize);

    return NeedToDrainCache;
  }

  bool isEmpty() const {
    for (uptr I = 0; I < NumClasses; ++I)
      if (PerClassArray[I].Count)
        return false;
    return true;
  }
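
  // Drains the entire cache back to the allocator.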
  void drain() {
    // Drain BatchClassId last as it may be needed while draining normal
    // blocks.
    for (uptr I = 0; I < NumClasses; ++I) {
      if (I == BatchClassId)
        continue;
      while (PerClassArray[I].Count > 0)
        drain(&PerClassArray[I], I);
    }
    while (PerClassArray[BatchClassId].Count > 0)
      drain(&PerClassArray[BatchClassId], BatchClassId);
    DCHECK(isEmpty());
  }
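
  // Allocates a block from the batch class; these back the allocator's
  // internal transfer batches, so failing to get one is treated as fatal.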
  void *getBatchClassBlock() {
    void *B = allocate(BatchClassId);
    if (UNLIKELY(!B))
      reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
    return B;
  }

  LocalStats &getStats() { return Stats; }
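
  // Appends a per-class summary of the cached blocks to Str.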
  void getStats(ScopedString *Str) {
    bool EmptyCache = true;
    for (uptr I = 0; I < NumClasses; ++I) {
      if (PerClassArray[I].Count == 0)
        continue;

      EmptyCache = false;
      // The size of BatchClass is set to 0 intentionally. See the comment in
      // initCache() for more details.
      const uptr ClassSize = I == BatchClassId
                                 ? SizeClassAllocator::getSizeByClassId(I)
                                 : PerClassArray[I].ClassSize;
      // Note that the string utils don't support printing u16, so we cast the
      // counts to the commonly used type uptr.
      Str->append("    %02zu (%6zu): cached: %4zu max: %4zu\n", I, ClassSize,
                  static_cast<uptr>(PerClassArray[I].Count),
                  static_cast<uptr>(PerClassArray[I].MaxCount));
    }

    if (EmptyCache)
      Str->append("    No block is cached.\n");
  }
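
  // Upper bound on the number of blocks cached per class: the size class
  // map's per-size hint, capped by its global maximum.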
  static u16 getMaxCached(uptr Size) {
    return Min(SizeClassMap::MaxNumCachedHint,
               SizeClassMap::getMaxCachedHint(Size));
  }

private:
  static const uptr NumClasses = SizeClassMap::NumClasses;
  static const uptr BatchClassId = SizeClassMap::BatchClassId;
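
  // Each per-class entry is cache-line aligned, presumably to keep the hot
  // fields (Count, MaxCount, ClassSize) of different classes on separate
  // cache lines.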
  struct alignas(SCUDO_CACHE_LINE_SIZE) PerClass {
    u16 Count;
    u16 MaxCount;
    // Note: ClassSize is zero for the transfer batch.
    uptr ClassSize;
    CompactPtrT Chunks[2 * SizeClassMap::MaxNumCachedHint];
  };
  PerClass PerClassArray[NumClasses] = {};
  LocalStats Stats;
  SizeClassAllocator *Allocator = nullptr;
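
  // Computes MaxCount and ClassSize for every class; called once from init().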
  NOINLINE void initCache() {
    for (uptr I = 0; I < NumClasses; I++) {
      PerClass *P = &PerClassArray[I];
      const uptr Size = SizeClassAllocator::getSizeByClassId(I);
      P->MaxCount = static_cast<u16>(2 * getMaxCached(Size));
      if (I != BatchClassId) {
        P->ClassSize = Size;
      } else {
        // ClassSize in this struct is only used for malloc/free stats, which
        // should only track user allocations, not internal movements.
        P->ClassSize = 0;
      }
    }
  }
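
  // Releases a batch block back to the batch class; skipped when the batch
  // class itself is being drained, to avoid recursing into it.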
  void destroyBatch(uptr ClassId, void *B) {
    if (ClassId != BatchClassId)
      deallocate(BatchClassId, B);
  }
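
  // Pops up to MaxRefill blocks from the allocator into C->Chunks; returns
  // false if no block could be obtained.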
  NOINLINE bool refill(PerClass *C, uptr ClassId, u16 MaxRefill) {
    const u16 NumBlocksRefilled =
        Allocator->popBlocks(this, ClassId, C->Chunks, MaxRefill);
    DCHECK_LE(NumBlocksRefilled, MaxRefill);
    C->Count = static_cast<u16>(C->Count + NumBlocksRefilled);
    return NumBlocksRefilled != 0;
  }
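
  // Pushes the first half (at most) of the cached blocks back to the
  // allocator, then moves the survivors to the front of Chunks.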
  NOINLINE void drain(PerClass *C, uptr ClassId) {
    const u16 Count = Min(static_cast<u16>(C->MaxCount / 2), C->Count);
    Allocator->pushBlocks(this, ClassId, &C->Chunks[0], Count);
    // u16 will be promoted to int by arithmetic type conversion.
    C->Count = static_cast<u16>(C->Count - Count);
    for (u16 I = 0; I < C->Count; I++)
      C->Chunks[I] = C->Chunks[I + Count];
  }
};

} // namespace scudo

#endif // SCUDO_LOCAL_CACHE_H_