123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234 |
//===-- local_cache.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_LOCAL_CACHE_H_
#define SCUDO_LOCAL_CACHE_H_

#include "internal_defs.h"
#include "list.h"
#include "platform.h"
#include "report.h"
#include "stats.h"

namespace scudo {
- template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
- typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
- typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;
- struct TransferBatch {
- static const u16 MaxNumCached = SizeClassMap::MaxNumCachedHint;
- void setFromArray(CompactPtrT *Array, u16 N) {
- DCHECK_LE(N, MaxNumCached);
- Count = N;
- memcpy(Batch, Array, sizeof(Batch[0]) * Count);
- }
- void appendFromArray(CompactPtrT *Array, u16 N) {
- DCHECK_LE(N, MaxNumCached - Count);
- memcpy(Batch + Count, Array, sizeof(Batch[0]) * N);
- // u16 will be promoted to int by arithmetic type conversion.
- Count = static_cast<u16>(Count + N);
- }
- void clear() { Count = 0; }
- void add(CompactPtrT P) {
- DCHECK_LT(Count, MaxNumCached);
- Batch[Count++] = P;
- }
- void copyToArray(CompactPtrT *Array) const {
- memcpy(Array, Batch, sizeof(Batch[0]) * Count);
- }
- u16 getCount() const { return Count; }
- CompactPtrT get(u16 I) const {
- DCHECK_LE(I, Count);
- return Batch[I];
- }
- static u16 getMaxCached(uptr Size) {
- return Min(MaxNumCached, SizeClassMap::getMaxCachedHint(Size));
- }
- TransferBatch *Next;
- private:
- CompactPtrT Batch[MaxNumCached];
- u16 Count;
- };
- // A BatchGroup is used to collect blocks. Each group has a group id to
- // identify the group kind of contained blocks.
- struct BatchGroup {
- // `Next` is used by IntrusiveList.
- BatchGroup *Next;
- // The identifier of each group
- uptr GroupId;
- // Cache value of TransferBatch::getMaxCached()
- u16 MaxCachedPerBatch;
- // Number of blocks pushed into this group. This is an increment-only
- // counter.
- uptr PushedBlocks;
- // This is used to track how many blocks are pushed since last time we
- // checked `PushedBlocks`. It's useful for page releasing to determine the
- // usage of a BatchGroup.
- uptr PushedBlocksAtLastCheckpoint;
- // Blocks are managed by TransferBatch in a list.
- SinglyLinkedList<TransferBatch> Batches;
- };
- static_assert(sizeof(BatchGroup) <= sizeof(TransferBatch),
- "BatchGroup uses the same class size as TransferBatch");
- void init(GlobalStats *S, SizeClassAllocator *A) {
- DCHECK(isEmpty());
- Stats.init();
- if (LIKELY(S))
- S->link(&Stats);
- Allocator = A;
- }
- void destroy(GlobalStats *S) {
- drain();
- if (LIKELY(S))
- S->unlink(&Stats);
- }
- void *allocate(uptr ClassId) {
- DCHECK_LT(ClassId, NumClasses);
- PerClass *C = &PerClassArray[ClassId];
- if (C->Count == 0) {
- if (UNLIKELY(!refill(C, ClassId)))
- return nullptr;
- DCHECK_GT(C->Count, 0);
- }
- // We read ClassSize first before accessing Chunks because it's adjacent to
- // Count, while Chunks might be further off (depending on Count). That keeps
- // the memory accesses in close quarters.
- const uptr ClassSize = C->ClassSize;
- CompactPtrT CompactP = C->Chunks[--C->Count];
- Stats.add(StatAllocated, ClassSize);
- Stats.sub(StatFree, ClassSize);
- return Allocator->decompactPtr(ClassId, CompactP);
- }
- void deallocate(uptr ClassId, void *P) {
- CHECK_LT(ClassId, NumClasses);
- PerClass *C = &PerClassArray[ClassId];
- // We still have to initialize the cache in the event that the first heap
- // operation in a thread is a deallocation.
- initCacheMaybe(C);
- if (C->Count == C->MaxCount)
- drain(C, ClassId);
- // See comment in allocate() about memory accesses.
- const uptr ClassSize = C->ClassSize;
- C->Chunks[C->Count++] =
- Allocator->compactPtr(ClassId, reinterpret_cast<uptr>(P));
- Stats.sub(StatAllocated, ClassSize);
- Stats.add(StatFree, ClassSize);
- }
- bool isEmpty() const {
- for (uptr I = 0; I < NumClasses; ++I)
- if (PerClassArray[I].Count)
- return false;
- return true;
- }
- void drain() {
- // Drain BatchClassId last as createBatch can refill it.
- for (uptr I = 0; I < NumClasses; ++I) {
- if (I == BatchClassId)
- continue;
- while (PerClassArray[I].Count > 0)
- drain(&PerClassArray[I], I);
- }
- while (PerClassArray[BatchClassId].Count > 0)
- drain(&PerClassArray[BatchClassId], BatchClassId);
- DCHECK(isEmpty());
- }
- TransferBatch *createBatch(uptr ClassId, void *B) {
- if (ClassId != BatchClassId)
- B = allocate(BatchClassId);
- if (UNLIKELY(!B))
- reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
- return reinterpret_cast<TransferBatch *>(B);
- }
- BatchGroup *createGroup() {
- void *Ptr = allocate(BatchClassId);
- if (UNLIKELY(!Ptr))
- reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
- return reinterpret_cast<BatchGroup *>(Ptr);
- }
- LocalStats &getStats() { return Stats; }
- private:
- static const uptr NumClasses = SizeClassMap::NumClasses;
- static const uptr BatchClassId = SizeClassMap::BatchClassId;
- struct alignas(SCUDO_CACHE_LINE_SIZE) PerClass {
- u16 Count;
- u16 MaxCount;
- // Note: ClassSize is zero for the transfer batch.
- uptr ClassSize;
- CompactPtrT Chunks[2 * TransferBatch::MaxNumCached];
- };
- PerClass PerClassArray[NumClasses] = {};
- LocalStats Stats;
- SizeClassAllocator *Allocator = nullptr;
- ALWAYS_INLINE void initCacheMaybe(PerClass *C) {
- if (LIKELY(C->MaxCount))
- return;
- initCache();
- DCHECK_NE(C->MaxCount, 0U);
- }
- NOINLINE void initCache() {
- for (uptr I = 0; I < NumClasses; I++) {
- PerClass *P = &PerClassArray[I];
- const uptr Size = SizeClassAllocator::getSizeByClassId(I);
- P->MaxCount = static_cast<u16>(2 * TransferBatch::getMaxCached(Size));
- if (I != BatchClassId) {
- P->ClassSize = Size;
- } else {
- // ClassSize in this struct is only used for malloc/free stats, which
- // should only track user allocations, not internal movements.
- P->ClassSize = 0;
- }
- }
- }
- void destroyBatch(uptr ClassId, void *B) {
- if (ClassId != BatchClassId)
- deallocate(BatchClassId, B);
- }
- NOINLINE bool refill(PerClass *C, uptr ClassId) {
- initCacheMaybe(C);
- TransferBatch *B = Allocator->popBatch(this, ClassId);
- if (UNLIKELY(!B))
- return false;
- DCHECK_GT(B->getCount(), 0);
- C->Count = B->getCount();
- B->copyToArray(C->Chunks);
- B->clear();
- destroyBatch(ClassId, B);
- return true;
- }
- NOINLINE void drain(PerClass *C, uptr ClassId) {
- const u16 Count = Min(static_cast<u16>(C->MaxCount / 2), C->Count);
- Allocator->pushBlocks(this, ClassId, &C->Chunks[0], Count);
- // u16 will be promoted to int by arithmetic type conversion.
- C->Count = static_cast<u16>(C->Count - Count);
- for (u16 I = 0; I < C->Count; I++)
- C->Chunks[I] = C->Chunks[I + Count];
- }
- };
} // namespace scudo

#endif // SCUDO_LOCAL_CACHE_H_
|