//===-- secondary.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_SECONDARY_H_
#define SCUDO_SECONDARY_H_

#include "chunk.h"
#include "common.h"
#include "list.h"
#include "mem_map.h"
#include "memtag.h"
#include "mutex.h"
#include "options.h"
#include "stats.h"
#include "string_utils.h"
#include "thread_annotations.h"

namespace scudo {

// This allocator wraps the platform allocation primitives, and as such is on
// the slower side and should preferably be used for larger sized allocations.
// Blocks allocated will be preceded and followed by a guard page, and hold
// their own header that is not checksummed: the guard pages and the Combined
// header should be enough for our purpose.
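//
// A rough sketch of the resulting block layout, as produced by
// MapAllocator<Config>::allocate() below (offsets are approximate; the user
// allocation is placed towards the end of the committed region, subject to
// alignment):
//
//   MapBase  CommitBase                      HeaderPos            MapEnd
//   +-------+--------------------------------+---------+------+-------+
//   | guard |     unused committed space     | headers | user | guard |
//   | page  |                                |         | data | page  |
//   +-------+--------------------------------+---------+------+-------+
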
- namespace LargeBlock {
- struct alignas(Max<uptr>(archSupportsMemoryTagging()
- ? archMemoryTagGranuleSize()
- : 1,
- 1U << SCUDO_MIN_ALIGNMENT_LOG)) Header {
- LargeBlock::Header *Prev;
- LargeBlock::Header *Next;
- uptr CommitBase;
- uptr CommitSize;
- MemMapT MemMap;
- };
- static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
- static_assert(!archSupportsMemoryTagging() ||
- sizeof(Header) % archMemoryTagGranuleSize() == 0,
- "");
- constexpr uptr getHeaderSize() { return sizeof(Header); }
- template <typename Config> static uptr addHeaderTag(uptr Ptr) {
- if (allocatorSupportsMemoryTagging<Config>())
- return addFixedTag(Ptr, 1);
- return Ptr;
- }
- template <typename Config> static Header *getHeader(uptr Ptr) {
- return reinterpret_cast<Header *>(addHeaderTag<Config>(Ptr)) - 1;
- }
- template <typename Config> static Header *getHeader(const void *Ptr) {
- return getHeader<Config>(reinterpret_cast<uptr>(Ptr));
- }
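
// For instance (illustrative): if `P` is a pointer previously returned by
// MapAllocator<Config>::allocate(), then `getHeader<Config>(P)` yields the
// Header stored immediately before the (tag-adjusted) user pointer.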

} // namespace LargeBlock

static inline void unmap(LargeBlock::Header *H) {
  // Note that `H->MemMap` is stored on the pages managed by itself. Take
  // over the ownership before unmap() so that any operation along with unmap()
  // won't touch inaccessible pages.
  MemMapT MemMap = H->MemMap;
  MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
}

namespace {

struct CachedBlock {
  uptr CommitBase = 0;
  uptr CommitSize = 0;
  uptr BlockBegin = 0;
  MemMapT MemMap = {};
  u64 Time = 0;

  bool isValid() { return CommitBase != 0; }

  void invalidate() { CommitBase = 0; }
};

} // namespace

template <typename Config> class MapAllocatorNoCache {
public:
  void init(UNUSED s32 ReleaseToOsInterval) {}
  bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
                UNUSED uptr HeadersSize, UNUSED LargeBlock::Header **H,
                UNUSED bool *Zeroed) {
    return false;
  }
  void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
  bool canCache(UNUSED uptr Size) { return false; }
  void disable() {}
  void enable() {}
  void releaseToOS() {}
  void disableMemoryTagging() {}
  void unmapTestOnly() {}
  bool setOption(Option O, UNUSED sptr Value) {
    if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
        O == Option::MaxCacheEntrySize)
      return false;
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }
  void getStats(UNUSED ScopedString *Str) {
    Str->append("Secondary Cache Disabled\n");
  }
};

static const uptr MaxUnusedCachePages = 4U;

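// mapSecondary() commits [CommitBase, CommitBase + CommitSize) within MemMap.
// With memory tagging in use and a commit larger than MaxUnusedCachePages
// worth of bytes, the remap is done in two parts: the leading portion (up to
// the larger of AllocPos and CommitBase + MaxUnusedCacheBytes) is mapped with
// MAP_MEMTAG, while the tail is mapped untagged; tag stores never reach past
// that point (see the AllocPos check in MapAllocatorCache::retrieve() below).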
template <typename Config>
bool mapSecondary(const Options &Options, uptr CommitBase, uptr CommitSize,
                  uptr AllocPos, uptr Flags, MemMapT &MemMap) {
  Flags |= MAP_RESIZABLE;
  Flags |= MAP_ALLOWNOMEM;

  const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * getPageSizeCached();
  if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
    const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
    return MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary",
                        MAP_MEMTAG | Flags) &&
           MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
                        "scudo:secondary", Flags);
  } else {
    const uptr RemapFlags =
        (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
    return MemMap.remap(CommitBase, CommitSize, "scudo:secondary", RemapFlags);
  }
}

// Template specialization to avoid producing a zero-length array.
template <typename T, size_t Size> class NonZeroLengthArray {
public:
  T &operator[](uptr Idx) { return values[Idx]; }

private:
  T values[Size];
};

template <typename T> class NonZeroLengthArray<T, 0> {
public:
  T &operator[](uptr UNUSED Idx) { UNREACHABLE("Unsupported!"); }
};
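
// Illustrative note: with CacheConfig::QuarantineSize == 0 (quarantine
// disabled), a plain `CachedBlock Quarantine[0]` member would be a
// zero-length array, which is not valid standard C++; the specialization
// above keeps the member declarable while trapping any accidental access.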

template <typename Config> class MapAllocatorCache {
public:
  using CacheConfig = typename Config::Secondary::Cache;

  void getStats(ScopedString *Str) {
    ScopedLock L(Mutex);
    uptr Integral;
    uptr Fractional;
    computePercentage(SuccessfulRetrieves, CallsToRetrieve, &Integral,
                      &Fractional);
    Str->append("Stats: MapAllocatorCache: EntriesCount: %u, "
                "MaxEntriesCount: %u, MaxEntrySize: %zu\n",
                EntriesCount, atomic_load_relaxed(&MaxEntriesCount),
                atomic_load_relaxed(&MaxEntrySize));
    Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                "(%zu.%02zu%%)\n",
                SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
    for (CachedBlock Entry : Entries) {
      if (!Entry.isValid())
        continue;
      Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                  "BlockSize: %zu %s\n",
                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
    }
  }

  // Ensure the default maximum specified fits the array.
  static_assert(CacheConfig::DefaultMaxEntriesCount <=
                    CacheConfig::EntriesArraySize,
                "");

  void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(EntriesCount, 0U);
    setOption(Option::MaxCacheEntriesCount,
              static_cast<sptr>(CacheConfig::DefaultMaxEntriesCount));
    setOption(Option::MaxCacheEntrySize,
              static_cast<sptr>(CacheConfig::DefaultMaxEntrySize));
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }

  void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
    if (!canCache(H->CommitSize))
      return unmap(H);

    bool EntryCached = false;
    bool EmptyCache = false;
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    const u64 Time = getMonotonicTimeFast();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    CachedBlock Entry;
    Entry.CommitBase = H->CommitBase;
    Entry.CommitSize = H->CommitSize;
    Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
    Entry.MemMap = H->MemMap;
    Entry.Time = Time;
    if (useMemoryTagging<Config>(Options)) {
      if (Interval == 0 && !SCUDO_FUCHSIA) {
        // Release the memory and make it inaccessible at the same time by
        // creating a new MAP_NOACCESS mapping on top of the existing mapping.
        // Fuchsia does not support replacing mappings by creating a new
        // mapping on top, so we just do the two syscalls there.
        Entry.Time = 0;
        mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize,
                             Entry.CommitBase, MAP_NOACCESS, Entry.MemMap);
      } else {
        Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
                                         MAP_NOACCESS);
      }
    } else if (Interval == 0) {
      Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
      Entry.Time = 0;
    }
    do {
      ScopedLock L(Mutex);
      if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
        // If we get here then memory tagging was disabled in between when we
        // read Options and when we locked Mutex. We can't insert our entry into
        // the quarantine or the cache because the permissions would be wrong so
        // just unmap it.
        break;
      }
      if (CacheConfig::QuarantineSize && useMemoryTagging<Config>(Options)) {
        QuarantinePos =
            (QuarantinePos + 1) % Max(CacheConfig::QuarantineSize, 1u);
        if (!Quarantine[QuarantinePos].isValid()) {
          Quarantine[QuarantinePos] = Entry;
          return;
        }
        CachedBlock PrevEntry = Quarantine[QuarantinePos];
        Quarantine[QuarantinePos] = Entry;
        if (OldestTime == 0)
          OldestTime = Entry.Time;
        Entry = PrevEntry;
      }
      if (EntriesCount >= MaxCount) {
        if (IsFullEvents++ == 4U)
          EmptyCache = true;
      } else {
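        // Insert at the head: move the entry currently at index 0 into the
        // free slot so that Entries[0] always holds the most recently cached
        // block, which is therefore the first one considered by retrieve().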
        for (u32 I = 0; I < MaxCount; I++) {
          if (Entries[I].isValid())
            continue;
          if (I != 0)
            Entries[I] = Entries[0];
          Entries[0] = Entry;
          EntriesCount++;
          if (OldestTime == 0)
            OldestTime = Entry.Time;
          EntryCached = true;
          break;
        }
      }
    } while (0);
    if (EmptyCache)
      empty();
    else if (Interval >= 0)
      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
    if (!EntryCached)
      Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
  }

  bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
                LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
    const uptr PageSize = getPageSizeCached();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    // 10% of the requested size proved to be the optimal choice for
    // retrieving cached blocks after testing several options.
    constexpr u32 FragmentedBytesDivisor = 10;
    bool Found = false;
    CachedBlock Entry;
    uptr EntryHeaderPos = 0;
    {
      ScopedLock L(Mutex);
      CallsToRetrieve++;
      if (EntriesCount == 0)
        return false;
      u32 OptimalFitIndex = 0;
      uptr MinDiff = UINTPTR_MAX;
      for (u32 I = 0; I < MaxCount; I++) {
        if (!Entries[I].isValid())
          continue;
        const uptr CommitBase = Entries[I].CommitBase;
        const uptr CommitSize = Entries[I].CommitSize;
        const uptr AllocPos =
            roundDown(CommitBase + CommitSize - Size, Alignment);
        const uptr HeaderPos = AllocPos - HeadersSize;
        if (HeaderPos > CommitBase + CommitSize)
          continue;
        if (HeaderPos < CommitBase ||
            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
          continue;
        }
        Found = true;
        const uptr Diff = HeaderPos - CommitBase;
        // Immediately use a cached block if its size is close enough to the
        // requested size.
        const uptr MaxAllowedFragmentedBytes =
            (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
        if (Diff <= MaxAllowedFragmentedBytes) {
          OptimalFitIndex = I;
          EntryHeaderPos = HeaderPos;
          break;
        }
        // Keep track of the smallest cached block that is greater than
        // (AllocSize + HeaderSize).
        if (Diff > MinDiff)
          continue;
        OptimalFitIndex = I;
        MinDiff = Diff;
        EntryHeaderPos = HeaderPos;
      }
      if (Found) {
        Entry = Entries[OptimalFitIndex];
        Entries[OptimalFitIndex].invalidate();
        EntriesCount--;
        SuccessfulRetrieves++;
      }
    }
    if (!Found)
      return false;

    *H = reinterpret_cast<LargeBlock::Header *>(
        LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
    *Zeroed = Entry.Time == 0;
    if (useMemoryTagging<Config>(Options))
      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
    uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
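    // Re-establish a valid tag state for the reused range: a block that was
    // released to the OS is retagged from CommitBase, otherwise only the gap
    // between the old and the new block begin needs (re)tagging.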
    if (useMemoryTagging<Config>(Options)) {
      if (*Zeroed) {
        storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
                  NewBlockBegin);
      } else if (Entry.BlockBegin < NewBlockBegin) {
        storeTags(Entry.BlockBegin, NewBlockBegin);
      } else {
        storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
      }
    }

    (*H)->CommitBase = Entry.CommitBase;
    (*H)->CommitSize = Entry.CommitSize;
    (*H)->MemMap = Entry.MemMap;
    return true;
  }

  bool canCache(uptr Size) {
    return atomic_load_relaxed(&MaxEntriesCount) != 0U &&
           Size <= atomic_load_relaxed(&MaxEntrySize);
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval = Max(
          Min(static_cast<s32>(Value), CacheConfig::MaxReleaseToOsIntervalMs),
          CacheConfig::MinReleaseToOsIntervalMs);
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    if (O == Option::MaxCacheEntriesCount) {
      const u32 MaxCount = static_cast<u32>(Value);
      if (MaxCount > CacheConfig::EntriesArraySize)
        return false;
      atomic_store_relaxed(&MaxEntriesCount, MaxCount);
      return true;
    }
    if (O == Option::MaxCacheEntrySize) {
      atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
      return true;
    }
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void releaseToOS() { releaseOlderThan(UINT64_MAX); }

  void disableMemoryTagging() EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    for (u32 I = 0; I != CacheConfig::QuarantineSize; ++I) {
      if (Quarantine[I].isValid()) {
        MemMapT &MemMap = Quarantine[I].MemMap;
        MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
        Quarantine[I].invalidate();
      }
    }
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    for (u32 I = 0; I < MaxCount; I++) {
      if (Entries[I].isValid()) {
        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
                                              Entries[I].CommitSize, 0);
      }
    }
    QuarantinePos = -1U;
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS { Mutex.lock(); }

  void enable() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }

  void unmapTestOnly() { empty(); }

private:
  void empty() {
    MemMapT MapInfo[CacheConfig::EntriesArraySize];
    uptr N = 0;
    {
      ScopedLock L(Mutex);
      for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++) {
        if (!Entries[I].isValid())
          continue;
        MapInfo[N] = Entries[I].MemMap;
        Entries[I].invalidate();
        N++;
      }
      EntriesCount = 0;
      IsFullEvents = 0;
    }
    for (uptr I = 0; I < N; I++) {
      MemMapT &MemMap = MapInfo[I];
      MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
    }
  }

  void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
    if (!Entry.isValid() || !Entry.Time)
      return;
    if (Entry.Time > Time) {
      if (OldestTime == 0 || Entry.Time < OldestTime)
        OldestTime = Entry.Time;
      return;
    }
    Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
    Entry.Time = 0;
  }

  void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
      return;
    OldestTime = 0;
    for (uptr I = 0; I < CacheConfig::QuarantineSize; I++)
      releaseIfOlderThan(Quarantine[I], Time);
    for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++)
      releaseIfOlderThan(Entries[I], Time);
  }

  HybridMutex Mutex;
  u32 EntriesCount GUARDED_BY(Mutex) = 0;
  u32 QuarantinePos GUARDED_BY(Mutex) = 0;
  atomic_u32 MaxEntriesCount = {};
  atomic_uptr MaxEntrySize = {};
  u64 OldestTime GUARDED_BY(Mutex) = 0;
  u32 IsFullEvents GUARDED_BY(Mutex) = 0;
  atomic_s32 ReleaseToOsIntervalMs = {};
  u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
  u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;

  CachedBlock Entries[CacheConfig::EntriesArraySize] GUARDED_BY(Mutex) = {};
  NonZeroLengthArray<CachedBlock, CacheConfig::QuarantineSize>
      Quarantine GUARDED_BY(Mutex) = {};
};
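
// An illustrative Secondary cache configuration (hypothetical values; the
// field names below are the ones MapAllocatorCache reads from
// Config::Secondary::Cache):
//
//   struct Cache {
//     static const u32 EntriesArraySize = 32U;
//     static const u32 QuarantineSize = 0U;
//     static const u32 DefaultMaxEntriesCount = 32U;
//     static const uptr DefaultMaxEntrySize = 1UL << 19;
//     static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
//     static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
//   };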

template <typename Config> class MapAllocator {
public:
  void init(GlobalStats *S,
            s32 ReleaseToOsInterval = -1) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(AllocatedBytes, 0U);
    DCHECK_EQ(FreedBytes, 0U);
    Cache.init(ReleaseToOsInterval);
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
  }

  void *allocate(const Options &Options, uptr Size, uptr AlignmentHint = 0,
                 uptr *BlockEnd = nullptr,
                 FillContentsMode FillContents = NoFill);

  void deallocate(const Options &Options, void *Ptr);

  static uptr getBlockEnd(void *Ptr) {
    auto *B = LargeBlock::getHeader<Config>(Ptr);
    return B->CommitBase + B->CommitSize;
  }

  static uptr getBlockSize(void *Ptr) {
    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
  }

  static constexpr uptr getHeadersSize() {
    return Chunk::getHeaderSize() + LargeBlock::getHeaderSize();
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS {
    Mutex.lock();
    Cache.disable();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    Cache.enable();
    Mutex.unlock();
  }

  template <typename F> void iterateOverBlocks(F Callback) const {
    Mutex.assertHeld();
    for (const auto &H : InUseBlocks) {
      uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize();
      if (allocatorSupportsMemoryTagging<Config>())
        Ptr = untagPointer(Ptr);
      Callback(Ptr);
    }
  }

  bool canCache(uptr Size) { return Cache.canCache(Size); }

  bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }

  void releaseToOS() { Cache.releaseToOS(); }

  void disableMemoryTagging() { Cache.disableMemoryTagging(); }

  void unmapTestOnly() { Cache.unmapTestOnly(); }

  void getStats(ScopedString *Str);

private:
  typename Config::Secondary::template CacheT<Config> Cache;

  mutable HybridMutex Mutex;
  DoublyLinkedList<LargeBlock::Header> InUseBlocks GUARDED_BY(Mutex);
  uptr AllocatedBytes GUARDED_BY(Mutex) = 0;
  uptr FreedBytes GUARDED_BY(Mutex) = 0;
  uptr FragmentedBytes GUARDED_BY(Mutex) = 0;
  uptr LargestSize GUARDED_BY(Mutex) = 0;
  u32 NumberOfAllocs GUARDED_BY(Mutex) = 0;
  u32 NumberOfFrees GUARDED_BY(Mutex) = 0;
  LocalStats Stats GUARDED_BY(Mutex);
};

// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The hint
// parameter allows us to unmap spurious memory when dealing with larger
// (greater than a page) alignments on 32-bit platforms.
// Due to the sparsity of address space available on those platforms, requesting
// an allocation from the Secondary with a large alignment would end up wasting
// VA space (even though we are not committing the whole thing), hence the need
// to trim off some of the reserved space.
// For allocations requested with an alignment greater than or equal to a page,
// the committed memory will amount to something close to Size - AlignmentHint
// (pending rounding and headers).
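//
// A hedged usage sketch (hypothetical `Config` and sizes; `Opts` stands for
// an Options value obtained from the frontend):
//
//   MapAllocator<Config> Secondary;
//   Secondary.init(/*S=*/nullptr);
//   uptr BlockEnd;
//   void *P = Secondary.allocate(Opts, /*Size=*/1 << 20, /*AlignmentHint=*/0,
//                                &BlockEnd, ZeroFill);
//   // ... use P; MapAllocator<Config>::getBlockEnd(P) == BlockEnd ...
//   Secondary.deallocate(Opts, P);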
template <typename Config>
void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
                                     uptr Alignment, uptr *BlockEndPtr,
                                     FillContentsMode FillContents) {
  if (Options.get(OptionBit::AddLargeAllocationSlack))
    Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
  const uptr PageSize = getPageSizeCached();

  // Note that cached blocks may already have an aligned address. Thus we
  // simply pass the required size (`Size` + `getHeadersSize()`) for the
  // cache lookup.
  const uptr MinNeededSizeForCache = roundUp(Size + getHeadersSize(), PageSize);

  if (Alignment < PageSize && Cache.canCache(MinNeededSizeForCache)) {
    LargeBlock::Header *H;
    bool Zeroed;
    if (Cache.retrieve(Options, Size, Alignment, getHeadersSize(), &H,
                       &Zeroed)) {
      const uptr BlockEnd = H->CommitBase + H->CommitSize;
      if (BlockEndPtr)
        *BlockEndPtr = BlockEnd;
      uptr HInt = reinterpret_cast<uptr>(H);
      if (allocatorSupportsMemoryTagging<Config>())
        HInt = untagPointer(HInt);
      const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
      void *Ptr = reinterpret_cast<void *>(PtrInt);
      if (FillContents && !Zeroed)
        memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
               BlockEnd - PtrInt);
      {
        ScopedLock L(Mutex);
        InUseBlocks.push_back(H);
        AllocatedBytes += H->CommitSize;
        FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize;
        NumberOfAllocs++;
        Stats.add(StatAllocated, H->CommitSize);
        Stats.add(StatMapped, H->MemMap.getCapacity());
      }
      return Ptr;
    }
  }

  uptr RoundedSize =
      roundUp(roundUp(Size, Alignment) + getHeadersSize(), PageSize);
  if (Alignment > PageSize)
    RoundedSize += Alignment - PageSize;

  ReservedMemoryT ReservedMemory;
  const uptr MapSize = RoundedSize + 2 * PageSize;
  if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr,
                                      MAP_ALLOWNOMEM))) {
    return nullptr;
  }

  // Take over ownership of the entire reserved region.
  MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
                                           ReservedMemory.getCapacity());
  uptr MapBase = MemMap.getBase();
  uptr CommitBase = MapBase + PageSize;
  uptr MapEnd = MapBase + MapSize;

  // In the unlikely event of alignments larger than a page, adjust the amount
  // of memory we want to commit, and trim the extra memory.
  if (UNLIKELY(Alignment >= PageSize)) {
    // For alignments greater than or equal to a page, the user pointer (e.g.,
    // the pointer that is returned by the C or C++ allocation APIs) ends up
    // on a page boundary, and our headers will live in the preceding page.
    CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
    const uptr NewMapBase = CommitBase - PageSize;
    DCHECK_GE(NewMapBase, MapBase);
    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
    // are less constrained memory-wise, and that saves us two syscalls.
    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
      MemMap.unmap(MapBase, NewMapBase - MapBase);
      MapBase = NewMapBase;
    }
    const uptr NewMapEnd =
        CommitBase + PageSize + roundUp(Size, PageSize) + PageSize;
    DCHECK_LE(NewMapEnd, MapEnd);
    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
      MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
      MapEnd = NewMapEnd;
    }
  }

  const uptr CommitSize = MapEnd - PageSize - CommitBase;
  const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
  if (!mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0,
                            MemMap)) {
    MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
    return nullptr;
  }
  const uptr HeaderPos = AllocPos - getHeadersSize();
  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
      LargeBlock::addHeaderTag<Config>(HeaderPos));
  if (useMemoryTagging<Config>(Options))
    storeTags(LargeBlock::addHeaderTag<Config>(CommitBase),
              reinterpret_cast<uptr>(H + 1));
  H->CommitBase = CommitBase;
  H->CommitSize = CommitSize;
  H->MemMap = MemMap;
  if (BlockEndPtr)
    *BlockEndPtr = CommitBase + CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.push_back(H);
    AllocatedBytes += CommitSize;
    FragmentedBytes += H->MemMap.getCapacity() - CommitSize;
    if (LargestSize < CommitSize)
      LargestSize = CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, CommitSize);
    Stats.add(StatMapped, H->MemMap.getCapacity());
  }
  return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize());
}

template <typename Config>
void MapAllocator<Config>::deallocate(const Options &Options, void *Ptr)
    EXCLUDES(Mutex) {
  LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
  const uptr CommitSize = H->CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.remove(H);
    FreedBytes += CommitSize;
    FragmentedBytes -= H->MemMap.getCapacity() - CommitSize;
    NumberOfFrees++;
    Stats.sub(StatAllocated, CommitSize);
    Stats.sub(StatMapped, H->MemMap.getCapacity());
  }
  Cache.store(Options, H);
}

template <typename Config>
void MapAllocator<Config>::getStats(ScopedString *Str) EXCLUDES(Mutex) {
  ScopedLock L(Mutex);
  Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times "
              "(%zuK), remains %u (%zuK) max %zuM, Fragmented %zuK\n",
              NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
              FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
              (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20,
              FragmentedBytes >> 10);
  Cache.getStats(Str);
}

} // namespace scudo

#endif // SCUDO_SECONDARY_H_