//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// Scudo Hardened Allocator implementation.
/// It uses the sanitizer_common allocator as a base and aims at mitigating
/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
/// header, a delayed free list, and additional sanity checks.
///
//===----------------------------------------------------------------------===//
- #include "scudo_allocator.h"
- #include "scudo_crc32.h"
- #include "scudo_errors.h"
- #include "scudo_flags.h"
- #include "scudo_interface_internal.h"
- #include "scudo_tsd.h"
- #include "scudo_utils.h"
- #include "sanitizer_common/sanitizer_allocator_checks.h"
- #include "sanitizer_common/sanitizer_allocator_interface.h"
- #include "sanitizer_common/sanitizer_quarantine.h"
- #ifdef GWP_ASAN_HOOKS
- # include "gwp_asan/guarded_pool_allocator.h"
- # include "gwp_asan/optional/backtrace.h"
- # include "gwp_asan/optional/options_parser.h"
- #include "gwp_asan/optional/segv_handler.h"
- #endif // GWP_ASAN_HOOKS
- #include <errno.h>
- #include <string.h>
namespace __scudo {

// Global static cookie, initialized at start-up.
static u32 Cookie;

// We default to software CRC32 if the alternatives are not supported, either
// at compilation or at runtime.
static atomic_uint8_t HashAlgorithm = { CRC32Software };
inline u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) {
  // If the hardware CRC32 feature is defined here, it was enabled everywhere,
  // as opposed to only for scudo_crc32.cpp. This means that other hardware
  // specific instructions were likely emitted at other places, and as a
  // result there is no reason not to use it here.
#if defined(__CRC32__) || defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
  Crc = CRC32_INTRINSIC(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = CRC32_INTRINSIC(Crc, Array[i]);
  return Crc;
#else
  if (atomic_load_relaxed(&HashAlgorithm) == CRC32Hardware) {
    Crc = computeHardwareCRC32(Crc, Value);
    for (uptr i = 0; i < ArraySize; i++)
      Crc = computeHardwareCRC32(Crc, Array[i]);
    return Crc;
  }
  Crc = computeSoftwareCRC32(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = computeSoftwareCRC32(Crc, Array[i]);
  return Crc;
#endif  // defined(__CRC32__) || defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
}
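
// When hardware CRC32 support can't be assumed at compile time, the algorithm
// used by computeCRC32 above is selected once at start-up (see
// Allocator::init below) and only read here through a relaxed atomic load of
// HashAlgorithm.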

static BackendT &getBackend();

namespace Chunk {
static inline AtomicPackedHeader *getAtomicHeader(void *Ptr) {
  return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
                                                getHeaderSize());
}
static inline
const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
  return reinterpret_cast<const AtomicPackedHeader *>(
      reinterpret_cast<uptr>(Ptr) - getHeaderSize());
}

static inline bool isAligned(const void *Ptr) {
  return IsAligned(reinterpret_cast<uptr>(Ptr), MinAlignment);
}
// We can't use the offset member of the chunk itself, as we would double
// fetch it without any guarantee that it hadn't been tampered with in the
// meantime. To prevent this, we work with a local copy of the header.
static inline void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) {
  return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
      getHeaderSize() - (Header->Offset << MinAlignmentLog));
}

// Returns the usable size for a chunk, meaning the number of bytes from the
// beginning of the user data to the end of the backend allocated chunk.
static inline uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
  const uptr ClassId = Header->ClassId;
  if (ClassId)
    return PrimaryT::ClassIdToSize(ClassId) - getHeaderSize() -
        (Header->Offset << MinAlignmentLog);
  return SecondaryT::GetActuallyAllocatedSize(
      getBackendPtr(Ptr, Header)) - getHeaderSize();
}

// Returns the size the user requested when allocating the chunk.
static inline uptr getSize(const void *Ptr, UnpackedHeader *Header) {
  const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
  if (Header->ClassId)
    return SizeOrUnusedBytes;
  return SecondaryT::GetActuallyAllocatedSize(
      getBackendPtr(Ptr, Header)) - getHeaderSize() - SizeOrUnusedBytes;
}

// Compute the checksum of the chunk pointer and its header.
static inline u16 computeChecksum(const void *Ptr, UnpackedHeader *Header) {
  UnpackedHeader ZeroChecksumHeader = *Header;
  ZeroChecksumHeader.Checksum = 0;
  uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
  memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
  const u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(Ptr),
                               HeaderHolder, ARRAY_SIZE(HeaderHolder));
  return static_cast<u16>(Crc);
}
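
// The checksum ties together the secret Cookie, the chunk address, and the
// header contents (with the Checksum field zeroed out). Forging a header that
// passes validation therefore requires knowing the Cookie, and a valid header
// can't simply be replayed at a different address.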

// Checks the validity of a chunk by verifying its checksum. It does not
// terminate the process in the event of an invalid chunk.
static inline bool isValid(const void *Ptr) {
  PackedHeader NewPackedHeader =
      atomic_load_relaxed(getConstAtomicHeader(Ptr));
  UnpackedHeader NewUnpackedHeader =
      bit_cast<UnpackedHeader>(NewPackedHeader);
  return (NewUnpackedHeader.Checksum ==
          computeChecksum(Ptr, &NewUnpackedHeader));
}

// Ensure that ChunkAvailable is 0, so that if a 0 checksum is ever valid for
// a fully nulled out header, its state will be available anyway.
COMPILER_CHECK(ChunkAvailable == 0);

// Loads and unpacks the header, verifying the checksum in the process.
static inline
void loadHeader(const void *Ptr, UnpackedHeader *NewUnpackedHeader) {
  PackedHeader NewPackedHeader =
      atomic_load_relaxed(getConstAtomicHeader(Ptr));
  *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
  if (UNLIKELY(NewUnpackedHeader->Checksum !=
      computeChecksum(Ptr, NewUnpackedHeader)))
    dieWithMessage("corrupted chunk header at address %p\n", Ptr);
}

// Packs and stores the header, computing the checksum in the process.
static inline void storeHeader(void *Ptr, UnpackedHeader *NewUnpackedHeader) {
  NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
  PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
  atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
}

// Packs and stores the header, computing the checksum in the process. We
// compare the current header with the expected provided one to ensure that
// we are not being raced by a corruption occurring in another thread.
static inline void compareExchangeHeader(void *Ptr,
                                         UnpackedHeader *NewUnpackedHeader,
                                         UnpackedHeader *OldUnpackedHeader) {
  NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
  PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
  PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
  if (UNLIKELY(!atomic_compare_exchange_strong(
          getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader,
          memory_order_relaxed)))
    dieWithMessage("race on chunk header at address %p\n", Ptr);
}
}  // namespace Chunk

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCacheT *Cache)
      : Cache_(Cache) {}

  // Chunk recycling function, returns a quarantined chunk to the backend,
  // first making sure it hasn't been tampered with.
  void Recycle(void *Ptr) {
    UnpackedHeader Header;
    Chunk::loadHeader(Ptr, &Header);
    if (UNLIKELY(Header.State != ChunkQuarantine))
      dieWithMessage("invalid chunk state when recycling address %p\n", Ptr);
    UnpackedHeader NewHeader = Header;
    NewHeader.State = ChunkAvailable;
    Chunk::compareExchangeHeader(Ptr, &NewHeader, &Header);
    void *BackendPtr = Chunk::getBackendPtr(Ptr, &Header);
    if (Header.ClassId)
      getBackend().deallocatePrimary(Cache_, BackendPtr, Header.ClassId);
    else
      getBackend().deallocateSecondary(BackendPtr);
  }

  // Internal quarantine allocation and deallocation functions. We first check
  // that the batches are indeed serviced by the Primary.
  // TODO(kostyak): figure out the best way to protect the batches.
  void *Allocate(uptr Size) {
    const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
    return getBackend().allocatePrimary(Cache_, BatchClassId);
  }

  void Deallocate(void *Ptr) {
    const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
    getBackend().deallocatePrimary(Cache_, Ptr, BatchClassId);
  }

  AllocatorCacheT *Cache_;
  COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
};

typedef Quarantine<QuarantineCallback, void> QuarantineT;
typedef QuarantineT::Cache QuarantineCacheT;
COMPILER_CHECK(sizeof(QuarantineCacheT) <=
               sizeof(ScudoTSD::QuarantineCachePlaceHolder));

QuarantineCacheT *getQuarantineCache(ScudoTSD *TSD) {
  return reinterpret_cast<QuarantineCacheT *>(TSD->QuarantineCachePlaceHolder);
}
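
// The per-thread quarantine cache lives in raw placeholder storage inside the
// TSD; the COMPILER_CHECK above guarantees that it fits, which keeps the
// quarantine types out of the TSD header.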

#ifdef GWP_ASAN_HOOKS
static gwp_asan::GuardedPoolAllocator GuardedAlloc;
#endif // GWP_ASAN_HOOKS

struct Allocator {
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);

  BackendT Backend;
  QuarantineT Quarantine;

  u32 QuarantineChunksUpToSize;

  bool DeallocationTypeMismatch;
  bool ZeroContents;
  bool DeleteSizeMismatch;

  bool CheckRssLimit;
  uptr HardRssLimitMb;
  uptr SoftRssLimitMb;
  atomic_uint8_t RssLimitExceeded;
  atomic_uint64_t RssLastCheckedAtNS;

  explicit Allocator(LinkerInitialized)
      : Quarantine(LINKER_INITIALIZED) {}

  NOINLINE void performSanityChecks();

  void init() {
    SanitizerToolName = "Scudo";
    PrimaryAllocatorName = "ScudoPrimary";
    SecondaryAllocatorName = "ScudoSecondary";
    initFlags();

    performSanityChecks();

    // Check if hardware CRC32 is supported in the binary and by the
    // platform; if so, opt for the hardware version of the checksum.
    if (&computeHardwareCRC32 && hasHardwareCRC32())
      atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);

    SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
    Backend.init(common_flags()->allocator_release_to_os_interval_ms);
    HardRssLimitMb = common_flags()->hard_rss_limit_mb;
    SoftRssLimitMb = common_flags()->soft_rss_limit_mb;
    Quarantine.Init(
        static_cast<uptr>(getFlags()->QuarantineSizeKb) << 10,
        static_cast<uptr>(getFlags()->ThreadLocalQuarantineSizeKb) << 10);
    QuarantineChunksUpToSize = (Quarantine.GetCacheSize() == 0) ? 0 :
        getFlags()->QuarantineChunksUpToSize;
    DeallocationTypeMismatch = getFlags()->DeallocationTypeMismatch;
    DeleteSizeMismatch = getFlags()->DeleteSizeMismatch;
    ZeroContents = getFlags()->ZeroContents;
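    // If no entropy source is available, fall back to a cookie derived from
    // the current time and the allocator's own address: weaker than random
    // bytes, but not a compile-time constant.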
    if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&Cookie), sizeof(Cookie),
                            /*blocking=*/false))) {
      Cookie = static_cast<u32>((NanoTime() >> 12) ^
                                (reinterpret_cast<uptr>(this) >> 4));
    }

    CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
    if (CheckRssLimit)
      atomic_store_relaxed(&RssLastCheckedAtNS, MonotonicNanoTime());
  }

  // Helper function that checks for a valid Scudo chunk. A nullptr isn't one.
  bool isValidPointer(const void *Ptr) {
    initThreadMaybe();
    if (UNLIKELY(!Ptr))
      return false;
    if (!Chunk::isAligned(Ptr))
      return false;
    return Chunk::isValid(Ptr);
  }

  NOINLINE bool isRssLimitExceeded();

  // Allocates a chunk.
  void *
  allocate(uptr Size, uptr Alignment, AllocType Type,
           bool ForceZeroContents = false) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    initThreadMaybe();

    if (UNLIKELY(Alignment > MaxAlignment)) {
      if (AllocatorMayReturnNull())
        return nullptr;
      reportAllocationAlignmentTooBig(Alignment, MaxAlignment);
    }
    if (UNLIKELY(Alignment < MinAlignment))
      Alignment = MinAlignment;

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.shouldSample())) {
      if (void *Ptr = GuardedAlloc.allocate(Size, Alignment)) {
        if (SCUDO_CAN_USE_HOOKS && &__sanitizer_malloc_hook)
          __sanitizer_malloc_hook(Ptr, Size);
        return Ptr;
      }
    }
#endif // GWP_ASAN_HOOKS
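
    // Over-allocate for alignments above the minimum: the extra
    // (Alignment - HeaderSize) bytes guarantee that an Alignment-aligned user
    // address can be carved out past the header (see the Offset computation
    // further down).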
    const uptr NeededSize = RoundUpTo(Size ? Size : 1, MinAlignment) +
        Chunk::getHeaderSize();
    const uptr AlignedSize = (Alignment > MinAlignment) ?
        NeededSize + (Alignment - Chunk::getHeaderSize()) : NeededSize;
    if (UNLIKELY(Size >= MaxAllowedMallocSize) ||
        UNLIKELY(AlignedSize >= MaxAllowedMallocSize)) {
      if (AllocatorMayReturnNull())
        return nullptr;
      reportAllocationSizeTooBig(Size, AlignedSize, MaxAllowedMallocSize);
    }

    if (CheckRssLimit && UNLIKELY(isRssLimitExceeded())) {
      if (AllocatorMayReturnNull())
        return nullptr;
      reportRssLimitExceeded();
    }

    // Primary and Secondary backed allocations have a different treatment. We
    // deal with alignment requirements of Primary serviced allocations here,
    // but the Secondary will take care of its own alignment needs.
    void *BackendPtr;
    uptr BackendSize;
    u8 ClassId;
    if (PrimaryT::CanAllocate(AlignedSize, MinAlignment)) {
      BackendSize = AlignedSize;
      ClassId = SizeClassMap::ClassID(BackendSize);
      bool UnlockRequired;
      ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
      BackendPtr = Backend.allocatePrimary(&TSD->Cache, ClassId);
      if (UnlockRequired)
        TSD->unlock();
    } else {
      BackendSize = NeededSize;
      ClassId = 0;
      BackendPtr = Backend.allocateSecondary(BackendSize, Alignment);
    }
    if (UNLIKELY(!BackendPtr)) {
      SetAllocatorOutOfMemory();
      if (AllocatorMayReturnNull())
        return nullptr;
      reportOutOfMemory(Size);
    }

    // If requested, we will zero out the entire contents of the returned
    // chunk.
    if ((ForceZeroContents || ZeroContents) && ClassId)
      memset(BackendPtr, 0, PrimaryT::ClassIdToSize(ClassId));

    UnpackedHeader Header = {};
    uptr UserPtr = reinterpret_cast<uptr>(BackendPtr) + Chunk::getHeaderSize();
    if (UNLIKELY(!IsAligned(UserPtr, Alignment))) {
      // Since the Secondary takes care of alignment, a non-aligned pointer
      // means it is from the Primary. It is also the only case where the
      // offset field of the header would be non-zero.
      DCHECK(ClassId);
      const uptr AlignedUserPtr = RoundUpTo(UserPtr, Alignment);
      Header.Offset = (AlignedUserPtr - UserPtr) >> MinAlignmentLog;
      UserPtr = AlignedUserPtr;
    }
    DCHECK_LE(UserPtr + Size, reinterpret_cast<uptr>(BackendPtr) + BackendSize);
    Header.State = ChunkAllocated;
    Header.AllocType = Type;
    if (ClassId) {
      Header.ClassId = ClassId;
      Header.SizeOrUnusedBytes = Size;
    } else {
      // The secondary fits the allocations to a page, so the amount of unused
      // bytes is the difference between the end of the user allocation and
      // the next page boundary.
      const uptr PageSize = GetPageSizeCached();
      const uptr TrailingBytes = (UserPtr + Size) & (PageSize - 1);
      if (TrailingBytes)
        Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
    }
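    // Note that for Secondary chunks the requested size may not fit in the
    // header's SizeOrUnusedBytes field, while the number of unused trailing
    // bytes, being smaller than a page, always does; Chunk::getSize
    // reconstructs the requested size from it.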
    void *Ptr = reinterpret_cast<void *>(UserPtr);
    Chunk::storeHeader(Ptr, &Header);
    if (SCUDO_CAN_USE_HOOKS && &__sanitizer_malloc_hook)
      __sanitizer_malloc_hook(Ptr, Size);
    return Ptr;
  }

  // Place a chunk in the quarantine or directly deallocate it in the event of
  // a zero-sized quarantine, or if the size of the chunk is greater than the
  // quarantine chunk size threshold.
  void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header,
                                   uptr Size)
      SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    const bool BypassQuarantine = !Size || (Size > QuarantineChunksUpToSize);
    if (BypassQuarantine) {
      UnpackedHeader NewHeader = *Header;
      NewHeader.State = ChunkAvailable;
      Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
      void *BackendPtr = Chunk::getBackendPtr(Ptr, Header);
      if (Header->ClassId) {
        bool UnlockRequired;
        ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
        getBackend().deallocatePrimary(&TSD->Cache, BackendPtr,
                                       Header->ClassId);
        if (UnlockRequired)
          TSD->unlock();
      } else {
        getBackend().deallocateSecondary(BackendPtr);
      }
    } else {
      // If a small amount of memory was allocated with a larger alignment, we
      // want to take that into account. Otherwise the Quarantine would be
      // filled with tiny chunks, taking up a lot of VA memory. This is an
      // approximation of the usable size, which lets us avoid a call to
      // GetActuallyAllocatedSize.
      const uptr EstimatedSize = Size + (Header->Offset << MinAlignmentLog);
      UnpackedHeader NewHeader = *Header;
      NewHeader.State = ChunkQuarantine;
      Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
      bool UnlockRequired;
      ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
      Quarantine.Put(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache),
                     Ptr, EstimatedSize);
      if (UnlockRequired)
        TSD->unlock();
    }
  }

  // Deallocates a Chunk, which means either adding it to the quarantine or
  // directly returning it to the backend if criteria are met.
  void deallocate(void *Ptr, uptr DeleteSize, uptr DeleteAlignment,
                  AllocType Type) {
    // For a deallocation, we only ensure minimal initialization, meaning the
    // thread local data will be left uninitialized for now (when using ELF
    // TLS) and the fallback cache used instead. This is a workaround for a
    // situation where the only heap operation performed in a thread is a
    // free past the TLS destructors, which would otherwise leave initialized
    // thread specific data never properly destroyed. Any other heap operation
    // will do a full init.
    initThreadMaybe(/*MinimalInit=*/true);
    if (SCUDO_CAN_USE_HOOKS && &__sanitizer_free_hook)
      __sanitizer_free_hook(Ptr);
    if (UNLIKELY(!Ptr))
      return;

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
      GuardedAlloc.deallocate(Ptr);
      return;
    }
#endif // GWP_ASAN_HOOKS

    if (UNLIKELY(!Chunk::isAligned(Ptr)))
      dieWithMessage("misaligned pointer when deallocating address %p\n", Ptr);
    UnpackedHeader Header;
    Chunk::loadHeader(Ptr, &Header);
    if (UNLIKELY(Header.State != ChunkAllocated))
      dieWithMessage("invalid chunk state when deallocating address %p\n",
                     Ptr);
    if (DeallocationTypeMismatch) {
      // The deallocation type has to match the allocation one.
      if (Header.AllocType != Type) {
        // With the exception of memalign'd chunks, which can still be free'd.
        if (Header.AllocType != FromMemalign || Type != FromMalloc)
          dieWithMessage("allocation type mismatch when deallocating address "
                         "%p\n", Ptr);
      }
    }
    const uptr Size = Chunk::getSize(Ptr, &Header);
    if (DeleteSizeMismatch) {
      if (DeleteSize && DeleteSize != Size)
        dieWithMessage("invalid sized delete when deallocating address %p\n",
                       Ptr);
    }
    (void)DeleteAlignment;  // TODO(kostyak): verify that the alignment matches.
    quarantineOrDeallocateChunk(Ptr, &Header, Size);
  }

  // Reallocates a chunk. We can save on a new allocation if the new requested
  // size still fits in the chunk.
  void *reallocate(void *OldPtr, uptr NewSize) {
    initThreadMaybe();

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
      size_t OldSize = GuardedAlloc.getSize(OldPtr);
      void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
      if (NewPtr)
        memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
      GuardedAlloc.deallocate(OldPtr);
      return NewPtr;
    }
#endif // GWP_ASAN_HOOKS

    if (UNLIKELY(!Chunk::isAligned(OldPtr)))
      dieWithMessage("misaligned address when reallocating address %p\n",
                     OldPtr);
    UnpackedHeader OldHeader;
    Chunk::loadHeader(OldPtr, &OldHeader);
    if (UNLIKELY(OldHeader.State != ChunkAllocated))
      dieWithMessage("invalid chunk state when reallocating address %p\n",
                     OldPtr);
    if (DeallocationTypeMismatch) {
      if (UNLIKELY(OldHeader.AllocType != FromMalloc))
        dieWithMessage("allocation type mismatch when reallocating address "
                       "%p\n", OldPtr);
    }
    const uptr UsableSize = Chunk::getUsableSize(OldPtr, &OldHeader);
    // The new size still fits in the current chunk, and the size difference
    // is reasonable (less than half of the largest Primary class), so we just
    // update the header and keep the chunk in place.
    if (NewSize <= UsableSize &&
        (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
      UnpackedHeader NewHeader = OldHeader;
      NewHeader.SizeOrUnusedBytes =
          OldHeader.ClassId ? NewSize : UsableSize - NewSize;
      Chunk::compareExchangeHeader(OldPtr, &NewHeader, &OldHeader);
      return OldPtr;
    }
    // Otherwise, we have to allocate a new chunk and copy the contents of the
    // old one.
    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
    if (NewPtr) {
      const uptr OldSize = OldHeader.ClassId ? OldHeader.SizeOrUnusedBytes :
          UsableSize - OldHeader.SizeOrUnusedBytes;
      memcpy(NewPtr, OldPtr, Min(NewSize, UsableSize));
      quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
    }
    return NewPtr;
  }

  // Helper function that returns the actual usable size of a chunk.
  uptr getUsableSize(const void *Ptr) {
    initThreadMaybe();
    if (UNLIKELY(!Ptr))
      return 0;

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
      return GuardedAlloc.getSize(Ptr);
#endif // GWP_ASAN_HOOKS

    UnpackedHeader Header;
    Chunk::loadHeader(Ptr, &Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (UNLIKELY(Header.State != ChunkAllocated))
      dieWithMessage("invalid chunk state when sizing address %p\n", Ptr);
    return Chunk::getUsableSize(Ptr, &Header);
  }

  void *calloc(uptr NMemB, uptr Size) {
    initThreadMaybe();
    if (UNLIKELY(CheckForCallocOverflow(NMemB, Size))) {
      if (AllocatorMayReturnNull())
        return nullptr;
      reportCallocOverflow(NMemB, Size);
    }
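    // Pass ForceZeroContents=true: calloc has to return zeroed memory even
    // when the ZeroContents flag is off.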
    return allocate(NMemB * Size, MinAlignment, FromMalloc, true);
  }

  void commitBack(ScudoTSD *TSD) {
    Quarantine.Drain(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache));
    Backend.destroyCache(&TSD->Cache);
  }

  uptr getStats(AllocatorStat StatType) {
    initThreadMaybe();
    uptr stats[AllocatorStatCount];
    Backend.getStats(stats);
    return stats[StatType];
  }

  bool canReturnNull() {
    initThreadMaybe();
    return AllocatorMayReturnNull();
  }

  void setRssLimit(uptr LimitMb, bool HardLimit) {
    if (HardLimit)
      HardRssLimitMb = LimitMb;
    else
      SoftRssLimitMb = LimitMb;
    CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
  }

  void printStats() {
    initThreadMaybe();
    Backend.printStats();
  }
};

NOINLINE void Allocator::performSanityChecks() {
  // Verify that the header offset field can hold the maximum offset. In the
  // case of the Secondary allocator, it takes care of alignment and the
  // offset will always be 0. In the case of the Primary, the worst case
  // scenario happens in the last size class, when the backend allocation
  // would already be aligned on the requested alignment, which would happen
  // to be the maximum alignment that would fit in that size class. As a
  // result, the maximum offset will be at most the maximum alignment for the
  // last size class minus the header size, in multiples of MinAlignment.
  UnpackedHeader Header = {};
  const uptr MaxPrimaryAlignment =
      1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
  const uptr MaxOffset =
      (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
  Header.Offset = MaxOffset;
  if (Header.Offset != MaxOffset)
    dieWithMessage("maximum possible offset doesn't fit in header\n");
  // Verify that we can fit the maximum size or amount of unused bytes in the
  // header. Given that the Secondary fits the allocation to a page, the worst
  // case scenario happens in the Primary. It will depend on the second to
  // last and last class sizes, as well as the dynamic base for the Primary.
  // The following is an over-approximation that works for our needs.
  const uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
  Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
  if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes)
    dieWithMessage("maximum possible unused bytes doesn't fit in header\n");

  const uptr LargestClassId = SizeClassMap::kLargestClassID;
  Header.ClassId = LargestClassId;
  if (Header.ClassId != LargestClassId)
    dieWithMessage("largest class ID doesn't fit in header\n");
}

// Opportunistic RSS limit check. This will update the RSS limit status, if
// it can, every 250ms, otherwise it will just return the current one.
NOINLINE bool Allocator::isRssLimitExceeded() {
  u64 LastCheck = atomic_load_relaxed(&RssLastCheckedAtNS);
  const u64 CurrentCheck = MonotonicNanoTime();
  if (LIKELY(CurrentCheck < LastCheck + (250ULL * 1000000ULL)))
    return atomic_load_relaxed(&RssLimitExceeded);
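  // Only one thread wins the compare-exchange below and refreshes the RSS
  // reading; the others return the status cached by the previous winner.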
  if (!atomic_compare_exchange_weak(&RssLastCheckedAtNS, &LastCheck,
                                    CurrentCheck, memory_order_relaxed))
    return atomic_load_relaxed(&RssLimitExceeded);

  // TODO(kostyak): We currently use sanitizer_common's GetRSS which reads the
  //                RSS from /proc/self/statm by default. We might want to
  //                call getrusage directly, even if it's less accurate.
  const uptr CurrentRssMb = GetRSS() >> 20;
  if (HardRssLimitMb && UNLIKELY(HardRssLimitMb < CurrentRssMb))
    dieWithMessage("hard RSS limit exhausted (%zdMb vs %zdMb)\n",
                   HardRssLimitMb, CurrentRssMb);
  if (SoftRssLimitMb) {
    if (atomic_load_relaxed(&RssLimitExceeded)) {
      if (CurrentRssMb <= SoftRssLimitMb)
        atomic_store_relaxed(&RssLimitExceeded, false);
    } else {
      if (CurrentRssMb > SoftRssLimitMb) {
        atomic_store_relaxed(&RssLimitExceeded, true);
        Printf("Scudo INFO: soft RSS limit exhausted (%zdMb vs %zdMb)\n",
               SoftRssLimitMb, CurrentRssMb);
      }
    }
  }
  return atomic_load_relaxed(&RssLimitExceeded);
}

static Allocator Instance(LINKER_INITIALIZED);

static BackendT &getBackend() {
  return Instance.Backend;
}

void initScudo() {
  Instance.init();
#ifdef GWP_ASAN_HOOKS
  gwp_asan::options::initOptions(__sanitizer::GetEnv("GWP_ASAN_OPTIONS"),
                                 Printf);
  gwp_asan::options::Options &Opts = gwp_asan::options::getOptions();
  Opts.Backtrace = gwp_asan::backtrace::getBacktraceFunction();
  GuardedAlloc.init(Opts);

  if (Opts.InstallSignalHandlers)
    gwp_asan::segv_handler::installSignalHandlers(
        &GuardedAlloc, __sanitizer::Printf,
        gwp_asan::backtrace::getPrintBacktraceFunction(),
        gwp_asan::backtrace::getSegvBacktraceFunction());
#endif // GWP_ASAN_HOOKS
}

void ScudoTSD::init() {
  getBackend().initCache(&Cache);
  memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
}

void ScudoTSD::commitBack() {
  Instance.commitBack(this);
}

void *scudoAllocate(uptr Size, uptr Alignment, AllocType Type) {
  if (Alignment && UNLIKELY(!IsPowerOfTwo(Alignment))) {
    errno = EINVAL;
    if (Instance.canReturnNull())
      return nullptr;
    reportAllocationAlignmentNotPowerOfTwo(Alignment);
  }
  return SetErrnoOnNull(Instance.allocate(Size, Alignment, Type));
}

void scudoDeallocate(void *Ptr, uptr Size, uptr Alignment, AllocType Type) {
  Instance.deallocate(Ptr, Size, Alignment, Type);
}

void *scudoRealloc(void *Ptr, uptr Size) {
  if (!Ptr)
    return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, FromMalloc));
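  // realloc(Ptr, 0) acts as free(Ptr) and returns nullptr.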
  if (Size == 0) {
    Instance.deallocate(Ptr, 0, 0, FromMalloc);
    return nullptr;
  }
  return SetErrnoOnNull(Instance.reallocate(Ptr, Size));
}

void *scudoCalloc(uptr NMemB, uptr Size) {
  return SetErrnoOnNull(Instance.calloc(NMemB, Size));
}

void *scudoValloc(uptr Size) {
  return SetErrnoOnNull(
      Instance.allocate(Size, GetPageSizeCached(), FromMemalign));
}

void *scudoPvalloc(uptr Size) {
  const uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(Size, PageSize))) {
    errno = ENOMEM;
    if (Instance.canReturnNull())
      return nullptr;
    reportPvallocOverflow(Size);
  }
  // pvalloc(0) should allocate one page.
  Size = Size ? RoundUpTo(Size, PageSize) : PageSize;
  return SetErrnoOnNull(Instance.allocate(Size, PageSize, FromMemalign));
}

int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(Alignment))) {
    if (!Instance.canReturnNull())
      reportInvalidPosixMemalignAlignment(Alignment);
    return EINVAL;
  }
  void *Ptr = Instance.allocate(Size, Alignment, FromMemalign);
  if (UNLIKELY(!Ptr))
    return ENOMEM;
  *MemPtr = Ptr;
  return 0;
}

void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(Alignment, Size))) {
    errno = EINVAL;
    if (Instance.canReturnNull())
      return nullptr;
    reportInvalidAlignedAllocAlignment(Size, Alignment);
  }
  return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMalloc));
}

uptr scudoMallocUsableSize(void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

}  // namespace __scudo

using namespace __scudo;

// MallocExtension helper functions

uptr __sanitizer_get_current_allocated_bytes() {
  return Instance.getStats(AllocatorStatAllocated);
}

uptr __sanitizer_get_heap_size() {
  return Instance.getStats(AllocatorStatMapped);
}
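
// Free and unmapped bytes are not tracked by Scudo; the values below appear
// to be intentional stubs.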
uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr Size) {
  return Size;
}

int __sanitizer_get_ownership(const void *Ptr) {
  return Instance.isValidPointer(Ptr);
}

uptr __sanitizer_get_allocated_size(const void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
                             void *Ptr, uptr Size) {
  (void)Ptr;
  (void)Size;
}

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *Ptr) {
  (void)Ptr;
}
#endif

// Interface functions

void __scudo_set_rss_limit(uptr LimitMb, s32 HardLimit) {
  if (!SCUDO_CAN_USE_PUBLIC_INTERFACE)
    return;
  Instance.setRssLimit(LimitMb, !!HardLimit);
}

void __scudo_print_stats() {
  Instance.printStats();
}
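
// Example usage (a sketch, not part of the original file): with Scudo linked
// in and the standard malloc interceptors in place, client code could
// exercise the public interface as follows. The 2048Mb limit is an arbitrary
// illustrative value, and the extern "C" declarations approximate the
// uptr/s32 types of the interface above.
//
//   #include <stdlib.h>
//   extern "C" void __scudo_set_rss_limit(size_t LimitMb, int HardLimit);
//   extern "C" void __scudo_print_stats();
//
//   int main() {
//     __scudo_set_rss_limit(2048, /*HardLimit=*/1);  // Die past 2048Mb RSS.
//     void *P = malloc(128);  // Serviced by the Scudo Primary.
//     free(P);                // Quarantined or returned to the backend.
//     __scudo_print_stats();  // Print backend statistics.
//     return 0;
//   }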