//===-- scudo_allocator_secondary.h -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// Scudo Secondary Allocator.
/// This services allocations that are too large to be serviced by the Primary
/// Allocator. It is directly backed by the memory mapping functions of the
/// operating system.
///
//===----------------------------------------------------------------------===//

#ifndef SCUDO_ALLOCATOR_SECONDARY_H_
#define SCUDO_ALLOCATOR_SECONDARY_H_

#ifndef SCUDO_ALLOCATOR_H_
# error "This file must be included inside scudo_allocator.h."
#endif

// Secondary backed allocations are standalone chunks that contain extra
// information stored in a LargeChunk::Header prior to the frontend's header.
//
// The secondary takes care of alignment requirements (so that it can release
// unnecessary pages in the rare event of larger alignments), and as such must
// know about the frontend's header size.
//
// Since Windows doesn't support partial releasing of a reserved memory region,
// we have to keep track of both the reserved and the committed memory.
//
// The resulting chunk resembles the following:
//
//   +--------------------+
//   | Guard page(s)      |
//   +--------------------+
//   | Unused space*      |
//   +--------------------+
//   | LargeChunk::Header |
//   +--------------------+
//   | {Unp,P}ackedHeader |
//   +--------------------+
//   | Data (aligned)     |
//   +--------------------+
//   | Unused space**     |
//   +--------------------+
//   | Guard page(s)      |
//   +--------------------+

namespace LargeChunk {
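// Secondary chunk header, stored just before the frontend's header. It keeps
// the ReservedAddressRange needed to unmap the whole mapping on deallocation,
// the committed size used for the stats, and the usable size reported by
// GetActuallyAllocatedSize.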
struct Header {
  ReservedAddressRange StoredRange;
  uptr CommittedSize;
  uptr Size;
};
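// The header size is rounded up to MinAlignment so that the frontend header
// immediately following it stays properly aligned; getHeader() simply walks
// back by that amount from a chunk pointer.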
constexpr uptr getHeaderSize() {
  return RoundUpTo(sizeof(Header), MinAlignment);
}
static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(Ptr - getHeaderSize());
}
static Header *getHeader(const void *Ptr) {
  return getHeader(reinterpret_cast<uptr>(Ptr));
}
}  // namespace LargeChunk

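// The secondary allocator itself. Each allocation gets its own memory mapping,
// surrounded by guard pages; there is no free list or caching involved:
// Allocate() maps, Deallocate() unmaps.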
class LargeMmapAllocator {
 public:
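  // Zero out the whole object: the counters, stats and the (statically
  // initializable) spin mutex all start from scratch.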
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }

  void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
    const uptr UserSize = Size - Chunk::getHeaderSize();
    // The Scudo frontend prevents us from allocating more than
    // MaxAllowedMallocSize, so integer overflow checks would be superfluous.
    uptr ReservedSize = Size + LargeChunk::getHeaderSize();
    if (UNLIKELY(Alignment > MinAlignment))
      ReservedSize += Alignment;
    const uptr PageSize = GetPageSizeCached();
    ReservedSize = RoundUpTo(ReservedSize, PageSize);
    // Account for 2 guard pages, one before and one after the chunk.
    ReservedSize += 2 * PageSize;

    ReservedAddressRange AddressRange;
    uptr ReservedBeg = AddressRange.Init(ReservedSize, SecondaryAllocatorName);
    if (UNLIKELY(ReservedBeg == ~static_cast<uptr>(0)))
      return nullptr;
    // A page-aligned pointer is assumed after that, so check it now.
    DCHECK(IsAligned(ReservedBeg, PageSize));
    uptr ReservedEnd = ReservedBeg + ReservedSize;
    // The beginning of the user area for that allocation comes after the
    // initial guard page, and both headers. This is the pointer that has to
    // abide by alignment requirements.
    uptr CommittedBeg = ReservedBeg + PageSize;
    uptr UserBeg = CommittedBeg + HeadersSize;
    uptr UserEnd = UserBeg + UserSize;
    uptr CommittedEnd = RoundUpTo(UserEnd, PageSize);

    // In the rare event of larger alignments, we will attempt to fit the mmap
    // area better and unmap extraneous memory. This will also ensure that the
    // offset and unused bytes fields of the header stay small.
    if (UNLIKELY(Alignment > MinAlignment)) {
      if (!IsAligned(UserBeg, Alignment)) {
        UserBeg = RoundUpTo(UserBeg, Alignment);
        CommittedBeg = RoundDownTo(UserBeg - HeadersSize, PageSize);
        const uptr NewReservedBeg = CommittedBeg - PageSize;
        DCHECK_GE(NewReservedBeg, ReservedBeg);
        if (!SANITIZER_WINDOWS && NewReservedBeg != ReservedBeg) {
          AddressRange.Unmap(ReservedBeg, NewReservedBeg - ReservedBeg);
          ReservedBeg = NewReservedBeg;
        }
        UserEnd = UserBeg + UserSize;
        CommittedEnd = RoundUpTo(UserEnd, PageSize);
      }
      const uptr NewReservedEnd = CommittedEnd + PageSize;
      DCHECK_LE(NewReservedEnd, ReservedEnd);
      if (!SANITIZER_WINDOWS && NewReservedEnd != ReservedEnd) {
        AddressRange.Unmap(NewReservedEnd, ReservedEnd - NewReservedEnd);
        ReservedEnd = NewReservedEnd;
      }
    }

    DCHECK_LE(UserEnd, CommittedEnd);
    const uptr CommittedSize = CommittedEnd - CommittedBeg;
    // Actually mmap the memory, preserving the guard pages on either side.
    CHECK_EQ(CommittedBeg, AddressRange.Map(CommittedBeg, CommittedSize));
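    // Fill in the secondary header, which lives right before the frontend
    // header: the range to unmap later, the usable size (from the frontend
    // header to the end of the committed region), and the committed size.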
    const uptr Ptr = UserBeg - Chunk::getHeaderSize();
    LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
    H->StoredRange = AddressRange;
    H->Size = CommittedEnd - Ptr;
    H->CommittedSize = CommittedSize;

    // The primary adds the whole class size to the stats when allocating a
    // chunk, so we will do something similar here. But we will not account for
    // the guard pages.
    {
      SpinMutexLock l(&StatsMutex);
      Stats->Add(AllocatorStatAllocated, CommittedSize);
      Stats->Add(AllocatorStatMapped, CommittedSize);
      AllocatedBytes += CommittedSize;
      if (LargestSize < CommittedSize)
        LargestSize = CommittedSize;
      NumberOfAllocs++;
    }

    return reinterpret_cast<void *>(Ptr);
  }

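  // Releases a secondary chunk: the stats are updated with the committed size
  // recorded at allocation time, then the whole reserved range (guard pages
  // included) is unmapped at once.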
  void Deallocate(AllocatorStats *Stats, void *Ptr) {
    LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
    // Since we're unmapping the entirety of where the ReservedAddressRange
    // actually is, copy onto the stack.
    ReservedAddressRange AddressRange = H->StoredRange;
    const uptr Size = H->CommittedSize;
    {
      SpinMutexLock l(&StatsMutex);
      Stats->Sub(AllocatorStatAllocated, Size);
      Stats->Sub(AllocatorStatMapped, Size);
      FreedBytes += Size;
      NumberOfFrees++;
    }
    AddressRange.Unmap(reinterpret_cast<uptr>(AddressRange.base()),
                       AddressRange.size());
  }

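  // Returns the usable size stored in the secondary header, that is everything
  // from the frontend header to the end of the committed region.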
  static uptr GetActuallyAllocatedSize(void *Ptr) {
    return LargeChunk::getHeader(Ptr)->Size;
  }

  void PrintStats() {
    Printf("Stats: LargeMmapAllocator: allocated %zd times (%zd K), "
           "freed %zd times (%zd K), remains %zd (%zd K) max %zd M\n",
           NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
           FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
           (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20);
  }

 private:
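  // Combined size of the secondary (LargeChunk) header and the frontend
  // (Chunk) header that precede the user data.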
  static constexpr uptr HeadersSize =
      LargeChunk::getHeaderSize() + Chunk::getHeaderSize();

  StaticSpinMutex StatsMutex;
  u32 NumberOfAllocs;
  u32 NumberOfFrees;
  uptr AllocatedBytes;
  uptr FreedBytes;
  uptr LargestSize;
};

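// Rough usage sketch (illustrative only; the actual callers are in the Scudo
// frontend, and the names below are placeholders). Note that the Size passed
// to Allocate() must already include the frontend header, since the user size
// is derived as Size - Chunk::getHeaderSize():
//
//   LargeMmapAllocator Backend;
//   AllocatorStats Stats;
//   Backend.Init();
//   Stats.Init();
//   void *P = Backend.Allocate(&Stats, NeededSize, Alignment);
//   if (P)
//     Backend.Deallocate(&Stats, P);
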
#endif  // SCUDO_ALLOCATOR_SECONDARY_H_