//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
// When allocating 2^x bytes it should return 2^x aligned chunk.
// PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
  21. template <class PrimaryAllocator,
  22. class LargeMmapAllocatorPtrArray = DefaultLargeMmapAllocatorPtrArray>
  23. class CombinedAllocator {
  24. public:
  25. using AllocatorCache = typename PrimaryAllocator::AllocatorCache;
  26. using SecondaryAllocator =
  27. LargeMmapAllocator<typename PrimaryAllocator::MapUnmapCallback,
  28. LargeMmapAllocatorPtrArray,
  29. typename PrimaryAllocator::AddressSpaceView>;
  30. void InitLinkerInitialized(s32 release_to_os_interval_ms) {
  31. stats_.InitLinkerInitialized();
  32. primary_.Init(release_to_os_interval_ms);
  33. secondary_.InitLinkerInitialized();
  34. }
  35. void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
  36. stats_.Init();
  37. primary_.Init(release_to_os_interval_ms, heap_start);
  38. secondary_.Init();
  39. }
  40. void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
  41. // Returning 0 on malloc(0) may break a lot of code.
  42. if (size == 0)
  43. size = 1;
  44. if (size + alignment < size) {
  45. Report("WARNING: %s: CombinedAllocator allocation overflow: "
  46. "0x%zx bytes with 0x%zx alignment requested\n",
  47. SanitizerToolName, size, alignment);
  48. return nullptr;
  49. }
  50. uptr original_size = size;
  51. // If alignment requirements are to be fulfilled by the frontend allocator
  52. // rather than by the primary or secondary, passing an alignment lower than
  53. // or equal to 8 will prevent any further rounding up, as well as the later
  54. // alignment check.
  55. if (alignment > 8)
  56. size = RoundUpTo(size, alignment);
  57. // The primary allocator should return a 2^x aligned allocation when
  58. // requested 2^x bytes, hence using the rounded up 'size' when being
  59. // serviced by the primary (this is no longer true when the primary is
  60. // using a non-fixed base address). The secondary takes care of the
  61. // alignment without such requirement, and allocating 'size' would use
  62. // extraneous memory, so we employ 'original_size'.
  63. void *res;
  64. if (primary_.CanAllocate(size, alignment))
  65. res = cache->Allocate(&primary_, primary_.ClassID(size));
  66. else
  67. res = secondary_.Allocate(&stats_, original_size, alignment);
  68. if (alignment > 8)
  69. CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
  70. return res;
  71. }
  72. s32 ReleaseToOSIntervalMs() const {
  73. return primary_.ReleaseToOSIntervalMs();
  74. }
  75. void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
  76. primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
  77. }
  78. void ForceReleaseToOS() {
  79. primary_.ForceReleaseToOS();
  80. }
  81. void Deallocate(AllocatorCache *cache, void *p) {
  82. if (!p) return;
  83. if (primary_.PointerIsMine(p))
  84. cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
  85. else
  86. secondary_.Deallocate(&stats_, p);
  87. }
  88. void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
  89. uptr alignment) {
  90. if (!p)
  91. return Allocate(cache, new_size, alignment);
  92. if (!new_size) {
  93. Deallocate(cache, p);
  94. return nullptr;
  95. }
  96. CHECK(PointerIsMine(p));
  97. uptr old_size = GetActuallyAllocatedSize(p);
  98. uptr memcpy_size = Min(new_size, old_size);
  99. void *new_p = Allocate(cache, new_size, alignment);
  100. if (new_p)
  101. internal_memcpy(new_p, p, memcpy_size);
  102. Deallocate(cache, p);
  103. return new_p;
  104. }
  105. bool PointerIsMine(const void *p) const {
  106. if (primary_.PointerIsMine(p))
  107. return true;
  108. return secondary_.PointerIsMine(p);
  109. }
  110. bool FromPrimary(const void *p) const { return primary_.PointerIsMine(p); }
  111. void *GetMetaData(const void *p) {
  112. if (primary_.PointerIsMine(p))
  113. return primary_.GetMetaData(p);
  114. return secondary_.GetMetaData(p);
  115. }
  116. void *GetBlockBegin(const void *p) {
  117. if (primary_.PointerIsMine(p))
  118. return primary_.GetBlockBegin(p);
  119. return secondary_.GetBlockBegin(p);
  120. }
  121. // This function does the same as GetBlockBegin, but is much faster.
  122. // Must be called with the allocator locked.
  123. void *GetBlockBeginFastLocked(const void *p) {
  124. if (primary_.PointerIsMine(p))
  125. return primary_.GetBlockBegin(p);
  126. return secondary_.GetBlockBeginFastLocked(p);
  127. }
  128. uptr GetActuallyAllocatedSize(void *p) {
  129. if (primary_.PointerIsMine(p))
  130. return primary_.GetActuallyAllocatedSize(p);
  131. return secondary_.GetActuallyAllocatedSize(p);
  132. }
  133. uptr TotalMemoryUsed() {
  134. return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
  135. }
  136. void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
  137. void InitCache(AllocatorCache *cache) {
  138. cache->Init(&stats_);
  139. }
  140. void DestroyCache(AllocatorCache *cache) {
  141. cache->Destroy(&primary_, &stats_);
  142. }
  143. void SwallowCache(AllocatorCache *cache) {
  144. cache->Drain(&primary_);
  145. }
  146. void GetStats(AllocatorStatCounters s) const {
  147. stats_.Get(s);
  148. }
  149. void PrintStats() {
  150. primary_.PrintStats();
  151. secondary_.PrintStats();
  152. }
  153. // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  154. // introspection API.
  155. void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  156. primary_.ForceLock();
  157. secondary_.ForceLock();
  158. }
  159. void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  160. secondary_.ForceUnlock();
  161. primary_.ForceUnlock();
  162. }
  163. // Iterate over all existing chunks.
  164. // The allocator must be locked when calling this function.
  165. void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  166. primary_.ForEachChunk(callback, arg);
  167. secondary_.ForEachChunk(callback, arg);
  168. }
  169. private:
  170. PrimaryAllocator primary_;
  171. SecondaryAllocator secondary_;
  172. AllocatorGlobalStats stats_;
  173. };