//===-- asan_allocator.h ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_allocator.cpp.
//===----------------------------------------------------------------------===//
#ifndef ASAN_ALLOCATOR_H
#define ASAN_ALLOCATOR_H

#include "asan_flags.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_platform.h"
namespace __asan {

// Which allocation API produced a heap block. Recorded per chunk so that
// mismatched pairs (e.g. malloc + operator delete) can be diagnosed.
enum AllocType {
  FROM_MALLOC = 1,  // Memory block came from malloc, calloc, realloc, etc.
  FROM_NEW = 2,     // Memory block came from operator new.
  FROM_NEW_BR = 3   // Memory block came from operator new [].
};

class AsanChunk;
// Runtime-tunable allocator parameters. SetFrom()/CopyTo() convert between
// this struct and the ASan/common flag objects (see asan_allocator.cpp).
struct AllocatorOptions {
  u32 quarantine_size_mb;
  u32 thread_local_quarantine_size_kb;
  u16 min_redzone;  // Bounds for the per-allocation redzone size.
  u16 max_redzone;
  u8 may_return_null;
  u8 alloc_dealloc_mismatch;
  s32 release_to_os_interval_ms;

  // Populate this struct from the given flag sets.
  void SetFrom(const Flags *f, const CommonFlags *cf);
  // Write these options back into the given flag sets.
  void CopyTo(Flags *f, CommonFlags *cf);
};
// One-time allocator setup, flag-driven re-initialization, and read-back of
// the currently active options. Implemented in asan_allocator.cpp.
void InitializeAllocator(const AllocatorOptions &options);
void ReInitializeAllocator(const AllocatorOptions &options);
void GetAllocatorOptions(AllocatorOptions *options);
  42. class AsanChunkView {
  43. public:
  44. explicit AsanChunkView(AsanChunk *chunk) : chunk_(chunk) {}
  45. bool IsValid() const; // Checks if AsanChunkView points to a valid
  46. // allocated or quarantined chunk.
  47. bool IsAllocated() const; // Checks if the memory is currently allocated.
  48. bool IsQuarantined() const; // Checks if the memory is currently quarantined.
  49. uptr Beg() const; // First byte of user memory.
  50. uptr End() const; // Last byte of user memory.
  51. uptr UsedSize() const; // Size requested by the user.
  52. u32 UserRequestedAlignment() const; // Originally requested alignment.
  53. uptr AllocTid() const;
  54. uptr FreeTid() const;
  55. bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
  56. u32 GetAllocStackId() const;
  57. u32 GetFreeStackId() const;
  58. AllocType GetAllocType() const;
  59. bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) const {
  60. if (addr >= Beg() && (addr + access_size) <= End()) {
  61. *offset = addr - Beg();
  62. return true;
  63. }
  64. return false;
  65. }
  66. bool AddrIsAtLeft(uptr addr, uptr access_size, sptr *offset) const {
  67. (void)access_size;
  68. if (addr < Beg()) {
  69. *offset = Beg() - addr;
  70. return true;
  71. }
  72. return false;
  73. }
  74. bool AddrIsAtRight(uptr addr, uptr access_size, sptr *offset) const {
  75. if (addr + access_size > End()) {
  76. *offset = addr - End();
  77. return true;
  78. }
  79. return false;
  80. }
  81. private:
  82. AsanChunk *const chunk_;
  83. };
// Look up the chunk view for a user address vs. an allocation-begin address.
// Callers should check IsValid() on the returned view before using it.
AsanChunkView FindHeapChunkByAddress(uptr address);
AsanChunkView FindHeapChunkByAllocBeg(uptr address);
  86. // List of AsanChunks with total size.
  87. class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
  88. public:
  89. explicit AsanChunkFifoList(LinkerInitialized) { }
  90. AsanChunkFifoList() { clear(); }
  91. void Push(AsanChunk *n);
  92. void PushList(AsanChunkFifoList *q);
  93. AsanChunk *Pop();
  94. uptr size() { return size_; }
  95. void clear() {
  96. IntrusiveList<AsanChunk>::clear();
  97. size_ = 0;
  98. }
  99. private:
  100. uptr size_;
  101. };
// Callbacks invoked by the sanitizer allocator when it maps or unmaps pages
// for the primary/secondary allocators (wired in via the AP64/AP32 parameter
// structs below). NOTE(review): presumably these update shadow memory and
// stats — confirm against the definitions in asan_allocator.cpp.
struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const;
  void OnMapSecondary(uptr p, uptr size, uptr user_begin, uptr user_size) const;
  void OnUnmap(uptr p, uptr size) const;
};
// Compile-time selection of the primary allocator: arena placement/size and
// the size-class layout, chosen per platform; then the AP64/AP32 parameter
// structs that instantiate SizeClassAllocator64/32.
#if SANITIZER_CAN_USE_ALLOCATOR64
#  if SANITIZER_FUCHSIA
// This is a sentinel indicating we do not want the primary allocator arena to
// be placed at a fixed address. It will be anonymously mmap'd.
const uptr kAllocatorSpace = ~(uptr)0;
#    if SANITIZER_RISCV64
// These are sanitizer tunings that allow all bringup tests for RISCV-64 Sv39 +
// Fuchsia to run with asan-instrumented. That is, we can run bringup, e2e,
// libc, and scudo tests with this configuration.
//
// TODO: This is specifically tuned for Sv39. 48/57 will likely require other
// tunings, or possibly use the same tunings Fuchsia uses for other archs. The
// VMA size isn't technically tied to the Fuchsia System ABI, so once 48/57 is
// supported, we'd need a way of dynamically checking what the VMA size is and
// determining optimal configuration.

// This indicates the total amount of space dedicated for the primary allocator
// during initialization. This is roughly proportional to the size set by the
// FuchsiaConfig for scudo (~11.25GB == ~2^33.49). Requesting any more could
// lead to some failures in sanitized bringup tests where we can't allocate new
// vmars because there wouldn't be enough contiguous space. We could try 2^34 if
// we re-evaluate the SizeClassMap settings.
const uptr kAllocatorSize = UINT64_C(1) << 33;  // 8GB

// This is roughly equivalent to the configuration for the VeryDenseSizeClassMap
// but has fewer size classes (ideally at most 32). Fewer class sizes means the
// region size for each class is larger, thus less chances of running out of
// space for each region. The main differences are the MidSizeLog (which is
// smaller) and the MaxSizeLog (which is larger).
//
// - The MaxSizeLog is higher to allow some of the largest allocations I've
//   observed to be placed in the primary allocator's arena as opposed to being
//   mmap'd by the secondary allocator. This helps reduce fragmentation from
//   large classes. A huge example of this the scudo allocator tests (and its
//   testing infrastructure) which malloc's/new's objects on the order of
//   hundreds of kilobytes which normally would not be in the primary allocator
//   arena with the default VeryDenseSizeClassMap.
// - The MidSizeLog is reduced to help shrink the number of size classes and
//   increase region size. Without this, we'd see ASan complain many times about
//   a region running out of available space.
//
// This differs a bit from the fuchsia config in scudo, mainly from the NumBits,
// MaxSizeLog, and NumCachedHintT. This should place the number of size classes
// for scudo at 45 and some large objects allocated by this config would be
// placed in the arena whereas scudo would mmap them. The asan allocator needs
// to have a number of classes that are a power of 2 for various internal things
// to work, so we can't match the scudo settings to a tee. The sanitizer
// allocator is slightly slower than scudo's but this is enough to get
// memory-intensive scudo tests to run with asan instrumentation.
typedef SizeClassMap</*kNumBits=*/2,
                     /*kMinSizeLog=*/5,
                     /*kMidSizeLog=*/8,
                     /*kMaxSizeLog=*/18,
                     /*kNumCachedHintT=*/8,
                     /*kMaxBytesCachedLog=*/10>
    SizeClassMap;
static_assert(SizeClassMap::kNumClassesRounded <= 32,
              "The above tunings were specifically selected to ensure there "
              "would be at most 32 size classes. This restriction could be "
              "loosened to 64 size classes if we can find a configuration of "
              "allocator size and SizeClassMap tunings that allows us to "
              "reliably run all bringup tests in a sanitized environment.");
#    else
// These are the default allocator tunings for non-RISCV environments where the
// VMA is usually 48 bits and we have lots of space.
const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
typedef DefaultSizeClassMap SizeClassMap;
#    endif
#  elif defined(__powerpc64__)
const uptr kAllocatorSpace = ~(uptr)0;  // Anonymously mmap'd arena.
const uptr kAllocatorSize = 0x20000000000ULL;  // 2T.
typedef DefaultSizeClassMap SizeClassMap;
#  elif defined(__aarch64__) && SANITIZER_ANDROID
// Android needs to support 39, 42 and 48 bit VMA.
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x2000000000ULL;  // 128G.
typedef VeryCompactSizeClassMap SizeClassMap;
#  elif SANITIZER_RISCV64
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x2000000000ULL;  // 128G.
typedef VeryDenseSizeClassMap SizeClassMap;
#  elif defined(__sparc__)
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x20000000000ULL;  // 2T.
typedef DefaultSizeClassMap SizeClassMap;
#  elif SANITIZER_WINDOWS
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x8000000000ULL;  // 500G
typedef DefaultSizeClassMap SizeClassMap;
#  elif SANITIZER_APPLE
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
typedef DefaultSizeClassMap SizeClassMap;
#  else
const uptr kAllocatorSpace = 0x500000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
typedef DefaultSizeClassMap SizeClassMap;
#  endif

template <typename AddressSpaceViewTy>
struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 0;  // No per-chunk allocator metadata.
  typedef __asan::SizeClassMap SizeClassMap;
  typedef AsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceView>
using PrimaryAllocatorASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
#else  // Fallback to SizeClassAllocator32.
typedef CompactSizeClassMap SizeClassMap;
template <typename AddressSpaceViewTy>
struct AP32 {  // Allocator32 parameters.
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = 0;
  typedef __asan::SizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;  // 1M regions.
  using AddressSpaceView = AddressSpaceViewTy;
  typedef AsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
template <typename AddressSpaceView>
using PrimaryAllocatorASVT = SizeClassAllocator32<AP32<AddressSpaceView> >;
using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
#endif  // SANITIZER_CAN_USE_ALLOCATOR64
static const uptr kNumberOfSizeClasses = SizeClassMap::kNumClasses;

// The full ASan allocator: the selected primary combined with the
// (mmap-based) secondary via CombinedAllocator.
template <typename AddressSpaceView>
using AsanAllocatorASVT =
    CombinedAllocator<PrimaryAllocatorASVT<AddressSpaceView>>;
using AsanAllocator = AsanAllocatorASVT<LocalAddressSpaceView>;
using AllocatorCache = AsanAllocator::AllocatorCache;
// Per-thread allocator state; CommitBack() returns it to the global
// allocator (implemented in asan_allocator.cpp).
struct AsanThreadLocalMallocStorage {
  // Opaque storage for the thread-local quarantine cache. NOTE(review):
  // assumes 16 uptrs is large enough for the cache object placed here —
  // confirm against asan_allocator.cpp.
  uptr quarantine_cache[16];
  AllocatorCache allocator_cache;
  void CommitBack();

 private:
  // These objects are allocated via mmap() and are zero-initialized.
  AsanThreadLocalMallocStorage() {}
};
// Internal allocation entry points used by the interceptors and the
// operator new/delete replacements. Each takes the caller's
// BufferedStackTrace for reporting, and (where present) an AllocType so
// alloc/dealloc mismatches can be detected.
void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type);
void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type);
void asan_delete(void *ptr, uptr size, uptr alignment,
                 BufferedStackTrace *stack, AllocType alloc_type);

void *asan_malloc(uptr size, BufferedStackTrace *stack);
void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack);
void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack);
void *asan_reallocarray(void *p, uptr nmemb, uptr size,
                        BufferedStackTrace *stack);
void *asan_valloc(uptr size, BufferedStackTrace *stack);
void *asan_pvalloc(uptr size, BufferedStackTrace *stack);
void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack);
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack);
uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp);

// "mz" hooks — NOTE(review): presumably the Apple malloc-zone integration
// (asan_malloc_mac); verify against the definitions before relying on this.
uptr asan_mz_size(const void *ptr);
void asan_mz_force_lock();
void asan_mz_force_unlock();

void PrintInternalAllocatorStats();
void AsanSoftRssLimitExceededCallback(bool exceeded);

}  // namespace __asan
#endif  // ASAN_ALLOCATOR_H