//===-- msan_allocator.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//
#include "msan_allocator.h"

#include "msan.h"
#include "msan_interface_internal.h"
#include "msan_origin.h"
#include "msan_poisoning.h"
#include "msan_thread.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
namespace __msan {

struct Metadata {
  uptr requested_size;
};

struct MsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnMapSecondary(uptr p, uptr size, uptr user_begin,
                      uptr user_size) const {}
  void OnUnmap(uptr p, uptr size) const {
    __msan_unpoison((void *)p, size);

    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    uptr shadow_p = MEM_TO_SHADOW(p);
    ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
    if (__msan_get_track_origins()) {
      uptr origin_p = MEM_TO_ORIGIN(p);
      ReleaseMemoryPagesToOS(origin_p, origin_p + size);
    }
  }
};

// Note: to ensure that the allocator is compatible with the application memory
// layout (especially with high-entropy ASLR), kSpaceBeg and kSpaceSize must be
// duplicated as MappingDesc::ALLOCATOR in msan.h.
#if defined(__mips64)
static const uptr kMaxAllowedMallocSize = 2UL << 30;

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#elif defined(__x86_64__)
#if SANITIZER_NETBSD || SANITIZER_LINUX
static const uptr kAllocatorSpace = 0x700000000000ULL;
#else
static const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
static const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;

#elif defined(__loongarch_lp64)
const uptr kAllocatorSpace = 0x700000000000ULL;
const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;

#elif defined(__powerpc64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x300000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__s390x__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x440000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {
  static const uptr kSpaceBeg = 0xE00000000000ULL;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif

typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static StaticSpinMutex fallback_mutex;

static uptr max_malloc_size;
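
// Reads the relevant common flags and initializes the global allocator,
// capping the per-allocation size limit at kMaxAllowedMallocSize.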
void MsanAllocatorInit() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
  if (common_flags()->max_allocation_size_mb)
    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
                          kMaxAllowedMallocSize);
  else
    max_malloc_size = kMaxAllowedMallocSize;
}

void LockAllocator() { allocator.ForceLock(); }

void UnlockAllocator() { allocator.ForceUnlock(); }
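
// Each MsanThread carries an opaque buffer (allocator_cache) that is large
// enough to hold an AllocatorCache; reinterpret it as the per-thread cache.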
AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

void MsanThreadLocalMallocStorage::Init() {
  allocator.InitCache(GetAllocatorCache(this));
}

void MsanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
  allocator.DestroyCache(GetAllocatorCache(this));
}
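
// Common allocation path for all malloc-family entry points: enforces the
// size and RSS limits, allocates from the current thread's cache (or from the
// global fallback cache when no MsanThread is available yet), records the
// requested size in the chunk metadata, and either zeroes/unpoisons the new
// memory or poisons it (with an allocation origin) depending on the caller
// and flags.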
static void *MsanAllocate(BufferedStackTrace *stack, uptr size, uptr alignment,
                          bool zeroise) {
  if (UNLIKELY(size > max_malloc_size)) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: MemorySanitizer failed to allocate 0x%zx bytes\n",
             size);
      return nullptr;
    }
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportAllocationSizeTooBig(size, max_malloc_size, stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportRssLimitExceeded(stack);
  }
  MsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportOutOfMemory(size, stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = size;
  if (zeroise) {
    if (allocator.FromPrimary(allocated))
      __msan_clear_and_unpoison(allocated, size);
    else
      __msan_unpoison(allocated, size);  // Mem is already zeroed.
  } else if (flags()->poison_in_malloc) {
    __msan_poison(allocated, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_ALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(allocated, size, o.raw_id());
    }
  }
  UnpoisonParam(2);
  RunMallocHooks(allocated, size);
  return allocated;
}
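
// Deallocation path shared by free() and friends: clears the recorded size,
// optionally re-poisons the chunk (with a TAG_DEALLOC origin when origin
// tracking is on), and returns it to the current thread's cache or to the
// fallback cache.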
void MsanDeallocate(BufferedStackTrace *stack, void *p) {
  CHECK(p);
  UnpoisonParam(1);
  RunFreeHooks(p);

  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->requested_size = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned. The secondary allocator will unmap and unpoison it via
  // MsanMapUnmapCallback, so there is no need to poison it here.
  if (flags()->poison_in_free && allocator.FromPrimary(p)) {
    __msan_poison(p, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_DEALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(p, size, o.raw_id());
    }
  }
  MsanThread *t = GetCurrentThread();
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}
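
// If the new size still fits into the chunk's actually allocated size, the
// request is satisfied in place (poisoning any newly exposed tail bytes);
// otherwise a new chunk is allocated, the old contents are copied together
// with their shadow and origins, and the old chunk is freed.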
static void *MsanReallocate(BufferedStackTrace *stack, void *old_p,
                            uptr new_size, uptr alignment) {
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(old_p));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
  if (new_size <= actually_allocated_size) {
    // We are not reallocating here.
    meta->requested_size = new_size;
    if (new_size > old_size) {
      if (flags()->poison_in_malloc) {
        stack->tag = StackTrace::TAG_ALLOC;
        PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
      }
    }
    return old_p;
  }
  uptr memcpy_size = Min(new_size, old_size);
  void *new_p = MsanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (new_p) {
    CopyMemory(new_p, old_p, memcpy_size, stack);
    MsanDeallocate(stack, old_p);
  }
  return new_p;
}
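
// calloc() helper: rejects nmemb * size overflow before allocating
// zero-initialized memory.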
static void *MsanCalloc(BufferedStackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportCallocOverflow(nmemb, size, stack);
  }
  return MsanAllocate(stack, nmemb * size, sizeof(u64), true);
}
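
// Returns the start of the allocation containing p, or nullptr if p does not
// point into a live chunk owned by this allocator.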
static const void *AllocationBegin(const void *p) {
  if (!p)
    return nullptr;
  void *beg = allocator.GetBlockBegin(p);
  if (!beg)
    return nullptr;
  Metadata *b = (Metadata *)allocator.GetMetaData(beg);
  if (!b)
    return nullptr;
  if (b->requested_size == 0)
    return nullptr;

  return (const void *)beg;
}

static uptr AllocationSize(const void *p) {
  if (!p)
    return 0;
  const void *beg = allocator.GetBlockBegin(p);
  if (beg != p)
    return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(p);
  return b->requested_size;
}

static uptr AllocationSizeFast(const void *p) {
  return reinterpret_cast<Metadata *>(allocator.GetMetaData(p))->requested_size;
}
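
// Entry points called from the interceptors. They mirror the libc malloc
// family and set errno on failure via SetErrnoOnNull() or explicitly for the
// alignment- and overflow-checked variants.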
void *msan_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
}

void *msan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(MsanCalloc(stack, nmemb, size));
}

void *msan_realloc(void *ptr, uptr size, BufferedStackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    MsanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(MsanReallocate(stack, ptr, size, sizeof(u64)));
}

void *msan_reallocarray(void *ptr, uptr nmemb, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return msan_realloc(ptr, nmemb * size, stack);
}

void *msan_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, GetPageSizeCached(), false));
}

void *msan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(MsanAllocate(stack, size, PageSize, false));
}

void *msan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

void *msan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = MsanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by MsanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

}  // namespace __msan

using namespace __msan;
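
// Public __sanitizer_* allocator introspection interface.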
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

const void *__sanitizer_get_allocated_begin(const void *p) {
  return AllocationBegin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = AllocationSizeFast(p);
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }