//===-- msan_allocator.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "msan.h"
#include "msan_allocator.h"
#include "msan_origin.h"
#include "msan_thread.h"
#include "msan_poisoning.h"

namespace __msan {
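
// Per-chunk metadata kept out of line in the allocator's metadata area
// (kMetadataSize in the AP structs below): just the size the user originally
// asked for, since block sizes are rounded up to a size class.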
struct Metadata {
  uptr requested_size;
};
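
// Hooked into the allocator's map/unmap events. On unmap we unpoison the
// user range and return the corresponding shadow (and, with origin tracking,
// origin) pages to the OS, since they are no longer needed.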
struct MsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnUnmap(uptr p, uptr size) const {
    __msan_unpoison((void *)p, size);

    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    uptr shadow_p = MEM_TO_SHADOW(p);
    ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
    if (__msan_get_track_origins()) {
      uptr origin_p = MEM_TO_ORIGIN(p);
      ReleaseMemoryPagesToOS(origin_p, origin_p + size);
    }
  }
};
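
// Per-architecture allocator configuration. x86_64, powerpc64 and s390x use
// SizeClassAllocator64 over a fixed virtual-address region; mips64 and
// aarch64 use the region-based 32-bit allocator, which does not require a
// fixed mapping. Each configuration also sets kMaxAllowedMallocSize, the
// hard cap on a single allocation for that target.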
#if defined(__mips64)
static const uptr kMaxAllowedMallocSize = 2UL << 30;

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#elif defined(__x86_64__)
#if SANITIZER_NETBSD || \
    (SANITIZER_LINUX && !defined(MSAN_LINUX_X86_64_OLD_MAPPING))
static const uptr kAllocatorSpace = 0x700000000000ULL;
#else
static const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
static const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__powerpc64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x300000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__s390x__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x440000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#endif
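
// The combined allocator serves small requests from the primary above and
// falls back to a secondary mmap-based allocator for sizes the primary's
// size classes cannot serve. Each thread owns an AllocatorCache in its
// malloc storage; threadless contexts share fallback_allocator_cache,
// guarded by fallback_mutex.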
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static StaticSpinMutex fallback_mutex;

static uptr max_malloc_size;
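
// One-time allocator setup, driven by common flags: allocator_may_return_null,
// allocator_release_to_os_interval_ms, and an optional max_allocation_size_mb
// cap (never raised above kMaxAllowedMallocSize).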
void MsanAllocatorInit() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
  if (common_flags()->max_allocation_size_mb)
    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
                          kMaxAllowedMallocSize);
  else
    max_malloc_size = kMaxAllowedMallocSize;
}

AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

void MsanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
}
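
// Common allocation path. Enforces the size cap and the RSS limit, allocates
// from the caller's thread-local cache (or the shared fallback cache),
// records the requested size in the chunk metadata, then either zeroes and
// unpoisons the memory (the calloc path) or, with poison_in_malloc, marks it
// as uninitialized and tags it with a heap-allocation origin when origin
// tracking is enabled.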
static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
                          bool zeroise) {
  if (size > max_malloc_size) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: MemorySanitizer failed to allocate 0x%zx bytes\n", size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(size, max_malloc_size, stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportRssLimitExceeded(stack);
  }
  MsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = size;
  if (zeroise) {
    __msan_clear_and_unpoison(allocated, size);
  } else if (flags()->poison_in_malloc) {
    __msan_poison(allocated, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_ALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(allocated, size, o.raw_id());
    }
  }
  MSAN_MALLOC_HOOK(allocated, size);
  return allocated;
}
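
// Frees p. With poison_in_free, the chunk is re-poisoned (and tagged with a
// deallocation origin) so that a later read through a dangling pointer is
// reported as a use of uninitialized memory.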
void MsanDeallocate(StackTrace *stack, void *p) {
  CHECK(p);
  MSAN_FREE_HOOK(p);
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->requested_size = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  if (flags()->poison_in_free) {
    __msan_poison(p, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_DEALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(p, size, o.raw_id());
    }
  }
  MsanThread *t = GetCurrentThread();
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}
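
// Grows or shrinks in place when the chunk's actually allocated size already
// covers new_size, poisoning any newly exposed tail bytes; otherwise it
// allocates a new chunk, copies Min(old, new) bytes together with their
// shadow and origins (CopyMemory), and frees the old chunk.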
static void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
                            uptr alignment) {
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(old_p));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
  if (new_size <= actually_allocated_size) {
    // We are not reallocating here.
    meta->requested_size = new_size;
    if (new_size > old_size) {
      if (flags()->poison_in_malloc) {
        stack->tag = StackTrace::TAG_ALLOC;
        PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
      }
    }
    return old_p;
  }
  uptr memcpy_size = Min(new_size, old_size);
  void *new_p = MsanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (new_p) {
    CopyMemory(new_p, old_p, memcpy_size, stack);
    MsanDeallocate(stack, old_p);
  }
  return new_p;
}
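
// calloc: reject nmemb * size overflow up front, then allocate zeroed (and
// therefore fully initialized) memory.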
static void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return MsanAllocate(stack, nmemb * size, sizeof(u64), true);
}
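
// Returns the requested size for a pointer to the start of a live chunk and
// 0 otherwise. Backs the __sanitizer_get_ownership and
// __sanitizer_get_allocated_size queries below.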
static uptr AllocationSize(const void *p) {
  if (!p) return 0;
  const void *beg = allocator.GetBlockBegin(p);
  if (beg != p) return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(p);
  return b->requested_size;
}
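
// Entry points called from the interceptors. They wrap the core routines
// above, perform the libc-mandated argument checks, and set errno on failure
// via SetErrnoOnNull.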
void *msan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
}

void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanCalloc(stack, nmemb, size));
}

void *msan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    MsanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(MsanReallocate(stack, ptr, size, sizeof(u64)));
}

void *msan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return msan_realloc(ptr, nmemb * size, stack);
}

void *msan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, GetPageSizeCached(), false));
}

void *msan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(MsanAllocate(stack, size, PageSize, false));
}

void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = MsanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by MsanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

} // namespace __msan

using namespace __msan;
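
// Allocator statistics exported through the common sanitizer interface.
// Note that a few of these are stubs returning a constant.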
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }