//=-- lsan_allocator.cpp --------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {
#if defined(__i386__) || defined(__arm__)
static const uptr kMaxAllowedMallocSize = 1ULL << 30;
#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4ULL << 30;
#else
static const uptr kMaxAllowedMallocSize = 8ULL << 30;
#endif

static Allocator allocator;
static uptr max_malloc_size;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.InitLinkerInitialized(
      common_flags()->allocator_release_to_os_interval_ms);
  if (common_flags()->max_allocation_size_mb)
    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
                          kMaxAllowedMallocSize);
  else
    max_malloc_size = kMaxAllowedMallocSize;
}
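
// The effective cap is the smaller of the runtime flag (given in MB, hence
// the << 20) and the per-arch constant above. A worked example, assuming the
// 64-bit default of kMaxAllowedMallocSize = 8 GB: with
// max_allocation_size_mb=512 the cap is min(512 << 20, 8ULL << 30) = 512 MB;
// with the flag unset it stays at 8 GB.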

void AllocatorThreadStart() { allocator.InitCache(GetAllocatorCache()); }

void AllocatorThreadFinish() {
  allocator.SwallowCache(GetAllocatorCache());
  allocator.DestroyCache(GetAllocatorCache());
}

static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
  RunMallocHooks(p, size);
}
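
// The atomic_store above writes the first byte of ChunkMetadata, which (per
// lsan_allocator.h) is the `allocated` flag and is required to come first in
// the struct. memory_order_relaxed appears to be sufficient here because the
// leak scan runs with the world stopped, so no mutator thread races with
// these stores while chunks are being examined.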

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  RunFreeHooks(p);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) {
  if (AllocatorMayReturnNull()) {
    Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size);
    return nullptr;
  }
  // The overload taking max_malloc_size (declared in
  // sanitizer_allocator_report.h) is noreturn, so no return statement is
  // needed after it.
  ReportAllocationSizeTooBig(size, max_malloc_size, &stack);
}

void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > max_malloc_size)
    return ReportAllocationSizeTooBig(size, stack);
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportRssLimitExceeded(&stack);
  }
  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, &stack);
  }
  // Do not rely on the allocator to clear the memory (it's slow).
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  return p;
}
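
// Only primary (size-class) chunks are cleared by hand above; blocks that
// fall to the secondary allocator come from freshly mmap'ed pages, which the
// OS already zero-fills, so an explicit memset would be redundant there.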

static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, &stack);
  }
  size *= nmemb;
  return Allocate(stack, size, 1, true);
}
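
// CheckForCallocOverflow flags nmemb * size wrapping around uptr. For
// example, on a 64-bit target calloc(1ULL << 33, 1ULL << 31) would wrap the
// product to 0; with the check, the call reports an error (or returns
// nullptr under allocator_may_return_null=1) instead of handing back a tiny
// block.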

void Deallocate(void *p) {
  RegisterDeallocation(p);
  allocator.Deallocate(GetAllocatorCache(), p);
}

void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  if (new_size > max_malloc_size) {
    ReportAllocationSizeTooBig(new_size, stack);
    return nullptr;
  }
  RegisterDeallocation(p);
  void *new_p =
      allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
  if (new_p)
    RegisterAllocation(stack, new_p, new_size);
  else if (new_size != 0)
    RegisterAllocation(stack, p, new_size);
  return new_p;
}
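
// Note the failure path: the old chunk was already unregistered above, so if
// the underlying Reallocate fails for a nonzero new_size (new_size == 0
// means the block was simply freed), the original p is re-registered to keep
// its metadata live rather than leaving a reachable-but-untracked block.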

void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)GetAllocatorCache();
  *end = *begin + sizeof(AllocatorCache);
}

static const void *GetMallocBegin(const void *p) {
  if (!p)
    return nullptr;
  void *beg = allocator.GetBlockBegin(p);
  if (!beg)
    return nullptr;
  ChunkMetadata *m = Metadata(beg);
  if (!m)
    return nullptr;
  if (!m->allocated)
    return nullptr;
  if (m->requested_size == 0)
    return nullptr;
  return (const void *)beg;
}

uptr GetMallocUsableSize(const void *p) {
  if (!p)
    return 0;
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

uptr GetMallocUsableSizeFast(const void *p) {
  return Metadata(p)->requested_size;
}
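
// The Fast variant skips the null and metadata checks; judging by the
// DCHECKs in __sanitizer_get_allocated_size_fast below, callers must pass a
// pointer already known to be the beginning of a live allocation.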

int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        const StackTrace &stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, &stack);
  }
  void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}
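
// Unlike the other entry points, posix_memalign reports failure through its
// return value rather than errno, which is why this function returns
// errno_EINVAL / errno_ENOMEM directly instead of wrapping the result in
// SetErrnoOnNull. A hypothetical call through the interceptor:
//   void *p;
//   int rc = posix_memalign(&p, 64, 1024);  // rc == 0 on success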

void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_malloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
}

void lsan_free(void *p) {
  Deallocate(p);
}

void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Reallocate(stack, p, size, 1));
}

void *lsan_reallocarray(void *ptr, uptr nmemb, uptr size,
                        const StackTrace &stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, &stack);
  }
  return lsan_realloc(ptr, nmemb * size, stack);
}

void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Calloc(nmemb, size, stack));
}

void *lsan_valloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(
      Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
}

void *lsan_pvalloc(uptr size, const StackTrace &stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, &stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory));
}
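
// Worked example (assuming 4 KB pages): pvalloc(1) and pvalloc(4096) both
// round up to a single 4096-byte, page-aligned block, while pvalloc(4097)
// rounds up to 8192 bytes; pvalloc(0) is special-cased to one page.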

uptr lsan_mz_size(const void *p) {
  return GetMallocUsableSize(p);
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

uptr PointsIntoChunk(void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}
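
// IsSpecialCaseOfOperatorNew0 (see lsan_common.h) handles `new T[0]` for a T
// with a destructor: the chunk then holds only an array cookie, and the
// pointer the program keeps points just past it (addr == chunk + size). That
// fails the interior-pointer test above, so without the special case such
// chunks would be falsely reported as leaks.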

uptr GetUserBegin(uptr chunk) {
  return chunk;
}

uptr GetUserAddr(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObject(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
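
// IgnoreObject is presumably the allocator-side backend of the public
// __lsan_ignore_object() interface: it validates that p points into a live
// chunk and tags the whole chunk kIgnored so the leak report skips it.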

} // namespace __lsan

using namespace __lsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

// The next two entry points look like stubs: LSan's allocator does not track
// these statistics, so they return fixed values.
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 1; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) {
  return GetMallocBegin(p) != nullptr;
}

SANITIZER_INTERFACE_ATTRIBUTE
const void *__sanitizer_get_allocated_begin(const void *p) {
  return GetMallocBegin(p);
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size_fast(const void *p) {
  // In debug builds, verify that the fast path agrees with the checked one.
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = GetMallocUsableSizeFast(p);
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }
} // extern "C"
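
// A hedged usage sketch of the interface above (hypothetical client code,
// via the public header sanitizer/allocator_interface.h):
//   void *p = malloc(100);
//   // __sanitizer_get_ownership(p) != 0
//   // __sanitizer_get_allocated_size(p) == 100 -- LSan reports the
//   // requested size, not the rounded-up block size, per
//   // GetMallocUsableSize above.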