//===-- hwasan_allocator.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// HWAddressSanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_checks.h"
#include "hwasan_mapping.h"
#include "hwasan_malloc_bisect.h"
#include "hwasan_thread.h"
#include "hwasan_report.h"
#include "lsan/lsan_common.h"

namespace __hwasan {

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
static atomic_uint8_t hwasan_allocator_tagging_enabled;

static constexpr tag_t kFallbackAllocTag = 0xBB & kTagMask;
static constexpr tag_t kFallbackFreeTag = 0xBC;

enum {
  // Either just allocated by the underlying allocator, but the chunk metadata
  // is not yet ready, or almost returned to the underlying allocator and the
  // metadata is already meaningless.
  CHUNK_INVALID = 0,
  // The chunk is allocated and not yet freed.
  CHUNK_ALLOCATED = 1,
};

// Initialized in HwasanAllocatorInit, and never changed.
static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];

bool HwasanChunkView::IsAllocated() const {
  return metadata_ && metadata_->IsAllocated();
}

uptr HwasanChunkView::Beg() const {
  return block_;
}

uptr HwasanChunkView::End() const {
  return Beg() + UsedSize();
}

uptr HwasanChunkView::UsedSize() const {
  return metadata_->GetRequestedSize();
}

u32 HwasanChunkView::GetAllocStackId() const {
  return metadata_->GetAllocStackId();
}

uptr HwasanChunkView::ActualSize() const {
  return allocator.GetActuallyAllocatedSize(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::FromSmallHeap() const {
  return allocator.FromPrimary(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::AddrIsInside(uptr addr) const {
  return (addr >= Beg()) && (addr < Beg() + UsedSize());
}
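
// Record an allocation in the chunk metadata: store the requested size and
// pack the allocating thread's unique id (high 32 bits) together with the
// stack depot id of the allocation stack (low 32 bits) into alloc_context_id.
// chunk_state is published last, with release ordering.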
inline void Metadata::SetAllocated(u32 stack, u64 size) {
  Thread *t = GetCurrentThread();
  u64 context = t ? t->unique_id() : kMainTid;
  context <<= 32;
  context += stack;
  requested_size_low = size & ((1ul << 32) - 1);
  requested_size_high = size >> 32;
  atomic_store(&alloc_context_id, context, memory_order_relaxed);
  atomic_store(&chunk_state, CHUNK_ALLOCATED, memory_order_release);
}

inline void Metadata::SetUnallocated() {
  atomic_store(&chunk_state, CHUNK_INVALID, memory_order_release);
  requested_size_low = 0;
  requested_size_high = 0;
  atomic_store(&alloc_context_id, 0, memory_order_relaxed);
}

inline bool Metadata::IsAllocated() const {
  return atomic_load(&chunk_state, memory_order_relaxed) == CHUNK_ALLOCATED &&
         GetRequestedSize();
}

inline u64 Metadata::GetRequestedSize() const {
  return (static_cast<u64>(requested_size_high) << 32) + requested_size_low;
}

inline u32 Metadata::GetAllocStackId() const {
  return atomic_load(&alloc_context_id, memory_order_relaxed);
}

void GetAllocatorStats(AllocatorStatCounters s) {
  allocator.GetStats(s);
}

inline void Metadata::SetLsanTag(__lsan::ChunkTag tag) {
  lsan_tag = tag;
}

inline __lsan::ChunkTag Metadata::GetLsanTag() const {
  return static_cast<__lsan::ChunkTag>(lsan_tag);
}
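
// Aliasing mode only: compute the start of the region used by the primary
// allocator, placed at a fixed offset from the shadow base so that the whole
// region stays within the taggable range (see the CHECKs below). Returns 0
// when HWASAN_ALIASING_MODE is not defined.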
uptr GetAliasRegionStart() {
#if defined(HWASAN_ALIASING_MODE)
  constexpr uptr kAliasRegionOffset = 1ULL << (kTaggableRegionCheckShift - 1);
  uptr AliasRegionStart =
      __hwasan_shadow_memory_dynamic_address + kAliasRegionOffset;
  CHECK_EQ(AliasRegionStart >> kTaggableRegionCheckShift,
           __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  CHECK_EQ(
      (AliasRegionStart + kAliasRegionOffset - 1) >> kTaggableRegionCheckShift,
      __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  return AliasRegionStart;
#else
  return 0;
#endif
}

void HwasanAllocatorInit() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
                       !flags()->disable_allocator_tagging);
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms,
                 GetAliasRegionStart());
  for (uptr i = 0; i < sizeof(tail_magic); i++)
    tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
}

void HwasanAllocatorLock() { allocator.ForceLock(); }
void HwasanAllocatorUnlock() { allocator.ForceUnlock(); }

void AllocatorSwallowThreadLocalCache(AllocatorCache *cache) {
  allocator.SwallowCache(cache);
}
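
// Rounds a requested size up to the shadow granule size (kShadowAlignment).
// Zero-byte requests are rounded up as if they were one byte, so every
// allocation owns at least one granule.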
static uptr TaggedSize(uptr size) {
  if (!size) size = 1;
  uptr new_size = RoundUpTo(size, kShadowAlignment);
  CHECK_GE(new_size, size);
  return new_size;
}
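
// Common allocation path for all malloc-like entry points:
//  * reject over-sized requests and RSS-limit violations (optionally
//    returning null instead of reporting a fatal error),
//  * grab memory from the per-thread allocator cache, or from the global
//    fallback cache when no Thread object exists,
//  * optionally zero or pattern-fill the block, and write the tail magic into
//    the slack of the last granule,
//  * tag the user memory (random tag from the thread, or kFallbackAllocTag),
//    using a short-granule tag when the size is not granule-aligned,
//  * record the requested size and allocation stack in the chunk metadata.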
static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
                            bool zeroise) {
  if (orig_size > kMaxAllowedMallocSize) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
             orig_size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportRssLimitExceeded(stack);
  }

  alignment = Max(alignment, kShadowAlignment);
  uptr size = TaggedSize(orig_size);
  Thread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  if (zeroise) {
    internal_memset(allocated, 0, size);
  } else if (flags()->max_malloc_fill_size > 0) {
    uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
    internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
  }
  if (size != orig_size) {
    u8 *tail = reinterpret_cast<u8 *>(allocated) + orig_size;
    uptr tail_length = size - orig_size;
    internal_memcpy(tail, tail_magic, tail_length - 1);
    // Short granule is excluded from magic tail, so we explicitly untag.
    tail[tail_length - 1] = 0;
  }

  void *user_ptr = allocated;
  // Tagging can only be skipped when both tag_in_malloc and tag_in_free are
  // false. When tag_in_malloc = false and tag_in_free = true, malloc needs to
  // retag to 0.
  if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)) &&
      (flags()->tag_in_malloc || flags()->tag_in_free) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
    if (flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
      tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
      uptr tag_size = orig_size ? orig_size : 1;
      uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
      user_ptr =
          (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
      if (full_granule_size != tag_size) {
        u8 *short_granule =
            reinterpret_cast<u8 *>(allocated) + full_granule_size;
        TagMemoryAligned((uptr)short_granule, kShadowAlignment,
                         tag_size % kShadowAlignment);
        short_granule[kShadowAlignment - 1] = tag;
      }
    } else {
      user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
    }
  }

  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
#if CAN_SANITIZE_LEAKS
  meta->SetLsanTag(__lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                  : __lsan::kDirectlyLeaked);
#endif
  meta->SetAllocated(StackDepotPut(*stack), orig_size);
  RunMallocHooks(user_ptr, size);
  return user_ptr;
}
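
// Returns true when the pointer's tag matches the tag stored in shadow for
// its first granule (including the short-granule case), or when the address
// lies outside the taggable region and therefore cannot be checked.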
static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
  CHECK(tagged_ptr);
  uptr tagged_uptr = reinterpret_cast<uptr>(tagged_ptr);
  if (!InTaggableRegion(tagged_uptr))
    return true;
  tag_t mem_tag = *reinterpret_cast<tag_t *>(
      MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
  return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
}

static bool CheckInvalidFree(StackTrace *stack, void *untagged_ptr,
                             void *tagged_ptr) {
  // This function can return true if halt_on_error is false.
  if (!MemIsApp(reinterpret_cast<uptr>(untagged_ptr)) ||
      !PointerAndMemoryTagsMatch(tagged_ptr)) {
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
    return true;
  }
  return false;
}
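
// Common free path: validate the pointer (reporting an invalid free on a tag
// mismatch or a non-heap address), verify the tail magic of the last granule,
// clear the chunk metadata, optionally pattern-fill the memory, retag it with
// a fresh full 8-bit tag when tag_in_free is enabled so that use-after-free
// accesses fault, and return the block to the per-thread or fallback cache.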
static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
  CHECK(tagged_ptr);
  RunFreeHooks(tagged_ptr);

  bool in_taggable_region =
      InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr));
  void *untagged_ptr = in_taggable_region ? UntagPtr(tagged_ptr) : tagged_ptr;

  if (CheckInvalidFree(stack, untagged_ptr, tagged_ptr))
    return;

  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  tag_t pointer_tag = GetTagFromPointer(reinterpret_cast<uptr>(tagged_ptr));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  if (!meta) {
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
    return;
  }
  uptr orig_size = meta->GetRequestedSize();
  u32 free_context_id = StackDepotPut(*stack);
  u32 alloc_context_id = meta->GetAllocStackId();

  // Check tail magic.
  uptr tagged_size = TaggedSize(orig_size);
  if (flags()->free_checks_tail_magic && orig_size &&
      tagged_size != orig_size) {
    uptr tail_size = tagged_size - orig_size - 1;
    CHECK_LT(tail_size, kShadowAlignment);
    void *tail_beg = reinterpret_cast<void *>(
        reinterpret_cast<uptr>(aligned_ptr) + orig_size);
    tag_t short_granule_memtag = *(reinterpret_cast<tag_t *>(
        reinterpret_cast<uptr>(tail_beg) + tail_size));
    if (tail_size &&
        (internal_memcmp(tail_beg, tail_magic, tail_size) ||
         (in_taggable_region && pointer_tag != short_granule_memtag)))
      ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
                            orig_size, tail_magic);
  }

  // TODO(kstoimenov): consider meta->SetUnallocated(free_context_id).
  meta->SetUnallocated();
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  Thread *t = GetCurrentThread();
  if (flags()->max_free_fill_size > 0) {
    uptr fill_size =
        Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
    internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
  }
  if (in_taggable_region && flags()->tag_in_free && malloc_bisect(stack, 0) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
    // Always store full 8-bit tags on free to maximize UAF detection.
    tag_t tag;
    if (t) {
      // Make sure we are not using a short granule tag as a poison tag. This
      // would make us attempt to read the memory on a UAF.
      // The tag can be zero if tagging is disabled on this thread.
      do {
        tag = t->GenerateRandomTag(/*num_bits=*/8);
      } while (
          UNLIKELY((tag < kShadowAlignment || tag == pointer_tag) && tag != 0));
    } else {
      static_assert(kFallbackFreeTag >= kShadowAlignment,
                    "fallback tag must not be a short granule tag.");
      tag = kFallbackFreeTag;
    }
    TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
                     tag);
  }
  if (t) {
    allocator.Deallocate(t->allocator_cache(), aligned_ptr);
    if (auto *ha = t->heap_allocations())
      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
                free_context_id, static_cast<u32>(orig_size)});
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, aligned_ptr);
  }
}
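
// realloc is implemented as allocate-copy-free: the old chunk is validated,
// a new chunk (with a new tag) is allocated, min(old size, new size) bytes
// are copied, and the old chunk is freed. If the new allocation succeeds, the
// data always moves, even when shrinking.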
static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
                              uptr new_size, uptr alignment) {
  void *untagged_ptr_old =
      InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr_old))
          ? UntagPtr(tagged_ptr_old)
          : tagged_ptr_old;
  if (CheckInvalidFree(stack, untagged_ptr_old, tagged_ptr_old))
    return nullptr;
  void *tagged_ptr_new =
      HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (tagged_ptr_old && tagged_ptr_new) {
    Metadata *meta =
        reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
    internal_memcpy(
        UntagPtr(tagged_ptr_new), untagged_ptr_old,
        Min(new_size, static_cast<uptr>(meta->GetRequestedSize())));
    HwasanDeallocate(stack, tagged_ptr_old);
  }
  return tagged_ptr_new;
}

static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return HwasanAllocate(stack, nmemb * size, sizeof(u64), true);
}

HwasanChunkView FindHeapChunkByAddress(uptr address) {
  if (!allocator.PointerIsMine(reinterpret_cast<void *>(address)))
    return HwasanChunkView();
  void *block = allocator.GetBlockBegin(reinterpret_cast<void*>(address));
  if (!block)
    return HwasanChunkView();
  Metadata *metadata =
      reinterpret_cast<Metadata*>(allocator.GetMetaData(block));
  return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}
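
// Returns the requested size of the heap allocation that tagged_ptr points
// at, or 0 if the (untagged) pointer is null or does not point to the
// beginning of a heap block.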
static uptr AllocationSize(const void *tagged_ptr) {
  const void *untagged_ptr = UntagPtr(tagged_ptr);
  if (!untagged_ptr) return 0;
  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  Metadata *b = (Metadata *)allocator.GetMetaData(untagged_ptr);
  if (beg != untagged_ptr) return 0;
  return b->GetRequestedSize();
}

void *hwasan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
}

void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
}

void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    HwasanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64)));
}

void *hwasan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return hwasan_realloc(ptr, nmemb * size, stack);
}

void *hwasan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(
      HwasanAllocate(stack, size, GetPageSizeCached(), false));
}

void *hwasan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false));
}

void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
                          StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = HwasanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by HwasanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void hwasan_free(void *ptr, StackTrace *stack) {
  return HwasanDeallocate(stack, ptr);
}

} // namespace __hwasan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {

void LockAllocator() {
  __hwasan::HwasanAllocatorLock();
}

void UnlockAllocator() {
  __hwasan::HwasanAllocatorUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__hwasan::allocator;
  *end = *begin + sizeof(__hwasan::allocator);
}
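
// If p points into a live heap chunk (or is one of the special cases produced
// by operator new of size 0), returns the chunk's begin address; otherwise
// returns 0. Tagged pointers are untagged before the lookup.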
uptr PointsIntoChunk(void *p) {
  p = __hwasan::InTaggableRegion(reinterpret_cast<uptr>(p)) ? UntagPtr(p) : p;
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk =
      reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBeginFastLocked(p));
  if (!chunk)
    return 0;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
  if (!metadata || !metadata->IsAllocated())
    return 0;
  if (addr < chunk + metadata->GetRequestedSize())
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, metadata->GetRequestedSize(), addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  if (__hwasan::InTaggableRegion(chunk))
    CHECK_EQ(UntagAddr(chunk), chunk);
  void *block = __hwasan::allocator.GetBlockBeginFastLocked(
      reinterpret_cast<void *>(chunk));
  if (!block)
    return 0;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(block));
  if (!metadata || !metadata->IsAllocated())
    return 0;
  return reinterpret_cast<uptr>(block);
}

LsanMetadata::LsanMetadata(uptr chunk) {
  if (__hwasan::InTaggableRegion(chunk))
    CHECK_EQ(UntagAddr(chunk), chunk);
  metadata_ =
      chunk ? __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk))
            : nullptr;
}

bool LsanMetadata::allocated() const {
  if (!metadata_)
    return false;
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->IsAllocated();
}

ChunkTag LsanMetadata::tag() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetLsanTag();
}

void LsanMetadata::set_tag(ChunkTag value) {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  m->SetLsanTag(value);
}

uptr LsanMetadata::requested_size() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetRequestedSize();
}

u32 LsanMetadata::stack_trace_id() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetAllocStackId();
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __hwasan::allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  p = __hwasan::InTaggableRegion(reinterpret_cast<uptr>(p)) ? UntagPtr(p) : p;
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk =
      reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBeginFastLocked(p));
  if (!chunk)
    return kIgnoreObjectInvalid;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
  if (!metadata || !metadata->IsAllocated())
    return kIgnoreObjectInvalid;
  if (addr >= chunk + metadata->GetRequestedSize())
    return kIgnoreObjectInvalid;
  if (metadata->GetLsanTag() == kIgnored)
    return kIgnoreObjectAlreadyIgnored;
  metadata->SetLsanTag(kIgnored);
  return kIgnoreObjectSuccess;
}

} // namespace __lsan

using namespace __hwasan;
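
// Exported C interface: run-time control of heap tagging, plus the common
// sanitizer allocator introspection entry points implemented on top of the
// allocator statistics and AllocationSize().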
void __hwasan_enable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1);
}

void __hwasan_disable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0);
}

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }