//===-- hwasan_allocator.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// HWAddressSanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_checks.h"
#include "hwasan_mapping.h"
#include "hwasan_malloc_bisect.h"
#include "hwasan_thread.h"
#include "hwasan_report.h"
#include "lsan/lsan_common.h"

namespace __hwasan {

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
static atomic_uint8_t hwasan_allocator_tagging_enabled;

static constexpr tag_t kFallbackAllocTag = 0xBB & kTagMask;
static constexpr tag_t kFallbackFreeTag = 0xBC;

enum {
  // Either just allocated by the underlying allocator, but the chunk Metadata
  // is not yet ready, or almost returned to the underlying allocator and the
  // Metadata is already meaningless.
  CHUNK_INVALID = 0,
  // The chunk is allocated and not yet freed.
  CHUNK_ALLOCATED = 1,
};

// Initialized in HwasanAllocatorInit, and never changed.
static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];
static uptr max_malloc_size;

bool HwasanChunkView::IsAllocated() const {
  return metadata_ && metadata_->IsAllocated();
}

uptr HwasanChunkView::Beg() const {
  return block_;
}

uptr HwasanChunkView::End() const {
  return Beg() + UsedSize();
}

uptr HwasanChunkView::UsedSize() const {
  return metadata_->GetRequestedSize();
}

u32 HwasanChunkView::GetAllocStackId() const {
  return metadata_->GetAllocStackId();
}

u32 HwasanChunkView::GetAllocThreadId() const {
  return metadata_->GetAllocThreadId();
}

uptr HwasanChunkView::ActualSize() const {
  return allocator.GetActuallyAllocatedSize(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::FromSmallHeap() const {
  return allocator.FromPrimary(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::AddrIsInside(uptr addr) const {
  return (addr >= Beg()) && (addr < Beg() + UsedSize());
}

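// Marks the chunk as live: packs the allocating thread id (high 32 bits) and
// the allocation stack id (low 32 bits) into alloc_context_id, records the
// requested size, and publishes CHUNK_ALLOCATED with release ordering.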
inline void Metadata::SetAllocated(u32 stack, u64 size) {
  Thread *t = GetCurrentThread();
  u64 context = t ? t->unique_id() : kMainTid;
  context <<= 32;
  context += stack;
  requested_size_low = size & ((1ul << 32) - 1);
  requested_size_high = size >> 32;
  atomic_store(&alloc_context_id, context, memory_order_relaxed);
  atomic_store(&chunk_state, CHUNK_ALLOCATED, memory_order_release);
}

inline void Metadata::SetUnallocated() {
  atomic_store(&chunk_state, CHUNK_INVALID, memory_order_release);
  requested_size_low = 0;
  requested_size_high = 0;
  atomic_store(&alloc_context_id, 0, memory_order_relaxed);
}

inline bool Metadata::IsAllocated() const {
  return atomic_load(&chunk_state, memory_order_relaxed) == CHUNK_ALLOCATED;
}

inline u64 Metadata::GetRequestedSize() const {
  return (static_cast<u64>(requested_size_high) << 32) + requested_size_low;
}

inline u32 Metadata::GetAllocStackId() const {
  return atomic_load(&alloc_context_id, memory_order_relaxed);
}

inline u32 Metadata::GetAllocThreadId() const {
  u64 context = atomic_load(&alloc_context_id, memory_order_relaxed);
  u32 tid = context >> 32;
  return tid;
}

void GetAllocatorStats(AllocatorStatCounters s) {
  allocator.GetStats(s);
}

inline void Metadata::SetLsanTag(__lsan::ChunkTag tag) {
  lsan_tag = tag;
}

inline __lsan::ChunkTag Metadata::GetLsanTag() const {
  return static_cast<__lsan::ChunkTag>(lsan_tag);
}

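// Start of the region used for pointer aliasing when HWASAN_ALIASING_MODE is
// compiled in; 0 otherwise. The CHECKs verify that the whole alias region
// falls within the same taggable-region window as the shadow base.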
uptr GetAliasRegionStart() {
#if defined(HWASAN_ALIASING_MODE)
  constexpr uptr kAliasRegionOffset = 1ULL << (kTaggableRegionCheckShift - 1);
  uptr AliasRegionStart =
      __hwasan_shadow_memory_dynamic_address + kAliasRegionOffset;

  CHECK_EQ(AliasRegionStart >> kTaggableRegionCheckShift,
           __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  CHECK_EQ(
      (AliasRegionStart + kAliasRegionOffset - 1) >> kTaggableRegionCheckShift,
      __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
  return AliasRegionStart;
#else
  return 0;
#endif
}

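// One-time allocator setup: seeds the allocator-tagging flag, configures
// allocator_may_return_null, initializes the combined allocator, fills
// tail_magic with random tag bytes, and caps max_malloc_size from flags.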
void HwasanAllocatorInit() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
                       !flags()->disable_allocator_tagging);
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.InitLinkerInitialized(
      common_flags()->allocator_release_to_os_interval_ms,
      GetAliasRegionStart());
  for (uptr i = 0; i < sizeof(tail_magic); i++)
    tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
  if (common_flags()->max_allocation_size_mb) {
    max_malloc_size = common_flags()->max_allocation_size_mb << 20;
    max_malloc_size = Min(max_malloc_size, kMaxAllowedMallocSize);
  } else {
    max_malloc_size = kMaxAllowedMallocSize;
  }
}

void HwasanAllocatorLock() { allocator.ForceLock(); }
void HwasanAllocatorUnlock() { allocator.ForceUnlock(); }

void AllocatorThreadStart(AllocatorCache *cache) { allocator.InitCache(cache); }

void AllocatorThreadFinish(AllocatorCache *cache) {
  allocator.SwallowCache(cache);
  allocator.DestroyCache(cache);
}

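// Rounds a user-requested size up to a whole number of shadow granules
// (kShadowAlignment bytes); a zero-byte request is treated as one byte.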
static uptr TaggedSize(uptr size) {
  if (!size) size = 1;
  uptr new_size = RoundUpTo(size, kShadowAlignment);
  CHECK_GE(new_size, size);
  return new_size;
}

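// Common allocation path for all malloc-family entry points. Enforces the
// size and RSS limits, obtains memory from the combined allocator (via the
// per-thread cache when available, otherwise the fallback cache), optionally
// zeroes or pattern-fills it, writes the tail magic into the short granule,
// tags the memory and the returned pointer, and records allocation metadata.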
static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
                            bool zeroise) {
  // Keep this consistent with LSAN and ASAN behavior.
  if (UNLIKELY(orig_size == 0))
    orig_size = 1;
  if (UNLIKELY(orig_size > max_malloc_size)) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
             orig_size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(orig_size, max_malloc_size, stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportRssLimitExceeded(stack);
  }

  alignment = Max(alignment, kShadowAlignment);
  uptr size = TaggedSize(orig_size);
  Thread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  if (zeroise) {
    // The secondary allocator mmaps memory, which should already be
    // zero-initialized, so we don't need to clear it explicitly.
    if (allocator.FromPrimary(allocated))
      internal_memset(allocated, 0, size);
  } else if (flags()->max_malloc_fill_size > 0) {
    uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
    internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
  }
  if (size != orig_size) {
    u8 *tail = reinterpret_cast<u8 *>(allocated) + orig_size;
    uptr tail_length = size - orig_size;
    internal_memcpy(tail, tail_magic, tail_length - 1);
    // The short granule byte is excluded from the magic tail, so we explicitly
    // untag it.
    tail[tail_length - 1] = 0;
  }

  void *user_ptr = allocated;
  if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled) &&
      flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
    tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
    uptr tag_size = orig_size ? orig_size : 1;
    uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
    user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
    if (full_granule_size != tag_size) {
      u8 *short_granule = reinterpret_cast<u8 *>(allocated) + full_granule_size;
      TagMemoryAligned((uptr)short_granule, kShadowAlignment,
                       tag_size % kShadowAlignment);
      short_granule[kShadowAlignment - 1] = tag;
    }
  } else {
    // Tagging cannot be skipped completely. If it's disabled, we still need to
    // tag with zeros.
    user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
  }

  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
#if CAN_SANITIZE_LEAKS
  meta->SetLsanTag(__lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                  : __lsan::kDirectlyLeaked);
#endif
  meta->SetAllocated(StackDepotPut(*stack), orig_size);
  RunMallocHooks(user_ptr, orig_size);
  return user_ptr;
}

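// Returns true if the pointer's tag matches the memory tag stored in shadow
// (allowing for short granules), or if the address is outside the taggable
// region.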
static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
  CHECK(tagged_ptr);
  uptr tagged_uptr = reinterpret_cast<uptr>(tagged_ptr);
  if (!InTaggableRegion(tagged_uptr))
    return true;
  tag_t mem_tag = *reinterpret_cast<tag_t *>(
      MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
  return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
}

static bool CheckInvalidFree(StackTrace *stack, void *untagged_ptr,
                             void *tagged_ptr) {
  // This function can return true if halt_on_error is false.
  if (!MemIsApp(reinterpret_cast<uptr>(untagged_ptr)) ||
      !PointerAndMemoryTagsMatch(tagged_ptr)) {
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
    return true;
  }
  return false;
}

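// Common deallocation path. Rejects invalid frees, verifies the tail magic
// and short-granule tag, clears the chunk metadata, optionally pattern-fills
// the memory, retags primary-allocator chunks with a fresh tag (when
// tag_in_free is enabled) to maximize use-after-free detection, records the
// free in the per-thread heap-allocation ring when present, and returns the
// block to the underlying allocator.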
static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
  CHECK(tagged_ptr);
  void *untagged_ptr = UntagPtr(tagged_ptr);

  if (CheckInvalidFree(stack, untagged_ptr, tagged_ptr))
    return;

  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  tag_t pointer_tag = GetTagFromPointer(reinterpret_cast<uptr>(tagged_ptr));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  if (!meta) {
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
    return;
  }

  RunFreeHooks(tagged_ptr);

  uptr orig_size = meta->GetRequestedSize();
  u32 free_context_id = StackDepotPut(*stack);
  u32 alloc_context_id = meta->GetAllocStackId();
  u32 alloc_thread_id = meta->GetAllocThreadId();

  bool in_taggable_region =
      InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr));

  // Check tail magic.
  uptr tagged_size = TaggedSize(orig_size);
  if (flags()->free_checks_tail_magic && orig_size &&
      tagged_size != orig_size) {
    uptr tail_size = tagged_size - orig_size - 1;
    CHECK_LT(tail_size, kShadowAlignment);
    void *tail_beg = reinterpret_cast<void *>(
        reinterpret_cast<uptr>(aligned_ptr) + orig_size);
    tag_t short_granule_memtag = *(reinterpret_cast<tag_t *>(
        reinterpret_cast<uptr>(tail_beg) + tail_size));
    if (tail_size &&
        (internal_memcmp(tail_beg, tail_magic, tail_size) ||
         (in_taggable_region && pointer_tag != short_granule_memtag)))
      ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
                            orig_size, tail_magic);
  }

  // TODO(kstoimenov): consider meta->SetUnallocated(free_context_id).
  meta->SetUnallocated();

  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  Thread *t = GetCurrentThread();
  if (flags()->max_free_fill_size > 0) {
    uptr fill_size =
        Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
    internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
  }
  if (in_taggable_region && flags()->tag_in_free && malloc_bisect(stack, 0) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled) &&
      allocator.FromPrimary(untagged_ptr) /* Secondary 0-tag and unmap. */) {
    // Always store full 8-bit tags on free to maximize UAF detection.
    tag_t tag;
    if (t) {
      // Make sure we are not using a short granule tag as a poison tag. This
      // would make us attempt to read the memory on a UaF.
      // The tag can be zero if tagging is disabled on this thread.
      do {
        tag = t->GenerateRandomTag(/*num_bits=*/8);
      } while (
          UNLIKELY((tag < kShadowAlignment || tag == pointer_tag) && tag != 0));
    } else {
      static_assert(kFallbackFreeTag >= kShadowAlignment,
                    "fallback tag must not be a short granule tag.");
      tag = kFallbackFreeTag;
    }
    TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
                     tag);
  }
  if (t) {
    allocator.Deallocate(t->allocator_cache(), aligned_ptr);
    if (auto *ha = t->heap_allocations())
      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_thread_id,
                alloc_context_id, free_context_id,
                static_cast<u32>(orig_size)});
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, aligned_ptr);
  }
}

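// realloc() backend: allocates a new tagged chunk, copies min(old requested
// size, new_size) bytes, and frees the old chunk. An invalid old pointer is
// reported as an invalid free and nullptr is returned.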
static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
                              uptr new_size, uptr alignment) {
  void *untagged_ptr_old = UntagPtr(tagged_ptr_old);
  if (CheckInvalidFree(stack, untagged_ptr_old, tagged_ptr_old))
    return nullptr;
  void *tagged_ptr_new =
      HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (tagged_ptr_old && tagged_ptr_new) {
    Metadata *meta =
        reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
    void *untagged_ptr_new = UntagPtr(tagged_ptr_new);
    internal_memcpy(untagged_ptr_new, untagged_ptr_old,
                    Min(new_size, static_cast<uptr>(meta->GetRequestedSize())));
    HwasanDeallocate(stack, tagged_ptr_old);
  }
  return tagged_ptr_new;
}

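// calloc() backend: checks nmemb * size for overflow, then delegates to
// HwasanAllocate with zero-initialization requested.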
static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return HwasanAllocate(stack, nmemb * size, sizeof(u64), true);
}

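// Maps an arbitrary heap address to a HwasanChunkView describing the chunk
// that contains it; returns an empty view for non-heap addresses.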
HwasanChunkView FindHeapChunkByAddress(uptr address) {
  if (!allocator.PointerIsMine(reinterpret_cast<void *>(address)))
    return HwasanChunkView();
  void *block = allocator.GetBlockBegin(reinterpret_cast<void *>(address));
  if (!block)
    return HwasanChunkView();
  Metadata *metadata =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(block));
  return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}

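// Returns the start of the allocation containing p, re-tagged with p's own
// pointer tag, or nullptr if p does not point into a live heap chunk.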
static const void *AllocationBegin(const void *p) {
  const void *untagged_ptr = UntagPtr(p);
  if (!untagged_ptr)
    return nullptr;

  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  if (!beg)
    return nullptr;

  Metadata *b = (Metadata *)allocator.GetMetaData(beg);
  if (b->GetRequestedSize() == 0)
    return nullptr;

  tag_t tag = GetTagFromPointer((uptr)p);
  return (const void *)AddTagToPointer((uptr)beg, tag);
}

static uptr AllocationSize(const void *p) {
  const void *untagged_ptr = UntagPtr(p);
  if (!untagged_ptr)
    return 0;
  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  if (!beg)
    return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(beg);
  return b->GetRequestedSize();
}

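// Fast-path variant of AllocationSize: assumes p already points at the start
// of an allocation (as asserted by the caller) and reads the requested size
// straight from the chunk metadata without a block-begin lookup.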
static uptr AllocationSizeFast(const void *p) {
  const void *untagged_ptr = UntagPtr(p);
  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  return meta->GetRequestedSize();
}

void *hwasan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
}

void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
}

void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    HwasanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64)));
}

void *hwasan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return hwasan_realloc(ptr, nmemb * size, stack);
}

void *hwasan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(
      HwasanAllocate(stack, size, GetPageSizeCached(), false));
}

void *hwasan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false));
}

void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
                          StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = HwasanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by HwasanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void hwasan_free(void *ptr, StackTrace *stack) {
  return HwasanDeallocate(stack, ptr);
}

}  // namespace __hwasan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {

void LockAllocator() {
  __hwasan::HwasanAllocatorLock();
}

void UnlockAllocator() {
  __hwasan::HwasanAllocatorUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__hwasan::allocator;
  *end = *begin + sizeof(__hwasan::allocator);
}

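// Returns the untagged start of the live chunk containing p, or 0 if p does
// not point into a live chunk. The pointer tag, if any, is stripped before
// the lookup; a pointer one past the end of a zero-sized operator new
// allocation is also accepted.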
uptr PointsIntoChunk(void *p) {
  p = UntagPtr(p);
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk =
      reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBeginFastLocked(p));
  if (!chunk)
    return 0;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
  if (!metadata || !metadata->IsAllocated())
    return 0;
  if (addr < chunk + metadata->GetRequestedSize())
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, metadata->GetRequestedSize(), addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  CHECK_EQ(UntagAddr(chunk), chunk);
  void *block = __hwasan::allocator.GetBlockBeginFastLocked(
      reinterpret_cast<void *>(chunk));
  if (!block)
    return 0;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(block));
  if (!metadata || !metadata->IsAllocated())
    return 0;
  return reinterpret_cast<uptr>(block);
}

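// Reconstructs the user-visible (tagged) address for an untagged chunk
// address by reading the memory tag back from shadow.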
uptr GetUserAddr(uptr chunk) {
  if (!InTaggableRegion(chunk))
    return chunk;
  tag_t mem_tag = *(tag_t *)__hwasan::MemToShadow(chunk);
  return AddTagToPointer(chunk, mem_tag);
}

LsanMetadata::LsanMetadata(uptr chunk) {
  CHECK_EQ(UntagAddr(chunk), chunk);
  metadata_ =
      chunk ? __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk))
            : nullptr;
}

bool LsanMetadata::allocated() const {
  if (!metadata_)
    return false;
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->IsAllocated();
}

ChunkTag LsanMetadata::tag() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetLsanTag();
}

void LsanMetadata::set_tag(ChunkTag value) {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  m->SetLsanTag(value);
}

uptr LsanMetadata::requested_size() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetRequestedSize();
}

u32 LsanMetadata::stack_trace_id() const {
  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
  return m->GetAllocStackId();
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __hwasan::allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObject(const void *p) {
  p = UntagPtr(p);
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBegin(p));
  if (!chunk)
    return kIgnoreObjectInvalid;
  __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
      __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
  if (!metadata || !metadata->IsAllocated())
    return kIgnoreObjectInvalid;
  if (addr >= chunk + metadata->GetRequestedSize())
    return kIgnoreObjectInvalid;
  if (metadata->GetLsanTag() == kIgnored)
    return kIgnoreObjectAlreadyIgnored;

  metadata->SetLsanTag(kIgnored);
  return kIgnoreObjectSuccess;
}

}  // namespace __lsan

using namespace __hwasan;

void __hwasan_enable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1);
}

void __hwasan_disable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0);
}

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

const void *__sanitizer_get_allocated_begin(const void *p) {
  return AllocationBegin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = AllocationSizeFast(p);
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }