  1. //===-- asan_allocator.cpp ------------------------------------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file is a part of AddressSanitizer, an address sanity checker.
  10. //
  11. // Implementation of ASan's memory allocator, 2-nd version.
  12. // This variant uses the allocator from sanitizer_common, i.e. the one shared
  13. // with ThreadSanitizer and MemorySanitizer.
  14. //
  15. //===----------------------------------------------------------------------===//
  16. #include "asan_allocator.h"
  17. #include "asan_internal.h"
  18. #include "asan_mapping.h"
  19. #include "asan_poisoning.h"
  20. #include "asan_report.h"
  21. #include "asan_stack.h"
  22. #include "asan_thread.h"
  23. #include "lsan/lsan_common.h"
  24. #include "sanitizer_common/sanitizer_allocator_checks.h"
  25. #include "sanitizer_common/sanitizer_allocator_interface.h"
  26. #include "sanitizer_common/sanitizer_common.h"
  27. #include "sanitizer_common/sanitizer_errno.h"
  28. #include "sanitizer_common/sanitizer_flags.h"
  29. #include "sanitizer_common/sanitizer_internal_defs.h"
  30. #include "sanitizer_common/sanitizer_list.h"
  31. #include "sanitizer_common/sanitizer_quarantine.h"
  32. #include "sanitizer_common/sanitizer_stackdepot.h"
  33. namespace __asan {
  34. // Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
  35. // We use adaptive redzones: for larger allocations, larger redzones are used.
  36. static u32 RZLog2Size(u32 rz_log) {
  37. CHECK_LT(rz_log, 8);
  38. return 16 << rz_log;
  39. }
  40. static u32 RZSize2Log(u32 rz_size) {
  41. CHECK_GE(rz_size, 16);
  42. CHECK_LE(rz_size, 2048);
  43. CHECK(IsPowerOfTwo(rz_size));
  44. u32 res = Log2(rz_size) - 4;
  45. CHECK_EQ(rz_size, RZLog2Size(res));
  46. return res;
  47. }
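// For example, rz_log == 0 encodes a 16-byte redzone and rz_log == 7 a
// 2048-byte one; RZSize2Log is the exact inverse (e.g. RZSize2Log(32) == 1).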
  48. static AsanAllocator &get_allocator();
  49. static void AtomicContextStore(volatile atomic_uint64_t *atomic_context,
  50. u32 tid, u32 stack) {
  51. u64 context = tid;
  52. context <<= 32;
  53. context += stack;
  54. atomic_store(atomic_context, context, memory_order_relaxed);
  55. }
  56. static void AtomicContextLoad(const volatile atomic_uint64_t *atomic_context,
  57. u32 &tid, u32 &stack) {
  58. u64 context = atomic_load(atomic_context, memory_order_relaxed);
  59. stack = context;
  60. context >>= 32;
  61. tid = context;
  62. }
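// The 64-bit context packs the thread id into the upper 32 bits and the stack
// depot id into the lower 32 bits, so a single relaxed 64-bit atomic store/load
// keeps the (tid, stack) pair consistent without a lock.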
  63. // The memory chunk allocated from the underlying allocator looks like this:
  64. // L L L L L L H H U U U U U U R R
  65. // L -- left redzone words (0 or more bytes)
  66. // H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
  67. // U -- user memory.
  68. // R -- right redzone (0 or more bytes)
  69. // ChunkBase consists of ChunkHeader and other bytes that overlap with user
  70. // memory.
  71. // If the left redzone is greater than the ChunkHeader size we store a magic
  72. // value in the first uptr word of the memory block and store the address of
  73. // ChunkBase in the next uptr.
  74. // M B L L L L L L L L L H H U U U U U U
  75. //   |                   ^
  76. //   --------------------|
  77. // M -- magic value kAllocBegMagic
  78. // B -- address of ChunkHeader pointing to the first 'H'
  79. class ChunkHeader {
  80. public:
  81. atomic_uint8_t chunk_state;
  82. u8 alloc_type : 2;
  83. u8 lsan_tag : 2;
  84. // align < 8 -> 0
  85. // else -> log2(min(align, 512)) - 2
  86. u8 user_requested_alignment_log : 3;
  87. private:
  88. u16 user_requested_size_hi;
  89. u32 user_requested_size_lo;
  90. atomic_uint64_t alloc_context_id;
  91. public:
  92. uptr UsedSize() const {
  93. static_assert(sizeof(user_requested_size_lo) == 4,
  94. "Expression below requires this");
  95. return FIRST_32_SECOND_64(0, ((uptr)user_requested_size_hi << 32)) +
  96. user_requested_size_lo;
  97. }
  98. void SetUsedSize(uptr size) {
  99. user_requested_size_lo = size;
  100. static_assert(sizeof(user_requested_size_lo) == 4,
  101. "Expression below requires this");
  102. user_requested_size_hi = FIRST_32_SECOND_64(0, size >> 32);
  103. CHECK_EQ(UsedSize(), size);
  104. }
  105. void SetAllocContext(u32 tid, u32 stack) {
  106. AtomicContextStore(&alloc_context_id, tid, stack);
  107. }
  108. void GetAllocContext(u32 &tid, u32 &stack) const {
  109. AtomicContextLoad(&alloc_context_id, tid, stack);
  110. }
  111. };
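// Note: on 64-bit targets the user-requested size is split into a 32-bit low
// part and a 16-bit high part, so UsedSize() can represent sizes up to 2^48;
// on 32-bit targets only the low 32 bits are used.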
  112. class ChunkBase : public ChunkHeader {
  113. atomic_uint64_t free_context_id;
  114. public:
  115. void SetFreeContext(u32 tid, u32 stack) {
  116. AtomicContextStore(&free_context_id, tid, stack);
  117. }
  118. void GetFreeContext(u32 &tid, u32 &stack) const {
  119. AtomicContextLoad(&free_context_id, tid, stack);
  120. }
  121. };
  122. static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
  123. static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
  124. COMPILER_CHECK(kChunkHeaderSize == 16);
  125. COMPILER_CHECK(kChunkHeader2Size <= 16);
  126. enum {
  127. // Either just allocated by the underlying allocator, but the AsanChunk is not
  128. // yet ready, or almost returned to the underlying allocator and the AsanChunk
  129. // is already meaningless.
  130. CHUNK_INVALID = 0,
  131. // The chunk is allocated and not yet freed.
  132. CHUNK_ALLOCATED = 2,
  133. // The chunk was freed and put into quarantine zone.
  134. CHUNK_QUARANTINE = 3,
  135. };
  136. class AsanChunk : public ChunkBase {
  137. public:
  138. uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  139. bool AddrIsInside(uptr addr) {
  140. return (addr >= Beg()) && (addr < Beg() + UsedSize());
  141. }
  142. };
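// Beg() is the user region, which starts right after the 16-byte ChunkHeader.
// The remaining ChunkBase field (free_context_id) overlaps the first
// kChunkHeader2Size bytes of that user region and only becomes meaningful once
// the chunk is freed (see FillChunk below, which skips those bytes).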
  143. class LargeChunkHeader {
  144. static constexpr uptr kAllocBegMagic =
  145. FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL);
  146. atomic_uintptr_t magic;
  147. AsanChunk *chunk_header;
  148. public:
  149. AsanChunk *Get() const {
  150. return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic
  151. ? chunk_header
  152. : nullptr;
  153. }
  154. void Set(AsanChunk *p) {
  155. if (p) {
  156. chunk_header = p;
  157. atomic_store(&magic, kAllocBegMagic, memory_order_release);
  158. return;
  159. }
  160. uptr old = kAllocBegMagic;
  161. if (!atomic_compare_exchange_strong(&magic, &old, 0,
  162. memory_order_release)) {
  163. CHECK_EQ(old, kAllocBegMagic);
  164. }
  165. }
  166. };
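// Set() publishes chunk_header before storing the magic with release ordering,
// and Get() loads the magic with acquire ordering, so a reader that observes
// kAllocBegMagic is guaranteed to also see a valid chunk_header pointer.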
  167. static void FillChunk(AsanChunk *m) {
  168. // FIXME: Use ReleaseMemoryPagesToOS.
  169. Flags &fl = *flags();
  170. if (fl.max_free_fill_size > 0) {
  171. // We have to skip the chunk header, it contains free_context_id.
  172. uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
  173. if (m->UsedSize() >= kChunkHeader2Size) { // Skip Header2 in user area.
  174. uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
  175. size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
  176. REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
  177. }
  178. }
  179. }
  180. struct QuarantineCallback {
  181. QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
  182. : cache_(cache),
  183. stack_(stack) {
  184. }
  185. void PreQuarantine(AsanChunk *m) const {
  186. FillChunk(m);
  187. // Poison the region.
  188. PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
  189. kAsanHeapFreeMagic);
  190. }
  191. void Recycle(AsanChunk *m) const {
  192. void *p = get_allocator().GetBlockBegin(m);
  193. // The secondary will immediately unpoison and unmap the memory, so this
  194. // branch is unnecessary.
  195. if (get_allocator().FromPrimary(p)) {
  196. if (p != m) {
  197. // Clear the magic value, as allocator internals may overwrite the
  198. // contents of deallocated chunk, confusing GetAsanChunk lookup.
  199. reinterpret_cast<LargeChunkHeader *>(p)->Set(nullptr);
  200. }
  201. u8 old_chunk_state = CHUNK_QUARANTINE;
  202. if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
  203. CHUNK_INVALID,
  204. memory_order_acquire)) {
  205. CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
  206. }
  207. PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
  208. kAsanHeapLeftRedzoneMagic);
  209. }
  210. // Statistics.
  211. AsanStats &thread_stats = GetCurrentThreadStats();
  212. thread_stats.real_frees++;
  213. thread_stats.really_freed += m->UsedSize();
  214. get_allocator().Deallocate(cache_, p);
  215. }
  216. void RecyclePassThrough(AsanChunk *m) const {
  217. // Recycle for the secondary will immediately unpoison and unmap the
  218. // memory, so quarantine preparation is unnecessary.
  219. if (get_allocator().FromPrimary(m)) {
  220. // The primary allocation may need pattern fill if enabled.
  221. FillChunk(m);
  222. }
  223. Recycle(m);
  224. }
  225. void *Allocate(uptr size) const {
  226. void *res = get_allocator().Allocate(cache_, size, 1);
  227. // TODO(alekseys): Consider making quarantine OOM-friendly.
  228. if (UNLIKELY(!res))
  229. ReportOutOfMemory(size, stack_);
  230. return res;
  231. }
  232. void Deallocate(void *p) const { get_allocator().Deallocate(cache_, p); }
  233. private:
  234. AllocatorCache* const cache_;
  235. BufferedStackTrace* const stack_;
  236. };
  237. typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
  238. typedef AsanQuarantine::Cache QuarantineCache;
  239. void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
  240. PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  241. // Statistics.
  242. AsanStats &thread_stats = GetCurrentThreadStats();
  243. thread_stats.mmaps++;
  244. thread_stats.mmaped += size;
  245. }
  246. void AsanMapUnmapCallback::OnMapSecondary(uptr p, uptr size, uptr user_begin,
  247. uptr user_size) const {
  248. uptr user_end = RoundDownTo(user_begin + user_size, ASAN_SHADOW_GRANULARITY);
  249. user_begin = RoundUpTo(user_begin, ASAN_SHADOW_GRANULARITY);
  250. // The secondary mapping will be returned to the user immediately, so there is
  251. // no value in poisoning it with non-zero just before Allocate() unpoisons it.
  252. // Just poison the head/tail that stays invisible to Allocate().
  253. PoisonShadow(p, user_begin - p, kAsanHeapLeftRedzoneMagic);
  254. PoisonShadow(user_end, size - (user_end - p), kAsanHeapLeftRedzoneMagic);
  255. // Statistics.
  256. AsanStats &thread_stats = GetCurrentThreadStats();
  257. thread_stats.mmaps++;
  258. thread_stats.mmaped += size;
  259. }
  260. void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  261. PoisonShadow(p, size, 0);
  262. // We are about to unmap a chunk of user memory.
  263. // Mark the corresponding shadow memory as not needed.
  264. FlushUnneededASanShadowMemory(p, size);
  265. // Statistics.
  266. AsanStats &thread_stats = GetCurrentThreadStats();
  267. thread_stats.munmaps++;
  268. thread_stats.munmaped += size;
  269. }
  270. // We cannot use THREADLOCAL because it is not supported on some of the
  271. // platforms we care about (OS X 10.6, Android).
  272. // static THREADLOCAL AllocatorCache cache;
  273. AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  274. CHECK(ms);
  275. return &ms->allocator_cache;
  276. }
  277. QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  278. CHECK(ms);
  279. CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  280. return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
  281. }
  282. void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
  283. quarantine_size_mb = f->quarantine_size_mb;
  284. thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
  285. min_redzone = f->redzone;
  286. max_redzone = f->max_redzone;
  287. may_return_null = cf->allocator_may_return_null;
  288. alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
  289. release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
  290. }
  291. void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
  292. f->quarantine_size_mb = quarantine_size_mb;
  293. f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
  294. f->redzone = min_redzone;
  295. f->max_redzone = max_redzone;
  296. cf->allocator_may_return_null = may_return_null;
  297. f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
  298. cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
  299. }
  300. struct Allocator {
  301. static const uptr kMaxAllowedMallocSize =
  302. FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);
  303. AsanAllocator allocator;
  304. AsanQuarantine quarantine;
  305. StaticSpinMutex fallback_mutex;
  306. AllocatorCache fallback_allocator_cache;
  307. QuarantineCache fallback_quarantine_cache;
  308. uptr max_user_defined_malloc_size;
  309. // ------------------- Options --------------------------
  310. atomic_uint16_t min_redzone;
  311. atomic_uint16_t max_redzone;
  312. atomic_uint8_t alloc_dealloc_mismatch;
  313. // ------------------- Initialization ------------------------
  314. explicit Allocator(LinkerInitialized)
  315. : quarantine(LINKER_INITIALIZED),
  316. fallback_quarantine_cache(LINKER_INITIALIZED) {}
  317. void CheckOptions(const AllocatorOptions &options) const {
  318. CHECK_GE(options.min_redzone, 16);
  319. CHECK_GE(options.max_redzone, options.min_redzone);
  320. CHECK_LE(options.max_redzone, 2048);
  321. CHECK(IsPowerOfTwo(options.min_redzone));
  322. CHECK(IsPowerOfTwo(options.max_redzone));
  323. }
  324. void SharedInitCode(const AllocatorOptions &options) {
  325. CheckOptions(options);
  326. quarantine.Init((uptr)options.quarantine_size_mb << 20,
  327. (uptr)options.thread_local_quarantine_size_kb << 10);
  328. atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
  329. memory_order_release);
  330. atomic_store(&min_redzone, options.min_redzone, memory_order_release);
  331. atomic_store(&max_redzone, options.max_redzone, memory_order_release);
  332. }
  333. void InitLinkerInitialized(const AllocatorOptions &options) {
  334. SetAllocatorMayReturnNull(options.may_return_null);
  335. allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
  336. SharedInitCode(options);
  337. max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
  338. ? common_flags()->max_allocation_size_mb
  339. << 20
  340. : kMaxAllowedMallocSize;
  341. }
  342. void RePoisonChunk(uptr chunk) {
  343. // This could be a user-facing chunk (with redzones), or some internal
  344. // housekeeping chunk, like TransferBatch. Start by assuming the former.
  345. AsanChunk *ac = GetAsanChunk((void *)chunk);
  346. uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)chunk);
  347. if (ac && atomic_load(&ac->chunk_state, memory_order_acquire) ==
  348. CHUNK_ALLOCATED) {
  349. uptr beg = ac->Beg();
  350. uptr end = ac->Beg() + ac->UsedSize();
  351. uptr chunk_end = chunk + allocated_size;
  352. if (chunk < beg && beg < end && end <= chunk_end) {
  353. // Looks like a valid AsanChunk in use, poison redzones only.
  354. PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
  355. uptr end_aligned_down = RoundDownTo(end, ASAN_SHADOW_GRANULARITY);
  356. FastPoisonShadowPartialRightRedzone(
  357. end_aligned_down, end - end_aligned_down,
  358. chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
  359. return;
  360. }
  361. }
  362. // This is either not an AsanChunk, or a freed or quarantined AsanChunk.
  363. // In either case, poison everything.
  364. PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
  365. }
  366. void ReInitialize(const AllocatorOptions &options) {
  367. SetAllocatorMayReturnNull(options.may_return_null);
  368. allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
  369. SharedInitCode(options);
  370. // Poison all existing allocation's redzones.
  371. if (CanPoisonMemory()) {
  372. allocator.ForceLock();
  373. allocator.ForEachChunk(
  374. [](uptr chunk, void *alloc) {
  375. ((Allocator *)alloc)->RePoisonChunk(chunk);
  376. },
  377. this);
  378. allocator.ForceUnlock();
  379. }
  380. }
  381. void GetOptions(AllocatorOptions *options) const {
  382. options->quarantine_size_mb = quarantine.GetMaxSize() >> 20;
  383. options->thread_local_quarantine_size_kb =
  384. quarantine.GetMaxCacheSize() >> 10;
  385. options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
  386. options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
  387. options->may_return_null = AllocatorMayReturnNull();
  388. options->alloc_dealloc_mismatch =
  389. atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
  390. options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
  391. }
  392. // -------------------- Helper methods. -------------------------
  393. uptr ComputeRZLog(uptr user_requested_size) {
  394. u32 rz_log = user_requested_size <= 64 - 16 ? 0
  395. : user_requested_size <= 128 - 32 ? 1
  396. : user_requested_size <= 512 - 64 ? 2
  397. : user_requested_size <= 4096 - 128 ? 3
  398. : user_requested_size <= (1 << 14) - 256 ? 4
  399. : user_requested_size <= (1 << 15) - 512 ? 5
  400. : user_requested_size <= (1 << 16) - 1024 ? 6
  401. : 7;
  402. u32 hdr_log = RZSize2Log(RoundUpToPowerOfTwo(sizeof(ChunkHeader)));
  403. u32 min_log = RZSize2Log(atomic_load(&min_redzone, memory_order_acquire));
  404. u32 max_log = RZSize2Log(atomic_load(&max_redzone, memory_order_acquire));
  405. return Min(Max(rz_log, Max(min_log, hdr_log)), Max(max_log, hdr_log));
  406. }
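// Worked example: a 100-byte request falls into the "<= 512 - 64" bucket, so
// rz_log == 2, i.e. a 64-byte redzone, before clamping against the header size
// and the min/max redzone flags.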
  407. static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
  408. if (user_requested_alignment < 8)
  409. return 0;
  410. if (user_requested_alignment > 512)
  411. user_requested_alignment = 512;
  412. return Log2(user_requested_alignment) - 2;
  413. }
  414. static uptr ComputeUserAlignment(uptr user_requested_alignment_log) {
  415. if (user_requested_alignment_log == 0)
  416. return 0;
  417. return 1LL << (user_requested_alignment_log + 2);
  418. }
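// Example: a requested alignment of 64 is stored as log value 4
// (Log2(64) - 2) and decoded back as 1 << (4 + 2) == 64; alignments below 8
// are stored as 0, which decodes to 0 ("no special alignment requested").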
  419. // We have an address between two chunks, and we want to report just one.
  420. AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
  421. AsanChunk *right_chunk) {
  422. if (!left_chunk)
  423. return right_chunk;
  424. if (!right_chunk)
  425. return left_chunk;
  426. // Prefer an allocated chunk over a freed chunk, and a freed chunk
  427. // over an available chunk.
  428. u8 left_state = atomic_load(&left_chunk->chunk_state, memory_order_relaxed);
  429. u8 right_state =
  430. atomic_load(&right_chunk->chunk_state, memory_order_relaxed);
  431. if (left_state != right_state) {
  432. if (left_state == CHUNK_ALLOCATED)
  433. return left_chunk;
  434. if (right_state == CHUNK_ALLOCATED)
  435. return right_chunk;
  436. if (left_state == CHUNK_QUARANTINE)
  437. return left_chunk;
  438. if (right_state == CHUNK_QUARANTINE)
  439. return right_chunk;
  440. }
  441. // Same chunk_state: choose based on offset.
  442. sptr l_offset = 0, r_offset = 0;
  443. CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
  444. CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
  445. if (l_offset < r_offset)
  446. return left_chunk;
  447. return right_chunk;
  448. }
  449. bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) {
  450. AsanChunk *m = GetAsanChunkByAddr(addr);
  451. if (!m) return false;
  452. if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
  453. return false;
  454. if (m->Beg() != addr) return false;
  455. AsanThread *t = GetCurrentThread();
  456. m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
  457. return true;
  458. }
  459. // -------------------- Allocation/Deallocation routines ---------------
  460. void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
  461. AllocType alloc_type, bool can_fill) {
  462. if (UNLIKELY(!AsanInited()))
  463. AsanInitFromRtl();
  464. if (UNLIKELY(IsRssLimitExceeded())) {
  465. if (AllocatorMayReturnNull())
  466. return nullptr;
  467. ReportRssLimitExceeded(stack);
  468. }
  469. Flags &fl = *flags();
  470. CHECK(stack);
  471. const uptr min_alignment = ASAN_SHADOW_GRANULARITY;
  472. const uptr user_requested_alignment_log =
  473. ComputeUserRequestedAlignmentLog(alignment);
  474. if (alignment < min_alignment)
  475. alignment = min_alignment;
  476. if (size == 0) {
  477. // We'd be happy to avoid allocating memory for zero-size requests, but
  478. // some programs/tests depend on this behavior and assume that malloc
  479. // would not return NULL even for zero-size allocations. Moreover, it
  480. // looks like operator new should never return NULL, and results of
  481. // consecutive "new" calls must be different even if the allocated size
  482. // is zero.
  483. size = 1;
  484. }
  485. CHECK(IsPowerOfTwo(alignment));
  486. uptr rz_log = ComputeRZLog(size);
  487. uptr rz_size = RZLog2Size(rz_log);
  488. uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
  489. uptr needed_size = rounded_size + rz_size;
  490. if (alignment > min_alignment)
  491. needed_size += alignment;
  492. bool from_primary = PrimaryAllocator::CanAllocate(needed_size, alignment);
  493. // If we are allocating from the secondary allocator, there will be no
  494. // automatic right redzone, so add the right redzone manually.
  495. if (!from_primary)
  496. needed_size += rz_size;
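// At this point needed_size is the left redzone (rz_size bytes, which also
// hold the ChunkHeader) plus the rounded-up user size, plus alignment slack
// for over-aligned requests, plus an explicit right redzone for the secondary
// allocator; for the primary, the right redzone falls out of the size-class
// rounding. E.g., assuming default flags and an 8-byte shadow granularity,
// malloc(100) gives rz_size == 64 and needed_size == 168 from the primary.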
  497. CHECK(IsAligned(needed_size, min_alignment));
  498. if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
  499. size > max_user_defined_malloc_size) {
  500. if (AllocatorMayReturnNull()) {
  501. Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
  502. size);
  503. return nullptr;
  504. }
  505. uptr malloc_limit =
  506. Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
  507. ReportAllocationSizeTooBig(size, needed_size, malloc_limit, stack);
  508. }
  509. AsanThread *t = GetCurrentThread();
  510. void *allocated;
  511. if (t) {
  512. AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
  513. allocated = allocator.Allocate(cache, needed_size, 8);
  514. } else {
  515. SpinMutexLock l(&fallback_mutex);
  516. AllocatorCache *cache = &fallback_allocator_cache;
  517. allocated = allocator.Allocate(cache, needed_size, 8);
  518. }
  519. if (UNLIKELY(!allocated)) {
  520. SetAllocatorOutOfMemory();
  521. if (AllocatorMayReturnNull())
  522. return nullptr;
  523. ReportOutOfMemory(size, stack);
  524. }
  525. uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  526. uptr alloc_end = alloc_beg + needed_size;
  527. uptr user_beg = alloc_beg + rz_size;
  528. if (!IsAligned(user_beg, alignment))
  529. user_beg = RoundUpTo(user_beg, alignment);
  530. uptr user_end = user_beg + size;
  531. CHECK_LE(user_end, alloc_end);
  532. uptr chunk_beg = user_beg - kChunkHeaderSize;
  533. AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  534. m->alloc_type = alloc_type;
  535. CHECK(size);
  536. m->SetUsedSize(size);
  537. m->user_requested_alignment_log = user_requested_alignment_log;
  538. m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
  539. if (!from_primary || *(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0) {
  540. // The allocator provides an unpoisoned chunk. This is possible for the
  541. // secondary allocator, or if CanPoisonMemory() was false for some time,
  542. // for example, due to flags()->start_disabled. Anyway, poison left and
  543. // right of the block before using it for anything else.
  544. uptr tail_beg = RoundUpTo(user_end, ASAN_SHADOW_GRANULARITY);
  545. uptr tail_end = alloc_beg + allocator.GetActuallyAllocatedSize(allocated);
  546. PoisonShadow(alloc_beg, user_beg - alloc_beg, kAsanHeapLeftRedzoneMagic);
  547. PoisonShadow(tail_beg, tail_end - tail_beg, kAsanHeapLeftRedzoneMagic);
  548. }
  549. uptr size_rounded_down_to_granularity =
  550. RoundDownTo(size, ASAN_SHADOW_GRANULARITY);
  551. // Unpoison the bulk of the memory region.
  552. if (size_rounded_down_to_granularity)
  553. PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  554. // Deal with the end of the region if size is not aligned to granularity.
  555. if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
  556. u8 *shadow =
  557. (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
  558. *shadow = fl.poison_partial ? (size & (ASAN_SHADOW_GRANULARITY - 1)) : 0;
  559. }
  560. AsanStats &thread_stats = GetCurrentThreadStats();
  561. thread_stats.mallocs++;
  562. thread_stats.malloced += size;
  563. thread_stats.malloced_redzones += needed_size - size;
  564. if (needed_size > SizeClassMap::kMaxSize)
  565. thread_stats.malloc_large++;
  566. else
  567. thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;
  568. void *res = reinterpret_cast<void *>(user_beg);
  569. if (can_fill && fl.max_malloc_fill_size) {
  570. uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
  571. REAL(memset)(res, fl.malloc_fill_byte, fill_size);
  572. }
  573. #if CAN_SANITIZE_LEAKS
  574. m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
  575. : __lsan::kDirectlyLeaked;
  576. #endif
  577. // Must be the last mutation of metadata in this function.
  578. atomic_store(&m->chunk_state, CHUNK_ALLOCATED, memory_order_release);
  579. if (alloc_beg != chunk_beg) {
  580. CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
  581. reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
  582. }
  583. RunMallocHooks(res, size);
  584. return res;
  585. }
  586. // Set the quarantine flag if the chunk is allocated; issue an ASan error report
  587. // on available and quarantined chunks. Return true on success, false otherwise.
  588. bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
  589. BufferedStackTrace *stack) {
  590. u8 old_chunk_state = CHUNK_ALLOCATED;
  591. // Flip the chunk_state atomically to avoid race on double-free.
  592. if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
  593. CHUNK_QUARANTINE,
  594. memory_order_acquire)) {
  595. ReportInvalidFree(ptr, old_chunk_state, stack);
  596. // It's not safe to push a chunk in quarantine on invalid free.
  597. return false;
  598. }
  599. CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
  600. // It was user data.
  601. m->SetFreeContext(kInvalidTid, 0);
  602. return true;
  603. }
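// The compare-exchange above is what makes double-free detection race-free:
// of two concurrent frees only one can move the state from CHUNK_ALLOCATED to
// CHUNK_QUARANTINE; the other observes CHUNK_QUARANTINE and reports a double
// free via ReportInvalidFree.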
  604. // Expects the chunk to already be marked as quarantined by using
  605. // AtomicallySetQuarantineFlagIfAllocated.
  606. void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
  607. CHECK_EQ(atomic_load(&m->chunk_state, memory_order_relaxed),
  608. CHUNK_QUARANTINE);
  609. AsanThread *t = GetCurrentThread();
  610. m->SetFreeContext(t ? t->tid() : 0, StackDepotPut(*stack));
  611. // Push into quarantine.
  612. if (t) {
  613. AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
  614. AllocatorCache *ac = GetAllocatorCache(ms);
  615. quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m,
  616. m->UsedSize());
  617. } else {
  618. SpinMutexLock l(&fallback_mutex);
  619. AllocatorCache *ac = &fallback_allocator_cache;
  620. quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack),
  621. m, m->UsedSize());
  622. }
  623. }
  624. void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
  625. BufferedStackTrace *stack, AllocType alloc_type) {
  626. uptr p = reinterpret_cast<uptr>(ptr);
  627. if (p == 0) return;
  628. uptr chunk_beg = p - kChunkHeaderSize;
  629. AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  630. // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
  631. // malloc. Don't report an invalid free in this case.
  632. if (SANITIZER_WINDOWS &&
  633. !get_allocator().PointerIsMine(ptr)) {
  634. if (!IsSystemHeapAddress(p))
  635. ReportFreeNotMalloced(p, stack);
  636. return;
  637. }
  638. RunFreeHooks(ptr);
  639. // Must mark the chunk as quarantined before any changes to its metadata.
  640. // Do not quarantine the chunk if we failed to set the CHUNK_QUARANTINE flag.
  641. if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;
  642. if (m->alloc_type != alloc_type) {
  643. if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
  644. ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
  645. (AllocType)alloc_type);
  646. }
  647. } else {
  648. if (flags()->new_delete_type_mismatch &&
  649. (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
  650. ((delete_size && delete_size != m->UsedSize()) ||
  651. ComputeUserRequestedAlignmentLog(delete_alignment) !=
  652. m->user_requested_alignment_log)) {
  653. ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
  654. }
  655. }
  656. AsanStats &thread_stats = GetCurrentThreadStats();
  657. thread_stats.frees++;
  658. thread_stats.freed += m->UsedSize();
  659. QuarantineChunk(m, ptr, stack);
  660. }
  661. void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
  662. CHECK(old_ptr && new_size);
  663. uptr p = reinterpret_cast<uptr>(old_ptr);
  664. uptr chunk_beg = p - kChunkHeaderSize;
  665. AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  666. AsanStats &thread_stats = GetCurrentThreadStats();
  667. thread_stats.reallocs++;
  668. thread_stats.realloced += new_size;
  669. void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
  670. if (new_ptr) {
  671. u8 chunk_state = atomic_load(&m->chunk_state, memory_order_acquire);
  672. if (chunk_state != CHUNK_ALLOCATED)
  673. ReportInvalidFree(old_ptr, chunk_state, stack);
  674. CHECK_NE(REAL(memcpy), nullptr);
  675. uptr memcpy_size = Min(new_size, m->UsedSize());
  676. // If realloc() races with free(), we may start copying freed memory.
  677. // However, we will report racy double-free later anyway.
  678. REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
  679. Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
  680. }
  681. return new_ptr;
  682. }
  683. void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  684. if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
  685. if (AllocatorMayReturnNull())
  686. return nullptr;
  687. ReportCallocOverflow(nmemb, size, stack);
  688. }
  689. void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
  690. // If the memory comes from the secondary allocator, there is no need to
  691. // clear it, as it comes directly from mmap.
  692. if (ptr && allocator.FromPrimary(ptr))
  693. REAL(memset)(ptr, 0, nmemb * size);
  694. return ptr;
  695. }
  696. void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
  697. if (chunk_state == CHUNK_QUARANTINE)
  698. ReportDoubleFree((uptr)ptr, stack);
  699. else
  700. ReportFreeNotMalloced((uptr)ptr, stack);
  701. }
  702. void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
  703. AllocatorCache *ac = GetAllocatorCache(ms);
  704. quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
  705. allocator.SwallowCache(ac);
  706. }
  707. // -------------------------- Chunk lookup ----------------------
  708. // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  709. // Returns nullptr if AsanChunk is not yet initialized just after
  710. // get_allocator().Allocate(), or is being destroyed just before
  711. // get_allocator().Deallocate().
  712. AsanChunk *GetAsanChunk(void *alloc_beg) {
  713. if (!alloc_beg)
  714. return nullptr;
  715. AsanChunk *p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get();
  716. if (!p) {
  717. if (!allocator.FromPrimary(alloc_beg))
  718. return nullptr;
  719. p = reinterpret_cast<AsanChunk *>(alloc_beg);
  720. }
  721. u8 state = atomic_load(&p->chunk_state, memory_order_relaxed);
  722. // These states do not guarantee that the chunk is initialized, but any
  723. // other value definitely means it is not a valid chunk.
  724. if (state == CHUNK_ALLOCATED || state == CHUNK_QUARANTINE)
  725. return p;
  726. return nullptr;
  727. }
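// Note: Allocate() stores the LargeChunkHeader back-pointer only when the left
// redzone is larger than the 16-byte ChunkHeader (alloc_beg != chunk_beg).
// When the magic is absent, a primary block can still hold an AsanChunk at
// alloc_beg itself (minimal redzone), so we fall back to a direct cast and let
// the chunk_state check below filter out everything else.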
  728. AsanChunk *GetAsanChunkByAddr(uptr p) {
  729. void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
  730. return GetAsanChunk(alloc_beg);
  731. }
  732. // Allocator must be locked when this function is called.
  733. AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
  734. void *alloc_beg =
  735. allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
  736. return GetAsanChunk(alloc_beg);
  737. }
  738. uptr AllocationSize(uptr p) {
  739. AsanChunk *m = GetAsanChunkByAddr(p);
  740. if (!m) return 0;
  741. if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
  742. return 0;
  743. if (m->Beg() != p) return 0;
  744. return m->UsedSize();
  745. }
  746. uptr AllocationSizeFast(uptr p) {
  747. return reinterpret_cast<AsanChunk *>(p - kChunkHeaderSize)->UsedSize();
  748. }
  749. AsanChunkView FindHeapChunkByAddress(uptr addr) {
  750. AsanChunk *m1 = GetAsanChunkByAddr(addr);
  751. sptr offset = 0;
  752. if (!m1 || AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
  753. // The address is in the chunk's left redzone, so it may actually be
  754. // a right buffer overflow from the chunk located just before this one.
  755. // Search a bit backwards to see if there is another chunk.
  756. AsanChunk *m2 = nullptr;
  757. for (uptr l = 1; l < GetPageSizeCached(); l++) {
  758. m2 = GetAsanChunkByAddr(addr - l);
  759. if (m2 == m1) continue; // Still the same chunk.
  760. break;
  761. }
  762. if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
  763. m1 = ChooseChunk(addr, m2, m1);
  764. }
  765. return AsanChunkView(m1);
  766. }
  767. void Purge(BufferedStackTrace *stack) {
  768. AsanThread *t = GetCurrentThread();
  769. if (t) {
  770. AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
  771. quarantine.DrainAndRecycle(GetQuarantineCache(ms),
  772. QuarantineCallback(GetAllocatorCache(ms),
  773. stack));
  774. }
  775. {
  776. SpinMutexLock l(&fallback_mutex);
  777. quarantine.DrainAndRecycle(&fallback_quarantine_cache,
  778. QuarantineCallback(&fallback_allocator_cache,
  779. stack));
  780. }
  781. allocator.ForceReleaseToOS();
  782. }
  783. void PrintStats() {
  784. allocator.PrintStats();
  785. quarantine.PrintStats();
  786. }
  787. void ForceLock() SANITIZER_ACQUIRE(fallback_mutex) {
  788. allocator.ForceLock();
  789. fallback_mutex.Lock();
  790. }
  791. void ForceUnlock() SANITIZER_RELEASE(fallback_mutex) {
  792. fallback_mutex.Unlock();
  793. allocator.ForceUnlock();
  794. }
  795. };
  796. static Allocator instance(LINKER_INITIALIZED);
  797. static AsanAllocator &get_allocator() {
  798. return instance.allocator;
  799. }
  800. bool AsanChunkView::IsValid() const {
  801. return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) !=
  802. CHUNK_INVALID;
  803. }
  804. bool AsanChunkView::IsAllocated() const {
  805. return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
  806. CHUNK_ALLOCATED;
  807. }
  808. bool AsanChunkView::IsQuarantined() const {
  809. return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
  810. CHUNK_QUARANTINE;
  811. }
  812. uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
  813. uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
  814. uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
  815. u32 AsanChunkView::UserRequestedAlignment() const {
  816. return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
  817. }
  818. uptr AsanChunkView::AllocTid() const {
  819. u32 tid = 0;
  820. u32 stack = 0;
  821. chunk_->GetAllocContext(tid, stack);
  822. return tid;
  823. }
  824. uptr AsanChunkView::FreeTid() const {
  825. if (!IsQuarantined())
  826. return kInvalidTid;
  827. u32 tid = 0;
  828. u32 stack = 0;
  829. chunk_->GetFreeContext(tid, stack);
  830. return tid;
  831. }
  832. AllocType AsanChunkView::GetAllocType() const {
  833. return (AllocType)chunk_->alloc_type;
  834. }
  835. u32 AsanChunkView::GetAllocStackId() const {
  836. u32 tid = 0;
  837. u32 stack = 0;
  838. chunk_->GetAllocContext(tid, stack);
  839. return stack;
  840. }
  841. u32 AsanChunkView::GetFreeStackId() const {
  842. if (!IsQuarantined())
  843. return 0;
  844. u32 tid = 0;
  845. u32 stack = 0;
  846. chunk_->GetFreeContext(tid, stack);
  847. return stack;
  848. }
  849. void InitializeAllocator(const AllocatorOptions &options) {
  850. instance.InitLinkerInitialized(options);
  851. }
  852. void ReInitializeAllocator(const AllocatorOptions &options) {
  853. instance.ReInitialize(options);
  854. }
  855. void GetAllocatorOptions(AllocatorOptions *options) {
  856. instance.GetOptions(options);
  857. }
  858. AsanChunkView FindHeapChunkByAddress(uptr addr) {
  859. return instance.FindHeapChunkByAddress(addr);
  860. }
  861. AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
  862. return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
  863. }
  864. void AsanThreadLocalMallocStorage::CommitBack() {
  865. GET_STACK_TRACE_MALLOC;
  866. instance.CommitBack(this, &stack);
  867. }
  868. void PrintInternalAllocatorStats() {
  869. instance.PrintStats();
  870. }
  871. void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  872. instance.Deallocate(ptr, 0, 0, stack, alloc_type);
  873. }
  874. void asan_delete(void *ptr, uptr size, uptr alignment,
  875. BufferedStackTrace *stack, AllocType alloc_type) {
  876. instance.Deallocate(ptr, size, alignment, stack, alloc_type);
  877. }
  878. void *asan_malloc(uptr size, BufferedStackTrace *stack) {
  879. return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
  880. }
  881. void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  882. return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
  883. }
  884. void *asan_reallocarray(void *p, uptr nmemb, uptr size,
  885. BufferedStackTrace *stack) {
  886. if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
  887. errno = errno_ENOMEM;
  888. if (AllocatorMayReturnNull())
  889. return nullptr;
  890. ReportReallocArrayOverflow(nmemb, size, stack);
  891. }
  892. return asan_realloc(p, nmemb * size, stack);
  893. }
  894. void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  895. if (!p)
  896. return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
  897. if (size == 0) {
  898. if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
  899. instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
  900. return nullptr;
  901. }
  902. // Allocate a size of 1 if we shouldn't free() on Realloc to 0
  903. size = 1;
  904. }
  905. return SetErrnoOnNull(instance.Reallocate(p, size, stack));
  906. }
  907. void *asan_valloc(uptr size, BufferedStackTrace *stack) {
  908. return SetErrnoOnNull(
  909. instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
  910. }
  911. void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
  912. uptr PageSize = GetPageSizeCached();
  913. if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
  914. errno = errno_ENOMEM;
  915. if (AllocatorMayReturnNull())
  916. return nullptr;
  917. ReportPvallocOverflow(size, stack);
  918. }
  919. // pvalloc(0) should allocate one page.
  920. size = size ? RoundUpTo(size, PageSize) : PageSize;
  921. return SetErrnoOnNull(
  922. instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
  923. }
  924. void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
  925. AllocType alloc_type) {
  926. if (UNLIKELY(!IsPowerOfTwo(alignment))) {
  927. errno = errno_EINVAL;
  928. if (AllocatorMayReturnNull())
  929. return nullptr;
  930. ReportInvalidAllocationAlignment(alignment, stack);
  931. }
  932. return SetErrnoOnNull(
  933. instance.Allocate(size, alignment, stack, alloc_type, true));
  934. }
  935. void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
  936. if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
  937. errno = errno_EINVAL;
  938. if (AllocatorMayReturnNull())
  939. return nullptr;
  940. ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  941. }
  942. return SetErrnoOnNull(
  943. instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
  944. }
  945. int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
  946. BufferedStackTrace *stack) {
  947. if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
  948. if (AllocatorMayReturnNull())
  949. return errno_EINVAL;
  950. ReportInvalidPosixMemalignAlignment(alignment, stack);
  951. }
  952. void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
  953. if (UNLIKELY(!ptr))
  954. // OOM error is already taken care of by Allocate.
  955. return errno_ENOMEM;
  956. CHECK(IsAligned((uptr)ptr, alignment));
  957. *memptr = ptr;
  958. return 0;
  959. }
  960. uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
  961. if (!ptr) return 0;
  962. uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  963. if (flags()->check_malloc_usable_size && (usable_size == 0)) {
  964. GET_STACK_TRACE_FATAL(pc, bp);
  965. ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  966. }
  967. return usable_size;
  968. }
  969. uptr asan_mz_size(const void *ptr) {
  970. return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  971. }
  972. void asan_mz_force_lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  973. instance.ForceLock();
  974. }
  975. void asan_mz_force_unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  976. instance.ForceUnlock();
  977. }
  978. } // namespace __asan
  979. // --- Implementation of LSan-specific functions --- {{{1
  980. namespace __lsan {
  981. void LockAllocator() {
  982. __asan::get_allocator().ForceLock();
  983. }
  984. void UnlockAllocator() {
  985. __asan::get_allocator().ForceUnlock();
  986. }
  987. void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  988. *begin = (uptr)&__asan::get_allocator();
  989. *end = *begin + sizeof(__asan::get_allocator());
  990. }
  991. uptr PointsIntoChunk(void *p) {
  992. uptr addr = reinterpret_cast<uptr>(p);
  993. __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
  994. if (!m || atomic_load(&m->chunk_state, memory_order_acquire) !=
  995. __asan::CHUNK_ALLOCATED)
  996. return 0;
  997. uptr chunk = m->Beg();
  998. if (m->AddrIsInside(addr))
  999. return chunk;
  1000. if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(), addr))
  1001. return chunk;
  1002. return 0;
  1003. }
  1004. uptr GetUserBegin(uptr chunk) {
  1005. // FIXME: All use cases provide the chunk address, so GetAsanChunkByAddrFastLocked
  1006. // is not needed.
  1007. __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
  1008. return m ? m->Beg() : 0;
  1009. }
  1010. uptr GetUserAddr(uptr chunk) {
  1011. return chunk;
  1012. }
  1013. LsanMetadata::LsanMetadata(uptr chunk) {
  1014. metadata_ = chunk ? reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize)
  1015. : nullptr;
  1016. }
  1017. bool LsanMetadata::allocated() const {
  1018. if (!metadata_)
  1019. return false;
  1020. __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  1021. return atomic_load(&m->chunk_state, memory_order_relaxed) ==
  1022. __asan::CHUNK_ALLOCATED;
  1023. }
  1024. ChunkTag LsanMetadata::tag() const {
  1025. __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  1026. return static_cast<ChunkTag>(m->lsan_tag);
  1027. }
  1028. void LsanMetadata::set_tag(ChunkTag value) {
  1029. __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  1030. m->lsan_tag = value;
  1031. }
  1032. uptr LsanMetadata::requested_size() const {
  1033. __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  1034. return m->UsedSize();
  1035. }
  1036. u32 LsanMetadata::stack_trace_id() const {
  1037. __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  1038. u32 tid = 0;
  1039. u32 stack = 0;
  1040. m->GetAllocContext(tid, stack);
  1041. return stack;
  1042. }
  1043. void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  1044. __asan::get_allocator().ForEachChunk(callback, arg);
  1045. }
  1046. IgnoreObjectResult IgnoreObject(const void *p) {
  1047. uptr addr = reinterpret_cast<uptr>(p);
  1048. __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
  1049. if (!m ||
  1050. (atomic_load(&m->chunk_state, memory_order_acquire) !=
  1051. __asan::CHUNK_ALLOCATED) ||
  1052. !m->AddrIsInside(addr)) {
  1053. return kIgnoreObjectInvalid;
  1054. }
  1055. if (m->lsan_tag == kIgnored)
  1056. return kIgnoreObjectAlreadyIgnored;
  1057. m->lsan_tag = __lsan::kIgnored;
  1058. return kIgnoreObjectSuccess;
  1059. }
  1060. } // namespace __lsan
  1061. // ---------------------- Interface ---------------- {{{1
  1062. using namespace __asan;
  1063. static const void *AllocationBegin(const void *p) {
  1064. AsanChunk *m = __asan::instance.GetAsanChunkByAddr((uptr)p);
  1065. if (!m)
  1066. return nullptr;
  1067. if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
  1068. return nullptr;
  1069. if (m->UsedSize() == 0)
  1070. return nullptr;
  1071. return (const void *)(m->Beg());
  1072. }
  1073. // The ASan allocator doesn't reserve extra bytes, so normally we would
  1074. // just return "size". We don't want to expose our redzone sizes, etc., here.
  1075. uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  1076. return size;
  1077. }
  1078. int __sanitizer_get_ownership(const void *p) {
  1079. uptr ptr = reinterpret_cast<uptr>(p);
  1080. return instance.AllocationSize(ptr) > 0;
  1081. }
  1082. uptr __sanitizer_get_allocated_size(const void *p) {
  1083. if (!p) return 0;
  1084. uptr ptr = reinterpret_cast<uptr>(p);
  1085. uptr allocated_size = instance.AllocationSize(ptr);
  1086. // Die if p is not malloced or if it is already freed.
  1087. if (allocated_size == 0) {
  1088. GET_STACK_TRACE_FATAL_HERE;
  1089. ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
  1090. }
  1091. return allocated_size;
  1092. }
  1093. uptr __sanitizer_get_allocated_size_fast(const void *p) {
  1094. DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  1095. uptr ret = instance.AllocationSizeFast(reinterpret_cast<uptr>(p));
  1096. DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  1097. return ret;
  1098. }
  1099. const void *__sanitizer_get_allocated_begin(const void *p) {
  1100. return AllocationBegin(p);
  1101. }
  1102. void __sanitizer_purge_allocator() {
  1103. GET_STACK_TRACE_MALLOC;
  1104. instance.Purge(&stack);
  1105. }
  1106. int __asan_update_allocation_context(void* addr) {
  1107. GET_STACK_TRACE_MALLOC;
  1108. return instance.UpdateAllocationStack((uptr)addr, &stack);
  1109. }