  1. //===-- combined.h ----------------------------------------------*- C++ -*-===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. #ifndef SCUDO_COMBINED_H_
  9. #define SCUDO_COMBINED_H_
  10. #include "chunk.h"
  11. #include "common.h"
  12. #include "flags.h"
  13. #include "flags_parser.h"
  14. #include "local_cache.h"
  15. #include "memtag.h"
  16. #include "options.h"
  17. #include "quarantine.h"
  18. #include "report.h"
  19. #include "rss_limit_checker.h"
  20. #include "secondary.h"
  21. #include "stack_depot.h"
  22. #include "string_utils.h"
  23. #include "tsd.h"
  24. #include "scudo/interface.h"
  25. #ifdef GWP_ASAN_HOOKS
  26. #include "gwp_asan/guarded_pool_allocator.h"
  27. #include "gwp_asan/optional/backtrace.h"
  28. #include "gwp_asan/optional/segv_handler.h"
  29. #endif // GWP_ASAN_HOOKS
  30. extern "C" inline void EmptyCallback() {}
  31. #ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
  32. // This function is not part of the NDK so it does not appear in any public
  33. // header files. We only declare/use it when targeting the platform.
  34. extern "C" size_t android_unsafe_frame_pointer_chase(scudo::uptr *buf,
  35. size_t num_entries);
  36. #endif
  37. namespace scudo {
  38. template <class Params, void (*PostInitCallback)(void) = EmptyCallback>
  39. class Allocator {
  40. public:
  41. using PrimaryT = typename Params::Primary;
  42. using CacheT = typename PrimaryT::CacheT;
  43. typedef Allocator<Params, PostInitCallback> ThisT;
  44. typedef typename Params::template TSDRegistryT<ThisT> TSDRegistryT;
  45. void callPostInitCallback() {
  46. pthread_once(&PostInitNonce, PostInitCallback);
  47. }
  48. struct QuarantineCallback {
  49. explicit QuarantineCallback(ThisT &Instance, CacheT &LocalCache)
  50. : Allocator(Instance), Cache(LocalCache) {}
  51. // Chunk recycling function, returns a quarantined chunk to the backend,
  52. // first making sure it hasn't been tampered with.
  53. void recycle(void *Ptr) {
  54. Chunk::UnpackedHeader Header;
  55. Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
  56. if (UNLIKELY(Header.State != Chunk::State::Quarantined))
  57. reportInvalidChunkState(AllocatorAction::Recycling, Ptr);
  58. Chunk::UnpackedHeader NewHeader = Header;
  59. NewHeader.State = Chunk::State::Available;
  60. Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
  61. if (allocatorSupportsMemoryTagging<Params>())
  62. Ptr = untagPointer(Ptr);
  63. void *BlockBegin = Allocator::getBlockBegin(Ptr, &NewHeader);
  64. Cache.deallocate(NewHeader.ClassId, BlockBegin);
  65. }
  66. // We take a shortcut when allocating a quarantine batch by working with the
  67. // appropriate class ID instead of using Size. The compiler should optimize
  68. // the class ID computation and work with the associated cache directly.
  69. void *allocate(UNUSED uptr Size) {
  70. const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
  71. sizeof(QuarantineBatch) + Chunk::getHeaderSize());
  72. void *Ptr = Cache.allocate(QuarantineClassId);
  73. // Quarantine batch allocation failure is fatal.
  74. if (UNLIKELY(!Ptr))
  75. reportOutOfMemory(SizeClassMap::getSizeByClassId(QuarantineClassId));
  76. Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) +
  77. Chunk::getHeaderSize());
  78. Chunk::UnpackedHeader Header = {};
  79. Header.ClassId = QuarantineClassId & Chunk::ClassIdMask;
  80. Header.SizeOrUnusedBytes = sizeof(QuarantineBatch);
  81. Header.State = Chunk::State::Allocated;
  82. Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
  83. // Reset tag to 0 as this chunk may have been previously used for a tagged
  84. // user allocation.
  85. if (UNLIKELY(useMemoryTagging<Params>(Allocator.Primary.Options.load())))
  86. storeTags(reinterpret_cast<uptr>(Ptr),
  87. reinterpret_cast<uptr>(Ptr) + sizeof(QuarantineBatch));
  88. return Ptr;
  89. }
  90. void deallocate(void *Ptr) {
  91. const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
  92. sizeof(QuarantineBatch) + Chunk::getHeaderSize());
  93. Chunk::UnpackedHeader Header;
  94. Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
  95. if (UNLIKELY(Header.State != Chunk::State::Allocated))
  96. reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
  97. DCHECK_EQ(Header.ClassId, QuarantineClassId);
  98. DCHECK_EQ(Header.Offset, 0);
  99. DCHECK_EQ(Header.SizeOrUnusedBytes, sizeof(QuarantineBatch));
  100. Chunk::UnpackedHeader NewHeader = Header;
  101. NewHeader.State = Chunk::State::Available;
  102. Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
  103. Cache.deallocate(QuarantineClassId,
  104. reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
  105. Chunk::getHeaderSize()));
  106. }
  107. private:
  108. ThisT &Allocator;
  109. CacheT &Cache;
  110. };
  111. typedef GlobalQuarantine<QuarantineCallback, void> QuarantineT;
  112. typedef typename QuarantineT::CacheT QuarantineCacheT;
  113. void init() {
  114. performSanityChecks();
  115. // Check if hardware CRC32 is supported both in the binary and by the
  116. // platform; if so, opt for the hardware CRC32 version of the checksum.
  117. if (&computeHardwareCRC32 && hasHardwareCRC32())
  118. HashAlgorithm = Checksum::HardwareCRC32;
  119. if (UNLIKELY(!getRandom(&Cookie, sizeof(Cookie))))
  120. Cookie = static_cast<u32>(getMonotonicTime() ^
  121. (reinterpret_cast<uptr>(this) >> 4));
  122. initFlags();
  123. reportUnrecognizedFlags();
  124. RssChecker.init(scudo::getFlags()->soft_rss_limit_mb,
  125. scudo::getFlags()->hard_rss_limit_mb);
  126. // Store some flags locally.
  127. if (getFlags()->may_return_null)
  128. Primary.Options.set(OptionBit::MayReturnNull);
  129. if (getFlags()->zero_contents)
  130. Primary.Options.setFillContentsMode(ZeroFill);
  131. else if (getFlags()->pattern_fill_contents)
  132. Primary.Options.setFillContentsMode(PatternOrZeroFill);
  133. if (getFlags()->dealloc_type_mismatch)
  134. Primary.Options.set(OptionBit::DeallocTypeMismatch);
  135. if (getFlags()->delete_size_mismatch)
  136. Primary.Options.set(OptionBit::DeleteSizeMismatch);
  137. if (allocatorSupportsMemoryTagging<Params>() &&
  138. systemSupportsMemoryTagging())
  139. Primary.Options.set(OptionBit::UseMemoryTagging);
  140. Primary.Options.set(OptionBit::UseOddEvenTags);
  141. QuarantineMaxChunkSize =
  142. static_cast<u32>(getFlags()->quarantine_max_chunk_size);
  143. Stats.init();
  144. const s32 ReleaseToOsIntervalMs = getFlags()->release_to_os_interval_ms;
  145. Primary.init(ReleaseToOsIntervalMs);
  146. Secondary.init(&Stats, ReleaseToOsIntervalMs);
  147. Quarantine.init(
  148. static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
  149. static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
  150. initRingBuffer();
  151. }
  152. // Initialize the embedded GWP-ASan instance. Requires the main allocator to
  153. // be functional, best called from PostInitCallback.
  154. void initGwpAsan() {
  155. #ifdef GWP_ASAN_HOOKS
  156. gwp_asan::options::Options Opt;
  157. Opt.Enabled = getFlags()->GWP_ASAN_Enabled;
  158. Opt.MaxSimultaneousAllocations =
  159. getFlags()->GWP_ASAN_MaxSimultaneousAllocations;
  160. Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate;
  161. Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers;
  162. Opt.Recoverable = getFlags()->GWP_ASAN_Recoverable;
  163. // Embedded GWP-ASan is locked through the Scudo atfork handler (via
  164. // Allocator::disable calling GWPASan.disable). Disable GWP-ASan's atfork
  165. // handler.
  166. Opt.InstallForkHandlers = false;
  167. Opt.Backtrace = gwp_asan::backtrace::getBacktraceFunction();
  168. GuardedAlloc.init(Opt);
  169. if (Opt.InstallSignalHandlers)
  170. gwp_asan::segv_handler::installSignalHandlers(
  171. &GuardedAlloc, Printf,
  172. gwp_asan::backtrace::getPrintBacktraceFunction(),
  173. gwp_asan::backtrace::getSegvBacktraceFunction(),
  174. Opt.Recoverable);
  175. GuardedAllocSlotSize =
  176. GuardedAlloc.getAllocatorState()->maximumAllocationSize();
  177. Stats.add(StatFree, static_cast<uptr>(Opt.MaxSimultaneousAllocations) *
  178. GuardedAllocSlotSize);
  179. #endif // GWP_ASAN_HOOKS
  180. }
  181. #ifdef GWP_ASAN_HOOKS
  182. const gwp_asan::AllocationMetadata *getGwpAsanAllocationMetadata() {
  183. return GuardedAlloc.getMetadataRegion();
  184. }
  185. const gwp_asan::AllocatorState *getGwpAsanAllocatorState() {
  186. return GuardedAlloc.getAllocatorState();
  187. }
  188. #endif // GWP_ASAN_HOOKS
  189. ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
  190. TSDRegistry.initThreadMaybe(this, MinimalInit);
  191. }
  192. void unmapTestOnly() {
  193. TSDRegistry.unmapTestOnly(this);
  194. Primary.unmapTestOnly();
  195. Secondary.unmapTestOnly();
  196. #ifdef GWP_ASAN_HOOKS
  197. if (getFlags()->GWP_ASAN_InstallSignalHandlers)
  198. gwp_asan::segv_handler::uninstallSignalHandlers();
  199. GuardedAlloc.uninitTestOnly();
  200. #endif // GWP_ASAN_HOOKS
  201. }
  202. TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
  203. // The Cache must be provided zero-initialized.
  204. void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); }
  205. // Release the resources used by a TSD, which involves:
  206. // - draining the local quarantine cache to the global quarantine;
  207. // - releasing the cached pointers back to the Primary;
  208. // - unlinking the local stats from the global ones (destroying the cache does
  209. // the last two items).
  210. void commitBack(TSD<ThisT> *TSD) {
  211. Quarantine.drain(&TSD->QuarantineCache,
  212. QuarantineCallback(*this, TSD->Cache));
  213. TSD->Cache.destroy(&Stats);
  214. }
  215. ALWAYS_INLINE void *getHeaderTaggedPointer(void *Ptr) {
  216. if (!allocatorSupportsMemoryTagging<Params>())
  217. return Ptr;
  218. auto UntaggedPtr = untagPointer(Ptr);
  219. if (UntaggedPtr != Ptr)
  220. return UntaggedPtr;
  221. // Secondary, or pointer allocated while memory tagging is unsupported or
  222. // disabled. The tag mismatch is okay in the latter case because tags will
  223. // not be checked.
  224. return addHeaderTag(Ptr);
  225. }
  226. ALWAYS_INLINE uptr addHeaderTag(uptr Ptr) {
  227. if (!allocatorSupportsMemoryTagging<Params>())
  228. return Ptr;
  229. return addFixedTag(Ptr, 2);
  230. }
  231. ALWAYS_INLINE void *addHeaderTag(void *Ptr) {
  232. return reinterpret_cast<void *>(addHeaderTag(reinterpret_cast<uptr>(Ptr)));
  233. }
  234. NOINLINE u32 collectStackTrace() {
  235. #ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
  236. // Discard collectStackTrace() frame and allocator function frame.
  237. constexpr uptr DiscardFrames = 2;
  238. uptr Stack[MaxTraceSize + DiscardFrames];
  239. uptr Size =
  240. android_unsafe_frame_pointer_chase(Stack, MaxTraceSize + DiscardFrames);
  241. Size = Min<uptr>(Size, MaxTraceSize + DiscardFrames);
  242. return Depot.insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
  243. #else
  244. return 0;
  245. #endif
  246. }
  247. uptr computeOddEvenMaskForPointerMaybe(Options Options, uptr Ptr,
  248. uptr ClassId) {
  249. if (!Options.get(OptionBit::UseOddEvenTags))
  250. return 0;
  251. // If a chunk's tag is odd, we want the tags of the surrounding blocks to be
  252. // even, and vice versa. Blocks are laid out Size bytes apart, and adding
  253. // Size to Ptr will flip the least significant set bit of Size in Ptr, so
  254. // that bit will have the pattern 010101... for consecutive blocks, which we
  255. // can use to determine which tag mask to use.
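// Illustrative example (assuming a 48-byte size class, 0b110000): the least
// significant set bit of the size is bit 4, so bit 4 of the block address
// alternates between adjacent blocks. Blocks with that bit clear receive the
// exclude mask 0x5555 (even tags excluded, so an odd tag is chosen), while
// their immediate neighbors receive 0xAAAA (odd tags excluded, so an even
// tag is chosen).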
  256. return 0x5555U << ((Ptr >> SizeClassMap::getSizeLSBByClassId(ClassId)) & 1);
  257. }
  258. NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
  259. uptr Alignment = MinAlignment,
  260. bool ZeroContents = false) {
  261. initThreadMaybe();
  262. const Options Options = Primary.Options.load();
  263. if (UNLIKELY(Alignment > MaxAlignment)) {
  264. if (Options.get(OptionBit::MayReturnNull))
  265. return nullptr;
  266. reportAlignmentTooBig(Alignment, MaxAlignment);
  267. }
  268. if (Alignment < MinAlignment)
  269. Alignment = MinAlignment;
  270. #ifdef GWP_ASAN_HOOKS
  271. if (UNLIKELY(GuardedAlloc.shouldSample())) {
  272. if (void *Ptr = GuardedAlloc.allocate(Size, Alignment)) {
  273. if (UNLIKELY(&__scudo_allocate_hook))
  274. __scudo_allocate_hook(Ptr, Size);
  275. Stats.lock();
  276. Stats.add(StatAllocated, GuardedAllocSlotSize);
  277. Stats.sub(StatFree, GuardedAllocSlotSize);
  278. Stats.unlock();
  279. return Ptr;
  280. }
  281. }
  282. #endif // GWP_ASAN_HOOKS
  283. const FillContentsMode FillContents = ZeroContents ? ZeroFill
  284. : TSDRegistry.getDisableMemInit()
  285. ? NoFill
  286. : Options.getFillContentsMode();
  287. // If the requested size happens to be 0 (more common than you might think),
  288. // allocate MinAlignment bytes on top of the header. Then add the extra
  289. // bytes required to fulfill the alignment requirements: we allocate enough
  290. // to be sure that there will be an address in the block that will satisfy
  291. // the alignment.
  292. const uptr NeededSize =
  293. roundUpTo(Size, MinAlignment) +
  294. ((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());
  295. // Takes care of extravagantly large sizes as well as integer overflows.
  296. static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
  297. if (UNLIKELY(Size >= MaxAllowedMallocSize)) {
  298. if (Options.get(OptionBit::MayReturnNull))
  299. return nullptr;
  300. reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
  301. }
  302. DCHECK_LE(Size, NeededSize);
  303. switch (RssChecker.getRssLimitExceeded()) {
  304. case RssLimitChecker::Neither:
  305. break;
  306. case RssLimitChecker::Soft:
  307. if (Options.get(OptionBit::MayReturnNull))
  308. return nullptr;
  309. reportSoftRSSLimit(RssChecker.getSoftRssLimit());
  310. break;
  311. case RssLimitChecker::Hard:
  312. reportHardRSSLimit(RssChecker.getHardRssLimit());
  313. break;
  314. }
  315. void *Block = nullptr;
  316. uptr ClassId = 0;
  317. uptr SecondaryBlockEnd = 0;
  318. if (LIKELY(PrimaryT::canAllocate(NeededSize))) {
  319. ClassId = SizeClassMap::getClassIdBySize(NeededSize);
  320. DCHECK_NE(ClassId, 0U);
  321. bool UnlockRequired;
  322. auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
  323. Block = TSD->Cache.allocate(ClassId);
  324. // If the allocation failed, the most likely reason with a 32-bit primary
  325. // is the region being full. In that event, retry in each successively
  326. // larger class until it fits. If it fails to fit in the largest class,
  327. // fall back to the Secondary.
  328. if (UNLIKELY(!Block)) {
  329. while (ClassId < SizeClassMap::LargestClassId && !Block)
  330. Block = TSD->Cache.allocate(++ClassId);
  331. if (!Block)
  332. ClassId = 0;
  333. }
  334. if (UnlockRequired)
  335. TSD->unlock();
  336. }
  337. if (UNLIKELY(ClassId == 0))
  338. Block = Secondary.allocate(Options, Size, Alignment, &SecondaryBlockEnd,
  339. FillContents);
  340. if (UNLIKELY(!Block)) {
  341. if (Options.get(OptionBit::MayReturnNull))
  342. return nullptr;
  343. reportOutOfMemory(NeededSize);
  344. }
  345. const uptr BlockUptr = reinterpret_cast<uptr>(Block);
  346. const uptr UnalignedUserPtr = BlockUptr + Chunk::getHeaderSize();
  347. const uptr UserPtr = roundUpTo(UnalignedUserPtr, Alignment);
  348. void *Ptr = reinterpret_cast<void *>(UserPtr);
  349. void *TaggedPtr = Ptr;
  350. if (LIKELY(ClassId)) {
  351. // We only need to zero or tag the contents for Primary backed
  352. // allocations. We only set tags for primary allocations in order to avoid
  353. // faulting potentially large numbers of pages for large secondary
  354. // allocations. We assume that guard pages are enough to protect these
  355. // allocations.
  356. //
  357. // FIXME: When the kernel provides a way to set the background tag of a
  358. // mapping, we should be able to tag secondary allocations as well.
  359. //
  360. // When memory tagging is enabled, zeroing the contents is done as part of
  361. // setting the tag.
  362. if (UNLIKELY(useMemoryTagging<Params>(Options))) {
  363. uptr PrevUserPtr;
  364. Chunk::UnpackedHeader Header;
  365. const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId);
  366. const uptr BlockEnd = BlockUptr + BlockSize;
  367. // If possible, try to reuse the UAF tag that was set by deallocate().
  368. // For simplicity, only reuse tags if we have the same start address as
  369. // the previous allocation. This handles the majority of cases since
  370. // most allocations will not be more aligned than the minimum alignment.
  371. //
  372. // We need to handle situations involving reclaimed chunks, and retag
  373. // the reclaimed portions if necessary. In the case where the chunk is
  374. // fully reclaimed, the chunk's header will be zero, which will trigger
  375. // the code path for new mappings and invalid chunks that prepares the
  376. // chunk from scratch. There are three possibilities for partial
  377. // reclaiming:
  378. //
  379. // (1) Header was reclaimed, data was partially reclaimed.
  380. // (2) Header was not reclaimed, all data was reclaimed (e.g. because
  381. // data started on a page boundary).
  382. // (3) Header was not reclaimed, data was partially reclaimed.
  383. //
  384. // Case (1) will be handled in the same way as for full reclaiming,
  385. // since the header will be zero.
  386. //
  387. // We can detect case (2) by loading the tag from the start
  388. // of the chunk. If it is zero, it means that either all data was
  389. // reclaimed (since we never use zero as the chunk tag), or that the
  390. // previous allocation was of size zero. Either way, we need to prepare
  391. // a new chunk from scratch.
  392. //
  393. // We can detect case (3) by moving to the next page (if covered by the
  394. // chunk) and loading the tag of its first granule. If it is zero, it
  395. // means that all following pages may need to be retagged. On the other
  396. // hand, if it is nonzero, we can assume that all following pages are
  397. // still tagged, according to the logic that if any of the pages
  398. // following the next page were reclaimed, the next page would have been
  399. // reclaimed as well.
  400. uptr TaggedUserPtr;
  401. if (getChunkFromBlock(BlockUptr, &PrevUserPtr, &Header) &&
  402. PrevUserPtr == UserPtr &&
  403. (TaggedUserPtr = loadTag(UserPtr)) != UserPtr) {
  404. uptr PrevEnd = TaggedUserPtr + Header.SizeOrUnusedBytes;
  405. const uptr NextPage = roundUpTo(TaggedUserPtr, getPageSizeCached());
  406. if (NextPage < PrevEnd && loadTag(NextPage) != NextPage)
  407. PrevEnd = NextPage;
  408. TaggedPtr = reinterpret_cast<void *>(TaggedUserPtr);
  409. resizeTaggedChunk(PrevEnd, TaggedUserPtr + Size, Size, BlockEnd);
  410. if (UNLIKELY(FillContents != NoFill && !Header.OriginOrWasZeroed)) {
  411. // If an allocation needs to be zeroed (i.e. calloc) we can normally
  412. // avoid zeroing the memory now since we can rely on memory having
  413. // been zeroed on free, as this is normally done while setting the
  414. // UAF tag. But if tagging was disabled per-thread when the memory
  415. // was freed, it would not have been retagged and thus zeroed, and
  416. // therefore it needs to be zeroed now.
  417. memset(TaggedPtr, 0,
  418. Min(Size, roundUpTo(PrevEnd - TaggedUserPtr,
  419. archMemoryTagGranuleSize())));
  420. } else if (Size) {
  421. // Clear any stack metadata that may have previously been stored in
  422. // the chunk data.
  423. memset(TaggedPtr, 0, archMemoryTagGranuleSize());
  424. }
  425. } else {
  426. const uptr OddEvenMask =
  427. computeOddEvenMaskForPointerMaybe(Options, BlockUptr, ClassId);
  428. TaggedPtr = prepareTaggedChunk(Ptr, Size, OddEvenMask, BlockEnd);
  429. }
  430. storePrimaryAllocationStackMaybe(Options, Ptr);
  431. } else {
  432. Block = addHeaderTag(Block);
  433. Ptr = addHeaderTag(Ptr);
  434. if (UNLIKELY(FillContents != NoFill)) {
  435. // This condition is not necessarily unlikely, but since memset is
  436. // costly, we might as well mark it as such.
  437. memset(Block, FillContents == ZeroFill ? 0 : PatternFillByte,
  438. PrimaryT::getSizeByClassId(ClassId));
  439. }
  440. }
  441. } else {
  442. Block = addHeaderTag(Block);
  443. Ptr = addHeaderTag(Ptr);
  444. if (UNLIKELY(useMemoryTagging<Params>(Options))) {
  445. storeTags(reinterpret_cast<uptr>(Block), reinterpret_cast<uptr>(Ptr));
  446. storeSecondaryAllocationStackMaybe(Options, Ptr, Size);
  447. }
  448. }
  449. Chunk::UnpackedHeader Header = {};
  450. if (UNLIKELY(UnalignedUserPtr != UserPtr)) {
  451. const uptr Offset = UserPtr - UnalignedUserPtr;
  452. DCHECK_GE(Offset, 2 * sizeof(u32));
  453. // The BlockMarker has no security purpose, but is specifically meant for
  454. // the chunk iteration function that can be used in debugging situations.
  455. // It is the only situation where we have to locate the start of a chunk
  456. // based on its block address.
  457. reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
  458. reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
  459. Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
  460. }
  461. Header.ClassId = ClassId & Chunk::ClassIdMask;
  462. Header.State = Chunk::State::Allocated;
  463. Header.OriginOrWasZeroed = Origin & Chunk::OriginMask;
  464. Header.SizeOrUnusedBytes =
  465. (ClassId ? Size : SecondaryBlockEnd - (UserPtr + Size)) &
  466. Chunk::SizeOrUnusedBytesMask;
  467. Chunk::storeHeader(Cookie, Ptr, &Header);
  468. if (UNLIKELY(&__scudo_allocate_hook))
  469. __scudo_allocate_hook(TaggedPtr, Size);
  470. return TaggedPtr;
  471. }
  472. NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
  473. UNUSED uptr Alignment = MinAlignment) {
  474. // For a deallocation, we only ensure minimal initialization, meaning thread
  475. // local data will be left uninitialized for now (when using ELF TLS). The
  476. // fallback cache will be used instead. This is a workaround for a situation
  477. // where the only heap operation performed in a thread would be a free past
  478. // the TLS destructors, which would leave initialized thread-specific data
  479. // never destroyed properly. Any other heap operation will do a full init.
  480. initThreadMaybe(/*MinimalInit=*/true);
  481. if (UNLIKELY(&__scudo_deallocate_hook))
  482. __scudo_deallocate_hook(Ptr);
  483. if (UNLIKELY(!Ptr))
  484. return;
  485. #ifdef GWP_ASAN_HOOKS
  486. if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
  487. GuardedAlloc.deallocate(Ptr);
  488. Stats.lock();
  489. Stats.add(StatFree, GuardedAllocSlotSize);
  490. Stats.sub(StatAllocated, GuardedAllocSlotSize);
  491. Stats.unlock();
  492. return;
  493. }
  494. #endif // GWP_ASAN_HOOKS
  495. if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
  496. reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);
  497. void *TaggedPtr = Ptr;
  498. Ptr = getHeaderTaggedPointer(Ptr);
  499. Chunk::UnpackedHeader Header;
  500. Chunk::loadHeader(Cookie, Ptr, &Header);
  501. if (UNLIKELY(Header.State != Chunk::State::Allocated))
  502. reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
  503. const Options Options = Primary.Options.load();
  504. if (Options.get(OptionBit::DeallocTypeMismatch)) {
  505. if (UNLIKELY(Header.OriginOrWasZeroed != Origin)) {
  506. // With the exception of memalign'd chunks, which can still be free'd.
  507. if (Header.OriginOrWasZeroed != Chunk::Origin::Memalign ||
  508. Origin != Chunk::Origin::Malloc)
  509. reportDeallocTypeMismatch(AllocatorAction::Deallocating, Ptr,
  510. Header.OriginOrWasZeroed, Origin);
  511. }
  512. }
  513. const uptr Size = getSize(Ptr, &Header);
  514. if (DeleteSize && Options.get(OptionBit::DeleteSizeMismatch)) {
  515. if (UNLIKELY(DeleteSize != Size))
  516. reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
  517. }
  518. quarantineOrDeallocateChunk(Options, TaggedPtr, &Header, Size);
  519. }
  520. void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
  521. initThreadMaybe();
  522. const Options Options = Primary.Options.load();
  523. if (UNLIKELY(NewSize >= MaxAllowedMallocSize)) {
  524. if (Options.get(OptionBit::MayReturnNull))
  525. return nullptr;
  526. reportAllocationSizeTooBig(NewSize, 0, MaxAllowedMallocSize);
  527. }
  528. // The following cases are handled by the C wrappers.
  529. DCHECK_NE(OldPtr, nullptr);
  530. DCHECK_NE(NewSize, 0);
  531. #ifdef GWP_ASAN_HOOKS
  532. if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
  533. uptr OldSize = GuardedAlloc.getSize(OldPtr);
  534. void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
  535. if (NewPtr)
  536. memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
  537. GuardedAlloc.deallocate(OldPtr);
  538. Stats.lock();
  539. Stats.add(StatFree, GuardedAllocSlotSize);
  540. Stats.sub(StatAllocated, GuardedAllocSlotSize);
  541. Stats.unlock();
  542. return NewPtr;
  543. }
  544. #endif // GWP_ASAN_HOOKS
  545. void *OldTaggedPtr = OldPtr;
  546. OldPtr = getHeaderTaggedPointer(OldPtr);
  547. if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
  548. reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);
  549. Chunk::UnpackedHeader OldHeader;
  550. Chunk::loadHeader(Cookie, OldPtr, &OldHeader);
  551. if (UNLIKELY(OldHeader.State != Chunk::State::Allocated))
  552. reportInvalidChunkState(AllocatorAction::Reallocating, OldPtr);
  553. // Pointer has to be allocated with a malloc-type function. Some
  554. // applications think that it is OK to realloc a memalign'ed pointer, which
  555. // will trigger this check. It really isn't.
  556. if (Options.get(OptionBit::DeallocTypeMismatch)) {
  557. if (UNLIKELY(OldHeader.OriginOrWasZeroed != Chunk::Origin::Malloc))
  558. reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
  559. OldHeader.OriginOrWasZeroed,
  560. Chunk::Origin::Malloc);
  561. }
  562. void *BlockBegin = getBlockBegin(OldTaggedPtr, &OldHeader);
  563. uptr BlockEnd;
  564. uptr OldSize;
  565. const uptr ClassId = OldHeader.ClassId;
  566. if (LIKELY(ClassId)) {
  567. BlockEnd = reinterpret_cast<uptr>(BlockBegin) +
  568. SizeClassMap::getSizeByClassId(ClassId);
  569. OldSize = OldHeader.SizeOrUnusedBytes;
  570. } else {
  571. BlockEnd = SecondaryT::getBlockEnd(BlockBegin);
  572. OldSize = BlockEnd - (reinterpret_cast<uptr>(OldTaggedPtr) +
  573. OldHeader.SizeOrUnusedBytes);
  574. }
  575. // If the new chunk still fits in the previously allocated block (with a
  576. // reasonable delta), we just keep the old block, and update the chunk
  577. // header to reflect the size change.
  578. if (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize <= BlockEnd) {
  579. if (NewSize > OldSize || (OldSize - NewSize) < getPageSizeCached()) {
  580. Chunk::UnpackedHeader NewHeader = OldHeader;
  581. NewHeader.SizeOrUnusedBytes =
  582. (ClassId ? NewSize
  583. : BlockEnd -
  584. (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize)) &
  585. Chunk::SizeOrUnusedBytesMask;
  586. Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
  587. if (UNLIKELY(useMemoryTagging<Params>(Options))) {
  588. if (ClassId) {
  589. resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
  590. reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
  591. NewSize, untagPointer(BlockEnd));
  592. storePrimaryAllocationStackMaybe(Options, OldPtr);
  593. } else {
  594. storeSecondaryAllocationStackMaybe(Options, OldPtr, NewSize);
  595. }
  596. }
  597. return OldTaggedPtr;
  598. }
  599. }
  600. // Otherwise we allocate a new one, and deallocate the old one. Some
  601. // allocators will allocate an even larger chunk (by a fixed factor) to
  602. // allow for potential further in-place realloc. The gains of such a trick
  603. // are currently unclear.
  604. void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
  605. if (LIKELY(NewPtr)) {
  606. memcpy(NewPtr, OldTaggedPtr, Min(NewSize, OldSize));
  607. quarantineOrDeallocateChunk(Options, OldTaggedPtr, &OldHeader, OldSize);
  608. }
  609. return NewPtr;
  610. }
  611. // TODO(kostyak): disable() is currently best-effort. There are some small
  612. // windows of time when an allocation could still succeed after
  613. // this function finishes. We will revisit that later.
  614. void disable() {
  615. initThreadMaybe();
  616. #ifdef GWP_ASAN_HOOKS
  617. GuardedAlloc.disable();
  618. #endif
  619. TSDRegistry.disable();
  620. Stats.disable();
  621. Quarantine.disable();
  622. Primary.disable();
  623. Secondary.disable();
  624. }
  625. void enable() {
  626. initThreadMaybe();
  627. Secondary.enable();
  628. Primary.enable();
  629. Quarantine.enable();
  630. Stats.enable();
  631. TSDRegistry.enable();
  632. #ifdef GWP_ASAN_HOOKS
  633. GuardedAlloc.enable();
  634. #endif
  635. }
  636. // The function returns the number of bytes required to store the statistics,
  637. // which might be larger than the number of bytes provided. Note that the
  638. // statistics buffer is not necessarily constant between calls to this
  639. // function. This can be called with a null buffer or zero size for buffer
  640. // sizing purposes.
  641. uptr getStats(char *Buffer, uptr Size) {
  642. ScopedString Str;
  643. disable();
  644. const uptr Length = getStats(&Str) + 1;
  645. enable();
  646. if (Length < Size)
  647. Size = Length;
  648. if (Buffer && Size) {
  649. memcpy(Buffer, Str.data(), Size);
  650. Buffer[Size - 1] = '\0';
  651. }
  652. return Length;
  653. }
  654. void printStats() {
  655. ScopedString Str;
  656. disable();
  657. getStats(&Str);
  658. enable();
  659. Str.output();
  660. }
  661. void releaseToOS() {
  662. initThreadMaybe();
  663. Primary.releaseToOS();
  664. Secondary.releaseToOS();
  665. }
  666. // Iterate over all chunks and call a callback for all busy chunks located
  667. // within the provided memory range. Said callback must not use this allocator
  668. // or a deadlock can ensue. This fits Android's malloc_iterate() needs.
  669. void iterateOverChunks(uptr Base, uptr Size, iterate_callback Callback,
  670. void *Arg) {
  671. initThreadMaybe();
  672. if (archSupportsMemoryTagging())
  673. Base = untagPointer(Base);
  674. const uptr From = Base;
  675. const uptr To = Base + Size;
  676. bool MayHaveTaggedPrimary = allocatorSupportsMemoryTagging<Params>() &&
  677. systemSupportsMemoryTagging();
  678. auto Lambda = [this, From, To, MayHaveTaggedPrimary, Callback,
  679. Arg](uptr Block) {
  680. if (Block < From || Block >= To)
  681. return;
  682. uptr Chunk;
  683. Chunk::UnpackedHeader Header;
  684. if (MayHaveTaggedPrimary) {
  685. // A chunk header can either have a zero tag (tagged primary) or the
  686. // header tag (secondary, or untagged primary). We don't know which, so
  687. // try both.
  688. ScopedDisableMemoryTagChecks x;
  689. if (!getChunkFromBlock(Block, &Chunk, &Header) &&
  690. !getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
  691. return;
  692. } else {
  693. if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
  694. return;
  695. }
  696. if (Header.State == Chunk::State::Allocated) {
  697. uptr TaggedChunk = Chunk;
  698. if (allocatorSupportsMemoryTagging<Params>())
  699. TaggedChunk = untagPointer(TaggedChunk);
  700. if (useMemoryTagging<Params>(Primary.Options.load()))
  701. TaggedChunk = loadTag(Chunk);
  702. Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
  703. Arg);
  704. }
  705. };
  706. Primary.iterateOverBlocks(Lambda);
  707. Secondary.iterateOverBlocks(Lambda);
  708. #ifdef GWP_ASAN_HOOKS
  709. GuardedAlloc.iterate(reinterpret_cast<void *>(Base), Size, Callback, Arg);
  710. #endif
  711. }
  712. bool canReturnNull() {
  713. initThreadMaybe();
  714. return Primary.Options.load().get(OptionBit::MayReturnNull);
  715. }
  716. bool setOption(Option O, sptr Value) {
  717. initThreadMaybe();
  718. if (O == Option::MemtagTuning) {
  719. // Enabling odd/even tags involves a tradeoff between use-after-free
  720. // detection and buffer overflow detection. Odd/even tags make it more
  721. // likely for buffer overflows to be detected by increasing the size of
  722. // the guaranteed "red zone" around the allocation, but on the other hand
  723. // use-after-free is less likely to be detected because the tag space for
  724. // any particular chunk is cut in half. Therefore we use this tuning
  725. // setting to control whether odd/even tags are enabled.
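// A caller would select the tradeoff with, for example,
// setOption(Option::MemtagTuning, M_MEMTAG_TUNING_BUFFER_OVERFLOW).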
  726. if (Value == M_MEMTAG_TUNING_BUFFER_OVERFLOW)
  727. Primary.Options.set(OptionBit::UseOddEvenTags);
  728. else if (Value == M_MEMTAG_TUNING_UAF)
  729. Primary.Options.clear(OptionBit::UseOddEvenTags);
  730. return true;
  731. } else {
  732. // We leave it to the various sub-components to decide whether or not they
  733. // want to handle the option, but we do not want to short-circuit
  734. // execution if one of the setOption calls were to return false.
  735. const bool PrimaryResult = Primary.setOption(O, Value);
  736. const bool SecondaryResult = Secondary.setOption(O, Value);
  737. const bool RegistryResult = TSDRegistry.setOption(O, Value);
  738. return PrimaryResult && SecondaryResult && RegistryResult;
  739. }
  740. return false;
  741. }
  742. // Return the usable size for a given chunk. Technically we lie, as we just
  743. // report the actual size of a chunk. This is done to counteract code actively
  744. // writing past the end of a chunk (like sqlite3) when the usable size allows
  745. // for it, which then forces realloc to copy the usable size of a chunk as
  746. // opposed to its actual size.
  747. uptr getUsableSize(const void *Ptr) {
  748. initThreadMaybe();
  749. if (UNLIKELY(!Ptr))
  750. return 0;
  751. #ifdef GWP_ASAN_HOOKS
  752. if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
  753. return GuardedAlloc.getSize(Ptr);
  754. #endif // GWP_ASAN_HOOKS
  755. Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
  756. Chunk::UnpackedHeader Header;
  757. Chunk::loadHeader(Cookie, Ptr, &Header);
  758. // Getting the usable size of a chunk only makes sense if it's allocated.
  759. if (UNLIKELY(Header.State != Chunk::State::Allocated))
  760. reportInvalidChunkState(AllocatorAction::Sizing, const_cast<void *>(Ptr));
  761. return getSize(Ptr, &Header);
  762. }
  763. void getStats(StatCounters S) {
  764. initThreadMaybe();
  765. Stats.get(S);
  766. }
  767. // Returns true if the pointer provided was allocated by the current
  768. // allocator instance, which is compliant with tcmalloc's ownership concept.
  769. // A corrupted chunk will not be reported as owned, which is WAI.
  770. bool isOwned(const void *Ptr) {
  771. initThreadMaybe();
  772. #ifdef GWP_ASAN_HOOKS
  773. if (GuardedAlloc.pointerIsMine(Ptr))
  774. return true;
  775. #endif // GWP_ASAN_HOOKS
  776. if (!Ptr || !isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment))
  777. return false;
  778. Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
  779. Chunk::UnpackedHeader Header;
  780. return Chunk::isValid(Cookie, Ptr, &Header) &&
  781. Header.State == Chunk::State::Allocated;
  782. }
  783. void setRssLimitsTestOnly(int SoftRssLimitMb, int HardRssLimitMb,
  784. bool MayReturnNull) {
  785. RssChecker.init(SoftRssLimitMb, HardRssLimitMb);
  786. if (MayReturnNull)
  787. Primary.Options.set(OptionBit::MayReturnNull);
  788. }
  789. bool useMemoryTaggingTestOnly() const {
  790. return useMemoryTagging<Params>(Primary.Options.load());
  791. }
  792. void disableMemoryTagging() {
  793. // If we haven't been initialized yet, we need to initialize now in order to
  794. // prevent a future call to initThreadMaybe() from enabling memory tagging
  795. // based on feature detection. But don't call initThreadMaybe() because it
  796. // may end up calling the allocator (via pthread_atfork, via the post-init
  797. // callback), which may cause mappings to be created with memory tagging
  798. // enabled.
  799. TSDRegistry.initOnceMaybe(this);
  800. if (allocatorSupportsMemoryTagging<Params>()) {
  801. Secondary.disableMemoryTagging();
  802. Primary.Options.clear(OptionBit::UseMemoryTagging);
  803. }
  804. }
  805. void setTrackAllocationStacks(bool Track) {
  806. initThreadMaybe();
  807. if (getFlags()->allocation_ring_buffer_size == 0) {
  808. DCHECK(!Primary.Options.load().get(OptionBit::TrackAllocationStacks));
  809. return;
  810. }
  811. if (Track)
  812. Primary.Options.set(OptionBit::TrackAllocationStacks);
  813. else
  814. Primary.Options.clear(OptionBit::TrackAllocationStacks);
  815. }
  816. void setFillContents(FillContentsMode FillContents) {
  817. initThreadMaybe();
  818. Primary.Options.setFillContentsMode(FillContents);
  819. }
  820. void setAddLargeAllocationSlack(bool AddSlack) {
  821. initThreadMaybe();
  822. if (AddSlack)
  823. Primary.Options.set(OptionBit::AddLargeAllocationSlack);
  824. else
  825. Primary.Options.clear(OptionBit::AddLargeAllocationSlack);
  826. }
  827. const char *getStackDepotAddress() const {
  828. return reinterpret_cast<const char *>(&Depot);
  829. }
  830. const char *getRegionInfoArrayAddress() const {
  831. return Primary.getRegionInfoArrayAddress();
  832. }
  833. static uptr getRegionInfoArraySize() {
  834. return PrimaryT::getRegionInfoArraySize();
  835. }
  836. const char *getRingBufferAddress() {
  837. initThreadMaybe();
  838. return RawRingBuffer;
  839. }
  840. uptr getRingBufferSize() {
  841. initThreadMaybe();
  842. auto *RingBuffer = getRingBuffer();
  843. return RingBuffer ? ringBufferSizeInBytes(RingBuffer->Size) : 0;
  844. }
  845. static bool setRingBufferSizeForBuffer(char *Buffer, size_t Size) {
  846. // Need at least one entry.
  847. if (Size < sizeof(AllocationRingBuffer) +
  848. sizeof(typename AllocationRingBuffer::Entry)) {
  849. return false;
  850. }
  851. AllocationRingBuffer *RingBuffer =
  852. reinterpret_cast<AllocationRingBuffer *>(Buffer);
  853. RingBuffer->Size = (Size - sizeof(AllocationRingBuffer)) /
  854. sizeof(typename AllocationRingBuffer::Entry);
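// As a rough illustration, assuming a 64-bit target where each Entry is
// 32 bytes and the struct header rounds up to 16 bytes, a 4 KiB buffer
// yields (4096 - 16) / 32 = 127 entries.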
  855. return true;
  856. }
  857. static const uptr MaxTraceSize = 64;
  858. static void collectTraceMaybe(const StackDepot *Depot,
  859. uintptr_t (&Trace)[MaxTraceSize], u32 Hash) {
  860. uptr RingPos, Size;
  861. if (!Depot->find(Hash, &RingPos, &Size))
  862. return;
  863. for (unsigned I = 0; I != Size && I != MaxTraceSize; ++I)
  864. Trace[I] = static_cast<uintptr_t>((*Depot)[RingPos + I]);
  865. }
  866. static void getErrorInfo(struct scudo_error_info *ErrorInfo,
  867. uintptr_t FaultAddr, const char *DepotPtr,
  868. const char *RegionInfoPtr, const char *RingBufferPtr,
  869. const char *Memory, const char *MemoryTags,
  870. uintptr_t MemoryAddr, size_t MemorySize) {
  871. *ErrorInfo = {};
  872. if (!allocatorSupportsMemoryTagging<Params>() ||
  873. MemoryAddr + MemorySize < MemoryAddr)
  874. return;
  875. auto *Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
  876. size_t NextErrorReport = 0;
  877. // Check for OOB in the current block and the two surrounding blocks. Beyond
  878. // that, UAF is more likely.
  879. if (extractTag(FaultAddr) != 0)
  880. getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
  881. RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
  882. MemorySize, 0, 2);
  883. // Check the ring buffer. For primary allocations this will only find UAF;
  884. // for secondary allocations we can find either UAF or OOB.
  885. getRingBufferErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
  886. RingBufferPtr);
  887. // Check for OOB in the 28 blocks surrounding the 3 we checked earlier.
  888. // Beyond that we are likely to hit false positives.
  889. if (extractTag(FaultAddr) != 0)
  890. getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
  891. RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
  892. MemorySize, 2, 16);
  893. }
  894. private:
  895. using SecondaryT = MapAllocator<Params>;
  896. typedef typename PrimaryT::SizeClassMap SizeClassMap;
  897. static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
  898. static const uptr MaxAlignmentLog = 24U; // 16 MB seems reasonable.
  899. static const uptr MinAlignment = 1UL << MinAlignmentLog;
  900. static const uptr MaxAlignment = 1UL << MaxAlignmentLog;
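// 1 << 31 (2 GiB) on 32-bit platforms, 1 << 40 (1 TiB) on 64-bit platforms.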
  901. static const uptr MaxAllowedMallocSize =
  902. FIRST_32_SECOND_64(1UL << 31, 1ULL << 40);
  903. static_assert(MinAlignment >= sizeof(Chunk::PackedHeader),
  904. "Minimal alignment must at least cover a chunk header.");
  905. static_assert(!allocatorSupportsMemoryTagging<Params>() ||
  906. MinAlignment >= archMemoryTagGranuleSize(),
  907. "");
  908. static const u32 BlockMarker = 0x44554353U;
  909. // These are indexes into an "array" of 32-bit values that store information
  910. // inline with a chunk that is relevant to diagnosing memory tag faults, where
  911. // 0 corresponds to the address of the user memory. This means that only
  912. // negative indexes may be used. The smallest index that may be used is -2,
  913. // which corresponds to 8 bytes before the user memory, because the chunk
  914. // header size is 8 bytes and in allocators that support memory tagging the
  915. // minimum alignment is at least the tag granule size (16 on aarch64).
  916. static const sptr MemTagAllocationTraceIndex = -2;
  917. static const sptr MemTagAllocationTidIndex = -1;
  918. u32 Cookie = 0;
  919. u32 QuarantineMaxChunkSize = 0;
  920. GlobalStats Stats;
  921. PrimaryT Primary;
  922. SecondaryT Secondary;
  923. QuarantineT Quarantine;
  924. TSDRegistryT TSDRegistry;
  925. pthread_once_t PostInitNonce = PTHREAD_ONCE_INIT;
  926. RssLimitChecker RssChecker;
  927. #ifdef GWP_ASAN_HOOKS
  928. gwp_asan::GuardedPoolAllocator GuardedAlloc;
  929. uptr GuardedAllocSlotSize = 0;
  930. #endif // GWP_ASAN_HOOKS
  931. StackDepot Depot;
  932. struct AllocationRingBuffer {
  933. struct Entry {
  934. atomic_uptr Ptr;
  935. atomic_uptr AllocationSize;
  936. atomic_u32 AllocationTrace;
  937. atomic_u32 AllocationTid;
  938. atomic_u32 DeallocationTrace;
  939. atomic_u32 DeallocationTid;
  940. };
  941. atomic_uptr Pos;
  942. u32 Size;
  943. // An array of Size (at least one) elements of type Entry immediately
  944. // follows this struct.
  945. };
  946. // Pointer to a memory-mapped area that starts with the AllocationRingBuffer
  947. // struct and is immediately followed by Size elements of type Entry.
  948. char *RawRingBuffer = {};
  949. // The following might get optimized out by the compiler.
  950. NOINLINE void performSanityChecks() {
  951. // Verify that the header offset field can hold the maximum offset. In the
  952. // case of the Secondary allocator, it takes care of alignment and the
  953. // offset will always be small. In the case of the Primary, the worst case
  954. // scenario happens in the last size class, when the backend allocation
  955. // would already be aligned on the requested alignment, which would happen
  956. // to be the maximum alignment that would fit in that size class. As a
  957. // result, the maximum offset will be at most the maximum alignment for the
  958. // last size class minus the header size, in multiples of MinAlignment.
  959. Chunk::UnpackedHeader Header = {};
  960. const uptr MaxPrimaryAlignment = 1UL << getMostSignificantSetBitIndex(
  961. SizeClassMap::MaxSize - MinAlignment);
  962. const uptr MaxOffset =
  963. (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
  964. Header.Offset = MaxOffset & Chunk::OffsetMask;
  965. if (UNLIKELY(Header.Offset != MaxOffset))
  966. reportSanityCheckError("offset");
  967. // Verify that we can fit the maximum size or amount of unused bytes in the
  968. // header. Given that the Secondary fits the allocation to a page, the worst
  969. // case scenario happens in the Primary. It will depend on the second to
  970. // last and last class sizes, as well as the dynamic base for the Primary.
  971. // The following is an over-approximation that works for our needs.
  972. const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
  973. Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
  974. if (UNLIKELY(Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes))
  975. reportSanityCheckError("size (or unused bytes)");
  976. const uptr LargestClassId = SizeClassMap::LargestClassId;
  977. Header.ClassId = LargestClassId;
  978. if (UNLIKELY(Header.ClassId != LargestClassId))
  979. reportSanityCheckError("class ID");
  980. }
  981. static inline void *getBlockBegin(const void *Ptr,
  982. Chunk::UnpackedHeader *Header) {
  983. return reinterpret_cast<void *>(
  984. reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
  985. (static_cast<uptr>(Header->Offset) << MinAlignmentLog));
  986. }
  987. // Return the size of a chunk as requested during its allocation.
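// For Primary-backed chunks the header stores the requested size directly; for
// Secondary-backed chunks it stores the number of unused bytes at the end of
// the block, so the size is recovered as BlockEnd - Ptr - UnusedBytes.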
  988. inline uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
  989. const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
  990. if (LIKELY(Header->ClassId))
  991. return SizeOrUnusedBytes;
  992. if (allocatorSupportsMemoryTagging<Params>())
  993. Ptr = untagPointer(const_cast<void *>(Ptr));
  994. return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
  995. reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
  996. }
  997. void quarantineOrDeallocateChunk(Options Options, void *TaggedPtr,
  998. Chunk::UnpackedHeader *Header, uptr Size) {
  999. void *Ptr = getHeaderTaggedPointer(TaggedPtr);
  1000. Chunk::UnpackedHeader NewHeader = *Header;
  1001. // If the quarantine is disabled, or if the actual size of the chunk is 0 or
  1002. // larger than the maximum allowed, we return the chunk directly to the
  1003. // backend. The subtraction purposefully underflows for Size == 0.
  1004. const bool BypassQuarantine = !Quarantine.getCacheSize() ||
  1005. ((Size - 1) >= QuarantineMaxChunkSize) ||
  1006. !NewHeader.ClassId;
  1007. if (BypassQuarantine)
  1008. NewHeader.State = Chunk::State::Available;
  1009. else
  1010. NewHeader.State = Chunk::State::Quarantined;
  1011. NewHeader.OriginOrWasZeroed = useMemoryTagging<Params>(Options) &&
  1012. NewHeader.ClassId &&
  1013. !TSDRegistry.getDisableMemInit();
  1014. Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
  1015. if (UNLIKELY(useMemoryTagging<Params>(Options))) {
  1016. u8 PrevTag = extractTag(reinterpret_cast<uptr>(TaggedPtr));
  1017. storeDeallocationStackMaybe(Options, Ptr, PrevTag, Size);
  1018. if (NewHeader.ClassId) {
  1019. if (!TSDRegistry.getDisableMemInit()) {
  1020. uptr TaggedBegin, TaggedEnd;
  1021. const uptr OddEvenMask = computeOddEvenMaskForPointerMaybe(
  1022. Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, &NewHeader)),
  1023. NewHeader.ClassId);
  1024. // Exclude the previous tag so that immediate use after free is
  1025. // detected 100% of the time.
  1026. setRandomTag(Ptr, Size, OddEvenMask | (1UL << PrevTag), &TaggedBegin,
  1027. &TaggedEnd);
  1028. }
  1029. }
  1030. }
  1031. if (BypassQuarantine) {
  1032. if (allocatorSupportsMemoryTagging<Params>())
  1033. Ptr = untagPointer(Ptr);
  1034. void *BlockBegin = getBlockBegin(Ptr, &NewHeader);
  1035. const uptr ClassId = NewHeader.ClassId;
  1036. if (LIKELY(ClassId)) {
  1037. bool UnlockRequired;
  1038. auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
  1039. TSD->Cache.deallocate(ClassId, BlockBegin);
  1040. if (UnlockRequired)
  1041. TSD->unlock();
  1042. } else {
  1043. if (UNLIKELY(useMemoryTagging<Params>(Options)))
  1044. storeTags(reinterpret_cast<uptr>(BlockBegin),
  1045. reinterpret_cast<uptr>(Ptr));
  1046. Secondary.deallocate(Options, BlockBegin);
  1047. }
  1048. } else {
  1049. bool UnlockRequired;
  1050. auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
  1051. Quarantine.put(&TSD->QuarantineCache,
  1052. QuarantineCallback(*this, TSD->Cache), Ptr, Size);
  1053. if (UnlockRequired)
  1054. TSD->unlock();
  1055. }
  1056. }
  1057. bool getChunkFromBlock(uptr Block, uptr *Chunk,
  1058. Chunk::UnpackedHeader *Header) {
  1059. *Chunk =
  1060. Block + getChunkOffsetFromBlock(reinterpret_cast<const char *>(Block));
  1061. return Chunk::isValid(Cookie, reinterpret_cast<void *>(*Chunk), Header);
  1062. }
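// When a chunk is offset within its block (see allocate()), the block begins
// with two u32 values: BlockMarker, followed by the number of bytes that were
// added to reach the aligned user pointer. Otherwise the header area starts
// right at the block begin.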
  1063. static uptr getChunkOffsetFromBlock(const char *Block) {
  1064. u32 Offset = 0;
  1065. if (reinterpret_cast<const u32 *>(Block)[0] == BlockMarker)
  1066. Offset = reinterpret_cast<const u32 *>(Block)[1];
  1067. return Offset + Chunk::getHeaderSize();
  1068. }
  1069. // Set the tag of the granule past the end of the allocation to 0, to catch
  1070. // linear overflows even if a previous larger allocation used the same block
  1071. // and tag. Only do this if the granule past the end is in our block, because
  1072. // this would otherwise lead to a SEGV if the allocation covers the entire
  1073. // block and our block is at the end of a mapping. The tag of the next block's
  1074. // header granule will be set to 0, so it will serve the purpose of catching
  1075. // linear overflows in this case.
  1076. //
  1077. // For allocations of size 0 we do not end up storing the address tag to the
  1078. // memory tag space, which getInlineErrorInfo() normally relies on to match
  1079. // address tags against chunks. To allow matching in this case we store the
  1080. // address tag in the first byte of the chunk.
  1081. void storeEndMarker(uptr End, uptr Size, uptr BlockEnd) {
  1082. DCHECK_EQ(BlockEnd, untagPointer(BlockEnd));
  1083. uptr UntaggedEnd = untagPointer(End);
  1084. if (UntaggedEnd != BlockEnd) {
  1085. storeTag(UntaggedEnd);
  1086. if (Size == 0)
  1087. *reinterpret_cast<u8 *>(UntaggedEnd) = extractTag(End);
  1088. }
  1089. }
  1090. void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
  1091. uptr BlockEnd) {
  1092. // Prepare the granule before the chunk to store the chunk header by setting
  1093. // its tag to 0. Normally its tag will already be 0, but in the case where a
  1094. // chunk holding a low alignment allocation is reused for a higher alignment
  1095. // allocation, the chunk may already have a non-zero tag from the previous
  1096. // allocation.
  1097. storeTag(reinterpret_cast<uptr>(Ptr) - archMemoryTagGranuleSize());
  1098. uptr TaggedBegin, TaggedEnd;
  1099. setRandomTag(Ptr, Size, ExcludeMask, &TaggedBegin, &TaggedEnd);
  1100. storeEndMarker(TaggedEnd, Size, BlockEnd);
  1101. return reinterpret_cast<void *>(TaggedBegin);
  1102. }
  void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr NewSize,
                         uptr BlockEnd) {
    uptr RoundOldPtr = roundUpTo(OldPtr, archMemoryTagGranuleSize());
    uptr RoundNewPtr;
    if (RoundOldPtr >= NewPtr) {
      // If the allocation is shrinking we just need to set the tag past the
      // end of the allocation to 0. See explanation in storeEndMarker() above.
      RoundNewPtr = roundUpTo(NewPtr, archMemoryTagGranuleSize());
    } else {
      // Set the memory tag of the region
      // [RoundOldPtr, roundUpTo(NewPtr, archMemoryTagGranuleSize()))
      // to the pointer tag stored in OldPtr.
      RoundNewPtr = storeTags(RoundOldPtr, NewPtr);
    }
    storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
  }
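  // If stack trace tracking is enabled, records the allocation stack trace and
  // thread ID inside the chunk's user memory at MemTagAllocationTraceIndex and
  // MemTagAllocationTidIndex.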
  void storePrimaryAllocationStackMaybe(Options Options, void *Ptr) {
    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
      return;
    auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
    Ptr32[MemTagAllocationTraceIndex] = collectStackTrace();
    Ptr32[MemTagAllocationTidIndex] = getThreadID();
  }
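  // Publishes one allocation/deallocation record into the lock-free ring
  // buffer: a slot is claimed with a relaxed fetch-add on Pos, and the entry
  // only becomes visible to readers once its Ptr field is stored last.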
  void storeRingBufferEntry(void *Ptr, u32 AllocationTrace, u32 AllocationTid,
                            uptr AllocationSize, u32 DeallocationTrace,
                            u32 DeallocationTid) {
    uptr Pos = atomic_fetch_add(&getRingBuffer()->Pos, 1, memory_order_relaxed);
    typename AllocationRingBuffer::Entry *Entry =
        getRingBufferEntry(RawRingBuffer, Pos % getRingBuffer()->Size);
    // First invalidate our entry so that we don't attempt to interpret a
    // partially written state in getRingBufferErrorInfo(). The fences below
    // ensure that the compiler does not move the stores to Ptr in between the
    // stores to the other fields.
    atomic_store_relaxed(&Entry->Ptr, 0);
    __atomic_signal_fence(__ATOMIC_SEQ_CST);
    atomic_store_relaxed(&Entry->AllocationTrace, AllocationTrace);
    atomic_store_relaxed(&Entry->AllocationTid, AllocationTid);
    atomic_store_relaxed(&Entry->AllocationSize, AllocationSize);
    atomic_store_relaxed(&Entry->DeallocationTrace, DeallocationTrace);
    atomic_store_relaxed(&Entry->DeallocationTid, DeallocationTid);
    __atomic_signal_fence(__ATOMIC_SEQ_CST);
    atomic_store_relaxed(&Entry->Ptr, reinterpret_cast<uptr>(Ptr));
  }
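  // Secondary allocations additionally get a ring buffer entry (with no
  // deallocation information yet) on top of the in-chunk trace and thread ID.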
  void storeSecondaryAllocationStackMaybe(Options Options, void *Ptr,
                                          uptr Size) {
    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
      return;
    u32 Trace = collectStackTrace();
    u32 Tid = getThreadID();
    auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
    Ptr32[MemTagAllocationTraceIndex] = Trace;
    Ptr32[MemTagAllocationTidIndex] = Tid;
    storeRingBufferEntry(untagPointer(Ptr), Trace, Tid, Size, 0, 0);
  }
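  // On deallocation, pairs the allocation trace/thread ID previously stashed
  // in the chunk with a freshly collected deallocation trace and records both
  // in the ring buffer; PrevTag is re-applied so the entry keeps the tag the
  // chunk had while it was live.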
  void storeDeallocationStackMaybe(Options Options, void *Ptr, u8 PrevTag,
                                   uptr Size) {
    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
      return;
    auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
    u32 AllocationTrace = Ptr32[MemTagAllocationTraceIndex];
    u32 AllocationTid = Ptr32[MemTagAllocationTidIndex];
    u32 DeallocationTrace = collectStackTrace();
    u32 DeallocationTid = getThreadID();
    storeRingBufferEntry(addFixedTag(untagPointer(Ptr), PrevTag),
                         AllocationTrace, AllocationTid, Size,
                         DeallocationTrace, DeallocationTid);
  }
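  // Error report generation: getInlineErrorInfo() walks the blocks around the
  // fault address using the in-chunk metadata, while getRingBufferErrorInfo()
  // below consults the allocation ring buffer.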
  static const size_t NumErrorReports =
      sizeof(((scudo_error_info *)nullptr)->reports) /
      sizeof(((scudo_error_info *)nullptr)->reports[0]);

  static void getInlineErrorInfo(struct scudo_error_info *ErrorInfo,
                                 size_t &NextErrorReport, uintptr_t FaultAddr,
                                 const StackDepot *Depot,
                                 const char *RegionInfoPtr, const char *Memory,
                                 const char *MemoryTags, uintptr_t MemoryAddr,
                                 size_t MemorySize, size_t MinDistance,
                                 size_t MaxDistance) {
    uptr UntaggedFaultAddr = untagPointer(FaultAddr);
    u8 FaultAddrTag = extractTag(FaultAddr);
    BlockInfo Info =
        PrimaryT::findNearestBlock(RegionInfoPtr, UntaggedFaultAddr);

    auto GetGranule = [&](uptr Addr, const char **Data, uint8_t *Tag) -> bool {
      if (Addr < MemoryAddr || Addr + archMemoryTagGranuleSize() < Addr ||
          Addr + archMemoryTagGranuleSize() > MemoryAddr + MemorySize)
        return false;
      *Data = &Memory[Addr - MemoryAddr];
      *Tag = static_cast<u8>(
          MemoryTags[(Addr - MemoryAddr) / archMemoryTagGranuleSize()]);
      return true;
    };

    auto ReadBlock = [&](uptr Addr, uptr *ChunkAddr,
                         Chunk::UnpackedHeader *Header, const u32 **Data,
                         u8 *Tag) {
      const char *BlockBegin;
      u8 BlockBeginTag;
      if (!GetGranule(Addr, &BlockBegin, &BlockBeginTag))
        return false;
      uptr ChunkOffset = getChunkOffsetFromBlock(BlockBegin);
      *ChunkAddr = Addr + ChunkOffset;
      const char *ChunkBegin;
      if (!GetGranule(*ChunkAddr, &ChunkBegin, Tag))
        return false;
      *Header = *reinterpret_cast<const Chunk::UnpackedHeader *>(
          ChunkBegin - Chunk::getHeaderSize());
      *Data = reinterpret_cast<const u32 *>(ChunkBegin);
      // Allocations of size 0 will have stashed the tag in the first byte of
      // the chunk, see storeEndMarker().
      if (Header->SizeOrUnusedBytes == 0)
        *Tag = static_cast<u8>(*ChunkBegin);
      return true;
    };

    if (NextErrorReport == NumErrorReports)
      return;

    auto CheckOOB = [&](uptr BlockAddr) {
      if (BlockAddr < Info.RegionBegin || BlockAddr >= Info.RegionEnd)
        return false;
      uptr ChunkAddr;
      Chunk::UnpackedHeader Header;
      const u32 *Data;
      uint8_t Tag;
      if (!ReadBlock(BlockAddr, &ChunkAddr, &Header, &Data, &Tag) ||
          Header.State != Chunk::State::Allocated || Tag != FaultAddrTag)
        return false;
      auto *R = &ErrorInfo->reports[NextErrorReport++];
      R->error_type =
          UntaggedFaultAddr < ChunkAddr ? BUFFER_UNDERFLOW : BUFFER_OVERFLOW;
      R->allocation_address = ChunkAddr;
      R->allocation_size = Header.SizeOrUnusedBytes;
      collectTraceMaybe(Depot, R->allocation_trace,
                        Data[MemTagAllocationTraceIndex]);
      R->allocation_tid = Data[MemTagAllocationTidIndex];
      return NextErrorReport == NumErrorReports;
    };

    if (MinDistance == 0 && CheckOOB(Info.BlockBegin))
      return;

    for (size_t I = Max<size_t>(MinDistance, 1); I != MaxDistance; ++I)
      if (CheckOOB(Info.BlockBegin + I * Info.BlockSize) ||
          CheckOOB(Info.BlockBegin - I * Info.BlockSize))
        return;
  }
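  // Scans the ring buffer from the most recent entry backwards, reporting a
  // use-after-free when the fault address lies within a freed entry, or a
  // buffer overflow/underflow when it lies just outside a live allocation.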
  static void getRingBufferErrorInfo(struct scudo_error_info *ErrorInfo,
                                     size_t &NextErrorReport,
                                     uintptr_t FaultAddr,
                                     const StackDepot *Depot,
                                     const char *RingBufferPtr) {
    auto *RingBuffer =
        reinterpret_cast<const AllocationRingBuffer *>(RingBufferPtr);
    if (!RingBuffer || RingBuffer->Size == 0)
      return;
    uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);

    for (uptr I = Pos - 1;
         I != Pos - 1 - RingBuffer->Size && NextErrorReport != NumErrorReports;
         --I) {
      auto *Entry = getRingBufferEntry(RingBufferPtr, I % RingBuffer->Size);
      uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr);
      if (!EntryPtr)
        continue;

      uptr UntaggedEntryPtr = untagPointer(EntryPtr);
      uptr EntrySize = atomic_load_relaxed(&Entry->AllocationSize);
      u32 AllocationTrace = atomic_load_relaxed(&Entry->AllocationTrace);
      u32 AllocationTid = atomic_load_relaxed(&Entry->AllocationTid);
      u32 DeallocationTrace = atomic_load_relaxed(&Entry->DeallocationTrace);
      u32 DeallocationTid = atomic_load_relaxed(&Entry->DeallocationTid);

      if (DeallocationTid) {
        // For UAF we only consider in-bounds fault addresses because
        // out-of-bounds UAF is rare and attempting to detect it is very likely
        // to result in false positives.
        if (FaultAddr < EntryPtr || FaultAddr >= EntryPtr + EntrySize)
          continue;
      } else {
        // Ring buffer OOB is only possible with secondary allocations. In this
        // case we are guaranteed a guard region of at least a page on either
        // side of the allocation (guard page on the right, guard page + tagged
        // region on the left), so ignore any faults outside of that range.
        if (FaultAddr < EntryPtr - getPageSizeCached() ||
            FaultAddr >= EntryPtr + EntrySize + getPageSizeCached())
          continue;

        // For UAF the ring buffer will contain two entries, one for the
        // allocation and another for the deallocation. Don't report buffer
        // overflow/underflow using the allocation entry if we have already
        // collected a report from the deallocation entry.
        bool Found = false;
        for (uptr J = 0; J != NextErrorReport; ++J) {
          if (ErrorInfo->reports[J].allocation_address == UntaggedEntryPtr) {
            Found = true;
            break;
          }
        }
        if (Found)
          continue;
      }

      auto *R = &ErrorInfo->reports[NextErrorReport++];
      if (DeallocationTid)
        R->error_type = USE_AFTER_FREE;
      else if (FaultAddr < EntryPtr)
        R->error_type = BUFFER_UNDERFLOW;
      else
        R->error_type = BUFFER_OVERFLOW;
      R->allocation_address = UntaggedEntryPtr;
      R->allocation_size = EntrySize;
      collectTraceMaybe(Depot, R->allocation_trace, AllocationTrace);
      R->allocation_tid = AllocationTid;
      collectTraceMaybe(Depot, R->deallocation_trace, DeallocationTrace);
      R->deallocation_tid = DeallocationTid;
    }
  }
  uptr getStats(ScopedString *Str) {
    Primary.getStats(Str);
    Secondary.getStats(Str);
    Quarantine.getStats(Str);
    return Str->length();
  }
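  // Ring buffer entries are laid out immediately after the
  // AllocationRingBuffer header within the same mapping.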
  static typename AllocationRingBuffer::Entry *
  getRingBufferEntry(char *RawRingBuffer, uptr N) {
    return &reinterpret_cast<typename AllocationRingBuffer::Entry *>(
        &RawRingBuffer[sizeof(AllocationRingBuffer)])[N];
  }
  static const typename AllocationRingBuffer::Entry *
  getRingBufferEntry(const char *RawRingBuffer, uptr N) {
    return &reinterpret_cast<const typename AllocationRingBuffer::Entry *>(
        &RawRingBuffer[sizeof(AllocationRingBuffer)])[N];
  }
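  // Maps and initializes the ring buffer according to the
  // allocation_ring_buffer_size flag; a value below 1 leaves it disabled.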
  void initRingBuffer() {
    u32 AllocationRingBufferSize =
        static_cast<u32>(getFlags()->allocation_ring_buffer_size);
    if (AllocationRingBufferSize < 1)
      return;
    MapPlatformData Data = {};
    RawRingBuffer = static_cast<char *>(
        map(/*Addr=*/nullptr,
            roundUpTo(ringBufferSizeInBytes(AllocationRingBufferSize),
                      getPageSizeCached()),
            "AllocatorRingBuffer", /*Flags=*/0, &Data));
    auto *RingBuffer = reinterpret_cast<AllocationRingBuffer *>(RawRingBuffer);
    RingBuffer->Size = AllocationRingBufferSize;
    static_assert(sizeof(AllocationRingBuffer) %
                          alignof(typename AllocationRingBuffer::Entry) ==
                      0,
                  "invalid alignment");
  }
  static constexpr size_t ringBufferSizeInBytes(u32 AllocationRingBufferSize) {
    return sizeof(AllocationRingBuffer) +
           AllocationRingBufferSize *
               sizeof(typename AllocationRingBuffer::Entry);
  }

  inline AllocationRingBuffer *getRingBuffer() {
    return reinterpret_cast<AllocationRingBuffer *>(RawRingBuffer);
  }
};

} // namespace scudo

#endif // SCUDO_COMBINED_H_