//===-- secondary.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_SECONDARY_H_
#define SCUDO_SECONDARY_H_

#include "chunk.h"
#include "common.h"
#include "list.h"
#include "memtag.h"
#include "mutex.h"
#include "options.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {

// This allocator wraps the platform allocation primitives, and as such is on
// the slower side and should preferably be used for larger sized allocations.
// Blocks allocated will be preceded and followed by a guard page, and hold
// their own header that is not checksummed: the guard pages and the Combined
// header should be enough for our purpose.
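// Roughly, a block mapped by this allocator looks as follows; the exact
// offsets depend on the requested size and alignment, and the user pointer
// handed out by the frontend sits right past the Chunk header:
//
//   [guard page][unused][LargeBlock::Header][Chunk header][user data][guard page]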
namespace LargeBlock {

struct alignas(Max<uptr>(archSupportsMemoryTagging()
                             ? archMemoryTagGranuleSize()
                             : 1,
                         1U << SCUDO_MIN_ALIGNMENT_LOG)) Header {
  LargeBlock::Header *Prev;
  LargeBlock::Header *Next;
  uptr CommitBase;
  uptr CommitSize;
  uptr MapBase;
  uptr MapSize;
  [[no_unique_address]] MapPlatformData Data;
};

static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
static_assert(!archSupportsMemoryTagging() ||
                  sizeof(Header) % archMemoryTagGranuleSize() == 0,
              "");

constexpr uptr getHeaderSize() { return sizeof(Header); }

template <typename Config> static uptr addHeaderTag(uptr Ptr) {
  if (allocatorSupportsMemoryTagging<Config>())
    return addFixedTag(Ptr, 1);
  return Ptr;
}

template <typename Config> static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(addHeaderTag<Config>(Ptr)) - 1;
}

template <typename Config> static Header *getHeader(const void *Ptr) {
  return getHeader<Config>(reinterpret_cast<uptr>(Ptr));
}

} // namespace LargeBlock
static void unmap(LargeBlock::Header *H) {
  MapPlatformData Data = H->Data;
  unmap(reinterpret_cast<void *>(H->MapBase), H->MapSize, UNMAP_ALL, &Data);
}

class MapAllocatorNoCache {
public:
  void init(UNUSED s32 ReleaseToOsInterval) {}
  bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
                UNUSED LargeBlock::Header **H, UNUSED bool *Zeroed) {
    return false;
  }
  void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
  bool canCache(UNUSED uptr Size) { return false; }
  void disable() {}
  void enable() {}
  void releaseToOS() {}
  void disableMemoryTagging() {}
  void unmapTestOnly() {}
  bool setOption(Option O, UNUSED sptr Value) {
    if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
        O == Option::MaxCacheEntrySize)
      return false;
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }
};

static const uptr MaxUnusedCachePages = 4U;
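// Maps the committed region of a secondary block. When memory tagging is in
// use and the region is large enough, only its beginning (the headers and any
// slack that may precede the allocation) is mapped with MAP_MEMTAG; the tail,
// which only ever holds untagged user data, is mapped without it.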
template <typename Config>
void mapSecondary(Options Options, uptr CommitBase, uptr CommitSize,
                  uptr AllocPos, uptr Flags, MapPlatformData *Data) {
  const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * getPageSizeCached();
  if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
    const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
    map(reinterpret_cast<void *>(CommitBase), UntaggedPos - CommitBase,
        "scudo:secondary", MAP_RESIZABLE | MAP_MEMTAG | Flags, Data);
    map(reinterpret_cast<void *>(UntaggedPos),
        CommitBase + CommitSize - UntaggedPos, "scudo:secondary",
        MAP_RESIZABLE | Flags, Data);
  } else {
    map(reinterpret_cast<void *>(CommitBase), CommitSize, "scudo:secondary",
        MAP_RESIZABLE | (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) |
            Flags,
        Data);
  }
}
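// Caches the mappings of recently deallocated blocks (released to the OS
// and/or made inaccessible, but not unmapped) so that they can be recycled by
// retrieve() without issuing new map() calls. With memory tagging enabled,
// freed blocks first rotate through a small quarantine to delay their reuse.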
template <typename Config> class MapAllocatorCache {
public:
  // Ensure the default maximum specified fits the array.
  static_assert(Config::SecondaryCacheDefaultMaxEntriesCount <=
                    Config::SecondaryCacheEntriesArraySize,
                "");

  void init(s32 ReleaseToOsInterval) {
    DCHECK_EQ(EntriesCount, 0U);
    setOption(Option::MaxCacheEntriesCount,
              static_cast<sptr>(Config::SecondaryCacheDefaultMaxEntriesCount));
    setOption(Option::MaxCacheEntrySize,
              static_cast<sptr>(Config::SecondaryCacheDefaultMaxEntrySize));
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }

  void store(Options Options, LargeBlock::Header *H) {
    if (!canCache(H->CommitSize))
      return unmap(H);

    bool EntryCached = false;
    bool EmptyCache = false;
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    const u64 Time = getMonotonicTime();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    CachedBlock Entry;
    Entry.CommitBase = H->CommitBase;
    Entry.CommitSize = H->CommitSize;
    Entry.MapBase = H->MapBase;
    Entry.MapSize = H->MapSize;
    Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
    Entry.Data = H->Data;
    Entry.Time = Time;
    if (useMemoryTagging<Config>(Options)) {
      if (Interval == 0 && !SCUDO_FUCHSIA) {
        // Release the memory and make it inaccessible at the same time by
        // creating a new MAP_NOACCESS mapping on top of the existing mapping.
        // Fuchsia does not support replacing mappings by creating a new mapping
        // on top so we just do the two syscalls there.
        Entry.Time = 0;
        mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize,
                             Entry.CommitBase, MAP_NOACCESS, &Entry.Data);
      } else {
        setMemoryPermission(Entry.CommitBase, Entry.CommitSize, MAP_NOACCESS,
                            &Entry.Data);
      }
    } else if (Interval == 0) {
      releasePagesToOS(Entry.CommitBase, 0, Entry.CommitSize, &Entry.Data);
      Entry.Time = 0;
    }
    do {
      ScopedLock L(Mutex);
      if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
        // If we get here then memory tagging was disabled in between when we
        // read Options and when we locked Mutex. We can't insert our entry into
        // the quarantine or the cache because the permissions would be wrong so
        // just unmap it.
        break;
      }
      if (Config::SecondaryCacheQuarantineSize &&
          useMemoryTagging<Config>(Options)) {
        QuarantinePos =
            (QuarantinePos + 1) % Max(Config::SecondaryCacheQuarantineSize, 1u);
        if (!Quarantine[QuarantinePos].CommitBase) {
          Quarantine[QuarantinePos] = Entry;
          return;
        }
        CachedBlock PrevEntry = Quarantine[QuarantinePos];
        Quarantine[QuarantinePos] = Entry;
        if (OldestTime == 0)
          OldestTime = Entry.Time;
        Entry = PrevEntry;
      }
      if (EntriesCount >= MaxCount) {
        if (IsFullEvents++ == 4U)
          EmptyCache = true;
      } else {
        for (u32 I = 0; I < MaxCount; I++) {
          if (Entries[I].CommitBase)
            continue;
          if (I != 0)
            Entries[I] = Entries[0];
          Entries[0] = Entry;
          EntriesCount++;
          if (OldestTime == 0)
            OldestTime = Entry.Time;
          EntryCached = true;
          break;
        }
      }
    } while (0);
    if (EmptyCache)
      empty();
    else if (Interval >= 0)
      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
    if (!EntryCached)
      unmap(reinterpret_cast<void *>(Entry.MapBase), Entry.MapSize, UNMAP_ALL,
            &Entry.Data);
  }
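  // Looks for a cached block that can service an allocation of Size bytes at
  // the given Alignment: the headers must fit below the (aligned) allocation
  // position, and no more than MaxUnusedCachePages of committed memory may be
  // left unused in front of it.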
  bool retrieve(Options Options, uptr Size, uptr Alignment,
                LargeBlock::Header **H, bool *Zeroed) {
    const uptr PageSize = getPageSizeCached();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    bool Found = false;
    CachedBlock Entry;
    uptr HeaderPos;
    {
      ScopedLock L(Mutex);
      if (EntriesCount == 0)
        return false;
      for (u32 I = 0; I < MaxCount; I++) {
        const uptr CommitBase = Entries[I].CommitBase;
        if (!CommitBase)
          continue;
        const uptr CommitSize = Entries[I].CommitSize;
        const uptr AllocPos =
            roundDownTo(CommitBase + CommitSize - Size, Alignment);
        HeaderPos =
            AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
        if (HeaderPos > CommitBase + CommitSize)
          continue;
        if (HeaderPos < CommitBase ||
            AllocPos > CommitBase + PageSize * MaxUnusedCachePages)
          continue;
        Found = true;
        Entry = Entries[I];
        Entries[I].CommitBase = 0;
        break;
      }
    }
    if (Found) {
      *H = reinterpret_cast<LargeBlock::Header *>(
          LargeBlock::addHeaderTag<Config>(HeaderPos));
      *Zeroed = Entry.Time == 0;
      if (useMemoryTagging<Config>(Options))
        setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0, &Entry.Data);
      uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
      if (useMemoryTagging<Config>(Options)) {
        if (*Zeroed)
          storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
                    NewBlockBegin);
        else if (Entry.BlockBegin < NewBlockBegin)
          storeTags(Entry.BlockBegin, NewBlockBegin);
        else
          storeTags(untagPointer(NewBlockBegin),
                    untagPointer(Entry.BlockBegin));
      }
      (*H)->CommitBase = Entry.CommitBase;
      (*H)->CommitSize = Entry.CommitSize;
      (*H)->MapBase = Entry.MapBase;
      (*H)->MapSize = Entry.MapSize;
      (*H)->Data = Entry.Data;
      EntriesCount--;
    }
    return Found;
  }
  bool canCache(uptr Size) {
    return atomic_load_relaxed(&MaxEntriesCount) != 0U &&
           Size <= atomic_load_relaxed(&MaxEntrySize);
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval =
          Max(Min(static_cast<s32>(Value),
                  Config::SecondaryCacheMaxReleaseToOsIntervalMs),
              Config::SecondaryCacheMinReleaseToOsIntervalMs);
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    if (O == Option::MaxCacheEntriesCount) {
      const u32 MaxCount = static_cast<u32>(Value);
      if (MaxCount > Config::SecondaryCacheEntriesArraySize)
        return false;
      atomic_store_relaxed(&MaxEntriesCount, MaxCount);
      return true;
    }
    if (O == Option::MaxCacheEntrySize) {
      atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
      return true;
    }
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void releaseToOS() { releaseOlderThan(UINT64_MAX); }

  void disableMemoryTagging() {
    ScopedLock L(Mutex);
    for (u32 I = 0; I != Config::SecondaryCacheQuarantineSize; ++I) {
      if (Quarantine[I].CommitBase) {
        unmap(reinterpret_cast<void *>(Quarantine[I].MapBase),
              Quarantine[I].MapSize, UNMAP_ALL, &Quarantine[I].Data);
        Quarantine[I].CommitBase = 0;
      }
    }
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    for (u32 I = 0; I < MaxCount; I++)
      if (Entries[I].CommitBase)
        setMemoryPermission(Entries[I].CommitBase, Entries[I].CommitSize, 0,
                            &Entries[I].Data);
    QuarantinePos = -1U;
  }

  void disable() { Mutex.lock(); }

  void enable() { Mutex.unlock(); }

  void unmapTestOnly() { empty(); }

private:
  void empty() {
    struct {
      void *MapBase;
      uptr MapSize;
      MapPlatformData Data;
    } MapInfo[Config::SecondaryCacheEntriesArraySize];
    uptr N = 0;
    {
      ScopedLock L(Mutex);
      for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++) {
        if (!Entries[I].CommitBase)
          continue;
        MapInfo[N].MapBase = reinterpret_cast<void *>(Entries[I].MapBase);
        MapInfo[N].MapSize = Entries[I].MapSize;
        MapInfo[N].Data = Entries[I].Data;
        Entries[I].CommitBase = 0;
        N++;
      }
      EntriesCount = 0;
      IsFullEvents = 0;
    }
    for (uptr I = 0; I < N; I++)
      unmap(MapInfo[I].MapBase, MapInfo[I].MapSize, UNMAP_ALL,
            &MapInfo[I].Data);
  }

  struct CachedBlock {
    uptr CommitBase;
    uptr CommitSize;
    uptr MapBase;
    uptr MapSize;
    uptr BlockBegin;
    [[no_unique_address]] MapPlatformData Data;
    u64 Time;
  };

  void releaseIfOlderThan(CachedBlock &Entry, u64 Time) {
    if (!Entry.CommitBase || !Entry.Time)
      return;
    if (Entry.Time > Time) {
      if (OldestTime == 0 || Entry.Time < OldestTime)
        OldestTime = Entry.Time;
      return;
    }
    releasePagesToOS(Entry.CommitBase, 0, Entry.CommitSize, &Entry.Data);
    Entry.Time = 0;
  }

  void releaseOlderThan(u64 Time) {
    ScopedLock L(Mutex);
    if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
      return;
    OldestTime = 0;
    for (uptr I = 0; I < Config::SecondaryCacheQuarantineSize; I++)
      releaseIfOlderThan(Quarantine[I], Time);
    for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++)
      releaseIfOlderThan(Entries[I], Time);
  }

  HybridMutex Mutex;
  u32 EntriesCount = 0;
  u32 QuarantinePos = 0;
  atomic_u32 MaxEntriesCount = {};
  atomic_uptr MaxEntrySize = {};
  u64 OldestTime = 0;
  u32 IsFullEvents = 0;
  atomic_s32 ReleaseToOsIntervalMs = {};
  CachedBlock Entries[Config::SecondaryCacheEntriesArraySize] = {};
  CachedBlock Quarantine[Config::SecondaryCacheQuarantineSize] = {};
};
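// A rough usage sketch (Config, its SecondaryCache type and the cache
// constants referenced above are assumed to be provided by the platform
// configuration; in practice the Combined allocator drives this class):
//
//   scudo::MapAllocator<Config> Secondary;
//   Secondary.init(/*S=*/nullptr, /*ReleaseToOsInterval=*/1000);
//   const scudo::Options Opts = {};
//   void *P = Secondary.allocate(Opts, 1 << 20);
//   ...
//   Secondary.deallocate(Opts, P);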
template <typename Config> class MapAllocator {
public:
  void init(GlobalStats *S, s32 ReleaseToOsInterval = -1) {
    DCHECK_EQ(AllocatedBytes, 0U);
    DCHECK_EQ(FreedBytes, 0U);
    Cache.init(ReleaseToOsInterval);
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
  }

  void *allocate(Options Options, uptr Size, uptr AlignmentHint = 0,
                 uptr *BlockEnd = nullptr,
                 FillContentsMode FillContents = NoFill);

  void deallocate(Options Options, void *Ptr);

  static uptr getBlockEnd(void *Ptr) {
    auto *B = LargeBlock::getHeader<Config>(Ptr);
    return B->CommitBase + B->CommitSize;
  }

  static uptr getBlockSize(void *Ptr) {
    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
  }

  void getStats(ScopedString *Str) const;

  void disable() {
    Mutex.lock();
    Cache.disable();
  }

  void enable() {
    Cache.enable();
    Mutex.unlock();
  }

  template <typename F> void iterateOverBlocks(F Callback) const {
    for (const auto &H : InUseBlocks) {
      uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize();
      if (allocatorSupportsMemoryTagging<Config>())
        Ptr = untagPointer(Ptr);
      Callback(Ptr);
    }
  }

  uptr canCache(uptr Size) { return Cache.canCache(Size); }

  bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }

  void releaseToOS() { Cache.releaseToOS(); }

  void disableMemoryTagging() { Cache.disableMemoryTagging(); }

  void unmapTestOnly() { Cache.unmapTestOnly(); }

private:
  typename Config::SecondaryCache Cache;

  HybridMutex Mutex;
  DoublyLinkedList<LargeBlock::Header> InUseBlocks;
  uptr AllocatedBytes = 0;
  uptr FreedBytes = 0;
  uptr LargestSize = 0;
  u32 NumberOfAllocs = 0;
  u32 NumberOfFrees = 0;
  LocalStats Stats;
};

// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The hint
// parameter allows us to unmap spurious memory when dealing with larger
// (greater than a page) alignments on 32-bit platforms.
// Due to the sparsity of address space available on those platforms, requesting
// an allocation from the Secondary with a large alignment would end up wasting
// VA space (even though we are not committing the whole thing), hence the need
// to trim off some of the reserved space.
// For allocations requested with an alignment greater than or equal to a page,
// the committed memory will amount to something close to Size - AlignmentHint
// (pending rounding and headers).
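// For instance, assuming 4 KiB pages and a sub-page AlignmentHint, a Size of
// 100000 bytes gives RoundedSize = roundUpTo(100000 + header sizes, 4096) =
// 102400 bytes, all of it committed, inside a reservation of RoundedSize plus
// one guard page on each side.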
template <typename Config>
void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
                                     uptr *BlockEndPtr,
                                     FillContentsMode FillContents) {
  if (Options.get(OptionBit::AddLargeAllocationSlack))
    Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
  const uptr PageSize = getPageSizeCached();
  uptr RoundedSize =
      roundUpTo(roundUpTo(Size, Alignment) + LargeBlock::getHeaderSize() +
                    Chunk::getHeaderSize(),
                PageSize);
  if (Alignment > PageSize)
    RoundedSize += Alignment - PageSize;

  if (Alignment < PageSize && Cache.canCache(RoundedSize)) {
    LargeBlock::Header *H;
    bool Zeroed;
    if (Cache.retrieve(Options, Size, Alignment, &H, &Zeroed)) {
      const uptr BlockEnd = H->CommitBase + H->CommitSize;
      if (BlockEndPtr)
        *BlockEndPtr = BlockEnd;
      uptr HInt = reinterpret_cast<uptr>(H);
      if (allocatorSupportsMemoryTagging<Config>())
        HInt = untagPointer(HInt);
      const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
      void *Ptr = reinterpret_cast<void *>(PtrInt);
      if (FillContents && !Zeroed)
        memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
               BlockEnd - PtrInt);
      const uptr BlockSize = BlockEnd - HInt;
      {
        ScopedLock L(Mutex);
        InUseBlocks.push_back(H);
        AllocatedBytes += BlockSize;
        NumberOfAllocs++;
        Stats.add(StatAllocated, BlockSize);
        Stats.add(StatMapped, H->MapSize);
      }
      return Ptr;
    }
  }

  MapPlatformData Data = {};
  const uptr MapSize = RoundedSize + 2 * PageSize;
  uptr MapBase = reinterpret_cast<uptr>(
      map(nullptr, MapSize, nullptr, MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
  if (UNLIKELY(!MapBase))
    return nullptr;
  uptr CommitBase = MapBase + PageSize;
  uptr MapEnd = MapBase + MapSize;

  // In the unlikely event of alignments larger than a page, adjust the amount
  // of memory we want to commit, and trim the extra memory.
  if (UNLIKELY(Alignment >= PageSize)) {
    // For alignments greater than or equal to a page, the user pointer (e.g.
    // the pointer that is returned by the C or C++ allocation APIs) ends up on
    // a page boundary, and our headers will live in the preceding page.
    CommitBase = roundUpTo(MapBase + PageSize + 1, Alignment) - PageSize;
    const uptr NewMapBase = CommitBase - PageSize;
    DCHECK_GE(NewMapBase, MapBase);
    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
    // are less constrained memory wise, and that saves us two syscalls.
    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
      unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
      MapBase = NewMapBase;
    }
    const uptr NewMapEnd =
        CommitBase + PageSize + roundUpTo(Size, PageSize) + PageSize;
    DCHECK_LE(NewMapEnd, MapEnd);
    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
      unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
      MapEnd = NewMapEnd;
    }
  }

  const uptr CommitSize = MapEnd - PageSize - CommitBase;
  const uptr AllocPos = roundDownTo(CommitBase + CommitSize - Size, Alignment);
  mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0, &Data);
  const uptr HeaderPos =
      AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
      LargeBlock::addHeaderTag<Config>(HeaderPos));
  if (useMemoryTagging<Config>(Options))
    storeTags(LargeBlock::addHeaderTag<Config>(CommitBase),
              reinterpret_cast<uptr>(H + 1));
  H->MapBase = MapBase;
  H->MapSize = MapEnd - MapBase;
  H->CommitBase = CommitBase;
  H->CommitSize = CommitSize;
  H->Data = Data;
  if (BlockEndPtr)
    *BlockEndPtr = CommitBase + CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.push_back(H);
    AllocatedBytes += CommitSize;
    if (LargestSize < CommitSize)
      LargestSize = CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, CommitSize);
    Stats.add(StatMapped, H->MapSize);
  }
  return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize());
}
template <typename Config>
void MapAllocator<Config>::deallocate(Options Options, void *Ptr) {
  LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
  const uptr CommitSize = H->CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.remove(H);
    FreedBytes += CommitSize;
    NumberOfFrees++;
    Stats.sub(StatAllocated, CommitSize);
    Stats.sub(StatMapped, H->MapSize);
  }
  Cache.store(Options, H);
}

template <typename Config>
void MapAllocator<Config>::getStats(ScopedString *Str) const {
  Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times "
              "(%zuK), remains %u (%zuK) max %zuM\n",
              NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
              FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
              (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20);
}

} // namespace scudo

#endif // SCUDO_SECONDARY_H_