//===-- secondary.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_SECONDARY_H_
#define SCUDO_SECONDARY_H_

#include "chunk.h"
#include "common.h"
#include "list.h"
#include "memtag.h"
#include "mutex.h"
#include "options.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {
// This allocator wraps the platform allocation primitives, and as such is on
// the slower side and should preferably be used for larger sized allocations.
// Blocks allocated will be preceded and followed by a guard page, and hold
// their own header that is not checksummed: the guard pages and the Combined
// header should be enough for our purpose.

namespace LargeBlock {

struct alignas(Max<uptr>(archSupportsMemoryTagging()
                             ? archMemoryTagGranuleSize()
                             : 1,
                         1U << SCUDO_MIN_ALIGNMENT_LOG)) Header {
  LargeBlock::Header *Prev;
  LargeBlock::Header *Next;
  uptr CommitBase;
  uptr CommitSize;
  uptr MapBase;
  uptr MapSize;
  [[no_unique_address]] MapPlatformData Data;
};

static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
static_assert(!archSupportsMemoryTagging() ||
                  sizeof(Header) % archMemoryTagGranuleSize() == 0,
              "");

constexpr uptr getHeaderSize() { return sizeof(Header); }

template <typename Config> static uptr addHeaderTag(uptr Ptr) {
  if (allocatorSupportsMemoryTagging<Config>())
    return addFixedTag(Ptr, 1);
  return Ptr;
}

template <typename Config> static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(addHeaderTag<Config>(Ptr)) - 1;
}

template <typename Config> static Header *getHeader(const void *Ptr) {
  return getHeader<Config>(reinterpret_cast<uptr>(Ptr));
}

} // namespace LargeBlock
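
// Unmaps the whole region backing a secondary block (header, user area and
// guard pages). The MapPlatformData is copied out of the header before the
// mapping that contains it is torn down.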
static void unmap(LargeBlock::Header *H) {
  MapPlatformData Data = H->Data;
  unmap(reinterpret_cast<void *>(H->MapBase), H->MapSize, UNMAP_ALL, &Data);
}
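
// A no-op cache used when retaining secondary blocks is not desired: retrieve
// never hits, and store simply unmaps the block.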
class MapAllocatorNoCache {
public:
  void init(UNUSED s32 ReleaseToOsInterval) {}
  bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
                UNUSED LargeBlock::Header **H, UNUSED bool *Zeroed) {
    return false;
  }
  void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
  bool canCache(UNUSED uptr Size) { return false; }
  void disable() {}
  void enable() {}
  void releaseToOS() {}
  void disableMemoryTagging() {}
  void unmapTestOnly() {}
  bool setOption(Option O, UNUSED sptr Value) {
    if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
        O == Option::MaxCacheEntrySize)
      return false;
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }
};
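
// With memory tagging enabled and a commit size larger than MaxUnusedCachePages
// pages, mapSecondary below splits the commit region: the part up to
// Max(AllocPos, CommitBase + MaxUnusedCachePages pages) is mapped with
// MAP_MEMTAG, the remainder without.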
static const uptr MaxUnusedCachePages = 4U;

template <typename Config>
void mapSecondary(Options Options, uptr CommitBase, uptr CommitSize,
                  uptr AllocPos, uptr Flags, MapPlatformData *Data) {
  const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * getPageSizeCached();
  if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
    const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
    map(reinterpret_cast<void *>(CommitBase), UntaggedPos - CommitBase,
        "scudo:secondary", MAP_RESIZABLE | MAP_MEMTAG | Flags, Data);
    map(reinterpret_cast<void *>(UntaggedPos),
        CommitBase + CommitSize - UntaggedPos, "scudo:secondary",
        MAP_RESIZABLE | Flags, Data);
  } else {
    map(reinterpret_cast<void *>(CommitBase), CommitSize, "scudo:secondary",
        MAP_RESIZABLE | (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) |
            Flags,
        Data);
  }
}

// Template specialization to avoid producing zero-length array
template <typename T, size_t Size> class NonZeroLengthArray {
public:
  T &operator[](uptr Idx) { return values[Idx]; }

private:
  T values[Size];
};
template <typename T> class NonZeroLengthArray<T, 0> {
public:
  T &operator[](uptr UNUSED Idx) { UNREACHABLE("Unsupported!"); }
};
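
// An optional cache of recently freed secondary blocks, so that subsequent
// allocation requests of a suitable size can reuse an existing mapping instead
// of paying for a fresh map/unmap cycle. Entries can be released to the OS
// after a configurable interval, and can be quarantined when memory tagging is
// in use.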
template <typename Config> class MapAllocatorCache {
public:
  // Ensure the default maximum specified fits the array.
  static_assert(Config::SecondaryCacheDefaultMaxEntriesCount <=
                    Config::SecondaryCacheEntriesArraySize,
                "");

  void init(s32 ReleaseToOsInterval) {
    DCHECK_EQ(EntriesCount, 0U);
    setOption(Option::MaxCacheEntriesCount,
              static_cast<sptr>(Config::SecondaryCacheDefaultMaxEntriesCount));
    setOption(Option::MaxCacheEntrySize,
              static_cast<sptr>(Config::SecondaryCacheDefaultMaxEntrySize));
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }
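
  // Stores a freed block in the cache if it qualifies, otherwise unmaps it.
  // Under memory tagging, cached entries are kept inaccessible (MAP_NOACCESS)
  // and may first rotate through the quarantine.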
  void store(Options Options, LargeBlock::Header *H) {
    if (!canCache(H->CommitSize))
      return unmap(H);

    bool EntryCached = false;
    bool EmptyCache = false;
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    const u64 Time = getMonotonicTime();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    CachedBlock Entry;
    Entry.CommitBase = H->CommitBase;
    Entry.CommitSize = H->CommitSize;
    Entry.MapBase = H->MapBase;
    Entry.MapSize = H->MapSize;
    Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
    Entry.Data = H->Data;
    Entry.Time = Time;
    if (useMemoryTagging<Config>(Options)) {
      if (Interval == 0 && !SCUDO_FUCHSIA) {
        // Release the memory and make it inaccessible at the same time by
        // creating a new MAP_NOACCESS mapping on top of the existing mapping.
        // Fuchsia does not support replacing mappings by creating a new mapping
        // on top so we just do the two syscalls there.
        Entry.Time = 0;
        mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize,
                             Entry.CommitBase, MAP_NOACCESS, &Entry.Data);
      } else {
        setMemoryPermission(Entry.CommitBase, Entry.CommitSize, MAP_NOACCESS,
                            &Entry.Data);
      }
    } else if (Interval == 0) {
      releasePagesToOS(Entry.CommitBase, 0, Entry.CommitSize, &Entry.Data);
      Entry.Time = 0;
    }
    do {
      ScopedLock L(Mutex);
      if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
        // If we get here then memory tagging was disabled in between when we
        // read Options and when we locked Mutex. We can't insert our entry into
        // the quarantine or the cache because the permissions would be wrong so
        // just unmap it.
        break;
      }
      if (Config::SecondaryCacheQuarantineSize &&
          useMemoryTagging<Config>(Options)) {
        QuarantinePos =
            (QuarantinePos + 1) % Max(Config::SecondaryCacheQuarantineSize, 1u);
        if (!Quarantine[QuarantinePos].CommitBase) {
          Quarantine[QuarantinePos] = Entry;
          return;
        }
        CachedBlock PrevEntry = Quarantine[QuarantinePos];
        Quarantine[QuarantinePos] = Entry;
        if (OldestTime == 0)
          OldestTime = Entry.Time;
        Entry = PrevEntry;
      }
      if (EntriesCount >= MaxCount) {
        if (IsFullEvents++ == 4U)
          EmptyCache = true;
      } else {
        for (u32 I = 0; I < MaxCount; I++) {
          if (Entries[I].CommitBase)
            continue;
          if (I != 0)
            Entries[I] = Entries[0];
          Entries[0] = Entry;
          EntriesCount++;
          if (OldestTime == 0)
            OldestTime = Entry.Time;
          EntryCached = true;
          break;
        }
      }
    } while (0);
    if (EmptyCache)
      empty();
    else if (Interval >= 0)
      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
    if (!EntryCached)
      unmap(reinterpret_cast<void *>(Entry.MapBase), Entry.MapSize, UNMAP_ALL,
            &Entry.Data);
  }
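
  // Attempts to return a cached block large enough for Size with Alignment.
  // On success *H points at the block header, and *Zeroed tells the caller
  // whether the memory was released while cached (and thus reads as zero).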
  bool retrieve(Options Options, uptr Size, uptr Alignment,
                LargeBlock::Header **H, bool *Zeroed) {
    const uptr PageSize = getPageSizeCached();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    bool Found = false;
    CachedBlock Entry;
    uptr HeaderPos = 0;
    {
      ScopedLock L(Mutex);
      if (EntriesCount == 0)
        return false;
      for (u32 I = 0; I < MaxCount; I++) {
        const uptr CommitBase = Entries[I].CommitBase;
        if (!CommitBase)
          continue;
        const uptr CommitSize = Entries[I].CommitSize;
        const uptr AllocPos =
            roundDownTo(CommitBase + CommitSize - Size, Alignment);
        HeaderPos =
            AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
        if (HeaderPos > CommitBase + CommitSize)
          continue;
        if (HeaderPos < CommitBase ||
            AllocPos > CommitBase + PageSize * MaxUnusedCachePages)
          continue;
        Found = true;
        Entry = Entries[I];
        Entries[I].CommitBase = 0;
        break;
      }
    }
    if (Found) {
      *H = reinterpret_cast<LargeBlock::Header *>(
          LargeBlock::addHeaderTag<Config>(HeaderPos));
      *Zeroed = Entry.Time == 0;
      if (useMemoryTagging<Config>(Options))
        setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0, &Entry.Data);
      uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
      if (useMemoryTagging<Config>(Options)) {
        if (*Zeroed)
          storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
                    NewBlockBegin);
        else if (Entry.BlockBegin < NewBlockBegin)
          storeTags(Entry.BlockBegin, NewBlockBegin);
        else
          storeTags(untagPointer(NewBlockBegin),
                    untagPointer(Entry.BlockBegin));
      }
      (*H)->CommitBase = Entry.CommitBase;
      (*H)->CommitSize = Entry.CommitSize;
      (*H)->MapBase = Entry.MapBase;
      (*H)->MapSize = Entry.MapSize;
      (*H)->Data = Entry.Data;
      EntriesCount--;
    }
    return Found;
  }
  bool canCache(uptr Size) {
    return atomic_load_relaxed(&MaxEntriesCount) != 0U &&
           Size <= atomic_load_relaxed(&MaxEntrySize);
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval =
          Max(Min(static_cast<s32>(Value),
                  Config::SecondaryCacheMaxReleaseToOsIntervalMs),
              Config::SecondaryCacheMinReleaseToOsIntervalMs);
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    if (O == Option::MaxCacheEntriesCount) {
      const u32 MaxCount = static_cast<u32>(Value);
      if (MaxCount > Config::SecondaryCacheEntriesArraySize)
        return false;
      atomic_store_relaxed(&MaxEntriesCount, MaxCount);
      return true;
    }
    if (O == Option::MaxCacheEntrySize) {
      atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
      return true;
    }
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void releaseToOS() { releaseOlderThan(UINT64_MAX); }

  void disableMemoryTagging() {
    ScopedLock L(Mutex);
    for (u32 I = 0; I != Config::SecondaryCacheQuarantineSize; ++I) {
      if (Quarantine[I].CommitBase) {
        unmap(reinterpret_cast<void *>(Quarantine[I].MapBase),
              Quarantine[I].MapSize, UNMAP_ALL, &Quarantine[I].Data);
        Quarantine[I].CommitBase = 0;
      }
    }
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    for (u32 I = 0; I < MaxCount; I++)
      if (Entries[I].CommitBase)
        setMemoryPermission(Entries[I].CommitBase, Entries[I].CommitSize, 0,
                            &Entries[I].Data);
    QuarantinePos = -1U;
  }

  void disable() { Mutex.lock(); }

  void enable() { Mutex.unlock(); }

  void unmapTestOnly() { empty(); }

private:
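  // Unmaps every cached entry. The mappings are collected under the lock and
  // the unmap calls are issued after releasing it.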
  void empty() {
    struct {
      void *MapBase;
      uptr MapSize;
      MapPlatformData Data;
    } MapInfo[Config::SecondaryCacheEntriesArraySize];
    uptr N = 0;
    {
      ScopedLock L(Mutex);
      for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++) {
        if (!Entries[I].CommitBase)
          continue;
        MapInfo[N].MapBase = reinterpret_cast<void *>(Entries[I].MapBase);
        MapInfo[N].MapSize = Entries[I].MapSize;
        MapInfo[N].Data = Entries[I].Data;
        Entries[I].CommitBase = 0;
        N++;
      }
      EntriesCount = 0;
      IsFullEvents = 0;
    }
    for (uptr I = 0; I < N; I++)
      unmap(MapInfo[I].MapBase, MapInfo[I].MapSize, UNMAP_ALL,
            &MapInfo[I].Data);
  }

  struct CachedBlock {
    uptr CommitBase;
    uptr CommitSize;
    uptr MapBase;
    uptr MapSize;
    uptr BlockBegin;
    [[no_unique_address]] MapPlatformData Data;
    u64 Time;
  };
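
  // Releases the pages of an entry whose Time stamp is at or before Time;
  // otherwise updates OldestTime so a later pass knows where to start.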
  void releaseIfOlderThan(CachedBlock &Entry, u64 Time) {
    if (!Entry.CommitBase || !Entry.Time)
      return;
    if (Entry.Time > Time) {
      if (OldestTime == 0 || Entry.Time < OldestTime)
        OldestTime = Entry.Time;
      return;
    }
    releasePagesToOS(Entry.CommitBase, 0, Entry.CommitSize, &Entry.Data);
    Entry.Time = 0;
  }

  void releaseOlderThan(u64 Time) {
    ScopedLock L(Mutex);
    if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
      return;
    OldestTime = 0;
    for (uptr I = 0; I < Config::SecondaryCacheQuarantineSize; I++)
      releaseIfOlderThan(Quarantine[I], Time);
    for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++)
      releaseIfOlderThan(Entries[I], Time);
  }

  HybridMutex Mutex;
  u32 EntriesCount = 0;
  u32 QuarantinePos = 0;
  atomic_u32 MaxEntriesCount = {};
  atomic_uptr MaxEntrySize = {};
  u64 OldestTime = 0;
  u32 IsFullEvents = 0;
  atomic_s32 ReleaseToOsIntervalMs = {};

  CachedBlock Entries[Config::SecondaryCacheEntriesArraySize] = {};
  NonZeroLengthArray<CachedBlock, Config::SecondaryCacheQuarantineSize>
      Quarantine = {};
};
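
// The Secondary allocator proper: allocations are backed by dedicated mappings
// (with guard pages), tracked in an in-use list for iteration, and funneled
// through the configured cache on deallocation. A rough usage sketch, assuming
// a Config type that provides a SecondaryCache definition:
//   MapAllocator<Config> Secondary;
//   Secondary.init(/*S=*/nullptr);
//   void *P = Secondary.allocate(Options, Size);
//   ...
//   Secondary.deallocate(Options, P);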
template <typename Config> class MapAllocator {
public:
  void init(GlobalStats *S, s32 ReleaseToOsInterval = -1) {
    DCHECK_EQ(AllocatedBytes, 0U);
    DCHECK_EQ(FreedBytes, 0U);
    Cache.init(ReleaseToOsInterval);
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
  }

  void *allocate(Options Options, uptr Size, uptr AlignmentHint = 0,
                 uptr *BlockEnd = nullptr,
                 FillContentsMode FillContents = NoFill);

  void deallocate(Options Options, void *Ptr);

  static uptr getBlockEnd(void *Ptr) {
    auto *B = LargeBlock::getHeader<Config>(Ptr);
    return B->CommitBase + B->CommitSize;
  }

  static uptr getBlockSize(void *Ptr) {
    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
  }

  void getStats(ScopedString *Str) const;

  void disable() {
    Mutex.lock();
    Cache.disable();
  }

  void enable() {
    Cache.enable();
    Mutex.unlock();
  }

  template <typename F> void iterateOverBlocks(F Callback) const {
    for (const auto &H : InUseBlocks) {
      uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize();
      if (allocatorSupportsMemoryTagging<Config>())
        Ptr = untagPointer(Ptr);
      Callback(Ptr);
    }
  }

  bool canCache(uptr Size) { return Cache.canCache(Size); }

  bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }

  void releaseToOS() { Cache.releaseToOS(); }

  void disableMemoryTagging() { Cache.disableMemoryTagging(); }

  void unmapTestOnly() { Cache.unmapTestOnly(); }

private:
  typename Config::SecondaryCache Cache;

  HybridMutex Mutex;
  DoublyLinkedList<LargeBlock::Header> InUseBlocks;
  uptr AllocatedBytes = 0;
  uptr FreedBytes = 0;
  uptr LargestSize = 0;
  u32 NumberOfAllocs = 0;
  u32 NumberOfFrees = 0;
  LocalStats Stats;
};

// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The hint
// parameter allows us to unmap spurious memory when dealing with larger
// (greater than a page) alignments on 32-bit platforms.
// Due to the sparsity of address space available on those platforms, requesting
// an allocation from the Secondary with a large alignment would end up wasting
// VA space (even though we are not committing the whole thing), hence the need
// to trim off some of the reserved space.
// For allocations requested with an alignment greater than or equal to a page,
// the committed memory will amount to something close to Size - AlignmentHint
// (pending rounding and headers).
template <typename Config>
void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
                                     uptr *BlockEndPtr,
                                     FillContentsMode FillContents) {
  if (Options.get(OptionBit::AddLargeAllocationSlack))
    Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
  const uptr PageSize = getPageSizeCached();
  uptr RoundedSize =
      roundUpTo(roundUpTo(Size, Alignment) + LargeBlock::getHeaderSize() +
                    Chunk::getHeaderSize(),
                PageSize);
  if (Alignment > PageSize)
    RoundedSize += Alignment - PageSize;

  if (Alignment < PageSize && Cache.canCache(RoundedSize)) {
    LargeBlock::Header *H;
    bool Zeroed;
    if (Cache.retrieve(Options, Size, Alignment, &H, &Zeroed)) {
      const uptr BlockEnd = H->CommitBase + H->CommitSize;
      if (BlockEndPtr)
        *BlockEndPtr = BlockEnd;
      uptr HInt = reinterpret_cast<uptr>(H);
      if (allocatorSupportsMemoryTagging<Config>())
        HInt = untagPointer(HInt);
      const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
      void *Ptr = reinterpret_cast<void *>(PtrInt);
      if (FillContents && !Zeroed)
        memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
               BlockEnd - PtrInt);
      const uptr BlockSize = BlockEnd - HInt;
      {
        ScopedLock L(Mutex);
        InUseBlocks.push_back(H);
        AllocatedBytes += BlockSize;
        NumberOfAllocs++;
        Stats.add(StatAllocated, BlockSize);
        Stats.add(StatMapped, H->MapSize);
      }
      return Ptr;
    }
  }

  MapPlatformData Data = {};
  const uptr MapSize = RoundedSize + 2 * PageSize;
  uptr MapBase = reinterpret_cast<uptr>(
      map(nullptr, MapSize, nullptr, MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
  if (UNLIKELY(!MapBase))
    return nullptr;
  uptr CommitBase = MapBase + PageSize;
  uptr MapEnd = MapBase + MapSize;

  // In the unlikely event of alignments larger than a page, adjust the amount
  // of memory we want to commit, and trim the extra memory.
  if (UNLIKELY(Alignment >= PageSize)) {
    // For alignments greater than or equal to a page, the user pointer (e.g.,
    // the pointer that is returned by the C or C++ allocation APIs) ends up on
    // a page boundary, and our headers will live in the preceding page.
    CommitBase = roundUpTo(MapBase + PageSize + 1, Alignment) - PageSize;
    const uptr NewMapBase = CommitBase - PageSize;
    DCHECK_GE(NewMapBase, MapBase);
    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
    // are less constrained memory wise, and that saves us two syscalls.
    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
      unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
      MapBase = NewMapBase;
    }
    const uptr NewMapEnd =
        CommitBase + PageSize + roundUpTo(Size, PageSize) + PageSize;
    DCHECK_LE(NewMapEnd, MapEnd);
    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
      unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
      MapEnd = NewMapEnd;
    }
  }

  const uptr CommitSize = MapEnd - PageSize - CommitBase;
  const uptr AllocPos = roundDownTo(CommitBase + CommitSize - Size, Alignment);
  mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0, &Data);
  const uptr HeaderPos =
      AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
      LargeBlock::addHeaderTag<Config>(HeaderPos));
  if (useMemoryTagging<Config>(Options))
    storeTags(LargeBlock::addHeaderTag<Config>(CommitBase),
              reinterpret_cast<uptr>(H + 1));
  H->MapBase = MapBase;
  H->MapSize = MapEnd - MapBase;
  H->CommitBase = CommitBase;
  H->CommitSize = CommitSize;
  H->Data = Data;
  if (BlockEndPtr)
    *BlockEndPtr = CommitBase + CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.push_back(H);
    AllocatedBytes += CommitSize;
    if (LargestSize < CommitSize)
      LargestSize = CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, CommitSize);
    Stats.add(StatMapped, H->MapSize);
  }
  return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize());
}
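
// Removes the block from the in-use list, updates the stats, and hands the
// block to the cache, which either keeps it for reuse or unmaps it.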
template <typename Config>
void MapAllocator<Config>::deallocate(Options Options, void *Ptr) {
  LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
  const uptr CommitSize = H->CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.remove(H);
    FreedBytes += CommitSize;
    NumberOfFrees++;
    Stats.sub(StatAllocated, CommitSize);
    Stats.sub(StatMapped, H->MapSize);
  }
  Cache.store(Options, H);
}

template <typename Config>
void MapAllocator<Config>::getStats(ScopedString *Str) const {
  Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times "
              "(%zuK), remains %u (%zuK) max %zuM\n",
              NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
              FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
              (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20);
}

} // namespace scudo

#endif // SCUDO_SECONDARY_H_