//===-- secondary.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_SECONDARY_H_
#define SCUDO_SECONDARY_H_

#include "chunk.h"
#include "common.h"
#include "list.h"
#include "mem_map.h"
#include "memtag.h"
#include "mutex.h"
#include "options.h"
#include "stats.h"
#include "string_utils.h"
#include "thread_annotations.h"

namespace scudo {
// This allocator wraps the platform allocation primitives, and as such is on
// the slower side and should preferably be used for larger sized allocations.
// Blocks allocated will be preceded and followed by a guard page, and hold
// their own header that is not checksummed: the guard pages and the Combined
// header should be enough for our purpose.

namespace LargeBlock {

struct alignas(Max<uptr>(archSupportsMemoryTagging()
                             ? archMemoryTagGranuleSize()
                             : 1,
                         1U << SCUDO_MIN_ALIGNMENT_LOG)) Header {
  LargeBlock::Header *Prev;
  LargeBlock::Header *Next;
  uptr CommitBase;
  uptr CommitSize;
  MemMapT MemMap;
};

static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
static_assert(!archSupportsMemoryTagging() ||
                  sizeof(Header) % archMemoryTagGranuleSize() == 0,
              "");

constexpr uptr getHeaderSize() { return sizeof(Header); }

template <typename Config> static uptr addHeaderTag(uptr Ptr) {
  if (allocatorSupportsMemoryTagging<Config>())
    return addFixedTag(Ptr, 1);
  return Ptr;
}

template <typename Config> static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(addHeaderTag<Config>(Ptr)) - 1;
}

template <typename Config> static Header *getHeader(const void *Ptr) {
  return getHeader<Config>(reinterpret_cast<uptr>(Ptr));
}

} // namespace LargeBlock
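
// For orientation, a block set up by MapAllocator::allocate() below looks
// roughly like this (a sketch derived from the code in this file, not a
// layout contract):
//
//   guard page (never committed)
//   CommitBase              -> possibly unused leading space
//   HeaderPos               -> LargeBlock::Header, then the Chunk header
//   AllocPos                -> user data, pushed towards the end of the commit
//   CommitBase + CommitSize -> end of the committed region
//   guard page (never committed)
//
// getHeader() recovers the LargeBlock::Header by stepping back one Header from
// the block-begin pointer handed back to the frontend.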

static inline void unmap(LargeBlock::Header *H) {
  // Note that the `H->MemMap` is stored on the pages managed by itself. Take
  // over the ownership before unmap() so that any operation along with unmap()
  // won't touch inaccessible pages.
  MemMapT MemMap = H->MemMap;
  MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
}

namespace {

struct CachedBlock {
  uptr CommitBase = 0;
  uptr CommitSize = 0;
  uptr BlockBegin = 0;
  MemMapT MemMap = {};
  u64 Time = 0;

  bool isValid() { return CommitBase != 0; }

  void invalidate() { CommitBase = 0; }
};

} // namespace

template <typename Config> class MapAllocatorNoCache {
public:
  void init(UNUSED s32 ReleaseToOsInterval) {}
  bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
                UNUSED uptr HeadersSize, UNUSED LargeBlock::Header **H,
                UNUSED bool *Zeroed) {
    return false;
  }
  void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
  bool canCache(UNUSED uptr Size) { return false; }
  void disable() {}
  void enable() {}
  void releaseToOS() {}
  void disableMemoryTagging() {}
  void unmapTestOnly() {}
  bool setOption(Option O, UNUSED sptr Value) {
    if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
        O == Option::MaxCacheEntrySize)
      return false;
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void getStats(UNUSED ScopedString *Str) {
    Str->append("Secondary Cache Disabled\n");
  }
};
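
// A configuration that wants the Secondary to bypass caching entirely can plug
// this class in as its cache. As an illustration only (the actual nesting
// lives in the allocator configs, not in this header), such a config would
// carry something along the lines of:
//
//   struct Secondary {
//     template <typename Cfg> using CacheT = MapAllocatorNoCache<Cfg>;
//   };
//
// which is what the `typename Config::Secondary::template CacheT<Config>`
// member of MapAllocator below instantiates.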

static const uptr MaxUnusedCachePages = 4U;

template <typename Config>
bool mapSecondary(const Options &Options, uptr CommitBase, uptr CommitSize,
                  uptr AllocPos, uptr Flags, MemMapT &MemMap) {
  Flags |= MAP_RESIZABLE;
  Flags |= MAP_ALLOWNOMEM;

  const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * getPageSizeCached();
  if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
    const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
    return MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary",
                        MAP_MEMTAG | Flags) &&
           MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
                        "scudo:secondary", Flags);
  } else {
    const uptr RemapFlags =
        (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
    return MemMap.remap(CommitBase, CommitSize, "scudo:secondary", RemapFlags);
  }
}
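
// A note on the tagged/untagged split above (a reading of the branch, not
// additional behavior): with tagging in use and a commit larger than
// MaxUnusedCacheBytes, only
//   [CommitBase, Max(AllocPos, CommitBase + MaxUnusedCacheBytes))
// is remapped with MAP_MEMTAG; the rest of the commit is remapped without it.
// That confines hardware tagging to the prefix the allocator itself tags (the
// headers and the bounded leading slack that retrieve() may have to retag),
// while the bulk of a large user region stays untagged.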

// Template specialization to avoid producing a zero-length array.
template <typename T, size_t Size> class NonZeroLengthArray {
public:
  T &operator[](uptr Idx) { return values[Idx]; }

private:
  T values[Size];
};

template <typename T> class NonZeroLengthArray<T, 0> {
public:
  T &operator[](uptr UNUSED Idx) { UNREACHABLE("Unsupported!"); }
};
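
// Rationale (descriptive only): when a cache config sets QuarantineSize to 0,
// the Quarantine member of MapAllocatorCache below would otherwise have to be
// `CachedBlock Quarantine[0]`, which is not valid C++. The zero-sized
// specialization stores nothing, and its operator[] is never reached because
// every quarantine access is guarded by CacheConfig::QuarantineSize.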

template <typename Config> class MapAllocatorCache {
public:
  using CacheConfig = typename Config::Secondary::Cache;

  void getStats(ScopedString *Str) {
    ScopedLock L(Mutex);
    uptr Integral;
    uptr Fractional;
    computePercentage(SuccessfulRetrieves, CallsToRetrieve, &Integral,
                      &Fractional);
    Str->append("Stats: MapAllocatorCache: EntriesCount: %d, "
                "MaxEntriesCount: %u, MaxEntrySize: %zu\n",
                EntriesCount, atomic_load_relaxed(&MaxEntriesCount),
                atomic_load_relaxed(&MaxEntrySize));
    Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                "(%zu.%02zu%%)\n",
                SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
    for (CachedBlock Entry : Entries) {
      if (!Entry.isValid())
        continue;
      Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                  "BlockSize: %zu %s\n",
                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
    }
  }

  // Ensure the default maximum specified fits the array.
  static_assert(CacheConfig::DefaultMaxEntriesCount <=
                    CacheConfig::EntriesArraySize,
                "");

  void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(EntriesCount, 0U);
    setOption(Option::MaxCacheEntriesCount,
              static_cast<sptr>(CacheConfig::DefaultMaxEntriesCount));
    setOption(Option::MaxCacheEntrySize,
              static_cast<sptr>(CacheConfig::DefaultMaxEntrySize));
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }

  void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
    if (!canCache(H->CommitSize))
      return unmap(H);

    bool EntryCached = false;
    bool EmptyCache = false;
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    const u64 Time = getMonotonicTimeFast();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    CachedBlock Entry;
    Entry.CommitBase = H->CommitBase;
    Entry.CommitSize = H->CommitSize;
    Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
    Entry.MemMap = H->MemMap;
    Entry.Time = Time;
    if (useMemoryTagging<Config>(Options)) {
      if (Interval == 0 && !SCUDO_FUCHSIA) {
        // Release the memory and make it inaccessible at the same time by
        // creating a new MAP_NOACCESS mapping on top of the existing mapping.
        // Fuchsia does not support replacing mappings by creating a new mapping
        // on top so we just do the two syscalls there.
        Entry.Time = 0;
        mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize,
                             Entry.CommitBase, MAP_NOACCESS, Entry.MemMap);
      } else {
        Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
                                         MAP_NOACCESS);
      }
    } else if (Interval == 0) {
      Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
      Entry.Time = 0;
    }
    do {
      ScopedLock L(Mutex);
      if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
        // If we get here then memory tagging was disabled in between when we
        // read Options and when we locked Mutex. We can't insert our entry into
        // the quarantine or the cache because the permissions would be wrong so
        // just unmap it.
        break;
      }
      if (CacheConfig::QuarantineSize && useMemoryTagging<Config>(Options)) {
        QuarantinePos =
            (QuarantinePos + 1) % Max(CacheConfig::QuarantineSize, 1u);
        if (!Quarantine[QuarantinePos].isValid()) {
          Quarantine[QuarantinePos] = Entry;
          return;
        }
        CachedBlock PrevEntry = Quarantine[QuarantinePos];
        Quarantine[QuarantinePos] = Entry;
        if (OldestTime == 0)
          OldestTime = Entry.Time;
        Entry = PrevEntry;
      }
      if (EntriesCount >= MaxCount) {
        if (IsFullEvents++ == 4U)
          EmptyCache = true;
      } else {
        for (u32 I = 0; I < MaxCount; I++) {
          if (Entries[I].isValid())
            continue;
          if (I != 0)
            Entries[I] = Entries[0];
          Entries[0] = Entry;
          EntriesCount++;
          if (OldestTime == 0)
            OldestTime = Entry.Time;
          EntryCached = true;
          break;
        }
      }
    } while (0);
    if (EmptyCache)
      empty();
    else if (Interval >= 0)
      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
    if (!EntryCached)
      Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
  }
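
  // A brief reading of store() above (descriptive, not additional behavior):
  // with memory tagging enabled, a block first rotates through the Quarantine
  // ring and the entry it displaces is what actually gets cached; a cached
  // entry is inserted at Entries[0], the most-recently-used slot, with the
  // previous front entry moved to the first free slot; and once the cache has
  // been found full repeatedly (tracked by IsFullEvents), it is emptied
  // wholesale instead of evicting a single entry.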

  bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
                LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
    const uptr PageSize = getPageSizeCached();
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    // 10% of the requested size proved to be the optimal choice for
    // retrieving cached blocks after testing several options.
    constexpr u32 FragmentedBytesDivisor = 10;
    bool Found = false;
    CachedBlock Entry;
    uptr EntryHeaderPos = 0;
    {
      ScopedLock L(Mutex);
      CallsToRetrieve++;
      if (EntriesCount == 0)
        return false;
      u32 OptimalFitIndex = 0;
      uptr MinDiff = UINTPTR_MAX;
      for (u32 I = 0; I < MaxCount; I++) {
        if (!Entries[I].isValid())
          continue;
        const uptr CommitBase = Entries[I].CommitBase;
        const uptr CommitSize = Entries[I].CommitSize;
        const uptr AllocPos =
            roundDown(CommitBase + CommitSize - Size, Alignment);
        const uptr HeaderPos = AllocPos - HeadersSize;
        if (HeaderPos > CommitBase + CommitSize)
          continue;
        if (HeaderPos < CommitBase ||
            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
          continue;
        }
        Found = true;
        const uptr Diff = HeaderPos - CommitBase;
        // Immediately use a cached block if its size is close enough to the
        // requested size.
        const uptr MaxAllowedFragmentedBytes =
            (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
        if (Diff <= MaxAllowedFragmentedBytes) {
          OptimalFitIndex = I;
          EntryHeaderPos = HeaderPos;
          break;
        }
        // Keep track of the smallest cached block
        // that is greater than (AllocSize + HeaderSize).
        if (Diff > MinDiff)
          continue;
        OptimalFitIndex = I;
        MinDiff = Diff;
        EntryHeaderPos = HeaderPos;
      }
      if (Found) {
        Entry = Entries[OptimalFitIndex];
        Entries[OptimalFitIndex].invalidate();
        EntriesCount--;
        SuccessfulRetrieves++;
      }
    }
    if (!Found)
      return false;

    *H = reinterpret_cast<LargeBlock::Header *>(
        LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
    *Zeroed = Entry.Time == 0;
    if (useMemoryTagging<Config>(Options))
      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
    uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
    if (useMemoryTagging<Config>(Options)) {
      if (*Zeroed) {
        storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
                  NewBlockBegin);
      } else if (Entry.BlockBegin < NewBlockBegin) {
        storeTags(Entry.BlockBegin, NewBlockBegin);
      } else {
        storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
      }
    }
    (*H)->CommitBase = Entry.CommitBase;
    (*H)->CommitSize = Entry.CommitSize;
    (*H)->MemMap = Entry.MemMap;
    return true;
  }
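
  // The fit policy in retrieve() above, restated (descriptive only): Diff is
  // the space that would be wasted at the front of a candidate entry. An entry
  // is taken immediately if that waste is within 1/FragmentedBytesDivisor
  // (~10%) of its usable region; otherwise the scan keeps the candidate with
  // the smallest waste, so a successful retrieval is a best fit among the
  // valid cache entries.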

  bool canCache(uptr Size) {
    return atomic_load_relaxed(&MaxEntriesCount) != 0U &&
           Size <= atomic_load_relaxed(&MaxEntrySize);
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval = Max(
          Min(static_cast<s32>(Value), CacheConfig::MaxReleaseToOsIntervalMs),
          CacheConfig::MinReleaseToOsIntervalMs);
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    if (O == Option::MaxCacheEntriesCount) {
      const u32 MaxCount = static_cast<u32>(Value);
      if (MaxCount > CacheConfig::EntriesArraySize)
        return false;
      atomic_store_relaxed(&MaxEntriesCount, MaxCount);
      return true;
    }
    if (O == Option::MaxCacheEntrySize) {
      atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
      return true;
    }
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void releaseToOS() { releaseOlderThan(UINT64_MAX); }

  void disableMemoryTagging() EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    for (u32 I = 0; I != CacheConfig::QuarantineSize; ++I) {
      if (Quarantine[I].isValid()) {
        MemMapT &MemMap = Quarantine[I].MemMap;
        MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
        Quarantine[I].invalidate();
      }
    }
    const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
    for (u32 I = 0; I < MaxCount; I++) {
      if (Entries[I].isValid()) {
        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
                                              Entries[I].CommitSize, 0);
      }
    }
    QuarantinePos = -1U;
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS { Mutex.lock(); }

  void enable() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }

  void unmapTestOnly() { empty(); }

private:
  void empty() {
    MemMapT MapInfo[CacheConfig::EntriesArraySize];
    uptr N = 0;
    {
      ScopedLock L(Mutex);
      for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++) {
        if (!Entries[I].isValid())
          continue;
        MapInfo[N] = Entries[I].MemMap;
        Entries[I].invalidate();
        N++;
      }
      EntriesCount = 0;
      IsFullEvents = 0;
    }
    for (uptr I = 0; I < N; I++) {
      MemMapT &MemMap = MapInfo[I];
      MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
    }
  }

  void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
    if (!Entry.isValid() || !Entry.Time)
      return;
    if (Entry.Time > Time) {
      if (OldestTime == 0 || Entry.Time < OldestTime)
        OldestTime = Entry.Time;
      return;
    }
    Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
    Entry.Time = 0;
  }

  void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
      return;
    OldestTime = 0;
    for (uptr I = 0; I < CacheConfig::QuarantineSize; I++)
      releaseIfOlderThan(Quarantine[I], Time);
    for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++)
      releaseIfOlderThan(Entries[I], Time);
  }

  HybridMutex Mutex;
  u32 EntriesCount GUARDED_BY(Mutex) = 0;
  u32 QuarantinePos GUARDED_BY(Mutex) = 0;
  atomic_u32 MaxEntriesCount = {};
  atomic_uptr MaxEntrySize = {};
  u64 OldestTime GUARDED_BY(Mutex) = 0;
  u32 IsFullEvents GUARDED_BY(Mutex) = 0;
  atomic_s32 ReleaseToOsIntervalMs = {};
  u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
  u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;

  CachedBlock Entries[CacheConfig::EntriesArraySize] GUARDED_BY(Mutex) = {};
  NonZeroLengthArray<CachedBlock, CacheConfig::QuarantineSize>
      Quarantine GUARDED_BY(Mutex) = {};
};
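
// Illustrative only: the cache is tuned at runtime through setOption(), which
// the frontend forwards (for instance from mallopt-style entry points). A
// direct call on a cache instance would look like:
//
//   Cache.setOption(Option::MaxCacheEntriesCount, 16);    // capped by EntriesArraySize
//   Cache.setOption(Option::MaxCacheEntrySize, 1 << 20);  // only cache blocks <= 1 MiB
//   Cache.setOption(Option::ReleaseInterval, 1000);       // clamped to the config bounds
//
// Out-of-range values behave as coded above: an entries count larger than
// EntriesArraySize is rejected, and the release interval is clamped to
// [MinReleaseToOsIntervalMs, MaxReleaseToOsIntervalMs].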

template <typename Config> class MapAllocator {
public:
  void init(GlobalStats *S,
            s32 ReleaseToOsInterval = -1) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(AllocatedBytes, 0U);
    DCHECK_EQ(FreedBytes, 0U);
    Cache.init(ReleaseToOsInterval);
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
  }

  void *allocate(const Options &Options, uptr Size, uptr AlignmentHint = 0,
                 uptr *BlockEnd = nullptr,
                 FillContentsMode FillContents = NoFill);

  void deallocate(const Options &Options, void *Ptr);

  static uptr getBlockEnd(void *Ptr) {
    auto *B = LargeBlock::getHeader<Config>(Ptr);
    return B->CommitBase + B->CommitSize;
  }

  static uptr getBlockSize(void *Ptr) {
    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
  }

  static constexpr uptr getHeadersSize() {
    return Chunk::getHeaderSize() + LargeBlock::getHeaderSize();
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS {
    Mutex.lock();
    Cache.disable();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    Cache.enable();
    Mutex.unlock();
  }

  template <typename F> void iterateOverBlocks(F Callback) const {
    Mutex.assertHeld();
    for (const auto &H : InUseBlocks) {
      uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize();
      if (allocatorSupportsMemoryTagging<Config>())
        Ptr = untagPointer(Ptr);
      Callback(Ptr);
    }
  }

  bool canCache(uptr Size) { return Cache.canCache(Size); }

  bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }

  void releaseToOS() { Cache.releaseToOS(); }

  void disableMemoryTagging() { Cache.disableMemoryTagging(); }

  void unmapTestOnly() { Cache.unmapTestOnly(); }

  void getStats(ScopedString *Str);

private:
  typename Config::Secondary::template CacheT<Config> Cache;

  mutable HybridMutex Mutex;
  DoublyLinkedList<LargeBlock::Header> InUseBlocks GUARDED_BY(Mutex);
  uptr AllocatedBytes GUARDED_BY(Mutex) = 0;
  uptr FreedBytes GUARDED_BY(Mutex) = 0;
  uptr FragmentedBytes GUARDED_BY(Mutex) = 0;
  uptr LargestSize GUARDED_BY(Mutex) = 0;
  u32 NumberOfAllocs GUARDED_BY(Mutex) = 0;
  u32 NumberOfFrees GUARDED_BY(Mutex) = 0;
  LocalStats Stats GUARDED_BY(Mutex);
};
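
// A minimal usage sketch (illustration only; the real caller is the Combined
// allocator, and the names below are hypothetical):
//
//   MapAllocator<MyConfig> Secondary;
//   Secondary.init(/*S=*/nullptr, /*ReleaseToOsInterval=*/1000);
//   uptr BlockEnd;
//   void *P = Secondary.allocate(Options, Size, /*AlignmentHint=*/0, &BlockEnd);
//   ...
//   Secondary.deallocate(Options, P);
//
// Note that iterateOverBlocks() only asserts that Mutex is held, so callers
// are expected to disable() the allocator around the iteration.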

// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The hint
// parameter allows us to unmap spurious memory when dealing with larger
// (greater than a page) alignments on 32-bit platforms.
// Due to the sparsity of address space available on those platforms, requesting
// an allocation from the Secondary with a large alignment would end up wasting
// VA space (even though we are not committing the whole thing), hence the need
// to trim off some of the reserved space.
// For allocations requested with an alignment greater than or equal to a page,
// the committed memory will amount to something close to Size - AlignmentHint
// (pending rounding and headers).
template <typename Config>
void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
                                     uptr Alignment, uptr *BlockEndPtr,
                                     FillContentsMode FillContents) {
  if (Options.get(OptionBit::AddLargeAllocationSlack))
    Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
  const uptr PageSize = getPageSizeCached();

  // Note that cached blocks may already have an aligned address. Thus we simply
  // pass the required size (`Size` + `getHeadersSize()`) to do the cache lookup.
  const uptr MinNeededSizeForCache = roundUp(Size + getHeadersSize(), PageSize);

  if (Alignment < PageSize && Cache.canCache(MinNeededSizeForCache)) {
    LargeBlock::Header *H;
    bool Zeroed;
    if (Cache.retrieve(Options, Size, Alignment, getHeadersSize(), &H,
                       &Zeroed)) {
      const uptr BlockEnd = H->CommitBase + H->CommitSize;
      if (BlockEndPtr)
        *BlockEndPtr = BlockEnd;
      uptr HInt = reinterpret_cast<uptr>(H);
      if (allocatorSupportsMemoryTagging<Config>())
        HInt = untagPointer(HInt);
      const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
      void *Ptr = reinterpret_cast<void *>(PtrInt);
      if (FillContents && !Zeroed)
        memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
               BlockEnd - PtrInt);
      {
        ScopedLock L(Mutex);
        InUseBlocks.push_back(H);
        AllocatedBytes += H->CommitSize;
        FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize;
        NumberOfAllocs++;
        Stats.add(StatAllocated, H->CommitSize);
        Stats.add(StatMapped, H->MemMap.getCapacity());
      }
      return Ptr;
    }
  }

  uptr RoundedSize =
      roundUp(roundUp(Size, Alignment) + getHeadersSize(), PageSize);
  if (Alignment > PageSize)
    RoundedSize += Alignment - PageSize;

  ReservedMemoryT ReservedMemory;
  const uptr MapSize = RoundedSize + 2 * PageSize;
  if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr,
                                      MAP_ALLOWNOMEM))) {
    return nullptr;
  }

  // Take the entire ownership of reserved region.
  MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
                                           ReservedMemory.getCapacity());
  uptr MapBase = MemMap.getBase();
  uptr CommitBase = MapBase + PageSize;
  uptr MapEnd = MapBase + MapSize;

  // In the unlikely event of alignments larger than a page, adjust the amount
  // of memory we want to commit, and trim the extra memory.
  if (UNLIKELY(Alignment >= PageSize)) {
    // For alignments greater than or equal to a page, the user pointer (e.g.
    // the pointer that is returned by the C or C++ allocation APIs) ends up on
    // a page boundary, and our headers will live in the preceding page.
    CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
    const uptr NewMapBase = CommitBase - PageSize;
    DCHECK_GE(NewMapBase, MapBase);
    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
    // are less constrained memory wise, and that saves us two syscalls.
    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
      MemMap.unmap(MapBase, NewMapBase - MapBase);
      MapBase = NewMapBase;
    }
    const uptr NewMapEnd =
        CommitBase + PageSize + roundUp(Size, PageSize) + PageSize;
    DCHECK_LE(NewMapEnd, MapEnd);
    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
      MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
      MapEnd = NewMapEnd;
    }
  }

  const uptr CommitSize = MapEnd - PageSize - CommitBase;
  const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
  if (!mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0,
                            MemMap)) {
    MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
    return nullptr;
  }
  const uptr HeaderPos = AllocPos - getHeadersSize();
  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
      LargeBlock::addHeaderTag<Config>(HeaderPos));
  if (useMemoryTagging<Config>(Options))
    storeTags(LargeBlock::addHeaderTag<Config>(CommitBase),
              reinterpret_cast<uptr>(H + 1));
  H->CommitBase = CommitBase;
  H->CommitSize = CommitSize;
  H->MemMap = MemMap;
  if (BlockEndPtr)
    *BlockEndPtr = CommitBase + CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.push_back(H);
    AllocatedBytes += CommitSize;
    FragmentedBytes += H->MemMap.getCapacity() - CommitSize;
    if (LargestSize < CommitSize)
      LargestSize = CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, CommitSize);
    Stats.add(StatMapped, H->MemMap.getCapacity());
  }
  return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize());
}
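
// For illustration, the map/commit layout produced by the slow path above,
// assuming Alignment <= PageSize (symbolic, derived from the code, not a
// contract):
//
//   MapSize    = RoundedSize + 2 * PageSize           // one guard page per side
//   CommitBase = MapBase + PageSize                   // skip the front guard page
//   CommitSize = MapEnd - PageSize - CommitBase       // == RoundedSize
//   AllocPos   = roundDown(CommitBase + CommitSize - Size, Alignment)
//   HeaderPos  = AllocPos - getHeadersSize()
//   returned   = HeaderPos + LargeBlock::getHeaderSize()
//
// i.e. the user data is pushed towards the end of the commit, the LargeBlock
// and Chunk headers sit immediately before it, and the first and last pages of
// the reservation are never committed, acting as guard pages.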

template <typename Config>
void MapAllocator<Config>::deallocate(const Options &Options, void *Ptr)
    EXCLUDES(Mutex) {
  LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
  const uptr CommitSize = H->CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.remove(H);
    FreedBytes += CommitSize;
    FragmentedBytes -= H->MemMap.getCapacity() - CommitSize;
    NumberOfFrees++;
    Stats.sub(StatAllocated, CommitSize);
    Stats.sub(StatMapped, H->MemMap.getCapacity());
  }
  Cache.store(Options, H);
}

template <typename Config>
void MapAllocator<Config>::getStats(ScopedString *Str) EXCLUDES(Mutex) {
  ScopedLock L(Mutex);
  Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times "
              "(%zuK), remains %u (%zuK) max %zuM, Fragmented %zuK\n",
              NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
              FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
              (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20,
              FragmentedBytes >> 10);
  Cache.getStats(Str);
}

} // namespace scudo

#endif // SCUDO_SECONDARY_H_