//===-- sanitizer_allocator_secondary.h -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Fixed array to store the LargeMmapAllocator chunks list, limited to 32K
// total allocated chunks. To be used in memory-constrained or not
// memory-hungry cases (currently, 32 bits and the internal allocator).
class LargeMmapAllocatorPtrArrayStatic {
 public:
  inline void *Init() { return &p_[0]; }
  inline void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); }

 private:
  static const int kMaxNumChunks = 1 << 15;
  uptr p_[kMaxNumChunks];
};

// Much less restricted LargeMmapAllocator chunks list (compared to
// PtrArrayStatic). Backed by an mmaped memory region and can hold up to 1M
// chunks. ReservedAddressRange is used instead of plain MAP_NORESERVE because
// Fuchsia does not support MAP_NORESERVE.
class LargeMmapAllocatorPtrArrayDynamic {
 public:
  inline void *Init() {
    uptr p = address_range_.Init(kMaxNumChunks * sizeof(uptr),
                                 SecondaryAllocatorName);
    CHECK(p);
    return reinterpret_cast<void*>(p);
  }
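
  // Grow the mapped portion of the reserved region on demand: once index n
  // reaches the currently mapped capacity, map the next kChunksBlockCount
  // slots.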
  inline void EnsureSpace(uptr n) {
    CHECK_LT(n, kMaxNumChunks);
    DCHECK(n <= n_reserved_);
    if (UNLIKELY(n == n_reserved_)) {
      address_range_.MapOrDie(
          reinterpret_cast<uptr>(address_range_.base()) +
              n_reserved_ * sizeof(uptr),
          kChunksBlockCount * sizeof(uptr));
      n_reserved_ += kChunksBlockCount;
    }
  }

 private:
  static const int kMaxNumChunks = 1 << 20;
  static const int kChunksBlockCount = 1 << 14;
  ReservedAddressRange address_range_;
  uptr n_reserved_;
};

#if SANITIZER_WORDSIZE == 32
typedef LargeMmapAllocatorPtrArrayStatic DefaultLargeMmapAllocatorPtrArray;
#else
typedef LargeMmapAllocatorPtrArrayDynamic DefaultLargeMmapAllocatorPtrArray;
#endif

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
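//
// Example use (a minimal sketch; assumes AllocatorStats from
// sanitizer_allocator_stats.h and an initialized sanitizer runtime):
//   static LargeMmapAllocator<> secondary;
//   secondary.Init();
//   AllocatorStats stats;
//   stats.Init();
//   void *p = secondary.Allocate(&stats, /*size=*/1 << 20, /*alignment=*/8);
//   if (p) {
//     CHECK(secondary.PointerIsMine(p));
//     secondary.Deallocate(&stats, p);
//   }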
template <class MapUnmapCallback = NoOpMapUnmapCallback,
          class PtrArrayT = DefaultLargeMmapAllocatorPtrArray,
          class AddressSpaceViewTy = LocalAddressSpaceView>
class LargeMmapAllocator {
 public:
  using AddressSpaceView = AddressSpaceViewTy;
  void InitLinkerInitialized() {
    page_size_ = GetPageSizeCached();
    chunks_ = reinterpret_cast<Header**>(ptr_array_.Init());
  }

  void Init() {
    internal_memset(this, 0, sizeof(*this));
    InitLinkerInitialized();
  }
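
  // Maps RoundUpMapSize(size) bytes (plus extra slack when alignment >
  // page_size_) and returns a page-aligned, alignment-aligned pointer at
  // least one page past map_beg, so the Header can live in the page
  // immediately preceding the returned pointer. Returns nullptr if the
  // requested size overflows or the mapping fails.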
  void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
    CHECK(IsPowerOfTwo(alignment));
    uptr map_size = RoundUpMapSize(size);
    if (alignment > page_size_)
      map_size += alignment;
    // Overflow.
    if (map_size < size) {
      Report("WARNING: %s: LargeMmapAllocator allocation overflow: "
             "0x%zx bytes with 0x%zx alignment requested\n",
             SanitizerToolName, map_size, alignment);
      return nullptr;
    }
    uptr map_beg = reinterpret_cast<uptr>(
        MmapOrDieOnFatalError(map_size, SecondaryAllocatorName));
    if (!map_beg)
      return nullptr;
    CHECK(IsAligned(map_beg, page_size_));
    MapUnmapCallback().OnMap(map_beg, map_size);
    uptr map_end = map_beg + map_size;
    uptr res = map_beg + page_size_;
    if (res & (alignment - 1))  // Align.
      res += alignment - (res & (alignment - 1));
    CHECK(IsAligned(res, alignment));
    CHECK(IsAligned(res, page_size_));
    CHECK_GE(res + size, map_beg);
    CHECK_LE(res + size, map_end);
    Header *h = GetHeader(res);
    h->size = size;
    h->map_beg = map_beg;
    h->map_size = map_size;
    uptr size_log = MostSignificantSetBitIndex(map_size);
    CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
    {
      SpinMutexLock l(&mutex_);
      ptr_array_.EnsureSpace(n_chunks_);
      uptr idx = n_chunks_++;
      h->chunk_idx = idx;
      chunks_[idx] = h;
      chunks_sorted_ = false;
      stats.n_allocs++;
      stats.currently_allocated += map_size;
      stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
      stats.by_size_log[size_log]++;
      stat->Add(AllocatorStatAllocated, map_size);
      stat->Add(AllocatorStatMapped, map_size);
    }
    return reinterpret_cast<void*>(res);
  }
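
  // Removes the chunk from chunks_ by moving the last element into its slot
  // (the array is unordered between sorts), then unmaps the whole mapping.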
  void Deallocate(AllocatorStats *stat, void *p) {
    Header *h = GetHeader(p);
    {
      SpinMutexLock l(&mutex_);
      uptr idx = h->chunk_idx;
      CHECK_EQ(chunks_[idx], h);
      CHECK_LT(idx, n_chunks_);
      chunks_[idx] = chunks_[--n_chunks_];
      chunks_[idx]->chunk_idx = idx;
      chunks_sorted_ = false;
      stats.n_frees++;
      stats.currently_allocated -= h->map_size;
      stat->Sub(AllocatorStatAllocated, h->map_size);
      stat->Sub(AllocatorStatMapped, h->map_size);
    }
    MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
    UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
  }

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (uptr i = 0; i < n_chunks_; i++) {
      Header *h = chunks_[i];
      CHECK_EQ(h->chunk_idx, i);
      res += RoundUpMapSize(h->size);
    }
    return res;
  }

  bool PointerIsMine(const void *p) const {
    return GetBlockBegin(p) != nullptr;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpTo(GetHeader(p)->size, page_size_);
  }

  // At least page_size_/2 metadata bytes are available.
  void *GetMetaData(const void *p) {
    // Too slow: CHECK_EQ(p, GetBlockBegin(p));
    if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
      Printf("%s: bad pointer %p\n", SanitizerToolName, p);
      CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
    }
    return GetHeader(p) + 1;
  }
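
  // Returns the beginning of the user chunk that contains ptr, or nullptr if
  // ptr does not belong to this allocator. Linear in the number of chunks;
  // GetBlockBeginFastLocked() is the logarithmic variant for hot paths.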
  void *GetBlockBegin(const void *ptr) const {
    uptr p = reinterpret_cast<uptr>(ptr);
    SpinMutexLock l(&mutex_);
    uptr nearest_chunk = 0;
    Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
    // Cache-friendly linear search.
    for (uptr i = 0; i < n_chunks_; i++) {
      uptr ch = reinterpret_cast<uptr>(chunks[i]);
      if (p < ch) continue;  // p is to the left of this chunk, skip it.
      if (p - ch < p - nearest_chunk)
        nearest_chunk = ch;
    }
    if (!nearest_chunk)
      return nullptr;
    const Header *h =
        AddressSpaceView::Load(reinterpret_cast<Header *>(nearest_chunk));
    Header *h_ptr = reinterpret_cast<Header *>(nearest_chunk);
    CHECK_GE(nearest_chunk, h->map_beg);
    CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
    CHECK_LE(nearest_chunk, p);
    if (h->map_beg + h->map_size <= p)
      return nullptr;
    return GetUser(h_ptr);
  }
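
  // Sorts chunks_ by address and updates each header's chunk_idx so that it
  // keeps pointing back to the chunk's slot in the array.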
  void EnsureSortedChunks() {
    if (chunks_sorted_) return;
    Header **chunks = AddressSpaceView::LoadWritable(chunks_, n_chunks_);
    Sort(reinterpret_cast<uptr *>(chunks), n_chunks_);
    for (uptr i = 0; i < n_chunks_; i++)
      AddressSpaceView::LoadWritable(chunks[i])->chunk_idx = i;
    chunks_sorted_ = true;
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(const void *ptr) {
    mutex_.CheckLocked();
    uptr p = reinterpret_cast<uptr>(ptr);
    uptr n = n_chunks_;
    if (!n) return nullptr;
    EnsureSortedChunks();
    Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
    auto min_mmap_ = reinterpret_cast<uptr>(chunks[0]);
    auto max_mmap_ = reinterpret_cast<uptr>(chunks[n - 1]) +
                     AddressSpaceView::Load(chunks[n - 1])->map_size;
    if (p < min_mmap_ || p >= max_mmap_)
      return nullptr;
    uptr beg = 0, end = n - 1;
    // This loop is a log(n) lower_bound. It does not check for the exact match
    // to avoid expensive cache-thrashing loads.
    while (end - beg >= 2) {
      uptr mid = (beg + end) / 2;  // Invariant: mid >= beg + 1
      if (p < reinterpret_cast<uptr>(chunks[mid]))
        end = mid - 1;  // We are not interested in chunks[mid].
      else
        beg = mid;  // chunks[mid] may still be what we want.
    }
    if (beg < end) {
      CHECK_EQ(beg + 1, end);
      // There are 2 chunks left, choose one.
      if (p >= reinterpret_cast<uptr>(chunks[end]))
        beg = end;
    }
    const Header *h = AddressSpaceView::Load(chunks[beg]);
    Header *h_ptr = chunks[beg];
    if (h->map_beg + h->map_size <= p || p < h->map_beg)
      return nullptr;
    return GetUser(h_ptr);
  }

  void PrintStats() {
    Printf("Stats: LargeMmapAllocator: allocated %zd times, "
           "remains %zd (%zd K) max %zd M; by size logs: ",
           stats.n_allocs, stats.n_allocs - stats.n_frees,
           stats.currently_allocated >> 10, stats.max_allocated >> 20);
    for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
      uptr c = stats.by_size_log[i];
      if (!c) continue;
      Printf("%zd:%zd; ", i, c);
    }
    Printf("\n");
  }

  // ForceLock() and ForceUnlock() are needed to implement the Darwin malloc
  // zone introspection API.
  void ForceLock() SANITIZER_ACQUIRE(mutex_) { mutex_.Lock(); }

  void ForceUnlock() SANITIZER_RELEASE(mutex_) { mutex_.Unlock(); }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    EnsureSortedChunks();  // Avoid doing the sort while iterating.
    const Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
    for (uptr i = 0; i < n_chunks_; i++) {
      const Header *t = chunks[i];
      callback(reinterpret_cast<uptr>(GetUser(t)), arg);
      // Consistency check: verify that the array did not change.
      CHECK_EQ(chunks[i], t);
      CHECK_EQ(AddressSpaceView::Load(chunks[i])->chunk_idx, i);
    }
  }

 private:
  struct Header {
    uptr map_beg;
    uptr map_size;
    uptr size;
    uptr chunk_idx;
  };
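
  // The Header is stored at the start of the page immediately preceding the
  // user pointer; GetHeader() and GetUser() convert between the two views.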
  Header *GetHeader(uptr p) {
    CHECK(IsAligned(p, page_size_));
    return reinterpret_cast<Header*>(p - page_size_);
  }
  Header *GetHeader(const void *p) {
    return GetHeader(reinterpret_cast<uptr>(p));
  }

  void *GetUser(const Header *h) const {
    CHECK(IsAligned((uptr)h, page_size_));
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
  }
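
  // Rounds the requested size up to a whole number of pages and adds one
  // extra page for the Header.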
  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, page_size_) + page_size_;
  }

  uptr page_size_;
  Header **chunks_;
  PtrArrayT ptr_array_;
  uptr n_chunks_;
  bool chunks_sorted_;
  struct Stats {
    uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
  } stats;
  mutable StaticSpinMutex mutex_;
};