//===-- guarded_pool_allocator.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "gwp_asan/guarded_pool_allocator.h"

#include "gwp_asan/crash_handler.h"
#include "gwp_asan/options.h"
#include "gwp_asan/utilities.h"

#include <assert.h>
#include <stddef.h>

using AllocationMetadata = gwp_asan::AllocationMetadata;
using Error = gwp_asan::Error;

namespace gwp_asan {
namespace {
// Forward declare the pointer to the singleton version of this class.
// Instantiated during initialisation, this allows the signal handler
// to find this class in order to deduce the root cause of failures. Must not be
// referenced by users outside this translation unit, in order to avoid
// init-order-fiasco.
GuardedPoolAllocator *SingletonPtr = nullptr;

size_t roundUpTo(size_t Size, size_t Boundary) {
  return (Size + Boundary - 1) & ~(Boundary - 1);
}

uintptr_t getPageAddr(uintptr_t Ptr, uintptr_t PageSize) {
  return Ptr & ~(PageSize - 1);
}

bool isPowerOfTwo(uintptr_t X) { return (X & (X - 1)) == 0; }
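
// For illustration, assuming a 4 KiB page size: roundUpTo(5000, 4096) == 8192,
// getPageAddr(0x12345, 0x1000) == 0x12000, and isPowerOfTwo(4096) == true.
// Note that isPowerOfTwo(0) also returns true; callers that care assert
// separately that the value is non-zero.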
} // anonymous namespace

// Gets the singleton implementation of this class. Thread-compatible until
// init() is called, thread-safe afterwards.
GuardedPoolAllocator *GuardedPoolAllocator::getSingleton() {
  return SingletonPtr;
}

void GuardedPoolAllocator::init(const options::Options &Opts) {
  // Note: We return early from init() here if GWP-ASan is not available.
  // This stops heap-allocation of class members, as well as mmap() of the
  // guarded slots.
  if (!Opts.Enabled || Opts.SampleRate == 0 ||
      Opts.MaxSimultaneousAllocations == 0)
    return;

  Check(Opts.SampleRate >= 0, "GWP-ASan Error: SampleRate is < 0.");
  Check(Opts.SampleRate < (1 << 30), "GWP-ASan Error: SampleRate is >= 2^30.");
  Check(Opts.MaxSimultaneousAllocations >= 0,
        "GWP-ASan Error: MaxSimultaneousAllocations is < 0.");

  SingletonPtr = this;
  Backtrace = Opts.Backtrace;

  State.VersionMagic = {{AllocatorVersionMagic::kAllocatorVersionMagic[0],
                         AllocatorVersionMagic::kAllocatorVersionMagic[1],
                         AllocatorVersionMagic::kAllocatorVersionMagic[2],
                         AllocatorVersionMagic::kAllocatorVersionMagic[3]},
                        AllocatorVersionMagic::kAllocatorVersion,
                        0};

  State.MaxSimultaneousAllocations = Opts.MaxSimultaneousAllocations;

  const size_t PageSize = getPlatformPageSize();
  // getPageAddr() and roundUpTo() assume the page size to be a power of 2.
  assert((PageSize & (PageSize - 1)) == 0);
  State.PageSize = PageSize;

  // Number of pages required =
  //   + MaxSimultaneousAllocations * maximumAllocationSize (N pages per slot)
  //   + MaxSimultaneousAllocations (one guard on the left side of each slot)
  //   + 1 (an extra guard page at the end of the pool, on the right side)
  //   + 1 (an extra page that's used for reporting internally-detected crashes,
  //        like double free and invalid free, to the signal handler; see
  //        raiseInternallyDetectedError() for more info)
  size_t PoolBytesRequired =
      PageSize * (2 + State.MaxSimultaneousAllocations) +
      State.MaxSimultaneousAllocations * State.maximumAllocationSize();
  assert(PoolBytesRequired % PageSize == 0);
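  // For illustration, assuming PageSize == 4096 and maximumAllocationSize()
  // equal to one page, with MaxSimultaneousAllocations == 16 this is
  //   4096 * (2 + 16) + 16 * 4096 == 139264 bytes == 34 pages:
  // 16 slot pages + 16 left guard pages + 1 right guard page + 1 page for
  // internally-detected errors.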

  void *GuardedPoolMemory = reserveGuardedPool(PoolBytesRequired);

  size_t BytesRequired =
      roundUpTo(State.MaxSimultaneousAllocations * sizeof(*Metadata), PageSize);
  Metadata = reinterpret_cast<AllocationMetadata *>(
      map(BytesRequired, kGwpAsanMetadataName));

  // Allocate memory and set up the free pages queue.
  BytesRequired = roundUpTo(
      State.MaxSimultaneousAllocations * sizeof(*FreeSlots), PageSize);
  FreeSlots =
      reinterpret_cast<size_t *>(map(BytesRequired, kGwpAsanFreeSlotsName));

  // Multiply the sample rate by 2 to give a good, fast approximation for (1 /
  // SampleRate) chance of sampling.
  if (Opts.SampleRate != 1)
    AdjustedSampleRatePlusOne = static_cast<uint32_t>(Opts.SampleRate) * 2 + 1;
  else
    AdjustedSampleRatePlusOne = 2;

  initPRNG();
  getThreadLocals()->NextSampleCounter =
      ((getRandomUnsigned32() % (AdjustedSampleRatePlusOne - 1)) + 1) &
      ThreadLocalPackedVariables::NextSampleCounterMask;
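
  // For illustration, with SampleRate == 5000 this gives
  // AdjustedSampleRatePlusOne == 10001, so NextSampleCounter is drawn
  // uniformly from [1, 10000]. The sampling fast path (in the header) counts
  // this value down as allocations happen and guards an allocation when it
  // reaches its trigger value, so on average roughly one in every SampleRate
  // allocations lands in the guarded pool.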

  State.GuardedPagePool = reinterpret_cast<uintptr_t>(GuardedPoolMemory);
  State.GuardedPagePoolEnd =
      reinterpret_cast<uintptr_t>(GuardedPoolMemory) + PoolBytesRequired;

  if (Opts.InstallForkHandlers)
    installAtFork();
}

void GuardedPoolAllocator::disable() {
  PoolMutex.lock();
  BacktraceMutex.lock();
}

void GuardedPoolAllocator::enable() {
  PoolMutex.unlock();
  BacktraceMutex.unlock();
}

void GuardedPoolAllocator::iterate(void *Base, size_t Size, iterate_callback Cb,
                                   void *Arg) {
  uintptr_t Start = reinterpret_cast<uintptr_t>(Base);
  for (size_t i = 0; i < State.MaxSimultaneousAllocations; ++i) {
    const AllocationMetadata &Meta = Metadata[i];
    if (Meta.Addr && !Meta.IsDeallocated && Meta.Addr >= Start &&
        Meta.Addr < Start + Size)
      Cb(Meta.Addr, Meta.RequestedSize, Arg);
  }
}

void GuardedPoolAllocator::uninitTestOnly() {
  if (State.GuardedPagePool) {
    unreserveGuardedPool();
    State.GuardedPagePool = 0;
    State.GuardedPagePoolEnd = 0;
  }
  if (Metadata) {
    unmap(Metadata,
          roundUpTo(State.MaxSimultaneousAllocations * sizeof(*Metadata),
                    State.PageSize));
    Metadata = nullptr;
  }
  if (FreeSlots) {
    unmap(FreeSlots,
          roundUpTo(State.MaxSimultaneousAllocations * sizeof(*FreeSlots),
                    State.PageSize));
    FreeSlots = nullptr;
  }
  *getThreadLocals() = ThreadLocalPackedVariables();
}

// Note: the minimum backing allocation size in GWP-ASan is always one page,
// and each slot can span multiple pages (but always in page increments).
// Thus, for anything that requires less-than-page-size alignment, we don't
// need to allocate extra padding to ensure the alignment can be met.
size_t GuardedPoolAllocator::getRequiredBackingSize(size_t Size,
                                                    size_t Alignment,
                                                    size_t PageSize) {
  assert(isPowerOfTwo(Alignment) && "Alignment must be a power of two!");
  assert(Alignment != 0 && "Alignment should be non-zero");
  assert(Size != 0 && "Size should be non-zero");

  if (Alignment <= PageSize)
    return Size;

  return Size + Alignment - PageSize;
}
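
// For illustration, assuming PageSize == 4096: getRequiredBackingSize(100, 16,
// 4096) == 100, since slot starts are page-aligned and any alignment up to a
// page is already satisfiable. getRequiredBackingSize(100, 8192, 4096) ==
// 100 + 8192 - 4096 == 4196, leaving enough slack to slide the user pointer up
// to the next 8192-byte boundary inside the backing region.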

uintptr_t GuardedPoolAllocator::alignUp(uintptr_t Ptr, size_t Alignment) {
  assert(isPowerOfTwo(Alignment) && "Alignment must be a power of two!");
  assert(Alignment != 0 && "Alignment should be non-zero");
  if ((Ptr & (Alignment - 1)) == 0)
    return Ptr;

  Ptr += Alignment - (Ptr & (Alignment - 1));
  return Ptr;
}

uintptr_t GuardedPoolAllocator::alignDown(uintptr_t Ptr, size_t Alignment) {
  assert(isPowerOfTwo(Alignment) && "Alignment must be a power of two!");
  assert(Alignment != 0 && "Alignment should be non-zero");
  if ((Ptr & (Alignment - 1)) == 0)
    return Ptr;

  Ptr -= Ptr & (Alignment - 1);
  return Ptr;
}
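
// For illustration: alignUp(0x1001, 0x10) == 0x1010 and
// alignDown(0x100f, 0x10) == 0x1000; already-aligned pointers are returned
// unchanged.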

void *GuardedPoolAllocator::allocate(size_t Size, size_t Alignment) {
  // GuardedPagePoolEnd == 0 when GWP-ASan is disabled. If we are disabled, fall
  // back to the supporting allocator.
  if (State.GuardedPagePoolEnd == 0) {
    getThreadLocals()->NextSampleCounter =
        (AdjustedSampleRatePlusOne - 1) &
        ThreadLocalPackedVariables::NextSampleCounterMask;
    return nullptr;
  }

  if (Size == 0)
    Size = 1;
  if (Alignment == 0)
    Alignment = alignof(max_align_t);

  if (!isPowerOfTwo(Alignment) || Alignment > State.maximumAllocationSize() ||
      Size > State.maximumAllocationSize())
    return nullptr;

  size_t BackingSize = getRequiredBackingSize(Size, Alignment, State.PageSize);
  if (BackingSize > State.maximumAllocationSize())
    return nullptr;

  // Protect against recursion: if this thread is already inside GWP-ASan
  // (e.g. recording a backtrace), don't service this allocation from the
  // guarded pool.
  if (getThreadLocals()->RecursiveGuard)
    return nullptr;
  ScopedRecursiveGuard SRG;

  size_t Index;
  {
    ScopedLock L(PoolMutex);
    Index = reserveSlot();
  }

  if (Index == kInvalidSlotID)
    return nullptr;

  uintptr_t SlotStart = State.slotToAddr(Index);
  AllocationMetadata *Meta = addrToMetadata(SlotStart);
  uintptr_t SlotEnd = State.slotToAddr(Index) + State.maximumAllocationSize();
  uintptr_t UserPtr;
  // Randomly choose whether to left-align or right-align the allocation, and
  // then apply the necessary adjustments to get an aligned pointer.
  if (getRandomUnsigned32() % 2 == 0)
    UserPtr = alignUp(SlotStart, Alignment);
  else
    UserPtr = alignDown(SlotEnd - Size, Alignment);

  assert(UserPtr >= SlotStart);
  assert(UserPtr + Size <= SlotEnd);
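  // For illustration, with a 4096-byte slot, Size == 24 and Alignment == 16:
  // left-alignment puts UserPtr at SlotStart, so underflows immediately hit
  // the left guard page; right-alignment puts UserPtr at
  // alignDown(SlotEnd - 24, 16) == SlotEnd - 32, so overflows hit the right
  // guard page after at most 8 bytes of alignment slack.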

  // If a slot is multiple pages in size, and the allocation takes up a single
  // page, we can improve overflow detection by leaving the unused pages
  // unmapped.
  const size_t PageSize = State.PageSize;
  allocateInGuardedPool(
      reinterpret_cast<void *>(getPageAddr(UserPtr, PageSize)),
      roundUpTo(Size, PageSize));

  Meta->RecordAllocation(UserPtr, Size);
  {
    ScopedLock UL(BacktraceMutex);
    Meta->AllocationTrace.RecordBacktrace(Backtrace);
  }

  return reinterpret_cast<void *>(UserPtr);
}
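
// A minimal sketch of how a supporting allocator typically drives this (not
// part of this file; GuardedAlloc and fallbackMalloc() are assumed names, and
// shouldSample() comes from the public GuardedPoolAllocator interface):
//
//   void *MyMalloc(size_t Size) {
//     if (GuardedAlloc.shouldSample())
//       if (void *Ptr = GuardedAlloc.allocate(Size, alignof(max_align_t)))
//         return Ptr;               // Sampled into the guarded pool.
//     return fallbackMalloc(Size);  // Normal path: supporting allocator.
//   }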

void GuardedPoolAllocator::raiseInternallyDetectedError(uintptr_t Address,
                                                        Error E) {
  // Disable the allocator before setting the internal failure state. In
  // non-recoverable mode, the allocator will be permanently disabled, and so
  // things will be accessed without locks.
  disable();

  // Races between internally- and externally-raised faults can happen. Right
  // now, in this thread we've locked the allocator in order to raise an
  // internally-detected fault, and another thread could SIGSEGV to raise an
  // externally-detected fault. What will happen is that the other thread will
  // wait in the signal handler, as we hold the allocator's locks from the
  // disable() above. We'll trigger the signal handler by touching the
  // internal-signal-raising address below, and the signal handler from our
  // thread will get to run first as we will continue to hold the allocator
  // locks until the enable() at the end of this function. Be careful though:
  // if this thread receives another SIGSEGV after the disable() above, but
  // before touching the internal-signal-raising address below, then this
  // thread will get an "externally-raised" SIGSEGV while *also* holding the
  // allocator locks, which means this thread's signal handler will deadlock.
  // This could be resolved with a re-entrant lock, but asking platforms to
  // implement this seems unnecessary given the only way to get a SIGSEGV in
  // this critical section is either a memory safety bug in the couple of
  // lines of code below (be careful!), or someone outside using
  // `kill(this_thread, SIGSEGV)`, which really shouldn't happen.

  State.FailureType = E;
  State.FailureAddress = Address;

  // Raise a SEGV by touching a specific address that identifies to the crash
  // handler that this is an internally-raised fault. Changing this address?
  // Don't forget to update __gwp_asan_get_internal_crash_address.
  volatile char *p =
      reinterpret_cast<char *>(State.internallyDetectedErrorFaultAddress());
  *p = 0;

  // This should never be reached in non-recoverable mode. Ensure that the
  // signal handler called handleRecoverablePostCrashReport(), which was
  // responsible for resetting these fields.
  assert(State.FailureType == Error::UNKNOWN);
  assert(State.FailureAddress == 0u);

  // In recoverable mode, the signal handler (after dumping the crash) marked
  // the page containing the InternalFaultSegvAddress as read/writeable, to
  // allow the second touch to succeed after returning from the signal handler.
  // Now, we need to mark the page as inaccessible again, so future internal
  // faults can be raised.
  deallocateInGuardedPool(
      reinterpret_cast<void *>(getPageAddr(
          State.internallyDetectedErrorFaultAddress(), State.PageSize)),
      State.PageSize);

  // Now that we're done patching ourselves back up, enable the allocator.
  enable();
}

void GuardedPoolAllocator::deallocate(void *Ptr) {
  assert(pointerIsMine(Ptr) && "Pointer is not mine!");
  uintptr_t UPtr = reinterpret_cast<uintptr_t>(Ptr);
  size_t Slot = State.getNearestSlot(UPtr);
  uintptr_t SlotStart = State.slotToAddr(Slot);
  AllocationMetadata *Meta = addrToMetadata(UPtr);

  // If this allocation is responsible for a crash, never recycle it. Turn the
  // deallocate() call into a no-op.
  if (Meta->HasCrashed)
    return;
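
  // For example, freeing an interior pointer (any address other than the one
  // returned by allocate()) fails the check below and is reported as an
  // invalid free.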
  if (Meta->Addr != UPtr) {
    raiseInternallyDetectedError(UPtr, Error::INVALID_FREE);
    return;
  }
  if (Meta->IsDeallocated) {
    raiseInternallyDetectedError(UPtr, Error::DOUBLE_FREE);
    return;
  }

  // Intentionally scope the mutex here, so that other threads can access the
  // pool during the expensive deallocateInGuardedPool() call below.
  {
    ScopedLock L(PoolMutex);

    // Ensure that the deallocation is recorded before marking the page as
    // inaccessible. Otherwise, a racy use-after-free will have inconsistent
    // metadata.
    Meta->RecordDeallocation();

    // Ensure that the unwinder is not called if the recursive flag is set,
    // otherwise non-reentrant unwinders may deadlock.
    if (!getThreadLocals()->RecursiveGuard) {
      ScopedRecursiveGuard SRG;
      ScopedLock UL(BacktraceMutex);
      Meta->DeallocationTrace.RecordBacktrace(Backtrace);
    }
  }

  deallocateInGuardedPool(reinterpret_cast<void *>(SlotStart),
                          State.maximumAllocationSize());

  // And finally, lock again to release the slot back into the pool.
  ScopedLock L(PoolMutex);
  freeSlot(Slot);
}

// Thread-compatible, protected by PoolMutex.
static bool PreviousRecursiveGuard;

void GuardedPoolAllocator::preCrashReport(void *Ptr) {
  assert(pointerIsMine(Ptr) && "Pointer is not mine!");
  uintptr_t InternalCrashAddr = __gwp_asan_get_internal_crash_address(
      &State, reinterpret_cast<uintptr_t>(Ptr));
  if (!InternalCrashAddr)
    disable();

  // If something in the signal handler calls malloc() while dumping the
  // GWP-ASan report (e.g. backtrace_symbols()), make sure that GWP-ASan doesn't
  // service that allocation. `PreviousRecursiveGuard` is protected by the
  // allocator locks taken in disable(), either explicitly above for
  // externally-raised errors, or implicitly in raiseInternallyDetectedError()
  // for internally-detected errors.
  PreviousRecursiveGuard = getThreadLocals()->RecursiveGuard;
  getThreadLocals()->RecursiveGuard = true;
}

void GuardedPoolAllocator::postCrashReportRecoverableOnly(void *SignalPtr) {
  uintptr_t SignalUPtr = reinterpret_cast<uintptr_t>(SignalPtr);
  uintptr_t InternalCrashAddr =
      __gwp_asan_get_internal_crash_address(&State, SignalUPtr);
  uintptr_t ErrorUptr = InternalCrashAddr ?: SignalUPtr;

  AllocationMetadata *Metadata = addrToMetadata(ErrorUptr);
  Metadata->HasCrashed = true;

  allocateInGuardedPool(
      reinterpret_cast<void *>(getPageAddr(SignalUPtr, State.PageSize)),
      State.PageSize);

  // Clear the internal state in order to not confuse the crash handler if a
  // use-after-free or buffer-overflow comes from a different allocation in the
  // future.
  if (InternalCrashAddr) {
    State.FailureType = Error::UNKNOWN;
    State.FailureAddress = 0;
  }

  size_t Slot = State.getNearestSlot(ErrorUptr);
  // If the slot is available, remove it permanently.
  for (size_t i = 0; i < FreeSlotsLength; ++i) {
    if (FreeSlots[i] == Slot) {
      FreeSlots[i] = FreeSlots[FreeSlotsLength - 1];
      FreeSlotsLength -= 1;
      break;
    }
  }

  getThreadLocals()->RecursiveGuard = PreviousRecursiveGuard;
  if (!InternalCrashAddr)
    enable();
}

size_t GuardedPoolAllocator::getSize(const void *Ptr) {
  assert(pointerIsMine(Ptr));
  ScopedLock L(PoolMutex);
  AllocationMetadata *Meta = addrToMetadata(reinterpret_cast<uintptr_t>(Ptr));
  assert(Meta->Addr == reinterpret_cast<uintptr_t>(Ptr));
  return Meta->RequestedSize;
}

AllocationMetadata *GuardedPoolAllocator::addrToMetadata(uintptr_t Ptr) const {
  return &Metadata[State.getNearestSlot(Ptr)];
}

size_t GuardedPoolAllocator::reserveSlot() {
  // Avoid potential reuse of a slot before we have made at least a single
  // allocation in each slot. Helps with our use-after-free detection.
  if (NumSampledAllocations < State.MaxSimultaneousAllocations)
    return NumSampledAllocations++;

  if (FreeSlotsLength == 0)
    return kInvalidSlotID;

  size_t ReservedIndex = getRandomUnsigned32() % FreeSlotsLength;
  size_t SlotIndex = FreeSlots[ReservedIndex];
  FreeSlots[ReservedIndex] = FreeSlots[--FreeSlotsLength];
  return SlotIndex;
}
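
// For illustration: with FreeSlots == {3, 7, 9} and FreeSlotsLength == 3, a
// random ReservedIndex of 1 hands out slot 7 and compacts the array to {3, 9}
// with FreeSlotsLength == 2 (swap-with-last removal), so reuse order is
// randomised rather than LIFO.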

void GuardedPoolAllocator::freeSlot(size_t SlotIndex) {
  assert(FreeSlotsLength < State.MaxSimultaneousAllocations);
  FreeSlots[FreeSlotsLength++] = SlotIndex;
}
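
// A xorshift-style PRNG (shift constants 13/17/5): fast and good enough for
// sampling decisions, but not cryptographically secure.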
uint32_t GuardedPoolAllocator::getRandomUnsigned32() {
  uint32_t RandomState = getThreadLocals()->RandomState;
  RandomState ^= RandomState << 13;
  RandomState ^= RandomState >> 17;
  RandomState ^= RandomState << 5;
  getThreadLocals()->RandomState = RandomState;
  return RandomState;
}

} // namespace gwp_asan