//===-- guarded_pool_allocator.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "gwp_asan/guarded_pool_allocator.h"

#include "gwp_asan/options.h"
#include "gwp_asan/utilities.h"

#include <assert.h>
#include <stddef.h>

using AllocationMetadata = gwp_asan::AllocationMetadata;
using Error = gwp_asan::Error;

namespace gwp_asan {
namespace {
// The pointer to the singleton version of this class. Assigned during
// initialisation, this allows the signal handler to find this class in order
// to deduce the root cause of failures. Must not be referenced by users
// outside this translation unit, in order to avoid the static init-order
// fiasco.
GuardedPoolAllocator *SingletonPtr = nullptr;
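
// Both helpers below assume that their Boundary/PageSize argument is a power
// of two, which lets the rounding be done with a single mask operation.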
size_t roundUpTo(size_t Size, size_t Boundary) {
  return (Size + Boundary - 1) & ~(Boundary - 1);
}

uintptr_t getPageAddr(uintptr_t Ptr, uintptr_t PageSize) {
  return Ptr & ~(PageSize - 1);
}

bool isPowerOfTwo(uintptr_t X) { return (X & (X - 1)) == 0; }
} // anonymous namespace

// Gets the singleton implementation of this class. Thread-compatible until
// init() is called, thread-safe afterwards.
GuardedPoolAllocator *GuardedPoolAllocator::getSingleton() {
  return SingletonPtr;
}

void GuardedPoolAllocator::init(const options::Options &Opts) {
  // Note: We return from init() here if GWP-ASan is not available. This will
  // stop heap-allocation of class members, as well as mmap() of the guarded
  // slots.
  if (!Opts.Enabled || Opts.SampleRate == 0 ||
      Opts.MaxSimultaneousAllocations == 0)
    return;

  Check(Opts.SampleRate >= 0, "GWP-ASan Error: SampleRate is < 0.");
  Check(Opts.SampleRate < (1 << 30), "GWP-ASan Error: SampleRate is >= 2^30.");
  Check(Opts.MaxSimultaneousAllocations >= 0,
        "GWP-ASan Error: MaxSimultaneousAllocations is < 0.");

  SingletonPtr = this;
  Backtrace = Opts.Backtrace;

  State.VersionMagic = {{AllocatorVersionMagic::kAllocatorVersionMagic[0],
                         AllocatorVersionMagic::kAllocatorVersionMagic[1],
                         AllocatorVersionMagic::kAllocatorVersionMagic[2],
                         AllocatorVersionMagic::kAllocatorVersionMagic[3]},
                        AllocatorVersionMagic::kAllocatorVersion,
                        0};

  State.MaxSimultaneousAllocations = Opts.MaxSimultaneousAllocations;

  const size_t PageSize = getPlatformPageSize();
  // getPageAddr() and roundUpTo() assume the page size to be a power of 2.
  assert((PageSize & (PageSize - 1)) == 0);
  State.PageSize = PageSize;
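
  // The pool is laid out as a leading guard page followed by
  // MaxSimultaneousAllocations pairs of (maximum-size slot, guard page), so
  // every slot is bracketed by guard pages on both sides.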
  size_t PoolBytesRequired =
      PageSize * (1 + State.MaxSimultaneousAllocations) +
      State.MaxSimultaneousAllocations * State.maximumAllocationSize();
  assert(PoolBytesRequired % PageSize == 0);
  void *GuardedPoolMemory = reserveGuardedPool(PoolBytesRequired);

  size_t BytesRequired =
      roundUpTo(State.MaxSimultaneousAllocations * sizeof(*Metadata), PageSize);
  Metadata = reinterpret_cast<AllocationMetadata *>(
      map(BytesRequired, kGwpAsanMetadataName));

  // Allocate memory and set up the free pages queue.
  BytesRequired = roundUpTo(
      State.MaxSimultaneousAllocations * sizeof(*FreeSlots), PageSize);
  FreeSlots =
      reinterpret_cast<size_t *>(map(BytesRequired, kGwpAsanFreeSlotsName));

  // Multiply the sample rate by 2 to give a good, fast approximation for (1 /
  // SampleRate) chance of sampling.
  if (Opts.SampleRate != 1)
    AdjustedSampleRatePlusOne = static_cast<uint32_t>(Opts.SampleRate) * 2 + 1;
  else
    AdjustedSampleRatePlusOne = 2;

  initPRNG();
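
  // Draw the first sample point uniformly from [1, AdjustedSampleRatePlusOne
  // - 1], i.e. [1, 2 * SampleRate]. On average this yields one sampled
  // allocation per SampleRate allocations, without needing any per-allocation
  // distribution computation.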
  getThreadLocals()->NextSampleCounter =
      ((getRandomUnsigned32() % (AdjustedSampleRatePlusOne - 1)) + 1) &
      ThreadLocalPackedVariables::NextSampleCounterMask;

  State.GuardedPagePool = reinterpret_cast<uintptr_t>(GuardedPoolMemory);
  State.GuardedPagePoolEnd =
      reinterpret_cast<uintptr_t>(GuardedPoolMemory) + PoolBytesRequired;

  if (Opts.InstallForkHandlers)
    installAtFork();
}
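
// disable()/enable() bracket regions where the pool must stay quiescent,
// presumably around fork() when InstallForkHandlers is set. Both mutexes are
// taken so neither allocations nor backtrace recording can race with the
// critical section.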
void GuardedPoolAllocator::disable() {
  PoolMutex.lock();
  BacktraceMutex.lock();
}

void GuardedPoolAllocator::enable() {
  PoolMutex.unlock();
  BacktraceMutex.unlock();
}
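
// Invokes Cb for every live (not yet deallocated) allocation whose user
// address falls within [Base, Base + Size). No lock is taken here, so callers
// presumably disable() the allocator around the walk.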
void GuardedPoolAllocator::iterate(void *Base, size_t Size, iterate_callback Cb,
                                   void *Arg) {
  uintptr_t Start = reinterpret_cast<uintptr_t>(Base);
  for (size_t i = 0; i < State.MaxSimultaneousAllocations; ++i) {
    const AllocationMetadata &Meta = Metadata[i];
    if (Meta.Addr && !Meta.IsDeallocated && Meta.Addr >= Start &&
        Meta.Addr < Start + Size)
      Cb(Meta.Addr, Meta.RequestedSize, Arg);
  }
}

void GuardedPoolAllocator::uninitTestOnly() {
  if (State.GuardedPagePool) {
    unreserveGuardedPool();
    State.GuardedPagePool = 0;
    State.GuardedPagePoolEnd = 0;
  }
  if (Metadata) {
    unmap(Metadata,
          roundUpTo(State.MaxSimultaneousAllocations * sizeof(*Metadata),
                    State.PageSize));
    Metadata = nullptr;
  }
  if (FreeSlots) {
    unmap(FreeSlots,
          roundUpTo(State.MaxSimultaneousAllocations * sizeof(*FreeSlots),
                    State.PageSize));
    FreeSlots = nullptr;
  }
  *getThreadLocals() = ThreadLocalPackedVariables();
}

// Note: the minimum backing allocation size in GWP-ASan is always one page,
// and each slot can span multiple pages (but always in page increments).
// Thus, anything that requires less-than-page-size alignment needs no extra
// padding to ensure the alignment can be met.
size_t GuardedPoolAllocator::getRequiredBackingSize(size_t Size,
                                                    size_t Alignment,
                                                    size_t PageSize) {
  assert(isPowerOfTwo(Alignment) && "Alignment must be a power of two!");
  assert(Alignment != 0 && "Alignment should be non-zero");
  assert(Size != 0 && "Size should be non-zero");

  if (Alignment <= PageSize)
    return Size;

  return Size + Alignment - PageSize;
}
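
// Round Ptr up (alignUp) or down (alignDown) to the nearest multiple of a
// power-of-two Alignment, leaving already-aligned pointers untouched.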
uintptr_t GuardedPoolAllocator::alignUp(uintptr_t Ptr, size_t Alignment) {
  assert(isPowerOfTwo(Alignment) && "Alignment must be a power of two!");
  assert(Alignment != 0 && "Alignment should be non-zero");
  if ((Ptr & (Alignment - 1)) == 0)
    return Ptr;

  Ptr += Alignment - (Ptr & (Alignment - 1));
  return Ptr;
}

uintptr_t GuardedPoolAllocator::alignDown(uintptr_t Ptr, size_t Alignment) {
  assert(isPowerOfTwo(Alignment) && "Alignment must be a power of two!");
  assert(Alignment != 0 && "Alignment should be non-zero");
  if ((Ptr & (Alignment - 1)) == 0)
    return Ptr;

  Ptr -= Ptr & (Alignment - 1);
  return Ptr;
}

void *GuardedPoolAllocator::allocate(size_t Size, size_t Alignment) {
  // GuardedPagePoolEnd == 0 when GWP-ASan is disabled. If we are disabled,
  // fall back to the supporting allocator.
  if (State.GuardedPagePoolEnd == 0) {
    getThreadLocals()->NextSampleCounter =
        (AdjustedSampleRatePlusOne - 1) &
        ThreadLocalPackedVariables::NextSampleCounterMask;
    return nullptr;
  }

  if (Size == 0)
    Size = 1;
  if (Alignment == 0)
    Alignment = alignof(max_align_t);

  if (!isPowerOfTwo(Alignment) || Alignment > State.maximumAllocationSize() ||
      Size > State.maximumAllocationSize())
    return nullptr;

  size_t BackingSize = getRequiredBackingSize(Size, Alignment, State.PageSize);
  if (BackingSize > State.maximumAllocationSize())
    return nullptr;

  // Protect against recursion.
  if (getThreadLocals()->RecursiveGuard)
    return nullptr;
  ScopedRecursiveGuard SRG;

  size_t Index;
  {
    ScopedLock L(PoolMutex);
    Index = reserveSlot();
  }

  if (Index == kInvalidSlotID)
    return nullptr;

  uintptr_t SlotStart = State.slotToAddr(Index);
  AllocationMetadata *Meta = addrToMetadata(SlotStart);
  uintptr_t SlotEnd = State.slotToAddr(Index) + State.maximumAllocationSize();
  uintptr_t UserPtr;
  // Randomly choose whether to left-align or right-align the allocation, and
  // then apply the necessary adjustments to get an aligned pointer.
  if (getRandomUnsigned32() % 2 == 0)
    UserPtr = alignUp(SlotStart, Alignment);
  else
    UserPtr = alignDown(SlotEnd - Size, Alignment);

  assert(UserPtr >= SlotStart);
  assert(UserPtr + Size <= SlotEnd);

  // If a slot is multiple pages in size, and the allocation takes up a single
  // page, we can improve overflow detection by leaving the unused pages
  // unmapped.
  const size_t PageSize = State.PageSize;
  allocateInGuardedPool(
      reinterpret_cast<void *>(getPageAddr(UserPtr, PageSize)),
      roundUpTo(Size, PageSize));

  Meta->RecordAllocation(UserPtr, Size);
  {
    ScopedLock UL(BacktraceMutex);
    Meta->AllocationTrace.RecordBacktrace(Backtrace);
  }

  return reinterpret_cast<void *>(UserPtr);
}
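
// A minimal usage sketch, assuming the sampling-wrapper pattern GWP-ASan is
// designed for (shouldSample() is declared in guarded_pool_allocator.h;
// fallback_malloc() is a hypothetical stand-in for the supporting allocator):
//
//   GuardedPoolAllocator *GPA = GuardedPoolAllocator::getSingleton();
//   if (GPA && GPA->shouldSample())
//     if (void *Ptr = GPA->allocate(Size, /*Alignment=*/0))
//       return Ptr;
//   return fallback_malloc(Size); // Not sampled, or the pool is exhausted.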

void GuardedPoolAllocator::trapOnAddress(uintptr_t Address, Error E) {
  State.FailureType = E;
  State.FailureAddress = Address;

  // Raise a SEGV by touching the first guard page.
  volatile char *p = reinterpret_cast<char *>(State.GuardedPagePool);
  *p = 0;
  // Normally, this would be __builtin_unreachable(), but because of
  // https://bugs.llvm.org/show_bug.cgi?id=47480, unreachable will DCE the
  // volatile store above, even though it has side effects.
  __builtin_trap();
}
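
// Sets this thread's recursive guard so it can no longer re-enter the
// allocator, and best-effort locks the pool so other threads stop allocating.
// tryLock() is used so this does not deadlock if the calling thread already
// holds PoolMutex.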
void GuardedPoolAllocator::stop() {
  getThreadLocals()->RecursiveGuard = true;
  PoolMutex.tryLock();
}

void GuardedPoolAllocator::deallocate(void *Ptr) {
  assert(pointerIsMine(Ptr) && "Pointer is not mine!");
  uintptr_t UPtr = reinterpret_cast<uintptr_t>(Ptr);
  size_t Slot = State.getNearestSlot(UPtr);
  uintptr_t SlotStart = State.slotToAddr(Slot);
  AllocationMetadata *Meta = addrToMetadata(UPtr);
  if (Meta->Addr != UPtr) {
    // If multiple errors occur at the same time, use the first one.
    ScopedLock L(PoolMutex);
    trapOnAddress(UPtr, Error::INVALID_FREE);
  }

  // Intentionally scope the mutex here, so that other threads can access the
  // pool during the expensive markInaccessible() call.
  {
    ScopedLock L(PoolMutex);
    if (Meta->IsDeallocated) {
      trapOnAddress(UPtr, Error::DOUBLE_FREE);
    }

    // Ensure that the deallocation is recorded before marking the page as
    // inaccessible. Otherwise, a racy use-after-free will have inconsistent
    // metadata.
    Meta->RecordDeallocation();

    // Ensure that the unwinder is not called if the recursive flag is set,
    // otherwise non-reentrant unwinders may deadlock.
    if (!getThreadLocals()->RecursiveGuard) {
      ScopedRecursiveGuard SRG;
      ScopedLock UL(BacktraceMutex);
      Meta->DeallocationTrace.RecordBacktrace(Backtrace);
    }
  }

  deallocateInGuardedPool(reinterpret_cast<void *>(SlotStart),
                          State.maximumAllocationSize());

  // And finally, lock again to release the slot back into the pool.
  ScopedLock L(PoolMutex);
  freeSlot(Slot);
}

size_t GuardedPoolAllocator::getSize(const void *Ptr) {
  assert(pointerIsMine(Ptr));
  ScopedLock L(PoolMutex);
  AllocationMetadata *Meta = addrToMetadata(reinterpret_cast<uintptr_t>(Ptr));
  assert(Meta->Addr == reinterpret_cast<uintptr_t>(Ptr));
  return Meta->RequestedSize;
}

AllocationMetadata *GuardedPoolAllocator::addrToMetadata(uintptr_t Ptr) const {
  return &Metadata[State.getNearestSlot(Ptr)];
}

size_t GuardedPoolAllocator::reserveSlot() {
  // Avoid potential reuse of a slot before we have made at least a single
  // allocation in each slot. Helps with our use-after-free detection.
  if (NumSampledAllocations < State.MaxSimultaneousAllocations)
    return NumSampledAllocations++;

  if (FreeSlotsLength == 0)
    return kInvalidSlotID;

  size_t ReservedIndex = getRandomUnsigned32() % FreeSlotsLength;
  size_t SlotIndex = FreeSlots[ReservedIndex];
  FreeSlots[ReservedIndex] = FreeSlots[--FreeSlotsLength];
  return SlotIndex;
}

void GuardedPoolAllocator::freeSlot(size_t SlotIndex) {
  assert(FreeSlotsLength < State.MaxSimultaneousAllocations);
  FreeSlots[FreeSlotsLength++] = SlotIndex;
}
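
// Marsaglia's 32-bit xorshift PRNG (shift triple 13/17/5) over the per-thread
// RandomState: fast and lock-free, which is all the sampling decisions need;
// it is not cryptographically secure.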
uint32_t GuardedPoolAllocator::getRandomUnsigned32() {
  uint32_t RandomState = getThreadLocals()->RandomState;
  RandomState ^= RandomState << 13;
  RandomState ^= RandomState >> 17;
  RandomState ^= RandomState << 5;
  getThreadLocals()->RandomState = RandomState;
  return RandomState;
}
} // namespace gwp_asan