//===-- sanitizer_stack_store.cpp -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "sanitizer_stack_store.h"

#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_leb128.h"
#include "sanitizer_lzw.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_stacktrace.h"

namespace __sanitizer {

namespace {
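// Packs the size and tag of a stack trace into a single uptr so they can be
// stored in the frame slot that precedes the trace itself.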
struct StackTraceHeader {
  static constexpr u32 kStackSizeBits = 8;

  u8 size;
  u8 tag;
  explicit StackTraceHeader(const StackTrace &trace)
      : size(Min<uptr>(trace.size, (1u << 8) - 1)), tag(trace.tag) {
    CHECK_EQ(trace.tag, static_cast<uptr>(tag));
  }
  explicit StackTraceHeader(uptr h)
      : size(h & ((1 << kStackSizeBits) - 1)), tag(h >> kStackSizeBits) {}

  uptr ToUptr() const {
    return static_cast<uptr>(size) | (static_cast<uptr>(tag) << kStackSizeBits);
  }
};
}  // namespace
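
// Copies the trace into the store and returns a compact id for later Load().
// *pack is incremented each time a block becomes completely filled and is
// therefore ready to be packed.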
StackStore::Id StackStore::Store(const StackTrace &trace, uptr *pack) {
  if (!trace.size && !trace.tag)
    return 0;
  StackTraceHeader h(trace);
  uptr idx = 0;
  *pack = 0;
  uptr *stack_trace = Alloc(h.size + 1, &idx, pack);
  // No more space.
  if (stack_trace == nullptr)
    return 0;
  *stack_trace = h.ToUptr();
  internal_memcpy(stack_trace + 1, trace.trace, h.size * sizeof(uptr));
  *pack += blocks_[GetBlockIdx(idx)].Stored(h.size + 1);
  return OffsetToId(idx);
}
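
// Resolves an id produced by Store() back into a StackTrace view. Unpacks the
// owning block first if it had been compressed.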
StackTrace StackStore::Load(Id id) {
  if (!id)
    return {};
  uptr idx = IdToOffset(id);
  uptr block_idx = GetBlockIdx(idx);
  CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
  const uptr *stack_trace = blocks_[block_idx].GetOrUnpack(this);
  if (!stack_trace)
    return {};
  stack_trace += GetInBlockIdx(idx);
  StackTraceHeader h(*stack_trace);
  return StackTrace(stack_trace + 1, h.size, h.tag);
}

uptr StackStore::Allocated() const {
  return atomic_load_relaxed(&allocated_) + sizeof(*this);
}
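
// Reserves `count` consecutive frame slots within a single block by bumping
// total_frames_ with a lock-free fetch-add. If the reserved range straddles a
// block boundary, the leftover slots on both sides are marked as "stored" and
// the allocation is retried in the next block.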
uptr *StackStore::Alloc(uptr count, uptr *idx, uptr *pack) {
  for (;;) {
    // Optimistic lock-free allocation: essentially try to bump the
    // total_frames_.
    uptr start = atomic_fetch_add(&total_frames_, count, memory_order_relaxed);
    uptr block_idx = GetBlockIdx(start);
    uptr last_idx = GetBlockIdx(start + count - 1);
    if (LIKELY(block_idx == last_idx)) {
      // Fits into a single block.
      // No more available blocks. Indicate inability to allocate more memory.
      if (block_idx >= ARRAY_SIZE(blocks_))
        return nullptr;
      *idx = start;
      return blocks_[block_idx].GetOrCreate(this) + GetInBlockIdx(start);
    }

    // Retry. We can't use a range allocated in two different blocks.
    CHECK_LE(count, kBlockSizeFrames);
    uptr in_first = kBlockSizeFrames - GetInBlockIdx(start);
    // Mark the tail/head of these blocks as "stored" to avoid waiting before
    // we can Pack().
    *pack += blocks_[block_idx].Stored(in_first);
    *pack += blocks_[last_idx].Stored(count - in_first);
  }
}
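
// Map/Unmap wrap the raw mapping calls and keep allocated_ in sync so that
// Allocated() reflects the store's current memory footprint.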
void *StackStore::Map(uptr size, const char *mem_type) {
  atomic_fetch_add(&allocated_, size, memory_order_relaxed);
  return MmapNoReserveOrDie(size, mem_type);
}

void StackStore::Unmap(void *addr, uptr size) {
  atomic_fetch_sub(&allocated_, size, memory_order_relaxed);
  UnmapOrDie(addr, size);
}

uptr StackStore::Pack(Compression type) {
  uptr res = 0;
  for (BlockInfo &b : blocks_) res += b.Pack(type, this);
  return res;
}

void StackStore::LockAll() {
  for (BlockInfo &b : blocks_) b.Lock();
}

void StackStore::UnlockAll() {
  for (BlockInfo &b : blocks_) b.Unlock();
}

void StackStore::TestOnlyUnmap() {
  for (BlockInfo &b : blocks_) b.TestOnlyUnmap(this);
  internal_memset(this, 0, sizeof(*this));
}

uptr *StackStore::BlockInfo::Get() const {
  // Idiomatic double-checked locking uses memory_order_acquire here. But
  // relaxed is fine for us, justification is similar to
  // TwoLevelMap::GetOrCreate.
  return reinterpret_cast<uptr *>(atomic_load_relaxed(&data_));
}

uptr *StackStore::BlockInfo::Create(StackStore *store) {
  SpinMutexLock l(&mtx_);
  uptr *ptr = Get();
  if (!ptr) {
    ptr = reinterpret_cast<uptr *>(store->Map(kBlockSizeBytes, "StackStore"));
    atomic_store(&data_, reinterpret_cast<uptr>(ptr), memory_order_release);
  }
  return ptr;
}

uptr *StackStore::BlockInfo::GetOrCreate(StackStore *store) {
  uptr *ptr = Get();
  if (LIKELY(ptr))
    return ptr;
  return Create(store);
}
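
// Iterator-style adapters around the SLEB128 routines: the encoder emits the
// signed LEB128 encoding of the delta from the previously written value, and
// the decoder reverses that by accumulating decoded deltas. Small deltas take
// only a few bytes.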
class SLeb128Encoder {
 public:
  SLeb128Encoder(u8 *begin, u8 *end) : begin(begin), end(end) {}

  bool operator==(const SLeb128Encoder &other) const {
    return begin == other.begin;
  }

  bool operator!=(const SLeb128Encoder &other) const {
    return begin != other.begin;
  }

  SLeb128Encoder &operator=(uptr v) {
    sptr diff = v - previous;
    begin = EncodeSLEB128(diff, begin, end);
    previous = v;
    return *this;
  }
  SLeb128Encoder &operator*() { return *this; }
  SLeb128Encoder &operator++() { return *this; }

  u8 *base() const { return begin; }

 private:
  u8 *begin;
  u8 *end;
  uptr previous = 0;
};

class SLeb128Decoder {
 public:
  SLeb128Decoder(const u8 *begin, const u8 *end) : begin(begin), end(end) {}

  bool operator==(const SLeb128Decoder &other) const {
    return begin == other.begin;
  }

  bool operator!=(const SLeb128Decoder &other) const {
    return begin != other.begin;
  }

  uptr operator*() {
    sptr diff;
    begin = DecodeSLEB128(begin, end, &diff);
    previous += diff;
    return previous;
  }
  SLeb128Decoder &operator++() { return *this; }

  SLeb128Decoder operator++(int) { return *this; }

 private:
  const u8 *begin;
  const u8 *end;
  uptr previous = 0;
};
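
// Compression::Delta writes each frame as a SLEB128-encoded delta from its
// predecessor. Compression::LZW first runs LZW over the frame stream and then
// feeds the resulting codes through the same SLEB128 delta encoder.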
static u8 *CompressDelta(const uptr *from, const uptr *from_end, u8 *to,
                         u8 *to_end) {
  SLeb128Encoder encoder(to, to_end);
  for (; from != from_end; ++from, ++encoder) *encoder = *from;
  return encoder.base();
}

static uptr *UncompressDelta(const u8 *from, const u8 *from_end, uptr *to,
                             uptr *to_end) {
  SLeb128Decoder decoder(from, from_end);
  SLeb128Decoder end(from_end, from_end);
  for (; decoder != end; ++to, ++decoder) *to = *decoder;
  CHECK_EQ(to, to_end);
  return to;
}

static u8 *CompressLzw(const uptr *from, const uptr *from_end, u8 *to,
                       u8 *to_end) {
  SLeb128Encoder encoder(to, to_end);
  encoder = LzwEncode<uptr>(from, from_end, encoder);
  return encoder.base();
}

static uptr *UncompressLzw(const u8 *from, const u8 *from_end, uptr *to,
                           uptr *to_end) {
  SLeb128Decoder decoder(from, from_end);
  SLeb128Decoder end(from_end, from_end);
  to = LzwDecode<uptr>(decoder, end, to);
  CHECK_EQ(to, to_end);
  return to;
}

#if defined(_MSC_VER) && !defined(__clang__)
#  pragma warning(push)
// Disable 'nonstandard extension used: zero-sized array in struct/union'.
#  pragma warning(disable : 4200)
#endif
namespace {
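// A packed block starts with this header followed by the compressed bytes;
// `size` is the total size of the packed data including the header itself.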
struct PackedHeader {
  uptr size;
  StackStore::Compression type;
  u8 data[];
};
}  // namespace
#if defined(_MSC_VER) && !defined(__clang__)
#  pragma warning(pop)
#endif
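
// Returns the uncompressed frame array for this block. If the block is
// currently packed, decompresses it into a fresh read-only mapping and
// releases the packed copy.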
uptr *StackStore::BlockInfo::GetOrUnpack(StackStore *store) {
  SpinMutexLock l(&mtx_);
  switch (state) {
    case State::Storing:
      state = State::Unpacked;
      FALLTHROUGH;
    case State::Unpacked:
      return Get();
    case State::Packed:
      break;
  }

  u8 *ptr = reinterpret_cast<u8 *>(Get());
  CHECK_NE(nullptr, ptr);
  const PackedHeader *header = reinterpret_cast<const PackedHeader *>(ptr);
  CHECK_LE(header->size, kBlockSizeBytes);
  CHECK_GE(header->size, sizeof(PackedHeader));

  uptr packed_size_aligned = RoundUpTo(header->size, GetPageSizeCached());

  uptr *unpacked =
      reinterpret_cast<uptr *>(store->Map(kBlockSizeBytes, "StackStoreUnpack"));

  uptr *unpacked_end;
  switch (header->type) {
    case Compression::Delta:
      unpacked_end = UncompressDelta(header->data, ptr + header->size, unpacked,
                                     unpacked + kBlockSizeFrames);
      break;
    case Compression::LZW:
      unpacked_end = UncompressLzw(header->data, ptr + header->size, unpacked,
                                   unpacked + kBlockSizeFrames);
      break;
    default:
      UNREACHABLE("Unexpected type");
      break;
  }
  CHECK_EQ(kBlockSizeFrames, unpacked_end - unpacked);

  MprotectReadOnly(reinterpret_cast<uptr>(unpacked), kBlockSizeBytes);
  atomic_store(&data_, reinterpret_cast<uptr>(unpacked), memory_order_release);
  store->Unmap(ptr, packed_size_aligned);

  state = State::Unpacked;
  return Get();
}
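
// Compresses a fully stored block: the frames are encoded into a new mapping,
// the unused tail of that mapping is returned to the OS, and the original
// uncompressed block is unmapped. Returns the number of bytes saved, or 0 if
// the block is not ready or compression did not pay off.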
uptr StackStore::BlockInfo::Pack(Compression type, StackStore *store) {
  if (type == Compression::None)
    return 0;

  SpinMutexLock l(&mtx_);
  switch (state) {
    case State::Unpacked:
    case State::Packed:
      return 0;
    case State::Storing:
      break;
  }

  uptr *ptr = Get();
  if (!ptr || !Stored(0))
    return 0;

  u8 *packed =
      reinterpret_cast<u8 *>(store->Map(kBlockSizeBytes, "StackStorePack"));
  PackedHeader *header = reinterpret_cast<PackedHeader *>(packed);
  u8 *alloc_end = packed + kBlockSizeBytes;

  u8 *packed_end = nullptr;
  switch (type) {
    case Compression::Delta:
      packed_end =
          CompressDelta(ptr, ptr + kBlockSizeFrames, header->data, alloc_end);
      break;
    case Compression::LZW:
      packed_end =
          CompressLzw(ptr, ptr + kBlockSizeFrames, header->data, alloc_end);
      break;
    default:
      UNREACHABLE("Unexpected type");
      break;
  }

  header->type = type;
  header->size = packed_end - packed;

  VPrintf(1, "Packed block of %zu KiB to %zu KiB\n", kBlockSizeBytes >> 10,
          header->size >> 10);

  if (kBlockSizeBytes - header->size < kBlockSizeBytes / 8) {
    VPrintf(1, "Undo and keep block unpacked\n");
    MprotectReadOnly(reinterpret_cast<uptr>(ptr), kBlockSizeBytes);
    store->Unmap(packed, kBlockSizeBytes);
    state = State::Unpacked;
    return 0;
  }

  uptr packed_size_aligned = RoundUpTo(header->size, GetPageSizeCached());
  store->Unmap(packed + packed_size_aligned,
               kBlockSizeBytes - packed_size_aligned);
  MprotectReadOnly(reinterpret_cast<uptr>(packed), packed_size_aligned);

  atomic_store(&data_, reinterpret_cast<uptr>(packed), memory_order_release);
  store->Unmap(ptr, kBlockSizeBytes);

  state = State::Packed;
  return kBlockSizeBytes - packed_size_aligned;
}

void StackStore::BlockInfo::TestOnlyUnmap(StackStore *store) {
  if (uptr *ptr = Get())
    store->Unmap(ptr, kBlockSizeBytes);
}
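
// Adds `n` to the count of frames written into this block and returns true
// when the updated count equals kBlockSizeFrames, i.e. the block is full and
// can be packed.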
bool StackStore::BlockInfo::Stored(uptr n) {
  return n + atomic_fetch_add(&stored_, n, memory_order_release) ==
         kBlockSizeFrames;
}

bool StackStore::BlockInfo::IsPacked() const {
  SpinMutexLock l(&mtx_);
  return state == State::Packed;
}

}  // namespace __sanitizer