//===-- sanitizer_stack_store.cpp -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "sanitizer_stack_store.h"

#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_leb128.h"
#include "sanitizer_lzw.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_stacktrace.h"

namespace __sanitizer {

namespace {
struct StackTraceHeader {
  static constexpr u32 kStackSizeBits = 8;

  u8 size;
  u8 tag;
  explicit StackTraceHeader(const StackTrace &trace)
      : size(Min<uptr>(trace.size, (1u << 8) - 1)), tag(trace.tag) {
    CHECK_EQ(trace.tag, static_cast<uptr>(tag));
  }
  explicit StackTraceHeader(uptr h)
      : size(h & ((1 << kStackSizeBits) - 1)), tag(h >> kStackSizeBits) {}

  uptr ToUptr() const {
    return static_cast<uptr>(size) | (static_cast<uptr>(tag) << kStackSizeBits);
  }
};
}  // namespace

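// Stores the trace as one header word (size in the low 8 bits, tag above it)
// followed by `size` PCs in the lock-free frame array, and returns a compact
// Id encoding the frame offset. `*pack` is set to the number of blocks that
// became completely filled by this call and are therefore ready to be
// Pack()ed.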
StackStore::Id StackStore::Store(const StackTrace &trace, uptr *pack) {
  if (!trace.size && !trace.tag)
    return 0;
  StackTraceHeader h(trace);
  uptr idx = 0;
  *pack = 0;
  uptr *stack_trace = Alloc(h.size + 1, &idx, pack);
  *stack_trace = h.ToUptr();
  internal_memcpy(stack_trace + 1, trace.trace, h.size * sizeof(uptr));
  *pack += blocks_[GetBlockIdx(idx)].Stored(h.size + 1);
  return OffsetToId(idx);
}

StackTrace StackStore::Load(Id id) {
  if (!id)
    return {};
  uptr idx = IdToOffset(id);
  uptr block_idx = GetBlockIdx(idx);
  CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
  const uptr *stack_trace = blocks_[block_idx].GetOrUnpack(this);
  if (!stack_trace)
    return {};
  stack_trace += GetInBlockIdx(idx);
  StackTraceHeader h(*stack_trace);
  return StackTrace(stack_trace + 1, h.size, h.tag);
}

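// Illustrative round trip (a sketch, not exercised in this file; assumes a
// StackStore instance `store` and a captured StackTrace `trace`):
//   uptr pack = 0;
//   StackStore::Id id = store.Store(trace, &pack);
//   if (pack)  // Some blocks became full and can be compressed.
//     store.Pack(StackStore::Compression::Delta);
//   StackTrace copy = store.Load(id);  // Unpacks the block if needed.
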
uptr StackStore::Allocated() const {
  return atomic_load_relaxed(&allocated_) + sizeof(*this);
}

uptr *StackStore::Alloc(uptr count, uptr *idx, uptr *pack) {
  for (;;) {
    // Optimistic lock-free allocation: essentially try to bump total_frames_.
    uptr start = atomic_fetch_add(&total_frames_, count, memory_order_relaxed);
    uptr block_idx = GetBlockIdx(start);
    uptr last_idx = GetBlockIdx(start + count - 1);
    if (LIKELY(block_idx == last_idx)) {
      // Fits into a single block.
      CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
      *idx = start;
      return blocks_[block_idx].GetOrCreate(this) + GetInBlockIdx(start);
    }

    // Retry. We can't use a range allocated in two different blocks.
    CHECK_LE(count, kBlockSizeFrames);
    uptr in_first = kBlockSizeFrames - GetInBlockIdx(start);
    // Mark the tail/head of these blocks as "stored" to avoid waiting before
    // we can Pack().
    *pack += blocks_[block_idx].Stored(in_first);
    *pack += blocks_[last_idx].Stored(count - in_first);
  }
}

void *StackStore::Map(uptr size, const char *mem_type) {
  atomic_fetch_add(&allocated_, size, memory_order_relaxed);
  return MmapNoReserveOrDie(size, mem_type);
}

void StackStore::Unmap(void *addr, uptr size) {
  atomic_fetch_sub(&allocated_, size, memory_order_relaxed);
  UnmapOrDie(addr, size);
}

uptr StackStore::Pack(Compression type) {
  uptr res = 0;
  for (BlockInfo &b : blocks_) res += b.Pack(type, this);
  return res;
}

void StackStore::LockAll() {
  for (BlockInfo &b : blocks_) b.Lock();
}

void StackStore::UnlockAll() {
  for (BlockInfo &b : blocks_) b.Unlock();
}

void StackStore::TestOnlyUnmap() {
  for (BlockInfo &b : blocks_) b.TestOnlyUnmap(this);
  internal_memset(this, 0, sizeof(*this));
}

uptr *StackStore::BlockInfo::Get() const {
  // Idiomatic double-checked locking uses memory_order_acquire here. But
  // relaxed is fine for us, justification is similar to
  // TwoLevelMap::GetOrCreate.
  return reinterpret_cast<uptr *>(atomic_load_relaxed(&data_));
}

uptr *StackStore::BlockInfo::Create(StackStore *store) {
  SpinMutexLock l(&mtx_);
  uptr *ptr = Get();
  if (!ptr) {
    ptr = reinterpret_cast<uptr *>(store->Map(kBlockSizeBytes, "StackStore"));
    atomic_store(&data_, reinterpret_cast<uptr>(ptr), memory_order_release);
  }
  return ptr;
}

uptr *StackStore::BlockInfo::GetOrCreate(StackStore *store) {
  uptr *ptr = Get();
  if (LIKELY(ptr))
    return ptr;
  return Create(store);
}

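// SLeb128Encoder/SLeb128Decoder are minimal output/input iterator adapters:
// each assigned value is written as the SLEB128-encoded signed difference
// from the previously written value, so runs of nearby addresses shrink to a
// byte or two each. For illustration (values are hypothetical): the sequence
// 0x401000, 0x401008, 0x400ff0 is stored as the deltas +0x401000, +0x8,
// -0x18, each as a variable-length SLEB128 byte group.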
class SLeb128Encoder {
 public:
  SLeb128Encoder(u8 *begin, u8 *end) : begin(begin), end(end) {}

  bool operator==(const SLeb128Encoder &other) const {
    return begin == other.begin;
  }

  bool operator!=(const SLeb128Encoder &other) const {
    return begin != other.begin;
  }

  SLeb128Encoder &operator=(uptr v) {
    sptr diff = v - previous;
    begin = EncodeSLEB128(diff, begin, end);
    previous = v;
    return *this;
  }
  SLeb128Encoder &operator*() { return *this; }
  SLeb128Encoder &operator++() { return *this; }

  u8 *base() const { return begin; }

 private:
  u8 *begin;
  u8 *end;
  uptr previous = 0;
};

class SLeb128Decoder {
 public:
  SLeb128Decoder(const u8 *begin, const u8 *end) : begin(begin), end(end) {}

  bool operator==(const SLeb128Decoder &other) const {
    return begin == other.begin;
  }

  bool operator!=(const SLeb128Decoder &other) const {
    return begin != other.begin;
  }

  uptr operator*() {
    sptr diff;
    begin = DecodeSLEB128(begin, end, &diff);
    previous += diff;
    return previous;
  }
  SLeb128Decoder &operator++() { return *this; }

  SLeb128Decoder operator++(int) { return *this; }

 private:
  const u8 *begin;
  const u8 *end;
  uptr previous = 0;
};

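// CompressDelta stores each frame as an SLEB128 delta from the previous one;
// CompressLzw first LZW-encodes the frame sequence and emits the resulting
// codes through the same delta/SLEB128 encoder. The Uncompress* counterparts
// reverse the transform and CHECK that the expected number of frames was
// produced.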
static u8 *CompressDelta(const uptr *from, const uptr *from_end, u8 *to,
                         u8 *to_end) {
  SLeb128Encoder encoder(to, to_end);
  for (; from != from_end; ++from, ++encoder) *encoder = *from;
  return encoder.base();
}

static uptr *UncompressDelta(const u8 *from, const u8 *from_end, uptr *to,
                             uptr *to_end) {
  SLeb128Decoder decoder(from, from_end);
  SLeb128Decoder end(from_end, from_end);
  for (; decoder != end; ++to, ++decoder) *to = *decoder;
  CHECK_EQ(to, to_end);
  return to;
}

static u8 *CompressLzw(const uptr *from, const uptr *from_end, u8 *to,
                       u8 *to_end) {
  SLeb128Encoder encoder(to, to_end);
  encoder = LzwEncode<uptr>(from, from_end, encoder);
  return encoder.base();
}

static uptr *UncompressLzw(const u8 *from, const u8 *from_end, uptr *to,
                           uptr *to_end) {
  SLeb128Decoder decoder(from, from_end);
  SLeb128Decoder end(from_end, from_end);
  to = LzwDecode<uptr>(decoder, end, to);
  CHECK_EQ(to, to_end);
  return to;
}

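// A packed block is laid out as a PackedHeader (total packed size in bytes
// and the compression type used) immediately followed by the compressed byte
// stream in the flexible array member `data`.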
#if defined(_MSC_VER) && !defined(__clang__)
#  pragma warning(push)
// Disable 'nonstandard extension used: zero-sized array in struct/union'.
#  pragma warning(disable : 4200)
#endif
namespace {
struct PackedHeader {
  uptr size;
  StackStore::Compression type;
  u8 data[];
};
}  // namespace
#if defined(_MSC_VER) && !defined(__clang__)
#  pragma warning(pop)
#endif

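// Returns the uncompressed frame array for this block, lazily decompressing
// it on first access after packing: a fresh block-sized mapping is filled
// from the packed data, made read-only, published to data_, and the packed
// mapping is released.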
uptr *StackStore::BlockInfo::GetOrUnpack(StackStore *store) {
  SpinMutexLock l(&mtx_);
  switch (state) {
    case State::Storing:
      state = State::Unpacked;
      FALLTHROUGH;
    case State::Unpacked:
      return Get();
    case State::Packed:
      break;
  }

  u8 *ptr = reinterpret_cast<u8 *>(Get());
  CHECK_NE(nullptr, ptr);
  const PackedHeader *header = reinterpret_cast<const PackedHeader *>(ptr);
  CHECK_LE(header->size, kBlockSizeBytes);
  CHECK_GE(header->size, sizeof(PackedHeader));

  uptr packed_size_aligned = RoundUpTo(header->size, GetPageSizeCached());

  uptr *unpacked =
      reinterpret_cast<uptr *>(store->Map(kBlockSizeBytes, "StackStoreUnpack"));

  uptr *unpacked_end;
  switch (header->type) {
    case Compression::Delta:
      unpacked_end = UncompressDelta(header->data, ptr + header->size, unpacked,
                                     unpacked + kBlockSizeFrames);
      break;
    case Compression::LZW:
      unpacked_end = UncompressLzw(header->data, ptr + header->size, unpacked,
                                   unpacked + kBlockSizeFrames);
      break;
    default:
      UNREACHABLE("Unexpected type");
      break;
  }

  CHECK_EQ(kBlockSizeFrames, unpacked_end - unpacked);

  MprotectReadOnly(reinterpret_cast<uptr>(unpacked), kBlockSizeBytes);
  atomic_store(&data_, reinterpret_cast<uptr>(unpacked), memory_order_release);
  store->Unmap(ptr, packed_size_aligned);

  state = State::Unpacked;
  return Get();
}

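// Compresses a completely filled block into a fresh mapping. The compressed
// image is kept only if it saves at least 1/8 of the block; otherwise the
// block stays unpacked. Returns the number of bytes of address space released
// (0 if nothing was packed).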
uptr StackStore::BlockInfo::Pack(Compression type, StackStore *store) {
  if (type == Compression::None)
    return 0;

  SpinMutexLock l(&mtx_);
  switch (state) {
    case State::Unpacked:
    case State::Packed:
      return 0;
    case State::Storing:
      break;
  }

  uptr *ptr = Get();
  if (!ptr || !Stored(0))
    return 0;

  u8 *packed =
      reinterpret_cast<u8 *>(store->Map(kBlockSizeBytes, "StackStorePack"));
  PackedHeader *header = reinterpret_cast<PackedHeader *>(packed);
  u8 *alloc_end = packed + kBlockSizeBytes;

  u8 *packed_end = nullptr;
  switch (type) {
    case Compression::Delta:
      packed_end =
          CompressDelta(ptr, ptr + kBlockSizeFrames, header->data, alloc_end);
      break;
    case Compression::LZW:
      packed_end =
          CompressLzw(ptr, ptr + kBlockSizeFrames, header->data, alloc_end);
      break;
    default:
      UNREACHABLE("Unexpected type");
      break;
  }

  header->type = type;
  header->size = packed_end - packed;

  VPrintf(1, "Packed block of %zu KiB to %zu KiB\n", kBlockSizeBytes >> 10,
          header->size >> 10);

  if (kBlockSizeBytes - header->size < kBlockSizeBytes / 8) {
    VPrintf(1, "Undo and keep block unpacked\n");
    MprotectReadOnly(reinterpret_cast<uptr>(ptr), kBlockSizeBytes);
    store->Unmap(packed, kBlockSizeBytes);
    state = State::Unpacked;
    return 0;
  }

  uptr packed_size_aligned = RoundUpTo(header->size, GetPageSizeCached());
  store->Unmap(packed + packed_size_aligned,
               kBlockSizeBytes - packed_size_aligned);
  MprotectReadOnly(reinterpret_cast<uptr>(packed), packed_size_aligned);

  atomic_store(&data_, reinterpret_cast<uptr>(packed), memory_order_release);
  store->Unmap(ptr, kBlockSizeBytes);

  state = State::Packed;
  return kBlockSizeBytes - packed_size_aligned;
}

void StackStore::BlockInfo::TestOnlyUnmap(StackStore *store) {
  if (uptr *ptr = Get())
    store->Unmap(ptr, kBlockSizeBytes);
}

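// Adds `n` to the count of frames stored into this block and returns true iff
// the block is completely filled after the update. Pack() uses Stored(0) to
// query fullness without changing the count.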
bool StackStore::BlockInfo::Stored(uptr n) {
  return n + atomic_fetch_add(&stored_, n, memory_order_release) ==
         kBlockSizeFrames;
}

bool StackStore::BlockInfo::IsPacked() const {
  SpinMutexLock l(&mtx_);
  return state == State::Packed;
}

}  // namespace __sanitizer