//===-- tsan_trace.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#ifndef TSAN_TRACE_H
#define TSAN_TRACE_H

#include "tsan_defs.h"
#include "tsan_ilist.h"
#include "tsan_mutexset.h"
#include "tsan_stack_trace.h"

namespace __tsan {

enum class EventType : u64 {
  kAccessExt,
  kAccessRange,
  kLock,
  kRLock,
  kUnlock,
  kTime,
};

// "Base" type for all events, used for type dispatch.
struct Event {
  // We use variable-length type encoding to give more bits to the event
  // types that need them. If is_access is set, this is EventAccess.
  // Otherwise, if is_func is set, this is EventFunc.
  // Otherwise, the type field denotes the event type.
  u64 is_access : 1;
  u64 is_func : 1;
  EventType type : 3;
  u64 _ : 59;
};
static_assert(sizeof(Event) == 8, "bad Event size");

// Nop event, used as padding; it does not affect state during replay.
static constexpr Event NopEvent = {1, 0, EventType::kAccessExt, 0};
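
// Illustrative decoding sketch, not part of the original header: the dispatch
// order implied by the Event comment above is to test is_access first, then
// is_func, and only then consult the 3-bit type field, which is meaningful
// only when both flags are clear. EventKind and ClassifyEvent are hypothetical
// names used only for this example.
enum class EventKind { kCompactAccess, kFuncEnterExit, kTyped };

inline EventKind ClassifyEvent(const Event &ev) {
  if (ev.is_access)
    return EventKind::kCompactAccess;  // interpret the word as EventAccess
  if (ev.is_func)
    return EventKind::kFuncEnterExit;  // interpret the word as EventFunc
  return EventKind::kTyped;  // interpret based on ev.type (e.g. EventAccessExt)
}
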
// Compressed memory access event. It can represent only accesses whose PC is
// close enough to the previous access PC; otherwise we fall back to
// EventAccessExt with a full PC.
struct EventAccess {
  static constexpr uptr kPCBits = 15;
  static_assert(kPCBits + kCompressedAddrBits + 5 == 64,
                "unused bits in EventAccess");

  u64 is_access : 1;  // = 1
  u64 is_read : 1;
  u64 is_atomic : 1;
  u64 size_log : 2;
  u64 pc_delta : kPCBits;  // signed delta from the previous memory access PC
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventAccess) == 8, "bad EventAccess size");
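
// Illustrative sketch with hypothetical helpers (not the actual tracer or
// replayer code): one way to store a signed PC delta in kPCBits unsigned bits
// is a biased encoding. An access can be traced compactly only if the biased
// delta fits; otherwise the tracer must fall back to EventAccessExt.
inline bool TryEncodePCDelta(uptr prev_pc, uptr pc, u64 *pc_delta) {
  uptr biased = pc - prev_pc + ((uptr)1 << (EventAccess::kPCBits - 1));
  if (biased >= ((uptr)1 << EventAccess::kPCBits))
    return false;  // PCs too far apart for the compact event
  *pc_delta = biased;
  return true;
}

inline uptr DecodePCDelta(uptr prev_pc, u64 pc_delta) {
  return prev_pc + pc_delta - ((uptr)1 << (EventAccess::kPCBits - 1));
}
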
// Function entry (pc != 0) or exit (pc == 0).
struct EventFunc {
  u64 is_access : 1;  // = 0
  u64 is_func : 1;    // = 1
  u64 pc : 62;
};
static_assert(sizeof(EventFunc) == 8, "bad EventFunc size");

// Extended memory access event with a full PC.
struct EventAccessExt {
  // Note: precisely specifying the unused parts of the bitfield is critical
  // for performance. If we don't specify them, the compiler will generate code
  // to load the old value and shuffle it so that the unused bits are carried
  // over into the new value. If we specify the unused part and store 0 in
  // there, all that unnecessary code goes away (the store of the 0 constant is
  // combined with the stores of the other constant parts).
  static constexpr uptr kUnusedBits = 11;
  static_assert(kCompressedAddrBits + kUnusedBits + 9 == 64,
                "unused bits in EventAccessExt");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kAccessExt
  u64 is_read : 1;
  u64 is_atomic : 1;
  u64 size_log : 2;
  u64 _ : kUnusedBits;
  u64 addr : kCompressedAddrBits;
  u64 pc;
};
static_assert(sizeof(EventAccessExt) == 16, "bad EventAccessExt size");
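
// Illustrative sketch with a hypothetical helper (not the actual tracing
// code): per the performance note above, the unused bits are written
// explicitly as 0 so the compiler can emit plain constant stores instead of a
// load/mask/merge of the previous memory contents.
inline void FillAccessExt(EventAccessExt *ev, u64 compressed_addr, uptr pc,
                          bool is_read, bool is_atomic, u64 size_log) {
  ev->is_access = 0;
  ev->is_func = 0;
  ev->type = EventType::kAccessExt;
  ev->is_read = is_read;
  ev->is_atomic = is_atomic;
  ev->size_log = size_log;
  ev->_ = 0;  // explicit zero store of the unused bits (see the note above)
  ev->addr = compressed_addr;
  ev->pc = pc;
}
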
// Access to a memory range.
struct EventAccessRange {
  static constexpr uptr kSizeLoBits = 13;
  static_assert(kCompressedAddrBits + kSizeLoBits + 7 == 64,
                "unused bits in EventAccessRange");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kAccessRange
  u64 is_read : 1;
  u64 is_free : 1;
  u64 size_lo : kSizeLoBits;
  u64 pc : kCompressedAddrBits;
  u64 addr : kCompressedAddrBits;
  u64 size_hi : 64 - kCompressedAddrBits;
};
static_assert(sizeof(EventAccessRange) == 16, "bad EventAccessRange size");
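
// Illustrative sketch with hypothetical helpers (not the actual tracer or
// replayer code): the range size does not fit into the remaining bits of the
// first word, so it is split into size_lo/size_hi around kSizeLoBits.
inline void EncodeRangeSize(EventAccessRange *ev, uptr size) {
  ev->size_lo = size & ((1ull << EventAccessRange::kSizeLoBits) - 1);
  ev->size_hi = size >> EventAccessRange::kSizeLoBits;
}

inline uptr DecodeRangeSize(const EventAccessRange &ev) {
  return (uptr)((ev.size_hi << EventAccessRange::kSizeLoBits) | ev.size_lo);
}
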
// Mutex lock.
struct EventLock {
  static constexpr uptr kStackIDLoBits = 15;
  static constexpr uptr kStackIDHiBits =
      sizeof(StackID) * kByteBits - kStackIDLoBits;
  static constexpr uptr kUnusedBits = 3;
  static_assert(kCompressedAddrBits + kStackIDLoBits + 5 == 64,
                "unused bits in EventLock");
  static_assert(kCompressedAddrBits + kStackIDHiBits + kUnusedBits == 64,
                "unused bits in EventLock");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kLock or EventType::kRLock
  u64 pc : kCompressedAddrBits;
  u64 stack_lo : kStackIDLoBits;
  u64 stack_hi : sizeof(StackID) * kByteBits - kStackIDLoBits;
  u64 _ : kUnusedBits;
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventLock) == 16, "bad EventLock size");
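
// Illustrative sketch with hypothetical helpers (not the actual tracer or
// replayer code): the StackID does not fit into the first 64-bit word next to
// the type bits and the compressed PC, so it is split into stack_lo/stack_hi.
inline void EncodeLockStack(EventLock *ev, StackID stack) {
  ev->stack_lo = stack & ((1ull << EventLock::kStackIDLoBits) - 1);
  ev->stack_hi = stack >> EventLock::kStackIDLoBits;
}

inline StackID DecodeLockStack(const EventLock &ev) {
  return (StackID)((ev.stack_hi << EventLock::kStackIDLoBits) | ev.stack_lo);
}
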
// Mutex unlock.
struct EventUnlock {
  static constexpr uptr kUnusedBits = 15;
  static_assert(kCompressedAddrBits + kUnusedBits + 5 == 64,
                "unused bits in EventUnlock");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kUnlock
  u64 _ : kUnusedBits;
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventUnlock) == 8, "bad EventUnlock size");

// Time change event.
struct EventTime {
  static constexpr uptr kUnusedBits = 37;
  static_assert(kUnusedBits + sizeof(Sid) * kByteBits + kEpochBits + 5 == 64,
                "unused bits in EventTime");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kTime
  u64 sid : sizeof(Sid) * kByteBits;
  u64 epoch : kEpochBits;
  u64 _ : kUnusedBits;
};
static_assert(sizeof(EventTime) == 8, "bad EventTime size");
struct Trace;

struct TraceHeader {
  Trace* trace = nullptr;  // back-pointer to the Trace containing this part
  INode trace_parts;       // in Trace::parts
  INode global;            // in Context::trace_part_recycle
};

struct TracePart : TraceHeader {
  // There are a lot of goroutines in Go, so we use smaller parts.
  static constexpr uptr kByteSize = (SANITIZER_GO ? 128 : 256) << 10;
  static constexpr uptr kSize =
      (kByteSize - sizeof(TraceHeader)) / sizeof(Event);
  // TraceAcquire does a fast event pointer overflow check by comparing
  // the pointer into TracePart::events against the kAlignment mask. Since
  // TraceParts are allocated page-aligned, this check detects the end of the
  // array (it also has false positives in the middle of the part, which are
  // filtered out separately). This requires events to be the last field.
  static constexpr uptr kAlignment = 0xff0;
  Event events[kSize];

  TracePart() {}
};
static_assert(sizeof(TracePart) == TracePart::kByteSize, "bad TracePart size");
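
// Illustrative sketch with a hypothetical helper (not the actual TraceAcquire
// from the runtime): because TracePart is allocated page-aligned and events is
// its last field, an event pointer whose bits selected by kAlignment are all
// set lies in the last 16 bytes of a 4K page, which includes the true end of
// the part. Matches in the middle of the part are the false positives
// mentioned above and are filtered out by the slow path.
inline bool TracePosMayOverflow(const Event *pos) {
  return ((uptr)pos & TracePart::kAlignment) == TracePart::kAlignment;
}
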
struct Trace {
  Mutex mtx;
  IList<TraceHeader, &TraceHeader::trace_parts, TracePart> parts;
  // First part that is not queued into ctx->trace_part_recycle.
  TracePart* local_head;
  // Final position in the last part for finished threads.
  Event* final_pos = nullptr;
  // Number of trace parts allocated on behalf of this trace specifically.
  // The total number of parts in this trace can be larger if we retake some
  // parts from other traces.
  uptr parts_allocated = 0;

  Trace() : mtx(MutexTypeTrace) {}

  // We need at least 3 parts per thread, because we want to keep at least
  // 2 parts per thread that are not queued into ctx->trace_part_recycle
  // (the current part being filled and one full part that ensures that
  // we always have at least one part worth of previous memory accesses).
  static constexpr uptr kMinParts = 3;

  static constexpr uptr kFinishedThreadLo = 16;
  static constexpr uptr kFinishedThreadHi = 64;
};

}  // namespace __tsan

#endif  // TSAN_TRACE_H