//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals)
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers included only into platform-specific files (*).
//
//   (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//
#ifndef TSAN_RTL_H
#define TSAN_RTL_H

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_vector.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_ignoreset.h"
#include "tsan_ilist.h"
#include "tsan_mman.h"
#include "tsan_mutexset.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_shadow.h"
#include "tsan_stack_trace.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_vector_clock.h"

#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif
namespace __tsan {

#if !SANITIZER_GO
struct MapUnmapCallback;
#if defined(__mips64) || defined(__aarch64__) || defined(__loongarch__) || \
    defined(__powerpc__)

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = 0;
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  typedef __tsan::MapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#else
struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
# if defined(__s390x__)
  typedef MappingS390x Mapping;
# else
  typedef Mapping48AddressSpace Mapping;
# endif
  static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
  static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg;
  static const uptr kMetadataSize = 0;
  typedef DefaultSizeClassMap SizeClassMap;
  typedef __tsan::MapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;
Allocator *allocator();
#endif

struct ThreadSignalContext;

struct JmpBuf {
  uptr sp;
  int int_signal_send;
  bool in_blocking_func;
  uptr in_signal_handler;
  uptr *shadow_stack_pos;
};
// A Processor represents a physical thread, or a P for Go.
// It is used to store internal resources like allocator caches, and does not
// participate in race-detection logic (invisible to end user).
// In C++ it is tied to an OS thread just like ThreadState, however ideally
// it should be tied to a CPU (this way we will have fewer allocator caches).
// In Go it is tied to a P, so there are significantly fewer Processor's than
// ThreadState's (which are tied to Gs).
// A ThreadState must be wired with a Processor to handle events.
struct Processor {
  ThreadState *thr;  // currently wired thread, or nullptr
#if !SANITIZER_GO
  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;
#endif
  DenseSlabAllocCache block_cache;
  DenseSlabAllocCache sync_cache;
  DDPhysicalThread *dd_pt;
};
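
// Illustrative sketch (not part of the runtime): a Processor is wired to a
// ThreadState through the ProcCreate/ProcWire/ProcUnwire/ProcDestroy functions
// declared later in this header, roughly:
//
//   Processor *proc = ProcCreate();
//   ProcWire(proc, thr);    // thr->proc() now returns proc; events can be handled
//   ...
//   ProcUnwire(proc, thr);
//   ProcDestroy(proc);
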
#if !SANITIZER_GO
// ScopedGlobalProcessor temporarily sets up a global processor for the current
// thread, if it does not have one. Intended for interceptors that can run
// at the very end of a thread, when its Processor has already been destroyed.
struct ScopedGlobalProcessor {
  ScopedGlobalProcessor();
  ~ScopedGlobalProcessor();
};
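
// Illustrative sketch (not part of the runtime): an interceptor that may run
// after the thread's own Processor has been destroyed guards its body with the
// RAII helper above so that allocator/sync caches remain usable:
//
//   static void LateInterceptorBody(ThreadState *thr) {  // hypothetical
//     ScopedGlobalProcessor sgp;  // wires a global Processor if thr has none
//     // ... allocation or sync-object work ...
//   }
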
#endif

struct TidEpoch {
  Tid tid;
  Epoch epoch;
};

struct TidSlot {
  Mutex mtx;
  Sid sid;
  atomic_uint32_t raw_epoch;
  ThreadState *thr;
  Vector<TidEpoch> journal;
  INode node;

  Epoch epoch() const {
    return static_cast<Epoch>(atomic_load(&raw_epoch, memory_order_relaxed));
  }

  void SetEpoch(Epoch v) {
    atomic_store(&raw_epoch, static_cast<u32>(v), memory_order_relaxed);
  }

  TidSlot();
} ALIGNED(SANITIZER_CACHE_LINE_SIZE);
// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  int ignore_sync;
#if !SANITIZER_GO
  int ignore_interceptors;
#endif
  uptr *shadow_stack_pos;

  // Current position in tctx->trace.Back()->events (Event*).
  atomic_uintptr_t trace_pos;
  // PC of the last memory access, used to compute PC deltas in the trace.
  uptr trace_prev_pc;

  // Technically `current` should be a separate THREADLOCAL variable;
  // but it is placed here in order to share cache line with previous fields.
  ThreadState* current;

  atomic_sint32_t pending_signals;

  VectorClock clock;

  // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
  // We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  int suppress_reports;
  // Go does not support ignores.
#if !SANITIZER_GO
  IgnoreSet mop_ignore_set;
  IgnoreSet sync_ignore_set;
#endif
  uptr *shadow_stack;
  uptr *shadow_stack_end;
#if !SANITIZER_GO
  Vector<JmpBuf> jmp_bufs;
  int in_symbolizer;
  atomic_uintptr_t in_blocking_func;
  bool in_ignored_lib;
  bool is_inited;
#endif
  MutexSet mset;
  bool is_dead;
  const Tid tid;
  uptr stk_addr;
  uptr stk_size;
  uptr tls_addr;
  uptr tls_size;
  ThreadContext *tctx;

  DDLogicalThread *dd_lt;

  TidSlot *slot;
  uptr slot_epoch;
  bool slot_locked;

  // Current wired Processor, or nullptr. Required to handle any events.
  Processor *proc1;
#if !SANITIZER_GO
  Processor *proc() { return proc1; }
#else
  Processor *proc();
#endif

  atomic_uintptr_t in_signal_handler;
  atomic_uintptr_t signal_ctx;

#if !SANITIZER_GO
  StackID last_sleep_stack_id;
  VectorClock last_sleep_clock;
#endif

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  const ReportDesc *current_report;

  explicit ThreadState(Tid tid);
} ALIGNED(SANITIZER_CACHE_LINE_SIZE);
#if !SANITIZER_GO
#if SANITIZER_APPLE || SANITIZER_ANDROID
ThreadState *cur_thread();
void set_cur_thread(ThreadState *thr);
void cur_thread_finalize();
inline ThreadState *cur_thread_init() { return cur_thread(); }
# else
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
inline ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current;
}
inline ThreadState *cur_thread_init() {
  ThreadState *thr = reinterpret_cast<ThreadState *>(cur_thread_placeholder);
  if (UNLIKELY(!thr->current))
    thr->current = thr;
  return thr->current;
}
inline void set_cur_thread(ThreadState *thr) {
  reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr;
}
inline void cur_thread_finalize() { }
# endif  // SANITIZER_APPLE || SANITIZER_ANDROID
#endif  // SANITIZER_GO
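
// Illustrative sketch (not part of the runtime): cur_thread_init() exists so
// that code running very early in a thread's life (e.g. interceptors that fire
// before thread start-up completes) still gets a usable ThreadState, while
// set_cur_thread() lets the runtime point the TLS slot at a different
// ThreadState (as the fiber support does), roughly:
//
//   ThreadState *thr = cur_thread_init();  // lazily wires placeholder->current
//   ...
//   set_cur_thread(other_thr);             // redirect, e.g. on a fiber switch
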
class ThreadContext final : public ThreadContextBase {
 public:
  explicit ThreadContext(Tid tid);
  ~ThreadContext();
  ThreadState *thr;
  StackID creation_stack_id;
  VectorClock *sync;
  uptr sync_epoch;
  Trace trace;

  // Override superclass callbacks.
  void OnDead() override;
  void OnJoined(void *arg) override;
  void OnFinished() override;
  void OnStarted(void *arg) override;
  void OnCreated(void *arg) override;
  void OnReset() override;
  void OnDetached(void *arg) override;
};

struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const;
};

struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct FiredSuppression {
  ReportType type;
  uptr pc_or_addr;
  Suppression *supp;
};
struct Context {
  Context();

  bool initialized;
#if !SANITIZER_GO
  bool after_multithreaded_fork;
#endif

  MetaMap metamap;

  Mutex report_mtx;
  int nreported;
  atomic_uint64_t last_symbolize_time_ns;

  void *background_thread;
  atomic_uint32_t stop_background_thread;

  ThreadRegistry thread_registry;

  // This is used to prevent a very unlikely but very pathological behavior.
  // Since memory access handling is not synchronized with DoReset,
  // a thread running concurrently with DoReset can leave a bogus shadow value
  // that will later be falsely detected as a race. For such false races
  // RestoreStack will return false and we will not report them.
  // However, consider that a thread leaves a whole lot of such bogus values
  // and these values are later read by a whole lot of threads.
  // This will cause massive amounts of ReportRace calls and lots of
  // serialization. In very pathological cases the resulting slowdown
  // can be >100x. This is very unlikely, but it was presumably observed
  // in practice: https://github.com/google/sanitizers/issues/1552
  // If this happens, the previous access sid+epoch will be the same for all of
  // these false races because if the thread tries to increment the epoch, it
  // will notice that DoReset has happened and will stop producing bogus shadow
  // values. So, last_spurious_race is used to remember the last sid+epoch
  // for which RestoreStack returned false. Then it is used to filter out
  // races with the same sid+epoch very early and quickly.
  // It is of course possible that multiple threads left multiple bogus shadow
  // values and all of them are read by lots of threads at the same time.
  // In such a case last_spurious_race will only be able to deduplicate a few
  // races from one thread, then a few from another, and so on. An alternative
  // would be to hold an array of such sid+epoch pairs, but we consider such a
  // scenario even less likely.
  // Note: this can lead to some rare false negatives as well:
  // 1. When a legit access with the same sid+epoch participates in a race
  //    as the "previous" memory access, it will be wrongly filtered out.
  // 2. When RestoreStack returns false for a legit memory access because it
  //    was already evicted from the thread trace, we will still remember it in
  //    last_spurious_race. Then if there is another racing memory access from
  //    the same thread that happened in the same epoch, but was stored in the
  //    next thread trace part (which is still preserved in the thread trace),
  //    we will also wrongly filter it out while RestoreStack would actually
  //    succeed for that second memory access.
  RawShadow last_spurious_race;

  Mutex racy_mtx;
  Vector<RacyStacks> racy_stacks;
  // The number of fired suppressions may be large, so they are stored in an
  // InternalMmapVector.
  Mutex fired_suppressions_mtx;
  InternalMmapVector<FiredSuppression> fired_suppressions;
  DDetector *dd;

  Flags flags;
  fd_t memprof_fd;

  // The last slot index (kFreeSid) is used to denote freed memory.
  TidSlot slots[kThreadSlotCount - 1];

  // Protects global_epoch, slot_queue, trace_part_recycle.
  Mutex slot_mtx;
  uptr global_epoch;  // guarded by slot_mtx and by all slot mutexes
  bool resetting;     // global reset is in progress
  IList<TidSlot, &TidSlot::node> slot_queue SANITIZER_GUARDED_BY(slot_mtx);
  IList<TraceHeader, &TraceHeader::global, TracePart> trace_part_recycle
      SANITIZER_GUARDED_BY(slot_mtx);
  uptr trace_part_total_allocated SANITIZER_GUARDED_BY(slot_mtx);
  uptr trace_part_recycle_finished SANITIZER_GUARDED_BY(slot_mtx);
  uptr trace_part_finished_excess SANITIZER_GUARDED_BY(slot_mtx);
#if SANITIZER_GO
  uptr mapped_shadow_begin;
  uptr mapped_shadow_end;
#endif
};
extern Context *ctx;  // The one and the only global runtime context.

ALWAYS_INLINE Flags *flags() {
  return &ctx->flags;
}

struct ScopedIgnoreInterceptors {
  ScopedIgnoreInterceptors() {
#if !SANITIZER_GO
    cur_thread()->ignore_interceptors++;
#endif
  }

  ~ScopedIgnoreInterceptors() {
#if !SANITIZER_GO
    cur_thread()->ignore_interceptors--;
#endif
  }
};
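
// Illustrative sketch (not part of the runtime): runtime code that must call
// out to libraries whose calls would otherwise be intercepted (e.g. the
// symbolizer, see ScopedReportBase below) suppresses interception for a scope:
//
//   void DoInternalWork() {             // hypothetical helper
//     ScopedIgnoreInterceptors ignore;  // interceptors become pass-through
//     // ... external calls here do not generate TSan events ...
//   }
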
const char *GetObjectTypeFromTag(uptr tag);
const char *GetReportHeaderFromTag(uptr tag);
uptr TagFromShadowStackFrame(uptr pc);

class ScopedReportBase {
 public:
  void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, Tid tid,
                       StackTrace stack, const MutexSet *mset);
  void AddStack(StackTrace stack, bool suppressable = false);
  void AddThread(const ThreadContext *tctx, bool suppressable = false);
  void AddThread(Tid tid, bool suppressable = false);
  void AddUniqueTid(Tid unique_tid);
  int AddMutex(uptr addr, StackID creation_stack_id);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(StackID stack_id);
  void SetCount(int count);
  void SetSigNum(int sig);

  const ReportDesc *GetReport() const;

 protected:
  ScopedReportBase(ReportType typ, uptr tag);
  ~ScopedReportBase();

 private:
  ReportDesc *rep_;
  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore_interceptors_;

  ScopedReportBase(const ScopedReportBase &) = delete;
  void operator=(const ScopedReportBase &) = delete;
};

class ScopedReport : public ScopedReportBase {
 public:
  explicit ScopedReport(ReportType typ, uptr tag = kExternalTagNone);
  ~ScopedReport();

 private:
  ScopedErrorReportLock lock_;
};

bool ShouldReport(ThreadState *thr, ReportType typ);
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);
// The stack could look like:
//   <start> | <main> | <foo> | tag | <bar>
// This will extract the tag and keep:
//   <start> | <main> | <foo> | <bar>
template<typename StackTraceTy>
void ExtractTagFromStack(StackTraceTy *stack, uptr *tag = nullptr) {
  if (stack->size < 2) return;
  uptr possible_tag_pc = stack->trace[stack->size - 2];
  uptr possible_tag = TagFromShadowStackFrame(possible_tag_pc);
  if (possible_tag == kExternalTagNone) return;
  stack->trace_buffer[stack->size - 2] = stack->trace_buffer[stack->size - 1];
  stack->size -= 1;
  if (tag) *tag = possible_tag;
}

template<typename StackTraceTy>
void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack,
                        uptr *tag = nullptr) {
  uptr size = thr->shadow_stack_pos - thr->shadow_stack;
  uptr start = 0;
  if (size + !!toppc > kStackTraceMax) {
    start = size + !!toppc - kStackTraceMax;
    size = kStackTraceMax - !!toppc;
  }
  stack->Init(&thr->shadow_stack[start], size, toppc);
  ExtractTagFromStack(stack, tag);
}

#define GET_STACK_TRACE_FATAL(thr, pc) \
  VarSizeStackTrace stack;             \
  ObtainCurrentStack(thr, pc, &stack); \
  stack.ReverseOrder();
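
// Illustrative sketch (not part of the runtime): report paths that need the
// current stack of a thread expand the macro above, roughly:
//
//   void ReportHere(ThreadState *thr, uptr pc) {  // hypothetical helper
//     GET_STACK_TRACE_FATAL(thr, pc);  // declares and fills `stack`
//     // `stack` now holds up to kStackTraceMax frames of the shadow stack
//     // (plus `pc` on top), reversed so the most recent frame comes first,
//     // with any external tag frame already stripped.
//   }
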
void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size, const char *name);
void DontNeedShadowFor(uptr addr, uptr size);
void UnmapShadow(ThreadState *thr, uptr addr, uptr size);
void InitializeShadowMemory();
void DontDumpShadow(uptr addr, uptr size);
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();

void ForkBefore(ThreadState *thr, uptr pc);
void ForkParentAfter(ThreadState *thr, uptr pc);
void ForkChildAfter(ThreadState *thr, uptr pc, bool start_thread);

void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
                AccessType typ);
bool OutputReport(ThreadState *thr, const ScopedReport &srep);
bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif

StackID CurrentStackId(ThreadState *thr, uptr pc);
ReportStack *SymbolizeStackId(StackID stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow(uptr pc);  // uses libunwind
MBlock *JavaHeapBlock(uptr addr, uptr *start);

void Initialize(ThreadState *thr);
void MaybeSpawnBackgroundThread();
int Finalize(ThreadState *thr);

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
                  AccessType typ);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           AccessType typ);

// This creates 2 non-inlined specialized versions of MemoryAccessRange.
template <bool is_read>
void MemoryAccessRangeT(ThreadState *thr, uptr pc, uptr addr, uptr size);

ALWAYS_INLINE
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
                       bool is_write) {
  if (size == 0)
    return;
  if (is_write)
    MemoryAccessRangeT<false>(thr, pc, addr, size);
  else
    MemoryAccessRangeT<true>(thr, pc, addr, size);
}
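
// Illustrative note (not part of the runtime): the wrapper above maps the
// boolean onto the `is_read` template parameter, so every caller reuses one of
// just two non-inlined instantiations:
//
//   MemoryAccessRange(thr, pc, addr, n, /*is_write=*/true);   // MemoryAccessRangeT<false>
//   MemoryAccessRange(thr, pc, addr, n, /*is_write=*/false);  // MemoryAccessRangeT<true>
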
void ShadowSet(RawShadow *p, RawShadow *end, RawShadow v);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
                                         uptr size);

void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreEnd(ThreadState *thr);
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncEnd(ThreadState *thr);

Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
                 ThreadType thread_type);
void ThreadFinish(ThreadState *thr);
Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, Tid tid);
void ThreadDetach(ThreadState *thr, uptr pc, Tid tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignalsImpl(ThreadState *thr);
void ThreadNotJoined(ThreadState *thr, uptr pc, Tid tid, uptr uid);

Processor *ProcCreate();
void ProcDestroy(Processor *proc);
void ProcWire(Processor *proc, ThreadState *thr);
void ProcUnwire(Processor *proc, ThreadState *thr);

// Note: the parameter is called flagz, because flags is already taken
// by the global function that returns flags.
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0,
                   int rec = 1);
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr);  // call on EOWNERDEAD
void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr);

void Acquire(ThreadState *thr, uptr pc, uptr addr);
// AcquireGlobal synchronizes the current thread with all other threads.
// In terms of the happens-before relation, it draws an HB edge from all threads
// (where they happen to execute right now) to the current thread. We use it to
// handle Go finalizers. Namely, the finalizer goroutine executes AcquireGlobal
// right before executing finalizers. This provides a coarse but simple
// approximation of the actual required synchronization.
void AcquireGlobal(ThreadState *thr);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
void IncrementEpoch(ThreadState *thr);

#if !SANITIZER_GO
uptr ALWAYS_INLINE HeapEnd() {
  return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
}
#endif

void SlotAttachAndLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
void SlotDetach(ThreadState *thr);
void SlotLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
void SlotUnlock(ThreadState *thr) SANITIZER_RELEASE(thr->slot->mtx);
void DoReset(ThreadState *thr, uptr epoch);
void FlushShadowMemory();

ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags);
void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber);
void FiberSwitch(ThreadState *thr, uptr pc, ThreadState *fiber, unsigned flags);

// These need to match __tsan_switch_to_fiber_* flags defined in
// tsan_interface.h. See documentation there as well.
enum FiberSwitchFlags {
  FiberSwitchFlagNoSync = 1 << 0,  // __tsan_switch_to_fiber_no_sync
};
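
// Illustrative sketch (not part of the runtime): these are the internal
// counterparts of the public __tsan_*_fiber API; a user-level scheduler drives
// them roughly as:
//
//   ThreadState *fiber = FiberCreate(thr, pc, /*flags=*/0);
//   FiberSwitch(thr, pc, fiber, FiberSwitchFlagNoSync);  // switch without an HB edge
//   ...
//   FiberDestroy(thr, pc, fiber);
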
class SlotLocker {
 public:
  ALWAYS_INLINE
  SlotLocker(ThreadState *thr, bool recursive = false)
      : thr_(thr), locked_(recursive ? thr->slot_locked : false) {
#if !SANITIZER_GO
    // We are in trouble if we are here with in_blocking_func set.
    // If in_blocking_func is set, all signals will be delivered synchronously,
    // which means we can't lock slots since the signal handler will try
    // to lock it recursively and deadlock.
    DCHECK(!atomic_load(&thr->in_blocking_func, memory_order_relaxed));
#endif
    if (!locked_)
      SlotLock(thr_);
  }

  ALWAYS_INLINE
  ~SlotLocker() {
    if (!locked_)
      SlotUnlock(thr_);
  }

 private:
  ThreadState *thr_;
  bool locked_;
};

class SlotUnlocker {
 public:
  SlotUnlocker(ThreadState *thr) : thr_(thr), locked_(thr->slot_locked) {
    if (locked_)
      SlotUnlock(thr_);
  }

  ~SlotUnlocker() {
    if (locked_)
      SlotLock(thr_);
  }

 private:
  ThreadState *thr_;
  bool locked_;
};
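
// Illustrative sketch (not part of the runtime): SlotLocker acquires the
// thread's slot mutex unless `recursive` is passed and the slot is already
// locked; SlotUnlocker is the inverse guard that temporarily drops an
// already-held slot lock:
//
//   void ExampleOp(ThreadState *thr) {  // hypothetical helper
//     SlotLocker locker(thr);
//     // ... slot lock held ...
//     {
//       SlotUnlocker unlocker(thr);
//       // ... slot lock released while calling out ...
//     }  // re-acquired here
//   }    // released here (if this SlotLocker took it)
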
ALWAYS_INLINE void ProcessPendingSignals(ThreadState *thr) {
  if (UNLIKELY(atomic_load_relaxed(&thr->pending_signals)))
    ProcessPendingSignalsImpl(thr);
}

extern bool is_initialized;

ALWAYS_INLINE
void LazyInitialize(ThreadState *thr) {
  // If we can use .preinit_array, assume that __tsan_init
  // called from .preinit_array initializes runtime before
  // any instrumented code except when tsan is used as a
  // shared library.
#if (!SANITIZER_CAN_USE_PREINIT_ARRAY || defined(SANITIZER_SHARED))
  if (UNLIKELY(!is_initialized))
    Initialize(thr);
#endif
}

void TraceResetForTesting();
void TraceSwitchPart(ThreadState *thr);
void TraceSwitchPartImpl(ThreadState *thr);
bool RestoreStack(EventType type, Sid sid, Epoch epoch, uptr addr, uptr size,
                  AccessType typ, Tid *ptid, VarSizeStackTrace *pstk,
                  MutexSet *pmset, uptr *ptag);
template <typename EventT>
ALWAYS_INLINE WARN_UNUSED_RESULT bool TraceAcquire(ThreadState *thr,
                                                   EventT **ev) {
  // TraceSwitchPart accesses shadow_stack, but it's called infrequently,
  // so we check it here proactively.
  DCHECK(thr->shadow_stack);
  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
#if SANITIZER_DEBUG
  // TraceSwitch acquires these mutexes,
  // so we lock them here to detect deadlocks more reliably.
  { Lock lock(&ctx->slot_mtx); }
  { Lock lock(&thr->tctx->trace.mtx); }
  TracePart *current = thr->tctx->trace.parts.Back();
  if (current) {
    DCHECK_GE(pos, &current->events[0]);
    DCHECK_LE(pos, &current->events[TracePart::kSize]);
  } else {
    DCHECK_EQ(pos, nullptr);
  }
#endif
  // TracePart is allocated with mmap and is at least 4K aligned.
  // So the following check is a faster way to check for part end.
  // It may have false positives in the middle of the trace;
  // they are filtered out in TraceSwitch.
  if (UNLIKELY(((uptr)(pos + 1) & TracePart::kAlignment) == 0))
    return false;
  *ev = reinterpret_cast<EventT *>(pos);
  return true;
}

template <typename EventT>
ALWAYS_INLINE void TraceRelease(ThreadState *thr, EventT *evp) {
  DCHECK_LE(evp + 1, &thr->tctx->trace.parts.Back()->events[TracePart::kSize]);
  atomic_store_relaxed(&thr->trace_pos, (uptr)(evp + 1));
}

template <typename EventT>
void TraceEvent(ThreadState *thr, EventT ev) {
  EventT *evp;
  if (!TraceAcquire(thr, &evp)) {
    TraceSwitchPart(thr);
    UNUSED bool res = TraceAcquire(thr, &evp);
    DCHECK(res);
  }
  *evp = ev;
  TraceRelease(thr, evp);
}
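
// Illustrative sketch (not part of the runtime): TraceAcquire reserves space
// for one event, TraceRelease publishes it, and TraceEvent wraps the two with
// the slow-path fallback to TraceSwitchPart. TryTraceFunc below is the
// canonical fast-path caller; a generic emission site looks roughly like:
//
//   EventFunc ev = {};     // any concrete event type (EventFunc here)
//   ev.is_access = 0;
//   ev.is_func = 1;
//   ev.pc = pc;            // pc of the traced call
//   TraceEvent(thr, ev);   // switches trace parts internally if needed
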
ALWAYS_INLINE WARN_UNUSED_RESULT bool TryTraceFunc(ThreadState *thr,
                                                   uptr pc = 0) {
  if (!kCollectHistory)
    return true;
  EventFunc *ev;
  if (UNLIKELY(!TraceAcquire(thr, &ev)))
    return false;
  ev->is_access = 0;
  ev->is_func = 1;
  ev->pc = pc;
  TraceRelease(thr, ev);
  return true;
}

WARN_UNUSED_RESULT
bool TryTraceMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
                          AccessType typ);
WARN_UNUSED_RESULT
bool TryTraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
                               AccessType typ);
void TraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
                            AccessType typ);
void TraceFunc(ThreadState *thr, uptr pc = 0);
void TraceMutexLock(ThreadState *thr, EventType type, uptr pc, uptr addr,
                    StackID stk);
void TraceMutexUnlock(ThreadState *thr, uptr addr);
void TraceTime(ThreadState *thr);

void TraceRestartFuncExit(ThreadState *thr);
void TraceRestartFuncEntry(ThreadState *thr, uptr pc);

void GrowShadowStack(ThreadState *thr);

ALWAYS_INLINE
void FuncEntry(ThreadState *thr, uptr pc) {
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.sid(), (void *)pc);
  if (UNLIKELY(!TryTraceFunc(thr, pc)))
    return TraceRestartFuncEntry(thr, pc);
  DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end)
    GrowShadowStack(thr);
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE
void FuncExit(ThreadState *thr) {
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.sid());
  if (UNLIKELY(!TryTraceFunc(thr, 0)))
    return TraceRestartFuncExit(thr);
  DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
  thr->shadow_stack_pos--;
}
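
// Illustrative sketch (not part of the runtime): compiler instrumentation
// brackets every function with __tsan_func_entry/__tsan_func_exit, which end
// up in the two inlines above, so instrumented code behaves roughly like:
//
//   void user_function() {          // hypothetical instrumented function
//     FuncEntry(cur_thread(), pc);  // trace the call, push pc on the shadow stack
//     // ... body; loads/stores go through MemoryAccess() ...
//     FuncExit(cur_thread());       // trace the return, pop the shadow stack
//   }
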
#if !SANITIZER_GO
extern void (*on_initialize)(void);
extern int (*on_finalize)(int);
#endif

}  // namespace __tsan

#endif  // TSAN_RTL_H