//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals)
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers included only into platform-specific files (*).
//
//  (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//
#ifndef TSAN_RTL_H
#define TSAN_RTL_H

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_vector.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_ignoreset.h"
#include "tsan_ilist.h"
#include "tsan_mman.h"
#include "tsan_mutexset.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_shadow.h"
#include "tsan_stack_trace.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_vector_clock.h"

#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif
namespace __tsan {

#if !SANITIZER_GO
struct MapUnmapCallback;
#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__)

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = 0;
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  typedef __tsan::MapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#else
struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
#  if defined(__s390x__)
  typedef MappingS390x Mapping;
#  else
  typedef Mapping48AddressSpace Mapping;
#  endif
  static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
  static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg;
  static const uptr kMetadataSize = 0;
  typedef DefaultSizeClassMap SizeClassMap;
  typedef __tsan::MapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;
Allocator *allocator();
#endif
struct ThreadSignalContext;

struct JmpBuf {
  uptr sp;
  int int_signal_send;
  bool in_blocking_func;
  uptr in_signal_handler;
  uptr *shadow_stack_pos;
};
// A Processor represents a physical thread, or a P for Go.
// It is used to store internal resources like allocator caches, and it does
// not participate in race-detection logic (it is invisible to the end user).
// In C++ it is tied to an OS thread just like ThreadState, however ideally
// it should be tied to a CPU (this way we will have fewer allocator caches).
// In Go it is tied to a P, so there are significantly fewer Processor's than
// ThreadState's (which are tied to Gs).
// A ThreadState must be wired with a Processor to handle events.
struct Processor {
  ThreadState *thr;  // currently wired thread, or nullptr
#if !SANITIZER_GO
  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;
#endif
  DenseSlabAllocCache block_cache;
  DenseSlabAllocCache sync_cache;
  DDPhysicalThread *dd_pt;
};

#if !SANITIZER_GO
// ScopedGlobalProcessor temporarily sets up a global processor for the current
// thread, if it does not have one. Intended for interceptors that can run at
// the very end of a thread's life, when we have already destroyed the thread's
// own processor.
struct ScopedGlobalProcessor {
  ScopedGlobalProcessor();
  ~ScopedGlobalProcessor();
};
#endif
struct TidEpoch {
  Tid tid;
  Epoch epoch;
};

struct TidSlot {
  Mutex mtx;
  Sid sid;
  atomic_uint32_t raw_epoch;
  ThreadState *thr;
  Vector<TidEpoch> journal;
  INode node;

  Epoch epoch() const {
    return static_cast<Epoch>(atomic_load(&raw_epoch, memory_order_relaxed));
  }

  void SetEpoch(Epoch v) {
    atomic_store(&raw_epoch, static_cast<u32>(v), memory_order_relaxed);
  }

  TidSlot();
} ALIGNED(SANITIZER_CACHE_LINE_SIZE);
// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  int ignore_sync;
#if !SANITIZER_GO
  int ignore_interceptors;
#endif
  uptr *shadow_stack_pos;

  // Current position in tctx->trace.Back()->events (Event*).
  atomic_uintptr_t trace_pos;
  // PC of the last memory access, used to compute PC deltas in the trace.
  uptr trace_prev_pc;

  // Technically `current` should be a separate THREADLOCAL variable;
  // but it is placed here in order to share a cache line with previous fields.
  ThreadState* current;

  atomic_sint32_t pending_signals;

  VectorClock clock;

  // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
  // We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  int suppress_reports;
  // Go does not support ignores.
#if !SANITIZER_GO
  IgnoreSet mop_ignore_set;
  IgnoreSet sync_ignore_set;
#endif
  uptr *shadow_stack;
  uptr *shadow_stack_end;
#if !SANITIZER_GO
  Vector<JmpBuf> jmp_bufs;
  int in_symbolizer;
  bool in_ignored_lib;
  bool is_inited;
#endif
  MutexSet mset;
  bool is_dead;
  const Tid tid;
  uptr stk_addr;
  uptr stk_size;
  uptr tls_addr;
  uptr tls_size;
  ThreadContext *tctx;

  DDLogicalThread *dd_lt;

  TidSlot *slot;
  uptr slot_epoch;
  bool slot_locked;

  // Current wired Processor, or nullptr. Required to handle any events.
  Processor *proc1;
#if !SANITIZER_GO
  Processor *proc() { return proc1; }
#else
  Processor *proc();
#endif

  atomic_uintptr_t in_signal_handler;
  ThreadSignalContext *signal_ctx;

#if !SANITIZER_GO
  StackID last_sleep_stack_id;
  VectorClock last_sleep_clock;
#endif

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  const ReportDesc *current_report;

  explicit ThreadState(Tid tid);
} ALIGNED(SANITIZER_CACHE_LINE_SIZE);
#if !SANITIZER_GO
#if SANITIZER_MAC || SANITIZER_ANDROID
ThreadState *cur_thread();
void set_cur_thread(ThreadState *thr);
void cur_thread_finalize();
inline ThreadState *cur_thread_init() { return cur_thread(); }
# else
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
inline ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current;
}
inline ThreadState *cur_thread_init() {
  ThreadState *thr = reinterpret_cast<ThreadState *>(cur_thread_placeholder);
  if (UNLIKELY(!thr->current))
    thr->current = thr;
  return thr->current;
}
inline void set_cur_thread(ThreadState *thr) {
  reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr;
}
inline void cur_thread_finalize() { }
# endif  // SANITIZER_MAC || SANITIZER_ANDROID
#endif  // SANITIZER_GO

class ThreadContext final : public ThreadContextBase {
 public:
  explicit ThreadContext(Tid tid);
  ~ThreadContext();
  ThreadState *thr;
  StackID creation_stack_id;
  VectorClock *sync;
  uptr sync_epoch;
  Trace trace;

  // Override superclass callbacks.
  void OnDead() override;
  void OnJoined(void *arg) override;
  void OnFinished() override;
  void OnStarted(void *arg) override;
  void OnCreated(void *arg) override;
  void OnReset() override;
  void OnDetached(void *arg) override;
};

struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const;
};

struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct FiredSuppression {
  ReportType type;
  uptr pc_or_addr;
  Suppression *supp;
};
struct Context {
  Context();

  bool initialized;
#if !SANITIZER_GO
  bool after_multithreaded_fork;
#endif

  MetaMap metamap;

  Mutex report_mtx;
  int nreported;
  atomic_uint64_t last_symbolize_time_ns;

  void *background_thread;
  atomic_uint32_t stop_background_thread;

  ThreadRegistry thread_registry;

  Mutex racy_mtx;
  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;
  // The number of fired suppressions may be large.
  Mutex fired_suppressions_mtx;
  InternalMmapVector<FiredSuppression> fired_suppressions;
  DDetector *dd;

  Flags flags;
  fd_t memprof_fd;

  // The last slot index (kFreeSid) is used to denote freed memory.
  TidSlot slots[kThreadSlotCount - 1];

  // Protects global_epoch, slot_queue, trace_part_recycle.
  Mutex slot_mtx;
  uptr global_epoch;  // guarded by slot_mtx and by all slot mutexes
  bool resetting;     // global reset is in progress
  IList<TidSlot, &TidSlot::node> slot_queue SANITIZER_GUARDED_BY(slot_mtx);
  IList<TraceHeader, &TraceHeader::global, TracePart> trace_part_recycle
      SANITIZER_GUARDED_BY(slot_mtx);
  uptr trace_part_total_allocated SANITIZER_GUARDED_BY(slot_mtx);
  uptr trace_part_recycle_finished SANITIZER_GUARDED_BY(slot_mtx);
  uptr trace_part_finished_excess SANITIZER_GUARDED_BY(slot_mtx);
};
extern Context *ctx;  // The one and the only global runtime context.

ALWAYS_INLINE Flags *flags() {
  return &ctx->flags;
}

struct ScopedIgnoreInterceptors {
  ScopedIgnoreInterceptors() {
#if !SANITIZER_GO
    cur_thread()->ignore_interceptors++;
#endif
  }

  ~ScopedIgnoreInterceptors() {
#if !SANITIZER_GO
    cur_thread()->ignore_interceptors--;
#endif
  }
};
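
// Illustrative usage sketch (not part of this header): runtime code that must
// call intercepted libc functions without re-entering TSan wraps the calls in
// a ScopedIgnoreInterceptors scope. The function name below is hypothetical.
//
//   void SomeInternalRoutine() {
//     ScopedIgnoreInterceptors ignore;  // interceptors become pass-through
//     // ... calls into libc are not processed by TSan until scope exit ...
//   }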
const char *GetObjectTypeFromTag(uptr tag);
const char *GetReportHeaderFromTag(uptr tag);
uptr TagFromShadowStackFrame(uptr pc);

class ScopedReportBase {
 public:
  void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, Tid tid,
                       StackTrace stack, const MutexSet *mset);
  void AddStack(StackTrace stack, bool suppressable = false);
  void AddThread(const ThreadContext *tctx, bool suppressable = false);
  void AddThread(Tid tid, bool suppressable = false);
  void AddUniqueTid(Tid unique_tid);
  int AddMutex(uptr addr, StackID creation_stack_id);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(StackID stack_id);
  void SetCount(int count);

  const ReportDesc *GetReport() const;

 protected:
  ScopedReportBase(ReportType typ, uptr tag);
  ~ScopedReportBase();

 private:
  ReportDesc *rep_;
  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore_interceptors_;

  ScopedReportBase(const ScopedReportBase &) = delete;
  void operator=(const ScopedReportBase &) = delete;
};

class ScopedReport : public ScopedReportBase {
 public:
  explicit ScopedReport(ReportType typ, uptr tag = kExternalTagNone);
  ~ScopedReport();

 private:
  ScopedErrorReportLock lock_;
};
bool ShouldReport(ThreadState *thr, ReportType typ);
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);

// The stack could look like:
//   <start> | <main> | <foo> | tag | <bar>
// This will extract the tag and keep:
//   <start> | <main> | <foo> | <bar>
template <typename StackTraceTy>
void ExtractTagFromStack(StackTraceTy *stack, uptr *tag = nullptr) {
  if (stack->size < 2) return;
  uptr possible_tag_pc = stack->trace[stack->size - 2];
  uptr possible_tag = TagFromShadowStackFrame(possible_tag_pc);
  if (possible_tag == kExternalTagNone) return;
  stack->trace_buffer[stack->size - 2] = stack->trace_buffer[stack->size - 1];
  stack->size -= 1;
  if (tag) *tag = possible_tag;
}
template <typename StackTraceTy>
void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack,
                        uptr *tag = nullptr) {
  uptr size = thr->shadow_stack_pos - thr->shadow_stack;
  uptr start = 0;
  if (size + !!toppc > kStackTraceMax) {
    start = size + !!toppc - kStackTraceMax;
    size = kStackTraceMax - !!toppc;
  }
  stack->Init(&thr->shadow_stack[start], size, toppc);
  ExtractTagFromStack(stack, tag);
}

#define GET_STACK_TRACE_FATAL(thr, pc) \
  VarSizeStackTrace stack;             \
  ObtainCurrentStack(thr, pc, &stack); \
  stack.ReverseOrder();
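
// Illustrative usage sketch (not part of this header): reporting code that
// needs the current stack in caller-first order typically does:
//
//   {
//     GET_STACK_TRACE_FATAL(thr, pc);  // declares and fills `stack`
//     // `stack` is a VarSizeStackTrace, reversed so the outermost frame
//     // comes first; it can then be attached to a report via AddStack().
//   }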
void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size, const char *name);
void DontNeedShadowFor(uptr addr, uptr size);
void UnmapShadow(ThreadState *thr, uptr addr, uptr size);
void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();

void ForkBefore(ThreadState *thr, uptr pc);
void ForkParentAfter(ThreadState *thr, uptr pc);
void ForkChildAfter(ThreadState *thr, uptr pc, bool start_thread);

void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
                AccessType typ);
bool OutputReport(ThreadState *thr, const ScopedReport &srep);
bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif
StackID CurrentStackId(ThreadState *thr, uptr pc);
ReportStack *SymbolizeStackId(StackID stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow(uptr pc);  // uses libunwind
MBlock *JavaHeapBlock(uptr addr, uptr *start);

void Initialize(ThreadState *thr);
void MaybeSpawnBackgroundThread();
int Finalize(ThreadState *thr);

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
                  AccessType typ);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           AccessType typ);
// This creates 2 non-inlined specialized versions of MemoryAccessRange.
template <bool is_read>
void MemoryAccessRangeT(ThreadState *thr, uptr pc, uptr addr, uptr size);

ALWAYS_INLINE
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
                       bool is_write) {
  if (size == 0)
    return;
  if (is_write)
    MemoryAccessRangeT<false>(thr, pc, addr, size);
  else
    MemoryAccessRangeT<true>(thr, pc, addr, size);
}
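
// Illustrative usage sketch (not part of this header): a range-based
// interceptor, e.g. one wrapping a memcpy-like routine, would report the two
// ranges roughly like this, where `dst`, `src` and `size` are the intercepted
// call's arguments:
//
//   MemoryAccessRange(thr, pc, (uptr)dst, size, true);   // written range
//   MemoryAccessRange(thr, pc, (uptr)src, size, false);  // read range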
void ShadowSet(RawShadow *p, RawShadow *end, RawShadow v);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
                                         uptr size);

void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreEnd(ThreadState *thr);
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncEnd(ThreadState *thr);

Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
                 ThreadType thread_type);
void ThreadFinish(ThreadState *thr);
Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, Tid tid);
void ThreadDetach(ThreadState *thr, uptr pc, Tid tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignalsImpl(ThreadState *thr);
void ThreadNotJoined(ThreadState *thr, uptr pc, Tid tid, uptr uid);

Processor *ProcCreate();
void ProcDestroy(Processor *proc);
void ProcWire(Processor *proc, ThreadState *thr);
void ProcUnwire(Processor *proc, ThreadState *thr);
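
// Illustrative usage sketch (not part of this header): a ThreadState must be
// wired with a Processor before it can handle events, roughly:
//
//   Processor *proc = ProcCreate();
//   ProcWire(proc, thr);    // thr->proc() now returns proc
//   // ... the thread handles memory accesses, mutex events, etc. ...
//   ProcUnwire(proc, thr);
//   ProcDestroy(proc);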
// Note: the parameter is called flagz, because flags is already taken
// by the global function that returns flags.
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0,
                   int rec = 1);
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr);  // call on EOWNERDEAD
void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr);
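
// Illustrative usage sketch (not part of this header): a lock interceptor
// typically brackets the real lock operation with the pre/post events, where
// `m` is the address of the user's mutex object:
//
//   MutexPreLock(thr, pc, (uptr)m);
//   // ... call the real lock function ...
//   MutexPostLock(thr, pc, (uptr)m);
//   // ... later, on unlock:
//   MutexUnlock(thr, pc, (uptr)m);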
void Acquire(ThreadState *thr, uptr pc, uptr addr);
// AcquireGlobal synchronizes the current thread with all other threads.
// In terms of happens-before relation, it draws a HB edge from all threads
// (where they happen to execute right now) to the current thread. We use it to
// handle Go finalizers. Namely, finalizer goroutine executes AcquireGlobal
// right before executing finalizers. This provides a coarse, but simple
// approximation of the actual required synchronization.
void AcquireGlobal(ThreadState *thr);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
void IncrementEpoch(ThreadState *thr);

#if !SANITIZER_GO
uptr ALWAYS_INLINE HeapEnd() {
  return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
}
#endif
void SlotAttachAndLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
void SlotDetach(ThreadState *thr);
void SlotLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
void SlotUnlock(ThreadState *thr) SANITIZER_RELEASE(thr->slot->mtx);
void DoReset(ThreadState *thr, uptr epoch);
void FlushShadowMemory();

ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags);
void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber);
void FiberSwitch(ThreadState *thr, uptr pc, ThreadState *fiber, unsigned flags);

// These need to match __tsan_switch_to_fiber_* flags defined in
// tsan_interface.h. See documentation there as well.
enum FiberSwitchFlags {
  FiberSwitchFlagNoSync = 1 << 0,  // __tsan_switch_to_fiber_no_sync
};
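
// Illustrative usage sketch (not part of this header): a user-level scheduler
// integrated via the fiber API would roughly do the following; the public
// entry points are the corresponding __tsan_* fiber functions declared in
// tsan_interface.h.
//
//   ThreadState *fiber = FiberCreate(thr, pc, 0);
//   FiberSwitch(thr, pc, fiber, 0);                      // with synchronization
//   FiberSwitch(thr, pc, fiber, FiberSwitchFlagNoSync);  // without it
//   FiberDestroy(thr, pc, fiber);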
class SlotLocker {
 public:
  ALWAYS_INLINE
  SlotLocker(ThreadState *thr, bool recursive = false)
      : thr_(thr), locked_(recursive ? thr->slot_locked : false) {
    if (!locked_)
      SlotLock(thr_);
  }

  ALWAYS_INLINE
  ~SlotLocker() {
    if (!locked_)
      SlotUnlock(thr_);
  }

 private:
  ThreadState *thr_;
  bool locked_;
};

class SlotUnlocker {
 public:
  SlotUnlocker(ThreadState *thr) : thr_(thr), locked_(thr->slot_locked) {
    if (locked_)
      SlotUnlock(thr_);
  }

  ~SlotUnlocker() {
    if (locked_)
      SlotLock(thr_);
  }

 private:
  ThreadState *thr_;
  bool locked_;
};
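
// Illustrative usage sketch (not part of this header): runtime paths that need
// the thread's slot locked take a SlotLocker for the duration of the scope;
// paths that may already hold it pass recursive=true:
//
//   {
//     SlotLocker locker(thr, /*recursive=*/true);  // no-op if already locked
//     // ... work guarded by thr->slot->mtx ...
//   }  // unlocked here only if this scope actually took the lock
//
// SlotUnlocker is the inverse: it drops the slot lock (if held) for code that
// must not run under it, and re-acquires it on scope exit.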
ALWAYS_INLINE void ProcessPendingSignals(ThreadState *thr) {
  if (UNLIKELY(atomic_load_relaxed(&thr->pending_signals)))
    ProcessPendingSignalsImpl(thr);
}

extern bool is_initialized;

ALWAYS_INLINE
void LazyInitialize(ThreadState *thr) {
  // If we can use .preinit_array, assume that __tsan_init
  // called from .preinit_array initializes runtime before
  // any instrumented code.
#if !SANITIZER_CAN_USE_PREINIT_ARRAY
  if (UNLIKELY(!is_initialized))
    Initialize(thr);
#endif
}
void TraceResetForTesting();
void TraceSwitchPart(ThreadState *thr);
void TraceSwitchPartImpl(ThreadState *thr);
bool RestoreStack(EventType type, Sid sid, Epoch epoch, uptr addr, uptr size,
                  AccessType typ, Tid *ptid, VarSizeStackTrace *pstk,
                  MutexSet *pmset, uptr *ptag);

template <typename EventT>
ALWAYS_INLINE WARN_UNUSED_RESULT bool TraceAcquire(ThreadState *thr,
                                                   EventT **ev) {
  // TraceSwitchPart accesses shadow_stack, but it's called infrequently,
  // so we check it here proactively.
  DCHECK(thr->shadow_stack);
  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
#if SANITIZER_DEBUG
  // TraceSwitch acquires these mutexes,
  // so we lock them here to detect deadlocks more reliably.
  { Lock lock(&ctx->slot_mtx); }
  { Lock lock(&thr->tctx->trace.mtx); }
  TracePart *current = thr->tctx->trace.parts.Back();
  if (current) {
    DCHECK_GE(pos, &current->events[0]);
    DCHECK_LE(pos, &current->events[TracePart::kSize]);
  } else {
    DCHECK_EQ(pos, nullptr);
  }
#endif
  // TracePart is allocated with mmap and is at least 4K aligned.
  // So the following check is a faster way to check for part end.
  // It may have false positives in the middle of the trace,
  // they are filtered out in TraceSwitch.
  if (UNLIKELY(((uptr)(pos + 1) & TracePart::kAlignment) == 0))
    return false;
  *ev = reinterpret_cast<EventT *>(pos);
  return true;
}
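
// Illustrative usage sketch (not part of this header): callers that write a
// typed event reserve a slot, fill it, and publish it:
//
//   EventFunc *ev;
//   if (UNLIKELY(!TraceAcquire(thr, &ev)))
//     return false;  // trace part is full, caller must switch parts and retry
//   ev->is_access = 0;
//   ev->is_func = 1;
//   ev->pc = pc;
//   TraceRelease(thr, ev);
//
// TryTraceFunc() below is exactly this pattern.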
template <typename EventT>
ALWAYS_INLINE void TraceRelease(ThreadState *thr, EventT *evp) {
  DCHECK_LE(evp + 1, &thr->tctx->trace.parts.Back()->events[TracePart::kSize]);
  atomic_store_relaxed(&thr->trace_pos, (uptr)(evp + 1));
}

template <typename EventT>
void TraceEvent(ThreadState *thr, EventT ev) {
  EventT *evp;
  if (!TraceAcquire(thr, &evp)) {
    TraceSwitchPart(thr);
    UNUSED bool res = TraceAcquire(thr, &evp);
    DCHECK(res);
  }
  *evp = ev;
  TraceRelease(thr, evp);
}

ALWAYS_INLINE WARN_UNUSED_RESULT bool TryTraceFunc(ThreadState *thr,
                                                   uptr pc = 0) {
  if (!kCollectHistory)
    return true;
  EventFunc *ev;
  if (UNLIKELY(!TraceAcquire(thr, &ev)))
    return false;
  ev->is_access = 0;
  ev->is_func = 1;
  ev->pc = pc;
  TraceRelease(thr, ev);
  return true;
}

WARN_UNUSED_RESULT
bool TryTraceMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
                          AccessType typ);
WARN_UNUSED_RESULT
bool TryTraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
                               AccessType typ);
void TraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
                            AccessType typ);
void TraceFunc(ThreadState *thr, uptr pc = 0);
void TraceMutexLock(ThreadState *thr, EventType type, uptr pc, uptr addr,
                    StackID stk);
void TraceMutexUnlock(ThreadState *thr, uptr addr);
void TraceTime(ThreadState *thr);

void TraceRestartFuncExit(ThreadState *thr);
void TraceRestartFuncEntry(ThreadState *thr, uptr pc);
void GrowShadowStack(ThreadState *thr);

ALWAYS_INLINE
void FuncEntry(ThreadState *thr, uptr pc) {
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.sid(), (void *)pc);
  if (UNLIKELY(!TryTraceFunc(thr, pc)))
    return TraceRestartFuncEntry(thr, pc);
  DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end)
    GrowShadowStack(thr);
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE
void FuncExit(ThreadState *thr) {
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.sid());
  if (UNLIKELY(!TryTraceFunc(thr, 0)))
    return TraceRestartFuncExit(thr);
  DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
  thr->shadow_stack_pos--;
}
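
// Illustrative usage sketch (not part of this header): compiler
// instrumentation emits calls to __tsan_func_entry/__tsan_func_exit (declared
// in tsan_interface.h) around each instrumented function; those entry points
// end up invoking the helpers above, conceptually:
//
//   // on function entry, with `pc` being the caller's return address:
//   FuncEntry(cur_thread(), pc);   // push pc onto the shadow stack
//   // ... instrumented body: accesses are attributed to this frame ...
//   FuncExit(cur_thread());        // pop the frame on function exit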
#if !SANITIZER_GO
extern void (*on_initialize)(void);
extern int (*on_finalize)(int);
#endif

}  // namespace __tsan

#endif  // TSAN_RTL_H