//===-- tsan_interface_atomic.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C1x standards.
// For background see the C++11 standard. A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/

#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
#include "tsan_interface.h"
#include "tsan_rtl.h"

using namespace __tsan;

#if !SANITIZER_GO && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif

#if SANITIZER_DEBUG
static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}
#endif

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}
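
// The func_* helpers below perform the underlying atomic operation itself via
// the __sync_* builtins; they are shared by the instrumented Atomic*
// operations and the NoTsanAtomic* fallbacks.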
template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain a full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}
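
// Nand is emulated with a compare-and-swap retry loop: recompute ~(cmp & op)
// from the most recently observed value until the CAS succeeds, and return
// the old value like the other fetch-and-ops.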
template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// These atomic ops are instead executed under a tsan-internal mutex;
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \
    && __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

template <typename T>
static int AccessSize() {
  if (sizeof(T) <= 1)
    return 1;
  else if (sizeof(T) <= 2)
    return 2;
  else if (sizeof(T) <= 4)
    return 4;
  else
    return 8;
  // For 16-byte atomics we also use 8-byte memory access,
  // which leads to false negatives only in very obscure cases.
}

#if !SANITIZER_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
}
#endif

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
}

static memory_order to_mo(morder mo) {
  switch (mo) {
    case mo_relaxed: return memory_order_relaxed;
    case mo_consume: return memory_order_consume;
    case mo_acquire: return memory_order_acquire;
    case mo_release: return memory_order_release;
    case mo_acq_rel: return memory_order_acq_rel;
    case mo_seq_cst: return memory_order_seq_cst;
  }
  DCHECK(0);
  return memory_order_seq_cst;
}
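
// The NoTsanAtomic* functions below perform the operation without any race
// detection bookkeeping. They are used when the calling thread ignores
// synchronization or interceptors, and on the fast paths of the instrumented
// load and store.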
template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}
#endif

template <typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
  DCHECK(IsLoadOrder(mo));
  // This fast path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo)) {
    MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
                 kAccessRead | kAccessAtomic);
    return NoTsanAtomicLoad(a, mo);
  }
  // Don't create a sync object if it does not exist yet. For example, an
  // atomic pointer is initialized to nullptr and then periodically
  // acquire-loaded.
  T v = NoTsanAtomicLoad(a, mo);
  SyncVar *s = ctx->metamap.GetSyncIfExists((uptr)a);
  if (s) {
    SlotLocker locker(thr);
    ReadLock lock(&s->mtx);
    thr->clock.Acquire(s->clock);
    // Re-read under the sync mutex because we need a consistent snapshot
    // of the value and the clock we acquire.
    v = NoTsanAtomicLoad(a, mo);
  }
  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessRead | kAccessAtomic);
  return v;
}

template<typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}
#endif

template <typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
                        morder mo) {
  DCHECK(IsStoreOrder(mo));
  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
  // This fast path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  SlotLocker locker(thr);
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
    Lock lock(&s->mtx);
    thr->clock.ReleaseStore(&s->clock);
    NoTsanAtomicStore(a, v, mo);
  }
  IncrementEpoch(thr);
}
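
// AtomicRMW applies the primitive operation F to the atomic variable.
// Relaxed operations take a fast path with no synchronization bookkeeping;
// otherwise F is applied under the sync object's mutex and the thread's
// vector clock is released to and/or acquired from the sync object according
// to the memory order.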
template <typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
  if (LIKELY(mo == mo_relaxed))
    return F(a, v);
  SlotLocker locker(thr);
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
    RWLock lock(&s->mtx, IsReleaseOrder(mo));
    if (IsAcqRelOrder(mo))
      thr->clock.ReleaseAcquire(&s->clock);
    else if (IsReleaseOrder(mo))
      thr->clock.Release(&s->clock);
    else if (IsAcquireOrder(mo))
      thr->clock.Acquire(s->clock);
    v = F(a, v);
  }
  if (IsReleaseOrder(mo))
    IncrementEpoch(thr);
  return v;
}

template<typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template<typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template<typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template<typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template<typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
                        morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
                        morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
                        morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
                        morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
                       morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
                        morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
                         morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}

#if __TSAN_HAS_INT128
static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
                            morder mo, morder fmo) {
  a128 old = *c;
  a128 cur = func_cas(a, old, v);
  if (cur == old)
    return true;
  *c = cur;
  return false;
}
#endif

template<typename T>
static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  NoTsanAtomicCAS(a, &c, v, mo, fmo);
  return c;
}

template <typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v,
                      morder mo, morder fmo) {
  // 31.7.2.18: "The failure argument shall not be memory_order_release
  // nor memory_order_acq_rel". LLVM (2021-05) falls back to Monotonic
  // (mo_relaxed) when those are used.
  DCHECK(IsLoadOrder(fmo));
  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
  if (LIKELY(mo == mo_relaxed && fmo == mo_relaxed)) {
    T cc = *c;
    T pr = func_cas(a, cc, v);
    if (pr == cc)
      return true;
    *c = pr;
    return false;
  }
  SlotLocker locker(thr);
  bool release = IsReleaseOrder(mo);
  bool success;
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
    RWLock lock(&s->mtx, release);
    T cc = *c;
    T pr = func_cas(a, cc, v);
    success = pr == cc;
    if (!success) {
      *c = pr;
      mo = fmo;
    }
    if (success && IsAcqRelOrder(mo))
      thr->clock.ReleaseAcquire(&s->clock);
    else if (success && IsReleaseOrder(mo))
      thr->clock.Release(&s->clock);
    else if (IsAcquireOrder(mo))
      thr->clock.Acquire(s->clock);
  }
  if (success && release)
    IncrementEpoch(thr);
  return success;
}
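
// Value-returning overload used by the __sync-style
// __tsan_atomicN_compare_exchange_val entry points: it returns the value
// observed in *a rather than a success flag.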
template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
                   volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

#if !SANITIZER_GO
static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}
#endif

// Interface functions follow.
#if !SANITIZER_GO

// C/C++

static morder convert_morder(morder mo) {
  if (flags()->force_seq_cst_atomics)
    return (morder)mo_seq_cst;
  // Filter out additional memory order flags:
  // MEMMODEL_SYNC        = 1 << 15
  // __ATOMIC_HLE_ACQUIRE = 1 << 16
  // __ATOMIC_HLE_RELEASE = 1 << 17
  //
  // HLE is an optimization, and we pretend that elision always fails.
  // MEMMODEL_SYNC is used when lowering __sync_ atomics; since we use
  // __sync_ atomics for the actual atomic operations, we can safely ignore
  // it as well. It also subtly affects semantics, but we don't model the
  // difference.
  return (morder)(mo & 0x7fff);
}
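
// Common body of the C/C++ interface functions below: process pending
// signals, dispatch to the uninstrumented NoTsanAtomic* implementation when
// the thread ignores synchronization or interceptors, and otherwise
// normalize the memory order and call the instrumented Atomic*
// implementation with the caller's PC.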
# define ATOMIC_IMPL(func, ...)                                \
    ThreadState *const thr = cur_thread();                     \
    ProcessPendingSignals(thr);                                \
    if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors)) \
      return NoTsanAtomic##func(__VA_ARGS__);                  \
    mo = convert_morder(mo);                                   \
    return Atomic##func(thr, GET_CALLER_PC(), __VA_ARGS__);

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
                                           morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
                                            morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
                                            morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
                                            morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
                                             morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
                                         morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
                                          morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
                                          morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
                                          morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
                                           morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
                                       morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
                                         morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
                                         morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
                                         morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
                                           morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) { ATOMIC_IMPL(Fence, mo); }

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}
}  // extern "C"

#else  // #if !SANITIZER_GO

// Go

# define ATOMIC(func, ...)                \
    if (thr->ignore_sync) {               \
      NoTsanAtomic##func(__VA_ARGS__);    \
    } else {                              \
      FuncEntry(thr, cpc);                \
      Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr);                      \
    }

# define ATOMIC_RET(func, ret, ...)               \
    if (thr->ignore_sync) {                       \
      (ret) = NoTsanAtomic##func(__VA_ARGS__);    \
    } else {                                      \
      FuncEntry(thr, cpc);                        \
      (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr);                              \
    }
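
// The Go entry points receive their arguments packed into a single buffer:
// the address of the atomic variable is stored at offset 0 of `a`, followed
// by the operand(s), and the result is written back into the buffer at the
// offsets decoded below.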
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a32 cur = 0;
  a32 cmp = *(a32*)(a+8);
  ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
  *(bool*)(a+16) = (cur == cmp);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a64 cur = 0;
  a64 cmp = *(a64*)(a+8);
  ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
  *(bool*)(a+24) = (cur == cmp);
}
}  // extern "C"
#endif  // #if !SANITIZER_GO