//===-- tsan_interceptors_posix.cpp ---------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// FIXME: move as many interceptors as possible into
// sanitizer_common/sanitizer_common_interceptors.inc
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "interception/interception.h"
#include "tsan_interceptors.h"
#include "tsan_interface.h"
#include "tsan_platform.h"
#include "tsan_suppressions.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_fd.h"

#include <stdarg.h>

using namespace __tsan;

#if SANITIZER_FREEBSD || SANITIZER_MAC
#define stdout __stdoutp
#define stderr __stderrp
#endif

#if SANITIZER_NETBSD
#define dirfd(dirp) (*(int *)(dirp))
#define fileno_unlocked(fp)              \
  (((__sanitizer_FILE *)fp)->_file == -1 \
       ? -1                              \
       : (int)(unsigned short)(((__sanitizer_FILE *)fp)->_file))

#define stdout ((__sanitizer_FILE*)&__sF[1])
#define stderr ((__sanitizer_FILE*)&__sF[2])

#define nanosleep __nanosleep50
#define vfork __vfork14
#endif
#ifdef __mips__
const int kSigCount = 129;
#else
const int kSigCount = 65;
#endif

#ifdef __mips__
struct ucontext_t {
  u64 opaque[768 / sizeof(u64) + 1];
};
#else
struct ucontext_t {
  // The size is determined by looking at sizeof of real ucontext_t on linux.
  u64 opaque[936 / sizeof(u64) + 1];
};
#endif

#if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 || \
    defined(__s390x__)
#define PTHREAD_ABI_BASE "GLIBC_2.3.2"
#elif defined(__aarch64__) || SANITIZER_PPC64V2
#define PTHREAD_ABI_BASE "GLIBC_2.17"
#endif

extern "C" int pthread_attr_init(void *attr);
extern "C" int pthread_attr_destroy(void *attr);
DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
extern "C" int pthread_atfork(void (*prepare)(void), void (*parent)(void),
                              void (*child)(void));
extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
extern "C" int pthread_setspecific(unsigned key, const void *v);
DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
extern "C" int pthread_equal(void *t1, void *t2);
extern "C" void *pthread_self();
extern "C" void _exit(int status);
#if !SANITIZER_NETBSD
extern "C" int fileno_unlocked(void *stream);
extern "C" int dirfd(void *dirp);
#endif
#if SANITIZER_NETBSD
extern __sanitizer_FILE __sF[];
#else
extern __sanitizer_FILE *stdout, *stderr;
#endif
#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
const int PTHREAD_MUTEX_RECURSIVE = 1;
const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
#else
const int PTHREAD_MUTEX_RECURSIVE = 2;
const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
#endif
#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
const int EPOLL_CTL_ADD = 1;
#endif
const int SIGILL = 4;
const int SIGTRAP = 5;
const int SIGABRT = 6;
const int SIGFPE = 8;
const int SIGSEGV = 11;
const int SIGPIPE = 13;
const int SIGTERM = 15;
#if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
const int SIGBUS = 10;
const int SIGSYS = 12;
#else
const int SIGBUS = 7;
const int SIGSYS = 31;
#endif
void *const MAP_FAILED = (void*)-1;
#if SANITIZER_NETBSD
const int PTHREAD_BARRIER_SERIAL_THREAD = 1234567;
#elif !SANITIZER_MAC
const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
#endif
const int MAP_FIXED = 0x10;
typedef long long_t;
typedef __sanitizer::u16 mode_t;

// From /usr/include/unistd.h
# define F_ULOCK 0  /* Unlock a previously locked region.  */
# define F_LOCK  1  /* Lock a region for exclusive use.  */
# define F_TLOCK 2  /* Test and lock a region for exclusive use.  */
# define F_TEST  3  /* Test a region for other processes locks.  */

#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
const int SA_SIGINFO = 0x40;
const int SIG_SETMASK = 3;
#elif defined(__mips__)
const int SA_SIGINFO = 8;
const int SIG_SETMASK = 3;
#else
const int SA_SIGINFO = 4;
const int SIG_SETMASK = 2;
#endif
#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \
  (!cur_thread_init()->is_inited)

namespace __tsan {

struct SignalDesc {
  bool armed;
  __sanitizer_siginfo siginfo;
  ucontext_t ctx;
};

struct ThreadSignalContext {
  int int_signal_send;
  atomic_uintptr_t in_blocking_func;
  SignalDesc pending_signals[kSigCount];
  // emptyset and oldset are too big for stack.
  __sanitizer_sigset_t emptyset;
  __sanitizer_sigset_t oldset;
};

// The sole reason tsan wraps atexit callbacks is to establish synchronization
// between callback setup and callback execution.
struct AtExitCtx {
  void (*f)();
  void *arg;
  uptr pc;
};

// InterceptorContext holds all global data required for interceptors.
// It's explicitly constructed in InitializeInterceptors with placement new
// and is never destroyed. This allows usage of members with non-trivial
// constructors and destructors.
struct InterceptorContext {
  // The object is 64-byte aligned, because we want hot data to be located
  // in a single cache line if possible (it's accessed in every interceptor).
  ALIGNED(64) LibIgnore libignore;
  __sanitizer_sigaction sigactions[kSigCount];
#if !SANITIZER_MAC && !SANITIZER_NETBSD
  unsigned finalize_key;
#endif

  Mutex atexit_mu;
  Vector<struct AtExitCtx *> AtExitStack;

  InterceptorContext()
      : libignore(LINKER_INITIALIZED),
        atexit_mu(MutexTypeAtExit),
        AtExitStack() {}
};

static ALIGNED(64) char interceptor_placeholder[sizeof(InterceptorContext)];
InterceptorContext *interceptor_ctx() {
  return reinterpret_cast<InterceptorContext*>(&interceptor_placeholder[0]);
}

LibIgnore *libignore() {
  return &interceptor_ctx()->libignore;
}

void InitializeLibIgnore() {
  const SuppressionContext &supp = *Suppressions();
  const uptr n = supp.SuppressionCount();
  for (uptr i = 0; i < n; i++) {
    const Suppression *s = supp.SuppressionAt(i);
    if (0 == internal_strcmp(s->type, kSuppressionLib))
      libignore()->AddIgnoredLibrary(s->templ);
  }
  if (flags()->ignore_noninstrumented_modules)
    libignore()->IgnoreNoninstrumentedModules(true);
  libignore()->OnLibraryLoaded(0);
}

// The following two hooks can be used for cooperative scheduling when
// locking.
#ifdef TSAN_EXTERNAL_HOOKS
void OnPotentiallyBlockingRegionBegin();
void OnPotentiallyBlockingRegionEnd();
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionBegin() {}
SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionEnd() {}
#endif

}  // namespace __tsan
static ThreadSignalContext *SigCtx(ThreadState *thr) {
  ThreadSignalContext *ctx = (ThreadSignalContext*)thr->signal_ctx;
  if (ctx == 0 && !thr->is_dead) {
    ctx = (ThreadSignalContext*)MmapOrDie(sizeof(*ctx), "ThreadSignalContext");
    MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx));
    thr->signal_ctx = ctx;
  }
  return ctx;
}

ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
                                     uptr pc)
    : thr_(thr), in_ignored_lib_(false), ignoring_(false) {
  LazyInitialize(thr);
  if (!thr_->is_inited) return;
  if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
  DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
  ignoring_ =
      !thr_->in_ignored_lib && (flags()->ignore_interceptors_accesses ||
                                libignore()->IsIgnored(pc, &in_ignored_lib_));
  EnableIgnores();
}

ScopedInterceptor::~ScopedInterceptor() {
  if (!thr_->is_inited) return;
  DisableIgnores();
  if (!thr_->ignore_interceptors) {
    ProcessPendingSignals(thr_);
    FuncExit(thr_);
    CheckedMutex::CheckNoLocks();
  }
}

NOINLINE
void ScopedInterceptor::EnableIgnoresImpl() {
  ThreadIgnoreBegin(thr_, 0);
  if (flags()->ignore_noninstrumented_modules)
    thr_->suppress_reports++;
  if (in_ignored_lib_) {
    DCHECK(!thr_->in_ignored_lib);
    thr_->in_ignored_lib = true;
  }
}

NOINLINE
void ScopedInterceptor::DisableIgnoresImpl() {
  ThreadIgnoreEnd(thr_);
  if (flags()->ignore_noninstrumented_modules)
    thr_->suppress_reports--;
  if (in_ignored_lib_) {
    DCHECK(thr_->in_ignored_lib);
    thr_->in_ignored_lib = false;
  }
}
#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
#if SANITIZER_FREEBSD || SANITIZER_NETBSD
# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
#else
# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
#endif
#if SANITIZER_FREEBSD
# define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func) \
    INTERCEPT_FUNCTION(_pthread_##func)
#else
# define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func)
#endif
#if SANITIZER_NETBSD
# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) \
    INTERCEPT_FUNCTION(__libc_##func)
# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) \
    INTERCEPT_FUNCTION(__libc_thr_##func)
#else
# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
#endif

#define READ_STRING_OF_LEN(thr, pc, s, len, n)          \
  MemoryAccessRange((thr), (pc), (uptr)(s),              \
    common_flags()->strict_string_checks ? (len) + 1 : (n), false)

#define READ_STRING(thr, pc, s, n)                       \
    READ_STRING_OF_LEN((thr), (pc), (s), internal_strlen(s), (n))

#define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))

struct BlockingCall {
  explicit BlockingCall(ThreadState *thr)
      : thr(thr)
      , ctx(SigCtx(thr)) {
    for (;;) {
      atomic_store(&ctx->in_blocking_func, 1, memory_order_relaxed);
      if (atomic_load(&thr->pending_signals, memory_order_relaxed) == 0)
        break;
      atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
      ProcessPendingSignals(thr);
    }
    // When we are in a "blocking call", we process signals asynchronously
    // (right when they arrive). In this context we do not expect to be
    // executing any user/runtime code. The known interceptor sequence when
    // this is not true is: pthread_join -> munmap(stack). It's fine
    // to ignore munmap in this case -- we handle stack shadow separately.
    thr->ignore_interceptors++;
  }

  ~BlockingCall() {
    thr->ignore_interceptors--;
    atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
  }

  ThreadState *thr;
  ThreadSignalContext *ctx;
};
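// Editorial note summarizing the macro above: BLOCK_REAL(foo)(args) expands
// to (BlockingCall(thr), REAL(foo))(args). The comma expression constructs a
// temporary BlockingCall right before the real call and destroys it when the
// full expression (including the call itself) finishes. So for the duration of
// the real blocking call, pending signals are delivered asynchronously and
// nested interceptors are ignored; e.g. the sleep interceptor below behaves
// roughly like:
//   BlockingCall bc(thr);               // mark thread as inside a blocking call
//   unsigned res = REAL(sleep)(sec);    // real call, signals handled directly
//   // bc destructor clears in_blocking_func and re-enables interceptors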
TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
  SCOPED_TSAN_INTERCEPTOR(sleep, sec);
  unsigned res = BLOCK_REAL(sleep)(sec);
  AfterSleep(thr, pc);
  return res;
}

TSAN_INTERCEPTOR(int, usleep, long_t usec) {
  SCOPED_TSAN_INTERCEPTOR(usleep, usec);
  int res = BLOCK_REAL(usleep)(usec);
  AfterSleep(thr, pc);
  return res;
}

TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
  SCOPED_TSAN_INTERCEPTOR(nanosleep, req, rem);
  int res = BLOCK_REAL(nanosleep)(req, rem);
  AfterSleep(thr, pc);
  return res;
}

TSAN_INTERCEPTOR(int, pause, int fake) {
  SCOPED_TSAN_INTERCEPTOR(pause, fake);
  return BLOCK_REAL(pause)(fake);
}

// Note: we specifically call the function in such a strange way
// with "installed_at" because in reports it will appear between
// callback frames and the frame that installed the callback.
static void at_exit_callback_installed_at() {
  AtExitCtx *ctx;
  {
    // Ensure thread-safety.
    Lock l(&interceptor_ctx()->atexit_mu);

    // Pop AtExitCtx from the top of the stack of callback functions
    uptr element = interceptor_ctx()->AtExitStack.Size() - 1;
    ctx = interceptor_ctx()->AtExitStack[element];
    interceptor_ctx()->AtExitStack.PopBack();
  }

  ThreadState *thr = cur_thread();
  Acquire(thr, ctx->pc, (uptr)ctx);
  FuncEntry(thr, ctx->pc);
  ((void(*)())ctx->f)();
  FuncExit(thr);
  Free(ctx);
}
static void cxa_at_exit_callback_installed_at(void *arg) {
  ThreadState *thr = cur_thread();
  AtExitCtx *ctx = (AtExitCtx*)arg;
  Acquire(thr, ctx->pc, (uptr)arg);
  FuncEntry(thr, ctx->pc);
  ((void(*)(void *arg))ctx->f)(ctx->arg);
  FuncExit(thr);
  Free(ctx);
}

static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
                                 void *arg, void *dso);

#if !SANITIZER_ANDROID
TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
  if (in_symbolizer())
    return 0;
  // We want to setup the atexit callback even if we are in an ignored lib
  // or after fork.
  SCOPED_INTERCEPTOR_RAW(atexit, f);
  return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, 0, 0);
}
#endif

TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
  if (in_symbolizer())
    return 0;
  SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
  return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, arg, dso);
}

static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
                                 void *arg, void *dso) {
  auto *ctx = New<AtExitCtx>();
  ctx->f = f;
  ctx->arg = arg;
  ctx->pc = pc;
  Release(thr, pc, (uptr)ctx);
  // Memory allocation in __cxa_atexit will race with free during exit,
  // because we do not see synchronization around the atexit callback list.
  ThreadIgnoreBegin(thr, pc);
  int res;
  if (!dso) {
    // NetBSD does not preserve the 2nd argument if dso is equal to 0,
    // so store ctx in a local stack-like structure.

    // Ensure thread-safety.
    Lock l(&interceptor_ctx()->atexit_mu);
    // __cxa_atexit calls calloc. If we don't ignore interceptors, we will fail
    // due to atexit_mu held on exit from the calloc interceptor.
    ScopedIgnoreInterceptors ignore;

    res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_callback_installed_at,
                             0, 0);
    // Push AtExitCtx on the top of the stack of callback functions
    if (!res) {
      interceptor_ctx()->AtExitStack.PushBack(ctx);
    }
  } else {
    res = REAL(__cxa_atexit)(cxa_at_exit_callback_installed_at, ctx, dso);
  }
  ThreadIgnoreEnd(thr);
  return res;
}

#if !SANITIZER_MAC && !SANITIZER_NETBSD
static void on_exit_callback_installed_at(int status, void *arg) {
  ThreadState *thr = cur_thread();
  AtExitCtx *ctx = (AtExitCtx*)arg;
  Acquire(thr, ctx->pc, (uptr)arg);
  FuncEntry(thr, ctx->pc);
  ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
  FuncExit(thr);
  Free(ctx);
}

TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
  if (in_symbolizer())
    return 0;
  SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
  auto *ctx = New<AtExitCtx>();
  ctx->f = (void(*)())f;
  ctx->arg = arg;
  ctx->pc = GET_CALLER_PC();
  Release(thr, pc, (uptr)ctx);
  // Memory allocation in __cxa_atexit will race with free during exit,
  // because we do not see synchronization around the atexit callback list.
  ThreadIgnoreBegin(thr, pc);
  int res = REAL(on_exit)(on_exit_callback_installed_at, ctx);
  ThreadIgnoreEnd(thr);
  return res;
}
#define TSAN_MAYBE_INTERCEPT_ON_EXIT TSAN_INTERCEPT(on_exit)
#else
#define TSAN_MAYBE_INTERCEPT_ON_EXIT
#endif
// Cleanup old bufs.
static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
  for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
    JmpBuf *buf = &thr->jmp_bufs[i];
    if (buf->sp <= sp) {
      uptr sz = thr->jmp_bufs.Size();
      // Remove the stale buf by overwriting it with the last element and
      // popping the back; re-examine the same index on the next iteration.
      internal_memcpy(buf, &thr->jmp_bufs[sz - 1], sizeof(*buf));
      thr->jmp_bufs.PopBack();
      i--;
    }
  }
}
static void SetJmp(ThreadState *thr, uptr sp) {
  if (!thr->is_inited)  // called from libc guts during bootstrap
    return;
  // Cleanup old bufs.
  JmpBufGarbageCollect(thr, sp);
  // Remember the buf.
  JmpBuf *buf = thr->jmp_bufs.PushBack();
  buf->sp = sp;
  buf->shadow_stack_pos = thr->shadow_stack_pos;
  ThreadSignalContext *sctx = SigCtx(thr);
  buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
  buf->in_blocking_func = sctx ?
      atomic_load(&sctx->in_blocking_func, memory_order_relaxed) :
      false;
  buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
      memory_order_relaxed);
}

static void LongJmp(ThreadState *thr, uptr *env) {
  uptr sp = ExtractLongJmpSp(env);
  // Find the saved buf with matching sp.
  for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
    JmpBuf *buf = &thr->jmp_bufs[i];
    if (buf->sp == sp) {
      CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos);
      // Unwind the stack.
      while (thr->shadow_stack_pos > buf->shadow_stack_pos)
        FuncExit(thr);
      ThreadSignalContext *sctx = SigCtx(thr);
      if (sctx) {
        sctx->int_signal_send = buf->int_signal_send;
        atomic_store(&sctx->in_blocking_func, buf->in_blocking_func,
            memory_order_relaxed);
      }
      atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
          memory_order_relaxed);
      JmpBufGarbageCollect(thr, buf->sp - 1);  // do not collect buf->sp
      return;
    }
  }
  Printf("ThreadSanitizer: can't find longjmp buf\n");
  CHECK(0);
}

// FIXME: put everything below into a common extern "C" block?
extern "C" void __tsan_setjmp(uptr sp) { SetJmp(cur_thread_init(), sp); }

#if SANITIZER_MAC
TSAN_INTERCEPTOR(int, setjmp, void *env);
TSAN_INTERCEPTOR(int, _setjmp, void *env);
TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
#else  // SANITIZER_MAC

#if SANITIZER_NETBSD
#define setjmp_symname __setjmp14
#define sigsetjmp_symname __sigsetjmp14
#else
#define setjmp_symname setjmp
#define sigsetjmp_symname sigsetjmp
#endif

#define TSAN_INTERCEPTOR_SETJMP_(x) __interceptor_ ## x
#define TSAN_INTERCEPTOR_SETJMP__(x) TSAN_INTERCEPTOR_SETJMP_(x)
#define TSAN_INTERCEPTOR_SETJMP TSAN_INTERCEPTOR_SETJMP__(setjmp_symname)
#define TSAN_INTERCEPTOR_SIGSETJMP TSAN_INTERCEPTOR_SETJMP__(sigsetjmp_symname)

#define TSAN_STRING_SETJMP SANITIZER_STRINGIFY(setjmp_symname)
#define TSAN_STRING_SIGSETJMP SANITIZER_STRINGIFY(sigsetjmp_symname)

// Not called. Merely to satisfy TSAN_INTERCEPT().
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
int TSAN_INTERCEPTOR_SETJMP(void *env);
extern "C" int TSAN_INTERCEPTOR_SETJMP(void *env) {
  CHECK(0);
  return 0;
}

// FIXME: any reason to have a separate declaration?
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
int __interceptor__setjmp(void *env);
extern "C" int __interceptor__setjmp(void *env) {
  CHECK(0);
  return 0;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
int TSAN_INTERCEPTOR_SIGSETJMP(void *env);
extern "C" int TSAN_INTERCEPTOR_SIGSETJMP(void *env) {
  CHECK(0);
  return 0;
}

#if !SANITIZER_NETBSD
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
int __interceptor___sigsetjmp(void *env);
extern "C" int __interceptor___sigsetjmp(void *env) {
  CHECK(0);
  return 0;
}
#endif

extern "C" int setjmp_symname(void *env);
extern "C" int _setjmp(void *env);
extern "C" int sigsetjmp_symname(void *env);
#if !SANITIZER_NETBSD
extern "C" int __sigsetjmp(void *env);
#endif
DEFINE_REAL(int, setjmp_symname, void *env)
DEFINE_REAL(int, _setjmp, void *env)
DEFINE_REAL(int, sigsetjmp_symname, void *env)
#if !SANITIZER_NETBSD
DEFINE_REAL(int, __sigsetjmp, void *env)
#endif
#endif  // SANITIZER_MAC

#if SANITIZER_NETBSD
#define longjmp_symname __longjmp14
#define siglongjmp_symname __siglongjmp14
#else
#define longjmp_symname longjmp
#define siglongjmp_symname siglongjmp
#endif

TSAN_INTERCEPTOR(void, longjmp_symname, uptr *env, int val) {
  // Note: if we call REAL(longjmp) in the context of ScopedInterceptor,
  // bad things will happen. We will jump over ScopedInterceptor dtor and can
  // leave thr->in_ignored_lib set.
  {
    SCOPED_INTERCEPTOR_RAW(longjmp_symname, env, val);
  }
  LongJmp(cur_thread(), env);
  REAL(longjmp_symname)(env, val);
}

TSAN_INTERCEPTOR(void, siglongjmp_symname, uptr *env, int val) {
  {
    SCOPED_INTERCEPTOR_RAW(siglongjmp_symname, env, val);
  }
  LongJmp(cur_thread(), env);
  REAL(siglongjmp_symname)(env, val);
}

#if SANITIZER_NETBSD
TSAN_INTERCEPTOR(void, _longjmp, uptr *env, int val) {
  {
    SCOPED_INTERCEPTOR_RAW(_longjmp, env, val);
  }
  LongJmp(cur_thread(), env);
  REAL(_longjmp)(env, val);
}
#endif
#if !SANITIZER_MAC
TSAN_INTERCEPTOR(void*, malloc, uptr size) {
  if (in_symbolizer())
    return InternalAlloc(size);
  void *p = 0;
  {
    SCOPED_INTERCEPTOR_RAW(malloc, size);
    p = user_alloc(thr, pc, size);
  }
  invoke_malloc_hook(p, size);
  return p;
}

// In glibc<2.25, dynamic TLS blocks are allocated by __libc_memalign. Intercept
// __libc_memalign so that (1) we can detect races (2) free will not be called
// on libc internally allocated blocks.
TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
  SCOPED_INTERCEPTOR_RAW(__libc_memalign, align, sz);
  return user_memalign(thr, pc, align, sz);
}

TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
  if (in_symbolizer())
    return InternalCalloc(size, n);
  void *p = 0;
  {
    SCOPED_INTERCEPTOR_RAW(calloc, size, n);
    p = user_calloc(thr, pc, size, n);
  }
  invoke_malloc_hook(p, n * size);
  return p;
}

TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
  if (in_symbolizer())
    return InternalRealloc(p, size);
  if (p)
    invoke_free_hook(p);
  {
    SCOPED_INTERCEPTOR_RAW(realloc, p, size);
    p = user_realloc(thr, pc, p, size);
  }
  invoke_malloc_hook(p, size);
  return p;
}

TSAN_INTERCEPTOR(void*, reallocarray, void *p, uptr size, uptr n) {
  if (in_symbolizer())
    return InternalReallocArray(p, size, n);
  if (p)
    invoke_free_hook(p);
  {
    SCOPED_INTERCEPTOR_RAW(reallocarray, p, size, n);
    p = user_reallocarray(thr, pc, p, size, n);
  }
  invoke_malloc_hook(p, size);
  return p;
}

TSAN_INTERCEPTOR(void, free, void *p) {
  if (p == 0)
    return;
  if (in_symbolizer())
    return InternalFree(p);
  invoke_free_hook(p);
  SCOPED_INTERCEPTOR_RAW(free, p);
  user_free(thr, pc, p);
}

TSAN_INTERCEPTOR(void, cfree, void *p) {
  if (p == 0)
    return;
  if (in_symbolizer())
    return InternalFree(p);
  invoke_free_hook(p);
  SCOPED_INTERCEPTOR_RAW(cfree, p);
  user_free(thr, pc, p);
}

TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
  SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
  return user_alloc_usable_size(p);
}
#endif
TSAN_INTERCEPTOR(char *, strcpy, char *dst, const char *src) {
  SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src);
  uptr srclen = internal_strlen(src);
  MemoryAccessRange(thr, pc, (uptr)dst, srclen + 1, true);
  MemoryAccessRange(thr, pc, (uptr)src, srclen + 1, false);
  return REAL(strcpy)(dst, src);
}

TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, uptr n) {
  SCOPED_TSAN_INTERCEPTOR(strncpy, dst, src, n);
  uptr srclen = internal_strnlen(src, n);
  MemoryAccessRange(thr, pc, (uptr)dst, n, true);
  MemoryAccessRange(thr, pc, (uptr)src, min(srclen + 1, n), false);
  return REAL(strncpy)(dst, src, n);
}

TSAN_INTERCEPTOR(char*, strdup, const char *str) {
  SCOPED_TSAN_INTERCEPTOR(strdup, str);
  // strdup will call malloc, so no instrumentation is required here.
  return REAL(strdup)(str);
}

// Zero out addr if it points into shadow memory and was provided as a hint
// only, i.e., MAP_FIXED is not set.
static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
  if (*addr) {
    if (!IsAppMem((uptr)*addr) || !IsAppMem((uptr)*addr + sz - 1)) {
      if (flags & MAP_FIXED) {
        errno = errno_EINVAL;
        return false;
      } else {
        *addr = 0;
      }
    }
  }
  return true;
}

template <class Mmap>
static void *mmap_interceptor(ThreadState *thr, uptr pc, Mmap real_mmap,
                              void *addr, SIZE_T sz, int prot, int flags,
                              int fd, OFF64_T off) {
  if (!fix_mmap_addr(&addr, sz, flags)) return MAP_FAILED;
  void *res = real_mmap(addr, sz, prot, flags, fd, off);
  if (res != MAP_FAILED) {
    if (!IsAppMem((uptr)res) || !IsAppMem((uptr)res + sz - 1)) {
      Report("ThreadSanitizer: mmap at bad address: addr=%p size=%p res=%p\n",
             addr, (void*)sz, res);
      Die();
    }
    if (fd > 0) FdAccess(thr, pc, fd);
    MemoryRangeImitateWriteOrResetRange(thr, pc, (uptr)res, sz);
  }
  return res;
}

TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
  SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
  UnmapShadow(thr, (uptr)addr, sz);
  int res = REAL(munmap)(addr, sz);
  return res;
}
#if SANITIZER_LINUX
TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
  SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
  return user_memalign(thr, pc, align, sz);
}
#define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
#else
#define TSAN_MAYBE_INTERCEPT_MEMALIGN
#endif

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
  if (in_symbolizer())
    return InternalAlloc(sz, nullptr, align);
  SCOPED_INTERCEPTOR_RAW(aligned_alloc, align, sz);
  return user_aligned_alloc(thr, pc, align, sz);
}

TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
  if (in_symbolizer())
    return InternalAlloc(sz, nullptr, GetPageSizeCached());
  SCOPED_INTERCEPTOR_RAW(valloc, sz);
  return user_valloc(thr, pc, sz);
}
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
  if (in_symbolizer()) {
    uptr PageSize = GetPageSizeCached();
    sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
    return InternalAlloc(sz, nullptr, PageSize);
  }
  SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
  return user_pvalloc(thr, pc, sz);
}
#define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
#else
#define TSAN_MAYBE_INTERCEPT_PVALLOC
#endif

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
  if (in_symbolizer()) {
    void *p = InternalAlloc(sz, nullptr, align);
    if (!p)
      return errno_ENOMEM;
    *memptr = p;
    return 0;
  }
  SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
  return user_posix_memalign(thr, pc, memptr, align, sz);
}
#endif

// Both __cxa_guard_acquire and pthread_once 0-initialize
// the object initially. pthread_once does not have any
// other ABI requirements. __cxa_guard_acquire assumes
// that any non-0 value in the first byte means that
// initialization is completed. Contents of the remaining
// bytes are up to us.
constexpr u32 kGuardInit = 0;
constexpr u32 kGuardDone = 1;
constexpr u32 kGuardRunning = 1 << 16;
constexpr u32 kGuardWaiter = 1 << 17;
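
// Editorial note summarizing the state machine implemented by guard_acquire
// and guard_release below:
//   kGuardInit    -> kGuardRunning             the first thread claims the
//                                              guard and runs the initializer;
//   kGuardRunning -> kGuardRunning|kGuardWaiter  contending threads set the
//                                              waiter bit and block in FutexWait;
//   kGuardRunning[|kGuardWaiter] -> kGuardDone   on __cxa_guard_release;
//   kGuardRunning[|kGuardWaiter] -> kGuardInit   on __cxa_guard_abort.
// If the waiter bit was set, guard_release wakes all waiters via FutexWake.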
static int guard_acquire(ThreadState *thr, uptr pc, atomic_uint32_t *g,
                         bool blocking_hooks = true) {
  if (blocking_hooks)
    OnPotentiallyBlockingRegionBegin();
  auto on_exit = at_scope_exit([blocking_hooks] {
    if (blocking_hooks)
      OnPotentiallyBlockingRegionEnd();
  });

  for (;;) {
    u32 cmp = atomic_load(g, memory_order_acquire);
    if (cmp == kGuardInit) {
      if (atomic_compare_exchange_strong(g, &cmp, kGuardRunning,
                                         memory_order_relaxed))
        return 1;
    } else if (cmp == kGuardDone) {
      if (!thr->in_ignored_lib)
        Acquire(thr, pc, (uptr)g);
      return 0;
    } else {
      if ((cmp & kGuardWaiter) ||
          atomic_compare_exchange_strong(g, &cmp, cmp | kGuardWaiter,
                                         memory_order_relaxed))
        FutexWait(g, cmp | kGuardWaiter);
    }
  }
}

static void guard_release(ThreadState *thr, uptr pc, atomic_uint32_t *g,
                          u32 v) {
  if (!thr->in_ignored_lib)
    Release(thr, pc, (uptr)g);
  u32 old = atomic_exchange(g, v, memory_order_release);
  if (old & kGuardWaiter)
    FutexWake(g, 1 << 30);
}

// __cxa_guard_acquire and friends need to be intercepted in a special way -
// regular interceptors will break statically-linked libstdc++. Linux
// interceptors are deliberately defined as weak functions (so that they don't
// cause link errors when the user defines them as well), which means they
// silently auto-disable themselves when such a symbol is already present in
// the binary. If we link libstdc++ statically, it will bring its own
// __cxa_guard_acquire, which will silently replace our interceptor. That's why
// on Linux we simply export these interceptors with INTERFACE_ATTRIBUTE.
// On OS X, we don't support static linking, so we just use a regular
// interceptor.
#if SANITIZER_MAC
#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
#else
#define STDCXX_INTERCEPTOR(rettype, name, ...) \
  extern "C" rettype INTERFACE_ATTRIBUTE name(__VA_ARGS__)
#endif

// Used in thread-safe function static initialization.
STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) {
  SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
  return guard_acquire(thr, pc, g);
}

STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) {
  SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
  guard_release(thr, pc, g, kGuardDone);
}

STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
  SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
  guard_release(thr, pc, g, kGuardInit);
}
namespace __tsan {
void DestroyThreadState() {
  ThreadState *thr = cur_thread();
  Processor *proc = thr->proc();
  ThreadFinish(thr);
  ProcUnwire(proc, thr);
  ProcDestroy(proc);
  DTLS_Destroy();
  cur_thread_finalize();
}

void PlatformCleanUpThreadState(ThreadState *thr) {
  ThreadSignalContext *sctx = thr->signal_ctx;
  if (sctx) {
    thr->signal_ctx = 0;
    UnmapOrDie(sctx, sizeof(*sctx));
  }
}
}  // namespace __tsan

#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
static void thread_finalize(void *v) {
  uptr iter = (uptr)v;
  if (iter > 1) {
    if (pthread_setspecific(interceptor_ctx()->finalize_key,
        (void*)(iter - 1))) {
      Printf("ThreadSanitizer: failed to set thread key\n");
      Die();
    }
    return;
  }
  DestroyThreadState();
}
#endif

struct ThreadParam {
  void* (*callback)(void *arg);
  void *param;
  Tid tid;
  Semaphore created;
  Semaphore started;
};

extern "C" void *__tsan_thread_start_func(void *arg) {
  ThreadParam *p = (ThreadParam*)arg;
  void* (*callback)(void *arg) = p->callback;
  void *param = p->param;
  {
    ThreadState *thr = cur_thread_init();
    // Thread-local state is not initialized yet.
    ScopedIgnoreInterceptors ignore;
#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
    ThreadIgnoreBegin(thr, 0);
    if (pthread_setspecific(interceptor_ctx()->finalize_key,
                            (void *)GetPthreadDestructorIterations())) {
      Printf("ThreadSanitizer: failed to set thread key\n");
      Die();
    }
    ThreadIgnoreEnd(thr);
#endif
    p->created.Wait();
    Processor *proc = ProcCreate();
    ProcWire(proc, thr);
    ThreadStart(thr, p->tid, GetTid(), ThreadType::Regular);
    p->started.Post();
  }
  void *res = callback(param);
  // Prevent the callback from being tail called,
  // it mixes up stack traces.
  volatile int foo = 42;
  foo++;
  return res;
}
TSAN_INTERCEPTOR(int, pthread_create,
    void *th, void *attr, void *(*callback)(void*), void * param) {
  SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);

  MaybeSpawnBackgroundThread();

  if (ctx->after_multithreaded_fork) {
    if (flags()->die_after_fork) {
      Report("ThreadSanitizer: starting new threads after multi-threaded "
             "fork is not supported. Dying (set die_after_fork=0 to override)\n");
      Die();
    } else {
      VPrintf(1,
              "ThreadSanitizer: starting new threads after multi-threaded "
              "fork is not supported (pid %lu). Continuing because of "
              "die_after_fork=0, but you are on your own\n",
              internal_getpid());
    }
  }
  __sanitizer_pthread_attr_t myattr;
  if (attr == 0) {
    pthread_attr_init(&myattr);
    attr = &myattr;
  }
  int detached = 0;
  REAL(pthread_attr_getdetachstate)(attr, &detached);
  AdjustStackSize(attr);

  ThreadParam p;
  p.callback = callback;
  p.param = param;
  p.tid = kMainTid;
  int res = -1;
  {
    // Otherwise we see false positives in pthread stack manipulation.
    ScopedIgnoreInterceptors ignore;
    ThreadIgnoreBegin(thr, pc);
    res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
    ThreadIgnoreEnd(thr);
  }
  if (res == 0) {
    p.tid = ThreadCreate(thr, pc, *(uptr *)th, IsStateDetached(detached));
    CHECK_NE(p.tid, kMainTid);
    // Synchronization on p.tid serves two purposes:
    // 1. ThreadCreate must finish before the new thread starts.
    //    Otherwise the new thread can call pthread_detach, but the pthread_t
    //    identifier is not yet registered in ThreadRegistry by ThreadCreate.
    // 2. ThreadStart must finish before this thread continues.
    //    Otherwise, this thread can call pthread_detach and reset thr->sync
    //    before the new thread got a chance to acquire from it in ThreadStart.
    p.created.Post();
    p.started.Wait();
  }
  if (attr == &myattr)
    pthread_attr_destroy(&myattr);
  return res;
}

TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
  SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
  Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
  ThreadIgnoreBegin(thr, pc);
  int res = BLOCK_REAL(pthread_join)(th, ret);
  ThreadIgnoreEnd(thr);
  if (res == 0) {
    ThreadJoin(thr, pc, tid);
  }
  return res;
}

DEFINE_REAL_PTHREAD_FUNCTIONS

TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
  SCOPED_INTERCEPTOR_RAW(pthread_detach, th);
  Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
  int res = REAL(pthread_detach)(th);
  if (res == 0) {
    ThreadDetach(thr, pc, tid);
  }
  return res;
}

TSAN_INTERCEPTOR(void, pthread_exit, void *retval) {
  {
    SCOPED_INTERCEPTOR_RAW(pthread_exit, retval);
#if !SANITIZER_MAC && !SANITIZER_ANDROID
    CHECK_EQ(thr, &cur_thread_placeholder);
#endif
  }
  REAL(pthread_exit)(retval);
}
#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
  SCOPED_INTERCEPTOR_RAW(pthread_tryjoin_np, th, ret);
  Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
  ThreadIgnoreBegin(thr, pc);
  int res = REAL(pthread_tryjoin_np)(th, ret);
  ThreadIgnoreEnd(thr);
  if (res == 0)
    ThreadJoin(thr, pc, tid);
  else
    ThreadNotJoined(thr, pc, tid, (uptr)th);
  return res;
}

TSAN_INTERCEPTOR(int, pthread_timedjoin_np, void *th, void **ret,
                 const struct timespec *abstime) {
  SCOPED_INTERCEPTOR_RAW(pthread_timedjoin_np, th, ret, abstime);
  Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
  ThreadIgnoreBegin(thr, pc);
  int res = BLOCK_REAL(pthread_timedjoin_np)(th, ret, abstime);
  ThreadIgnoreEnd(thr);
  if (res == 0)
    ThreadJoin(thr, pc, tid);
  else
    ThreadNotJoined(thr, pc, tid, (uptr)th);
  return res;
}
#endif

// Problem:
// NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2).
// pthread_cond_t has different size in the different versions.
// If we call new REAL functions for old pthread_cond_t, they will corrupt
// memory after pthread_cond_t (old cond is smaller).
// If we call old REAL functions for new pthread_cond_t, we will lose some
// functionality (e.g. old functions do not support waiting against
// CLOCK_REALTIME).
// Proper handling would require having 2 versions of interceptors as well.
// But this is messy, in particular it requires linker scripts when the
// sanitizer runtime is linked into a shared library.
// Instead we assume we don't have dynamic libraries built against old
// pthread (2.2.5 dates back to 2002), and provide the legacy_pthread_cond flag
// that allows working with old libraries (but this mode does not support
// some features, e.g. pthread_condattr_getpshared).
static void *init_cond(void *c, bool force = false) {
  // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
  // So we allocate additional memory on the side large enough to hold
  // any pthread_cond_t object. Always call new REAL functions, but pass
  // the aux object to them.
  // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes
  // the first word of pthread_cond_t to zero.
  // It's all relevant only for linux.
  if (!common_flags()->legacy_pthread_cond)
    return c;
  atomic_uintptr_t *p = (atomic_uintptr_t*)c;
  uptr cond = atomic_load(p, memory_order_acquire);
  if (!force && cond != 0)
    return (void*)cond;
  void *newcond = WRAP(malloc)(pthread_cond_t_sz);
  internal_memset(newcond, 0, pthread_cond_t_sz);
  if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond,
      memory_order_acq_rel))
    return newcond;
  WRAP(free)(newcond);
  return (void*)cond;
}
namespace {

template <class Fn>
struct CondMutexUnlockCtx {
  ScopedInterceptor *si;
  ThreadState *thr;
  uptr pc;
  void *m;
  void *c;
  const Fn &fn;

  int Cancel() const { return fn(); }
  void Unlock() const;
};

template <class Fn>
void CondMutexUnlockCtx<Fn>::Unlock() const {
  // pthread_cond_wait interceptor has enabled async signal delivery
  // (see BlockingCall below). Disable async signals since we are running
  // tsan code. Also ScopedInterceptor and BlockingCall destructors won't run
  // since the thread is cancelled, so we have to manually execute them
  // (the thread still can run some user code due to pthread_cleanup_push).
  ThreadSignalContext *ctx = SigCtx(thr);
  CHECK_EQ(atomic_load(&ctx->in_blocking_func, memory_order_relaxed), 1);
  atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
  MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
  // Undo BlockingCall ctor effects.
  thr->ignore_interceptors--;
  si->~ScopedInterceptor();
}
}  // namespace

INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
  void *cond = init_cond(c, true);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a);
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
  return REAL(pthread_cond_init)(cond, a);
}

template <class Fn>
int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si, const Fn &fn,
              void *c, void *m) {
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
  MutexUnlock(thr, pc, (uptr)m);
  int res = 0;
  // This ensures that we handle mutex lock even in case of pthread_cancel.
  // See test/tsan/cond_cancel.cpp.
  {
    // Enable signal delivery while the thread is blocked.
    BlockingCall bc(thr);
    CondMutexUnlockCtx<Fn> arg = {si, thr, pc, m, c, fn};
    res = call_pthread_cancel_with_cleanup(
        [](void *arg) -> int {
          return ((const CondMutexUnlockCtx<Fn> *)arg)->Cancel();
        },
        [](void *arg) { ((const CondMutexUnlockCtx<Fn> *)arg)->Unlock(); },
        &arg);
  }
  if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
  MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
  return res;
}
  1121. INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
  1122. void *cond = init_cond(c);
  1123. SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
  1124. return cond_wait(
  1125. thr, pc, &si, [=]() { return REAL(pthread_cond_wait)(cond, m); }, cond,
  1126. m);
  1127. }
  1128. INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
  1129. void *cond = init_cond(c);
  1130. SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
  1131. return cond_wait(
  1132. thr, pc, &si,
  1133. [=]() { return REAL(pthread_cond_timedwait)(cond, m, abstime); }, cond,
  1134. m);
  1135. }
  1136. #if SANITIZER_LINUX
  1137. INTERCEPTOR(int, pthread_cond_clockwait, void *c, void *m,
  1138. __sanitizer_clockid_t clock, void *abstime) {
  1139. void *cond = init_cond(c);
  1140. SCOPED_TSAN_INTERCEPTOR(pthread_cond_clockwait, cond, m, clock, abstime);
  1141. return cond_wait(
  1142. thr, pc, &si,
  1143. [=]() { return REAL(pthread_cond_clockwait)(cond, m, clock, abstime); },
  1144. cond, m);
  1145. }
  1146. #define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT TSAN_INTERCEPT(pthread_cond_clockwait)
  1147. #else
  1148. #define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT
  1149. #endif
  1150. #if SANITIZER_MAC
  1151. INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
  1152. void *reltime) {
  1153. void *cond = init_cond(c);
  1154. SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
  1155. return cond_wait(
  1156. thr, pc, &si,
  1157. [=]() {
  1158. return REAL(pthread_cond_timedwait_relative_np)(cond, m, reltime);
  1159. },
  1160. cond, m);
  1161. }
  1162. #endif

INTERCEPTOR(int, pthread_cond_signal, void *c) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
  return REAL(pthread_cond_signal)(cond);
}

INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
  return REAL(pthread_cond_broadcast)(cond);
}

INTERCEPTOR(int, pthread_cond_destroy, void *c) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
  int res = REAL(pthread_cond_destroy)(cond);
  if (common_flags()->legacy_pthread_cond) {
    // Free our aux cond and zero the pointer to not leave dangling pointers.
    WRAP(free)(cond);
    atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
  int res = REAL(pthread_mutex_init)(m, a);
  if (res == 0) {
    u32 flagz = 0;
    if (a) {
      int type = 0;
      if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
        if (type == PTHREAD_MUTEX_RECURSIVE ||
            type == PTHREAD_MUTEX_RECURSIVE_NP)
          flagz |= MutexFlagWriteReentrant;
    }
    MutexCreate(thr, pc, (uptr)m, flagz);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
  int res = REAL(pthread_mutex_destroy)(m);
  if (res == 0 || res == errno_EBUSY) {
    MutexDestroy(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
  int res = REAL(pthread_mutex_trylock)(m);
  if (res == errno_EOWNERDEAD)
    MutexRepair(thr, pc, (uptr)m);
  if (res == 0 || res == errno_EOWNERDEAD)
    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
  return res;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
  int res = REAL(pthread_mutex_timedlock)(m, abstime);
  if (res == 0) {
    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
  }
  return res;
}
#endif

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
  int res = REAL(pthread_spin_init)(m, pshared);
  if (res == 0) {
    MutexCreate(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
  int res = REAL(pthread_spin_destroy)(m);
  if (res == 0) {
    MutexDestroy(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
  MutexPreLock(thr, pc, (uptr)m);
  int res = REAL(pthread_spin_lock)(m);
  if (res == 0) {
    MutexPostLock(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
  int res = REAL(pthread_spin_trylock)(m);
  if (res == 0) {
    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
  MutexUnlock(thr, pc, (uptr)m);
  int res = REAL(pthread_spin_unlock)(m);
  return res;
}
#endif

TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
  int res = REAL(pthread_rwlock_init)(m, a);
  if (res == 0) {
    MutexCreate(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
  int res = REAL(pthread_rwlock_destroy)(m);
  if (res == 0) {
    MutexDestroy(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
  MutexPreReadLock(thr, pc, (uptr)m);
  int res = REAL(pthread_rwlock_rdlock)(m);
  if (res == 0) {
    MutexPostReadLock(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
  int res = REAL(pthread_rwlock_tryrdlock)(m);
  if (res == 0) {
    MutexPostReadLock(thr, pc, (uptr)m, MutexFlagTryLock);
  }
  return res;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
  int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
  if (res == 0) {
    MutexPostReadLock(thr, pc, (uptr)m);
  }
  return res;
}
#endif

TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
  MutexPreLock(thr, pc, (uptr)m);
  int res = REAL(pthread_rwlock_wrlock)(m);
  if (res == 0) {
    MutexPostLock(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
  int res = REAL(pthread_rwlock_trywrlock)(m);
  if (res == 0) {
    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
  }
  return res;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
  int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
  if (res == 0) {
    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
  }
  return res;
}
#endif

TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
  MutexReadOrWriteUnlock(thr, pc, (uptr)m);
  int res = REAL(pthread_rwlock_unlock)(m);
  return res;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
  SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
  MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
  int res = REAL(pthread_barrier_init)(b, a, count);
  return res;
}

TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
  SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
  MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
  int res = REAL(pthread_barrier_destroy)(b);
  return res;
}

TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
  SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
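  // Model the barrier as a release before the real wait and an acquire after
  // a successful wait, so everything done by any thread before the barrier
  // happens-before everything done by any thread after it.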
  Release(thr, pc, (uptr)b);
  MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
  int res = REAL(pthread_barrier_wait)(b);
  MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
  if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
    Acquire(thr, pc, (uptr)b);
  }
  return res;
}
#endif

TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
  SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
  if (o == 0 || f == 0)
    return errno_EINVAL;
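  // pthread_once_t layout differs across platforms; locate the 32-bit guard
  // word that is used with guard_acquire/guard_release below.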
  atomic_uint32_t *a;
  if (SANITIZER_MAC)
    a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
  else if (SANITIZER_NETBSD)
    a = static_cast<atomic_uint32_t*>
          ((void *)((char *)o + __sanitizer::pthread_mutex_t_sz));
  else
    a = static_cast<atomic_uint32_t*>(o);
  // Mac OS X appears to use pthread_once() in contexts where calling the
  // BlockingRegion hooks results in crashes due to too little stack space.
  if (guard_acquire(thr, pc, a, !SANITIZER_MAC)) {
    (*f)();
    guard_release(thr, pc, a, kGuardDone);
  }
  return 0;
}

#if SANITIZER_GLIBC
TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
  SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(__fxstat)(version, fd, buf);
}
#define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat)
#else
#define TSAN_MAYBE_INTERCEPT___FXSTAT
#endif
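
// On glibc, fstat() is provided via the versioned __fxstat() entry point, so
// route the interceptor through it there; elsewhere call fstat() directly.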
TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
#if SANITIZER_GLIBC
  SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(__fxstat)(0, fd, buf);
#else
  SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(fstat)(fd, buf);
#endif
}

#if SANITIZER_GLIBC
TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
  SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(__fxstat64)(version, fd, buf);
}
#define TSAN_MAYBE_INTERCEPT___FXSTAT64 TSAN_INTERCEPT(__fxstat64)
#else
#define TSAN_MAYBE_INTERCEPT___FXSTAT64
#endif

#if SANITIZER_GLIBC
TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
  SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(__fxstat64)(0, fd, buf);
}
#define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
#else
#define TSAN_MAYBE_INTERCEPT_FSTAT64
#endif

TSAN_INTERCEPTOR(int, open, const char *name, int oflag, ...) {
  va_list ap;
  va_start(ap, oflag);
  mode_t mode = va_arg(ap, int);
  va_end(ap);
  SCOPED_TSAN_INTERCEPTOR(open, name, oflag, mode);
  READ_STRING(thr, pc, name, 0);
  int fd = REAL(open)(name, oflag, mode);
  if (fd >= 0)
    FdFileCreate(thr, pc, fd);
  return fd;
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, open64, const char *name, int oflag, ...) {
  va_list ap;
  va_start(ap, oflag);
  mode_t mode = va_arg(ap, int);
  va_end(ap);
  SCOPED_TSAN_INTERCEPTOR(open64, name, oflag, mode);
  READ_STRING(thr, pc, name, 0);
  int fd = REAL(open64)(name, oflag, mode);
  if (fd >= 0)
    FdFileCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64)
#else
#define TSAN_MAYBE_INTERCEPT_OPEN64
#endif

TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
  SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
  READ_STRING(thr, pc, name, 0);
  int fd = REAL(creat)(name, mode);
  if (fd >= 0)
    FdFileCreate(thr, pc, fd);
  return fd;
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
  SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
  READ_STRING(thr, pc, name, 0);
  int fd = REAL(creat64)(name, mode);
  if (fd >= 0)
    FdFileCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64)
#else
#define TSAN_MAYBE_INTERCEPT_CREAT64
#endif

TSAN_INTERCEPTOR(int, dup, int oldfd) {
  SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
  int newfd = REAL(dup)(oldfd);
  if (oldfd >= 0 && newfd >= 0 && newfd != oldfd)
    FdDup(thr, pc, oldfd, newfd, true);
  return newfd;
}

TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
  SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd);
  int newfd2 = REAL(dup2)(oldfd, newfd);
  if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
    FdDup(thr, pc, oldfd, newfd2, false);
  return newfd2;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
  SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
  int newfd2 = REAL(dup3)(oldfd, newfd, flags);
  if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
    FdDup(thr, pc, oldfd, newfd2, false);
  return newfd2;
}
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
  SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
  int fd = REAL(eventfd)(initval, flags);
  if (fd >= 0)
    FdEventCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd)
#else
#define TSAN_MAYBE_INTERCEPT_EVENTFD
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
  SCOPED_INTERCEPTOR_RAW(signalfd, fd, mask, flags);
  FdClose(thr, pc, fd);
  fd = REAL(signalfd)(fd, mask, flags);
  if (!MustIgnoreInterceptor(thr))
    FdSignalCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd)
#else
#define TSAN_MAYBE_INTERCEPT_SIGNALFD
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, inotify_init, int fake) {
  SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
  int fd = REAL(inotify_init)(fake);
  if (fd >= 0)
    FdInotifyCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init)
#else
#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
  SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
  int fd = REAL(inotify_init1)(flags);
  if (fd >= 0)
    FdInotifyCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1)
#else
#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1
#endif

TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
  SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
  int fd = REAL(socket)(domain, type, protocol);
  if (fd >= 0)
    FdSocketCreate(thr, pc, fd);
  return fd;
}

TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) {
  SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd);
  int res = REAL(socketpair)(domain, type, protocol, fd);
  if (res == 0 && fd[0] >= 0 && fd[1] >= 0)
    FdPipeCreate(thr, pc, fd[0], fd[1]);
  return res;
}

TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) {
  SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen);
  FdSocketConnecting(thr, pc, fd);
  int res = REAL(connect)(fd, addr, addrlen);
  if (res == 0 && fd >= 0)
    FdSocketConnect(thr, pc, fd);
  return res;
}

TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) {
  SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen);
  int res = REAL(bind)(fd, addr, addrlen);
  if (fd > 0 && res == 0)
    FdAccess(thr, pc, fd);
  return res;
}

TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
  SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog);
  int res = REAL(listen)(fd, backlog);
  if (fd > 0 && res == 0)
    FdAccess(thr, pc, fd);
  return res;
}

TSAN_INTERCEPTOR(int, close, int fd) {
  SCOPED_INTERCEPTOR_RAW(close, fd);
  FdClose(thr, pc, fd);
  return REAL(close)(fd);
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, __close, int fd) {
  SCOPED_INTERCEPTOR_RAW(__close, fd);
  FdClose(thr, pc, fd);
  return REAL(__close)(fd);
}
#define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
#else
#define TSAN_MAYBE_INTERCEPT___CLOSE
#endif

// glibc guts
#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
  SCOPED_INTERCEPTOR_RAW(__res_iclose, state, free_addr);
  int fds[64];
  int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
  for (int i = 0; i < cnt; i++) FdClose(thr, pc, fds[i]);
  REAL(__res_iclose)(state, free_addr);
}
#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
#else
#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE
#endif

TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
  SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
  int res = REAL(pipe)(pipefd);
  if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
    FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
  return res;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
  SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
  int res = REAL(pipe2)(pipefd, flags);
  if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
    FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
  return res;
}
#endif

TSAN_INTERCEPTOR(int, unlink, char *path) {
  SCOPED_TSAN_INTERCEPTOR(unlink, path);
  Release(thr, pc, File2addr(path));
  int res = REAL(unlink)(path);
  return res;
}

TSAN_INTERCEPTOR(void*, tmpfile, int fake) {
  SCOPED_TSAN_INTERCEPTOR(tmpfile, fake);
  void *res = REAL(tmpfile)(fake);
  if (res) {
    int fd = fileno_unlocked(res);
    if (fd >= 0)
      FdFileCreate(thr, pc, fd);
  }
  return res;
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
  SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake);
  void *res = REAL(tmpfile64)(fake);
  if (res) {
    int fd = fileno_unlocked(res);
    if (fd >= 0)
      FdFileCreate(thr, pc, fd);
  }
  return res;
}
#define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64)
#else
#define TSAN_MAYBE_INTERCEPT_TMPFILE64
#endif

static void FlushStreams() {
  // Flushing all the streams here may freeze the process if a child thread is
  // performing file stream operations at the same time.
  REAL(fflush)(stdout);
  REAL(fflush)(stderr);
}

TSAN_INTERCEPTOR(void, abort, int fake) {
  SCOPED_TSAN_INTERCEPTOR(abort, fake);
  FlushStreams();
  REAL(abort)(fake);
}

TSAN_INTERCEPTOR(int, rmdir, char *path) {
  SCOPED_TSAN_INTERCEPTOR(rmdir, path);
  Release(thr, pc, Dir2addr(path));
  int res = REAL(rmdir)(path);
  return res;
}

TSAN_INTERCEPTOR(int, closedir, void *dirp) {
  SCOPED_INTERCEPTOR_RAW(closedir, dirp);
  if (dirp) {
    int fd = dirfd(dirp);
    FdClose(thr, pc, fd);
  }
  return REAL(closedir)(dirp);
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, epoll_create, int size) {
  SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
  int fd = REAL(epoll_create)(size);
  if (fd >= 0)
    FdPollCreate(thr, pc, fd);
  return fd;
}

TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
  SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
  int fd = REAL(epoll_create1)(flags);
  if (fd >= 0)
    FdPollCreate(thr, pc, fd);
  return fd;
}
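
// epoll synchronization model: EPOLL_CTL_ADD performs a release on the epoll
// descriptor and a successful epoll_wait/epoll_pwait performs an acquire on
// it, so writes made before registering an fd are visible to the thread that
// handles the resulting events.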
TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
  SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
  if (epfd >= 0)
    FdAccess(thr, pc, epfd);
  if (epfd >= 0 && fd >= 0)
    FdAccess(thr, pc, fd);
  if (op == EPOLL_CTL_ADD && epfd >= 0)
    FdRelease(thr, pc, epfd);
  int res = REAL(epoll_ctl)(epfd, op, fd, ev);
  return res;
}

TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
  SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
  if (epfd >= 0)
    FdAccess(thr, pc, epfd);
  int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout);
  if (res > 0 && epfd >= 0)
    FdAcquire(thr, pc, epfd);
  return res;
}

TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout,
                 void *sigmask) {
  SCOPED_TSAN_INTERCEPTOR(epoll_pwait, epfd, ev, cnt, timeout, sigmask);
  if (epfd >= 0)
    FdAccess(thr, pc, epfd);
  int res = BLOCK_REAL(epoll_pwait)(epfd, ev, cnt, timeout, sigmask);
  if (res > 0 && epfd >= 0)
    FdAcquire(thr, pc, epfd);
  return res;
}

#define TSAN_MAYBE_INTERCEPT_EPOLL \
  TSAN_INTERCEPT(epoll_create);    \
  TSAN_INTERCEPT(epoll_create1);   \
  TSAN_INTERCEPT(epoll_ctl);       \
  TSAN_INTERCEPT(epoll_wait);      \
  TSAN_INTERCEPT(epoll_pwait)
#else
#define TSAN_MAYBE_INTERCEPT_EPOLL
#endif

// The following functions are intercepted merely to process pending signals.
// If the program blocks signal X, we must deliver the signal before the
// function returns. Similarly, if the program unblocks a signal (or returns
// from sigsuspend), it's better to deliver the signal straight away.
TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
  SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
  return REAL(sigsuspend)(mask);
}

TSAN_INTERCEPTOR(int, sigblock, int mask) {
  SCOPED_TSAN_INTERCEPTOR(sigblock, mask);
  return REAL(sigblock)(mask);
}

TSAN_INTERCEPTOR(int, sigsetmask, int mask) {
  SCOPED_TSAN_INTERCEPTOR(sigsetmask, mask);
  return REAL(sigsetmask)(mask);
}

TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
                 __sanitizer_sigset_t *oldset) {
  SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset);
  return REAL(pthread_sigmask)(how, set, oldset);
}

namespace __tsan {

static void ReportErrnoSpoiling(ThreadState *thr, uptr pc) {
  VarSizeStackTrace stack;
  // StackTrace::GetNextInstructionPc(pc) is used because a return address is
  // expected; OutputReport() will undo this.
  ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
  ThreadRegistryLock l(&ctx->thread_registry);
  ScopedReport rep(ReportTypeErrnoInSignal);
  if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
    rep.AddStack(stack, true);
    OutputReport(thr, rep);
  }
}

static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
                                  int sig, __sanitizer_siginfo *info,
                                  void *uctx) {
  CHECK(thr->slot);
  __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
  if (acquire)
    Acquire(thr, 0, (uptr)&sigactions[sig]);
  // Signals are generally asynchronous, so if we receive a signal when
  // ignores are enabled we should disable ignores. This is critical for sync
  // and interceptors, because otherwise we can miss synchronization and report
  // false races.
  int ignore_reads_and_writes = thr->ignore_reads_and_writes;
  int ignore_interceptors = thr->ignore_interceptors;
  int ignore_sync = thr->ignore_sync;
  // For the symbolizer we only process SIGSEGVs synchronously
  // (bug in symbolizer or in tsan). But we want to reset
  // in_symbolizer to fail gracefully. Symbolizer and user code
  // use different memory allocators, so if we don't reset
  // in_symbolizer we can get memory allocated with one being
  // freed with another, which can cause more crashes.
  int in_symbolizer = thr->in_symbolizer;
  if (!ctx->after_multithreaded_fork) {
    thr->ignore_reads_and_writes = 0;
    thr->fast_state.ClearIgnoreBit();
    thr->ignore_interceptors = 0;
    thr->ignore_sync = 0;
    thr->in_symbolizer = 0;
  }
  // Ensure that the handler does not spoil errno.
  const int saved_errno = errno;
  errno = 99;
  // This code races with sigaction. Be careful to not read sa_sigaction twice.
  // Also need to remember pc for reporting before the call,
  // because the handler can reset it.
  volatile uptr pc = (sigactions[sig].sa_flags & SA_SIGINFO)
                         ? (uptr)sigactions[sig].sigaction
                         : (uptr)sigactions[sig].handler;
  if (pc != sig_dfl && pc != sig_ign) {
    // The callback can be either sa_handler or sa_sigaction.
    // They have different signatures, but we assume that passing
    // additional arguments to sa_handler works and is harmless.
    ((__sanitizer_sigactionhandler_ptr)pc)(sig, info, uctx);
  }
  if (!ctx->after_multithreaded_fork) {
    thr->ignore_reads_and_writes = ignore_reads_and_writes;
    if (ignore_reads_and_writes)
      thr->fast_state.SetIgnoreBit();
    thr->ignore_interceptors = ignore_interceptors;
    thr->ignore_sync = ignore_sync;
    thr->in_symbolizer = in_symbolizer;
  }
  // We do not detect errno spoiling for SIGTERM,
  // because some SIGTERM handlers do spoil errno but reraise SIGTERM,
  // and tsan would report a false positive in such a case.
  // It's difficult to properly detect this situation (reraise),
  // because in the async signal processing case (when the handler is called
  // directly from rtl_generic_sighandler) we have not yet received the
  // reraised signal; and it looks too fragile to intercept all ways to
  // reraise a signal.
  if (ShouldReport(thr, ReportTypeErrnoInSignal) && !sync && sig != SIGTERM &&
      errno != 99)
    ReportErrnoSpoiling(thr, pc);
  errno = saved_errno;
}
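
// Delivers signals deferred by sighandler() below: temporarily unblocks all
// signals, runs the stored user handlers, then restores the previous mask.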
void ProcessPendingSignalsImpl(ThreadState *thr) {
  atomic_store(&thr->pending_signals, 0, memory_order_relaxed);
  ThreadSignalContext *sctx = SigCtx(thr);
  if (sctx == 0)
    return;
  atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
  internal_sigfillset(&sctx->emptyset);
  int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset);
  CHECK_EQ(res, 0);
  for (int sig = 0; sig < kSigCount; sig++) {
    SignalDesc *signal = &sctx->pending_signals[sig];
    if (signal->armed) {
      signal->armed = false;
      CallUserSignalHandler(thr, false, true, sig, &signal->siginfo,
                            &signal->ctx);
    }
  }
  res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0);
  CHECK_EQ(res, 0);
  atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
}

}  // namespace __tsan

static bool is_sync_signal(ThreadSignalContext *sctx, int sig) {
  return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || sig == SIGTRAP ||
         sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS ||
         // If we are sending a signal to ourselves, we must process it now.
         (sctx && sig == sctx->int_signal_send);
}
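
// The actual handler installed for intercepted signals. Synchronous signals
// (and signals we send to ourselves) are delivered to the user handler
// immediately; other asynchronous signals are recorded in pending_signals and
// delivered later, when tsan's per-thread state is known to be consistent.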
void sighandler(int sig, __sanitizer_siginfo *info, void *ctx) {
  ThreadState *thr = cur_thread_init();
  ThreadSignalContext *sctx = SigCtx(thr);
  if (sig < 0 || sig >= kSigCount) {
    VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
    return;
  }
  // Don't mess with synchronous signals.
  const bool sync = is_sync_signal(sctx, sig);
  if (sync ||
      // If we are in a blocking function, we can safely process it now
      // (but check if we are in a recursive interceptor,
      // i.e. pthread_join()->munmap()).
      (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed))) {
    atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
    if (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed)) {
      atomic_store(&sctx->in_blocking_func, 0, memory_order_relaxed);
      CallUserSignalHandler(thr, sync, true, sig, info, ctx);
      atomic_store(&sctx->in_blocking_func, 1, memory_order_relaxed);
    } else {
      // Be very conservative with when we do acquire in this case.
      // It's unsafe to do acquire in async handlers, because ThreadState
      // can be in an inconsistent state.
      // SIGSYS looks relatively safe -- it's synchronous and can actually
      // need some global state.
      bool acq = (sig == SIGSYS);
      CallUserSignalHandler(thr, sync, acq, sig, info, ctx);
    }
    atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
    return;
  }
  if (sctx == 0)
    return;
  SignalDesc *signal = &sctx->pending_signals[sig];
  if (signal->armed == false) {
    signal->armed = true;
    internal_memcpy(&signal->siginfo, info, sizeof(*info));
    internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
    atomic_store(&thr->pending_signals, 1, memory_order_relaxed);
  }
}
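
// raise()/kill()/pthread_kill() targeting the current thread or process must
// run the handler right away; int_signal_send lets is_sync_signal() recognize
// the signal we are about to send to ourselves.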
TSAN_INTERCEPTOR(int, raise, int sig) {
  SCOPED_TSAN_INTERCEPTOR(raise, sig);
  ThreadSignalContext *sctx = SigCtx(thr);
  CHECK_NE(sctx, 0);
  int prev = sctx->int_signal_send;
  sctx->int_signal_send = sig;
  int res = REAL(raise)(sig);
  CHECK_EQ(sctx->int_signal_send, sig);
  sctx->int_signal_send = prev;
  return res;
}

TSAN_INTERCEPTOR(int, kill, int pid, int sig) {
  SCOPED_TSAN_INTERCEPTOR(kill, pid, sig);
  ThreadSignalContext *sctx = SigCtx(thr);
  CHECK_NE(sctx, 0);
  int prev = sctx->int_signal_send;
  if (pid == (int)internal_getpid()) {
    sctx->int_signal_send = sig;
  }
  int res = REAL(kill)(pid, sig);
  if (pid == (int)internal_getpid()) {
    CHECK_EQ(sctx->int_signal_send, sig);
    sctx->int_signal_send = prev;
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) {
  SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig);
  ThreadSignalContext *sctx = SigCtx(thr);
  CHECK_NE(sctx, 0);
  int prev = sctx->int_signal_send;
  bool self = pthread_equal(tid, pthread_self());
  if (self)
    sctx->int_signal_send = sig;
  int res = REAL(pthread_kill)(tid, sig);
  if (self) {
    CHECK_EQ(sctx->int_signal_send, sig);
    sctx->int_signal_send = prev;
  }
  return res;
}

TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
  SCOPED_TSAN_INTERCEPTOR(gettimeofday, tv, tz);
  // It's intercepted merely to process pending signals.
  return REAL(gettimeofday)(tv, tz);
}

TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
                 void *hints, void *rv) {
  SCOPED_TSAN_INTERCEPTOR(getaddrinfo, node, service, hints, rv);
  // We miss atomic synchronization in getaddrinfo,
  // and can report a false race between malloc and free
  // inside of getaddrinfo. So ignore memory accesses.
  ThreadIgnoreBegin(thr, pc);
  int res = REAL(getaddrinfo)(node, service, hints, rv);
  ThreadIgnoreEnd(thr);
  return res;
}

TSAN_INTERCEPTOR(int, fork, int fake) {
  if (in_symbolizer())
    return REAL(fork)(fake);
  SCOPED_INTERCEPTOR_RAW(fork, fake);
  return REAL(fork)(fake);
}
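
// fork() handlers, installed as pthread_atfork() callbacks: quiesce tsan
// before the fork, restore the parent afterwards, and re-initialize
// per-process state (including fd tracking) in the child.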
void atfork_prepare() {
  if (in_symbolizer())
    return;
  ThreadState *thr = cur_thread();
  const uptr pc = StackTrace::GetCurrentPc();
  ForkBefore(thr, pc);
}

void atfork_parent() {
  if (in_symbolizer())
    return;
  ThreadState *thr = cur_thread();
  const uptr pc = StackTrace::GetCurrentPc();
  ForkParentAfter(thr, pc);
}

void atfork_child() {
  if (in_symbolizer())
    return;
  ThreadState *thr = cur_thread();
  const uptr pc = StackTrace::GetCurrentPc();
  ForkChildAfter(thr, pc, true);
  FdOnFork(thr, pc);
}

#if !SANITIZER_IOS
TSAN_INTERCEPTOR(int, vfork, int fake) {
  // Some programs (e.g. openjdk) call close for all file descriptors
  // in the child process. Under tsan it leads to false positives, because
  // the address space is shared, so the parent process also thinks that
  // the descriptors are closed (while they are actually not).
  // This leads to false positives due to missed synchronization.
  // Strictly speaking this is undefined behavior, because the vfork child is
  // not allowed to call any functions other than exec/exit. But this is what
  // openjdk does, so we want to handle it.
  // We could disable interceptors in the child process. But it's not possible
  // to simply intercept and wrap vfork, because the vfork child is not allowed
  // to return from the function that calls vfork, and that's exactly what
  // we would do. So this would require some assembly trickery as well.
  // Instead we simply turn vfork into fork.
  return WRAP(fork)(fake);
}
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, clone, int (*fn)(void *), void *stack, int flags,
                 void *arg, int *parent_tid, void *tls, pid_t *child_tid) {
  SCOPED_INTERCEPTOR_RAW(clone, fn, stack, flags, arg, parent_tid, tls,
                         child_tid);
  struct Arg {
    int (*fn)(void *);
    void *arg;
  };
  auto wrapper = +[](void *p) -> int {
    auto *thr = cur_thread();
    uptr pc = GET_CURRENT_PC();
    // Start the background thread for fork, but not for clone.
    // For fork we did this always and it's known to work (or user code has
    // adapted). But if we do this for the new clone interceptor, some code
    // (sandbox2) fails. So keep the model we used for years and don't start
    // the background thread after clone.
    ForkChildAfter(thr, pc, false);
    FdOnFork(thr, pc);
    auto *arg = static_cast<Arg *>(p);
    return arg->fn(arg->arg);
  };
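  // Treat clone() like fork(): quiesce tsan (ForkBefore) around the real call,
  // re-initialize state in the child via the wrapper above, and restore the
  // parent with ForkParentAfter once the call returns.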
  ForkBefore(thr, pc);
  Arg arg_wrapper = {fn, arg};
  int pid = REAL(clone)(wrapper, stack, flags, &arg_wrapper, parent_tid, tls,
                        child_tid);
  ForkParentAfter(thr, pc);
  return pid;
}
#endif

#if !SANITIZER_MAC && !SANITIZER_ANDROID
typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
                                    void *data);
struct dl_iterate_phdr_data {
  ThreadState *thr;
  uptr pc;
  dl_iterate_phdr_cb_t cb;
  void *data;
};
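
// Application memory that is not marked as .rodata in shadow; the shadow
// resets below are only applied to such ranges.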
static bool IsAppNotRodata(uptr addr) {
  return IsAppMem(addr) && *MemToShadow(addr) != Shadow::kRodata;
}

static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
                              void *data) {
  dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data;
  // dlopen/dlclose allocate/free dynamic-linker-internal memory, which is
  // later accessible in the dl_iterate_phdr callback. But we don't see
  // synchronization inside of the dynamic linker, so we "unpoison" it here in
  // order to not produce false reports. Ignoring malloc/free in dlopen/dlclose
  // is not enough because some libc functions call __libc_dlopen.
  if (info && IsAppNotRodata((uptr)info->dlpi_name))
    MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
                     internal_strlen(info->dlpi_name));
  int res = cbdata->cb(info, size, cbdata->data);
  // Perform the check one more time in case info->dlpi_name was overwritten
  // by the user callback.
  if (info && IsAppNotRodata((uptr)info->dlpi_name))
    MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
                     internal_strlen(info->dlpi_name));
  return res;
}

TSAN_INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb_t cb, void *data) {
  SCOPED_TSAN_INTERCEPTOR(dl_iterate_phdr, cb, data);
  dl_iterate_phdr_data cbdata;
  cbdata.thr = thr;
  cbdata.pc = pc;
  cbdata.cb = cb;
  cbdata.data = data;
  int res = REAL(dl_iterate_phdr)(dl_iterate_phdr_cb, &cbdata);
  return res;
}
#endif

static int OnExit(ThreadState *thr) {
  int status = Finalize(thr);
  FlushStreams();
  return status;
}

struct TsanInterceptorContext {
  ThreadState *thr;
  const uptr pc;
};

#if !SANITIZER_MAC
static void HandleRecvmsg(ThreadState *thr, uptr pc,
                          __sanitizer_msghdr *msg) {
  int fds[64];
  int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds));
  for (int i = 0; i < cnt; i++)
    FdEventCreate(thr, pc, fds[i]);
}
#endif
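
// Glue for the generic sanitizer_common interceptors: the COMMON_INTERCEPTOR_*
// macros below map the shared hook points (memory ranges, file descriptors,
// mutexes, TLS, etc.) onto tsan's own modeling functions before the shared
// interceptor definitions are included.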
#include "sanitizer_common/sanitizer_platform_interceptors.h"
// Causes interceptor recursion (getaddrinfo() and fopen())
#undef SANITIZER_INTERCEPT_GETADDRINFO
// We define our own.
#if SANITIZER_INTERCEPT_TLS_GET_ADDR
#define NEED_TLS_GET_ADDR
#endif
#undef SANITIZER_INTERCEPT_TLS_GET_ADDR
#define SANITIZER_INTERCEPT_TLS_GET_OFFSET 1
#undef SANITIZER_INTERCEPT_PTHREAD_SIGMASK

#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
  INTERCEPT_FUNCTION_VER(name, ver)
#define COMMON_INTERCEPT_FUNCTION_VER_UNVERSIONED_FALLBACK(name, ver) \
  (INTERCEPT_FUNCTION_VER(name, ver) || INTERCEPT_FUNCTION(name))

#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size)                    \
  MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr,                 \
                    ((TsanInterceptorContext *)ctx)->pc, (uptr)ptr, size, \
                    true)

#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size)                       \
  MemoryAccessRange(((TsanInterceptorContext *) ctx)->thr,                  \
                    ((TsanInterceptorContext *) ctx)->pc, (uptr) ptr, size, \
                    false)

#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
  SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__);    \
  TsanInterceptorContext _ctx = {thr, pc};       \
  ctx = (void *)&_ctx;                           \
  (void)ctx;

#define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
  SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__);              \
  TsanInterceptorContext _ctx = {thr, pc};                \
  ctx = (void *)&_ctx;                                    \
  (void)ctx;

#define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
  if (path)                                           \
    Acquire(thr, pc, File2addr(path));                \
  if (file) {                                         \
    int fd = fileno_unlocked(file);                   \
    if (fd >= 0) FdFileCreate(thr, pc, fd);           \
  }

#define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
  if (file) {                                    \
    int fd = fileno_unlocked(file);              \
    FdClose(thr, pc, fd);                        \
  }

#define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \
  ({                                              \
    CheckNoDeepBind(filename, flag);              \
    ThreadIgnoreBegin(thr, 0);                    \
    void *res = REAL(dlopen)(filename, flag);     \
    ThreadIgnoreEnd(thr);                         \
    res;                                          \
  })

#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
  libignore()->OnLibraryLoaded(filename)

#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
  libignore()->OnLibraryUnloaded()

#define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \
  Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u)

#define COMMON_INTERCEPTOR_RELEASE(ctx, u) \
  Release(((TsanInterceptorContext *) ctx)->thr, pc, u)

#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
  Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path))

#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
  FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)

#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
  FdRelease(((TsanInterceptorContext *) ctx)->thr, pc, fd)

#define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) \
  FdAccess(((TsanInterceptorContext *) ctx)->thr, pc, fd)

#define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
  FdSocketAccept(((TsanInterceptorContext *) ctx)->thr, pc, fd, newfd)

#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
  ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)

#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name)         \
  if (pthread_equal(pthread_self(), reinterpret_cast<void *>(thread))) \
    COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name);                     \
  else                                                                 \
    __tsan::ctx->thread_registry.SetThreadNameByUserId(thread, name)

#define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)

#define COMMON_INTERCEPTOR_ON_EXIT(ctx) \
  OnExit(((TsanInterceptorContext *) ctx)->thr)

#define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m)    \
  MutexPreLock(((TsanInterceptorContext *)ctx)->thr, \
               ((TsanInterceptorContext *)ctx)->pc, (uptr)m)

#define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m)    \
  MutexPostLock(((TsanInterceptorContext *)ctx)->thr, \
                ((TsanInterceptorContext *)ctx)->pc, (uptr)m)

#define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m)     \
  MutexUnlock(((TsanInterceptorContext *)ctx)->thr, \
              ((TsanInterceptorContext *)ctx)->pc, (uptr)m)

#define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m)     \
  MutexRepair(((TsanInterceptorContext *)ctx)->thr, \
              ((TsanInterceptorContext *)ctx)->pc, (uptr)m)

#define COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m)           \
  MutexInvalidAccess(((TsanInterceptorContext *)ctx)->thr, \
                     ((TsanInterceptorContext *)ctx)->pc, (uptr)m)

#define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd,  \
                                     off)                                   \
  do {                                                                      \
    return mmap_interceptor(thr, pc, REAL(mmap), addr, sz, prot, flags, fd, \
                            off);                                           \
  } while (false)

#if !SANITIZER_MAC
#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg)   \
  HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
                ((TsanInterceptorContext *)ctx)->pc, msg)
#endif

#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
  if (TsanThread *t = GetCurrentThread()) {          \
    *begin = t->tls_begin();                         \
    *end = t->tls_end();                             \
  } else {                                           \
    *begin = *end = 0;                               \
  }

#define COMMON_INTERCEPTOR_USER_CALLBACK_START() \
  SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START()

#define COMMON_INTERCEPTOR_USER_CALLBACK_END() \
  SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END()

#include "sanitizer_common/sanitizer_common_interceptors.inc"

static int sigaction_impl(int sig, const __sanitizer_sigaction *act,
                          __sanitizer_sigaction *old);
static __sanitizer_sighandler_ptr signal_impl(int sig,
                                              __sanitizer_sighandler_ptr h);

#define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signo, act, oldact) \
  { return sigaction_impl(signo, act, oldact); }

#define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signo, handler) \
  { return (uptr)signal_impl(signo, (__sanitizer_sighandler_ptr)handler); }

#include "sanitizer_common/sanitizer_signal_interceptors.inc"

int sigaction_impl(int sig, const __sanitizer_sigaction *act,
                   __sanitizer_sigaction *old) {
  // Note: if we call REAL(sigaction) directly for any reason without proxying
  // the signal handler through sighandler, very bad things will happen.
  // The handler will run synchronously and corrupt tsan per-thread state.
  SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
  if (sig <= 0 || sig >= kSigCount) {
    errno = errno_EINVAL;
    return -1;
  }
  __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
  __sanitizer_sigaction old_stored;
  if (old) internal_memcpy(&old_stored, &sigactions[sig], sizeof(old_stored));
  __sanitizer_sigaction newact;
  if (act) {
    // Copy act into sigactions[sig].
    // Can't use struct copy, because the compiler can emit a call to memcpy.
    // Can't use internal_memcpy, because it copies byte-by-byte,
    // and the signal handler reads the handler concurrently, so it could see
    // some bytes from the old value and some bytes from the new value.
    // Use volatile to prevent insertion of memcpy.
    sigactions[sig].handler =
        *(volatile __sanitizer_sighandler_ptr const *)&act->handler;
    sigactions[sig].sa_flags = *(volatile int const *)&act->sa_flags;
    internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
                    sizeof(sigactions[sig].sa_mask));
#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
    sigactions[sig].sa_restorer = act->sa_restorer;
#endif
    internal_memcpy(&newact, act, sizeof(newact));
    internal_sigfillset(&newact.sa_mask);
    if ((act->sa_flags & SA_SIGINFO) ||
        ((uptr)act->handler != sig_ign && (uptr)act->handler != sig_dfl)) {
      newact.sa_flags |= SA_SIGINFO;
      newact.sigaction = sighandler;
    }
    ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
    act = &newact;
  }
  int res = REAL(sigaction)(sig, act, old);
  if (res == 0 && old && old->sigaction == sighandler)
    internal_memcpy(old, &old_stored, sizeof(*old));
  return res;
}

static __sanitizer_sighandler_ptr signal_impl(int sig,
                                              __sanitizer_sighandler_ptr h) {
  __sanitizer_sigaction act;
  act.handler = h;
  internal_memset(&act.sa_mask, -1, sizeof(act.sa_mask));
  act.sa_flags = 0;
  __sanitizer_sigaction old;
  int res = sigaction_symname(sig, &act, &old);
  if (res) return (__sanitizer_sighandler_ptr)sig_err;
  return old.handler;
}
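
// Helpers for the common syscall hooks included below. Most of them behave
// like lightweight interceptors via TSAN_SYSCALL/ScopedSyscall: they do
// nothing while interceptors are ignored and process pending signals on
// scope exit.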
#define TSAN_SYSCALL()             \
  ThreadState *thr = cur_thread(); \
  if (thr->ignore_interceptors)    \
    return;                        \
  ScopedSyscall scoped_syscall(thr)

struct ScopedSyscall {
  ThreadState *thr;

  explicit ScopedSyscall(ThreadState *thr) : thr(thr) { LazyInitialize(thr); }

  ~ScopedSyscall() {
    ProcessPendingSignals(thr);
  }
};

#if !SANITIZER_FREEBSD && !SANITIZER_MAC
static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
  TSAN_SYSCALL();
  MemoryAccessRange(thr, pc, p, s, write);
}

static USED void syscall_acquire(uptr pc, uptr addr) {
  TSAN_SYSCALL();
  Acquire(thr, pc, addr);
  DPrintf("syscall_acquire(0x%zx))\n", addr);
}

static USED void syscall_release(uptr pc, uptr addr) {
  TSAN_SYSCALL();
  DPrintf("syscall_release(0x%zx)\n", addr);
  Release(thr, pc, addr);
}

static void syscall_fd_close(uptr pc, int fd) {
  auto *thr = cur_thread();
  FdClose(thr, pc, fd);
}

static USED void syscall_fd_acquire(uptr pc, int fd) {
  TSAN_SYSCALL();
  FdAcquire(thr, pc, fd);
  DPrintf("syscall_fd_acquire(%d)\n", fd);
}

static USED void syscall_fd_release(uptr pc, int fd) {
  TSAN_SYSCALL();
  DPrintf("syscall_fd_release(%d)\n", fd);
  FdRelease(thr, pc, fd);
}

static void syscall_pre_fork(uptr pc) { ForkBefore(cur_thread(), pc); }

static void syscall_post_fork(uptr pc, int pid) {
  ThreadState *thr = cur_thread();
  if (pid == 0) {
    // child
    ForkChildAfter(thr, pc, true);
    FdOnFork(thr, pc);
  } else if (pid > 0) {
    // parent
    ForkParentAfter(thr, pc);
  } else {
    // error
    ForkParentAfter(thr, pc);
  }
}
#endif

#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \
  syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false)

#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
  syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), true)

#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
  do {                                       \
    (void)(p);                               \
    (void)(s);                               \
  } while (false)

#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
  do {                                        \
    (void)(p);                                \
    (void)(s);                                \
  } while (false)

#define COMMON_SYSCALL_ACQUIRE(addr) \
  syscall_acquire(GET_CALLER_PC(), (uptr)(addr))

#define COMMON_SYSCALL_RELEASE(addr) \
  syscall_release(GET_CALLER_PC(), (uptr)(addr))

#define COMMON_SYSCALL_FD_CLOSE(fd) syscall_fd_close(GET_CALLER_PC(), fd)

#define COMMON_SYSCALL_FD_ACQUIRE(fd) syscall_fd_acquire(GET_CALLER_PC(), fd)

#define COMMON_SYSCALL_FD_RELEASE(fd) syscall_fd_release(GET_CALLER_PC(), fd)

#define COMMON_SYSCALL_PRE_FORK() \
  syscall_pre_fork(GET_CALLER_PC())

#define COMMON_SYSCALL_POST_FORK(res) \
  syscall_post_fork(GET_CALLER_PC(), res)

#include "sanitizer_common/sanitizer_common_syscalls.inc"
#include "sanitizer_common/sanitizer_syscalls_netbsd.inc"

#ifdef NEED_TLS_GET_ADDR
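// Called after the real __tls_get_addr/__tls_get_offset: if the dynamic
// linker just allocated a new DTLS block for this thread, reset its shadow so
// that it starts in a clean state.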
static void handle_tls_addr(void *arg, void *res) {
  ThreadState *thr = cur_thread();
  if (!thr)
    return;
  DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr,
                                        thr->tls_addr + thr->tls_size);
  if (!dtv)
    return;
  // New DTLS block has been allocated.
  MemoryResetRange(thr, 0, dtv->beg, dtv->size);
}

#if !SANITIZER_S390
// Define our own interceptor instead of sanitizer_common's for three reasons:
// 1. It must not process pending signals.
//    Signal handlers may contain MOVDQA instruction (see below).
// 2. It must be as simple as possible to not contain MOVDQA.
// 3. Sanitizer_common version uses COMMON_INTERCEPTOR_INITIALIZE_RANGE which
//    is empty for tsan (meant only for msan).
// Note: __tls_get_addr can be called with a mis-aligned stack due to:
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
// So the interceptor must work with a mis-aligned stack; in particular, it
// must not execute MOVDQA with stack addresses.
TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) {
  void *res = REAL(__tls_get_addr)(arg);
  handle_tls_addr(arg, res);
  return res;
}
#else // SANITIZER_S390
TSAN_INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
  uptr res = __tls_get_offset_wrapper(arg, REAL(__tls_get_offset));
  char *tp = static_cast<char *>(__builtin_thread_pointer());
  handle_tls_addr(arg, res + tp);
  return res;
}
#endif
#endif

#if SANITIZER_NETBSD
TSAN_INTERCEPTOR(void, _lwp_exit) {
  SCOPED_TSAN_INTERCEPTOR(_lwp_exit);
  DestroyThreadState();
  REAL(_lwp_exit)();
}
#define TSAN_MAYBE_INTERCEPT__LWP_EXIT TSAN_INTERCEPT(_lwp_exit)
#else
#define TSAN_MAYBE_INTERCEPT__LWP_EXIT
#endif

#if SANITIZER_FREEBSD
TSAN_INTERCEPTOR(void, thr_exit, tid_t *state) {
  SCOPED_TSAN_INTERCEPTOR(thr_exit, state);
  DestroyThreadState();
  REAL(thr_exit)(state);
}
#define TSAN_MAYBE_INTERCEPT_THR_EXIT TSAN_INTERCEPT(thr_exit)
#else
#define TSAN_MAYBE_INTERCEPT_THR_EXIT
#endif

TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_init, void *c, void *a)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_destroy, void *c)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_signal, void *c)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_broadcast, void *c)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_wait, void *c, void *m)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_init, void *m, void *a)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_destroy, void *m)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_lock, void *m)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_trylock, void *m)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_unlock, void *m)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_init, void *l, void *a)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_destroy, void *l)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_rdlock, void *l)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_tryrdlock, void *l)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_wrlock, void *l)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_trywrlock, void *l)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_unlock, void *l)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, once, void *o, void (*i)())
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, sigmask, int f, void *n, void *o)

TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_init, void *c, void *a)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_signal, void *c)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_broadcast, void *c)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_wait, void *c, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_destroy, void *c)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_init, void *m, void *a)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_destroy, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_trylock, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_init, void *m, void *a)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_destroy, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_rdlock, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_tryrdlock, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_wrlock, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_trywrlock, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_unlock, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(int, once, void *o, void (*f)())
TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(int, sigsetmask, sigmask, int a, void *b,
                                   void *c)
  2470. namespace __tsan {
  2471. static void finalize(void *arg) {
  2472. ThreadState *thr = cur_thread();
  2473. int status = Finalize(thr);
  2474. // Make sure the output is not lost.
  2475. FlushStreams();
  2476. if (status)
  2477. Die();
  2478. }
  2479. #if !SANITIZER_MAC && !SANITIZER_ANDROID
  2480. static void unreachable() {
  2481. Report("FATAL: ThreadSanitizer: unreachable called\n");
  2482. Die();
  2483. }
  2484. #endif
  2485. // Define default implementation since interception of libdispatch is optional.
  2486. SANITIZER_WEAK_ATTRIBUTE void InitializeLibdispatchInterceptors() {}
void InitializeInterceptors() {
#if !SANITIZER_MAC
  // We need to set these up early, because functions like dlsym() can call
  // them.
  REAL(memset) = internal_memset;
  REAL(memcpy) = internal_memcpy;
#endif

  new(interceptor_ctx()) InterceptorContext();

  InitializeCommonInterceptors();
  InitializeSignalInterceptors();
  InitializeLibdispatchInterceptors();

#if !SANITIZER_MAC
  // We cannot use TSAN_INTERCEPT to get the setjmp address, because it does
  // &setjmp and setjmp is not present in some versions of libc.
  using __interception::InterceptFunction;
  InterceptFunction(TSAN_STRING_SETJMP, (uptr*)&REAL(setjmp_symname), 0, 0);
  InterceptFunction("_setjmp", (uptr*)&REAL(_setjmp), 0, 0);
  InterceptFunction(TSAN_STRING_SIGSETJMP, (uptr*)&REAL(sigsetjmp_symname), 0,
                    0);
#if !SANITIZER_NETBSD
  InterceptFunction("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
#endif
#endif

  TSAN_INTERCEPT(longjmp_symname);
  TSAN_INTERCEPT(siglongjmp_symname);
#if SANITIZER_NETBSD
  TSAN_INTERCEPT(_longjmp);
#endif
  TSAN_INTERCEPT(malloc);
  TSAN_INTERCEPT(__libc_memalign);
  TSAN_INTERCEPT(calloc);
  TSAN_INTERCEPT(realloc);
  TSAN_INTERCEPT(reallocarray);
  TSAN_INTERCEPT(free);
  TSAN_INTERCEPT(cfree);
  TSAN_INTERCEPT(munmap);
  TSAN_MAYBE_INTERCEPT_MEMALIGN;
  TSAN_INTERCEPT(valloc);
  TSAN_MAYBE_INTERCEPT_PVALLOC;
  TSAN_INTERCEPT(posix_memalign);

  TSAN_INTERCEPT(strcpy);
  TSAN_INTERCEPT(strncpy);
  TSAN_INTERCEPT(strdup);

  TSAN_INTERCEPT(pthread_create);
  TSAN_INTERCEPT(pthread_join);
  TSAN_INTERCEPT(pthread_detach);
  TSAN_INTERCEPT(pthread_exit);
#if SANITIZER_LINUX
  TSAN_INTERCEPT(pthread_tryjoin_np);
  TSAN_INTERCEPT(pthread_timedjoin_np);
#endif

  TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE);
  TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE);
  TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE);
  TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE);
  TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE);
  TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE);
  TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT;

  TSAN_INTERCEPT(pthread_mutex_init);
  TSAN_INTERCEPT(pthread_mutex_destroy);
  TSAN_INTERCEPT(pthread_mutex_trylock);
  TSAN_INTERCEPT(pthread_mutex_timedlock);

  TSAN_INTERCEPT(pthread_spin_init);
  TSAN_INTERCEPT(pthread_spin_destroy);
  TSAN_INTERCEPT(pthread_spin_lock);
  TSAN_INTERCEPT(pthread_spin_trylock);
  TSAN_INTERCEPT(pthread_spin_unlock);

  TSAN_INTERCEPT(pthread_rwlock_init);
  TSAN_INTERCEPT(pthread_rwlock_destroy);
  TSAN_INTERCEPT(pthread_rwlock_rdlock);
  TSAN_INTERCEPT(pthread_rwlock_tryrdlock);
  TSAN_INTERCEPT(pthread_rwlock_timedrdlock);
  TSAN_INTERCEPT(pthread_rwlock_wrlock);
  TSAN_INTERCEPT(pthread_rwlock_trywrlock);
  TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
  TSAN_INTERCEPT(pthread_rwlock_unlock);

  TSAN_INTERCEPT(pthread_barrier_init);
  TSAN_INTERCEPT(pthread_barrier_destroy);
  TSAN_INTERCEPT(pthread_barrier_wait);

  TSAN_INTERCEPT(pthread_once);
  TSAN_INTERCEPT(fstat);
  TSAN_MAYBE_INTERCEPT___FXSTAT;
  TSAN_MAYBE_INTERCEPT_FSTAT64;
  TSAN_MAYBE_INTERCEPT___FXSTAT64;
  TSAN_INTERCEPT(open);
  TSAN_MAYBE_INTERCEPT_OPEN64;
  TSAN_INTERCEPT(creat);
  TSAN_MAYBE_INTERCEPT_CREAT64;
  TSAN_INTERCEPT(dup);
  TSAN_INTERCEPT(dup2);
  TSAN_INTERCEPT(dup3);
  TSAN_MAYBE_INTERCEPT_EVENTFD;
  TSAN_MAYBE_INTERCEPT_SIGNALFD;
  TSAN_MAYBE_INTERCEPT_INOTIFY_INIT;
  TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1;
  TSAN_INTERCEPT(socket);
  TSAN_INTERCEPT(socketpair);
  TSAN_INTERCEPT(connect);
  TSAN_INTERCEPT(bind);
  TSAN_INTERCEPT(listen);
  TSAN_MAYBE_INTERCEPT_EPOLL;
  TSAN_INTERCEPT(close);
  TSAN_MAYBE_INTERCEPT___CLOSE;
  TSAN_MAYBE_INTERCEPT___RES_ICLOSE;
  TSAN_INTERCEPT(pipe);
  TSAN_INTERCEPT(pipe2);

  TSAN_INTERCEPT(unlink);
  TSAN_INTERCEPT(tmpfile);
  TSAN_MAYBE_INTERCEPT_TMPFILE64;
  TSAN_INTERCEPT(abort);
  TSAN_INTERCEPT(rmdir);
  TSAN_INTERCEPT(closedir);

  TSAN_INTERCEPT(sigsuspend);
  TSAN_INTERCEPT(sigblock);
  TSAN_INTERCEPT(sigsetmask);
  TSAN_INTERCEPT(pthread_sigmask);
  TSAN_INTERCEPT(raise);
  TSAN_INTERCEPT(kill);
  TSAN_INTERCEPT(pthread_kill);
  TSAN_INTERCEPT(sleep);
  TSAN_INTERCEPT(usleep);
  TSAN_INTERCEPT(nanosleep);
  TSAN_INTERCEPT(pause);
  TSAN_INTERCEPT(gettimeofday);
  TSAN_INTERCEPT(getaddrinfo);

  TSAN_INTERCEPT(fork);
  TSAN_INTERCEPT(vfork);
#if SANITIZER_LINUX
  TSAN_INTERCEPT(clone);
#endif
#if !SANITIZER_ANDROID
  TSAN_INTERCEPT(dl_iterate_phdr);
#endif
  TSAN_MAYBE_INTERCEPT_ON_EXIT;
  TSAN_INTERCEPT(__cxa_atexit);
  TSAN_INTERCEPT(_exit);

#ifdef NEED_TLS_GET_ADDR
#if !SANITIZER_S390
  TSAN_INTERCEPT(__tls_get_addr);
#else
  TSAN_INTERCEPT(__tls_get_addr_internal);
  TSAN_INTERCEPT(__tls_get_offset);
#endif
#endif
  TSAN_MAYBE_INTERCEPT__LWP_EXIT;
  TSAN_MAYBE_INTERCEPT_THR_EXIT;

#if !SANITIZER_MAC && !SANITIZER_ANDROID
  // Need to set this up, because interceptors check that the function is
  // resolved. But atexit is emitted directly into the module, so it can't be
  // resolved.
  REAL(atexit) = (int(*)(void(*)()))unreachable;
#endif

  if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
    Printf("ThreadSanitizer: failed to setup atexit callback\n");
    Die();
  }
  if (pthread_atfork(atfork_prepare, atfork_parent, atfork_child)) {
    Printf("ThreadSanitizer: failed to setup atfork callbacks\n");
    Die();
  }

#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
  if (pthread_key_create(&interceptor_ctx()->finalize_key, &thread_finalize)) {
    Printf("ThreadSanitizer: failed to create thread key\n");
    Die();
  }
#endif
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_init);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_destroy);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_signal);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_broadcast);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_wait);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_init);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_destroy);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_lock);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_trylock);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_unlock);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_init);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_destroy);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_rdlock);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_tryrdlock);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_wrlock);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_trywrlock);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_unlock);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(once);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(sigmask);

  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_init);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_signal);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_broadcast);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_wait);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_destroy);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_init);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_destroy);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_trylock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_init);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_destroy);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_rdlock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_tryrdlock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_wrlock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_trywrlock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_unlock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(once);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(sigsetmask);

  FdInit();
}

}  // namespace __tsan
// Invisible barrier for tests.
// There were several unsuccessful iterations for this functionality:
//
// 1. Initially it was implemented in user code using
//    REAL(pthread_barrier_wait). But pthread_barrier_wait is not supported on
//    MacOS. Futexes are linux-specific for this matter.
//
// 2. Then we switched to atomics+usleep(10). But usleep produced parasitic
//    "as-if synchronized via sleep" messages in reports which failed some
//    output tests.
//
// 3. Then we switched to atomics+sched_yield. But this produced tons of
//    tsan-visible events, which led to "failed to restore stack trace"
//    failures.
//
// Note that the no_sanitize_thread attribute does not turn off atomic
// interception, so attaching it to a function defined in user code does not
// help. That's why we now have what we have.
constexpr u32 kBarrierThreadBits = 10;
constexpr u32 kBarrierThreads = 1 << kBarrierThreadBits;
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_init(
    atomic_uint32_t *barrier, u32 num_threads) {
  if (num_threads >= kBarrierThreads) {
    Printf("barrier_init: count is too large (%d)\n", num_threads);
    Die();
  }
  // The kBarrierThreadBits least significant bits hold the thread count;
  // the remaining bits hold the number of threads that have entered.
  atomic_store(barrier, num_threads, memory_order_relaxed);
}
static u32 barrier_epoch(u32 value) {
  return (value >> kBarrierThreadBits) / (value & (kBarrierThreads - 1));
}
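// Worked example (illustrative; not part of the original source): with
// kBarrierThreadBits = 10 and num_threads = 2, barrier_init stores 2.
// Each waiter adds kBarrierThreads (1 << 10), so the "entered" field in the
// upper bits grows by one per waiter. After the first waiter the value is
// 0x402: entered = 1, count = 2, epoch = 1 / 2 = 0, which equals the old
// epoch, so that waiter blocks. After the second waiter the value is 0x802:
// entered = 2, epoch = 2 / 2 = 1, which differs from the old epoch, so the
// second waiter wakes everyone via FutexWake.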
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_wait(
    atomic_uint32_t *barrier) {
  u32 old = atomic_fetch_add(barrier, kBarrierThreads, memory_order_relaxed);
  u32 old_epoch = barrier_epoch(old);
  if (barrier_epoch(old + kBarrierThreads) != old_epoch) {
    FutexWake(barrier, (1 << 30));
    return;
  }
  for (;;) {
    u32 cur = atomic_load(barrier, memory_order_relaxed);
    if (barrier_epoch(cur) != old_epoch)
      return;
    FutexWait(barrier, cur);
  }
}
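// A rough usage sketch (illustrative only; the identifier below is
// hypothetical, and the tsan lit tests typically reach these entry points
// through small barrier_init()/barrier_wait() helpers in their shared test
// header rather than calling them directly):
//
//   unsigned barrier_word;                           // 32-bit barrier storage
//   __tsan_testonly_barrier_init(&barrier_word, 2);  // two participating threads
//   ...
//   __tsan_testonly_barrier_wait(&barrier_word);     // called by each thread
//
// Because the waiters spin on FutexWait with relaxed atomics, the barrier
// enforces real-time ordering between the test's threads without introducing
// tsan-visible synchronization that would suppress the races the tests are
// designed to provoke.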