//=-- lsan_common.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

#if CAN_SANITIZE_LEAKS

#  if SANITIZER_APPLE
// https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L127
#    if SANITIZER_IOS && !SANITIZER_IOSSIM
#      define OBJC_DATA_MASK 0x0000007ffffffff8UL
#    else
#      define OBJC_DATA_MASK 0x00007ffffffffff8UL
#    endif
// https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L139
#    define OBJC_FAST_IS_RW 0x8000000000000000UL
#  endif

namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
Mutex global_mutex;

Flags lsan_flags;

void DisableCounterUnderflow() {
  if (common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
}

void Flags::SetDefaults() {
#  define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#  include "lsan_flags.inc"
#  undef LSAN_FLAG
}

void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#  define LSAN_FLAG(Type, Name, DefaultValue, Description) \
    RegisterFlag(parser, #Name, Description, &f->Name);
#  include "lsan_flags.inc"
#  undef LSAN_FLAG
}

#  define LOG_POINTERS(...)      \
    do {                         \
      if (flags()->log_pointers) \
        Report(__VA_ARGS__);     \
    } while (0)

#  define LOG_THREADS(...)      \
    do {                        \
      if (flags()->log_threads) \
        Report(__VA_ARGS__);    \
    } while (0)
class LeakSuppressionContext {
  bool parsed = false;
  SuppressionContext context;
  bool suppressed_stacks_sorted = true;
  InternalMmapVector<u32> suppressed_stacks;
  const LoadedModule *suppress_module = nullptr;

  void LazyInit();
  Suppression *GetSuppressionForAddr(uptr addr);
  bool SuppressInvalid(const StackTrace &stack);
  bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size);

 public:
  LeakSuppressionContext(const char *suppression_types[],
                         int suppression_types_num)
      : context(suppression_types, suppression_types_num) {}

  bool Suppress(u32 stack_trace_id, uptr hit_count, uptr total_size);

  const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
    if (!suppressed_stacks_sorted) {
      suppressed_stacks_sorted = true;
      SortAndDedup(suppressed_stacks);
    }
    return suppressed_stacks;
  }
  void PrintMatchedSuppressions();
};
ALIGNED(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
static LeakSuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = {kSuppressionLeak};
static const char kStdSuppressions[] =
#  if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // definition.
    "leak:*pthread_exit*\n"
#  endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
#  if SANITIZER_APPLE
    // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
    "leak:*_os_trace*\n"
#  endif
    // TLS leak in some glibc versions, described in
    // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
    "leak:*tls_get_addr*\n";

void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder)
      LeakSuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
}

void LeakSuppressionContext::LazyInit() {
  if (!parsed) {
    parsed = true;
    context.ParseFromFile(flags()->suppressions);
    if (&__lsan_default_suppressions)
      context.Parse(__lsan_default_suppressions());
    context.Parse(kStdSuppressions);

    if (flags()->use_tls && flags()->use_ld_allocations)
      suppress_module = GetLinker();
  }
}
Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  const char *module_name = Symbolizer::GetOrInit()->GetModuleNameForPc(addr);
  if (!module_name)
    module_name = "<unknown module>";
  if (context.Match(module_name, kSuppressionLeak, &s))
    return s;

  // Suppress by file or function name.
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
        context.Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  frames->ClearAll();
  return s;
}

static uptr GetCallerPC(const StackTrace &stack) {
  // The top frame is our malloc/calloc/etc. The next frame is the caller.
  if (stack.size >= 2)
    return stack.trace[1];
  return 0;
}
#  if SANITIZER_APPLE
// Objective-C class data pointers are stored with flags in the low bits, so
// they need to be transformed back into something that looks like a pointer.
static inline void *MaybeTransformPointer(void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  if ((ptr & OBJC_FAST_IS_RW) == OBJC_FAST_IS_RW)
    ptr &= OBJC_DATA_MASK;
  return reinterpret_cast<void *>(ptr);
}
#  endif

// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
// covers dynamically allocated TLS blocks, the dynamic loader's internal
// bookkeeping for loaded modules, etc.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by intercepting
// __libc_memalign(). On top of that, there is no easy way to reach them. Their
// addresses are stored in a dynamically allocated array (the DTV) which is
// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
// being reachable from the static TLS, and the dynamic TLS being reachable from
// the DTV. This is because the initial DTV is allocated before our interception
// mechanism kicks in, and thus we don't recognize it as allocated memory. We
// can't special-case it either, since we don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
// On all other platforms, this simply checks to ensure that the caller pc is
// valid before reporting chunks as leaked.
bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) {
  uptr caller_pc = GetCallerPC(stack);
  // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
  // it as reachable, as we can't properly report its allocation stack anyway.
  return !caller_pc ||
         (suppress_module && suppress_module->containsAddress(caller_pc));
}

bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack,
                                            uptr hit_count, uptr total_size) {
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) {
      s->weight += total_size;
      atomic_fetch_add(&s->hit_count, hit_count, memory_order_relaxed);
      return true;
    }
  }
  return false;
}

bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count,
                                      uptr total_size) {
  LazyInit();
  StackTrace stack = StackDepotGet(stack_trace_id);
  if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size))
    return false;
  suppressed_stacks_sorted = false;
  suppressed_stacks.push_back(stack_trace_id);
  return true;
}
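
// Note on the flow above: Suppress() first applies the implicit suppressions
// from SuppressInvalid() (unknown caller PC, or an allocation made from the
// dynamic linker when use_ld_allocations is in effect) and only then consults
// the user-provided "leak:" rules via SuppressByRule(). Every suppressed stack
// id is remembered so that later leak-check reruns can mark chunks with the
// same allocation stack as ignored.
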
static LeakSuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}

static InternalMmapVectorNoCtor<RootRegion> root_regions;

InternalMmapVectorNoCtor<RootRegion> const *GetRootRegions() {
  return &root_regions;
}

void InitCommonLsan() {
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator : public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() {}
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
};

static inline bool MaybeUserPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress)
    return false;
#  if defined(__x86_64__)
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#  elif defined(__mips64)
  return ((p >> 40) == 0);
#  elif defined(__aarch64__)
  // Accept up to 48 bit VMA.
  return ((p >> 48) == 0);
#  elif defined(__loongarch_lp64)
  // Allow 47-bit user-space VMA currently.
  return ((p >> 47) == 0);
#  else
  return true;
#  endif
}

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, (void *)begin,
               (void *)end);
  uptr pp = begin;
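  // Round the scan cursor up to the configured pointer alignment, so that only
  // aligned words are interpreted as potential pointers.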
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {
    void *p = *reinterpret_cast<void **>(pp);
#  if SANITIZER_APPLE
    p = MaybeTransformPointer(p);
#  endif
    if (!MaybeUserPointer(reinterpret_cast<uptr>(p)))
      continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk)
      continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin)
      continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable || m.tag() == kIgnored)
      continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          (void *)pp, p, (void *)chunk, (void *)(chunk + m.requested_size()),
          m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n",
                 (void *)pp, p, (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

// Scans a global range for pointers
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
  uptr allocator_begin = 0, allocator_end = 0;
  GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
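  // Carve the allocator's own global object out of the scan: its internal
  // bookkeeping can hold pointers into heap chunks, and those must not count
  // as roots.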
  if (begin <= allocator_begin && allocator_begin < end) {
    CHECK_LE(allocator_begin, allocator_end);
    CHECK_LE(allocator_end, end);
    if (begin < allocator_begin)
      ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
                           kReachable);
    if (allocator_end < end)
      ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
  } else {
    ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
  }
}

void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
                          Frontier *frontier) {
  for (uptr i = 0; i < ranges.size(); i++) {
    ScanRangeForPointers(ranges[i].begin, ranges[i].end, frontier, "FAKE STACK",
                         kReachable);
  }
}

#  if SANITIZER_FUCHSIA

// Fuchsia handles all threads together with its own callback.
static void ProcessThreads(SuspendedThreadsList const &, Frontier *, tid_t,
                           uptr) {}

#  else

#    if SANITIZER_ANDROID
// FIXME: Move this out into *libcdep.cpp
extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
    pid_t, void (*cb)(void *, void *, uptr, void *), void *);
#    endif

static void ProcessThreadRegistry(Frontier *frontier) {
  InternalMmapVector<uptr> ptrs;
  GetAdditionalThreadContextPtrsLocked(&ptrs);

  for (uptr i = 0; i < ptrs.size(); ++i) {
    void *ptr = reinterpret_cast<void *>(ptrs[i]);
    uptr chunk = PointsIntoChunk(ptr);
    if (!chunk)
      continue;
    LsanMetadata m(chunk);
    if (!m.allocated())
      continue;

    // Mark as reachable and add to frontier.
    LOG_POINTERS("Treating pointer %p from ThreadContext as reachable\n", ptr);
    m.set_tag(kReachable);
    frontier->push_back(chunk);
  }
}
// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier, tid_t caller_tid,
                           uptr caller_sp) {
  InternalMmapVector<uptr> registers;
  InternalMmapVector<Range> extra_ranges;
  for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
    tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %llu.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    DTLS *dtls;
    bool thread_found =
        GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin,
                              &tls_end, &cache_begin, &cache_end, &dtls);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %llu not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    PtraceRegistersStatus have_registers =
        suspended_threads.GetRegistersAndSP(i, &registers, &sp);
    if (have_registers != REGISTERS_AVAILABLE) {
      Report("Unable to get registers from thread %llu.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable unless
      // GetRegistersAndSP failed with ESRCH.
      if (have_registers == REGISTERS_UNAVAILABLE_FATAL)
        continue;
      sp = stack_begin;
    }
    if (suspended_threads.GetThreadID(i) == caller_tid) {
      sp = caller_sp;
    }

    if (flags()->use_registers && have_registers) {
      uptr registers_begin = reinterpret_cast<uptr>(registers.data());
      uptr registers_end =
          reinterpret_cast<uptr>(registers.data() + registers.size());
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);
    }

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", (void *)stack_begin,
                  (void *)stack_end, (void *)sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack, or swapcontext was used).
        // Again, consider the entire stack range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
        uptr page_size = GetPageSizeCached();
        int skipped = 0;
        while (stack_begin < stack_end &&
               !IsAccessibleMemoryRange(stack_begin, 1)) {
          skipped++;
          stack_begin += page_size;
        }
        LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
                    skipped, (void *)stack_begin, (void *)stack_end);
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      extra_ranges.clear();
      GetThreadExtraStackRangesLocked(os_id, &extra_ranges);
      ScanExtraStackRanges(extra_ranges, frontier);
    }

    if (flags()->use_tls) {
      if (tls_begin) {
        LOG_THREADS("TLS at %p-%p.\n", (void *)tls_begin, (void *)tls_end);
        // If the TLS and cache ranges don't overlap, scan the full TLS range;
        // otherwise, scan only the non-overlapping portions.
        if (cache_begin == cache_end || tls_end < cache_begin ||
            tls_begin > cache_end) {
          ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
        } else {
          if (tls_begin < cache_begin)
            ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                                 kReachable);
          if (tls_end > cache_end)
            ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
                                 kReachable);
        }
      }
#    if SANITIZER_ANDROID
      auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_id*/,
                     void *arg) -> void {
        ScanRangeForPointers(reinterpret_cast<uptr>(dtls_begin),
                             reinterpret_cast<uptr>(dtls_end),
                             reinterpret_cast<Frontier *>(arg), "DTLS",
                             kReachable);
      };

      // FIXME: There might be a race-condition here (and in Bionic) if the
      // thread is suspended in the middle of updating its DTLS. IOWs, we
      // could scan already freed memory. (probably fine for now)
      __libc_iterate_dynamic_tls(os_id, cb, frontier);
#    else
      if (dtls && !DTLSInDestruction(dtls)) {
        ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
          uptr dtls_beg = dtv.beg;
          uptr dtls_end = dtls_beg + dtv.size;
          if (dtls_beg < dtls_end) {
            LOG_THREADS("DTLS %d at %p-%p.\n", id, (void *)dtls_beg,
                        (void *)dtls_end);
            ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
                                 kReachable);
          }
        });
      } else {
        // We are handling a thread with DTLS under destruction. Log about
        // this and continue.
        LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
      }
#    endif
    }
  }

  // Add pointers reachable from ThreadContexts
  ProcessThreadRegistry(frontier);
}

#  endif  // SANITIZER_FUCHSIA
void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
                    uptr region_begin, uptr region_end, bool is_readable) {
  uptr intersection_begin = Max(root_region.begin, region_begin);
  uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
  if (intersection_begin >= intersection_end)
    return;
  LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
               (void *)root_region.begin,
               (void *)(root_region.begin + root_region.size),
               (void *)region_begin, (void *)region_end,
               is_readable ? "readable" : "unreadable");
  if (is_readable)
    ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
                         kReachable);
}

static void ProcessRootRegion(Frontier *frontier,
                              const RootRegion &root_region) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  MemoryMappedSegment segment;
  while (proc_maps.Next(&segment)) {
    ScanRootRegion(frontier, root_region, segment.start, segment.end,
                   segment.IsReadable());
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions)
    return;
  for (uptr i = 0; i < root_regions.size(); i++)
    ProcessRootRegion(frontier, root_regions[i]);
}

static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}

static void IgnoredSuppressedCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated() || m.tag() == kIgnored)
    return;

  const InternalMmapVector<u32> &suppressed =
      *static_cast<const InternalMmapVector<u32> *>(arg);
  uptr idx = InternalLowerBound(suppressed, m.stack_trace_id());
  if (idx >= suppressed.size() || m.stack_trace_id() != suppressed[idx])
    return;

  LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", (void *)chunk,
               (void *)(chunk + m.requested_size()), m.requested_size());
  m.set_tag(kIgnored);
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n", (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
                              Frontier *frontier, tid_t caller_tid,
                              uptr caller_sp) {
  const InternalMmapVector<u32> &suppressed_stacks =
      GetSuppressionContext()->GetSortedSuppressedStacks();
  if (!suppressed_stacks.empty()) {
    ForEachChunk(IgnoredSuppressedCb,
                 const_cast<InternalMmapVector<u32> *>(&suppressed_stacks));
  }
  ForEachChunk(CollectIgnoredCb, frontier);
  ProcessGlobalRegions(frontier);
  ProcessThreads(suspended_threads, frontier, caller_tid, caller_sp);
  ProcessRootRegions(frontier);
  FloodFillTag(frontier, kReachable);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(frontier);
  FloodFillTag(frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}
// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  (void)arg;
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}

// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakedChunks *leaks = reinterpret_cast<LeakedChunks *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated())
    return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked)
    leaks->push_back({chunk, m.stack_trace_id(), m.requested_size(), m.tag()});
}

void LeakSuppressionContext::PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched;
  context.GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++) {
    Printf("%7zu %10zu %s\n",
           static_cast<uptr>(atomic_load_relaxed(&matched[i]->hit_count)),
           matched[i]->weight, matched[i]->templ);
  }
  Printf("%s\n\n", line);
}

#  if SANITIZER_FUCHSIA

// Fuchsia provides a libc interface that guarantees all threads are
// covered, and SuspendedThreadList is never really used.
static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}

#  else  // !SANITIZER_FUCHSIA

static void ReportUnsuspendedThreads(
    const SuspendedThreadsList &suspended_threads) {
  InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
  for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
    threads[i] = suspended_threads.GetThreadID(i);

  Sort(threads.data(), threads.size());

  InternalMmapVector<tid_t> unsuspended;
  GetRunningThreadsLocked(&unsuspended);

  for (auto os_id : unsuspended) {
    uptr i = InternalLowerBound(threads, os_id);
    if (i >= threads.size() || threads[i] != os_id)
      Report(
          "Running thread %zu was not suspended. False leaks are possible.\n",
          os_id);
  }
}

#  endif  // !SANITIZER_FUCHSIA

static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ReportUnsuspendedThreads(suspended_threads);
  ClassifyAllChunks(suspended_threads, &param->frontier, param->caller_tid,
                    param->caller_sp);
  ForEachChunk(CollectLeaksCb, &param->leaks);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}
static bool PrintResults(LeakReport &report) {
  uptr unsuppressed_count = report.UnsuppressedLeakCount();
  if (unsuppressed_count) {
    Decorator d;
    Printf(
        "\n"
        "================================================================="
        "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.Default());
    report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    GetSuppressionContext()->PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    report.PrintSummary();
    return true;
  }
  return false;
}

static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off()) {
    VReport(1, "LeakSanitizer is disabled");
    return false;
  }
  VReport(1, "LeakSanitizer: checking for leaks");
  // Inside LockStuffAndStopTheWorld we can't run symbolizer, so we can't match
  // suppressions. However if a stack id was previously suppressed, it should be
  // suppressed in future checks as well.
  for (int i = 0;; ++i) {
    EnsureMainThreadIDIsCorrect();
    CheckForLeaksParam param;
    // Capture the calling thread's stack pointer early, to avoid false
    // negatives. An old frame with dead pointers might be overlapped by a new
    // frame inside CheckForLeaks that does not reuse those bytes before the
    // threads are suspended and stack pointers are captured.
    param.caller_tid = GetTid();
    param.caller_sp = reinterpret_cast<uptr>(__builtin_frame_address(0));
    LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
    if (!param.success) {
      Report("LeakSanitizer has encountered a fatal error.\n");
      Report(
          "HINT: For debugging, try setting environment variable "
          "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
      Report(
          "HINT: LeakSanitizer does not work under ptrace (strace, gdb, "
          "etc)\n");
      Die();
    }
    LeakReport leak_report;
    leak_report.AddLeakedChunks(param.leaks);

    // No new suppression stacks, so a rerun will not help and we can report.
    if (!leak_report.ApplySuppressions())
      return PrintResults(leak_report);

    // No indirect leaks to report, so we are done here.
    if (!leak_report.IndirectUnsuppressedLeakCount())
      return PrintResults(leak_report);

    if (i >= 8) {
      Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
      return PrintResults(leak_report);
    }

    // We found a new previously unseen suppressed call stack. Rerun to make
    // sure it does not hold indirect leaks.
    VReport(1, "Rerun with %zu suppressed stacks.",
            GetSuppressionContext()->GetSortedSuppressedStacks().size());
  }
}

static bool has_reported_leaks = false;
bool HasReportedLeaks() { return has_reported_leaks; }

void DoLeakCheck() {
  Lock l(&global_mutex);
  static bool already_done;
  if (already_done)
    return;
  already_done = true;
  has_reported_leaks = CheckForLeaks();
  if (has_reported_leaks)
    HandleLeaks();
}

static int DoRecoverableLeakCheck() {
  Lock l(&global_mutex);
  bool have_leaks = CheckForLeaks();
  return have_leaks ? 1 : 0;
}

void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by moving logic into DedupLeaks.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) {
  for (const LeakedChunk &leak : chunks) {
    uptr chunk = leak.chunk;
    u32 stack_trace_id = leak.stack_trace_id;
    uptr leaked_size = leak.leaked_size;
    ChunkTag tag = leak.tag;
    CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
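    // When the "resolution" flag is set to N, the allocation stack is
    // truncated to its top N frames before deduplication, so leaks sharing
    // those frames are merged into a single report entry.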
    if (u32 resolution = flags()->resolution) {
      StackTrace stack = StackDepotGet(stack_trace_id);
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    }
    bool is_directly_leaked = (tag == kDirectlyLeaked);
    uptr i;
    for (i = 0; i < leaks_.size(); i++) {
      if (leaks_[i].stack_trace_id == stack_trace_id &&
          leaks_[i].is_directly_leaked == is_directly_leaked) {
        leaks_[i].hit_count++;
        leaks_[i].total_size += leaked_size;
        break;
      }
    }
    if (i == leaks_.size()) {
      if (leaks_.size() == kMaxLeaksConsidered)
        return;
      Leak leak = {next_id_++,         /* hit_count */ 1,
                   leaked_size,        stack_trace_id,
                   is_directly_leaked, /* is_suppressed */ false};
      leaks_.push_back(leak);
    }
    if (flags()->report_objects) {
      LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
      leaked_objects_.push_back(obj);
    }
  }
}

static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf(
        "Too many leaks! Only the first %zu leaks encountered will be "
        "reported.\n",
        kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  Sort(leaks_.data(), leaks_.size(), &LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed)
      continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report)
      break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.Default());

  CHECK(leaks_[index].stack_trace_id);
  StackDepotGet(leaks_[index].stack_trace_id).Print();

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", (void *)leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}
void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed)
      continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary;
  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
                 allocations);
  ReportErrorSummary(summary.data());
}

uptr LeakReport::ApplySuppressions() {
  LeakSuppressionContext *suppressions = GetSuppressionContext();
  uptr new_suppressions = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (suppressions->Suppress(leaks_[i].stack_trace_id, leaks_[i].hit_count,
                               leaks_[i].total_size)) {
      leaks_[i].is_suppressed = true;
      ++new_suppressions;
    }
  }
  return new_suppressions;
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed)
      result++;
  return result;
}

uptr LeakReport::IndirectUnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed && !leaks_[i].is_directly_leaked)
      result++;
  return result;
}

}  // namespace __lsan

#else   // CAN_SANITIZE_LEAKS
namespace __lsan {
void InitCommonLsan() {}
void DoLeakCheck() {}
void DoRecoverableLeakCheckVoid() {}
void DisableInThisThread() {}
void EnableInThisThread() {}
}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS
using namespace __lsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  Lock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1,
            "__lsan_ignore_object(): "
            "heap object at %p is already being ignored\n",
            p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}
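
// Illustrative usage only (not part of this file): a minimal sketch of how a
// client pins an intentionally-leaked object, assuming it includes the public
// header <sanitizer/lsan_interface.h>:
//
//   void *cache = malloc(256);    // deliberately lives until process exit
//   __lsan_ignore_object(cache);  // LSan will not report this chunk
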
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  Lock l(&global_mutex);
  RootRegion region = {reinterpret_cast<uptr>(begin), size};
  root_regions.push_back(region);
  VReport(1, "Registered root region at %p of size %zu\n", begin, size);
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  Lock l(&global_mutex);
  bool removed = false;
  for (uptr i = 0; i < root_regions.size(); i++) {
    RootRegion region = root_regions[i];
    if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
      removed = true;
      uptr last_index = root_regions.size() - 1;
      root_regions[i] = root_regions[last_index];
      root_regions.pop_back();
      VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %zu has not "
        "been registered.\n",
        begin, size);
    Die();
  }
#endif  // CAN_SANITIZE_LEAKS
}
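
// Illustrative usage only (not part of this file): a sketch of treating a
// custom arena as part of the root set, assuming <sanitizer/lsan_interface.h>
// is included and kArenaSize is a placeholder constant. Note that the region
// must be unregistered with exactly the same (begin, size) pair, or the
// unregister call above reports an error and dies:
//
//   void *arena = mmap(nullptr, kArenaSize, PROT_READ | PROT_WRITE,
//                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
//   __lsan_register_root_region(arena, kArenaSize);
//   ...  // heap pointers stored inside the arena keep their chunks reachable
//   __lsan_unregister_root_region(arena, kArenaSize);
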
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::DisableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  __lsan::EnableInThisThread();
#endif
}
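
// Note: __lsan_disable()/__lsan_enable() act per thread and are expected to be
// called in matching pairs; an unmatched __lsan_enable() is reported via
// DisableCounterUnderflow() above. The public header also provides
// __lsan::ScopedDisabler as a RAII wrapper around this pair.
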
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
  return 0;
}

SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
  return "";
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_WEAK_DEF(int, __lsan_is_turned_off, void) {
  return 0;
}

SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_suppressions, void) {
  return "";
}
#endif
}  // extern "C"