//=-- lsan_common.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

#if CAN_SANITIZE_LEAKS

#  if SANITIZER_APPLE
// https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L127
#    if SANITIZER_IOS && !SANITIZER_IOSSIM
#      define OBJC_DATA_MASK 0x0000007ffffffff8UL
#    else
#      define OBJC_DATA_MASK 0x00007ffffffffff8UL
#    endif
#  endif

namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
static Mutex global_mutex;

void LockGlobal() SANITIZER_ACQUIRE(global_mutex) { global_mutex.Lock(); }
void UnlockGlobal() SANITIZER_RELEASE(global_mutex) { global_mutex.Unlock(); }

Flags lsan_flags;

void DisableCounterUnderflow() {
  if (common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
}

void Flags::SetDefaults() {
#  define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#  include "lsan_flags.inc"
#  undef LSAN_FLAG
}

void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#  define LSAN_FLAG(Type, Name, DefaultValue, Description) \
    RegisterFlag(parser, #Name, Description, &f->Name);
#  include "lsan_flags.inc"
#  undef LSAN_FLAG
}

#  define LOG_POINTERS(...)      \
    do {                         \
      if (flags()->log_pointers) \
        Report(__VA_ARGS__);     \
    } while (0)

#  define LOG_THREADS(...)      \
    do {                        \
      if (flags()->log_threads) \
        Report(__VA_ARGS__);    \
    } while (0)

class LeakSuppressionContext {
  bool parsed = false;
  SuppressionContext context;
  bool suppressed_stacks_sorted = true;
  InternalMmapVector<u32> suppressed_stacks;
  const LoadedModule *suppress_module = nullptr;

  void LazyInit();
  Suppression *GetSuppressionForAddr(uptr addr);
  bool SuppressInvalid(const StackTrace &stack);
  bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size);

 public:
  LeakSuppressionContext(const char *suppression_types[],
                         int suppression_types_num)
      : context(suppression_types, suppression_types_num) {}
  bool Suppress(u32 stack_trace_id, uptr hit_count, uptr total_size);

  const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
    if (!suppressed_stacks_sorted) {
      suppressed_stacks_sorted = true;
      SortAndDedup(suppressed_stacks);
    }
    return suppressed_stacks;
  }
  void PrintMatchedSuppressions();
};

ALIGNED(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
static LeakSuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = {kSuppressionLeak};
static const char kStdSuppressions[] =
#  if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // definition.
    "leak:*pthread_exit*\n"
#  endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
#  if SANITIZER_APPLE
    // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
    "leak:*_os_trace*\n"
#  endif
    // TLS leak in some glibc versions, described in
    // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
    "leak:*tls_get_addr*\n";

void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder)
      LeakSuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
}

void LeakSuppressionContext::LazyInit() {
  if (!parsed) {
    parsed = true;
    context.ParseFromFile(flags()->suppressions);
    if (&__lsan_default_suppressions)
      context.Parse(__lsan_default_suppressions());
    context.Parse(kStdSuppressions);

    if (flags()->use_tls && flags()->use_ld_allocations)
      suppress_module = GetLinker();
  }
}

Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  const char *module_name = Symbolizer::GetOrInit()->GetModuleNameForPc(addr);
  if (!module_name)
    module_name = "<unknown module>";
  if (context.Match(module_name, kSuppressionLeak, &s))
    return s;

  // Suppress by file or function name.
  SymbolizedStackHolder symbolized_stack(
      Symbolizer::GetOrInit()->SymbolizePC(addr));
  const SymbolizedStack *frames = symbolized_stack.get();
  for (const SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
        context.Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  return s;
}

static uptr GetCallerPC(const StackTrace &stack) {
  // The top frame is our malloc/calloc/etc. The next frame is the caller.
  if (stack.size >= 2)
    return stack.trace[1];
  return 0;
}

#  if SANITIZER_APPLE
// Several pointers in the Objective-C runtime (method cache and class_rw_t,
// for example) are tagged with additional bits we need to strip.
static inline void *TransformPointer(void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return reinterpret_cast<void *>(ptr & OBJC_DATA_MASK);
}
#  endif

// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
// modules accounting etc.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by intercepting
// __libc_memalign(). On top of that, there is no easy way to reach them. Their
// addresses are stored in a dynamically allocated array (the DTV) which is
// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
// being reachable from the static TLS, and the dynamic TLS being reachable from
// the DTV. This is because the initial DTV is allocated before our interception
// mechanism kicks in, and thus we don't recognize it as allocated memory. We
// can't special-case it either, since we don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
// On all other platforms, this simply checks to ensure that the caller pc is
// valid before reporting chunks as leaked.
bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) {
  uptr caller_pc = GetCallerPC(stack);
  // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
  // it as reachable, as we can't properly report its allocation stack anyway.
  return !caller_pc ||
         (suppress_module && suppress_module->containsAddress(caller_pc));
}

bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack,
                                            uptr hit_count, uptr total_size) {
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) {
      s->weight += total_size;
      atomic_fetch_add(&s->hit_count, hit_count, memory_order_relaxed);
      return true;
    }
  }
  return false;
}

bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count,
                                      uptr total_size) {
  LazyInit();
  StackTrace stack = StackDepotGet(stack_trace_id);
  if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size))
    return false;
  suppressed_stacks_sorted = false;
  suppressed_stacks.push_back(stack_trace_id);
  return true;
}

static LeakSuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}

void InitCommonLsan() {
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator : public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() {}
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
};

static inline bool MaybeUserPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress)
    return false;
#  if defined(__x86_64__)
  // TODO: support LAM48 and 5 level page tables.
  // LAM_U57 mask format
  // * top byte: 0x81 because the format is: [0] [6-bit tag] [0]
  // * top-1 byte: 0xff because it should be 0
  // * top-2 byte: 0x80 because Linux uses 128 TB VMA ending at 0x7fffffffffff
  constexpr uptr kLAM_U57Mask = 0x81ff80;
  constexpr uptr kPointerMask = kLAM_U57Mask << 40;
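  // Shifted into place, the mask is 0x81ff800000000000: bits 63, 56, 55-48 and
  // 47 must be clear for a plausible user pointer, while bits 62-57 may hold a
  // LAM_U57 tag.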
  return ((p & kPointerMask) == 0);
#  elif defined(__mips64)
  return ((p >> 40) == 0);
#  elif defined(__aarch64__)
  // TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in
  // address translation and can be used to store a tag.
  constexpr uptr kPointerMask = 255ULL << 48;
  // Accept up to 48 bit VMA.
  return ((p & kPointerMask) == 0);
#  elif defined(__loongarch_lp64)
  // Allow 47-bit user-space VMA at current.
  return ((p >> 47) == 0);
#  else
  return true;
#  endif
}

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, (void *)begin,
               (void *)end);
  uptr pp = begin;
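  // Round the scan cursor up to the configured pointer alignment.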
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {
    void *p = *reinterpret_cast<void **>(pp);
#  if SANITIZER_APPLE
    p = TransformPointer(p);
#  endif
    if (!MaybeUserPointer(reinterpret_cast<uptr>(p)))
      continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk)
      continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin)
      continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable || m.tag() == kIgnored)
      continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          (void *)pp, p, (void *)chunk, (void *)(chunk + m.requested_size()),
          m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n",
                 (void *)pp, p, (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

// Scans a global range for pointers
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
  uptr allocator_begin = 0, allocator_end = 0;
  GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
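  // Exclude the range occupied by the allocator object itself, so its internal
  // bookkeeping is not treated as a source of live pointers.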
  if (begin <= allocator_begin && allocator_begin < end) {
    CHECK_LE(allocator_begin, allocator_end);
    CHECK_LE(allocator_end, end);
    if (begin < allocator_begin)
      ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
                           kReachable);
    if (allocator_end < end)
      ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
  } else {
    ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
  }
}

void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
                          Frontier *frontier) {
  for (uptr i = 0; i < ranges.size(); i++) {
    ScanRangeForPointers(ranges[i].begin, ranges[i].end, frontier, "FAKE STACK",
                         kReachable);
  }
}

#  if SANITIZER_FUCHSIA
// Fuchsia handles all threads together with its own callback.
static void ProcessThreads(SuspendedThreadsList const &, Frontier *, tid_t,
                           uptr) {}

#  else

#    if SANITIZER_ANDROID
// FIXME: Move this out into *libcdep.cpp
extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
    pid_t, void (*cb)(void *, void *, uptr, void *), void *);
#    endif

static void ProcessThreadRegistry(Frontier *frontier) {
  InternalMmapVector<uptr> ptrs;
  GetAdditionalThreadContextPtrsLocked(&ptrs);

  for (uptr i = 0; i < ptrs.size(); ++i) {
    void *ptr = reinterpret_cast<void *>(ptrs[i]);
    uptr chunk = PointsIntoChunk(ptr);
    if (!chunk)
      continue;
    LsanMetadata m(chunk);
    if (!m.allocated())
      continue;

    // Mark as reachable and add to frontier.
    LOG_POINTERS("Treating pointer %p from ThreadContext as reachable\n", ptr);
    m.set_tag(kReachable);
    frontier->push_back(chunk);
  }
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier, tid_t caller_tid,
                           uptr caller_sp) {
  InternalMmapVector<uptr> registers;
  InternalMmapVector<Range> extra_ranges;
  for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
    tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %llu.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    DTLS *dtls;
    bool thread_found =
        GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin,
                              &tls_end, &cache_begin, &cache_end, &dtls);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %llu not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    PtraceRegistersStatus have_registers =
        suspended_threads.GetRegistersAndSP(i, &registers, &sp);
    if (have_registers != REGISTERS_AVAILABLE) {
      Report("Unable to get registers from thread %llu.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable unless
      // GetRegistersAndSP failed with ESRCH.
      if (have_registers == REGISTERS_UNAVAILABLE_FATAL)
        continue;
      sp = stack_begin;
    }
    if (suspended_threads.GetThreadID(i) == caller_tid) {
      sp = caller_sp;
    }
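    // For the thread that initiated the leak check, use the stack pointer it
    // recorded before stopping the world, so stale pointers left in the leak
    // checker's own frames are not scanned.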
    if (flags()->use_registers && have_registers) {
      uptr registers_begin = reinterpret_cast<uptr>(registers.data());
      uptr registers_end =
          reinterpret_cast<uptr>(registers.data() + registers.size());
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);
    }

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", (void *)stack_begin,
                  (void *)stack_end, (void *)sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack, or swapcontext was used).
        // Again, consider the entire stack range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
        uptr page_size = GetPageSizeCached();
        int skipped = 0;
        while (stack_begin < stack_end &&
               !IsAccessibleMemoryRange(stack_begin, 1)) {
          skipped++;
          stack_begin += page_size;
        }
        LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
                    skipped, (void *)stack_begin, (void *)stack_end);
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      extra_ranges.clear();
      GetThreadExtraStackRangesLocked(os_id, &extra_ranges);
      ScanExtraStackRanges(extra_ranges, frontier);
    }

    if (flags()->use_tls) {
      if (tls_begin) {
        LOG_THREADS("TLS at %p-%p.\n", (void *)tls_begin, (void *)tls_end);
        // If the tls and cache ranges don't overlap, scan full tls range,
        // otherwise, only scan the non-overlapping portions
        if (cache_begin == cache_end || tls_end < cache_begin ||
            tls_begin > cache_end) {
          ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
        } else {
          if (tls_begin < cache_begin)
            ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                                 kReachable);
          if (tls_end > cache_end)
            ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
                                 kReachable);
        }
      }
#    if SANITIZER_ANDROID
      auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_idd*/,
                     void *arg) -> void {
        ScanRangeForPointers(reinterpret_cast<uptr>(dtls_begin),
                             reinterpret_cast<uptr>(dtls_end),
                             reinterpret_cast<Frontier *>(arg), "DTLS",
                             kReachable);
      };

      // FIXME: There might be a race-condition here (and in Bionic) if the
      // thread is suspended in the middle of updating its DTLS. IOWs, we
      // could scan already freed memory. (probably fine for now)
      __libc_iterate_dynamic_tls(os_id, cb, frontier);
#    else
      if (dtls && !DTLSInDestruction(dtls)) {
        ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
          uptr dtls_beg = dtv.beg;
          uptr dtls_end = dtls_beg + dtv.size;
          if (dtls_beg < dtls_end) {
            LOG_THREADS("DTLS %d at %p-%p.\n", id, (void *)dtls_beg,
                        (void *)dtls_end);
            ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
                                 kReachable);
          }
        });
      } else {
        // We are handling a thread with DTLS under destruction. Log about
        // this and continue.
        LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
      }
#    endif
    }
  }

  // Add pointers reachable from ThreadContexts
  ProcessThreadRegistry(frontier);
}

#  endif  // SANITIZER_FUCHSIA

// A map that contains [region_begin, region_end) pairs.
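// The mapped value is a registration count, so the same region can be
// registered (and later unregistered) more than once.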
using RootRegions = DenseMap<detail::DenseMapPair<uptr, uptr>, uptr>;

static RootRegions &GetRootRegionsLocked() {
  global_mutex.CheckLocked();
  static RootRegions *regions = nullptr;
  alignas(RootRegions) static char placeholder[sizeof(RootRegions)];
  if (!regions)
    regions = new (placeholder) RootRegions();
  return *regions;
}

bool HasRootRegions() { return !GetRootRegionsLocked().empty(); }

void ScanRootRegions(Frontier *frontier,
                     const InternalMmapVectorNoCtor<Region> &mapped_regions) {
  if (!flags()->use_root_regions)
    return;

  InternalMmapVector<Region> regions;
  GetRootRegionsLocked().forEach([&](const auto &kv) {
    regions.push_back({kv.first.first, kv.first.second});
    return true;
  });

  InternalMmapVector<Region> intersection;
  Intersect(mapped_regions, regions, intersection);

  for (const Region &r : intersection) {
    LOG_POINTERS("Root region intersects with mapped region at %p-%p\n",
                 (void *)r.begin, (void *)r.end);
    ScanRangeForPointers(r.begin, r.end, frontier, "ROOT", kReachable);
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions || !HasRootRegions())
    return;
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  MemoryMappedSegment segment;
  InternalMmapVector<Region> mapped_regions;
  while (proc_maps.Next(&segment))
    if (segment.IsReadable())
      mapped_regions.push_back({segment.start, segment.end});
  ScanRootRegions(frontier, mapped_regions);
}

static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
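  // Worklist loop: scanning a chunk may push newly tagged chunks back onto the
  // frontier, so iterate until it drains.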
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}

static void IgnoredSuppressedCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated() || m.tag() == kIgnored)
    return;

  const InternalMmapVector<u32> &suppressed =
      *static_cast<const InternalMmapVector<u32> *>(arg);
  uptr idx = InternalLowerBound(suppressed, m.stack_trace_id());
  if (idx >= suppressed.size() || m.stack_trace_id() != suppressed[idx])
    return;

  LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", (void *)chunk,
               (void *)(chunk + m.requested_size()), m.requested_size());
  m.set_tag(kIgnored);
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n", (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
                              Frontier *frontier, tid_t caller_tid,
                              uptr caller_sp) {
  const InternalMmapVector<u32> &suppressed_stacks =
      GetSuppressionContext()->GetSortedSuppressedStacks();
  if (!suppressed_stacks.empty()) {
    ForEachChunk(IgnoredSuppressedCb,
                 const_cast<InternalMmapVector<u32> *>(&suppressed_stacks));
  }
  ForEachChunk(CollectIgnoredCb, frontier);
  ProcessGlobalRegions(frontier);
  ProcessThreads(suspended_threads, frontier, caller_tid, caller_sp);
  ProcessRootRegions(frontier);
  FloodFillTag(frontier, kReachable);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(frontier);
  FloodFillTag(frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}

// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  (void)arg;
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}

// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakedChunks *leaks = reinterpret_cast<LeakedChunks *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated())
    return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked)
    leaks->push_back({chunk, m.stack_trace_id(), m.requested_size(), m.tag()});
}

void LeakSuppressionContext::PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched;
  context.GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++) {
    Printf("%7zu %10zu %s\n",
           static_cast<uptr>(atomic_load_relaxed(&matched[i]->hit_count)),
           matched[i]->weight, matched[i]->templ);
  }
  Printf("%s\n\n", line);
}

#  if SANITIZER_FUCHSIA
// Fuchsia provides a libc interface that guarantees all threads are
// covered, and SuspendedThreadList is never really used.
static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}
#  else  // !SANITIZER_FUCHSIA
static void ReportUnsuspendedThreads(
    const SuspendedThreadsList &suspended_threads) {
  InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
  for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
    threads[i] = suspended_threads.GetThreadID(i);

  Sort(threads.data(), threads.size());

  InternalMmapVector<tid_t> unsuspended;
  GetRunningThreadsLocked(&unsuspended);

  for (auto os_id : unsuspended) {
    uptr i = InternalLowerBound(threads, os_id);
    if (i >= threads.size() || threads[i] != os_id)
      Report(
          "Running thread %zu was not suspended. False leaks are possible.\n",
          os_id);
  }
}
#  endif  // !SANITIZER_FUCHSIA

static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ReportUnsuspendedThreads(suspended_threads);
  ClassifyAllChunks(suspended_threads, &param->frontier, param->caller_tid,
                    param->caller_sp);
  ForEachChunk(CollectLeaksCb, &param->leaks);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}

static bool PrintResults(LeakReport &report) {
  uptr unsuppressed_count = report.UnsuppressedLeakCount();
  if (unsuppressed_count) {
    Decorator d;
    Printf(
        "\n"
        "================================================================="
        "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.Default());
    report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    GetSuppressionContext()->PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    report.PrintSummary();
    return true;
  }
  return false;
}

static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off()) {
    VReport(1, "LeakSanitizer is disabled");
    return false;
  }
  VReport(1, "LeakSanitizer: checking for leaks");
  // Inside LockStuffAndStopTheWorld we can't run symbolizer, so we can't match
  // suppressions. However if a stack id was previously suppressed, it should be
  // suppressed in future checks as well.
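  // Hence the retry loop below: rerun the check until applying the accumulated
  // suppressed stacks stops uncovering indirect leaks (bounded at 8 attempts).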
  for (int i = 0;; ++i) {
    EnsureMainThreadIDIsCorrect();
    CheckForLeaksParam param;
    // Capture calling thread's stack pointer early, to avoid false negatives.
    // Old frame with dead pointers might be overlapped by new frame inside
    // CheckForLeaks which does not use bytes with pointers before the
    // threads are suspended and stack pointers captured.
    param.caller_tid = GetTid();
    param.caller_sp = reinterpret_cast<uptr>(__builtin_frame_address(0));
    LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
    if (!param.success) {
      Report("LeakSanitizer has encountered a fatal error.\n");
      Report(
          "HINT: For debugging, try setting environment variable "
          "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
      Report(
          "HINT: LeakSanitizer does not work under ptrace (strace, gdb, "
          "etc)\n");
      Die();
    }
    LeakReport leak_report;
    leak_report.AddLeakedChunks(param.leaks);

    // No new suppressions stacks, so rerun will not help and we can report.
    if (!leak_report.ApplySuppressions())
      return PrintResults(leak_report);

    // No indirect leaks to report, so we are done here.
    if (!leak_report.IndirectUnsuppressedLeakCount())
      return PrintResults(leak_report);

    if (i >= 8) {
      Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
      return PrintResults(leak_report);
    }

    // We found a new previously unseen suppressed call stack. Rerun to make
    // sure it does not hold indirect leaks.
    VReport(1, "Rerun with %zu suppressed stacks.",
            GetSuppressionContext()->GetSortedSuppressedStacks().size());
  }
}

static bool has_reported_leaks = false;
bool HasReportedLeaks() { return has_reported_leaks; }

void DoLeakCheck() {
  Lock l(&global_mutex);
  static bool already_done;
  if (already_done)
    return;
  already_done = true;
  has_reported_leaks = CheckForLeaks();
  if (has_reported_leaks)
    HandleLeaks();
}

static int DoRecoverableLeakCheck() {
  Lock l(&global_mutex);
  bool have_leaks = CheckForLeaks();
  return have_leaks ? 1 : 0;
}

void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by moving logic into DedupLeaks.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) {
  for (const LeakedChunk &leak : chunks) {
    uptr chunk = leak.chunk;
    u32 stack_trace_id = leak.stack_trace_id;
    uptr leaked_size = leak.leaked_size;
    ChunkTag tag = leak.tag;
    CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
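    // If a stack depth limit is configured, truncate the stack so that leaks
    // which differ only in deeper frames are merged into one report.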
    if (u32 resolution = flags()->resolution) {
      StackTrace stack = StackDepotGet(stack_trace_id);
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    }

    bool is_directly_leaked = (tag == kDirectlyLeaked);
    uptr i;
    for (i = 0; i < leaks_.size(); i++) {
      if (leaks_[i].stack_trace_id == stack_trace_id &&
          leaks_[i].is_directly_leaked == is_directly_leaked) {
        leaks_[i].hit_count++;
        leaks_[i].total_size += leaked_size;
        break;
      }
    }
    if (i == leaks_.size()) {
      if (leaks_.size() == kMaxLeaksConsidered)
        return;
      Leak leak = {next_id_++,         /* hit_count */ 1,
                   leaked_size,        stack_trace_id,
                   is_directly_leaked, /* is_suppressed */ false};
      leaks_.push_back(leak);
    }
    if (flags()->report_objects) {
      LeakedObject obj = {leaks_[i].id, GetUserAddr(chunk), leaked_size};
      leaked_objects_.push_back(obj);
    }
  }
}

static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf(
        "Too many leaks! Only the first %zu leaks encountered will be "
        "reported.\n",
        kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  Sort(leaks_.data(), leaks_.size(), &LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed)
      continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report)
      break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.Default());

  CHECK(leaks_[index].stack_trace_id);
  StackDepotGet(leaks_[index].stack_trace_id).Print();

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", (void *)leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed)
      continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary;
  summary.AppendF("%zu byte(s) leaked in %zu allocation(s).", bytes,
                  allocations);
  ReportErrorSummary(summary.data());
}

uptr LeakReport::ApplySuppressions() {
  LeakSuppressionContext *suppressions = GetSuppressionContext();
  uptr new_suppressions = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (suppressions->Suppress(leaks_[i].stack_trace_id, leaks_[i].hit_count,
                               leaks_[i].total_size)) {
      leaks_[i].is_suppressed = true;
      ++new_suppressions;
    }
  }
  return new_suppressions;
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed)
      result++;
  return result;
}

uptr LeakReport::IndirectUnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed && !leaks_[i].is_directly_leaked)
      result++;
  return result;
}

}  // namespace __lsan

#else   // CAN_SANITIZE_LEAKS
namespace __lsan {
void InitCommonLsan() {}
void DoLeakCheck() {}
void DoRecoverableLeakCheckVoid() {}
void DisableInThisThread() {}
void EnableInThisThread() {}
}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS

using namespace __lsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  Lock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObject(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1,
            "__lsan_ignore_object(): "
            "heap object at %p is already being ignored\n",
            p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  VReport(1, "Registered root region at %p of size %zu\n", begin, size);
  uptr b = reinterpret_cast<uptr>(begin);
  uptr e = b + size;
  CHECK_LT(b, e);

  Lock l(&global_mutex);
  ++GetRootRegionsLocked()[{b, e}];
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  uptr b = reinterpret_cast<uptr>(begin);
  uptr e = b + size;
  CHECK_LT(b, e);
  VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);

  {
    Lock l(&global_mutex);
    if (auto *f = GetRootRegionsLocked().find({b, e})) {
      if (--(f->second) == 0)
        GetRootRegionsLocked().erase(f);
      return;
    }
  }
  Report(
      "__lsan_unregister_root_region(): region at %p of size %zu has not "
      "been registered.\n",
      begin, size);
  Die();
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::DisableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  __lsan::EnableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
  return 0;
}

SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
  return "";
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_WEAK_DEF(int, __lsan_is_turned_off, void) {
  return 0;
}

SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_suppressions, void) {
  return "";
}
#endif
}  // extern "C"