//=-- lsan_common.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
Mutex global_mutex;

Flags lsan_flags;
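
// Called when the thread-local "disable" counter underflows, i.e. on a call to
// __lsan_enable() without a matching __lsan_disable().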
void DisableCounterUnderflow() {
  if (common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
}

void Flags::SetDefaults() {
# define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
# include "lsan_flags.inc"
# undef LSAN_FLAG
}

void RegisterLsanFlags(FlagParser *parser, Flags *f) {
# define LSAN_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &f->Name);
# include "lsan_flags.inc"
# undef LSAN_FLAG
}
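
// Debug-logging helpers, gated on the log_pointers and log_threads flags.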
# define LOG_POINTERS(...)     \
  do {                         \
    if (flags()->log_pointers) \
      Report(__VA_ARGS__);     \
  } while (0)

# define LOG_THREADS(...)     \
  do {                        \
    if (flags()->log_threads) \
      Report(__VA_ARGS__);    \
  } while (0)
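
// Matches leak allocation stacks against "leak:..." suppression rules and
// keeps track of which stack ids have been suppressed so far.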
class LeakSuppressionContext {
  bool parsed = false;
  SuppressionContext context;
  bool suppressed_stacks_sorted = true;
  InternalMmapVector<u32> suppressed_stacks;
  const LoadedModule *suppress_module = nullptr;

  void LazyInit();
  Suppression *GetSuppressionForAddr(uptr addr);
  bool SuppressInvalid(const StackTrace &stack);
  bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size);

 public:
  LeakSuppressionContext(const char *suppression_types[],
                         int suppression_types_num)
      : context(suppression_types, suppression_types_num) {}

  bool Suppress(u32 stack_trace_id, uptr hit_count, uptr total_size);

  const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
    if (!suppressed_stacks_sorted) {
      suppressed_stacks_sorted = true;
      SortAndDedup(suppressed_stacks);
    }
    return suppressed_stacks;
  }
  void PrintMatchedSuppressions();
};

ALIGNED(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
static LeakSuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = {kSuppressionLeak};
static const char kStdSuppressions[] =
# if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // definition.
    "leak:*pthread_exit*\n"
# endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
# if SANITIZER_MAC
    // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
    "leak:*_os_trace*\n"
# endif
    // TLS leak in some glibc versions, described in
    // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
    "leak:*tls_get_addr*\n";

void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder)
      LeakSuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
}

void LeakSuppressionContext::LazyInit() {
  if (!parsed) {
    parsed = true;
    context.ParseFromFile(flags()->suppressions);
    if (&__lsan_default_suppressions)
      context.Parse(__lsan_default_suppressions());
    context.Parse(kStdSuppressions);

    if (flags()->use_tls && flags()->use_ld_allocations)
      suppress_module = GetLinker();
  }
}

Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  if (const char *module_name =
          Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
    if (context.Match(module_name, kSuppressionLeak, &s))
      return s;

  // Suppress by file or function name.
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
        context.Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  frames->ClearAll();
  return s;
}

static uptr GetCallerPC(const StackTrace &stack) {
  // The top frame is our malloc/calloc/etc. The next frame is the caller.
  if (stack.size >= 2)
    return stack.trace[1];
  return 0;
}

// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
// covers dynamically allocated TLS blocks, the dynamic loader's internal
// bookkeeping for loaded modules, etc.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by intercepting
// __libc_memalign(). On top of that, there is no easy way to reach them. Their
// addresses are stored in a dynamically allocated array (the DTV) which is
// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
// being reachable from the static TLS, and the dynamic TLS being reachable from
// the DTV. This is because the initial DTV is allocated before our interception
// mechanism kicks in, and thus we don't recognize it as allocated memory. We
// can't special-case it either, since we don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
// On all other platforms, this simply checks to ensure that the caller pc is
// valid before reporting chunks as leaked.
bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) {
  uptr caller_pc = GetCallerPC(stack);
  // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
  // it as reachable, as we can't properly report its allocation stack anyway.
  return !caller_pc ||
         (suppress_module && suppress_module->containsAddress(caller_pc));
}

bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack,
                                            uptr hit_count, uptr total_size) {
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) {
      s->weight += total_size;
      atomic_fetch_add(&s->hit_count, hit_count, memory_order_relaxed);
      return true;
    }
  }
  return false;
}

bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count,
                                      uptr total_size) {
  LazyInit();
  StackTrace stack = StackDepotGet(stack_trace_id);
  if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size))
    return false;
  suppressed_stacks_sorted = false;
  suppressed_stacks.push_back(stack_trace_id);
  return true;
}

static LeakSuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}

static InternalMmapVectorNoCtor<RootRegion> root_regions;

InternalMmapVectorNoCtor<RootRegion> const *GetRootRegions() {
  return &root_regions;
}

void InitCommonLsan() {
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator : public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() {}
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
};

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress)
    return false;
# if defined(__x86_64__)
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
# elif defined(__mips64)
  return ((p >> 40) == 0);
# elif defined(__aarch64__)
  unsigned runtimeVMA = (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
  return ((p >> runtimeVMA) == 0);
# else
  return true;
# endif
}

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, (void *)begin,
               (void *)end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p)))
      continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk)
      continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin)
      continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable || m.tag() == kIgnored)
      continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          (void *)pp, p, (void *)chunk, (void *)(chunk + m.requested_size()),
          m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n",
                 (void *)pp, p, (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

// Scans a global range for pointers
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
  uptr allocator_begin = 0, allocator_end = 0;
  GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
  if (begin <= allocator_begin && allocator_begin < end) {
    CHECK_LE(allocator_begin, allocator_end);
    CHECK_LE(allocator_end, end);
    if (begin < allocator_begin)
      ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
                           kReachable);
    if (allocator_end < end)
      ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
  } else {
    ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
  }
}

void ForEachExtraStackRangeCb(uptr begin, uptr end, void *arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}

# if SANITIZER_FUCHSIA
// Fuchsia handles all threads together with its own callback.
static void ProcessThreads(SuspendedThreadsList const &, Frontier *) {}
# else

#  if SANITIZER_ANDROID
// FIXME: Move this out into *libcdep.cpp
extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
    pid_t, void (*cb)(void *, void *, uptr, void *), void *);
#  endif
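
// Scans pointers stashed in ThreadContexts by platform-specific code
// (GetAdditionalThreadContextPtrs) and marks the chunks they point to as
// reachable.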
static void ProcessThreadRegistry(Frontier *frontier) {
  InternalMmapVector<uptr> ptrs;
  GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      GetAdditionalThreadContextPtrs, &ptrs);

  for (uptr i = 0; i < ptrs.size(); ++i) {
    void *ptr = reinterpret_cast<void *>(ptrs[i]);
    uptr chunk = PointsIntoChunk(ptr);
    if (!chunk)
      continue;
    LsanMetadata m(chunk);
    if (!m.allocated())
      continue;

    // Mark as reachable and add to frontier.
    LOG_POINTERS("Treating pointer %p from ThreadContext as reachable\n", ptr);
    m.set_tag(kReachable);
    frontier->push_back(chunk);
  }
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalMmapVector<uptr> registers;
  for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
    tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %llu.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    DTLS *dtls;
    bool thread_found =
        GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin,
                              &tls_end, &cache_begin, &cache_end, &dtls);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %llu not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    PtraceRegistersStatus have_registers =
        suspended_threads.GetRegistersAndSP(i, &registers, &sp);
    if (have_registers != REGISTERS_AVAILABLE) {
      Report("Unable to get registers from thread %llu.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable unless
      // GetRegistersAndSP failed with ESRCH.
      if (have_registers == REGISTERS_UNAVAILABLE_FATAL)
        continue;
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers) {
      uptr registers_begin = reinterpret_cast<uptr>(registers.data());
      uptr registers_end =
          reinterpret_cast<uptr>(registers.data() + registers.size());
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);
    }

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", (void *)stack_begin,
                  (void *)stack_end, (void *)sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack, or swapcontext was used).
        // Again, consider the entire stack range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
        uptr page_size = GetPageSizeCached();
        int skipped = 0;
        while (stack_begin < stack_end &&
               !IsAccessibleMemoryRange(stack_begin, 1)) {
          skipped++;
          stack_begin += page_size;
        }
        LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
                    skipped, (void *)stack_begin, (void *)stack_end);
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    if (flags()->use_tls) {
      if (tls_begin) {
        LOG_THREADS("TLS at %p-%p.\n", (void *)tls_begin, (void *)tls_end);
        // If the tls and cache ranges don't overlap, scan full tls range,
        // otherwise, only scan the non-overlapping portions
        if (cache_begin == cache_end || tls_end < cache_begin ||
            tls_begin > cache_end) {
          ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
        } else {
          if (tls_begin < cache_begin)
            ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                                 kReachable);
          if (tls_end > cache_end)
            ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
                                 kReachable);
        }
      }
#  if SANITIZER_ANDROID
      auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_idd*/,
                     void *arg) -> void {
        ScanRangeForPointers(reinterpret_cast<uptr>(dtls_begin),
                             reinterpret_cast<uptr>(dtls_end),
                             reinterpret_cast<Frontier *>(arg), "DTLS",
                             kReachable);
      };

      // FIXME: There might be a race-condition here (and in Bionic) if the
      // thread is suspended in the middle of updating its DTLS. IOWs, we
      // could scan already freed memory. (probably fine for now)
      __libc_iterate_dynamic_tls(os_id, cb, frontier);
#  else
      if (dtls && !DTLSInDestruction(dtls)) {
        ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
          uptr dtls_beg = dtv.beg;
          uptr dtls_end = dtls_beg + dtv.size;
          if (dtls_beg < dtls_end) {
            LOG_THREADS("DTLS %d at %p-%p.\n", id, (void *)dtls_beg,
                        (void *)dtls_end);
            ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
                                 kReachable);
          }
        });
      } else {
        // We are handling a thread with DTLS under destruction. Log about
        // this and continue.
        LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
      }
#  endif
    }
  }

  // Add pointers reachable from ThreadContexts
  ProcessThreadRegistry(frontier);
}

# endif  // SANITIZER_FUCHSIA
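
// Scans the intersection of a root region and a mapped memory segment for
// heap pointers.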
void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
                    uptr region_begin, uptr region_end, bool is_readable) {
  uptr intersection_begin = Max(root_region.begin, region_begin);
  uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
  if (intersection_begin >= intersection_end)
    return;
  LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
               (void *)root_region.begin,
               (void *)(root_region.begin + root_region.size),
               (void *)region_begin, (void *)region_end,
               is_readable ? "readable" : "unreadable");
  if (is_readable)
    ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
                         kReachable);
}
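
// Intersects a single root region with every segment of the current memory
// map.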
static void ProcessRootRegion(Frontier *frontier,
                              const RootRegion &root_region) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  MemoryMappedSegment segment;
  while (proc_maps.Next(&segment)) {
    ScanRootRegion(frontier, root_region, segment.start, segment.end,
                   segment.IsReadable());
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions)
    return;
  for (uptr i = 0; i < root_regions.size(); i++)
    ProcessRootRegion(frontier, root_regions[i]);
}
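
// Drains |frontier|, scanning each chunk for pointers into other chunks;
// newly discovered chunks are tagged with |tag| and pushed back onto the
// frontier.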
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}
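
// ForEachChunk callback. Marks chunks whose allocation stack matches one of
// the previously suppressed stacks (passed via |arg|) as ignored.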
static void IgnoredSuppressedCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated() || m.tag() == kIgnored)
    return;

  const InternalMmapVector<u32> &suppressed =
      *static_cast<const InternalMmapVector<u32> *>(arg);
  uptr idx = InternalLowerBound(suppressed, m.stack_trace_id());
  if (idx >= suppressed.size() || m.stack_trace_id() != suppressed[idx])
    return;

  LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", (void *)chunk,
               (void *)(chunk + m.requested_size()), m.requested_size());
  m.set_tag(kIgnored);
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n", (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
                              Frontier *frontier) {
  const InternalMmapVector<u32> &suppressed_stacks =
      GetSuppressionContext()->GetSortedSuppressedStacks();
  if (!suppressed_stacks.empty()) {
    ForEachChunk(IgnoredSuppressedCb,
                 const_cast<InternalMmapVector<u32> *>(&suppressed_stacks));
  }
  ForEachChunk(CollectIgnoredCb, frontier);
  ProcessGlobalRegions(frontier);
  ProcessThreads(suspended_threads, frontier);
  ProcessRootRegions(frontier);
  FloodFillTag(frontier, kReachable);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(frontier);
  FloodFillTag(frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}

// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  (void)arg;
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}

// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakedChunks *leaks = reinterpret_cast<LeakedChunks *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated())
    return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked)
    leaks->push_back({chunk, m.stack_trace_id(), m.requested_size(), m.tag()});
}

void LeakSuppressionContext::PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched;
  context.GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++) {
    Printf("%7zu %10zu %s\n",
           static_cast<uptr>(atomic_load_relaxed(&matched[i]->hit_count)),
           matched[i]->weight, matched[i]->templ);
  }
  Printf("%s\n\n", line);
}
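
// Thread-registry callback. Warns about running threads that are missing from
// the suspended set, since their stacks were not scanned and false leaks are
// possible.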
static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
  const InternalMmapVector<tid_t> &suspended_threads =
      *(const InternalMmapVector<tid_t> *)arg;
  if (tctx->status == ThreadStatusRunning) {
    uptr i = InternalLowerBound(suspended_threads, tctx->os_id);
    if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
      Report(
          "Running thread %llu was not suspended. False leaks are possible.\n",
          tctx->os_id);
  }
}

# if SANITIZER_FUCHSIA
// Fuchsia provides a libc interface that guarantees all threads are
// covered, and SuspendedThreadList is never really used.
static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}
# else  // !SANITIZER_FUCHSIA
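// Compares the set of suspended threads against the thread registry and
// reports any thread that is still marked as running.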
static void ReportUnsuspendedThreads(
    const SuspendedThreadsList &suspended_threads) {
  InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
  for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
    threads[i] = suspended_threads.GetThreadID(i);

  Sort(threads.data(), threads.size());

  GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      &ReportIfNotSuspended, &threads);
}
# endif  // !SANITIZER_FUCHSIA
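
// Stop-the-world callback: classifies all chunks, collects the leaked ones
// into |param->leaks|, and resets chunk tags for subsequent checks.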
static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ReportUnsuspendedThreads(suspended_threads);
  ClassifyAllChunks(suspended_threads, &param->frontier);
  ForEachChunk(CollectLeaksCb, &param->leaks);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}
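
// Prints the leak report and any matched suppressions. Returns true if there
// were unsuppressed leaks.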
static bool PrintResults(LeakReport &report) {
  uptr unsuppressed_count = report.UnsuppressedLeakCount();
  if (unsuppressed_count) {
    Decorator d;
    Printf(
        "\n"
        "================================================================="
        "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.Default());
    report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    GetSuppressionContext()->PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    report.PrintSummary();
    return true;
  }
  return false;
}
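
// Runs the leak check, repeating it (a bounded number of times) while new
// suppressed stacks keep appearing; returns true if unsuppressed leaks were
// reported.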
static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return false;
  // Inside LockStuffAndStopTheWorld we can't run symbolizer, so we can't match
  // suppressions. However if a stack id was previously suppressed, it should be
  // suppressed in future checks as well.
  for (int i = 0;; ++i) {
    EnsureMainThreadIDIsCorrect();
    CheckForLeaksParam param;
    LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
    if (!param.success) {
      Report("LeakSanitizer has encountered a fatal error.\n");
      Report(
          "HINT: For debugging, try setting environment variable "
          "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
      Report(
          "HINT: LeakSanitizer does not work under ptrace (strace, gdb, "
          "etc)\n");
      Die();
    }
    LeakReport leak_report;
    leak_report.AddLeakedChunks(param.leaks);

    // No new suppressed stacks, so a rerun will not help and we can report.
    if (!leak_report.ApplySuppressions())
      return PrintResults(leak_report);

    // No indirect leaks to report, so we are done here.
    if (!leak_report.IndirectUnsuppressedLeakCount())
      return PrintResults(leak_report);

    if (i >= 8) {
      Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
      return PrintResults(leak_report);
    }

    // We found a new previously unseen suppressed call stack. Rerun to make
    // sure it does not hold indirect leaks.
    VReport(1, "Rerun with %zu suppressed stacks.",
            GetSuppressionContext()->GetSortedSuppressedStacks().size());
  }
}

static bool has_reported_leaks = false;
bool HasReportedLeaks() { return has_reported_leaks; }
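
// Performs the full leak check at most once per process; if leaks were
// reported, hands control to HandleLeaks().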
void DoLeakCheck() {
  Lock l(&global_mutex);
  static bool already_done;
  if (already_done)
    return;
  already_done = true;
  has_reported_leaks = CheckForLeaks();
  if (has_reported_leaks)
    HandleLeaks();
}

static int DoRecoverableLeakCheck() {
  Lock l(&global_mutex);
  bool have_leaks = CheckForLeaks();
  return have_leaks ? 1 : 0;
}

void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by moving logic into DedupLeaks.
const uptr kMaxLeaksConsidered = 5000;
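
// Merges leaked chunks into the report, deduplicating by allocation stack and
// direct/indirect status.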
void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) {
  for (const LeakedChunk &leak : chunks) {
    uptr chunk = leak.chunk;
    u32 stack_trace_id = leak.stack_trace_id;
    uptr leaked_size = leak.leaked_size;
    ChunkTag tag = leak.tag;
    CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);

    if (u32 resolution = flags()->resolution) {
      StackTrace stack = StackDepotGet(stack_trace_id);
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    }

    bool is_directly_leaked = (tag == kDirectlyLeaked);
    uptr i;
    for (i = 0; i < leaks_.size(); i++) {
      if (leaks_[i].stack_trace_id == stack_trace_id &&
          leaks_[i].is_directly_leaked == is_directly_leaked) {
        leaks_[i].hit_count++;
        leaks_[i].total_size += leaked_size;
        break;
      }
    }
    if (i == leaks_.size()) {
      if (leaks_.size() == kMaxLeaksConsidered)
        return;
      Leak leak = {next_id_++,         /* hit_count */ 1,
                   leaked_size,        stack_trace_id,
                   is_directly_leaked, /* is_suppressed */ false};
      leaks_.push_back(leak);
    }
    if (flags()->report_objects) {
      LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
      leaked_objects_.push_back(obj);
    }
  }
}
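
// Orders leaks so that direct leaks come before indirect ones, larger total
// size first within each group.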
static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf(
        "Too many leaks! Only the first %zu leaks encountered will be "
        "reported.\n",
        kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  Sort(leaks_.data(), leaks_.size(), &LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed)
      continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report)
      break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.Default());

  CHECK(leaks_[index].stack_trace_id);
  StackDepotGet(leaks_[index].stack_trace_id).Print();

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", (void *)leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed)
      continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary;
  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
                 allocations);
  ReportErrorSummary(summary.data());
}
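
// Applies suppressions to the leaks in this report and returns the number of
// leaks that were suppressed.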
uptr LeakReport::ApplySuppressions() {
  LeakSuppressionContext *suppressions = GetSuppressionContext();
  uptr new_suppressions = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (suppressions->Suppress(leaks_[i].stack_trace_id, leaks_[i].hit_count,
                               leaks_[i].total_size)) {
      leaks_[i].is_suppressed = true;
      ++new_suppressions;
    }
  }
  return new_suppressions;
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed)
      result++;
  return result;
}

uptr LeakReport::IndirectUnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed && !leaks_[i].is_directly_leaked)
      result++;
  return result;
}

}  // namespace __lsan
#else   // CAN_SANITIZE_LEAKS
namespace __lsan {
void InitCommonLsan() {}
void DoLeakCheck() {}
void DoRecoverableLeakCheckVoid() {}
void DisableInThisThread() {}
void EnableInThisThread() {}
}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS

using namespace __lsan;

extern "C" {
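// Public LeakSanitizer interface, exported for use from user code and tools
// (see the public lsan_interface.h header).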
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  Lock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1,
            "__lsan_ignore_object(): "
            "heap object at %p is already being ignored\n",
            p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  Lock l(&global_mutex);
  RootRegion region = {reinterpret_cast<uptr>(begin), size};
  root_regions.push_back(region);
  VReport(1, "Registered root region at %p of size %zu\n", begin, size);
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  Lock l(&global_mutex);
  bool removed = false;
  for (uptr i = 0; i < root_regions.size(); i++) {
    RootRegion region = root_regions[i];
    if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
      removed = true;
      uptr last_index = root_regions.size() - 1;
      root_regions[i] = root_regions[last_index];
      root_regions.pop_back();
      VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %zu has not "
        "been registered.\n",
        begin, size);
    Die();
  }
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::DisableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  __lsan::EnableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
  return 0;
}

SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
  return "";
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE int
__lsan_is_turned_off() {
  return 0;
}

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char *
__lsan_default_suppressions() {
  return "";
}
#endif
}  // extern "C"