  1. //=-- lsan_common_mac.cpp -------------------------------------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file is a part of LeakSanitizer.
  10. // Implementation of common leak checking functionality. Darwin-specific code.
  11. //
  12. //===----------------------------------------------------------------------===//
  13. #include "sanitizer_common/sanitizer_platform.h"
  14. #include "sanitizer_common/sanitizer_libc.h"
  15. #include "lsan_common.h"
  16. #if CAN_SANITIZE_LEAKS && SANITIZER_APPLE
  17. # include <mach/mach.h>
  18. # include <mach/vm_statistics.h>
  19. # include <pthread.h>
  20. # include "lsan_allocator.h"
  21. # include "sanitizer_common/sanitizer_allocator_internal.h"
  22. namespace __lsan {
// Bitmask tracking which special Darwin VM regions have already been scanned
// by ProcessPlatformSpecificAllocations, so the (slow) full memory-map walk
// can stop early once all of them have been seen.
enum class SeenRegion {
  None = 0,
  AllocOnce = 1 << 0,    // VM_MEMORY_OS_ALLOC_ONCE: libxpc stash page.
  LibDispatch = 1 << 1,  // VM_MEMORY_LIBDISPATCH: dispatch continuations.
  Foundation = 1 << 2,   // VM_MEMORY_FOUNDATION: ObjC block trampolines.
  All = AllocOnce | LibDispatch | Foundation
};
  30. inline SeenRegion operator|(SeenRegion left, SeenRegion right) {
  31. return static_cast<SeenRegion>(static_cast<int>(left) |
  32. static_cast<int>(right));
  33. }
  34. inline SeenRegion &operator|=(SeenRegion &left, const SeenRegion &right) {
  35. left = left | right;
  36. return left;
  37. }
// Per-iteration state for ProcessPlatformSpecificAllocations.
struct RegionScanState {
  // Regions already scanned; the walk can stop early once all are seen.
  SeenRegion seen_regions = SeenRegion::None;
  // True while inside a (possibly multi-region) run of libdispatch memory;
  // LibDispatch is only marked seen once the run ends.
  bool in_libdispatch = false;
};
// Per-thread LSan state, kept in a pthread TLS slot (see get_tls_val).
typedef struct {
  int disable_counter;    // > 0 while leak checking is disabled in this thread.
  u32 current_thread_id;  // kInvalidTid until SetCurrentThread is called.
  AllocatorCache cache;   // This thread's allocator cache.
} thread_local_data_t;
// TLS key holding a thread_local_data_t; created exactly once via key_once.
static pthread_key_t key;
static pthread_once_t key_once = PTHREAD_ONCE_INIT;
// The main thread destructor requires the current thread id,
// so we can't destroy it until it's been used and reset to invalid tid
//
// pthread TLS destructor: re-registers the value so that the destructor
// runs again on a later destructor pass, keeping the data alive until the
// thread id has been reset to kInvalidTid.
void restore_tid_data(void *ptr) {
  thread_local_data_t *data = (thread_local_data_t *)ptr;
  if (data->current_thread_id != kInvalidTid)
    pthread_setspecific(key, data);
}
// Creates the TLS key with restore_tid_data as its destructor. Must run
// only once; callers guard it with pthread_once(&key_once, ...).
static void make_tls_key() {
  CHECK_EQ(pthread_key_create(&key, restore_tid_data), 0);
}
  59. static thread_local_data_t *get_tls_val(bool alloc) {
  60. pthread_once(&key_once, make_tls_key);
  61. thread_local_data_t *ptr = (thread_local_data_t *)pthread_getspecific(key);
  62. if (ptr == NULL && alloc) {
  63. ptr = (thread_local_data_t *)InternalAlloc(sizeof(*ptr));
  64. ptr->disable_counter = 0;
  65. ptr->current_thread_id = kInvalidTid;
  66. ptr->cache = AllocatorCache();
  67. pthread_setspecific(key, ptr);
  68. }
  69. return ptr;
  70. }
  71. bool DisabledInThisThread() {
  72. thread_local_data_t *data = get_tls_val(false);
  73. return data ? data->disable_counter > 0 : false;
  74. }
  75. void DisableInThisThread() { ++get_tls_val(true)->disable_counter; }
  76. void EnableInThisThread() {
  77. int *disable_counter = &get_tls_val(true)->disable_counter;
  78. if (*disable_counter == 0) {
  79. DisableCounterUnderflow();
  80. }
  81. --*disable_counter;
  82. }
  83. u32 GetCurrentThread() {
  84. thread_local_data_t *data = get_tls_val(false);
  85. return data ? data->current_thread_id : kInvalidTid;
  86. }
  87. void SetCurrentThread(u32 tid) { get_tls_val(true)->current_thread_id = tid; }
  88. AllocatorCache *GetAllocatorCache() { return &get_tls_val(true)->cache; }
// No dynamic-linker module needs special-casing on Darwin.
LoadedModule *GetLinker() { return nullptr; }
// Required on Linux for initialization of TLS behavior, but should not be
// required on Darwin.
void InitializePlatformSpecificModules() {}
// Sections which can't contain global pointers. This list errs on the
// side of caution to avoid false positives, at the expense of performance.
//
// Other potentially safe sections include:
// __all_image_info, __crash_info, __const, __got, __interpose, __objc_msg_break
//
// Sections which definitely cannot be included here are:
// __objc_data, __objc_const, __data, __bss, __common, __thread_data,
// __thread_bss, __thread_vars, __objc_opt_rw, __objc_opt_ptrs
static const char *kSkippedSecNames[] = {
    "__cfstring",       "__la_symbol_ptr",  "__mod_init_func",
    "__mod_term_func",  "__nl_symbol_ptr",  "__objc_classlist",
    "__objc_classrefs", "__objc_imageinfo", "__objc_nlclslist",
    "__objc_protolist", "__objc_selrefs",   "__objc_superrefs"};
  107. // Scans global variables for heap pointers.
  108. void ProcessGlobalRegions(Frontier *frontier) {
  109. for (auto name : kSkippedSecNames)
  110. CHECK(internal_strnlen(name, kMaxSegName + 1) <= kMaxSegName);
  111. MemoryMappingLayout memory_mapping(false);
  112. InternalMmapVector<LoadedModule> modules;
  113. modules.reserve(128);
  114. memory_mapping.DumpListOfModules(&modules);
  115. for (uptr i = 0; i < modules.size(); ++i) {
  116. // Even when global scanning is disabled, we still need to scan
  117. // system libraries for stashed pointers
  118. if (!flags()->use_globals && modules[i].instrumented()) continue;
  119. for (const __sanitizer::LoadedModule::AddressRange &range :
  120. modules[i].ranges()) {
  121. // Sections storing global variables are writable and non-executable
  122. if (range.executable || !range.writable) continue;
  123. for (auto name : kSkippedSecNames) {
  124. if (!internal_strcmp(range.name, name)) continue;
  125. }
  126. ScanGlobalRange(range.beg, range.end, frontier);
  127. }
  128. }
  129. }
// Walks the task's full VM map (vm_region_recurse_64) to scan special Darwin
// regions — alloc-once, Foundation, libdispatch — where pointers can be
// stashed outside ordinary heap/global memory, and to scan user root regions
// that live inside mmap'd memory.
void ProcessPlatformSpecificAllocations(Frontier *frontier) {
  vm_address_t address = 0;
  kern_return_t err = KERN_SUCCESS;
  InternalMmapVectorNoCtor<RootRegion> const *root_regions = GetRootRegions();
  RegionScanState scan_state;

  while (err == KERN_SUCCESS) {
    vm_size_t size = 0;
    unsigned depth = 1;
    struct vm_region_submap_info_64 info;
    mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
    err = vm_region_recurse_64(mach_task_self(), &address, &size, &depth,
                               (vm_region_info_t)&info, &count);
    // NOTE(review): info/address/size are still consumed below even when the
    // call above fails (the loop only exits on the next iteration) —
    // presumably harmless for the final KERN_INVALID_ADDRESS pass since size
    // stays 0, but worth confirming.

    uptr end_address = address + size;
    if (info.user_tag == VM_MEMORY_OS_ALLOC_ONCE) {
      // libxpc stashes some pointers in the Kernel Alloc Once page,
      // make sure not to report those as leaks.
      scan_state.seen_regions |= SeenRegion::AllocOnce;
      ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
                           kReachable);
    } else if (info.user_tag == VM_MEMORY_FOUNDATION) {
      // Objective-C block trampolines use the Foundation region.
      scan_state.seen_regions |= SeenRegion::Foundation;
      ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
                           kReachable);
    } else if (info.user_tag == VM_MEMORY_LIBDISPATCH) {
      // Dispatch continuations use the libdispatch region. Empirically, there
      // can be more than one region with this tag, so we'll optimistically
      // assume that they're continguous. Otherwise, we would need to scan every
      // region to ensure we find them all.
      scan_state.in_libdispatch = true;
      ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
                           kReachable);
    } else if (scan_state.in_libdispatch) {
      // First region past the libdispatch run: the whole run is now scanned.
      scan_state.seen_regions |= SeenRegion::LibDispatch;
      scan_state.in_libdispatch = false;
    }

    // Recursing over the full memory map is very slow, break out
    // early if we don't need the full iteration.
    if (scan_state.seen_regions == SeenRegion::All &&
        !(flags()->use_root_regions && root_regions->size() > 0)) {
      break;
    }

    // This additional root region scan is required on Darwin in order to
    // detect root regions contained within mmap'd memory regions, because
    // the Darwin implementation of sanitizer_procmaps traverses images
    // as loaded by dyld, and not the complete set of all memory regions.
    //
    // TODO(fjricci) - remove this once sanitizer_procmaps_mac has the same
    // behavior as sanitizer_procmaps_linux and traverses all memory regions
    if (flags()->use_root_regions) {
      for (uptr i = 0; i < root_regions->size(); i++) {
        ScanRootRegion(frontier, (*root_regions)[i], address, end_address,
                       info.protection & kProtectionRead);
      }
    }

    address = end_address;
  }
}
// On darwin, we can intercept _exit gracefully, and return a failing exit code
// if required at that point. Calling Die() here is undefined behavior and
// causes rare race conditions.
void HandleLeaks() {}
// Acquires the allocator/thread-registry locks (ScopedStopTheWorldLock RAII)
// before suspending all other threads, then runs |callback| on the stopped
// world. Lock acquisition must precede StopTheWorld to avoid deadlocking on
// locks held by suspended threads.
void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
                              CheckForLeaksParam *argument) {
  ScopedStopTheWorldLock lock;
  StopTheWorld(callback, argument);
}
  197. } // namespace __lsan
  198. #endif // CAN_SANITIZE_LEAKS && SANITIZER_APPLE