  1. //===-- msan_linux.cpp ----------------------------------------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file is a part of MemorySanitizer.
  10. //
  11. // Linux-, NetBSD- and FreeBSD-specific code.
  12. //===----------------------------------------------------------------------===//
  13. #include "sanitizer_common/sanitizer_platform.h"
  14. #if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
  15. # include <elf.h>
  16. # include <link.h>
  17. # include <pthread.h>
  18. # include <signal.h>
  19. # include <stdio.h>
  20. # include <stdlib.h>
  21. # if SANITIZER_LINUX
  22. # include <sys/personality.h>
  23. # endif
  24. # include <sys/resource.h>
  25. # include <sys/time.h>
  26. # include <unistd.h>
  27. # include <unwind.h>
  28. # include "msan.h"
  29. # include "msan_allocator.h"
  30. # include "msan_chained_origin_depot.h"
  31. # include "msan_report.h"
  32. # include "msan_thread.h"
  33. # include "sanitizer_common/sanitizer_common.h"
  34. # include "sanitizer_common/sanitizer_procmaps.h"
  35. # include "sanitizer_common/sanitizer_stackdepot.h"
  36. namespace __msan {
  37. void ReportMapRange(const char *descr, uptr beg, uptr size) {
  38. if (size > 0) {
  39. uptr end = beg + size - 1;
  40. VPrintf(1, "%s : 0x%zx - 0x%zx\n", descr, beg, end);
  41. }
  42. }
  43. static bool CheckMemoryRangeAvailability(uptr beg, uptr size, bool verbose) {
  44. if (size > 0) {
  45. uptr end = beg + size - 1;
  46. if (!MemoryRangeIsAvailable(beg, end)) {
  47. if (verbose)
  48. Printf("FATAL: Memory range 0x%zx - 0x%zx is not available.\n", beg,
  49. end);
  50. return false;
  51. }
  52. }
  53. return true;
  54. }
  55. static bool ProtectMemoryRange(uptr beg, uptr size, const char *name) {
  56. if (size > 0) {
  57. void *addr = MmapFixedNoAccess(beg, size, name);
  58. if (beg == 0 && addr) {
  59. // Depending on the kernel configuration, we may not be able to protect
  60. // the page at address zero.
  61. uptr gap = 16 * GetPageSizeCached();
  62. beg += gap;
  63. size -= gap;
  64. addr = MmapFixedNoAccess(beg, size, name);
  65. }
  66. if ((uptr)addr != beg) {
  67. uptr end = beg + size - 1;
  68. Printf("FATAL: Cannot protect memory range 0x%zx - 0x%zx (%s).\n", beg,
  69. end, name);
  70. return false;
  71. }
  72. }
  73. return true;
  74. }
  75. static void CheckMemoryLayoutSanity() {
  76. uptr prev_end = 0;
  77. for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
  78. uptr start = kMemoryLayout[i].start;
  79. uptr end = kMemoryLayout[i].end;
  80. MappingDesc::Type type = kMemoryLayout[i].type;
  81. CHECK_LT(start, end);
  82. CHECK_EQ(prev_end, start);
  83. CHECK(addr_is_type(start, type));
  84. CHECK(addr_is_type((start + end) / 2, type));
  85. CHECK(addr_is_type(end - 1, type));
  86. if (type == MappingDesc::APP || type == MappingDesc::ALLOCATOR) {
  87. uptr addr = start;
  88. CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
  89. CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
  90. CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
  91. addr = (start + end) / 2;
  92. CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
  93. CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
  94. CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
  95. addr = end - 1;
  96. CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
  97. CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
  98. CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
  99. }
  100. prev_end = end;
  101. }
  102. }
  103. static bool InitShadow(bool init_origins, bool dry_run) {
  104. // Let user know mapping parameters first.
  105. VPrintf(1, "__msan_init %p\n", reinterpret_cast<void *>(&__msan_init));
  106. for (unsigned i = 0; i < kMemoryLayoutSize; ++i)
  107. VPrintf(1, "%s: %zx - %zx\n", kMemoryLayout[i].name, kMemoryLayout[i].start,
  108. kMemoryLayout[i].end - 1);
  109. CheckMemoryLayoutSanity();
  110. if (!MEM_IS_APP(&__msan_init)) {
  111. if (!dry_run)
  112. Printf("FATAL: Code %p is out of application range. Non-PIE build?\n",
  113. reinterpret_cast<void *>(&__msan_init));
  114. return false;
  115. }
  116. const uptr maxVirtualAddress = GetMaxUserVirtualAddress();
  117. for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
  118. uptr start = kMemoryLayout[i].start;
  119. uptr end = kMemoryLayout[i].end;
  120. uptr size = end - start;
  121. MappingDesc::Type type = kMemoryLayout[i].type;
  122. // Check if the segment should be mapped based on platform constraints.
  123. if (start >= maxVirtualAddress)
  124. continue;
  125. bool map = type == MappingDesc::SHADOW ||
  126. (init_origins && type == MappingDesc::ORIGIN);
  127. bool protect = type == MappingDesc::INVALID ||
  128. (!init_origins && type == MappingDesc::ORIGIN);
  129. CHECK(!(map && protect));
  130. if (!map && !protect) {
  131. CHECK(type == MappingDesc::APP || type == MappingDesc::ALLOCATOR);
  132. if (dry_run && type == MappingDesc::ALLOCATOR &&
  133. !CheckMemoryRangeAvailability(start, size, !dry_run))
  134. return false;
  135. }
  136. if (map) {
  137. if (dry_run && !CheckMemoryRangeAvailability(start, size, !dry_run))
  138. return false;
  139. if (!dry_run &&
  140. !MmapFixedSuperNoReserve(start, size, kMemoryLayout[i].name))
  141. return false;
  142. if (!dry_run && common_flags()->use_madv_dontdump)
  143. DontDumpShadowMemory(start, size);
  144. }
  145. if (protect) {
  146. if (dry_run && !CheckMemoryRangeAvailability(start, size, !dry_run))
  147. return false;
  148. if (!dry_run && !ProtectMemoryRange(start, size, kMemoryLayout[i].name))
  149. return false;
  150. }
  151. }
  152. return true;
  153. }
  154. bool InitShadowWithReExec(bool init_origins) {
  155. // Start with dry run: check layout is ok, but don't print warnings because
  156. // warning messages will cause tests to fail (even if we successfully re-exec
  157. // after the warning).
  158. bool success = InitShadow(__msan_get_track_origins(), true);
  159. if (!success) {
  160. # if SANITIZER_LINUX
  161. // Perhaps ASLR entropy is too high. If ASLR is enabled, re-exec without it.
  162. int old_personality = personality(0xffffffff);
  163. bool aslr_on =
  164. (old_personality != -1) && ((old_personality & ADDR_NO_RANDOMIZE) == 0);
  165. if (aslr_on) {
  166. VReport(1,
  167. "WARNING: MemorySanitizer: memory layout is incompatible, "
  168. "possibly due to high-entropy ASLR.\n"
  169. "Re-execing with fixed virtual address space.\n"
  170. "N.B. reducing ASLR entropy is preferable.\n");
  171. CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
  172. ReExec();
  173. }
  174. # endif
  175. }
  176. // The earlier dry run didn't actually map or protect anything. Run again in
  177. // non-dry run mode.
  178. return success && InitShadow(__msan_get_track_origins(), false);
  179. }
  180. static void MsanAtExit(void) {
  181. if (flags()->print_stats && (flags()->atexit || msan_report_count > 0))
  182. ReportStats();
  183. if (msan_report_count > 0) {
  184. ReportAtExitStatistics();
  185. if (common_flags()->exitcode)
  186. internal__exit(common_flags()->exitcode);
  187. }
  188. }
// Registers MsanAtExit to run at normal process termination.
void InstallAtExitHandler() {
  atexit(MsanAtExit);
}
  192. // ---------------------- TSD ---------------- {{{1
  193. #if SANITIZER_NETBSD
  194. // Thread Static Data cannot be used in early init on NetBSD.
  195. // Reuse the MSan TSD API for compatibility with existing code
  196. // with an alternative implementation.
// Destructor installed via MsanTSDInit(); called on the thread's MsanThread
// pointer when the thread exits (from ~tsd_key below).
static void (*tsd_destructor)(void *tsd) = nullptr;

// Per-thread slot emulating pthread TSD with C++ thread_local storage.
// The destructor of the thread_local instance runs at thread exit and
// invokes tsd_destructor on the stored thread pointer, if any.
struct tsd_key {
  tsd_key() : key(nullptr) {}
  ~tsd_key() {
    // MsanTSDInit must have run before any thread exits.
    CHECK(tsd_destructor);
    if (key)
      (*tsd_destructor)(key);
  }
  MsanThread *key;  // This thread's MsanThread, or nullptr.
};

static thread_local struct tsd_key key;
// Records the per-thread destructor. Must be called exactly once, before any
// of the other TSD functions are used.
void MsanTSDInit(void (*destructor)(void *tsd)) {
  CHECK(!tsd_destructor);
  tsd_destructor = destructor;
}
// Returns the calling thread's MsanThread (nullptr if none was set).
// Requires MsanTSDInit to have run.
MsanThread *GetCurrentThread() {
  CHECK(tsd_destructor);
  return key.key;
}
// Associates `tsd` with the calling thread. May only be called once per
// thread (the slot must be empty) and never with a null pointer.
void SetCurrentThread(MsanThread *tsd) {
  CHECK(tsd_destructor);
  CHECK(tsd);
  CHECK(!key.key);
  key.key = tsd;
}
// Tears down the calling thread's TSD: clears the slot (so it cannot be seen
// again) and destroys the MsanThread. `tsd` must be this thread's pointer.
void MsanTSDDtor(void *tsd) {
  CHECK(tsd_destructor);
  CHECK_EQ(key.key, tsd);
  key.key = nullptr;
  // Make sure that signal handler can not see a stale current thread pointer.
  atomic_signal_fence(memory_order_seq_cst);
  MsanThread::TSDDtor(tsd);
}
  230. #else
// pthread-based TSD (non-NetBSD): the key is used only to get MsanTSDDtor
// invoked at thread exit; the current thread itself lives in a THREADLOCAL
// variable below for fast access.
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;

// Creates the pthread TSD key with `destructor` as the thread-exit callback.
// Must be called exactly once.
void MsanTSDInit(void (*destructor)(void *tsd)) {
  CHECK(!tsd_key_inited);
  tsd_key_inited = true;
  CHECK_EQ(0, pthread_key_create(&tsd_key, destructor));
}
// Fast-path storage for the calling thread's MsanThread.
static THREADLOCAL MsanThread* msan_current_thread;

// Returns the calling thread's MsanThread (nullptr before SetCurrentThread).
MsanThread *GetCurrentThread() {
  return msan_current_thread;
}
// Associates `t` with the calling thread; may only be called once per thread.
void SetCurrentThread(MsanThread *t) {
  // Make sure we do not reset the current MsanThread.
  CHECK_EQ(0, msan_current_thread);
  msan_current_thread = t;
  // Make sure that MsanTSDDtor gets called at the end.
  CHECK(tsd_key_inited);
  pthread_setspecific(tsd_key, (void *)t);
}
// pthread TSD destructor. Other keys' destructors may still need the current
// thread, so while destructor_iterations_ remains, re-arm the slot and defer
// the real teardown to a later destructor round.
void MsanTSDDtor(void *tsd) {
  MsanThread *t = (MsanThread*)tsd;
  if (t->destructor_iterations_ > 1) {
    t->destructor_iterations_--;
    // Re-setting the value keeps pthread calling us on the next round.
    CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
    return;
  }
  msan_current_thread = nullptr;
  // Make sure that signal handler can not see a stale current thread pointer.
  atomic_signal_fence(memory_order_seq_cst);
  MsanThread::TSDDtor(tsd);
}
  262. # endif
// pthread_atfork prepare hook: quiesce global state so the child does not
// inherit locks held by other threads. Acquisition order here must be the
// exact reverse of the release order in AfterFork.
static void BeforeFork() {
  // Usually we lock ThreadRegistry, but msan does not have one.
  LockAllocator();
  StackDepotLockBeforeFork();
  ChainedOriginDepotBeforeFork();
}
// pthread_atfork parent/child hook: releases everything BeforeFork acquired,
// in reverse order. `fork_child` distinguishes the child process, whose
// depots may need re-initialization rather than a plain unlock.
static void AfterFork(bool fork_child) {
  ChainedOriginDepotAfterFork(fork_child);
  StackDepotUnlockAfterFork(fork_child);
  UnlockAllocator();
  // Usually we unlock ThreadRegistry, but msan does not have one.
}
  275. void InstallAtForkHandler() {
  276. pthread_atfork(
  277. &BeforeFork, []() { AfterFork(/* fork_child= */ false); },
  278. []() { AfterFork(/* fork_child= */ true); });
  279. }
  280. } // namespace __msan
  281. #endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD