//===-- hwasan_linux.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file is a part of HWAddressSanitizer and contains Linux-, NetBSD- and
/// FreeBSD-specific code.
///
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD

#  include <dlfcn.h>
#  include <elf.h>
#  include <errno.h>
#  include <link.h>
#  include <pthread.h>
#  include <signal.h>
#  include <stdio.h>
#  include <stdlib.h>
#  include <sys/prctl.h>
#  include <sys/resource.h>
#  include <sys/time.h>
#  include <unistd.h>
#  include <unwind.h>

#  include "hwasan.h"
#  include "hwasan_dynamic_shadow.h"
#  include "hwasan_interface_internal.h"
#  include "hwasan_mapping.h"
#  include "hwasan_report.h"
#  include "hwasan_thread.h"
#  include "hwasan_thread_list.h"
#  include "sanitizer_common/sanitizer_common.h"
#  include "sanitizer_common/sanitizer_procmaps.h"
#  include "sanitizer_common/sanitizer_stackdepot.h"

// Configurations of HWASAN_WITH_INTERCEPTORS and SANITIZER_ANDROID.
//
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=OFF
//   Not currently tested.
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=ON
//   Integration tests downstream exist.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=OFF
//   Tested with check-hwasan on x86_64-linux.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=ON
//   Tested with check-hwasan on aarch64-linux-android.

#  if !SANITIZER_ANDROID
SANITIZER_INTERFACE_ATTRIBUTE
THREADLOCAL uptr __hwasan_tls;
#  endif

namespace __hwasan {

// With the zero shadow base we can not actually map pages starting from 0.
// This constant is somewhat arbitrary.
constexpr uptr kZeroBaseShadowStart = 0;
constexpr uptr kZeroBaseMaxShadowStart = 1 << 18;

static void ProtectGap(uptr addr, uptr size) {
  __sanitizer::ProtectGap(addr, size, kZeroBaseShadowStart,
                          kZeroBaseMaxShadowStart);
}

uptr kLowMemStart;
uptr kLowMemEnd;
uptr kHighMemStart;
uptr kHighMemEnd;

static void PrintRange(uptr start, uptr end, const char *name) {
  Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name);
}

static void PrintAddressSpaceLayout() {
  PrintRange(kHighMemStart, kHighMemEnd, "HighMem");
  if (kHighShadowEnd + 1 < kHighMemStart)
    PrintRange(kHighShadowEnd + 1, kHighMemStart - 1, "ShadowGap");
  else
    CHECK_EQ(kHighShadowEnd + 1, kHighMemStart);
  PrintRange(kHighShadowStart, kHighShadowEnd, "HighShadow");
  if (kLowShadowEnd + 1 < kHighShadowStart)
    PrintRange(kLowShadowEnd + 1, kHighShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowShadowEnd + 1, kHighShadowStart);
  PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow");
  if (kLowMemEnd + 1 < kLowShadowStart)
    PrintRange(kLowMemEnd + 1, kLowShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowMemEnd + 1, kLowShadowStart);
  PrintRange(kLowMemStart, kLowMemEnd, "LowMem");
  CHECK_EQ(0, kLowMemStart);
}

static uptr GetHighMemEnd() {
  // HighMem covers the upper part of the address space.
  uptr max_address = GetMaxUserVirtualAddress();
  // Adjust max address to make sure that kHighMemEnd and kHighMemStart are
  // properly aligned:
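  // E.g., with 4 KiB mmap granularity and kShadowScale == 4, this ORs in the
  // low 16 bits, so kHighMemEnd + 1 is 64 KiB-aligned and its shadow address
  // stays page-aligned (example values; both quantities are
  // platform-dependent).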
  max_address |= (GetMmapGranularity() << kShadowScale) - 1;
  return max_address;
}

static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
  __hwasan_shadow_memory_dynamic_address =
      FindDynamicShadowStart(shadow_size_bytes);
}

void InitializeOsSupport() {
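  // The prctl constants are defined locally (and #undef'd at the end of this
  // function), presumably so the file builds against kernel headers that
  // predate the tagged address ABI.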
#  define PR_SET_TAGGED_ADDR_CTRL 55
#  define PR_GET_TAGGED_ADDR_CTRL 56
#  define PR_TAGGED_ADDR_ENABLE (1UL << 0)
  // Check we're running on a kernel that can use the tagged address ABI.
  int local_errno = 0;
  if (internal_iserror(internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0),
                       &local_errno) &&
      local_errno == EINVAL) {
#  if SANITIZER_ANDROID || defined(HWASAN_ALIASING_MODE)
    // Some older Android kernels have the tagged pointer ABI on
    // unconditionally, and hence don't have the tagged-addr prctl while still
    // allowing the ABI.
    // If targeting Android and the prctl is not around we assume this is the
    // case.
    return;
#  else
    if (flags()->fail_without_syscall_abi) {
      Printf(
          "FATAL: "
          "HWAddressSanitizer requires a kernel with tagged address ABI.\n");
      Die();
    }
#  endif
  }

  // Turn on the tagged address ABI.
  if ((internal_iserror(internal_prctl(PR_SET_TAGGED_ADDR_CTRL,
                                       PR_TAGGED_ADDR_ENABLE, 0, 0, 0)) ||
       !internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0))) {
#  if defined(__x86_64__) && !defined(HWASAN_ALIASING_MODE)
    // Try the new prctl API for Intel LAM. The API is based on a currently
    // unsubmitted patch to the Linux kernel (as of May 2021) and is thus
    // subject to change. Patch is here:
    // https://lore.kernel.org/linux-mm/20210205151631.43511-12-kirill.shutemov@linux.intel.com/
    int tag_bits = kTagBits;
    int tag_shift = kAddressTagShift;
    if (!internal_iserror(
            internal_prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE,
                           reinterpret_cast<unsigned long>(&tag_bits),
                           reinterpret_cast<unsigned long>(&tag_shift), 0))) {
      CHECK_EQ(tag_bits, kTagBits);
      CHECK_EQ(tag_shift, kAddressTagShift);
      return;
    }
#  endif  // defined(__x86_64__) && !defined(HWASAN_ALIASING_MODE)
    if (flags()->fail_without_syscall_abi) {
      Printf(
          "FATAL: HWAddressSanitizer failed to enable tagged address syscall "
          "ABI.\nSuggest check `sysctl abi.tagged_addr_disabled` "
          "configuration.\n");
      Die();
    }
  }
#  undef PR_SET_TAGGED_ADDR_CTRL
#  undef PR_GET_TAGGED_ADDR_CTRL
#  undef PR_TAGGED_ADDR_ENABLE
}
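
// The resulting layout, low to high (a sketch; exact boundaries are computed
// below, and any gap may be empty):
//
//   [kLowMemStart,     kLowMemEnd]      application LowMem
//   [kLowShadowStart,  kLowShadowEnd]   shadow of LowMem
//   [kHighShadowStart, kHighShadowEnd]  shadow of HighMem
//   [kHighMemStart,    kHighMemEnd]     application HighMem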
bool InitShadow() {
  // Define the entire memory range.
  kHighMemEnd = GetHighMemEnd();

  // Determine shadow memory base offset.
  InitializeShadowBaseAddress(MemToShadowSize(kHighMemEnd));

  // Place the low memory first.
  kLowMemEnd = __hwasan_shadow_memory_dynamic_address - 1;
  kLowMemStart = 0;

  // Define the low shadow based on the already placed low memory.
  kLowShadowEnd = MemToShadow(kLowMemEnd);
  kLowShadowStart = __hwasan_shadow_memory_dynamic_address;

  // High shadow takes whatever memory is left up there (making sure it is not
  // interfering with low memory in the fixed case).
  kHighShadowEnd = MemToShadow(kHighMemEnd);
  kHighShadowStart = Max(kLowMemEnd, MemToShadow(kHighShadowEnd)) + 1;

  // High memory starts where allocated shadow allows.
  kHighMemStart = ShadowToMem(kHighShadowStart);

  // Check the sanity of the defined memory ranges (there might be gaps).
  CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0);
  CHECK_GT(kHighMemStart, kHighShadowEnd);
  CHECK_GT(kHighShadowEnd, kHighShadowStart);
  CHECK_GT(kHighShadowStart, kLowMemEnd);
  CHECK_GT(kLowMemEnd, kLowMemStart);
  CHECK_GT(kLowShadowEnd, kLowShadowStart);
  CHECK_GT(kLowShadowStart, kLowMemEnd);

  if (Verbosity())
    PrintAddressSpaceLayout();

  // Reserve shadow memory.
  ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd, "low shadow");
  ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd, "high shadow");

  // Protect all the gaps.
  ProtectGap(0, Min(kLowMemStart, kLowShadowStart));
  if (kLowMemEnd + 1 < kLowShadowStart)
    ProtectGap(kLowMemEnd + 1, kLowShadowStart - kLowMemEnd - 1);
  if (kLowShadowEnd + 1 < kHighShadowStart)
    ProtectGap(kLowShadowEnd + 1, kHighShadowStart - kLowShadowEnd - 1);
  if (kHighShadowEnd + 1 < kHighMemStart)
    ProtectGap(kHighShadowEnd + 1, kHighMemStart - kHighShadowEnd - 1);

  return true;
}

void InitThreads() {
  CHECK(__hwasan_shadow_memory_dynamic_address);
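  // Carve out a 2**kShadowBaseAlignment byte region just below the shadow
  // base for per-thread state (thread objects and stack ring buffers),
  // leaving one guard page between that region and the shadow itself.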
  uptr guard_page_size = GetMmapGranularity();
  uptr thread_space_start =
      __hwasan_shadow_memory_dynamic_address - (1ULL << kShadowBaseAlignment);
  uptr thread_space_end =
      __hwasan_shadow_memory_dynamic_address - guard_page_size;
  ReserveShadowMemoryRange(thread_space_start, thread_space_end - 1,
                           "hwasan threads", /*madvise_shadow*/ false);
  ProtectGap(thread_space_end,
             __hwasan_shadow_memory_dynamic_address - thread_space_end);
  InitThreadList(thread_space_start, thread_space_end - thread_space_start);
  hwasanThreadList().CreateCurrentThread();
}

bool MemIsApp(uptr p) {
// Memory outside the alias range has non-zero tags.
#  if !defined(HWASAN_ALIASING_MODE)
  CHECK(GetTagFromPointer(p) == 0);
#  endif

  return (p >= kHighMemStart && p <= kHighMemEnd) ||
         (p >= kLowMemStart && p <= kLowMemEnd);
}

void InstallAtExitHandler() { atexit(HwasanAtExit); }

// ---------------------- TSD ---------------- {{{1

extern "C" void __hwasan_thread_enter() {
  hwasanThreadList().CreateCurrentThread()->EnsureRandomStateInited();
}

extern "C" void __hwasan_thread_exit() {
  Thread *t = GetCurrentThread();
  // Make sure that signal handler can not see a stale current thread pointer.
  atomic_signal_fence(memory_order_seq_cst);
  if (t)
    hwasanThreadList().ReleaseThread(t);
}

#  if HWASAN_WITH_INTERCEPTORS
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;

void HwasanTSDThreadInit() {
  if (tsd_key_inited)
    CHECK_EQ(0, pthread_setspecific(tsd_key,
                                    (void *)GetPthreadDestructorIterations()));
}
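
// Pthreads re-runs TSD destructors while any slot is still non-NULL (up to
// PTHREAD_DESTRUCTOR_ITERATIONS passes). The slot therefore stores a
// countdown, so __hwasan_thread_exit() is deferred to the last pass, after
// other keys' destructors have run.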
void HwasanTSDDtor(void *tsd) {
  uptr iterations = (uptr)tsd;
  if (iterations > 1) {
    CHECK_EQ(0, pthread_setspecific(tsd_key, (void *)(iterations - 1)));
    return;
  }
  __hwasan_thread_exit();
}

void HwasanTSDInit() {
  CHECK(!tsd_key_inited);
  tsd_key_inited = true;
  CHECK_EQ(0, pthread_key_create(&tsd_key, HwasanTSDDtor));
}
#  else
void HwasanTSDInit() {}
void HwasanTSDThreadInit() {}
#  endif

#  if SANITIZER_ANDROID
uptr *GetCurrentThreadLongPtr() { return (uptr *)get_android_tls_ptr(); }
#  else
uptr *GetCurrentThreadLongPtr() { return &__hwasan_tls; }
#  endif
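
// On Android, hwasan keeps per-thread state in TLS_SLOT_SANITIZER.
// Incompatible platform versions used that slot for dlerror(); write a magic
// value, call dlerror(), and check the value survived to detect them early.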
#  if SANITIZER_ANDROID
void AndroidTestTlsSlot() {
  uptr kMagicValue = 0x010203040A0B0C0D;
  uptr *tls_ptr = GetCurrentThreadLongPtr();
  uptr old_value = *tls_ptr;
  *tls_ptr = kMagicValue;
  dlerror();
  if (*(uptr *)get_android_tls_ptr() != kMagicValue) {
    Printf(
        "ERROR: Incompatible version of Android: TLS_SLOT_SANITIZER(6) is used "
        "for dlerror().\n");
    Die();
  }
  *tls_ptr = old_value;
}
#  else
void AndroidTestTlsSlot() {}
#  endif

static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
  // Access type is passed in a platform dependent way (see below) and encoded
  // as 0xXY, where X&1 is 1 for store, 0 for load, and X&2 is 1 if the error is
  // recoverable. Valid values of Y are 0 to 4, which are interpreted as
  // log2(access_size), and 0xF, which means that access size is passed via
  // platform dependent register (see below).
#  if defined(__aarch64__)
  // Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF,
  // access size is stored in X1 register. Access address is always in X0
  // register.
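  // E.g., "brk #0x912" decodes as a non-recoverable 4-byte store:
  // 0x912 & 0x10 is set (store), 0x912 & 0x20 is clear (not recoverable),
  // and size = 1 << (0x912 & 0xf) = 4.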
  uptr pc = (uptr)info->si_addr;
  const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff;
  if ((code & 0xff00) != 0x900)
    return AccessInfo{};  // Not ours.
  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.regs[0];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{};  // Not ours.
  const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log;
#  elif defined(__x86_64__)
  // Access type is encoded in the instruction following INT3 as
  // NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in
  // RSI register. Access address is always in RDI register.
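  // E.g., the byte sequence 0f 1f 40 52 encodes displacement 0x40 + 0x12 and
  // thus, like the aarch64 example above, a non-recoverable 4-byte store.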
  uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP];
  uint8_t *nop = (uint8_t *)pc;
  if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 ||
      *(nop + 3) < 0x40)
    return AccessInfo{};  // Not ours.
  const unsigned code = *(nop + 3) - 0x40;
  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.gregs[REG_RDI];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{};  // Not ours.
  const uptr size =
      size_log == 0xf ? uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log;
#  else
#    error Unsupported architecture
#  endif

  return AccessInfo{addr, size, is_store, !is_store, recover};
}

static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
  AccessInfo ai = GetAccessInfo(info, uc);
  if (!ai.is_store && !ai.is_load)
    return false;

  SignalContext sig{info, uc};
  HandleTagMismatch(ai, StackTrace::GetNextInstructionPc(sig.pc), sig.bp, uc);
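  // BRK does not advance the PC, so step over the 4-byte instruction here to
  // resume after a recoverable mismatch; on x86_64, RIP already points past
  // the INT3 and the recovery NOP is harmless to execute.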
#  if defined(__aarch64__)
  uc->uc_mcontext.pc += 4;
#  elif defined(__x86_64__)
#  else
#    error Unsupported architecture
#  endif
  return true;
}

static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

void HwasanOnDeadlySignal(int signo, void *info, void *context) {
  // Probably a tag mismatch.
  if (signo == SIGTRAP)
    if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t *)context))
      return;

  HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr);
}

void Thread::InitStackAndTls(const InitState *) {
  uptr tls_size;
  uptr stack_size;
  GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_size, &tls_begin_,
                       &tls_size);
  stack_top_ = stack_bottom_ + stack_size;
  tls_end_ = tls_begin_ + tls_size;
}

uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
  CHECK(IsAligned(p, kShadowAlignment));
  CHECK(IsAligned(size, kShadowAlignment));
  uptr shadow_start = MemToShadow(p);
  uptr shadow_size = MemToShadowSize(size);
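
  // When zero-tagging a large range, memset only the ragged, unaligned edges
  // of the shadow and hand the page-aligned middle back to the kernel:
  // refaulted pages read as zeros, which is exactly the tag being written.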
  uptr page_size = GetPageSizeCached();
  uptr page_start = RoundUpTo(shadow_start, page_size);
  uptr page_end = RoundDownTo(shadow_start + shadow_size, page_size);
  uptr threshold = common_flags()->clear_shadow_mmap_threshold;
  if (SANITIZER_LINUX &&
      UNLIKELY(page_end >= page_start + threshold && tag == 0)) {
    internal_memset((void *)shadow_start, tag, page_start - shadow_start);
    internal_memset((void *)page_end, tag,
                    shadow_start + shadow_size - page_end);
    // For an anonymous private mapping MADV_DONTNEED will return a zero page
    // on Linux.
    ReleaseMemoryPagesToOSAndZeroFill(page_start, page_end);
  } else {
    internal_memset((void *)shadow_start, tag, shadow_size);
  }
  return AddTagToPointer(p, tag);
}

void HwasanInstallAtForkHandler() {
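  // Acquire the allocator and stack depot locks before fork() and release
  // them in both the parent and the child afterwards, so the child never
  // inherits them in a locked state.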
  auto before = []() {
    HwasanAllocatorLock();
    StackDepotLockAll();
  };
  auto after = []() {
    StackDepotUnlockAll();
    HwasanAllocatorUnlock();
  };
  pthread_atfork(before, after, after);
}

}  // namespace __hwasan

#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD