//===-- hwasan_linux.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file is a part of HWAddressSanitizer and contains Linux-, NetBSD- and
/// FreeBSD-specific code.
///
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD

#  include <dlfcn.h>
#  include <elf.h>
#  include <errno.h>
#  include <link.h>
#  include <pthread.h>
#  include <signal.h>
#  include <stdio.h>
#  include <stdlib.h>
#  include <sys/prctl.h>
#  include <sys/resource.h>
#  include <sys/time.h>
#  include <unistd.h>
#  include <unwind.h>

#  include "hwasan.h"
#  include "hwasan_dynamic_shadow.h"
#  include "hwasan_interface_internal.h"
#  include "hwasan_mapping.h"
#  include "hwasan_report.h"
#  include "hwasan_thread.h"
#  include "hwasan_thread_list.h"
#  include "sanitizer_common/sanitizer_common.h"
#  include "sanitizer_common/sanitizer_procmaps.h"
#  include "sanitizer_common/sanitizer_stackdepot.h"

// Configurations of HWASAN_WITH_INTERCEPTORS and SANITIZER_ANDROID.
//
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=OFF
//   Not currently tested.
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=ON
//   Integration tests downstream exist.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=OFF
//   Tested with check-hwasan on x86_64-linux.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=ON
//   Tested with check-hwasan on aarch64-linux-android.

#  if !SANITIZER_ANDROID
SANITIZER_INTERFACE_ATTRIBUTE
THREADLOCAL uptr __hwasan_tls;
#  endif

namespace __hwasan {

// With the zero shadow base we cannot actually map pages starting from 0.
// This constant is somewhat arbitrary.
constexpr uptr kZeroBaseShadowStart = 0;
constexpr uptr kZeroBaseMaxShadowStart = 1 << 18;

static void ProtectGap(uptr addr, uptr size) {
  __sanitizer::ProtectGap(addr, size, kZeroBaseShadowStart,
                          kZeroBaseMaxShadowStart);
}

uptr kLowMemStart;
uptr kLowMemEnd;
uptr kHighMemStart;
uptr kHighMemEnd;

static void PrintRange(uptr start, uptr end, const char *name) {
  Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name);
}

static void PrintAddressSpaceLayout() {
  PrintRange(kHighMemStart, kHighMemEnd, "HighMem");
  if (kHighShadowEnd + 1 < kHighMemStart)
    PrintRange(kHighShadowEnd + 1, kHighMemStart - 1, "ShadowGap");
  else
    CHECK_EQ(kHighShadowEnd + 1, kHighMemStart);
  PrintRange(kHighShadowStart, kHighShadowEnd, "HighShadow");
  if (kLowShadowEnd + 1 < kHighShadowStart)
    PrintRange(kLowShadowEnd + 1, kHighShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowShadowEnd + 1, kHighShadowStart);
  PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow");
  if (kLowMemEnd + 1 < kLowShadowStart)
    PrintRange(kLowMemEnd + 1, kLowShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowMemEnd + 1, kLowShadowStart);
  PrintRange(kLowMemStart, kLowMemEnd, "LowMem");
  CHECK_EQ(0, kLowMemStart);
}

static uptr GetHighMemEnd() {
  // HighMem covers the upper part of the address space.
  uptr max_address = GetMaxUserVirtualAddress();
  // Adjust max address to make sure that kHighMemEnd and kHighMemStart are
  // properly aligned:
  max_address |= (GetMmapGranularity() << kShadowScale) - 1;
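  // For illustration: with a 4 KiB mmap granularity and kShadowScale == 4,
  // this ORs in the low 16 bits, so the address space ends on a 64 KiB
  // boundary whose shadow is a whole number of 4 KiB granules (illustrative
  // numbers; granularity and scale are target-dependent).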
  return max_address;
}

static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
  __hwasan_shadow_memory_dynamic_address =
      FindDynamicShadowStart(shadow_size_bytes);
}

static void MaybeDieIfNoTaggingAbi(const char *message) {
  if (!flags()->fail_without_syscall_abi)
    return;
  Printf("FATAL: %s\n", message);
  Die();
}

#  define PR_SET_TAGGED_ADDR_CTRL 55
#  define PR_GET_TAGGED_ADDR_CTRL 56
#  define PR_TAGGED_ADDR_ENABLE (1UL << 0)
#  define ARCH_GET_UNTAG_MASK 0x4001
#  define ARCH_ENABLE_TAGGED_ADDR 0x4002
#  define ARCH_GET_MAX_TAG_BITS 0x4003

static bool CanUseTaggingAbi() {
#  if defined(__x86_64__)
  unsigned long num_bits = 0;
  // Check for x86 LAM support. This API is based on a currently unsubmitted
  // patch to the Linux kernel (as of August 2022) and is thus subject to
  // change. The patch is here:
  // https://lore.kernel.org/all/20220815041803.17954-1-kirill.shutemov@linux.intel.com/
  //
  // arch_prctl(ARCH_GET_MAX_TAG_BITS, &bits) returns the maximum number of tag
  // bits the user can request, or zero if LAM is not supported by the hardware.
  if (internal_iserror(internal_arch_prctl(ARCH_GET_MAX_TAG_BITS,
                                           reinterpret_cast<uptr>(&num_bits))))
    return false;
  // The platform must provide enough bits for HWASan tags.
  if (num_bits < kTagBits)
    return false;
  return true;
#  else
  // Check for ARM TBI support.
  return !internal_iserror(internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
#  endif  // __x86_64__
}

static bool EnableTaggingAbi() {
#  if defined(__x86_64__)
  // Enable x86 LAM tagging for the process.
  //
  // arch_prctl(ARCH_ENABLE_TAGGED_ADDR, bits) enables tagging if the number of
  // tag bits requested by the user does not exceed that provided by the system.
  // arch_prctl(ARCH_GET_UNTAG_MASK, &mask) returns the mask of significant
  // address bits. It is ~0ULL if either LAM is disabled for the process or LAM
  // is not supported by the hardware.
  if (internal_iserror(internal_arch_prctl(ARCH_ENABLE_TAGGED_ADDR, kTagBits)))
    return false;
  unsigned long mask = 0;
  // Make sure the tag bits are where we expect them to be.
  if (internal_iserror(internal_arch_prctl(ARCH_GET_UNTAG_MASK,
                                           reinterpret_cast<uptr>(&mask))))
    return false;
  // @mask has ones for non-tag bits, whereas @kAddressTagMask has ones for tag
  // bits. Therefore these masks must not overlap.
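  // For example, if HWASan keeps tags in the top byte (bits 56..63) and the
  // kernel reports mask == ~0xff00000000000000ULL, the two masks do not
  // overlap and the check below passes (illustrative values; the exact masks
  // depend on the LAM mode the kernel grants).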
  if (mask & kAddressTagMask)
    return false;
  return true;
#  else
  // Enable ARM TBI tagging for the process. If for some reason tagging is not
  // supported, prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE) returns
  // -EINVAL.
  if (internal_iserror(internal_prctl(PR_SET_TAGGED_ADDR_CTRL,
                                      PR_TAGGED_ADDR_ENABLE, 0, 0, 0)))
    return false;
  // Ensure that TBI is enabled.
  if (internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) !=
      PR_TAGGED_ADDR_ENABLE)
    return false;
  return true;
#  endif  // __x86_64__
}

void InitializeOsSupport() {
  // Check we're running on a kernel that can use the tagged address ABI.
  bool has_abi = CanUseTaggingAbi();

  if (!has_abi) {
#  if SANITIZER_ANDROID || defined(HWASAN_ALIASING_MODE)
    // Some older Android kernels have the tagged pointer ABI enabled
    // unconditionally, and hence don't have the tagged-addr prctl while still
    // allowing the ABI.
    // If targeting Android and the prctl is not around we assume this is the
    // case.
    return;
#  else
    MaybeDieIfNoTaggingAbi(
        "HWAddressSanitizer requires a kernel with tagged address ABI.");
#  endif
  }

  if (EnableTaggingAbi())
    return;

#  if SANITIZER_ANDROID
  MaybeDieIfNoTaggingAbi(
      "HWAddressSanitizer failed to enable tagged address syscall ABI.\n"
      "Check the `sysctl abi.tagged_addr_disabled` configuration.");
#  else
  MaybeDieIfNoTaggingAbi(
      "HWAddressSanitizer failed to enable tagged address syscall ABI.\n");
#  endif
}

bool InitShadow() {
  // Define the entire memory range.
  kHighMemEnd = GetHighMemEnd();

  // Determine shadow memory base offset.
  InitializeShadowBaseAddress(MemToShadowSize(kHighMemEnd));

  // Place the low memory first.
  kLowMemEnd = __hwasan_shadow_memory_dynamic_address - 1;
  kLowMemStart = 0;

  // Define the low shadow based on the already placed low memory.
  kLowShadowEnd = MemToShadow(kLowMemEnd);
  kLowShadowStart = __hwasan_shadow_memory_dynamic_address;

  // High shadow takes whatever memory is left up there (making sure it is not
  // interfering with low memory in the fixed case).
  kHighShadowEnd = MemToShadow(kHighMemEnd);
  kHighShadowStart = Max(kLowMemEnd, MemToShadow(kHighShadowEnd)) + 1;

  // High memory starts where allocated shadow allows.
  kHighMemStart = ShadowToMem(kHighShadowStart);
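
  // The resulting layout, from low to high addresses (any gaps between the
  // regions are protected below), is:
  //   [kLowMemStart, kLowMemEnd]          app memory below the shadow
  //   [kLowShadowStart, kLowShadowEnd]    shadow of the low memory
  //   [kHighShadowStart, kHighShadowEnd]  shadow of the high memory
  //   [kHighMemStart, kHighMemEnd]        app memory at the top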

  // Check the sanity of the defined memory ranges (there might be gaps).
  CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0);
  CHECK_GT(kHighMemStart, kHighShadowEnd);
  CHECK_GT(kHighShadowEnd, kHighShadowStart);
  CHECK_GT(kHighShadowStart, kLowMemEnd);
  CHECK_GT(kLowMemEnd, kLowMemStart);
  CHECK_GT(kLowShadowEnd, kLowShadowStart);
  CHECK_GT(kLowShadowStart, kLowMemEnd);

  if (Verbosity())
    PrintAddressSpaceLayout();

  // Reserve shadow memory.
  ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd, "low shadow");
  ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd, "high shadow");

  // Protect all the gaps.
  ProtectGap(0, Min(kLowMemStart, kLowShadowStart));
  if (kLowMemEnd + 1 < kLowShadowStart)
    ProtectGap(kLowMemEnd + 1, kLowShadowStart - kLowMemEnd - 1);
  if (kLowShadowEnd + 1 < kHighShadowStart)
    ProtectGap(kLowShadowEnd + 1, kHighShadowStart - kLowShadowEnd - 1);
  if (kHighShadowEnd + 1 < kHighMemStart)
    ProtectGap(kHighShadowEnd + 1, kHighMemStart - kHighShadowEnd - 1);

  return true;
}

void InitThreads() {
  CHECK(__hwasan_shadow_memory_dynamic_address);
  uptr guard_page_size = GetMmapGranularity();
  uptr thread_space_start =
      __hwasan_shadow_memory_dynamic_address - (1ULL << kShadowBaseAlignment);
  uptr thread_space_end =
      __hwasan_shadow_memory_dynamic_address - guard_page_size;
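  // For instance, if kShadowBaseAlignment is 32, the thread objects occupy the
  // 4 GiB immediately below the shadow base, minus one guard page at the top
  // (illustrative sizing; kShadowBaseAlignment is a platform constant).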
  ReserveShadowMemoryRange(thread_space_start, thread_space_end - 1,
                           "hwasan threads", /*madvise_shadow*/ false);
  ProtectGap(thread_space_end,
             __hwasan_shadow_memory_dynamic_address - thread_space_end);
  InitThreadList(thread_space_start, thread_space_end - thread_space_start);
  hwasanThreadList().CreateCurrentThread();
}

bool MemIsApp(uptr p) {
  // Memory outside the alias range has non-zero tags.
#  if !defined(HWASAN_ALIASING_MODE)
  CHECK(GetTagFromPointer(p) == 0);
#  endif

  return (p >= kHighMemStart && p <= kHighMemEnd) ||
         (p >= kLowMemStart && p <= kLowMemEnd);
}

void InstallAtExitHandler() { atexit(HwasanAtExit); }

// ---------------------- TSD ---------------- {{{1

extern "C" void __hwasan_thread_enter() {
  hwasanThreadList().CreateCurrentThread()->EnsureRandomStateInited();
}

extern "C" void __hwasan_thread_exit() {
  Thread *t = GetCurrentThread();
  // Make sure that the signal handler cannot see a stale current thread
  // pointer.
  atomic_signal_fence(memory_order_seq_cst);
  if (t)
    hwasanThreadList().ReleaseThread(t);
}

#  if HWASAN_WITH_INTERCEPTORS
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;

void HwasanTSDThreadInit() {
  if (tsd_key_inited)
    CHECK_EQ(0, pthread_setspecific(tsd_key,
                                    (void *)GetPthreadDestructorIterations()));
}
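
// Pthread TSD destructors run in multiple rounds (up to
// PTHREAD_DESTRUCTOR_ITERATIONS). HwasanTSDDtor re-arms the key with a
// decremented counter so that it keeps being rescheduled until the final
// round, letting __hwasan_thread_exit run after other TSD destructors that
// may still touch the current thread.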
void HwasanTSDDtor(void *tsd) {
  uptr iterations = (uptr)tsd;
  if (iterations > 1) {
    CHECK_EQ(0, pthread_setspecific(tsd_key, (void *)(iterations - 1)));
    return;
  }
  __hwasan_thread_exit();
}

void HwasanTSDInit() {
  CHECK(!tsd_key_inited);
  tsd_key_inited = true;
  CHECK_EQ(0, pthread_key_create(&tsd_key, HwasanTSDDtor));
}
#  else
void HwasanTSDInit() {}
void HwasanTSDThreadInit() {}
#  endif

#  if SANITIZER_ANDROID
uptr *GetCurrentThreadLongPtr() { return (uptr *)get_android_tls_ptr(); }
#  else
uptr *GetCurrentThreadLongPtr() { return &__hwasan_tls; }
#  endif

#  if SANITIZER_ANDROID
void AndroidTestTlsSlot() {
  uptr kMagicValue = 0x010203040A0B0C0D;
  uptr *tls_ptr = GetCurrentThreadLongPtr();
  uptr old_value = *tls_ptr;
  *tls_ptr = kMagicValue;
  dlerror();
  if (*(uptr *)get_android_tls_ptr() != kMagicValue) {
    Printf(
        "ERROR: Incompatible version of Android: TLS_SLOT_SANITIZER(6) is used "
        "for dlerror().\n");
    Die();
  }
  *tls_ptr = old_value;
}
#  else
void AndroidTestTlsSlot() {}
#  endif

static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
  // Access type is passed in a platform dependent way (see below) and encoded
  // as 0xXY, where X&1 is 1 for store, 0 for load, and X&2 is 1 if the error is
  // recoverable. Valid values of Y are 0 to 4, which are interpreted as
  // log2(access_size), and 0xF, which means that access size is passed via
  // platform dependent register (see below).
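  // For example, 0xXY == 0x14 is a non-recoverable 16-byte store (X & 1 set:
  // store; X & 2 clear: not recoverable; Y == 4: log2(16) bytes), while
  // 0xXY == 0x2F is a recoverable load whose size is passed in a register.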
#  if defined(__aarch64__)
  // Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF,
  // access size is stored in X1 register. Access address is always in X0
  // register.
  uptr pc = (uptr)info->si_addr;
  const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff;
  if ((code & 0xff00) != 0x900)
    return AccessInfo{};  // Not ours.
  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.regs[0];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{};  // Not ours.
  const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log;
#  elif defined(__x86_64__)
  // Access type is encoded in the instruction following INT3 as
  // NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in
  // RSI register. Access address is always in RDI register.
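  // For example, the bytes 0f 1f 40 52 following the INT3 yield code == 0x52,
  // i.e. 0xXY == 0x12: a non-recoverable 4-byte store.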
  uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP];
  uint8_t *nop = (uint8_t *)pc;
  if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 ||
      *(nop + 3) < 0x40)
    return AccessInfo{};  // Not ours.
  const unsigned code = *(nop + 3);
  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.gregs[REG_RDI];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{};  // Not ours.
  const uptr size =
      size_log == 0xf ? uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log;
#  elif SANITIZER_RISCV64
  // Access type is encoded in the instruction following EBREAK as
  // ADDI x0, x0, [0x40 + 0xXY]. For Y == 0xF, access size is stored in
  // X11 register. Access address is always in X10 register.
  uptr pc = (uptr)uc->uc_mcontext.__gregs[REG_PC];
  uint8_t byte1 = *((u8 *)(pc + 0));
  uint8_t byte2 = *((u8 *)(pc + 1));
  uint8_t byte3 = *((u8 *)(pc + 2));
  uint8_t byte4 = *((u8 *)(pc + 3));
  uint32_t ebreak = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
  bool isFaultShort = false;
  bool isEbreak = (ebreak == 0x100073);
  bool isShortEbreak = false;
#    if defined(__riscv_compressed)
  isFaultShort = ((ebreak & 0x3) != 0x3);
  isShortEbreak = ((ebreak & 0xffff) == 0x9002);
#    endif
  // The faulting instruction is not an EBREAK: not our case.
  if (!(isEbreak || isShortEbreak))
    return AccessInfo{};
  // Advance pc to point past the EBREAK and reconstruct the ADDI instruction.
  pc += isFaultShort ? 2 : 4;
  byte1 = *((u8 *)(pc + 0));
  byte2 = *((u8 *)(pc + 1));
  byte3 = *((u8 *)(pc + 2));
  byte4 = *((u8 *)(pc + 3));
  uint32_t instr = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
  // The access code is carried in the ADDI immediate, i.e. the top 12 bits of
  // the 32-bit instruction.
  const unsigned code = (instr >> 20) & 0xffff;
  const uptr addr = uc->uc_mcontext.__gregs[10];
  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{};  // Not our case.
  const uptr size =
      size_log == 0xf ? uc->uc_mcontext.__gregs[11] : 1U << size_log;
#  else
#    error Unsupported architecture
#  endif

  return AccessInfo{addr, size, is_store, !is_store, recover};
}

static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
  AccessInfo ai = GetAccessInfo(info, uc);
  if (!ai.is_store && !ai.is_load)
    return false;

  SignalContext sig{info, uc};
  HandleTagMismatch(ai, StackTrace::GetNextInstructionPc(sig.pc), sig.bp, uc);

#  if defined(__aarch64__)
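  // Advance the PC past the 4-byte BRK so that execution can resume when the
  // fault is recoverable.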
  uc->uc_mcontext.pc += 4;
#  elif defined(__x86_64__)
#  elif SANITIZER_RISCV64
  // pc points to the EBREAK, which may be a compressed (2-byte) C.EBREAK or a
  // full 4-byte EBREAK; determine its size so we can skip over it.
  uint8_t *exception_source = (uint8_t *)(uc->uc_mcontext.__gregs[REG_PC]);
  uint8_t byte1 = (uint8_t)(*(exception_source + 0));
  uint8_t byte2 = (uint8_t)(*(exception_source + 1));
  uint8_t byte3 = (uint8_t)(*(exception_source + 2));
  uint8_t byte4 = (uint8_t)(*(exception_source + 3));
  uint32_t faulted = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
  bool isFaultShort = false;
#    if defined(__riscv_compressed)
  isFaultShort = ((faulted & 0x3) != 0x3);
#    endif
  uc->uc_mcontext.__gregs[REG_PC] += isFaultShort ? 2 : 4;
#  else
#    error Unsupported architecture
#  endif
  return true;
}

static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

void HwasanOnDeadlySignal(int signo, void *info, void *context) {
  // Probably a tag mismatch.
  if (signo == SIGTRAP)
    if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t *)context))
      return;

  HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr);
}

void Thread::InitStackAndTls(const InitState *) {
  uptr tls_size;
  uptr stack_size;
  GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_size, &tls_begin_,
                       &tls_size);
  stack_top_ = stack_bottom_ + stack_size;
  tls_end_ = tls_begin_ + tls_size;
}

uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
  CHECK(IsAligned(p, kShadowAlignment));
  CHECK(IsAligned(size, kShadowAlignment));
  uptr shadow_start = MemToShadow(p);
  uptr shadow_size = MemToShadowSize(size);
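  // One shadow byte covers 2^kShadowScale bytes of application memory; e.g.
  // with 16-byte granules, tagging a 4 KiB region writes 256 shadow bytes
  // (illustrative sizes; the granule size is fixed by the target config).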
  uptr page_size = GetPageSizeCached();
  uptr page_start = RoundUpTo(shadow_start, page_size);
  uptr page_end = RoundDownTo(shadow_start + shadow_size, page_size);
  uptr threshold = common_flags()->clear_shadow_mmap_threshold;
  if (SANITIZER_LINUX &&
      UNLIKELY(page_end >= page_start + threshold && tag == 0)) {
    internal_memset((void *)shadow_start, tag, page_start - shadow_start);
    internal_memset((void *)page_end, tag,
                    shadow_start + shadow_size - page_end);
    // For an anonymous private mapping MADV_DONTNEED will return a zero page on
    // Linux.
    ReleaseMemoryPagesToOSAndZeroFill(page_start, page_end);
  } else {
    internal_memset((void *)shadow_start, tag, shadow_size);
  }
  return AddTagToPointer(p, tag);
}

void HwasanInstallAtForkHandler() {
  auto before = []() {
    HwasanAllocatorLock();
    StackDepotLockAll();
  };
  auto after = []() {
    StackDepotUnlockAll();
    HwasanAllocatorUnlock();
  };
  pthread_atfork(before, after, after);
}

void InstallAtExitCheckLeaks() {
  if (CAN_SANITIZE_LEAKS) {
    if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
      if (flags()->halt_on_error)
        Atexit(__lsan::DoLeakCheck);
      else
        Atexit(__lsan::DoRecoverableLeakCheckVoid);
    }
  }
}

}  // namespace __hwasan

#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD