//===-- hwasan_linux.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file is a part of HWAddressSanitizer and contains Linux-, NetBSD- and
/// FreeBSD-specific code.
///
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD

# include <dlfcn.h>
# include <elf.h>
# include <errno.h>
# include <link.h>
# include <pthread.h>
# include <signal.h>
# include <stdio.h>
# include <stdlib.h>
# include <sys/prctl.h>
# include <sys/resource.h>
# include <sys/time.h>
# include <unistd.h>
# include <unwind.h>

# include "hwasan.h"
# include "hwasan_dynamic_shadow.h"
# include "hwasan_interface_internal.h"
# include "hwasan_mapping.h"
# include "hwasan_report.h"
# include "hwasan_thread.h"
# include "hwasan_thread_list.h"
# include "sanitizer_common/sanitizer_common.h"
# include "sanitizer_common/sanitizer_procmaps.h"
# include "sanitizer_common/sanitizer_stackdepot.h"

// Configurations of HWASAN_WITH_INTERCEPTORS and SANITIZER_ANDROID.
//
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=OFF
//   Not currently tested.
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=ON
//   Integration tests downstream exist.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=OFF
//   Tested with check-hwasan on x86_64-linux.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=ON
//   Tested with check-hwasan on aarch64-linux-android.

# if !SANITIZER_ANDROID
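// On non-Android targets the current thread's HWASan state is reached through
// this interface TLS variable (see GetCurrentThreadLongPtr() below); Android
// uses a dedicated Bionic TLS slot instead.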
SANITIZER_INTERFACE_ATTRIBUTE
THREADLOCAL uptr __hwasan_tls;
# endif

namespace __hwasan {

// With the zero shadow base we can not actually map pages starting from 0.
// This constant is somewhat arbitrary.
constexpr uptr kZeroBaseShadowStart = 0;
constexpr uptr kZeroBaseMaxShadowStart = 1 << 18;

static void ProtectGap(uptr addr, uptr size) {
  __sanitizer::ProtectGap(addr, size, kZeroBaseShadowStart,
                          kZeroBaseMaxShadowStart);
}

uptr kLowMemStart;
uptr kLowMemEnd;
uptr kHighMemStart;
uptr kHighMemEnd;

static void PrintRange(uptr start, uptr end, const char *name) {
  Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name);
}

static void PrintAddressSpaceLayout() {
  PrintRange(kHighMemStart, kHighMemEnd, "HighMem");
  if (kHighShadowEnd + 1 < kHighMemStart)
    PrintRange(kHighShadowEnd + 1, kHighMemStart - 1, "ShadowGap");
  else
    CHECK_EQ(kHighShadowEnd + 1, kHighMemStart);
  PrintRange(kHighShadowStart, kHighShadowEnd, "HighShadow");
  if (kLowShadowEnd + 1 < kHighShadowStart)
    PrintRange(kLowShadowEnd + 1, kHighShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowShadowEnd + 1, kHighShadowStart);
  PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow");
  if (kLowMemEnd + 1 < kLowShadowStart)
    PrintRange(kLowMemEnd + 1, kLowShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowMemEnd + 1, kLowShadowStart);
  PrintRange(kLowMemStart, kLowMemEnd, "LowMem");
  CHECK_EQ(0, kLowMemStart);
}

static uptr GetHighMemEnd() {
  // HighMem covers the upper part of the address space.
  uptr max_address = GetMaxUserVirtualAddress();
  // Adjust max address to make sure that kHighMemEnd and kHighMemStart are
  // properly aligned:
  max_address |= (GetMmapGranularity() << kShadowScale) - 1;
  return max_address;
}

static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
  if (flags()->fixed_shadow_base != (uptr)-1) {
    __hwasan_shadow_memory_dynamic_address = flags()->fixed_shadow_base;
  } else {
    __hwasan_shadow_memory_dynamic_address =
        FindDynamicShadowStart(shadow_size_bytes);
  }
}

static void MaybeDieIfNoTaggingAbi(const char *message) {
  if (!flags()->fail_without_syscall_abi)
    return;
  Printf("FATAL: %s\n", message);
  Die();
}
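
// prctl()/arch_prctl() constants for the tagged-address ABI. They are defined
// locally rather than taken from kernel headers, since older headers may not
// provide them; the ARCH_* values follow the proposed x86 LAM interface
// referenced below.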
# define PR_SET_TAGGED_ADDR_CTRL 55
# define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
# define ARCH_GET_UNTAG_MASK 0x4001
# define ARCH_ENABLE_TAGGED_ADDR 0x4002
# define ARCH_GET_MAX_TAG_BITS 0x4003

static bool CanUseTaggingAbi() {
# if defined(__x86_64__)
  unsigned long num_bits = 0;
  // Check for x86 LAM support. This API is based on a currently unsubmitted
  // patch to the Linux kernel (as of August 2022) and is thus subject to
  // change. The patch is here:
  // https://lore.kernel.org/all/20220815041803.17954-1-kirill.shutemov@linux.intel.com/
  //
  // arch_prctl(ARCH_GET_MAX_TAG_BITS, &bits) returns the maximum number of tag
  // bits the user can request, or zero if LAM is not supported by the hardware.
  if (internal_iserror(internal_arch_prctl(ARCH_GET_MAX_TAG_BITS,
                                           reinterpret_cast<uptr>(&num_bits))))
    return false;
  // The platform must provide enough bits for HWASan tags.
  if (num_bits < kTagBits)
    return false;
  return true;
# else
  // Check for ARM TBI support.
  return !internal_iserror(internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
# endif  // __x86_64__
}

static bool EnableTaggingAbi() {
# if defined(__x86_64__)
  // Enable x86 LAM tagging for the process.
  //
  // arch_prctl(ARCH_ENABLE_TAGGED_ADDR, bits) enables tagging if the number of
  // tag bits requested by the user does not exceed that provided by the system.
  // arch_prctl(ARCH_GET_UNTAG_MASK, &mask) returns the mask of significant
  // address bits. It is ~0ULL if either LAM is disabled for the process or LAM
  // is not supported by the hardware.
  if (internal_iserror(internal_arch_prctl(ARCH_ENABLE_TAGGED_ADDR, kTagBits)))
    return false;
  unsigned long mask = 0;
  // Make sure the tag bits are where we expect them to be.
  if (internal_iserror(internal_arch_prctl(ARCH_GET_UNTAG_MASK,
                                           reinterpret_cast<uptr>(&mask))))
    return false;
  // @mask has ones for non-tag bits, whereas @kAddressTagMask has ones for tag
  // bits. Therefore these masks must not overlap.
  if (mask & kAddressTagMask)
    return false;
  return true;
# else
  // Enable ARM TBI tagging for the process. If for some reason tagging is not
  // supported, prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE) returns
  // -EINVAL.
  if (internal_iserror(internal_prctl(PR_SET_TAGGED_ADDR_CTRL,
                                      PR_TAGGED_ADDR_ENABLE, 0, 0, 0)))
    return false;
  // Ensure that TBI is enabled.
  if (internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) !=
      PR_TAGGED_ADDR_ENABLE)
    return false;
  return true;
# endif  // __x86_64__
}
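
// Probes for, and then enables, the kernel's tagged-address syscall ABI.
// Whether a missing or unusable ABI is fatal is controlled by the
// fail_without_syscall_abi flag (see MaybeDieIfNoTaggingAbi above).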
void InitializeOsSupport() {
  // Check we're running on a kernel that can use the tagged address ABI.
  bool has_abi = CanUseTaggingAbi();

  if (!has_abi) {
# if SANITIZER_ANDROID || defined(HWASAN_ALIASING_MODE)
    // Some older Android kernels have the tagged pointer ABI enabled
    // unconditionally, and hence don't have the tagged-addr prctl while still
    // allowing the ABI. If targeting Android and the prctl is not around, we
    // assume this is the case.
    return;
# else
    MaybeDieIfNoTaggingAbi(
        "HWAddressSanitizer requires a kernel with tagged address ABI.");
# endif
  }

  if (EnableTaggingAbi())
    return;

# if SANITIZER_ANDROID
  MaybeDieIfNoTaggingAbi(
      "HWAddressSanitizer failed to enable tagged address syscall ABI.\n"
      "Check the `sysctl abi.tagged_addr_disabled` configuration.");
# else
  MaybeDieIfNoTaggingAbi(
      "HWAddressSanitizer failed to enable tagged address syscall ABI.\n");
# endif
}

bool InitShadow() {
  // Define the entire memory range.
  kHighMemEnd = GetHighMemEnd();

  // Determine shadow memory base offset.
  InitializeShadowBaseAddress(MemToShadowSize(kHighMemEnd));

  // Place the low memory first.
  kLowMemEnd = __hwasan_shadow_memory_dynamic_address - 1;
  kLowMemStart = 0;

  // Define the low shadow based on the already placed low memory.
  kLowShadowEnd = MemToShadow(kLowMemEnd);
  kLowShadowStart = __hwasan_shadow_memory_dynamic_address;

  // High shadow takes whatever memory is left up there (making sure it is not
  // interfering with low memory in the fixed case).
  kHighShadowEnd = MemToShadow(kHighMemEnd);
  kHighShadowStart = Max(kLowMemEnd, MemToShadow(kHighShadowEnd)) + 1;

  // High memory starts where allocated shadow allows.
  kHighMemStart = ShadowToMem(kHighShadowStart);

  // Check the sanity of the defined memory ranges (there might be gaps).
  CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0);
  CHECK_GT(kHighMemStart, kHighShadowEnd);
  CHECK_GT(kHighShadowEnd, kHighShadowStart);
  CHECK_GT(kHighShadowStart, kLowMemEnd);
  CHECK_GT(kLowMemEnd, kLowMemStart);
  CHECK_GT(kLowShadowEnd, kLowShadowStart);
  CHECK_GT(kLowShadowStart, kLowMemEnd);

  if (Verbosity())
    PrintAddressSpaceLayout();

  // Reserve shadow memory.
  ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd, "low shadow");
  ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd, "high shadow");

  // Protect all the gaps.
  ProtectGap(0, Min(kLowMemStart, kLowShadowStart));
  if (kLowMemEnd + 1 < kLowShadowStart)
    ProtectGap(kLowMemEnd + 1, kLowShadowStart - kLowMemEnd - 1);
  if (kLowShadowEnd + 1 < kHighShadowStart)
    ProtectGap(kLowShadowEnd + 1, kHighShadowStart - kLowShadowEnd - 1);
  if (kHighShadowEnd + 1 < kHighMemStart)
    ProtectGap(kHighShadowEnd + 1, kHighMemStart - kHighShadowEnd - 1);

  return true;
}
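
// The per-thread HWASan state (the "hwasan threads" region) lives immediately
// below the shadow base, separated from it by one inaccessible guard page (see
// the ProtectGap() call below).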
void InitThreads() {
  CHECK(__hwasan_shadow_memory_dynamic_address);
  uptr guard_page_size = GetMmapGranularity();
  uptr thread_space_start =
      __hwasan_shadow_memory_dynamic_address - (1ULL << kShadowBaseAlignment);
  uptr thread_space_end =
      __hwasan_shadow_memory_dynamic_address - guard_page_size;
  ReserveShadowMemoryRange(thread_space_start, thread_space_end - 1,
                           "hwasan threads", /*madvise_shadow*/ false);
  ProtectGap(thread_space_end,
             __hwasan_shadow_memory_dynamic_address - thread_space_end);
  InitThreadList(thread_space_start, thread_space_end - thread_space_start);
  hwasanThreadList().CreateCurrentThread();
}

bool MemIsApp(uptr p) {
  // Memory outside the alias range has non-zero tags.
# if !defined(HWASAN_ALIASING_MODE)
  CHECK_EQ(GetTagFromPointer(p), 0);
# endif

  return (p >= kHighMemStart && p <= kHighMemEnd) ||
         (p >= kLowMemStart && p <= kLowMemEnd);
}

void InstallAtExitHandler() { atexit(HwasanAtExit); }

// ---------------------- TSD ---------------- {{{1

# if HWASAN_WITH_INTERCEPTORS
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;

void HwasanTSDThreadInit() {
  if (tsd_key_inited)
    CHECK_EQ(0, pthread_setspecific(tsd_key,
                                    (void *)GetPthreadDestructorIterations()));
}

void HwasanTSDDtor(void *tsd) {
  uptr iterations = (uptr)tsd;
  if (iterations > 1) {
    CHECK_EQ(0, pthread_setspecific(tsd_key, (void *)(iterations - 1)));
    return;
  }
  __hwasan_thread_exit();
}

void HwasanTSDInit() {
  CHECK(!tsd_key_inited);
  tsd_key_inited = true;
  CHECK_EQ(0, pthread_key_create(&tsd_key, HwasanTSDDtor));
}
# else
void HwasanTSDInit() {}
void HwasanTSDThreadInit() {}
# endif

# if SANITIZER_ANDROID
uptr *GetCurrentThreadLongPtr() { return (uptr *)get_android_tls_ptr(); }
# else
uptr *GetCurrentThreadLongPtr() { return &__hwasan_tls; }
# endif

# if SANITIZER_ANDROID
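// Sanity-check the Bionic TLS slot returned by get_android_tls_ptr(): write a
// magic value into it, call dlerror(), and verify the value survived. This
// catches incompatible Android versions where TLS_SLOT_SANITIZER(6) is
// repurposed for dlerror().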
void AndroidTestTlsSlot() {
  uptr kMagicValue = 0x010203040A0B0C0D;
  uptr *tls_ptr = GetCurrentThreadLongPtr();
  uptr old_value = *tls_ptr;
  *tls_ptr = kMagicValue;
  dlerror();
  if (*(uptr *)get_android_tls_ptr() != kMagicValue) {
    Printf(
        "ERROR: Incompatible version of Android: TLS_SLOT_SANITIZER(6) is used "
        "for dlerror().\n");
    Die();
  }
  *tls_ptr = old_value;
}
# else
void AndroidTestTlsSlot() {}
# endif

static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
  // Access type is passed in a platform dependent way (see below) and encoded
  // as 0xXY, where X&1 is 1 for store, 0 for load, and X&2 is 1 if the error is
  // recoverable. Valid values of Y are 0 to 4, which are interpreted as
  // log2(access_size), and 0xF, which means that access size is passed via
  // platform dependent register (see below).
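  // For example, code 0x13 describes a non-recoverable store of 1 << 3 = 8
  // bytes, while code 0x2f describes a recoverable load whose size is passed
  // in the platform dependent register.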
# if defined(__aarch64__)
  // Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF,
  // access size is stored in X1 register. Access address is always in X0
  // register.
  uptr pc = (uptr)info->si_addr;
  const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff;
  if ((code & 0xff00) != 0x900)
    return AccessInfo{};  // Not ours.
  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.regs[0];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{};  // Not ours.
  const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log;

# elif defined(__x86_64__)
  // Access type is encoded in the instruction following INT3 as
  // NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in
  // RSI register. Access address is always in RDI register.
  uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP];
  uint8_t *nop = (uint8_t *)pc;
  if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 ||
      *(nop + 3) < 0x40)
    return AccessInfo{};  // Not ours.
  const unsigned code = *(nop + 3);
  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.gregs[REG_RDI];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{};  // Not ours.
  const uptr size =
      size_log == 0xf ? uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log;

# elif SANITIZER_RISCV64
  // Access type is encoded in the instruction following EBREAK as
  // ADDI x0, x0, [0x40 + 0xXY]. For Y == 0xF, access size is stored in
  // X11 register. Access address is always in X10 register.
  uptr pc = (uptr)uc->uc_mcontext.__gregs[REG_PC];
  uint8_t byte1 = *((u8 *)(pc + 0));
  uint8_t byte2 = *((u8 *)(pc + 1));
  uint8_t byte3 = *((u8 *)(pc + 2));
  uint8_t byte4 = *((u8 *)(pc + 3));
  uint32_t ebreak = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
  bool isFaultShort = false;
  bool isEbreak = (ebreak == 0x100073);
  bool isShortEbreak = false;
# if defined(__riscv_compressed)
  isFaultShort = ((ebreak & 0x3) != 0x3);
  isShortEbreak = ((ebreak & 0xffff) == 0x9002);
# endif
  // The faulting instruction is not an EBREAK, so this is not our case.
  if (!(isEbreak || isShortEbreak))
    return AccessInfo{};
  // Advance pc past the EBREAK and reconstruct the ADDI instruction.
  pc += isFaultShort ? 2 : 4;
  byte1 = *((u8 *)(pc + 0));
  byte2 = *((u8 *)(pc + 1));
  byte3 = *((u8 *)(pc + 2));
  byte4 = *((u8 *)(pc + 3));
  // Reconstruct the instruction.
  uint32_t instr = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
  // The check code is encoded in the top 12 bits of the (assumed 32-bit)
  // instruction, since it is carried in the ADDI immediate.
  const unsigned code = (instr >> 20) & 0xffff;
  const uptr addr = uc->uc_mcontext.__gregs[10];
  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{};  // Not our case.
  const uptr size =
      size_log == 0xf ? uc->uc_mcontext.__gregs[11] : 1U << size_log;
# else
# error Unsupported architecture
# endif

  return AccessInfo{addr, size, is_store, !is_store, recover};
}

static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
  AccessInfo ai = GetAccessInfo(info, uc);
  if (!ai.is_store && !ai.is_load)
    return false;

  SignalContext sig{info, uc};
  HandleTagMismatch(ai, StackTrace::GetNextInstructionPc(sig.pc), sig.bp, uc);

# if defined(__aarch64__)
  uc->uc_mcontext.pc += 4;
# elif defined(__x86_64__)
# elif SANITIZER_RISCV64
  // pc points to EBREAK which is 2 bytes long
  uint8_t *exception_source = (uint8_t *)(uc->uc_mcontext.__gregs[REG_PC]);
  uint8_t byte1 = (uint8_t)(*(exception_source + 0));
  uint8_t byte2 = (uint8_t)(*(exception_source + 1));
  uint8_t byte3 = (uint8_t)(*(exception_source + 2));
  uint8_t byte4 = (uint8_t)(*(exception_source + 3));
  uint32_t faulted = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
  bool isFaultShort = false;
# if defined(__riscv_compressed)
  isFaultShort = ((faulted & 0x3) != 0x3);
# endif
  uc->uc_mcontext.__gregs[REG_PC] += isFaultShort ? 2 : 4;
# else
# error Unsupported architecture
# endif
  return true;
}

static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

void HwasanOnDeadlySignal(int signo, void *info, void *context) {
  // Probably a tag mismatch.
  if (signo == SIGTRAP)
    if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t *)context))
      return;

  HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr);
}

void Thread::InitStackAndTls(const InitState *) {
  uptr tls_size;
  uptr stack_size;
  GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_size, &tls_begin_,
                       &tls_size);
  stack_top_ = stack_bottom_ + stack_size;
  tls_end_ = tls_begin_ + tls_size;
}
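
// Writes `tag` into the shadow for [p, p + size). For large all-zero regions,
// whole shadow pages are released back to the OS instead of being memset,
// which both zeroes them and drops the backing memory.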
uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
  CHECK(IsAligned(p, kShadowAlignment));
  CHECK(IsAligned(size, kShadowAlignment));
  uptr shadow_start = MemToShadow(p);
  uptr shadow_size = MemToShadowSize(size);

  uptr page_size = GetPageSizeCached();
  uptr page_start = RoundUpTo(shadow_start, page_size);
  uptr page_end = RoundDownTo(shadow_start + shadow_size, page_size);
  uptr threshold = common_flags()->clear_shadow_mmap_threshold;
  if (SANITIZER_LINUX &&
      UNLIKELY(page_end >= page_start + threshold && tag == 0)) {
    internal_memset((void *)shadow_start, tag, page_start - shadow_start);
    internal_memset((void *)page_end, tag,
                    shadow_start + shadow_size - page_end);
    // For an anonymous private mapping MADV_DONTNEED will return a zero page on
    // Linux.
    ReleaseMemoryPagesToOSAndZeroFill(page_start, page_end);
  } else {
    internal_memset((void *)shadow_start, tag, shadow_size);
  }
  return AddTagToPointer(p, tag);
}
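
// fork() handlers: take the global sanitizer locks in the parent before the
// fork and release them afterwards, so the child does not start with locks
// held by threads that no longer exist in it.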
static void BeforeFork() {
  if (CAN_SANITIZE_LEAKS) {
    __lsan::LockGlobal();
  }
  // `_lsan` functions are defined regardless of `CAN_SANITIZE_LEAKS` and lock
  // the stuff we need.
  __lsan::LockThreads();
  __lsan::LockAllocator();
  StackDepotLockBeforeFork();
}

static void AfterFork(bool fork_child) {
  StackDepotUnlockAfterFork(fork_child);
  // `_lsan` functions are defined regardless of `CAN_SANITIZE_LEAKS` and unlock
  // the stuff we need.
  __lsan::UnlockAllocator();
  __lsan::UnlockThreads();
  if (CAN_SANITIZE_LEAKS) {
    __lsan::UnlockGlobal();
  }
}

void HwasanInstallAtForkHandler() {
  pthread_atfork(
      &BeforeFork, []() { AfterFork(/* fork_child= */ false); },
      []() { AfterFork(/* fork_child= */ true); });
}

void InstallAtExitCheckLeaks() {
  if (CAN_SANITIZE_LEAKS) {
    if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
      if (flags()->halt_on_error)
        Atexit(__lsan::DoLeakCheck);
      else
        Atexit(__lsan::DoRecoverableLeakCheckVoid);
    }
  }
}

}  // namespace __hwasan

using namespace __hwasan;

extern "C" void __hwasan_thread_enter() {
  hwasanThreadList().CreateCurrentThread()->EnsureRandomStateInited();
}

extern "C" void __hwasan_thread_exit() {
  Thread *t = GetCurrentThread();
  // Make sure that signal handler can not see a stale current thread pointer.
  atomic_signal_fence(memory_order_seq_cst);
  if (t) {
    // Block async signals on the thread as the handler can be instrumented.
    // After this point instrumented code can't access essential data from TLS
    // and will crash.
    // Bionic already calls __hwasan_thread_exit with blocked signals.
    if (SANITIZER_GLIBC)
      BlockSignals();
    hwasanThreadList().ReleaseThread(t);
  }
}

#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD