//===-- sanitizer_fuchsia.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and other sanitizer
// run-time libraries and implements Fuchsia-specific functions from
// sanitizer_common.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA

#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>
#include <zircon/errors.h>
#include <zircon/process.h>
#include <zircon/syscalls.h>
#include <zircon/utc.h>

#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"

namespace __sanitizer {

void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }

uptr internal_sched_yield() {
  zx_status_t status = _zx_nanosleep(0);
  CHECK_EQ(status, ZX_OK);
  return 0;  // Why doesn't this return void?
}

void internal_usleep(u64 useconds) {
  zx_status_t status = _zx_nanosleep(_zx_deadline_after(ZX_USEC(useconds)));
  CHECK_EQ(status, ZX_OK);
}

u64 NanoTime() {
  zx_handle_t utc_clock = _zx_utc_reference_get();
  CHECK_NE(utc_clock, ZX_HANDLE_INVALID);
  zx_time_t time;
  zx_status_t status = _zx_clock_read(utc_clock, &time);
  CHECK_EQ(status, ZX_OK);
  return time;
}

u64 MonotonicNanoTime() { return _zx_clock_get_monotonic(); }

uptr internal_getpid() {
  zx_info_handle_basic_t info;
  zx_status_t status =
      _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &info,
                          sizeof(info), NULL, NULL);
  CHECK_EQ(status, ZX_OK);
  uptr pid = static_cast<uptr>(info.koid);
  CHECK_EQ(pid, info.koid);
  return pid;
}

int internal_dlinfo(void *handle, int request, void *p) { UNIMPLEMENTED(); }

uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }

tid_t GetTid() { return GetThreadSelf(); }

void Abort() { abort(); }

int Atexit(void (*function)(void)) { return atexit(function); }

void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
  pthread_attr_t attr;
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  void *base;
  size_t size;
  CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);
  CHECK_EQ(pthread_attr_destroy(&attr), 0);

  *stack_bottom = reinterpret_cast<uptr>(base);
  *stack_top = *stack_bottom + size;
}

void InitializePlatformEarly() {}
void MaybeReexec() {}
void CheckASLR() {}
void CheckMPROTECT() {}
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
void DisableCoreDumperIfNecessary() {}
void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
void SetAlternateSignalStack() {}
void UnsetAlternateSignalStack() {}
void InitTlsSize() {}

bool SignalContext::IsStackOverflow() const { return false; }
void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
const char *SignalContext::Describe() const { UNIMPLEMENTED(); }
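
// Block on the zircon futex at |p| as long as it still holds |cmp|.
// ZX_ERR_BAD_STATE means the value changed before the thread went to sleep,
// which is the expected lost race rather than an error.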
void FutexWait(atomic_uint32_t *p, u32 cmp) {
  zx_status_t status = _zx_futex_wait(reinterpret_cast<zx_futex_t *>(p), cmp,
                                      ZX_HANDLE_INVALID, ZX_TIME_INFINITE);
  if (status != ZX_ERR_BAD_STATE)  // Normal race.
    CHECK_EQ(status, ZX_OK);
}

void FutexWake(atomic_uint32_t *p, u32 count) {
  zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(p), count);
  CHECK_EQ(status, ZX_OK);
}

uptr GetPageSize() { return _zx_system_get_page_size(); }

uptr GetMmapGranularity() { return _zx_system_get_page_size(); }
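
// The shadow layout is queried once from __sanitizer_shadow_bounds();
// memory_limit is the exclusive upper bound of the user address space, so the
// highest usable address is memory_limit - 1.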
sanitizer_shadow_bounds_t ShadowBounds;

void InitShadowBounds() { ShadowBounds = __sanitizer_shadow_bounds(); }

uptr GetMaxUserVirtualAddress() {
  InitShadowBounds();
  return ShadowBounds.memory_limit - 1;
}

uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }
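
// Anonymous mappings are backed by a fresh VMO: create it, name it so it
// shows up in diagnostics, and map it read/write into the root VMAR.  The VMO
// handle can be closed right after mapping; the mapping keeps the pages alive.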
static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
                                  bool raw_report, bool die_for_nomem) {
  size = RoundUpTo(size, GetPageSize());

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status,
                              raw_report);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
  uintptr_t addr;
  status =
      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
                   vmo, 0, size, &addr);
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status,
                              raw_report);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  return DoAnonymousMmapOrDie(size, mem_type, raw_report, true);
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type);
}

void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  return DoAnonymousMmapOrDie(size, mem_type, false, false);
}
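
// Reserve a range of address space by allocating a child VMAR that allows
// ZX_VM_SPECIFIC mappings.  Nothing is mapped until Map()/MapOrDie() is
// called; note that the fixed_addr hint is not used here.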
uptr ReservedAddressRange::Init(uptr init_size, const char *name,
                                uptr fixed_addr) {
  init_size = RoundUpTo(init_size, GetPageSize());
  DCHECK_EQ(os_handle_, ZX_HANDLE_INVALID);
  uintptr_t base;
  zx_handle_t vmar;
  zx_status_t status = _zx_vmar_allocate(
      _zx_vmar_root_self(),
      ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
      init_size, &vmar, &base);
  if (status != ZX_OK)
    ReportMmapFailureAndDie(init_size, name, "zx_vmar_allocate", status);
  base_ = reinterpret_cast<void *>(base);
  size_ = init_size;
  name_ = name;
  os_handle_ = vmar;

  return reinterpret_cast<uptr>(base_);
}
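
// Map a new VMO at a fixed position inside the reserved VMAR.  The offset is
// computed relative to the VMAR's base, and ZX_VM_SPECIFIC pins the mapping
// to exactly that spot.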
static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
                             void *base, const char *name, bool die_for_nomem) {
  uptr offset = fixed_addr - reinterpret_cast<uptr>(base);
  map_size = RoundUpTo(map_size, GetPageSize());
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(map_size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(map_size, name, "zx_vmo_create", status);
    return 0;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, name, internal_strlen(name));
  DCHECK_GE(base + size_, map_size + offset);
  uintptr_t addr;

  status =
      _zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                   offset, vmo, 0, map_size, &addr);
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem) {
      ReportMmapFailureAndDie(map_size, name, "zx_vmar_map", status);
    }
    return 0;
  }
  IncreaseTotalMmap(map_size);

  return addr;
}

uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size,
                               const char *name) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, name_,
                          false);
}

uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size,
                                    const char *name) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, name_, true);
}
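
// Unmap pages from the given VMAR, aborting loudly if the kernel rejects the
// request.  Sizes are rounded up to whole pages, matching how they were
// mapped.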
void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
  if (!addr || !size)
    return;
  size = RoundUpTo(size, GetPageSize());

  zx_status_t status =
      _zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
  if (status != ZX_OK) {
    Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
           SanitizerToolName, size, size, addr);
    CHECK("unable to unmap" && 0);
  }

  DecreaseTotalMmap(size);
}
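
// Unmapping the whole range destroys the backing VMAR and drops the
// reservation; partial unmaps are only allowed at the start or end of the
// range and leave the remaining address space reserved.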
void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  CHECK_LE(size, size_);
  const zx_handle_t vmar = static_cast<zx_handle_t>(os_handle_);
  if (addr == reinterpret_cast<uptr>(base_)) {
    if (size == size_) {
      // Destroying the vmar effectively unmaps the whole mapping.
      _zx_vmar_destroy(vmar);
      _zx_handle_close(vmar);
      os_handle_ = static_cast<uptr>(ZX_HANDLE_INVALID);
      DecreaseTotalMmap(size);
      return;
    }
  } else {
    CHECK_EQ(addr + size, reinterpret_cast<uptr>(base_) + size_);
  }
  // Partial unmapping does not affect the fact that the initial range is still
  // reserved, and the resulting unmapped memory can't be reused.
  UnmapOrDieVmar(reinterpret_cast<void *>(addr), size, vmar);
}

// This should never be called.
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  UNIMPLEMENTED();
}
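
// zx_vmar_protect with an empty permission set revokes all access to the
// range; passing only ZX_VM_PERM_READ makes it read-only.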
bool MprotectNoAccess(uptr addr, uptr size) {
  return _zx_vmar_protect(_zx_vmar_root_self(), 0, addr, size) == ZX_OK;
}

bool MprotectReadOnly(uptr addr, uptr size) {
  return _zx_vmar_protect(_zx_vmar_root_self(), ZX_VM_PERM_READ, addr, size) ==
         ZX_OK;
}

void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK_GE(size, GetPageSize());
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status, false);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?

  // Map a larger size to get a chunk of address space big enough that
  // it surely contains an aligned region of the requested size.  Then
  // overwrite the aligned middle portion with a mapping from the
  // beginning of the VMO, and unmap the excess before and after.
  size_t map_size = size + alignment;
  uintptr_t addr;
  status =
      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
                   vmo, 0, map_size, &addr);
  if (status == ZX_OK) {
    uintptr_t map_addr = addr;
    uintptr_t map_end = map_addr + map_size;
    addr = RoundUpTo(map_addr, alignment);
    uintptr_t end = addr + size;
    if (addr != map_addr) {
      zx_info_vmar_t info;
      status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &info,
                                   sizeof(info), NULL, NULL);
      if (status == ZX_OK) {
        uintptr_t new_addr;
        status = _zx_vmar_map(
            _zx_vmar_root_self(),
            ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE,
            addr - info.base, vmo, 0, size, &new_addr);
        if (status == ZX_OK)
          CHECK_EQ(new_addr, addr);
      }
    }
    if (status == ZX_OK && addr != map_addr)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), map_addr, addr - map_addr);
    if (status == ZX_OK && end != map_end)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), end, map_end - end);
  }
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status, false);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void UnmapOrDie(void *addr, uptr size) {
  UnmapOrDieVmar(addr, size, _zx_vmar_root_self());
}

void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
  uptr beg_aligned = RoundUpTo(beg, GetPageSize());
  uptr end_aligned = RoundDownTo(end, GetPageSize());
  if (beg_aligned < end_aligned) {
    zx_handle_t root_vmar = _zx_vmar_root_self();
    CHECK_NE(root_vmar, ZX_HANDLE_INVALID);
    zx_status_t status =
        _zx_vmar_op_range(root_vmar, ZX_VMAR_OP_DECOMMIT, beg_aligned,
                          end_aligned - beg_aligned, nullptr, 0);
    CHECK_EQ(status, ZX_OK);
  }
}

void DumpProcessMap() {
  // TODO(mcgrathr): write it
  return;
}
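
// Probe readability by asking the kernel to copy the range into a scratch
// VMO: zx_vmo_write reads from the caller's memory and returns an error
// instead of faulting if any part of [beg, beg + size) is inaccessible.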
bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  // TODO(mcgrathr): Figure out a better way.
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status == ZX_OK) {
    status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size);
    _zx_handle_close(vmo);
  }
  return status == ZX_OK;
}

// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats) {}
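
// There is no conventional file I/O at this level on Fuchsia.  Requests such
// as reading the options file are served from a configuration VMO looked up
// by name via __sanitizer_get_configuration and mapped read-only; the mapping
// itself is handed back to the caller in *buff.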
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len, error_t *errno_p) {
  zx_handle_t vmo;
  zx_status_t status = __sanitizer_get_configuration(file_name, &vmo);
  if (status == ZX_OK) {
    uint64_t vmo_size;
    status = _zx_vmo_get_size(vmo, &vmo_size);
    if (status == ZX_OK) {
      if (vmo_size < max_len)
        max_len = vmo_size;
      size_t map_size = RoundUpTo(max_len, GetPageSize());
      uintptr_t addr;
      status = _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0,
                            map_size, &addr);
      if (status == ZX_OK) {
        *buff = reinterpret_cast<char *>(addr);
        *buff_size = map_size;
        *read_len = max_len;
      }
    }
    _zx_handle_close(vmo);
  }
  if (status != ZX_OK && errno_p)
    *errno_p = status;
  return status == ZX_OK;
}
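
// Output is accumulated in a small per-thread buffer and forwarded to
// __sanitizer_log_write whenever a complete line (or a full buffer) is
// available; an unterminated tail stays buffered for the next call.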
void RawWrite(const char *buffer) {
  constexpr size_t size = 128;
  static _Thread_local char line[size];
  static _Thread_local size_t lastLineEnd = 0;
  static _Thread_local size_t cur = 0;

  while (*buffer) {
    if (cur >= size) {
      if (lastLineEnd == 0)
        lastLineEnd = size;
      __sanitizer_log_write(line, lastLineEnd);
      internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
      cur = cur - lastLineEnd;
      lastLineEnd = 0;
    }
    if (*buffer == '\n')
      lastLineEnd = cur + 1;
    line[cur++] = *buffer++;
  }
  // Flush all complete lines before returning.
  if (lastLineEnd != 0) {
    __sanitizer_log_write(line, lastLineEnd);
    internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
    cur = cur - lastLineEnd;
    lastLineEnd = 0;
  }
}

void CatastrophicErrorWrite(const char *buffer, uptr length) {
  __sanitizer_log_write(buffer, length);
}

char **StoredArgv;
char **StoredEnviron;

char **GetArgv() { return StoredArgv; }
char **GetEnviron() { return StoredEnviron; }

const char *GetEnv(const char *name) {
  if (StoredEnviron) {
    uptr NameLen = internal_strlen(name);
    for (char **Env = StoredEnviron; *Env != 0; Env++) {
      if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
        return (*Env) + NameLen + 1;
    }
  }
  return nullptr;
}

uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) {
  const char *argv0 = "<UNKNOWN>";
  if (StoredArgv && StoredArgv[0]) {
    argv0 = StoredArgv[0];
  }
  internal_strncpy(buf, argv0, buf_len);
  return internal_strlen(buf);
}

uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}

uptr MainThreadStackBase, MainThreadStackSize;

bool GetRandom(void *buffer, uptr length, bool blocking) {
  CHECK_LE(length, ZX_CPRNG_DRAW_MAX_LEN);
  _zx_cprng_draw(buffer, length);
  return true;
}

u32 GetNumberOfCPUs() { return zx_system_get_num_cpus(); }

uptr GetRSS() { UNIMPLEMENTED(); }

void *internal_start_thread(void *(*func)(void *arg), void *arg) { return 0; }
void internal_join_thread(void *th) {}

void InitializePlatformCommonFlags(CommonFlags *cf) {}

}  // namespace __sanitizer

using namespace __sanitizer;
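
// These C entry points are part of the Fuchsia sanitizer runtime interface;
// the startup hook below is invoked early in process startup, before main(),
// with the process's argv, environment, and main-thread stack bounds.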
extern "C" {
void __sanitizer_startup_hook(int argc, char **argv, char **envp,
                              void *stack_base, size_t stack_size) {
  __sanitizer::StoredArgv = argv;
  __sanitizer::StoredEnviron = envp;
  __sanitizer::MainThreadStackBase = reinterpret_cast<uintptr_t>(stack_base);
  __sanitizer::MainThreadStackSize = stack_size;
}

void __sanitizer_set_report_path(const char *path) {
  // Handle the initialization code in each sanitizer, but no other calls.
  // This setting is never consulted on Fuchsia.
  DCHECK_EQ(path, common_flags()->log_path);
}

void __sanitizer_set_report_fd(void *fd) {
  UNREACHABLE("not available on Fuchsia");
}

const char *__sanitizer_get_report_path() {
  UNREACHABLE("not available on Fuchsia");
}
}  // extern "C"

#endif  // SANITIZER_FUCHSIA