//===-- sanitizer_fuchsia.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and other sanitizer
// run-time libraries and implements Fuchsia-specific functions from
// sanitizer_common.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_fuchsia.h"

#if SANITIZER_FUCHSIA
#  include <pthread.h>
#  include <stdlib.h>
#  include <unistd.h>
#  include <zircon/errors.h>
#  include <zircon/process.h>
#  include <zircon/syscalls.h>
#  include <zircon/utc.h>

#  include "sanitizer_common.h"
#  include "sanitizer_interface_internal.h"
#  include "sanitizer_libc.h"
#  include "sanitizer_mutex.h"

namespace __sanitizer {

void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }

uptr internal_sched_yield() {
  zx_status_t status = _zx_thread_legacy_yield(0u);
  CHECK_EQ(status, ZX_OK);
  return 0;  // Why doesn't this return void?
}

void internal_usleep(u64 useconds) {
  zx_status_t status = _zx_nanosleep(_zx_deadline_after(ZX_USEC(useconds)));
  CHECK_EQ(status, ZX_OK);
}

u64 NanoTime() {
  zx_handle_t utc_clock = _zx_utc_reference_get();
  CHECK_NE(utc_clock, ZX_HANDLE_INVALID);
  zx_time_t time;
  zx_status_t status = _zx_clock_read(utc_clock, &time);
  CHECK_EQ(status, ZX_OK);
  return time;
}

u64 MonotonicNanoTime() { return _zx_clock_get_monotonic(); }

uptr internal_getpid() {
  zx_info_handle_basic_t info;
  zx_status_t status =
      _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &info,
                          sizeof(info), NULL, NULL);
  CHECK_EQ(status, ZX_OK);
  uptr pid = static_cast<uptr>(info.koid);
  CHECK_EQ(pid, info.koid);
  return pid;
}

int internal_dlinfo(void *handle, int request, void *p) { UNIMPLEMENTED(); }

uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }

tid_t GetTid() { return GetThreadSelf(); }

void Abort() { abort(); }

int Atexit(void (*function)(void)) { return atexit(function); }

void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
  pthread_attr_t attr;
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  void *base;
  size_t size;
  CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);
  CHECK_EQ(pthread_attr_destroy(&attr), 0);
  *stack_bottom = reinterpret_cast<uptr>(base);
  *stack_top = *stack_bottom + size;
}

void InitializePlatformEarly() {}
void CheckASLR() {}
void CheckMPROTECT() {}
void PlatformPrepareForSandboxing(void *args) {}
void DisableCoreDumperIfNecessary() {}
void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
void SetAlternateSignalStack() {}
void UnsetAlternateSignalStack() {}
void InitTlsSize() {}

bool SignalContext::IsStackOverflow() const { return false; }
void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
const char *SignalContext::Describe() const { UNIMPLEMENTED(); }
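
// Fuchsia futexes map directly onto the zx_futex_* syscalls.  zx_futex_wait()
// returns ZX_ERR_BAD_STATE when *p no longer equals cmp at the time of the
// call; that is the normal lost-wakeup race, not an error.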
void FutexWait(atomic_uint32_t *p, u32 cmp) {
  zx_status_t status = _zx_futex_wait(reinterpret_cast<zx_futex_t *>(p), cmp,
                                      ZX_HANDLE_INVALID, ZX_TIME_INFINITE);
  if (status != ZX_ERR_BAD_STATE)  // Normal race.
    CHECK_EQ(status, ZX_OK);
}

void FutexWake(atomic_uint32_t *p, u32 count) {
  zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(p), count);
  CHECK_EQ(status, ZX_OK);
}

uptr GetPageSize() { return _zx_system_get_page_size(); }

uptr GetMmapGranularity() { return _zx_system_get_page_size(); }

sanitizer_shadow_bounds_t ShadowBounds;

void InitShadowBounds() { ShadowBounds = __sanitizer_shadow_bounds(); }

uptr GetMaxUserVirtualAddress() {
  InitShadowBounds();
  return ShadowBounds.memory_limit - 1;
}

uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }

bool ErrorIsOOM(error_t err) { return err == ZX_ERR_NO_MEMORY; }
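
// Fuchsia has no mmap(); anonymous memory is obtained by creating a VMO
// (virtual memory object) and mapping it into a VMAR (virtual memory address
// region).  Closing the VMO handle after mapping is fine: the mapping itself
// keeps the underlying pages alive.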
static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
                                  bool raw_report, bool die_for_nomem) {
  size = RoundUpTo(size, GetPageSize());

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status,
                              raw_report);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
  uintptr_t addr;
  status =
      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
                   vmo, 0, size, &addr);
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status,
                              raw_report);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  return DoAnonymousMmapOrDie(size, mem_type, raw_report, true);
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type);
}

void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  return DoAnonymousMmapOrDie(size, mem_type, false, false);
}
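
// Address-range reservation is implemented with a child VMAR: allocating a
// VMAR claims the address range without committing any memory, and later
// Map() calls place VMOs inside it at specific offsets.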
uptr ReservedAddressRange::Init(uptr init_size, const char *name,
                                uptr fixed_addr) {
  init_size = RoundUpTo(init_size, GetPageSize());
  DCHECK_EQ(os_handle_, ZX_HANDLE_INVALID);
  uintptr_t base;
  zx_handle_t vmar;
  zx_status_t status = _zx_vmar_allocate(
      _zx_vmar_root_self(),
      ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
      init_size, &vmar, &base);
  if (status != ZX_OK)
    ReportMmapFailureAndDie(init_size, name, "zx_vmar_allocate", status);
  base_ = reinterpret_cast<void *>(base);
  size_ = init_size;
  name_ = name;
  os_handle_ = vmar;

  return reinterpret_cast<uptr>(base_);
}
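
// Maps a fresh VMO at fixed_addr inside the given VMAR.  The target address
// is expressed as an offset from the VMAR's base, which is what
// ZX_VM_SPECIFIC expects.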
static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
                             void *base, const char *name, bool die_for_nomem) {
  uptr offset = fixed_addr - reinterpret_cast<uptr>(base);
  map_size = RoundUpTo(map_size, GetPageSize());
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(map_size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(map_size, name, "zx_vmo_create", status);
    return 0;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, name, internal_strlen(name));
  DCHECK_GE(base + size_, map_size + offset);
  uintptr_t addr;

  status =
      _zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                   offset, vmo, 0, map_size, &addr);
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem) {
      ReportMmapFailureAndDie(map_size, name, "zx_vmar_map", status);
    }
    return 0;
  }

  IncreaseTotalMmap(map_size);

  return addr;
}

uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size,
                               const char *name) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, name_,
                          false);
}

uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size,
                                    const char *name) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, name_, true);
}

void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
  if (!addr || !size)
    return;
  size = RoundUpTo(size, GetPageSize());

  zx_status_t status =
      _zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
  if (status != ZX_OK) {
    Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
           SanitizerToolName, size, size, addr);
    CHECK("unable to unmap" && 0);
  }

  DecreaseTotalMmap(size);
}

void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  CHECK_LE(size, size_);
  const zx_handle_t vmar = static_cast<zx_handle_t>(os_handle_);
  if (addr == reinterpret_cast<uptr>(base_)) {
    if (size == size_) {
      // Destroying the vmar effectively unmaps the whole mapping.
      _zx_vmar_destroy(vmar);
      _zx_handle_close(vmar);
      os_handle_ = static_cast<uptr>(ZX_HANDLE_INVALID);
      DecreaseTotalMmap(size);
      return;
    }
  } else {
    CHECK_EQ(addr + size, reinterpret_cast<uptr>(base_) + size_);
  }
  // Partial unmapping does not affect the fact that the initial range is still
  // reserved, and the resulting unmapped memory can't be reused.
  UnmapOrDieVmar(reinterpret_cast<void *>(addr), size, vmar);
}
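
// Typical usage of ReservedAddressRange, as a sketch (assuming the default
// arguments declared in sanitizer_common.h):
//
//   ReservedAddressRange range;
//   uptr base = range.Init(kReserveSize, "example");  // reserve, no pages
//   range.Map(base, GetPageSize(), "example");        // commit one page
//   range.Unmap(base, kReserveSize);                  // destroy the VMAR
//
// kReserveSize and the "example" name are illustrative only.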

// This should never be called.
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  UNIMPLEMENTED();
}

bool MprotectNoAccess(uptr addr, uptr size) {
  return _zx_vmar_protect(_zx_vmar_root_self(), 0, addr, size) == ZX_OK;
}

bool MprotectReadOnly(uptr addr, uptr size) {
  return _zx_vmar_protect(_zx_vmar_root_self(), ZX_VM_PERM_READ, addr, size) ==
         ZX_OK;
}

void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK_GE(size, GetPageSize());
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status, false);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?

  // Map a larger size to get a chunk of address space big enough that
  // it surely contains an aligned region of the requested size.  Then
  // overwrite the aligned middle portion with a mapping from the
  // beginning of the VMO, and unmap the excess before and after.
  size_t map_size = size + alignment;
  uintptr_t addr;
  status =
      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
                   vmo, 0, map_size, &addr);
  if (status == ZX_OK) {
    uintptr_t map_addr = addr;
    uintptr_t map_end = map_addr + map_size;
    addr = RoundUpTo(map_addr, alignment);
    uintptr_t end = addr + size;
    if (addr != map_addr) {
      zx_info_vmar_t info;
      status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &info,
                                   sizeof(info), NULL, NULL);
      if (status == ZX_OK) {
        uintptr_t new_addr;
        status = _zx_vmar_map(
            _zx_vmar_root_self(),
            ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE,
            addr - info.base, vmo, 0, size, &new_addr);
        if (status == ZX_OK)
          CHECK_EQ(new_addr, addr);
      }
    }
    if (status == ZX_OK && addr != map_addr)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), map_addr, addr - map_addr);
    if (status == ZX_OK && end != map_end)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), end, map_end - end);
  }
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status, false);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}
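
// Address-space layout during the trimming above (sketch):
//
//   map_addr          addr               addr + size            map_end
//      |---unmapped----|--returned region--|-------unmapped--------|
//      <------------- map_size = size + alignment ---------------->
//
// If the initial mapping happens to be aligned already (addr == map_addr),
// the re-mapping and the leading unmap are skipped.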

void UnmapOrDie(void *addr, uptr size) {
  UnmapOrDieVmar(addr, size, _zx_vmar_root_self());
}

void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
  uptr beg_aligned = RoundUpTo(beg, GetPageSize());
  uptr end_aligned = RoundDownTo(end, GetPageSize());
  if (beg_aligned < end_aligned) {
    zx_handle_t root_vmar = _zx_vmar_root_self();
    CHECK_NE(root_vmar, ZX_HANDLE_INVALID);
    zx_status_t status =
        _zx_vmar_op_range(root_vmar, ZX_VMAR_OP_DECOMMIT, beg_aligned,
                          end_aligned - beg_aligned, nullptr, 0);
    CHECK_EQ(status, ZX_OK);
  }
}

void DumpProcessMap() {
  // TODO(mcgrathr): write it
  return;
}
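
// Accessibility is probed by asking the kernel to copy the range into a
// scratch VMO: zx_vmo_write() returns an error instead of faulting the
// process if any page in [beg, beg + size) is unmapped or unreadable.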
bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  // TODO(mcgrathr): Figure out a better way.
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status == ZX_OK) {
    status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size);
    _zx_handle_close(vmo);
  }
  return status == ZX_OK;
}

// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats) {}

bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len, error_t *errno_p) {
  *errno_p = ZX_ERR_NOT_SUPPORTED;
  return false;
}
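
// The Fuchsia debug log is line-oriented, so RawWrite() accumulates output in
// a small per-thread buffer and hands complete lines (or a full buffer) to
// __sanitizer_log_write().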
void RawWrite(const char *buffer) {
  constexpr size_t size = 128;
  static _Thread_local char line[size];
  static _Thread_local size_t lastLineEnd = 0;
  static _Thread_local size_t cur = 0;

  while (*buffer) {
    if (cur >= size) {
      if (lastLineEnd == 0)
        lastLineEnd = size;
      __sanitizer_log_write(line, lastLineEnd);
      internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
      cur = cur - lastLineEnd;
      lastLineEnd = 0;
    }
    if (*buffer == '\n')
      lastLineEnd = cur + 1;
    line[cur++] = *buffer++;
  }
  // Flush all complete lines before returning.
  if (lastLineEnd != 0) {
    __sanitizer_log_write(line, lastLineEnd);
    internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
    cur = cur - lastLineEnd;
    lastLineEnd = 0;
  }
}

void CatastrophicErrorWrite(const char *buffer, uptr length) {
  __sanitizer_log_write(buffer, length);
}

char **StoredArgv;
char **StoredEnviron;

char **GetArgv() { return StoredArgv; }
char **GetEnviron() { return StoredEnviron; }

const char *GetEnv(const char *name) {
  if (StoredEnviron) {
    uptr NameLen = internal_strlen(name);
    for (char **Env = StoredEnviron; *Env != 0; Env++) {
      if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
        return (*Env) + NameLen + 1;
    }
  }
  return nullptr;
}

uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) {
  const char *argv0 = "<UNKNOWN>";
  if (StoredArgv && StoredArgv[0]) {
    argv0 = StoredArgv[0];
  }
  internal_strncpy(buf, argv0, buf_len);
  return internal_strlen(buf);
}

uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}

uptr MainThreadStackBase, MainThreadStackSize;
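
// zx_cprng_draw() always succeeds for requests up to ZX_CPRNG_DRAW_MAX_LEN
// bytes, so the `blocking` flag is irrelevant on Fuchsia.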
bool GetRandom(void *buffer, uptr length, bool blocking) {
  CHECK_LE(length, ZX_CPRNG_DRAW_MAX_LEN);
  _zx_cprng_draw(buffer, length);
  return true;
}

u32 GetNumberOfCPUs() { return zx_system_get_num_cpus(); }

uptr GetRSS() { UNIMPLEMENTED(); }

void *internal_start_thread(void *(*func)(void *arg), void *arg) { return 0; }
void internal_join_thread(void *th) {}

void InitializePlatformCommonFlags(CommonFlags *cf) {}

}  // namespace __sanitizer

using namespace __sanitizer;

extern "C" {
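
// Called by Fuchsia's libc during program startup, before main(), giving the
// sanitizer runtime a chance to record argv/envp and the main thread's stack
// bounds.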
void __sanitizer_startup_hook(int argc, char **argv, char **envp,
                              void *stack_base, size_t stack_size) {
  __sanitizer::StoredArgv = argv;
  __sanitizer::StoredEnviron = envp;
  __sanitizer::MainThreadStackBase = reinterpret_cast<uintptr_t>(stack_base);
  __sanitizer::MainThreadStackSize = stack_size;
}

void __sanitizer_set_report_path(const char *path) {
  // Handle the initialization code in each sanitizer, but no other calls.
  // This setting is never consulted on Fuchsia.
  DCHECK_EQ(path, common_flags()->log_path);
}

void __sanitizer_set_report_fd(void *fd) {
  UNREACHABLE("not available on Fuchsia");
}

const char *__sanitizer_get_report_path() {
  UNREACHABLE("not available on Fuchsia");
}
}  // extern "C"

#endif  // SANITIZER_FUCHSIA