//===-- sanitizer_fuchsia.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and other sanitizer
// run-time libraries and implements Fuchsia-specific functions from
// sanitizer_common.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA

#  include <pthread.h>
#  include <stdlib.h>
#  include <unistd.h>
#  include <zircon/errors.h>
#  include <zircon/process.h>
#  include <zircon/syscalls.h>
#  include <zircon/utc.h>

#  include "sanitizer_common.h"
#  include "sanitizer_interface_internal.h"
#  include "sanitizer_libc.h"
#  include "sanitizer_mutex.h"

namespace __sanitizer {
void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }

uptr internal_sched_yield() {
  zx_status_t status = _zx_thread_legacy_yield(0u);
  CHECK_EQ(status, ZX_OK);
  return 0;  // Why doesn't this return void?
}

void internal_usleep(u64 useconds) {
  zx_status_t status = _zx_nanosleep(_zx_deadline_after(ZX_USEC(useconds)));
  CHECK_EQ(status, ZX_OK);
}

u64 NanoTime() {
  zx_handle_t utc_clock = _zx_utc_reference_get();
  CHECK_NE(utc_clock, ZX_HANDLE_INVALID);
  zx_time_t time;
  zx_status_t status = _zx_clock_read(utc_clock, &time);
  CHECK_EQ(status, ZX_OK);
  return time;
}

u64 MonotonicNanoTime() { return _zx_clock_get_monotonic(); }

uptr internal_getpid() {
  zx_info_handle_basic_t info;
  zx_status_t status =
      _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &info,
                          sizeof(info), NULL, NULL);
  CHECK_EQ(status, ZX_OK);
  uptr pid = static_cast<uptr>(info.koid);
  CHECK_EQ(pid, info.koid);
  return pid;
}

int internal_dlinfo(void *handle, int request, void *p) { UNIMPLEMENTED(); }

uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }

tid_t GetTid() { return GetThreadSelf(); }

void Abort() { abort(); }

int Atexit(void (*function)(void)) { return atexit(function); }

void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
  pthread_attr_t attr;
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  void *base;
  size_t size;
  CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);
  CHECK_EQ(pthread_attr_destroy(&attr), 0);
  *stack_bottom = reinterpret_cast<uptr>(base);
  *stack_top = *stack_bottom + size;
}

void InitializePlatformEarly() {}
void CheckASLR() {}
void CheckMPROTECT() {}
void PlatformPrepareForSandboxing(void *args) {}
void DisableCoreDumperIfNecessary() {}
void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
void SetAlternateSignalStack() {}
void UnsetAlternateSignalStack() {}
void InitTlsSize() {}

bool SignalContext::IsStackOverflow() const { return false; }
void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
const char *SignalContext::Describe() const { UNIMPLEMENTED(); }

void FutexWait(atomic_uint32_t *p, u32 cmp) {
  zx_status_t status = _zx_futex_wait(reinterpret_cast<zx_futex_t *>(p), cmp,
                                      ZX_HANDLE_INVALID, ZX_TIME_INFINITE);
  if (status != ZX_ERR_BAD_STATE)  // Normal race.
    CHECK_EQ(status, ZX_OK);
}

void FutexWake(atomic_uint32_t *p, u32 count) {
  zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(p), count);
  CHECK_EQ(status, ZX_OK);
}

uptr GetPageSize() { return _zx_system_get_page_size(); }

uptr GetMmapGranularity() { return _zx_system_get_page_size(); }

sanitizer_shadow_bounds_t ShadowBounds;

void InitShadowBounds() { ShadowBounds = __sanitizer_shadow_bounds(); }

uptr GetMaxUserVirtualAddress() {
  InitShadowBounds();
  return ShadowBounds.memory_limit - 1;
}

uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }

bool ErrorIsOOM(error_t err) { return err == ZX_ERR_NO_MEMORY; }

// For any sanitizer internal that needs to map something which can be unmapped
// later, first attempt to map to a pre-allocated VMAR. This helps reduce
// fragmentation from many small anonymous mmap calls. A good value for this
// VMAR size would be the total size of your typical sanitizer internal objects
// allocated in an "average" process lifetime. Examples of this include:
// FakeStack, LowLevelAllocator mappings, TwoLevelMap, InternalMmapVector,
// StackStore, CreateAsanThread, etc.
//
// This is roughly equal to the total sum of sanitizer internal mappings for a
// large test case.
constexpr size_t kSanitizerHeapVmarSize = 13ULL << 20;

static zx_handle_t gSanitizerHeapVmar = ZX_HANDLE_INVALID;
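
// Lazily allocates the pre-reserved sanitizer heap VMAR on first use and
// caches its handle in gSanitizerHeapVmar; subsequent calls return the cached
// handle.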
static zx_status_t GetSanitizerHeapVmar(zx_handle_t *vmar) {
  zx_status_t status = ZX_OK;
  if (gSanitizerHeapVmar == ZX_HANDLE_INVALID) {
    CHECK_EQ(kSanitizerHeapVmarSize % GetPageSizeCached(), 0);
    uintptr_t base;
    status = _zx_vmar_allocate(
        _zx_vmar_root_self(),
        ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
        kSanitizerHeapVmarSize, &gSanitizerHeapVmar, &base);
  }
  *vmar = gSanitizerHeapVmar;
  if (status == ZX_OK)
    CHECK_NE(gSanitizerHeapVmar, ZX_HANDLE_INVALID);
  return status;
}
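
// Maps |vmo| into the sanitizer heap VMAR; if the heap VMAR cannot fit the
// mapping (ZX_ERR_NO_RESOURCES or ZX_ERR_INVALID_ARGS), retries against the
// root VMAR. When |vmar_used| is non-null it receives the VMAR the mapping
// actually landed in, so callers can unmap or remap against the right handle.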
static zx_status_t TryVmoMapSanitizerVmar(zx_vm_option_t options,
                                          size_t vmar_offset, zx_handle_t vmo,
                                          size_t size, uintptr_t *addr,
                                          zx_handle_t *vmar_used = nullptr) {
  zx_handle_t vmar;
  zx_status_t status = GetSanitizerHeapVmar(&vmar);
  if (status != ZX_OK)
    return status;

  status = _zx_vmar_map(gSanitizerHeapVmar, options, vmar_offset, vmo,
                        /*vmo_offset=*/0, size, addr);
  if (vmar_used)
    *vmar_used = gSanitizerHeapVmar;
  if (status == ZX_ERR_NO_RESOURCES || status == ZX_ERR_INVALID_ARGS) {
    // This means there's no space in the heap VMAR, so fallback to the root
    // VMAR.
    status = _zx_vmar_map(_zx_vmar_root_self(), options, vmar_offset, vmo,
                          /*vmo_offset=*/0, size, addr);
    if (vmar_used)
      *vmar_used = _zx_vmar_root_self();
  }
  return status;
}
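
// Creates an anonymous VMO of |size| bytes (rounded up to a page), names it
// |mem_type| for diagnostics, and maps it read/write via
// TryVmoMapSanitizerVmar. The VMO handle is closed once the mapping exists;
// the mapping itself keeps the pages alive.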
static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
                                  bool raw_report, bool die_for_nomem) {
  size = RoundUpTo(size, GetPageSize());

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status,
                              raw_report);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  uintptr_t addr;
  status = TryVmoMapSanitizerVmar(ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                                  /*vmar_offset=*/0, vmo, size, &addr);
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status,
                              raw_report);
    return nullptr;
  }

  IncreaseTotalMmap(size);
  return reinterpret_cast<void *>(addr);
}

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  return DoAnonymousMmapOrDie(size, mem_type, raw_report, true);
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type);
}

void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  return DoAnonymousMmapOrDie(size, mem_type, false, false);
}
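
// Reserves |init_size| bytes of address space by allocating a child VMAR from
// the root VMAR; mappings are placed inside the reservation later via Map()
// and MapOrDie().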
uptr ReservedAddressRange::Init(uptr init_size, const char *name,
                                uptr fixed_addr) {
  init_size = RoundUpTo(init_size, GetPageSize());
  DCHECK_EQ(os_handle_, ZX_HANDLE_INVALID);
  uintptr_t base;
  zx_handle_t vmar;
  zx_status_t status = _zx_vmar_allocate(
      _zx_vmar_root_self(),
      ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
      init_size, &vmar, &base);
  if (status != ZX_OK)
    ReportMmapFailureAndDie(init_size, name, "zx_vmar_allocate", status);
  base_ = reinterpret_cast<void *>(base);
  size_ = init_size;
  name_ = name;
  os_handle_ = vmar;
  return reinterpret_cast<uptr>(base_);
}

static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
                             void *base, const char *name, bool die_for_nomem) {
  uptr offset = fixed_addr - reinterpret_cast<uptr>(base);
  map_size = RoundUpTo(map_size, GetPageSize());
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(map_size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(map_size, name, "zx_vmo_create", status);
    return 0;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, name, internal_strlen(name));
  DCHECK_GE(base + size_, map_size + offset);
  uintptr_t addr;

  status =
      _zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                   offset, vmo, 0, map_size, &addr);
  _zx_handle_close(vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem) {
      ReportMmapFailureAndDie(map_size, name, "zx_vmar_map", status);
    }
    return 0;
  }
  IncreaseTotalMmap(map_size);
  return addr;
}

uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size,
                               const char *name) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
                          name ? name : name_, false);
}

uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size,
                                    const char *name) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
                          name ? name : name_, true);
}

void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
  if (!addr || !size)
    return;
  size = RoundUpTo(size, GetPageSize());

  zx_status_t status =
      _zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
  if (status == ZX_ERR_INVALID_ARGS && target_vmar == gSanitizerHeapVmar) {
    // If there wasn't any space in the heap vmar, the fallback was the root
    // vmar.
    status = _zx_vmar_unmap(_zx_vmar_root_self(),
                            reinterpret_cast<uintptr_t>(addr), size);
  }
  if (status != ZX_OK) {
    Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
           SanitizerToolName, size, size, addr);
    CHECK("unable to unmap" && 0);
  }

  DecreaseTotalMmap(size);
}

void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  CHECK_LE(size, size_);
  const zx_handle_t vmar = static_cast<zx_handle_t>(os_handle_);
  if (addr == reinterpret_cast<uptr>(base_)) {
    if (size == size_) {
      // Destroying the vmar effectively unmaps the whole mapping.
      _zx_vmar_destroy(vmar);
      _zx_handle_close(vmar);
      os_handle_ = static_cast<uptr>(ZX_HANDLE_INVALID);
      DecreaseTotalMmap(size);
      return;
    }
  } else {
    CHECK_EQ(addr + size, reinterpret_cast<uptr>(base_) + size_);
  }
  // Partial unmapping does not affect the fact that the initial range is still
  // reserved, and the resulting unmapped memory can't be reused.
  UnmapOrDieVmar(reinterpret_cast<void *>(addr), size, vmar);
}

// This should never be called.
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  UNIMPLEMENTED();
}

bool MprotectNoAccess(uptr addr, uptr size) {
  return _zx_vmar_protect(_zx_vmar_root_self(), 0, addr, size) == ZX_OK;
}

bool MprotectReadOnly(uptr addr, uptr size) {
  return _zx_vmar_protect(_zx_vmar_root_self(), ZX_VM_PERM_READ, addr, size) ==
         ZX_OK;
}

bool MprotectReadWrite(uptr addr, uptr size) {
  return _zx_vmar_protect(_zx_vmar_root_self(),
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, addr,
                          size) == ZX_OK;
}

void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK_GE(size, GetPageSize());
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status, false);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // Map a larger size to get a chunk of address space big enough that
  // it surely contains an aligned region of the requested size. Then
  // overwrite the aligned middle portion with a mapping from the
  // beginning of the VMO, and unmap the excess before and after.
  size_t map_size = size + alignment;
  uintptr_t addr;
  zx_handle_t vmar_used;
  status = TryVmoMapSanitizerVmar(ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                                  /*vmar_offset=*/0, vmo, map_size, &addr,
                                  &vmar_used);
  if (status == ZX_OK) {
    uintptr_t map_addr = addr;
    uintptr_t map_end = map_addr + map_size;
    addr = RoundUpTo(map_addr, alignment);
    uintptr_t end = addr + size;
    if (addr != map_addr) {
      zx_info_vmar_t info;
      status = _zx_object_get_info(vmar_used, ZX_INFO_VMAR, &info, sizeof(info),
                                   NULL, NULL);
      if (status == ZX_OK) {
        uintptr_t new_addr;
        status = _zx_vmar_map(
            vmar_used,
            ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE,
            addr - info.base, vmo, 0, size, &new_addr);
        if (status == ZX_OK)
          CHECK_EQ(new_addr, addr);
      }
    }
    if (status == ZX_OK && addr != map_addr)
      status = _zx_vmar_unmap(vmar_used, map_addr, addr - map_addr);
    if (status == ZX_OK && end != map_end)
      status = _zx_vmar_unmap(vmar_used, end, map_end - end);
  }
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status, false);
    return nullptr;
  }

  IncreaseTotalMmap(size);
  return reinterpret_cast<void *>(addr);
}

void UnmapOrDie(void *addr, uptr size) {
  UnmapOrDieVmar(addr, size, gSanitizerHeapVmar);
}

void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
  uptr beg_aligned = RoundUpTo(beg, GetPageSize());
  uptr end_aligned = RoundDownTo(end, GetPageSize());
  if (beg_aligned < end_aligned) {
    zx_handle_t root_vmar = _zx_vmar_root_self();
    CHECK_NE(root_vmar, ZX_HANDLE_INVALID);
    zx_status_t status =
        _zx_vmar_op_range(root_vmar, ZX_VMAR_OP_DECOMMIT, beg_aligned,
                          end_aligned - beg_aligned, nullptr, 0);
    CHECK_EQ(status, ZX_OK);
  }
}

void DumpProcessMap() {
  // TODO(mcgrathr): write it
  return;
}

bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  // TODO(mcgrathr): Figure out a better way.
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status == ZX_OK) {
    status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size);
    _zx_handle_close(vmo);
  }
  return status == ZX_OK;
}

// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats) {}

bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len, error_t *errno_p) {
  *errno_p = ZX_ERR_NOT_SUPPORTED;
  return false;
}
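
// Streams sanitizer output to the Fuchsia debug log via __sanitizer_log_write,
// accumulating bytes in a 128-byte thread-local buffer and flushing at line
// boundaries (or the whole buffer when a single line overflows it).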
void RawWrite(const char *buffer) {
  constexpr size_t size = 128;
  static _Thread_local char line[size];
  static _Thread_local size_t lastLineEnd = 0;
  static _Thread_local size_t cur = 0;

  while (*buffer) {
    if (cur >= size) {
      if (lastLineEnd == 0)
        lastLineEnd = size;
      __sanitizer_log_write(line, lastLineEnd);
      internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
      cur = cur - lastLineEnd;
      lastLineEnd = 0;
    }
    if (*buffer == '\n')
      lastLineEnd = cur + 1;
    line[cur++] = *buffer++;
  }
  // Flush all complete lines before returning.
  if (lastLineEnd != 0) {
    __sanitizer_log_write(line, lastLineEnd);
    internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
    cur = cur - lastLineEnd;
    lastLineEnd = 0;
  }
}

void CatastrophicErrorWrite(const char *buffer, uptr length) {
  __sanitizer_log_write(buffer, length);
}

char **StoredArgv;
char **StoredEnviron;

char **GetArgv() { return StoredArgv; }
char **GetEnviron() { return StoredEnviron; }

const char *GetEnv(const char *name) {
  if (StoredEnviron) {
    uptr NameLen = internal_strlen(name);
    for (char **Env = StoredEnviron; *Env != 0; Env++) {
      if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
        return (*Env) + NameLen + 1;
    }
  }
  return nullptr;
}

uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) {
  const char *argv0 = "<UNKNOWN>";
  if (StoredArgv && StoredArgv[0]) {
    argv0 = StoredArgv[0];
  }
  internal_strncpy(buf, argv0, buf_len);
  return internal_strlen(buf);
}

uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}

uptr MainThreadStackBase, MainThreadStackSize;

bool GetRandom(void *buffer, uptr length, bool blocking) {
  CHECK_LE(length, ZX_CPRNG_DRAW_MAX_LEN);
  _zx_cprng_draw(buffer, length);
  return true;
}

u32 GetNumberOfCPUs() { return zx_system_get_num_cpus(); }

uptr GetRSS() { UNIMPLEMENTED(); }

void *internal_start_thread(void *(*func)(void *arg), void *arg) { return 0; }
void internal_join_thread(void *th) {}

void InitializePlatformCommonFlags(CommonFlags *cf) {}

}  // namespace __sanitizer

using namespace __sanitizer;

extern "C" {
void __sanitizer_startup_hook(int argc, char **argv, char **envp,
                              void *stack_base, size_t stack_size) {
  __sanitizer::StoredArgv = argv;
  __sanitizer::StoredEnviron = envp;
  __sanitizer::MainThreadStackBase = reinterpret_cast<uintptr_t>(stack_base);
  __sanitizer::MainThreadStackSize = stack_size;
}

void __sanitizer_set_report_path(const char *path) {
  // Handle the initialization code in each sanitizer, but no other calls.
  // This setting is never consulted on Fuchsia.
  DCHECK_EQ(path, common_flags()->log_path);
}

void __sanitizer_set_report_fd(void *fd) {
  UNREACHABLE("not available on Fuchsia");
}

const char *__sanitizer_get_report_path() {
  UNREACHABLE("not available on Fuchsia");
}
}  // extern "C"

#endif  // SANITIZER_FUCHSIA