  1. //===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file is shared between run-time libraries of sanitizers.
  10. //
  11. // It declares common functions and classes that are used in both runtimes.
  12. // Implementation of some functions are provided in sanitizer_common, while
  13. // others must be defined by run-time library itself.
  14. //===----------------------------------------------------------------------===//
  15. #ifndef SANITIZER_COMMON_H
  16. #define SANITIZER_COMMON_H
  17. #include "sanitizer_flags.h"
  18. #include "sanitizer_internal_defs.h"
  19. #include "sanitizer_libc.h"
  20. #include "sanitizer_list.h"
  21. #include "sanitizer_mutex.h"
  22. #if defined(_MSC_VER) && !defined(__clang__)
  23. extern "C" void _ReadWriteBarrier();
  24. #pragma intrinsic(_ReadWriteBarrier)
  25. #endif
  26. namespace __sanitizer {
  27. struct AddressInfo;
  28. struct BufferedStackTrace;
  29. struct SignalContext;
  30. struct StackTrace;
  31. struct SymbolizedStack;
  32. // Constants.
  33. const uptr kWordSize = SANITIZER_WORDSIZE / 8;
  34. const uptr kWordSizeInBits = 8 * kWordSize;
  35. const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE;
  36. const uptr kMaxPathLength = 4096;
  37. const uptr kMaxThreadStackSize = 1 << 30; // 1Gb
  38. const uptr kErrorMessageBufferSize = 1 << 16;
  39. // Denotes fake PC values that come from JIT/JAVA/etc.
  40. // For such PC values __tsan_symbolize_external_ex() will be called.
  41. const u64 kExternalPCBit = 1ULL << 60;
  42. extern const char *SanitizerToolName; // Can be changed by the tool.
  43. extern atomic_uint32_t current_verbosity;
// Global verbosity level shared by all sanitizer runtimes. Stored in an
// atomic so it can be read and written from any thread; relaxed ordering is
// enough because only the value itself matters, not synchronization.
inline void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
inline int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}
#if SANITIZER_ANDROID
inline uptr GetPageSize() {
  // Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array,
  // so the page size is hard-coded instead of queried from the OS.
  return 4096;
}
inline uptr GetPageSizeCached() {
  return 4096;
}
#else
uptr GetPageSize();
extern uptr PageSizeCached;
// Returns the OS page size, querying it at most once and caching the result
// in PageSizeCached. NOTE(review): the unsynchronized check-then-store is
// benign only because every racing writer stores the same value.
inline uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
#endif
  67. uptr GetMmapGranularity();
  68. uptr GetMaxVirtualAddress();
  69. uptr GetMaxUserVirtualAddress();
  70. // Threads
  71. tid_t GetTid();
  72. int TgKill(pid_t pid, tid_t tid, int sig);
  73. uptr GetThreadSelf();
  74. void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
  75. uptr *stack_bottom);
  76. void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
  77. uptr *tls_addr, uptr *tls_size);
  78. // Memory management
  79. void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
// Like MmapOrDie, but with raw_report = true — presumably producing a terser,
// undecorated failure report; confirm in the MmapOrDie implementation.
inline void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
  83. void UnmapOrDie(void *addr, uptr size);
  84. // Behaves just like MmapOrDie, but tolerates out of memory condition, in that
  85. // case returns nullptr.
  86. void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
  87. bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)
  88. WARN_UNUSED_RESULT;
  89. bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size,
  90. const char *name = nullptr) WARN_UNUSED_RESULT;
  91. void *MmapNoReserveOrDie(uptr size, const char *mem_type);
  92. void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
  93. // Behaves just like MmapFixedOrDie, but tolerates out of memory condition, in
  94. // that case returns nullptr.
  95. void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size,
  96. const char *name = nullptr);
  97. void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
  98. void *MmapNoAccess(uptr size);
  99. // Map aligned chunk of address space; size and alignment are powers of two.
  100. // Dies on all but out of memory errors, in the latter case returns nullptr.
  101. void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
  102. const char *mem_type);
// Disallow access to a memory range. Use MmapFixedNoAccess to allocate
// inaccessible memory.
  105. bool MprotectNoAccess(uptr addr, uptr size);
  106. bool MprotectReadOnly(uptr addr, uptr size);
  107. bool MprotectReadWrite(uptr addr, uptr size);
  108. void MprotectMallocZones(void *addr, int prot);
  109. #if SANITIZER_WINDOWS
  110. // Zero previously mmap'd memory. Currently used only on Windows.
  111. bool ZeroMmapFixedRegion(uptr fixed_addr, uptr size) WARN_UNUSED_RESULT;
  112. #endif
  113. #if SANITIZER_LINUX
  114. // Unmap memory. Currently only used on Linux.
  115. void UnmapFromTo(uptr from, uptr to);
  116. #endif
  117. // Maps shadow_size_bytes of shadow memory and returns shadow address. It will
  118. // be aligned to the mmap granularity * 2^shadow_scale, or to
  119. // 2^min_shadow_base_alignment if that is larger. The returned address will
  120. // have max(2^min_shadow_base_alignment, mmap granularity) on the left, and
  121. // shadow_size_bytes bytes on the right, which on linux is mapped no access.
  122. // The high_mem_end may be updated if the original shadow size doesn't fit.
  123. uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
  124. uptr min_shadow_base_alignment, uptr &high_mem_end);
  125. // Let S = max(shadow_size, num_aliases * alias_size, ring_buffer_size).
  126. // Reserves 2*S bytes of address space to the right of the returned address and
  127. // ring_buffer_size bytes to the left. The returned address is aligned to 2*S.
  128. // Also creates num_aliases regions of accessible memory starting at offset S
  129. // from the returned address. Each region has size alias_size and is backed by
  130. // the same physical memory.
  131. uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
  132. uptr num_aliases, uptr ring_buffer_size);
  133. // Reserve memory range [beg, end]. If madvise_shadow is true then apply
  134. // madvise (e.g. hugepages, core dumping) requested by options.
  135. void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
  136. bool madvise_shadow = true);
  137. // Protect size bytes of memory starting at addr. Also try to protect
  138. // several pages at the start of the address space as specified by
  139. // zero_base_shadow_start, at most up to the size or zero_base_max_shadow_start.
  140. void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
  141. uptr zero_base_max_shadow_start);
  142. // Find an available address space.
  143. uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
  144. uptr *largest_gap_found, uptr *max_occupied_addr);
  145. // Used to check if we can map shadow memory to a fixed location.
  146. bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
  147. // Releases memory pages entirely within the [beg, end] address range. Noop if
  148. // the provided range does not contain at least one entire page.
  149. void ReleaseMemoryPagesToOS(uptr beg, uptr end);
  150. void IncreaseTotalMmap(uptr size);
  151. void DecreaseTotalMmap(uptr size);
  152. uptr GetRSS();
  153. void SetShadowRegionHugePageMode(uptr addr, uptr length);
  154. bool DontDumpShadowMemory(uptr addr, uptr length);
  155. // Check if the built VMA size matches the runtime one.
  156. void CheckVMASize();
  157. void RunMallocHooks(void *ptr, uptr size);
  158. void RunFreeHooks(void *ptr);
// Manages a reserved region of the address space: Init reserves it, Map /
// MapOrDie commit sub-ranges inside it, and Unmap releases them.
class ReservedAddressRange {
 public:
  // Reserves |size| bytes, optionally at |fixed_addr|; returns the base.
  uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
  // Like Init, but the reserved base is aligned to |align|.
  uptr InitAligned(uptr size, uptr align, const char *name = nullptr);
  // Maps |size| bytes at |fixed_addr| inside the reserved range; MapOrDie
  // terminates the process on failure.
  uptr Map(uptr fixed_addr, uptr size, const char *name = nullptr);
  uptr MapOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
  void Unmap(uptr addr, uptr size);
  void *base() const { return base_; }
  uptr size() const { return size_; }

 private:
  void* base_;         // Start of the reserved range.
  uptr size_;          // Size of the reserved range, in bytes.
  const char* name_;   // Name used for diagnostics / mapping annotations.
  uptr os_handle_;     // Opaque OS-specific handle; platform use not visible here.
};
  174. typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
  175. /*out*/ uptr *stats);
  176. // Parse the contents of /proc/self/smaps and generate a memory profile.
  177. // |cb| is a tool-specific callback that fills the |stats| array.
  178. void GetMemoryProfile(fill_profile_f cb, uptr *stats);
  179. void ParseUnixMemoryProfile(fill_profile_f cb, uptr *stats, char *smaps,
  180. uptr smaps_len);
  181. // Simple low-level (mmap-based) allocator for internal use. Doesn't have
  182. // constructor, so all instances of LowLevelAllocator should be
  183. // linker initialized.
  184. //
  185. // NOTE: Users should instead use the singleton provided via
  186. // `GetGlobalLowLevelAllocator()` rather than create a new one. This way, the
  187. // number of mmap fragments can be reduced and use the same contiguous mmap
  188. // provided by this singleton.
// Simple bump allocator backed by mmap; see the NOTE above about preferring
// the GetGlobalLowLevelAllocator() singleton. Allocations are never freed.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);

 private:
  // NOTE(review): names suggest bump-pointer state — end of the current
  // mmap'd chunk and the next free byte; confirm in the implementation.
  char *allocated_end_;
  char *allocated_current_;
};
  197. // Set the min alignment of LowLevelAllocator to at least alignment.
  198. void SetLowLevelAllocateMinAlignment(uptr alignment);
  199. typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
  200. // Allows to register tool-specific callbacks for LowLevelAllocator.
  201. // Passing NULL removes the callback.
  202. void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
  203. LowLevelAllocator &GetGlobalLowLevelAllocator();
  204. // IO
  205. void CatastrophicErrorWrite(const char *buffer, uptr length);
  206. void RawWrite(const char *buffer);
  207. bool ColorizeReports();
  208. void RemoveANSIEscapeSequencesFromString(char *buffer);
  209. void Printf(const char *format, ...) FORMAT(1, 2);
  210. void Report(const char *format, ...) FORMAT(1, 2);
  211. void SetPrintfAndReportCallback(void (*callback)(const char *));
// Print via Report/Printf only when the runtime verbosity is at least |level|.
// |level| is compared as uptr, so callers pass small non-negative constants.
#define VReport(level, ...) \
  do { \
    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
  } while (0)
#define VPrintf(level, ...) \
  do { \
    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
  } while (0)
// Lock sanitizer error reporting and protects against nested errors.
// RAII wrapper over a process-wide static mutex; Lock/Unlock can also be
// called directly when the scoped form does not fit.
class ScopedErrorReportLock {
 public:
  ScopedErrorReportLock() SANITIZER_ACQUIRE(mutex_) { Lock(); }
  ~ScopedErrorReportLock() SANITIZER_RELEASE(mutex_) { Unlock(); }
  static void Lock() SANITIZER_ACQUIRE(mutex_);
  static void Unlock() SANITIZER_RELEASE(mutex_);
  static void CheckLocked() SANITIZER_CHECK_LOCKED(mutex_);

 private:
  // NOTE(review): presumably records the thread currently reporting so
  // re-entry can be detected — confirm in the implementation.
  static atomic_uintptr_t reporting_thread_;
  static StaticSpinMutex mutex_;
};
  232. extern uptr stoptheworld_tracer_pid;
  233. extern uptr stoptheworld_tracer_ppid;
  234. bool IsAccessibleMemoryRange(uptr beg, uptr size);
  235. // Error report formatting.
  236. const char *StripPathPrefix(const char *filepath,
  237. const char *strip_file_prefix);
  238. // Strip the directories from the module name.
  239. const char *StripModuleName(const char *module);
  240. // OS
  241. uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
  242. uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
  243. uptr ReadBinaryDir(/*out*/ char *buf, uptr buf_len);
  244. uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
  245. const char *GetProcessName();
  246. void UpdateProcessName();
  247. void CacheBinaryName();
  248. void DisableCoreDumperIfNecessary();
  249. void DumpProcessMap();
  250. const char *GetEnv(const char *name);
  251. bool SetEnv(const char *name, const char *value);
  252. u32 GetUid();
  253. void ReExec();
  254. void CheckASLR();
  255. void CheckMPROTECT();
  256. char **GetArgv();
  257. char **GetEnviron();
  258. void PrintCmdline();
  259. bool StackSizeIsUnlimited();
  260. void SetStackSizeLimitInBytes(uptr limit);
  261. bool AddressSpaceIsUnlimited();
  262. void SetAddressSpaceUnlimited();
  263. void AdjustStackSize(void *attr);
  264. void PlatformPrepareForSandboxing(void *args);
  265. void SetSandboxingCallback(void (*f)());
  266. void InitializeCoverage(bool enabled, const char *coverage_dir);
  267. void InitTlsSize();
  268. uptr GetTlsSize();
  269. // Other
  270. void WaitForDebugger(unsigned seconds, const char *label);
  271. void SleepForSeconds(unsigned seconds);
  272. void SleepForMillis(unsigned millis);
  273. u64 NanoTime();
  274. u64 MonotonicNanoTime();
  275. int Atexit(void (*function)(void));
  276. bool TemplateMatch(const char *templ, const char *str);
  277. // Exit
  278. void NORETURN Abort();
  279. void NORETURN Die();
  280. void NORETURN
  281. CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
  282. void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
  283. const char *mmap_type, error_t err,
  284. bool raw_report = false);
  285. void NORETURN ReportMunmapFailureAndDie(void *ptr, uptr size, error_t err,
  286. bool raw_report = false);
  287. // Returns true if the platform-specific error reported is an OOM error.
  288. bool ErrorIsOOM(error_t err);
  289. // This reports an error in the form:
  290. //
  291. // `ERROR: {{SanitizerToolName}}: out of memory: {{err_msg}}`
  292. //
  293. // Downstream tools that read sanitizer output will know that errors starting
  294. // in this format are specifically OOM errors.
  295. #define ERROR_OOM(err_msg, ...) \
  296. Report("ERROR: %s: out of memory: " err_msg, SanitizerToolName, __VA_ARGS__)
  297. // Specific tools may override behavior of "Die" function to do tool-specific
  298. // job.
  299. typedef void (*DieCallbackType)(void);
  300. // It's possible to add several callbacks that would be run when "Die" is
  301. // called. The callbacks will be run in the opposite order. The tools are
  302. // strongly recommended to setup all callbacks during initialization, when there
  303. // is only a single thread.
  304. bool AddDieCallback(DieCallbackType callback);
  305. bool RemoveDieCallback(DieCallbackType callback);
  306. void SetUserDieCallback(DieCallbackType callback);
  307. void SetCheckUnwindCallback(void (*callback)());
  308. // Functions related to signal handling.
  309. typedef void (*SignalHandlerType)(int, void *, void *);
  310. HandleSignalMode GetHandleSignalMode(int signum);
  311. void InstallDeadlySignalHandlers(SignalHandlerType handler);
  312. // Signal reporting.
  313. // Each sanitizer uses slightly different implementation of stack unwinding.
  314. typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
  315. const void *callback_context,
  316. BufferedStackTrace *stack);
  317. // Print deadly signal report and die.
  318. void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
  319. UnwindSignalStackCallbackType unwind,
  320. const void *unwind_context);
  321. // Part of HandleDeadlySignal, exposed for asan.
  322. void StartReportDeadlySignal();
  323. // Part of HandleDeadlySignal, exposed for asan.
  324. void ReportDeadlySignal(const SignalContext &sig, u32 tid,
  325. UnwindSignalStackCallbackType unwind,
  326. const void *unwind_context);
  327. // Alternative signal stack (POSIX-only).
  328. void SetAlternateSignalStack();
  329. void UnsetAlternateSignalStack();
  330. // Construct a one-line string:
  331. // SUMMARY: SanitizerToolName: error_message
  332. // and pass it to __sanitizer_report_error_summary.
  333. // If alt_tool_name is provided, it's used in place of SanitizerToolName.
  334. void ReportErrorSummary(const char *error_message,
  335. const char *alt_tool_name = nullptr);
  336. // Same as above, but construct error_message as:
  337. // error_type file:line[:column][ function]
  338. void ReportErrorSummary(const char *error_type, const AddressInfo &info,
  339. const char *alt_tool_name = nullptr);
  340. // Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
  341. void ReportErrorSummary(const char *error_type, const StackTrace *trace,
  342. const char *alt_tool_name = nullptr);
// Skips frames which we consider internal and not useful to the users.
  344. const SymbolizedStack *SkipInternalFrames(const SymbolizedStack *frames);
  345. void ReportMmapWriteExec(int prot, int mflags);
  346. // Math
  347. #if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
  348. extern "C" {
  349. unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
  350. unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
  351. #if defined(_WIN64)
  352. unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);
  353. unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);
  354. #endif
  355. }
  356. #endif
// Returns the 0-based index of the highest set bit of x, i.e. floor(log2(x)).
// Requires x != 0. Uses compiler builtins where available, MSVC bit-scan
// intrinsics otherwise.
inline uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}
// Returns the 0-based index of the lowest set bit of x (the count of trailing
// zero bits). Requires x != 0.
inline uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}
// NOTE(review): also returns true for x == 0; callers that need a genuine
// power of two must rule out zero themselves (as RoundUpToPowerOfTwo does).
inline constexpr bool IsPowerOfTwo(uptr x) { return (x & (x - 1)) == 0; }
// Rounds size up to the nearest power of two. Requires size != 0 and that the
// result fits in uptr (checked).
inline uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;
  uptr up = MostSignificantSetBitIndex(size);
  CHECK_LT(size, (1ULL << (up + 1)));
  CHECK_GT(size, (1ULL << up));
  return 1ULL << (up + 1);
}
// Rounds size up to a multiple of boundary; boundary must be a power of two.
inline constexpr uptr RoundUpTo(uptr size, uptr boundary) {
  RAW_CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}
// Rounds x down to a multiple of boundary (assumed, unchecked, to be a power
// of two).
inline constexpr uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}
// True iff a is a multiple of alignment (alignment must be a power of two).
inline constexpr bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}
// Binary logarithm; x must be a power of two.
inline uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return LeastSignificantSetBitIndex(x);
}
  412. // Don't use std::min, std::max or std::swap, to minimize dependency
  413. // on libstdc++.
  414. template <class T>
  415. constexpr T Min(T a, T b) {
  416. return a < b ? a : b;
  417. }
  418. template <class T>
  419. constexpr T Max(T a, T b) {
  420. return a > b ? a : b;
  421. }
  422. template <class T>
  423. constexpr T Abs(T a) {
  424. return a < 0 ? -a : a;
  425. }
  426. template<class T> void Swap(T& a, T& b) {
  427. T tmp = a;
  428. a = b;
  429. b = tmp;
  430. }
  431. // Char handling
  432. inline bool IsSpace(int c) {
  433. return (c == ' ') || (c == '\n') || (c == '\t') ||
  434. (c == '\f') || (c == '\r') || (c == '\v');
  435. }
  436. inline bool IsDigit(int c) {
  437. return (c >= '0') && (c <= '9');
  438. }
  439. inline int ToLower(int c) {
  440. return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
  441. }
// A low-level vector based on mmap. May incur a significant memory overhead for
// small vectors.
// WARNING: The current implementation supports only POD types: elements are
// moved with internal_memcpy / zero-filled with internal_memset, and no
// constructors or destructors are ever run.
template<typename T>
class InternalMmapVectorNoCtor {
 public:
  using value_type = T;
  // Must be called before any other use — there is no constructor.
  void Initialize(uptr initial_capacity) {
    capacity_bytes_ = 0;
    size_ = 0;
    data_ = 0;
    reserve(initial_capacity);
  }
  // Releases the backing mapping; the vector must not be used afterwards.
  void Destroy() { UnmapOrDie(data_, capacity_bytes_); }
  T &operator[](uptr i) {
    CHECK_LT(i, size_);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    CHECK_LT(i, size_);
    return data_[i];
  }
  // Appends a bitwise copy of |element|, growing the capacity to the next
  // power of two when full.
  void push_back(const T &element) {
    if (UNLIKELY(size_ >= capacity())) {
      CHECK_EQ(size_, capacity());
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Realloc(new_capacity);
    }
    internal_memcpy(&data_[size_++], &element, sizeof(T));
  }
  T &back() {
    CHECK_GT(size_, 0);
    return data_[size_ - 1];
  }
  void pop_back() {
    CHECK_GT(size_, 0);
    size_--;
  }
  uptr size() const {
    return size_;
  }
  const T *data() const {
    return data_;
  }
  T *data() {
    return data_;
  }
  // Capacity in elements (storage is tracked in bytes internally).
  uptr capacity() const { return capacity_bytes_ / sizeof(T); }
  void reserve(uptr new_size) {
    // Never downsize internal buffer.
    if (new_size > capacity())
      Realloc(new_size);
  }
  // Grows (zero-filling the new elements) or logically shrinks to |new_size|.
  void resize(uptr new_size) {
    if (new_size > size_) {
      reserve(new_size);
      internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
    }
    size_ = new_size;
  }
  // Drops all elements but keeps the allocated capacity.
  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }
  const T *begin() const {
    return data();
  }
  T *begin() {
    return data();
  }
  const T *end() const {
    return data() + size();
  }
  T *end() {
    return data() + size();
  }
  void swap(InternalMmapVectorNoCtor &other) {
    Swap(data_, other.data_);
    Swap(capacity_bytes_, other.capacity_bytes_);
    Swap(size_, other.size_);
  }

 private:
  // Moves the contents into a fresh mapping large enough for |new_capacity|
  // elements (rounded up to whole pages) and unmaps the old one.
  NOINLINE void Realloc(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
    CHECK_LE(size_, new_capacity);
    uptr new_capacity_bytes =
        RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());
    T *new_data = (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector");
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    UnmapOrDie(data_, capacity_bytes_);
    data_ = new_data;
    capacity_bytes_ = new_capacity_bytes;
  }
  T *data_;
  uptr capacity_bytes_;
  uptr size_;
};
// Element-wise equality via bitwise comparison — consistent with the POD-only
// contract of InternalMmapVectorNoCtor.
template <typename T>
bool operator==(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  if (lhs.size() != rhs.size()) return false;
  return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0;
}
template <typename T>
bool operator!=(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  return !(lhs == rhs);
}
// RAII wrapper around InternalMmapVectorNoCtor: the constructor calls
// Initialize and the destructor calls Destroy. Non-copyable and non-movable.
template<typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(0); }
  // Creates a vector already holding |cnt| zero-initialized elements.
  explicit InternalMmapVector(uptr cnt) {
    InternalMmapVectorNoCtor<T>::Initialize(cnt);
    this->resize(cnt);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
  // Disallow copies and moves.
  InternalMmapVector(const InternalMmapVector &) = delete;
  InternalMmapVector &operator=(const InternalMmapVector &) = delete;
  InternalMmapVector(InternalMmapVector &&) = delete;
  InternalMmapVector &operator=(InternalMmapVector &&) = delete;
};
// Growable NUL-terminated string built on InternalMmapVector. The terminating
// '\0' is stored inside the buffer, so length() == buffer size - 1.
class InternalScopedString {
 public:
  InternalScopedString() : buffer_(1) { buffer_[0] = '\0'; }
  // Length of the string, excluding the trailing '\0'.
  uptr length() const { return buffer_.size() - 1; }
  // Resets to the empty string (keeps the allocation).
  void clear() {
    buffer_.resize(1);
    buffer_[0] = '\0';
  }
  // Appends a plain string / a printf-style formatted string.
  void Append(const char *str);
  void AppendF(const char *format, ...) FORMAT(2, 3);
  const char *data() const { return buffer_.data(); }
  char *data() { return buffer_.data(); }

 private:
  InternalMmapVector<char> buffer_;
};
  578. template <class T>
  579. struct CompareLess {
  580. bool operator()(const T &a, const T &b) const { return a < b; }
  581. };
// HeapSort for arrays and InternalMmapVector. In-place and not stable; |comp|
// must implement a strict weak ordering ("less than").
template <class T, class Compare = CompareLess<T>>
void Sort(T *v, uptr size, Compare comp = {}) {
  if (size < 2)
    return;
  // Stage 1: insert elements to the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    // Sift v[i] up until its parent is no longer smaller.
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;
      if (comp(v[p], v[j]))
        Swap(v[j], v[p]);
      else
        break;
    }
  }
  // Stage 2: swap largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap(v[0], v[i]);
    uptr j, max_ind;
    // Sift the new root down within the shrinking heap [0, i).
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp(v[max_ind], v[left]))
        max_ind = left;
      if (right < i && comp(v[max_ind], v[right]))
        max_ind = right;
      if (max_ind != j)
        Swap(v[j], v[max_ind]);
      else
        break;
    }
  }
}
  618. // Works like std::lower_bound: finds the first element that is not less
  619. // than the val.
  620. template <class Container, class T,
  621. class Compare = CompareLess<typename Container::value_type>>
  622. uptr InternalLowerBound(const Container &v, const T &val, Compare comp = {}) {
  623. uptr first = 0;
  624. uptr last = v.size();
  625. while (last > first) {
  626. uptr mid = (first + last) / 2;
  627. if (comp(v[mid], val))
  628. first = mid + 1;
  629. else
  630. last = mid;
  631. }
  632. return first;
  633. }
// CPU architectures recognized in loaded-module information; see
// ModuleArchToString for the canonical printable names.
enum ModuleArch {
  kModuleArchUnknown,
  kModuleArchI386,
  kModuleArchX86_64,
  kModuleArchX86_64H,
  kModuleArchARMV6,
  kModuleArchARMV7,
  kModuleArchARMV7S,
  kModuleArchARMV7K,
  kModuleArchARM64,
  kModuleArchLoongArch64,
  kModuleArchRISCV64,
  kModuleArchHexagon
};
// Sorts and removes duplicates from the container, shrinking it in place to
// the number of unique elements. Two elements count as duplicates iff neither
// compares less than the other under |comp|.
template <class Container,
          class Compare = CompareLess<typename Container::value_type>>
void SortAndDedup(Container &v, Compare comp = {}) {
  Sort(v.data(), v.size(), comp);
  uptr size = v.size();
  if (size < 2)
    return;
  // |last| is the index of the most recent element kept.
  uptr last = 0;
  for (uptr i = 1; i < size; ++i) {
    if (comp(v[last], v[i])) {
      ++last;
      if (last != i)
        v[last] = v[i];
    } else {
      // After sorting, v[i] can never be less than v[last]; equal elements
      // are simply skipped.
      CHECK(!comp(v[i], v[last]));
    }
  }
  v.resize(last + 1);
}
  668. constexpr uptr kDefaultFileMaxSize = FIRST_32_SECOND_64(1 << 26, 1 << 28);
// Opens the file 'file_name' and reads up to 'max_len' bytes.
  670. // The resulting buffer is mmaped and stored in '*buff'.
  671. // Returns true if file was successfully opened and read.
  672. bool ReadFileToVector(const char *file_name,
  673. InternalMmapVectorNoCtor<char> *buff,
  674. uptr max_len = kDefaultFileMaxSize,
  675. error_t *errno_p = nullptr);
// Opens the file 'file_name' and reads up to 'max_len' bytes.
  677. // This function is less I/O efficient than ReadFileToVector as it may reread
  678. // file multiple times to avoid mmap during read attempts. It's used to read
  679. // procmap, so short reads with mmap in between can produce inconsistent result.
  680. // The resulting buffer is mmaped and stored in '*buff'.
  681. // The size of the mmaped region is stored in '*buff_size'.
  682. // The total number of read bytes is stored in '*read_len'.
  683. // Returns true if file was successfully opened and read.
  684. bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
  685. uptr *read_len, uptr max_len = kDefaultFileMaxSize,
  686. error_t *errno_p = nullptr);
  687. int GetModuleAndOffsetForPc(uptr pc, char *module_name, uptr module_name_len,
  688. uptr *pc_offset);
// When adding a new architecture, don't forget to also update
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp.
// Maps a ModuleArch enumerator to its canonical lower-case name
// (e.g. "x86_64h"); kModuleArchUnknown maps to the empty string.
inline const char *ModuleArchToString(ModuleArch arch) {
  switch (arch) {
    case kModuleArchUnknown:
      return "";
    case kModuleArchI386:
      return "i386";
    case kModuleArchX86_64:
      return "x86_64";
    case kModuleArchX86_64H:
      return "x86_64h";
    case kModuleArchARMV6:
      return "armv6";
    case kModuleArchARMV7:
      return "armv7";
    case kModuleArchARMV7S:
      return "armv7s";
    case kModuleArchARMV7K:
      return "armv7k";
    case kModuleArchARM64:
      return "arm64";
    case kModuleArchLoongArch64:
      return "loongarch64";
    case kModuleArchRISCV64:
      return "riscv64";
    case kModuleArchHexagon:
      return "hexagon";
  }
  // Every enumerator is handled above; reaching here means a value outside
  // the enum was passed in.
  CHECK(0 && "Invalid module arch");
  return "";
}
#if SANITIZER_APPLE
// Mach-O binaries carry a fixed 16-byte UUID (LC_UUID load command).
const uptr kModuleUUIDSize = 16;
#else
// NOTE(review): presumably sized to hold ELF build-id style identifiers,
// which can exceed 16 bytes — confirm against the ELF readers.
const uptr kModuleUUIDSize = 32;
#endif
// Maximum length of a stored segment name; see LoadedModule::AddressRange.
const uptr kMaxSegName = 16;
// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
 public:
  // Constructs an empty module: no name, zero addresses, unknown arch,
  // zeroed UUID and no address ranges.
  LoadedModule()
      : full_name_(nullptr),
        base_address_(0),
        max_address_(0),
        arch_(kModuleArchUnknown),
        uuid_size_(0),
        instrumented_(false) {
    internal_memset(uuid_, 0, kModuleUUIDSize);
    ranges_.clear();
  }
  // Sets the module's name and load address (minimal form).
  void set(const char *module_name, uptr base_address);
  // Full form: additionally records architecture, UUID and whether the
  // module is sanitizer-instrumented.
  void set(const char *module_name, uptr base_address, ModuleArch arch,
           u8 uuid[kModuleUUIDSize], bool instrumented);
  // Stores 'size' bytes of UUID data (see uuid_size()).
  void setUuid(const char *uuid, uptr size);
  // Releases owned resources and resets the object to its empty state.
  void clear();
  // Registers the segment [beg, end) with the given permissions and an
  // optional segment name (truncated to kMaxSegName bytes).
  void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
                       const char *name = nullptr);
  // Returns true if 'address' lies inside one of the registered ranges.
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }
  uptr max_address() const { return max_address_; }
  ModuleArch arch() const { return arch_; }
  const u8 *uuid() const { return uuid_; }
  uptr uuid_size() const { return uuid_size_; }
  bool instrumented() const { return instrumented_; }

  // One mapped segment of the module; nodes live in an intrusive
  // singly-linked list (see ranges()).
  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;
    bool writable;
    char name[kMaxSegName];

    AddressRange(uptr beg, uptr end, bool executable, bool writable,
                 const char *name)
        : next(nullptr),
          beg(beg),
          end(end),
          executable(executable),
          writable(writable) {
      // NOTE(review): internal_strncpy with ARRAY_SIZE leaves 'name'
      // WITHOUT a trailing '\0' when the source is kMaxSegName (16) bytes
      // or longer. This mirrors Mach-O's fixed 16-byte segment-name fields,
      // but readers must not assume null termination — confirm with all
      // consumers of AddressRange::name.
      internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
    }
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  uptr max_address_;
  ModuleArch arch_;
  uptr uuid_size_;  // Number of meaningful bytes in uuid_ (set by setUuid).
  u8 uuid_[kModuleUUIDSize];
  bool instrumented_;
  IntrusiveList<AddressRange> ranges_;
};
  784. // List of LoadedModules. OS-dependent implementation is responsible for
  785. // filling this information.
  786. class ListOfModules {
  787. public:
  788. ListOfModules() : initialized(false) {}
  789. ~ListOfModules() { clear(); }
  790. void init();
  791. void fallbackInit(); // Uses fallback init if available, otherwise clears
  792. const LoadedModule *begin() const { return modules_.begin(); }
  793. LoadedModule *begin() { return modules_.begin(); }
  794. const LoadedModule *end() const { return modules_.end(); }
  795. LoadedModule *end() { return modules_.end(); }
  796. uptr size() const { return modules_.size(); }
  797. const LoadedModule &operator[](uptr i) const {
  798. CHECK_LT(i, modules_.size());
  799. return modules_[i];
  800. }
  801. private:
  802. void clear() {
  803. for (auto &module : modules_) module.clear();
  804. modules_.clear();
  805. }
  806. void clearOrInit() {
  807. initialized ? clear() : modules_.Initialize(kInitialCapacity);
  808. initialized = true;
  809. }
  810. InternalMmapVectorNoCtor<LoadedModule> modules_;
  811. // We rarely have more than 16K loaded modules.
  812. static const uptr kInitialCapacity = 1 << 14;
  813. bool initialized;
  814. };
// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

// Android API levels the sanitizers distinguish between; numeric values
// match the platform's API-level numbers.
enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};

// Sends 'buffer' to the system log; implemented per OS.
void WriteToSyslog(const char *buffer);

// Windows trace logging is only available with real MSVC (not clang-cl).
#if defined(SANITIZER_WINDOWS) && defined(_MSC_VER) && !defined(__clang__)
#define SANITIZER_WIN_TRACE 1
#else
#define SANITIZER_WIN_TRACE 0
#endif
// Full-report logging is only implemented on Apple platforms and
// MSVC-traced Windows; elsewhere it is a no-op.
#if SANITIZER_APPLE || SANITIZER_WIN_TRACE
void LogFullErrorReport(const char *buffer);
#else
inline void LogFullErrorReport(const char *buffer) {}
#endif

// Syslog/printf log hooks exist only on Linux and Apple platforms.
#if SANITIZER_LINUX || SANITIZER_APPLE
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
inline void WriteOneLineToSyslog(const char *s) {}
inline void LogMessageOnPrintf(const char *str) {}
#endif

#if SANITIZER_LINUX || SANITIZER_WIN_TRACE
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
inline void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
inline void SetAbortMessage(const char *) {}
#endif

// Android-only unwinder/API-level queries; stubbed out elsewhere.
#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
inline void AndroidLogWrite(const char *buffer_unused) {}
inline void SanitizerInitializeUnwinder() {}
inline AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif
// Number of passes the pthread TSD destructor loop should make on thread
// exit (how many times key destructors may be re-run).
inline uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  // Android L MR1 (API 22) runs up to 8 iterations; other versions use 4.
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  // Matches the common PTHREAD_DESTRUCTOR_ITERATIONS value.
  return 4;
#else
  // Unused on Windows.
  return 0;
#endif
}
// Starts a thread running 'func(arg)' via a sanitizer-internal wrapper;
// returns an opaque handle to pass to internal_join_thread.
void *internal_start_thread(void *(*func)(void*), void *arg);
// Blocks until the thread created by internal_start_thread finishes.
void internal_join_thread(void *th);
// Starts the sanitizer background thread if the platform supports one.
// (Name keeps the historical "Backgroud" spelling; callers depend on it.)
void MaybeStartBackgroudThread();

// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  // MSVC has no inline asm; a compiler read/write barrier is the closest
  // equivalent.
  _ReadWriteBarrier();
#else
  // Empty asm that claims to read 'arg' and clobber memory, so the
  // optimizer cannot reason across it.
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}
// Platform-independent view of the machine state at the moment a signal
// (or Windows exception) was delivered. Does not own siginfo/context.
struct SignalContext {
  void *siginfo;  // Platform siginfo (e.g. siginfo_t*); not owned.
  void *context;  // Platform context (e.g. ucontext_t*); not owned.
  uptr addr;      // Faulting address (may be 0; see is_true_faulting_addr).
  uptr pc;        // Program counter at the fault.
  uptr sp;        // Stack pointer at the fault.
  uptr bp;        // Frame/base pointer at the fault.
  bool is_memory_access;  // True if the signal was caused by a memory access.
  enum WriteFlag { Unknown, Read, Write } write_flag;

  // In some cases the kernel cannot provide the true faulting address; `addr`
  // will be zero then. This field allows to distinguish between these cases
  // and dereferences of null.
  bool is_true_faulting_addr;

  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor
  SignalContext() = default;

  // Creates signal context in a platform-specific manner.
  // SignalContext is going to keep pointers to siginfo and context without
  // owning them.
  SignalContext(void *siginfo, void *context)
      : siginfo(siginfo),
        context(context),
        addr(GetAddress()),
        is_memory_access(IsMemoryAccess()),
        write_flag(GetWriteFlag()),
        is_true_faulting_addr(IsTrueFaultingAddress()) {
    // pc/sp/bp extraction is fully platform-specific.
    InitPcSpBp();
  }

  // Dumps all CPU registers found in 'context' to the report.
  static void DumpAllRegisters(void *context);

  // Type of signal e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
  int GetType() const;

  // String description of the signal.
  const char *Describe() const;

  // Returns true if signal is stack overflow.
  bool IsStackOverflow() const;

 private:
  // Platform specific initialization.
  void InitPcSpBp();
  uptr GetAddress() const;
  WriteFlag GetWriteFlag() const;
  bool IsMemoryAccess() const;
  bool IsTrueFaultingAddress() const;
};

// Platform-specific early process initialization (see per-OS files).
void InitializePlatformEarly();
  926. template <typename Fn>
  927. class RunOnDestruction {
  928. public:
  929. explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  930. ~RunOnDestruction() { fn_(); }
  931. private:
  932. Fn fn_;
  933. };
  934. // A simple scope guard. Usage:
  935. // auto cleanup = at_scope_exit([]{ do_cleanup; });
  936. template <typename Fn>
  937. RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  938. return RunOnDestruction<Fn>(fn);
  939. }
// Linux on 64-bit s390 had a nasty bug that crashes the whole machine
// if a process uses virtual memory over 4TB (as many sanitizers like
// to do). This function will abort the process if running on a kernel
// that looks vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
// No-op on platforms where the CVE does not apply.
inline void AvoidCVE_2016_2143() {}
#endif
// Statistics reported by the stack depot: number of unique stack traces
// stored and bytes allocated for them.
struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};

// The default value for allocator_release_to_os_interval_ms common flag to
// indicate that sanitizer allocator should not attempt to release memory to OS.
const s32 kReleaseToOSIntervalNever = -1;

// Checks that 'filename' is not being loaded in a way that would break
// sanitizer interposition (presumably rejecting RTLD_DEEPBIND in 'flag' —
// see the Linux implementation).
void CheckNoDeepBind(const char *filename, int flag);

// Returns the requested amount of random data (up to 256 bytes) that can then
// be used to seed a PRNG. Defaults to blocking like the underlying syscall.
bool GetRandom(void *buffer, uptr length, bool blocking = true);

// Returns the number of logical processors on the system.
u32 GetNumberOfCPUs();
// Process-wide cache for GetNumberOfCPUs(); 0 means "not yet queried".
extern u32 NumberOfCPUsCached;

// Memoized wrapper around GetNumberOfCPUs().
// NOTE(review): the cached global is read and written without
// synchronization; concurrent first calls may each invoke
// GetNumberOfCPUs(), which is harmless only if that call is idempotent —
// confirm.
inline u32 GetNumberOfCPUsCached() {
  if (!NumberOfCPUsCached)
    NumberOfCPUsCached = GetNumberOfCPUs();
  return NumberOfCPUsCached;
}
}  // namespace __sanitizer

// Placement-style operator new that services sanitizer-internal allocations
// from a LowLevelAllocator (no matching operator delete; such memory is
// never individually freed).
inline void *operator new(__sanitizer::operator_new_size_type size,
                          __sanitizer::LowLevelAllocator &alloc) {
  return alloc.Allocate(size);
}

#endif  // SANITIZER_COMMON_H