//===-- sanitizer_linux_libcdep.cpp ---------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements linux-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"

#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
    SANITIZER_SOLARIS

#  include "sanitizer_allocator_internal.h"
#  include "sanitizer_atomic.h"
#  include "sanitizer_common.h"
#  include "sanitizer_file.h"
#  include "sanitizer_flags.h"
#  include "sanitizer_getauxval.h"
#  include "sanitizer_glibc_version.h"
#  include "sanitizer_linux.h"
#  include "sanitizer_placement_new.h"
#  include "sanitizer_procmaps.h"
#  include "sanitizer_solaris.h"

#  if SANITIZER_NETBSD
#    define _RTLD_SOURCE  // for __lwp_gettcb_fast() / __lwp_getprivate_fast()
#  endif

#  include <dlfcn.h>  // for dlsym()
#  include <link.h>
#  include <pthread.h>
#  include <signal.h>
#  include <sys/mman.h>
#  include <sys/resource.h>
#  include <syslog.h>

#  if !defined(ElfW)
#    define ElfW(type) Elf_##type
#  endif

#  if SANITIZER_FREEBSD
#    include <pthread_np.h>
#    include <sys/auxv.h>
#    include <sys/sysctl.h>
#    define pthread_getattr_np pthread_attr_get_np
// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
// that, it was never implemented. So just define it to zero.
#    undef MAP_NORESERVE
#    define MAP_NORESERVE 0
extern const Elf_Auxinfo *__elf_aux_vector;
#  endif

#  if SANITIZER_NETBSD
#    include <lwp.h>
#    include <sys/sysctl.h>
#    include <sys/tls.h>
#  endif

#  if SANITIZER_SOLARIS
#    include <stddef.h>
#    include <stdlib.h>
#    include <thread.h>
#  endif

#  if SANITIZER_ANDROID
#    include <android/api-level.h>
#    if !defined(CPU_COUNT) && !defined(__aarch64__)
#      include <dirent.h>
#      include <fcntl.h>
struct __sanitizer::linux_dirent {
  long d_ino;
  off_t d_off;
  unsigned short d_reclen;
  char d_name[];
};
#    endif
#  endif

#  if !SANITIZER_ANDROID
#    include <elf.h>
#    include <unistd.h>
#  endif

namespace __sanitizer {

SANITIZER_WEAK_ATTRIBUTE int real_sigaction(int signum, const void *act,
                                            void *oldact);

int internal_sigaction(int signum, const void *act, void *oldact) {
#  if !SANITIZER_GO
  if (&real_sigaction)
    return real_sigaction(signum, act, oldact);
#  endif
  return sigaction(signum, (const struct sigaction *)act,
                   (struct sigaction *)oldact);
}

void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom) {
  CHECK(stack_top);
  CHECK(stack_bottom);
  if (at_initialization) {
    // This is the main thread. Libpthread may not be initialized yet.
    struct rlimit rl;
    CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);

    // Find the mapping that contains a stack variable.
    MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
    if (proc_maps.Error()) {
      *stack_top = *stack_bottom = 0;
      return;
    }
    MemoryMappedSegment segment;
    uptr prev_end = 0;
    while (proc_maps.Next(&segment)) {
      if ((uptr)&rl < segment.end)
        break;
      prev_end = segment.end;
    }
    CHECK((uptr)&rl >= segment.start && (uptr)&rl < segment.end);

    // Get stacksize from rlimit, but clip it so that it does not overlap
    // with other mappings.
    uptr stacksize = rl.rlim_cur;
    if (stacksize > segment.end - prev_end)
      stacksize = segment.end - prev_end;
    // When running with unlimited stack size, we still want to set some limit.
    // The unlimited stack size is caused by 'ulimit -s unlimited'.
    // Also, for some reason, GNU make spawns subprocesses with unlimited stack.
    if (stacksize > kMaxThreadStackSize)
      stacksize = kMaxThreadStackSize;
    *stack_top = segment.end;
    *stack_bottom = segment.end - stacksize;
    return;
  }
  uptr stacksize = 0;
  void *stackaddr = nullptr;
#  if SANITIZER_SOLARIS
  stack_t ss;
  CHECK_EQ(thr_stksegment(&ss), 0);
  stacksize = ss.ss_size;
  stackaddr = (char *)ss.ss_sp - stacksize;
#  else  // !SANITIZER_SOLARIS
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  internal_pthread_attr_getstack(&attr, &stackaddr, &stacksize);
  pthread_attr_destroy(&attr);
#  endif  // SANITIZER_SOLARIS
  *stack_top = (uptr)stackaddr + stacksize;
  *stack_bottom = (uptr)stackaddr;
}
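
// Illustrative example (editor's note, not part of upstream): for the main
// thread under 'ulimit -s unlimited', rl.rlim_cur is RLIM_INFINITY, so the
// computed stacksize is clipped first to the gap below the stack mapping and
// then to kMaxThreadStackSize. A hypothetical caller would use it like this:
//
//   uptr top, bottom;
//   GetThreadStackTopAndBottom(/*at_initialization=*/true, &top, &bottom);
//   // [bottom, top) now bounds the main thread's stack, at most
//   // kMaxThreadStackSize bytes.
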
#  if !SANITIZER_GO
bool SetEnv(const char *name, const char *value) {
  void *f = dlsym(RTLD_NEXT, "setenv");
  if (!f)
    return false;
  typedef int (*setenv_ft)(const char *name, const char *value, int overwrite);
  setenv_ft setenv_f;
  CHECK_EQ(sizeof(setenv_f), sizeof(f));
  internal_memcpy(&setenv_f, &f, sizeof(f));
  return setenv_f(name, value, 1) == 0;
}
#  endif

__attribute__((unused)) static bool GetLibcVersion(int *major, int *minor,
                                                   int *patch) {
#  ifdef _CS_GNU_LIBC_VERSION
  char buf[64];
  uptr len = confstr(_CS_GNU_LIBC_VERSION, buf, sizeof(buf));
  if (len >= sizeof(buf))
    return false;
  buf[len] = 0;
  static const char kGLibC[] = "glibc ";
  if (internal_strncmp(buf, kGLibC, sizeof(kGLibC) - 1) != 0)
    return false;
  const char *p = buf + sizeof(kGLibC) - 1;
  *major = internal_simple_strtoll(p, &p, 10);
  *minor = (*p == '.') ? internal_simple_strtoll(p + 1, &p, 10) : 0;
  *patch = (*p == '.') ? internal_simple_strtoll(p + 1, &p, 10) : 0;
  return true;
#  else
  return false;
#  endif
}
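
// Illustrative example (editor's note, not part of upstream): on a glibc
// system confstr(_CS_GNU_LIBC_VERSION, ...) fills buf with a string such as
// "glibc 2.31", which the parser above turns into major=2, minor=31, patch=0:
//
//   int major, minor, patch;
//   if (GetLibcVersion(&major, &minor, &patch) && major == 2 && minor >= 25) {
//     // dlpi_tls_data can be trusted (see g_use_dlpi_tls_data below).
//   }
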
// True if we can use dlpi_tls_data. glibc before 2.25 may leave NULL (BZ
// #19826) so dlpi_tls_data cannot be used.
//
// musl before 1.2.3 and FreeBSD as of 12.2 incorrectly set dlpi_tls_data to
// the TLS initialization image
// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=254774
__attribute__((unused)) static int g_use_dlpi_tls_data;

#  if SANITIZER_GLIBC && !SANITIZER_GO
__attribute__((unused)) static size_t g_tls_size;

void InitTlsSize() {
  int major, minor, patch;
  g_use_dlpi_tls_data =
      GetLibcVersion(&major, &minor, &patch) && major == 2 && minor >= 25;

#    if defined(__aarch64__) || defined(__x86_64__) || \
        defined(__powerpc64__) || defined(__loongarch__)
  void *get_tls_static_info = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
  size_t tls_align;
  ((void (*)(size_t *, size_t *))get_tls_static_info)(&g_tls_size, &tls_align);
#    endif
}
#  else
void InitTlsSize() {}
#  endif  // SANITIZER_GLIBC && !SANITIZER_GO

// On glibc x86_64, ThreadDescriptorSize() needs to be precise due to the usage
// of g_tls_size. On other targets, ThreadDescriptorSize() is only used by lsan
// to get the pointer to thread-specific data keys in the thread control block.
#  if (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_SOLARIS) && \
      !SANITIZER_ANDROID && !SANITIZER_GO
// sizeof(struct pthread) from glibc.
static atomic_uintptr_t thread_descriptor_size;

static uptr ThreadDescriptorSizeFallback() {
  uptr val = 0;
#    if defined(__x86_64__) || defined(__i386__) || defined(__arm__)
  int major;
  int minor;
  int patch;
  if (GetLibcVersion(&major, &minor, &patch) && major == 2) {
    /* sizeof(struct pthread) values from various glibc versions. */
    if (SANITIZER_X32)
      val = 1728;  // Assume only one particular version for x32.
    // For ARM sizeof(struct pthread) changed in Glibc 2.23.
    else if (SANITIZER_ARM)
      val = minor <= 22 ? 1120 : 1216;
    else if (minor <= 3)
      val = FIRST_32_SECOND_64(1104, 1696);
    else if (minor == 4)
      val = FIRST_32_SECOND_64(1120, 1728);
    else if (minor == 5)
      val = FIRST_32_SECOND_64(1136, 1728);
    else if (minor <= 9)
      val = FIRST_32_SECOND_64(1136, 1712);
    else if (minor == 10)
      val = FIRST_32_SECOND_64(1168, 1776);
    else if (minor == 11 || (minor == 12 && patch == 1))
      val = FIRST_32_SECOND_64(1168, 2288);
    else if (minor <= 14)
      val = FIRST_32_SECOND_64(1168, 2304);
    else if (minor < 32)  // Unknown version
      val = FIRST_32_SECOND_64(1216, 2304);
    else  // minor == 32
      val = FIRST_32_SECOND_64(1344, 2496);
  }
#    elif defined(__s390__) || defined(__sparc__)
  // The size of a prefix of TCB including pthread::{specific_1stblock,specific}
  // suffices. Just return offsetof(struct pthread, specific_used), which hasn't
  // changed since 2007-05. Technically this applies to i386/x86_64 as well but
  // we call _dl_get_tls_static_info and need the precise size of struct
  // pthread.
  return FIRST_32_SECOND_64(524, 1552);
#    elif defined(__mips__)
  // TODO(sagarthakur): add more values as per different glibc versions.
  val = FIRST_32_SECOND_64(1152, 1776);
#    elif SANITIZER_LOONGARCH64
  val = 1856;  // from glibc 2.36
#    elif SANITIZER_RISCV64
  int major;
  int minor;
  int patch;
  if (GetLibcVersion(&major, &minor, &patch) && major == 2) {
    // TODO: consider adding an optional runtime check for an unknown (untested)
    // glibc version
    if (minor <= 28)  // WARNING: the highest tested version is 2.29
      val = 1772;     // no guarantees for this one
    else if (minor <= 31)
      val = 1772;  // tested against glibc 2.29, 2.31
    else
      val = 1936;  // tested against glibc 2.32
  }
#    elif defined(__aarch64__)
  // The sizeof (struct pthread) is the same from GLIBC 2.17 to 2.22.
  val = 1776;
#    elif defined(__powerpc64__)
  val = 1776;  // from glibc.ppc64le 2.20-8.fc21
#    endif
  return val;
}

uptr ThreadDescriptorSize() {
  uptr val = atomic_load_relaxed(&thread_descriptor_size);
  if (val)
    return val;
  // _thread_db_sizeof_pthread is a GLIBC_PRIVATE symbol that is exported in
  // glibc 2.34 and later.
  if (unsigned *psizeof = static_cast<unsigned *>(
          dlsym(RTLD_DEFAULT, "_thread_db_sizeof_pthread")))
    val = *psizeof;
  if (!val)
    val = ThreadDescriptorSizeFallback();
  atomic_store_relaxed(&thread_descriptor_size, val);
  return val;
}
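
// Illustrative example (editor's note, not part of upstream): on glibc 2.34+
// the private symbol resolves and the version table above is never consulted;
// conceptually the lookup amounts to
//
//   unsigned *p =
//       (unsigned *)dlsym(RTLD_DEFAULT, "_thread_db_sizeof_pthread");
//   uptr descriptor_size = p ? *p : ThreadDescriptorSizeFallback();
//
// The result is cached in thread_descriptor_size, so dlsym runs at most once
// per process (modulo benign races that store the same value).
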
#    if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64 || \
        SANITIZER_LOONGARCH64
// TlsPreTcbSize includes size of struct pthread_descr and size of tcb
// head structure. It lies before the static tls blocks.
static uptr TlsPreTcbSize() {
#      if defined(__mips__)
  const uptr kTcbHead = 16;  // sizeof (tcbhead_t)
#      elif defined(__powerpc64__)
  const uptr kTcbHead = 88;  // sizeof (tcbhead_t)
#      elif SANITIZER_RISCV64
  const uptr kTcbHead = 16;  // sizeof (tcbhead_t)
#      elif SANITIZER_LOONGARCH64
  const uptr kTcbHead = 16;  // sizeof (tcbhead_t)
#      endif
  const uptr kTlsAlign = 16;
  const uptr kTlsPreTcbSize =
      RoundUpTo(ThreadDescriptorSize() + kTcbHead, kTlsAlign);
  return kTlsPreTcbSize;
}
#    endif

namespace {
struct TlsBlock {
  uptr begin, end, align;
  size_t tls_modid;
  bool operator<(const TlsBlock &rhs) const { return begin < rhs.begin; }
};
}  // namespace

#    ifdef __s390__
extern "C" uptr __tls_get_offset(void *arg);

static uptr TlsGetOffset(uptr ti_module, uptr ti_offset) {
  // The __tls_get_offset ABI requires %r12 to point to GOT and %r2 to be an
  // offset of a struct tls_index inside GOT. We don't possess either of the
  // two, so violate the letter of the "ELF Handling For Thread-Local
  // Storage" document and assume that the implementation just dereferences
  // %r2 + %r12.
  uptr tls_index[2] = {ti_module, ti_offset};
  register uptr r2 asm("2") = 0;
  register void *r12 asm("12") = tls_index;
  asm("basr %%r14, %[__tls_get_offset]"
      : "+r"(r2)
      : [__tls_get_offset] "r"(__tls_get_offset), "r"(r12)
      : "memory", "cc", "0", "1", "3", "4", "5", "14");
  return r2;
}
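
// Editor's note (illustrative, not part of upstream): with a GOT-resident
// tls_index, an ABI-conforming caller would conceptually do
//
//   // got_offset_of_tls_index is hypothetical; %r12 would hold the GOT base.
//   uptr off = __tls_get_offset((void *)got_offset_of_tls_index);
//   void *p = (char *)__builtin_thread_pointer() + off;
//
// Since the runtime has neither a GOT slot nor a GOT pointer of its own, the
// inline asm above passes a stack-allocated tls_index via %r12 with %r2 = 0,
// relying on glibc simply dereferencing %r2 + %r12.
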
#    else
extern "C" void *__tls_get_addr(size_t *);
#    endif

static size_t main_tls_modid;

static int CollectStaticTlsBlocks(struct dl_phdr_info *info, size_t size,
                                  void *data) {
  size_t tls_modid;
#    if SANITIZER_SOLARIS
  // dlpi_tls_modid is only available since Solaris 11.4 SRU 10. Use
  // dlinfo(RTLD_DI_LINKMAP) instead which works on all of Solaris 11.3,
  // 11.4, and Illumos. The tlsmodid of the executable was changed to 1 in
  // 11.4 to match other implementations.
  if (size >= offsetof(dl_phdr_info_test, dlpi_tls_modid))
    main_tls_modid = 1;
  else
    main_tls_modid = 0;
  g_use_dlpi_tls_data = 0;
  Rt_map *map;
  dlinfo(RTLD_SELF, RTLD_DI_LINKMAP, &map);
  tls_modid = map->rt_tlsmodid;
#    else
  main_tls_modid = 1;
  tls_modid = info->dlpi_tls_modid;
#    endif

  if (tls_modid < main_tls_modid)
    return 0;
  uptr begin;
#    if !SANITIZER_SOLARIS
  begin = (uptr)info->dlpi_tls_data;
#    endif
  if (!g_use_dlpi_tls_data) {
    // Call __tls_get_addr as a fallback. This forces TLS allocation on glibc
    // and FreeBSD.
#    ifdef __s390__
    begin = (uptr)__builtin_thread_pointer() + TlsGetOffset(tls_modid, 0);
#    else
    size_t mod_and_off[2] = {tls_modid, 0};
    begin = (uptr)__tls_get_addr(mod_and_off);
#    endif
  }
  for (unsigned i = 0; i != info->dlpi_phnum; ++i)
    if (info->dlpi_phdr[i].p_type == PT_TLS) {
      static_cast<InternalMmapVector<TlsBlock> *>(data)->push_back(
          TlsBlock{begin, begin + info->dlpi_phdr[i].p_memsz,
                   info->dlpi_phdr[i].p_align, tls_modid});
      break;
    }
  return 0;
}

__attribute__((unused)) static void GetStaticTlsBoundary(uptr *addr, uptr *size,
                                                         uptr *align) {
  InternalMmapVector<TlsBlock> ranges;
  dl_iterate_phdr(CollectStaticTlsBlocks, &ranges);
  uptr len = ranges.size();
  Sort(ranges.begin(), len);
  // Find the range with tls_modid == main_tls_modid. For glibc, because
  // libc.so uses PT_TLS, this module is guaranteed to exist and is one of
  // the initially loaded modules.
  uptr one = 0;
  while (one != len && ranges[one].tls_modid != main_tls_modid) ++one;
  if (one == len) {
    // This may happen with musl if no module uses PT_TLS.
    *addr = 0;
    *size = 0;
    *align = 1;
    return;
  }
  // Find the maximum consecutive ranges. We consider two modules consecutive if
  // the gap is smaller than the alignment of the latter range. The dynamic
  // loader places static TLS blocks this way not to waste space.
  uptr l = one;
  *align = ranges[l].align;
  while (l != 0 && ranges[l].begin < ranges[l - 1].end + ranges[l].align)
    *align = Max(*align, ranges[--l].align);
  uptr r = one + 1;
  while (r != len && ranges[r].begin < ranges[r - 1].end + ranges[r].align)
    *align = Max(*align, ranges[r++].align);
  *addr = ranges[l].begin;
  *size = ranges[r - 1].end - ranges[l].begin;
}
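
// Worked example (editor's note, not part of upstream): suppose sorting yields
// three blocks [0x1000,0x1040) align 16, [0x1040,0x1080) align 16 and
// [0x2000,0x2100) align 16, with the main module being the first one. The
// second block begins within `align` bytes of the first block's end, so the
// two are merged; the third begins far beyond 0x1080 + 16 and is left out.
// The reported boundary is then addr=0x1000, size=0x80, align=16.
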
#  endif  // (x86_64 || i386 || mips || ...) && (SANITIZER_FREEBSD ||
          // SANITIZER_LINUX) && !SANITIZER_ANDROID && !SANITIZER_GO

#  if SANITIZER_NETBSD
static struct tls_tcb *ThreadSelfTlsTcb() {
  struct tls_tcb *tcb = nullptr;
#    ifdef __HAVE___LWP_GETTCB_FAST
  tcb = (struct tls_tcb *)__lwp_gettcb_fast();
#    elif defined(__HAVE___LWP_GETPRIVATE_FAST)
  tcb = (struct tls_tcb *)__lwp_getprivate_fast();
#    endif
  return tcb;
}

uptr ThreadSelf() { return (uptr)ThreadSelfTlsTcb()->tcb_pthread; }

int GetSizeFromHdr(struct dl_phdr_info *info, size_t size, void *data) {
  const Elf_Phdr *hdr = info->dlpi_phdr;
  const Elf_Phdr *last_hdr = hdr + info->dlpi_phnum;

  for (; hdr != last_hdr; ++hdr) {
    if (hdr->p_type == PT_TLS && info->dlpi_tls_modid == 1) {
      *(uptr *)data = hdr->p_memsz;
      break;
    }
  }
  return 0;
}
#  endif  // SANITIZER_NETBSD

#  if SANITIZER_ANDROID
// Bionic provides this API since S.
extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_get_static_tls_bounds(void **,
                                                                      void **);
#  endif

#  if !SANITIZER_GO
static void GetTls(uptr *addr, uptr *size) {
#    if SANITIZER_ANDROID
  if (&__libc_get_static_tls_bounds) {
    void *start_addr;
    void *end_addr;
    __libc_get_static_tls_bounds(&start_addr, &end_addr);
    *addr = reinterpret_cast<uptr>(start_addr);
    *size =
        reinterpret_cast<uptr>(end_addr) - reinterpret_cast<uptr>(start_addr);
  } else {
    *addr = 0;
    *size = 0;
  }
#    elif SANITIZER_GLIBC && defined(__x86_64__)
  // For aarch64 and x86-64, use an O(1) approach which requires relatively
  // precise ThreadDescriptorSize. g_tls_size was initialized in InitTlsSize.
#      if SANITIZER_X32
  asm("mov %%fs:8,%0" : "=r"(*addr));
#      else
  asm("mov %%fs:16,%0" : "=r"(*addr));
#      endif
  *size = g_tls_size;
  *addr -= *size;
  *addr += ThreadDescriptorSize();
#    elif SANITIZER_GLIBC && defined(__aarch64__)
  *addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
          ThreadDescriptorSize();
  *size = g_tls_size + ThreadDescriptorSize();
#    elif SANITIZER_GLIBC && defined(__loongarch__)
#      ifdef __clang__
  *addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
          ThreadDescriptorSize();
#      else
  asm("or %0,$tp,$zero" : "=r"(*addr));
  *addr -= ThreadDescriptorSize();
#      endif
  *size = g_tls_size + ThreadDescriptorSize();
#    elif SANITIZER_GLIBC && defined(__powerpc64__)
  // Workaround for glibc<2.25(?). 2.27 is known to not need this.
  uptr tp;
  asm("addi %0,13,-0x7000" : "=r"(tp));
  const uptr pre_tcb_size = TlsPreTcbSize();
  *addr = tp - pre_tcb_size;
  *size = g_tls_size + pre_tcb_size;
#    elif SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_SOLARIS
  uptr align;
  GetStaticTlsBoundary(addr, size, &align);
#      if defined(__x86_64__) || defined(__i386__) || defined(__s390__) || \
          defined(__sparc__)
  if (SANITIZER_GLIBC) {
#        if defined(__x86_64__) || defined(__i386__)
    align = Max<uptr>(align, 64);
#        else
    align = Max<uptr>(align, 16);
#        endif
  }
  const uptr tp = RoundUpTo(*addr + *size, align);

  // lsan requires the range to additionally cover the static TLS surplus
  // (elf/dl-tls.c defines 1664). Otherwise there may be false positives for
  // allocations only referenced by tls in dynamically loaded modules.
  if (SANITIZER_GLIBC)
    *size += 1644;
  else if (SANITIZER_FREEBSD)
    *size += 128;  // RTLD_STATIC_TLS_EXTRA

  // Extend the range to include the thread control block. On glibc, lsan needs
  // the range to include pthread::{specific_1stblock,specific} so that
  // allocations only referenced by pthread_setspecific can be scanned. This may
  // underestimate by at most TLS_TCB_ALIGN-1 bytes but it should be fine
  // because the number of bytes after pthread::specific is larger.
  *addr = tp - RoundUpTo(*size, align);
  *size = tp - *addr + ThreadDescriptorSize();
#      else
  if (SANITIZER_GLIBC)
    *size += 1664;
  else if (SANITIZER_FREEBSD)
    *size += 128;  // RTLD_STATIC_TLS_EXTRA
#        if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
  const uptr pre_tcb_size = TlsPreTcbSize();
  *addr -= pre_tcb_size;
  *size += pre_tcb_size;
#        else
  // arm and aarch64 reserve two words at TP, so this underestimates the range.
  // However, this is sufficient for the purpose of finding the pointers to
  // thread-specific data keys.
  const uptr tcb_size = ThreadDescriptorSize();
  *addr -= tcb_size;
  *size += tcb_size;
#        endif
#      endif
#    elif SANITIZER_NETBSD
  struct tls_tcb *const tcb = ThreadSelfTlsTcb();
  *addr = 0;
  *size = 0;
  if (tcb != 0) {
    // Find size (p_memsz) of dlpi_tls_modid 1 (TLS block of the main program).
    // ld.elf_so hardcodes the index 1.
    dl_iterate_phdr(GetSizeFromHdr, size);

    if (*size != 0) {
      // The block has been found and tcb_dtv[1] contains the base address
      *addr = (uptr)tcb->tcb_dtv[1];
    }
  }
#    else
#      error "Unknown OS"
#    endif
}
#  endif

#  if !SANITIZER_GO
uptr GetTlsSize() {
#    if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
        SANITIZER_SOLARIS
  uptr addr, size;
  GetTls(&addr, &size);
  return size;
#    else
  return 0;
#    endif
}
#  endif

void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size) {
#  if SANITIZER_GO
  // Stub implementation for Go.
  *stk_addr = *stk_size = *tls_addr = *tls_size = 0;
#  else
  GetTls(tls_addr, tls_size);

  uptr stack_top, stack_bottom;
  GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
  *stk_addr = stack_bottom;
  *stk_size = stack_top - stack_bottom;

  if (!main) {
    // If stack and tls intersect, make them non-intersecting.
    if (*tls_addr > *stk_addr && *tls_addr < *stk_addr + *stk_size) {
      if (*stk_addr + *stk_size < *tls_addr + *tls_size)
        *tls_size = *stk_addr + *stk_size - *tls_addr;
      *stk_size = *tls_addr - *stk_addr;
    }
  }
#  endif
}
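
// Illustrative usage (editor's note, not part of upstream): a hypothetical
// thread-registration path in a sanitizer runtime might call
//
//   uptr stk_addr, stk_size, tls_addr, tls_size;
//   GetThreadStackAndTls(/*main=*/false, &stk_addr, &stk_size,
//                        &tls_addr, &tls_size);
//
// and then treat [stk_addr, stk_addr + stk_size) and
// [tls_addr, tls_addr + tls_size) as non-overlapping root regions, which the
// trimming above guarantees for non-main threads.
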
#  if !SANITIZER_FREEBSD
typedef ElfW(Phdr) Elf_Phdr;
#  endif

struct DlIteratePhdrData {
  InternalMmapVectorNoCtor<LoadedModule> *modules;
  bool first;
};

static int AddModuleSegments(const char *module_name, dl_phdr_info *info,
                             InternalMmapVectorNoCtor<LoadedModule> *modules) {
  if (module_name[0] == '\0')
    return 0;
  LoadedModule cur_module;
  cur_module.set(module_name, info->dlpi_addr);
  for (int i = 0; i < (int)info->dlpi_phnum; i++) {
    const Elf_Phdr *phdr = &info->dlpi_phdr[i];
    if (phdr->p_type == PT_LOAD) {
      uptr cur_beg = info->dlpi_addr + phdr->p_vaddr;
      uptr cur_end = cur_beg + phdr->p_memsz;
      bool executable = phdr->p_flags & PF_X;
      bool writable = phdr->p_flags & PF_W;
      cur_module.addAddressRange(cur_beg, cur_end, executable, writable);
    } else if (phdr->p_type == PT_NOTE) {
#  ifdef NT_GNU_BUILD_ID
      uptr off = 0;
      while (off + sizeof(ElfW(Nhdr)) < phdr->p_memsz) {
        auto *nhdr = reinterpret_cast<const ElfW(Nhdr) *>(info->dlpi_addr +
                                                          phdr->p_vaddr + off);
        constexpr auto kGnuNamesz = 4;  // "GNU" with NUL-byte.
        static_assert(kGnuNamesz % 4 == 0, "kGnuNameSize is aligned to 4.");
        if (nhdr->n_type == NT_GNU_BUILD_ID && nhdr->n_namesz == kGnuNamesz) {
          if (off + sizeof(ElfW(Nhdr)) + nhdr->n_namesz + nhdr->n_descsz >
              phdr->p_memsz) {
            // Something is very wrong, bail out instead of reading potentially
            // arbitrary memory.
            break;
          }
          const char *name =
              reinterpret_cast<const char *>(nhdr) + sizeof(*nhdr);
          if (internal_memcmp(name, "GNU", 3) == 0) {
            const char *value = reinterpret_cast<const char *>(nhdr) +
                                sizeof(*nhdr) + kGnuNamesz;
            cur_module.setUuid(value, nhdr->n_descsz);
            break;
          }
        }
        off += sizeof(*nhdr) + RoundUpTo(nhdr->n_namesz, 4) +
               RoundUpTo(nhdr->n_descsz, 4);
      }
#  endif
    }
  }
  modules->push_back(cur_module);
  return 0;
}

static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
  DlIteratePhdrData *data = (DlIteratePhdrData *)arg;
  if (data->first) {
    InternalMmapVector<char> module_name(kMaxPathLength);
    data->first = false;
    // First module is the binary itself.
    ReadBinaryNameCached(module_name.data(), module_name.size());
    return AddModuleSegments(module_name.data(), info, data->modules);
  }

  if (info->dlpi_name)
    return AddModuleSegments(info->dlpi_name, info, data->modules);

  return 0;
}

#  if SANITIZER_ANDROID && __ANDROID_API__ < 21
extern "C" __attribute__((weak)) int dl_iterate_phdr(
    int (*)(struct dl_phdr_info *, size_t, void *), void *);
#  endif

static bool requiresProcmaps() {
#  if SANITIZER_ANDROID && __ANDROID_API__ <= 22
  // Fall back to /proc/maps if dl_iterate_phdr is unavailable or broken.
  // The runtime check allows the same library to work with
  // both K and L (and future) Android releases.
  return AndroidGetApiLevel() <= ANDROID_LOLLIPOP_MR1;
#  else
  return false;
#  endif
}

static void procmapsInit(InternalMmapVectorNoCtor<LoadedModule> *modules) {
  MemoryMappingLayout memory_mapping(/*cache_enabled*/ true);
  memory_mapping.DumpListOfModules(modules);
}

void ListOfModules::init() {
  clearOrInit();
  if (requiresProcmaps()) {
    procmapsInit(&modules_);
  } else {
    DlIteratePhdrData data = {&modules_, true};
    dl_iterate_phdr(dl_iterate_phdr_cb, &data);
  }
}

// When a custom loader is used, dl_iterate_phdr may not contain the full
// list of modules. Allow callers to fall back to using procmaps.
void ListOfModules::fallbackInit() {
  if (!requiresProcmaps()) {
    clearOrInit();
    procmapsInit(&modules_);
  } else {
    clear();
  }
}

// getrusage does not give us the current RSS, only the max RSS.
// Still, this is better than nothing if /proc/self/statm is not available
// for some reason, e.g. due to a sandbox.
static uptr GetRSSFromGetrusage() {
  struct rusage usage;
  if (getrusage(RUSAGE_SELF, &usage))  // Failed, probably due to a sandbox.
    return 0;
  return usage.ru_maxrss << 10;  // ru_maxrss is in Kb.
}

uptr GetRSS() {
  if (!common_flags()->can_use_proc_maps_statm)
    return GetRSSFromGetrusage();
  fd_t fd = OpenFile("/proc/self/statm", RdOnly);
  if (fd == kInvalidFd)
    return GetRSSFromGetrusage();
  char buf[64];
  uptr len = internal_read(fd, buf, sizeof(buf) - 1);
  internal_close(fd);
  if ((sptr)len <= 0)
    return 0;
  buf[len] = 0;
  // The format of the file is:
  // 1084 89 69 11 0 79 0
  // We need the second number which is RSS in pages.
  char *pos = buf;
  // Skip the first number.
  while (*pos >= '0' && *pos <= '9') pos++;
  // Skip whitespaces.
  while (!(*pos >= '0' && *pos <= '9') && *pos != 0) pos++;
  // Read the number.
  uptr rss = 0;
  while (*pos >= '0' && *pos <= '9') rss = rss * 10 + *pos++ - '0';
  return rss * GetPageSizeCached();
}
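
// Worked example (editor's note, not part of upstream): if /proc/self/statm
// reads "1084 89 69 11 0 79 0", the loops above skip "1084", land on "89" and
// return 89 * GetPageSizeCached(), i.e. 89 * 4096 = 364544 bytes on a system
// with 4 KiB pages. The getrusage fallback instead reports the peak RSS
// (ru_maxrss is in KiB, hence the << 10).
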
// sysconf(_SC_NPROCESSORS_{CONF,ONLN}) cannot be used on most platforms as
// they allocate memory.
u32 GetNumberOfCPUs() {
#  if SANITIZER_FREEBSD || SANITIZER_NETBSD
  u32 ncpu;
  int req[2];
  uptr len = sizeof(ncpu);
  req[0] = CTL_HW;
  req[1] = HW_NCPU;
  CHECK_EQ(internal_sysctl(req, 2, &ncpu, &len, NULL, 0), 0);
  return ncpu;
#  elif SANITIZER_ANDROID && !defined(CPU_COUNT) && !defined(__aarch64__)
  // Fall back to /sys/devices/system/cpu on Android when cpu_set_t doesn't
  // exist in sched.h. That is the case for toolchains generated with older
  // NDKs.
  // This code doesn't work on AArch64 because internal_getdents makes use of
  // the 64bit getdents syscall, but cpu_set_t seems to always exist on AArch64.
  uptr fd = internal_open("/sys/devices/system/cpu", O_RDONLY | O_DIRECTORY);
  if (internal_iserror(fd))
    return 0;
  InternalMmapVector<u8> buffer(4096);
  uptr bytes_read = buffer.size();
  uptr n_cpus = 0;
  u8 *d_type;
  struct linux_dirent *entry = (struct linux_dirent *)&buffer[bytes_read];
  while (true) {
    if ((u8 *)entry >= &buffer[bytes_read]) {
      bytes_read = internal_getdents(fd, (struct linux_dirent *)buffer.data(),
                                     buffer.size());
      if (internal_iserror(bytes_read) || !bytes_read)
        break;
      entry = (struct linux_dirent *)buffer.data();
    }
    d_type = (u8 *)entry + entry->d_reclen - 1;
    if (d_type >= &buffer[bytes_read] ||
        (u8 *)&entry->d_name[3] >= &buffer[bytes_read])
      break;
    if (entry->d_ino != 0 && *d_type == DT_DIR) {
      if (entry->d_name[0] == 'c' && entry->d_name[1] == 'p' &&
          entry->d_name[2] == 'u' && entry->d_name[3] >= '0' &&
          entry->d_name[3] <= '9')
        n_cpus++;
    }
    entry = (struct linux_dirent *)(((u8 *)entry) + entry->d_reclen);
  }
  internal_close(fd);
  return n_cpus;
#  elif SANITIZER_SOLARIS
  return sysconf(_SC_NPROCESSORS_ONLN);
#  else
  cpu_set_t CPUs;
  CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
  return CPU_COUNT(&CPUs);
#  endif
}

#  if SANITIZER_LINUX

#    if SANITIZER_ANDROID
static atomic_uint8_t android_log_initialized;

void AndroidLogInit() {
  openlog(GetProcessName(), 0, LOG_USER);
  atomic_store(&android_log_initialized, 1, memory_order_release);
}

static bool ShouldLogAfterPrintf() {
  return atomic_load(&android_log_initialized, memory_order_acquire);
}

extern "C" SANITIZER_WEAK_ATTRIBUTE int async_safe_write_log(int pri,
                                                             const char *tag,
                                                             const char *msg);
extern "C" SANITIZER_WEAK_ATTRIBUTE int __android_log_write(int prio,
                                                            const char *tag,
                                                            const char *msg);

// ANDROID_LOG_INFO is 4, but can't be resolved at runtime.
#      define SANITIZER_ANDROID_LOG_INFO 4

// async_safe_write_log is a new public version of __libc_write_log that is
// used behind syslog. It is preferable to syslog as it will not do any dynamic
// memory allocation or formatting.
// If the function is not available, syslog is preferred for L+ (it was broken
// pre-L) as __android_log_write triggers a racey behavior with the strncpy
// interceptor. Fallback to __android_log_write pre-L.
void WriteOneLineToSyslog(const char *s) {
  if (&async_safe_write_log) {
    async_safe_write_log(SANITIZER_ANDROID_LOG_INFO, GetProcessName(), s);
  } else if (AndroidGetApiLevel() > ANDROID_KITKAT) {
    syslog(LOG_INFO, "%s", s);
  } else {
    CHECK(&__android_log_write);
    __android_log_write(SANITIZER_ANDROID_LOG_INFO, nullptr, s);
  }
}

extern "C" SANITIZER_WEAK_ATTRIBUTE void android_set_abort_message(
    const char *);

void SetAbortMessage(const char *str) {
  if (&android_set_abort_message)
    android_set_abort_message(str);
}
#    else
void AndroidLogInit() {}

static bool ShouldLogAfterPrintf() { return true; }

void WriteOneLineToSyslog(const char *s) { syslog(LOG_INFO, "%s", s); }

void SetAbortMessage(const char *str) {}
#    endif  // SANITIZER_ANDROID

void LogMessageOnPrintf(const char *str) {
  if (common_flags()->log_to_syslog && ShouldLogAfterPrintf())
    WriteToSyslog(str);
}

#  endif  // SANITIZER_LINUX

#  if SANITIZER_GLIBC && !SANITIZER_GO
// glibc crashes when using clock_gettime from a preinit_array function as the
// vDSO function pointers haven't been initialized yet. __progname is
// initialized after the vDSO function pointers, so if it exists, is not null
// and is not empty, we can use clock_gettime.
extern "C" SANITIZER_WEAK_ATTRIBUTE char *__progname;
inline bool CanUseVDSO() { return &__progname && __progname && *__progname; }

// MonotonicNanoTime is a timing function that can leverage the vDSO by calling
// clock_gettime. real_clock_gettime only exists if clock_gettime is
// intercepted, so define it weakly and use it if available.
extern "C" SANITIZER_WEAK_ATTRIBUTE int real_clock_gettime(u32 clk_id,
                                                           void *tp);
u64 MonotonicNanoTime() {
  timespec ts;
  if (CanUseVDSO()) {
    if (&real_clock_gettime)
      real_clock_gettime(CLOCK_MONOTONIC, &ts);
    else
      clock_gettime(CLOCK_MONOTONIC, &ts);
  } else {
    internal_clock_gettime(CLOCK_MONOTONIC, &ts);
  }
  return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
}
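
// Illustrative example (editor's note, not part of upstream): for
// ts = {tv_sec = 3, tv_nsec = 250000000} the return value is
// 3 * 1000000000 + 250000000 = 3250000000 ns, i.e. 3.25 s of monotonic time.
// The CanUseVDSO() check only decides which clock_gettime entry point is safe
// to call this early in process startup.
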
#  else
// Non-glibc & Go always use the regular function.
u64 MonotonicNanoTime() {
  timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
}
#  endif  // SANITIZER_GLIBC && !SANITIZER_GO

void ReExec() {
  const char *pathname = "/proc/self/exe";

#  if SANITIZER_FREEBSD
  for (const auto *aux = __elf_aux_vector; aux->a_type != AT_NULL; aux++) {
    if (aux->a_type == AT_EXECPATH) {
      pathname = static_cast<const char *>(aux->a_un.a_ptr);
      break;
    }
  }
#  elif SANITIZER_NETBSD
  static const int name[] = {
      CTL_KERN,
      KERN_PROC_ARGS,
      -1,
      KERN_PROC_PATHNAME,
  };
  char path[400];
  uptr len;

  len = sizeof(path);
  if (internal_sysctl(name, ARRAY_SIZE(name), path, &len, NULL, 0) != -1)
    pathname = path;
#  elif SANITIZER_SOLARIS
  pathname = getexecname();
  CHECK_NE(pathname, NULL);
#  elif SANITIZER_USE_GETAUXVAL
  // Calling execve with /proc/self/exe sets that as $EXEC_ORIGIN. Binaries that
  // rely on that will fail to load shared libraries. Query AT_EXECFN instead.
  pathname = reinterpret_cast<const char *>(getauxval(AT_EXECFN));
#  endif

  uptr rv = internal_execve(pathname, GetArgv(), GetEnviron());
  int rverrno;
  CHECK_EQ(internal_iserror(rv, &rverrno), true);
  Printf("execve failed, errno %d\n", rverrno);
  Die();
}

void UnmapFromTo(uptr from, uptr to) {
  if (to == from)
    return;
  CHECK(to >= from);
  uptr res = internal_munmap(reinterpret_cast<void *>(from), to - from);
  if (UNLIKELY(internal_iserror(res))) {
    Report("ERROR: %s failed to unmap 0x%zx (%zd) bytes at address %p\n",
           SanitizerToolName, to - from, to - from, (void *)from);
    CHECK("unable to unmap" && 0);
  }
}

uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
                      uptr min_shadow_base_alignment,
                      UNUSED uptr &high_mem_end) {
  const uptr granularity = GetMmapGranularity();
  const uptr alignment =
      Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);
  const uptr left_padding =
      Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);

  const uptr shadow_size = RoundUpTo(shadow_size_bytes, granularity);
  const uptr map_size = shadow_size + left_padding + alignment;

  const uptr map_start = (uptr)MmapNoAccess(map_size);
  CHECK_NE(map_start, ~(uptr)0);

  const uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);

  UnmapFromTo(map_start, shadow_start - left_padding);
  UnmapFromTo(shadow_start + shadow_size, map_start + map_size);

  return shadow_start;
}
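
// Worked example (editor's note, not part of upstream): with a 4 KiB mmap
// granularity, shadow_scale = 3 and min_shadow_base_alignment = 0, the
// alignment is 32 KiB and left_padding is 4 KiB. For shadow_size_bytes of,
// say, 1 MiB the function reserves 1 MiB + 4 KiB + 32 KiB with MmapNoAccess,
// rounds the start up to a 32 KiB boundary, and unmaps the unused head and
// tail so that only the aligned shadow region (plus its left guard) stays
// reserved.
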
static uptr MmapSharedNoReserve(uptr addr, uptr size) {
  return internal_mmap(
      reinterpret_cast<void *>(addr), size, PROT_READ | PROT_WRITE,
      MAP_FIXED | MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
}

static uptr MremapCreateAlias(uptr base_addr, uptr alias_addr,
                              uptr alias_size) {
#  if SANITIZER_LINUX
  return internal_mremap(reinterpret_cast<void *>(base_addr), 0, alias_size,
                         MREMAP_MAYMOVE | MREMAP_FIXED,
                         reinterpret_cast<void *>(alias_addr));
#  else
  CHECK(false && "mremap is not supported outside of Linux");
  return 0;
#  endif
}

static void CreateAliases(uptr start_addr, uptr alias_size, uptr num_aliases) {
  uptr total_size = alias_size * num_aliases;
  uptr mapped = MmapSharedNoReserve(start_addr, total_size);
  CHECK_EQ(mapped, start_addr);

  for (uptr i = 1; i < num_aliases; ++i) {
    uptr alias_addr = start_addr + i * alias_size;
    CHECK_EQ(MremapCreateAlias(start_addr, alias_addr, alias_size), alias_addr);
  }
}
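
// Editor's note (illustrative, not part of upstream): because the backing
// mapping is MAP_SHARED, mremap() with old_size == 0 duplicates rather than
// moves it, so after CreateAliases(base, sz, 4) the windows base, base + sz,
// base + 2 * sz and base + 3 * sz all reference the same physical pages; a
// store through one alias is visible through the others.
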
uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
                                uptr num_aliases, uptr ring_buffer_size) {
  CHECK_EQ(alias_size & (alias_size - 1), 0);
  CHECK_EQ(num_aliases & (num_aliases - 1), 0);
  CHECK_EQ(ring_buffer_size & (ring_buffer_size - 1), 0);

  const uptr granularity = GetMmapGranularity();
  shadow_size = RoundUpTo(shadow_size, granularity);
  CHECK_EQ(shadow_size & (shadow_size - 1), 0);

  const uptr alias_region_size = alias_size * num_aliases;
  const uptr alignment =
      2 * Max(Max(shadow_size, alias_region_size), ring_buffer_size);
  const uptr left_padding = ring_buffer_size;

  const uptr right_size = alignment;
  const uptr map_size = left_padding + 2 * alignment;

  const uptr map_start = reinterpret_cast<uptr>(MmapNoAccess(map_size));
  CHECK_NE(map_start, static_cast<uptr>(-1));
  const uptr right_start = RoundUpTo(map_start + left_padding, alignment);

  UnmapFromTo(map_start, right_start - left_padding);
  UnmapFromTo(right_start + right_size, map_start + map_size);

  CreateAliases(right_start + right_size / 2, alias_size, num_aliases);

  return right_start;
}
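
// Editor's note (illustrative, not part of upstream): all three sizes are
// powers of two, so `alignment` is twice the largest of them and right_start
// ends up aligned to it. The reservation keeps ring_buffer_size bytes of
// padding on the left, a region of `alignment` bytes starting at right_start,
// and places the alias windows in the upper half of that region
// (right_start + alignment / 2), which is exactly where CreateAliases() is
// pointed.
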
void InitializePlatformCommonFlags(CommonFlags *cf) {
#  if SANITIZER_ANDROID
  if (&__libc_get_static_tls_bounds == nullptr)
    cf->detect_leaks = false;
#  endif
}

}  // namespace __sanitizer

#endif