//===-- hwasan_report.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// Error reporting.
//===----------------------------------------------------------------------===//

#include "hwasan_report.h"

#include <dlfcn.h>

#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_globals.h"
#include "hwasan_mapping.h"
#include "hwasan_thread.h"
#include "hwasan_thread_list.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace_printer.h"
#include "sanitizer_common/sanitizer_symbolizer.h"

using namespace __sanitizer;

namespace __hwasan {
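
// RAII class that accumulates the text of one error report while it is being
// printed. For fatal reports, everything printed under this object is also
// appended to error_message_ (see AppendToErrorMessageBuffer below); the
// destructor hands the buffer to the user-registered callback, installs it as
// the abort message, optionally dumps the module map, and then calls Die().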
class ScopedReport {
 public:
  ScopedReport(bool fatal = false) : error_message_(1), fatal(fatal) {
    Lock lock(&error_message_lock_);
    error_message_ptr_ = fatal ? &error_message_ : nullptr;
    ++hwasan_report_count;
  }

  ~ScopedReport() {
    void (*report_cb)(const char *);
    {
      Lock lock(&error_message_lock_);
      report_cb = error_report_callback_;
      error_message_ptr_ = nullptr;
    }
    if (report_cb)
      report_cb(error_message_.data());
    if (fatal)
      SetAbortMessage(error_message_.data());
    if (common_flags()->print_module_map >= 2 ||
        (fatal && common_flags()->print_module_map))
      DumpProcessMap();
    if (fatal)
      Die();
  }

  static void MaybeAppendToErrorMessage(const char *msg) {
    Lock lock(&error_message_lock_);
    if (!error_message_ptr_)
      return;
    uptr len = internal_strlen(msg);
    uptr old_size = error_message_ptr_->size();
    error_message_ptr_->resize(old_size + len);
    // overwrite old trailing '\0', keep new trailing '\0' untouched.
    internal_memcpy(&(*error_message_ptr_)[old_size - 1], msg, len);
  }

  static void SetErrorReportCallback(void (*callback)(const char *)) {
    Lock lock(&error_message_lock_);
    error_report_callback_ = callback;
  }

 private:
  ScopedErrorReportLock error_report_lock_;
  InternalMmapVector<char> error_message_;
  bool fatal;

  static InternalMmapVector<char> *error_message_ptr_;
  static Mutex error_message_lock_;
  static void (*error_report_callback_)(const char *);
};

InternalMmapVector<char> *ScopedReport::error_message_ptr_;
Mutex ScopedReport::error_message_lock_;
void (*ScopedReport::error_report_callback_)(const char *);

// If there is an active ScopedReport, append to its error message.
void AppendToErrorMessageBuffer(const char *buffer) {
  ScopedReport::MaybeAppendToErrorMessage(buffer);
}

static StackTrace GetStackTraceFromId(u32 id) {
  CHECK(id);
  StackTrace res = StackDepotGet(id);
  CHECK(res.trace);
  return res;
}

// A RAII object that holds a copy of the current thread stack ring buffer.
// The actual stack buffer may change while we are iterating over it (for
// example, Printf may call syslog() which can itself be built with hwasan).
class SavedStackAllocations {
 public:
  SavedStackAllocations(StackAllocationsRingBuffer *rb) {
    uptr size = rb->size() * sizeof(uptr);
    void *storage =
        MmapAlignedOrDieOnFatalError(size, size * 2, "saved stack allocations");
    new (&rb_) StackAllocationsRingBuffer(*rb, storage);
  }

  ~SavedStackAllocations() {
    StackAllocationsRingBuffer *rb = get();
    UnmapOrDie(rb->StartOfStorage(), rb->size() * sizeof(uptr));
  }

  StackAllocationsRingBuffer *get() {
    return (StackAllocationsRingBuffer *)&rb_;
  }

 private:
  uptr rb_;
};

class Decorator: public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() { }
  const char *Access() { return Blue(); }
  const char *Allocation() const { return Magenta(); }
  const char *Origin() const { return Magenta(); }
  const char *Name() const { return Green(); }
  const char *Location() { return Green(); }
  const char *Thread() { return Green(); }
};
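
// Looks for a record in the thread's heap deallocation ring buffer whose
// [tagged_addr, tagged_addr + requested_size) range covers tagged_addr,
// comparing full tagged pointers. As a side effect it counts how many records
// would have matched with tags ignored, and with only 4 tag bits (as on MTE);
// these counts are developer telemetry for tuning the allocator, not used for
// the report itself.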
static bool FindHeapAllocation(HeapAllocationsRingBuffer *rb, uptr tagged_addr,
                               HeapAllocationRecord *har, uptr *ring_index,
                               uptr *num_matching_addrs,
                               uptr *num_matching_addrs_4b) {
  if (!rb) return false;
  *num_matching_addrs = 0;
  *num_matching_addrs_4b = 0;
  for (uptr i = 0, size = rb->size(); i < size; i++) {
    auto h = (*rb)[i];
    if (h.tagged_addr <= tagged_addr &&
        h.tagged_addr + h.requested_size > tagged_addr) {
      *har = h;
      *ring_index = i;
      return true;
    }

    // Measure the number of heap ring buffer entries that would have matched
    // if we had only one entry per address (e.g. if the ring buffer data was
    // stored at the address itself). This will help us tune the allocator
    // implementation for MTE.
    if (UntagAddr(h.tagged_addr) <= UntagAddr(tagged_addr) &&
        UntagAddr(h.tagged_addr) + h.requested_size > UntagAddr(tagged_addr)) {
      ++*num_matching_addrs;
    }

    // Measure the number of heap ring buffer entries that would have matched
    // if we only had 4 tag bits, which is the case for MTE.
    auto untag_4b = [](uptr p) {
      return p & ((1ULL << 60) - 1);
    };
    if (untag_4b(h.tagged_addr) <= untag_4b(tagged_addr) &&
        untag_4b(h.tagged_addr) + h.requested_size > untag_4b(tagged_addr)) {
      ++*num_matching_addrs_4b;
    }
  }
  return false;
}
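
// Each record in the stack allocations ring buffer packs one frame into a
// single uptr: the low kRecordFPShift bits hold the PC and the high bits hold
// bits 4-19 of the frame pointer. The frame's base tag is not stored at all;
// it is recovered from the address of the ring buffer slot itself (shifted by
// kRecordAddrBaseTagShift), so a record plus its slot address is enough to
// re-derive the tag of every local in the frame.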
static void PrintStackAllocations(StackAllocationsRingBuffer *sa,
                                  tag_t addr_tag, uptr untagged_addr) {
  uptr frames = Min((uptr)flags()->stack_history_size, sa->size());
  bool found_local = false;
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    if (!record)
      break;
    tag_t base_tag =
        reinterpret_cast<uptr>(record_addr) >> kRecordAddrBaseTagShift;
    uptr fp = (record >> kRecordFPShift) << kRecordFPLShift;
    uptr pc_mask = (1ULL << kRecordFPShift) - 1;
    uptr pc = record & pc_mask;
    FrameInfo frame;
    if (Symbolizer::GetOrInit()->SymbolizeFrame(pc, &frame)) {
      for (LocalInfo &local : frame.locals) {
        if (!local.has_frame_offset || !local.has_size || !local.has_tag_offset)
          continue;
        tag_t obj_tag = base_tag ^ local.tag_offset;
        if (obj_tag != addr_tag)
          continue;
        // Calculate the offset from the object address to the faulting
        // address. Because we only store bits 4-19 of FP (bits 0-3 are
        // guaranteed to be zero), the calculation is performed mod 2^20 and may
        // harmlessly underflow if the address mod 2^20 is below the object
        // address.
        uptr obj_offset =
            (untagged_addr - fp - local.frame_offset) & (kRecordFPModulus - 1);
        if (obj_offset >= local.size)
          continue;
        if (!found_local) {
          Printf("Potentially referenced stack objects:\n");
          found_local = true;
        }
        Printf("  %s in %s %s:%d\n", local.name, local.function_name,
               local.decl_file, local.decl_line);
      }
      frame.Clear();
    }
  }

  if (found_local)
    return;

  // We didn't find any locals. Most likely we don't have symbols, so dump
  // the information that we have for offline analysis.
  InternalScopedString frame_desc;
  Printf("Previously allocated frames:\n");
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    if (!record)
      break;
    uptr pc_mask = (1ULL << 48) - 1;
    uptr pc = record & pc_mask;
    frame_desc.append("  record_addr:0x%zx record:0x%zx",
                      reinterpret_cast<uptr>(record_addr), record);
    if (SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc)) {
      RenderFrame(&frame_desc, " %F %L", 0, frame->info.address, &frame->info,
                  common_flags()->symbolize_vs_style,
                  common_flags()->strip_path_prefix);
      frame->ClearAll();
    }
    Printf("%s\n", frame_desc.data());
    frame_desc.clear();
  }
}

// Returns true if tag == *tag_ptr, reading tags from short granules if
// necessary. This may return a false positive if tags 1-15 are used as a
// regular tag rather than a short granule marker.
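// For example, a 12-byte allocation is padded out to one 16-byte granule; its
// shadow cell then stores the short granule size 12 instead of the tag, and
// the granule's real tag is stashed in the last byte of the granule itself,
// which is where this function reads it from.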
static bool TagsEqual(tag_t tag, tag_t *tag_ptr) {
  if (tag == *tag_ptr)
    return true;
  if (*tag_ptr == 0 || *tag_ptr > kShadowAlignment - 1)
    return false;
  uptr mem = ShadowToMem(reinterpret_cast<uptr>(tag_ptr));
  tag_t inline_tag = *reinterpret_cast<tag_t *>(mem + kShadowAlignment - 1);
  return tag == inline_tag;
}

// HWASan globals store the size of the global in the descriptor. In cases where
// we don't have a binary with symbols, we can't grab the size of the global
// from the debug info - but we might be able to retrieve it from the
// descriptor. Returns zero if the lookup failed.
static uptr GetGlobalSizeFromDescriptor(uptr ptr) {
  // Find the ELF object that this global resides in.
  Dl_info info;
  if (dladdr(reinterpret_cast<void *>(ptr), &info) == 0)
    return 0;
  auto *ehdr = reinterpret_cast<const ElfW(Ehdr) *>(info.dli_fbase);
  auto *phdr_begin = reinterpret_cast<const ElfW(Phdr) *>(
      reinterpret_cast<const u8 *>(ehdr) + ehdr->e_phoff);

  // Get the load bias. This is normally the same as the dli_fbase address on
  // position-independent code, but can be different on non-PIE executables,
  // binaries using LLD's partitioning feature, or binaries compiled with a
  // linker script.
  ElfW(Addr) load_bias = 0;
  for (const auto &phdr :
       ArrayRef<const ElfW(Phdr)>(phdr_begin, phdr_begin + ehdr->e_phnum)) {
    if (phdr.p_type != PT_LOAD || phdr.p_offset != 0)
      continue;
    load_bias = reinterpret_cast<ElfW(Addr)>(ehdr) - phdr.p_vaddr;
    break;
  }

  // Walk all globals in this ELF object, looking for the one we're interested
  // in. Once we find it, we can stop iterating and return the size of the
  // global we're interested in.
  for (const hwasan_global &global :
       HwasanGlobalsFor(load_bias, phdr_begin, ehdr->e_phnum))
    if (global.addr() <= ptr && ptr < global.addr() + global.size())
      return global.size();

  return 0;
}
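
// Describes the object that the candidate shadow cell (whose tag matches the
// faulting pointer) belongs to. If the allocator still has a live chunk
// there, report a heap-buffer-overflow relative to that chunk; otherwise, if
// the address falls within a loaded module, assume a global and report its
// name and size, preferring symbolizer data and falling back to the size
// recorded in the HWASan global descriptor.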
static void ShowHeapOrGlobalCandidate(uptr untagged_addr, tag_t *candidate,
                                      tag_t *left, tag_t *right) {
  Decorator d;
  uptr mem = ShadowToMem(reinterpret_cast<uptr>(candidate));
  HwasanChunkView chunk = FindHeapChunkByAddress(mem);
  if (chunk.IsAllocated()) {
    uptr offset;
    const char *whence;
    if (untagged_addr < chunk.End() && untagged_addr >= chunk.Beg()) {
      offset = untagged_addr - chunk.Beg();
      whence = "inside";
    } else if (candidate == left) {
      offset = untagged_addr - chunk.End();
      whence = "to the right of";
    } else {
      offset = chunk.Beg() - untagged_addr;
      whence = "to the left of";
    }
    Printf("%s", d.Error());
    Printf("\nCause: heap-buffer-overflow\n");
    Printf("%s", d.Default());
    Printf("%s", d.Location());
    Printf("%p is located %zd bytes %s %zd-byte region [%p,%p)\n",
           untagged_addr, offset, whence, chunk.UsedSize(), chunk.Beg(),
           chunk.End());
    Printf("%s", d.Allocation());
    Printf("allocated here:\n");
    Printf("%s", d.Default());
    GetStackTraceFromId(chunk.GetAllocStackId()).Print();
    return;
  }
  // Check whether the address points into a loaded library. If so, this is
  // most likely a global variable.
  const char *module_name;
  uptr module_address;
  Symbolizer *sym = Symbolizer::GetOrInit();
  if (sym->GetModuleNameAndOffsetForPC(mem, &module_name, &module_address)) {
    Printf("%s", d.Error());
    Printf("\nCause: global-overflow\n");
    Printf("%s", d.Default());
    DataInfo info;
    Printf("%s", d.Location());
    if (sym->SymbolizeData(mem, &info) && info.start) {
      Printf(
          "%p is located %zd bytes to the %s of %zd-byte global variable "
          "%s [%p,%p) in %s\n",
          untagged_addr,
          candidate == left ? untagged_addr - (info.start + info.size)
                            : info.start - untagged_addr,
          candidate == left ? "right" : "left", info.size, info.name,
          info.start, info.start + info.size, module_name);
    } else {
      uptr size = GetGlobalSizeFromDescriptor(mem);
      if (size == 0)
        // We couldn't find the size of the global from the descriptors.
        Printf(
            "%p is located to the %s of a global variable in "
            "\n    #0 0x%x (%s+0x%x)\n",
            untagged_addr, candidate == left ? "right" : "left", mem,
            module_name, module_address);
      else
        Printf(
            "%p is located to the %s of a %zd-byte global variable in "
            "\n    #0 0x%x (%s+0x%x)\n",
            untagged_addr, candidate == left ? "right" : "left", size, mem,
            module_name, module_address);
    }
    Printf("%s", d.Default());
  }
}
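
// Prints everything we can learn about the faulting address: whether it is
// HWASan shadow, what heap chunk surrounds it, which live threads' stacks
// (and stack objects) it may belong to, whether an adjacent heap or global
// object carries a matching tag (a likely buffer overflow), and whether any
// thread's deallocation ring buffer records a freed allocation covering it
// (a likely use-after-free).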
void PrintAddressDescription(
    uptr tagged_addr, uptr access_size,
    StackAllocationsRingBuffer *current_stack_allocations) {
  Decorator d;
  int num_descriptions_printed = 0;
  uptr untagged_addr = UntagAddr(tagged_addr);

  if (MemIsShadow(untagged_addr)) {
    Printf("%s%p is HWAsan shadow memory.\n%s", d.Location(), untagged_addr,
           d.Default());
    return;
  }

  // Print some very basic information about the address, if it's a heap.
  HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
  if (uptr beg = chunk.Beg()) {
    uptr size = chunk.ActualSize();
    Printf("%s[%p,%p) is a %s %s heap chunk; "
           "size: %zd offset: %zd\n%s",
           d.Location(),
           beg, beg + size,
           chunk.FromSmallHeap() ? "small" : "large",
           chunk.IsAllocated() ? "allocated" : "unallocated",
           size, untagged_addr - beg,
           d.Default());
  }

  tag_t addr_tag = GetTagFromPointer(tagged_addr);

  bool on_stack = false;
  // Check stack first. If the address is on the stack of a live thread, we
  // know it cannot be a heap / global overflow.
  hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
    if (t->AddrIsInStack(untagged_addr)) {
      on_stack = true;
      // TODO(fmayer): figure out how to distinguish use-after-return and
      // stack-buffer-overflow.
      Printf("%s", d.Error());
      Printf("\nCause: stack tag-mismatch\n");
      Printf("%s", d.Location());
      Printf("Address %p is located in stack of thread T%zd\n", untagged_addr,
             t->unique_id());
      Printf("%s", d.Default());
      t->Announce();

      auto *sa = (t == GetCurrentThread() && current_stack_allocations)
                     ? current_stack_allocations
                     : t->stack_allocations();
      PrintStackAllocations(sa, addr_tag, untagged_addr);
      num_descriptions_printed++;
    }
  });

  // Check if this looks like a heap buffer overflow by scanning
  // the shadow left and right and looking for the first adjacent
  // object with a different memory tag. If that tag matches addr_tag,
  // check the allocator if it has a live chunk there.
  tag_t *tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr));
  tag_t *candidate = nullptr, *left = tag_ptr, *right = tag_ptr;
  uptr candidate_distance = 0;
  for (; candidate_distance < 1000; candidate_distance++) {
    if (MemIsShadow(reinterpret_cast<uptr>(left)) &&
        TagsEqual(addr_tag, left)) {
      candidate = left;
      break;
    }
    --left;
    if (MemIsShadow(reinterpret_cast<uptr>(right)) &&
        TagsEqual(addr_tag, right)) {
      candidate = right;
      break;
    }
    ++right;
  }

  constexpr auto kCloseCandidateDistance = 1;

  if (!on_stack && candidate && candidate_distance <= kCloseCandidateDistance) {
    ShowHeapOrGlobalCandidate(untagged_addr, candidate, left, right);
    num_descriptions_printed++;
  }

  hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
    // Scan all threads' ring buffers to find if it's a heap-use-after-free.
    HeapAllocationRecord har;
    uptr ring_index, num_matching_addrs, num_matching_addrs_4b;
    if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har,
                           &ring_index, &num_matching_addrs,
                           &num_matching_addrs_4b)) {
      Printf("%s", d.Error());
      Printf("\nCause: use-after-free\n");
      Printf("%s", d.Location());
      Printf("%p is located %zd bytes inside of %zd-byte region [%p,%p)\n",
             untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
             har.requested_size, UntagAddr(har.tagged_addr),
             UntagAddr(har.tagged_addr) + har.requested_size);
      Printf("%s", d.Allocation());
      Printf("freed by thread T%zd here:\n", t->unique_id());
      Printf("%s", d.Default());
      GetStackTraceFromId(har.free_context_id).Print();
      Printf("%s", d.Allocation());
      Printf("previously allocated here:\n");
      Printf("%s", d.Default());
      GetStackTraceFromId(har.alloc_context_id).Print();

      // Print a developer note: the index of this heap object
      // in the thread's deallocation ring buffer.
      Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ring_index + 1,
             flags()->heap_history_size);
      Printf("hwasan_dev_note_num_matching_addrs: %zd\n", num_matching_addrs);
      Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n",
             num_matching_addrs_4b);

      t->Announce();
      num_descriptions_printed++;
    }
  });

  if (candidate && num_descriptions_printed == 0) {
    ShowHeapOrGlobalCandidate(untagged_addr, candidate, left, right);
    num_descriptions_printed++;
  }
  // Print the remaining threads, one line per thread, as extra information.
  hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });

  if (!num_descriptions_printed)
    // We exhausted our possibilities. Bail out.
    Printf("HWAddressSanitizer cannot describe address in more detail.\n");
  if (num_descriptions_printed > 1) {
    Printf(
        "There are %d potential causes, printed above in order "
        "of likelihood.\n",
        num_descriptions_printed);
  }
}

void ReportStats() {}
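
// Prints num_rows rows of 16 shadow cells each, centered on the row that
// contains tag_ptr. The center row is marked with "=>" and the faulting cell
// is bracketed. The print_tag callback renders each cell, so this routine
// backs both the memory-tag dump and the short-granule dump below.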
static void PrintTagInfoAroundAddr(tag_t *tag_ptr, uptr num_rows,
                                   void (*print_tag)(InternalScopedString &s,
                                                     tag_t *tag)) {
  const uptr row_len = 16;  // Should be a power of two.
  tag_t *center_row_beg = reinterpret_cast<tag_t *>(
      RoundDownTo(reinterpret_cast<uptr>(tag_ptr), row_len));
  tag_t *beg_row = center_row_beg - row_len * (num_rows / 2);
  tag_t *end_row = center_row_beg + row_len * ((num_rows + 1) / 2);
  InternalScopedString s;
  for (tag_t *row = beg_row; row < end_row; row += row_len) {
    s.append("%s", row == center_row_beg ? "=>" : "  ");
    s.append("%p:", (void *)row);
    for (uptr i = 0; i < row_len; i++) {
      s.append("%s", row + i == tag_ptr ? "[" : " ");
      print_tag(s, &row[i]);
      s.append("%s", row + i == tag_ptr ? "]" : " ");
    }
    s.append("\n");
  }
  Printf("%s", s.data());
}

static void PrintTagsAroundAddr(tag_t *tag_ptr) {
  Printf(
      "Memory tags around the buggy address (one tag corresponds to %zd "
      "bytes):\n", kShadowAlignment);
  PrintTagInfoAroundAddr(tag_ptr, 17, [](InternalScopedString &s, tag_t *tag) {
    s.append("%02x", *tag);
  });

  Printf(
      "Tags for short granules around the buggy address (one tag corresponds "
      "to %zd bytes):\n",
      kShadowAlignment);
  PrintTagInfoAroundAddr(tag_ptr, 3, [](InternalScopedString &s, tag_t *tag) {
    if (*tag >= 1 && *tag <= kShadowAlignment) {
      uptr granule_addr = ShadowToMem(reinterpret_cast<uptr>(tag));
      s.append("%02x",
               *reinterpret_cast<u8 *>(granule_addr + kShadowAlignment - 1));
    } else {
      s.append("..");
    }
  });
  Printf(
      "See "
      "https://clang.llvm.org/docs/"
      "HardwareAssistedAddressSanitizerDesign.html#short-granules for a "
      "description of short granule tags\n");
}
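
// PCs in a stack trace are return addresses, i.e. they point one instruction
// past the call (or past the faulting instruction), so step back to attribute
// the report to the instruction that actually executed.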
uptr GetTopPc(StackTrace *stack) {
  return stack->size ? StackTrace::GetPreviousInstructionPc(stack->trace[0])
                     : 0;
}

void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
  ScopedReport R(flags()->halt_on_error);

  uptr untagged_addr = UntagAddr(tagged_addr);
  tag_t ptr_tag = GetTagFromPointer(tagged_addr);
  tag_t *tag_ptr = nullptr;
  tag_t mem_tag = 0;
  if (MemIsApp(untagged_addr)) {
    tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr));
    if (MemIsShadow(reinterpret_cast<uptr>(tag_ptr)))
      mem_tag = *tag_ptr;
    else
      tag_ptr = nullptr;
  }
  Decorator d;
  Printf("%s", d.Error());
  uptr pc = GetTopPc(stack);
  const char *bug_type = "invalid-free";
  const Thread *thread = GetCurrentThread();
  if (thread) {
    Report("ERROR: %s: %s on address %p at pc %p on thread T%zd\n",
           SanitizerToolName, bug_type, untagged_addr, pc, thread->unique_id());
  } else {
    Report("ERROR: %s: %s on address %p at pc %p on unknown thread\n",
           SanitizerToolName, bug_type, untagged_addr, pc);
  }
  Printf("%s", d.Access());
  if (tag_ptr)
    Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag, mem_tag);
  Printf("%s", d.Default());

  stack->Print();

  PrintAddressDescription(tagged_addr, 0, nullptr);

  if (tag_ptr)
    PrintTagsAroundAddr(tag_ptr);

  ReportErrorSummary(bug_type, stack);
}
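
// Reported when the free-time tail check (see the free_checks_tail_magic
// flag) finds that the magic bytes between the end of a short-granule
// allocation and the end of its last granule were modified. The report shows
// the actual and the expected tail side by side, with the differing bytes
// marked.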
void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
                           const u8 *expected) {
  uptr tail_size = kShadowAlignment - (orig_size % kShadowAlignment);
  u8 actual_expected[kShadowAlignment];
  internal_memcpy(actual_expected, expected, tail_size);
  tag_t ptr_tag = GetTagFromPointer(tagged_addr);
  // The short granule tag is stashed in the last byte of the magic string. To
  // avoid confusion, make the expected magic string contain the short granule
  // tag.
  if (orig_size % kShadowAlignment != 0) {
    actual_expected[tail_size - 1] = ptr_tag;
  }

  ScopedReport R(flags()->halt_on_error);
  Decorator d;
  uptr untagged_addr = UntagAddr(tagged_addr);
  Printf("%s", d.Error());
  const char *bug_type = "allocation-tail-overwritten";
  Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName,
         bug_type, untagged_addr, untagged_addr + orig_size, orig_size);
  Printf("\n%s", d.Default());
  Printf(
      "Stack of invalid access unknown. Issue detected at deallocation "
      "time.\n");
  Printf("%s", d.Allocation());
  Printf("deallocated here:\n");
  Printf("%s", d.Default());
  stack->Print();
  HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
  if (chunk.Beg()) {
    Printf("%s", d.Allocation());
    Printf("allocated here:\n");
    Printf("%s", d.Default());
    GetStackTraceFromId(chunk.GetAllocStackId()).Print();
  }

  InternalScopedString s;
  CHECK_GT(tail_size, 0U);
  CHECK_LT(tail_size, kShadowAlignment);
  u8 *tail = reinterpret_cast<u8 *>(untagged_addr + orig_size);
  s.append("Tail contains: ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
    s.append(".. ");
  for (uptr i = 0; i < tail_size; i++)
    s.append("%02x ", tail[i]);
  s.append("\n");
  s.append("Expected:      ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
    s.append(".. ");
  for (uptr i = 0; i < tail_size; i++)
    s.append("%02x ", actual_expected[i]);
  s.append("\n");
  s.append("               ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
    s.append("   ");
  for (uptr i = 0; i < tail_size; i++)
    s.append("%s ", actual_expected[i] != tail[i] ? "^^" : "  ");

  s.append(
      "\nThis error occurs when a buffer overflow overwrites memory\n"
      "to the right of a heap object, but within the %zd-byte granule, e.g.\n"
      "   char *x = new char[20];\n"
      "   x[25] = 42;\n"
      "%s does not detect such bugs in uninstrumented code at the time of "
      "write,\nbut can detect them at the time of free/delete.\n"
      "To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0\n",
      kShadowAlignment, SanitizerToolName);
  Printf("%s", s.data());
  GetCurrentThread()->Announce();

  tag_t *tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr));
  PrintTagsAroundAddr(tag_ptr);

  ReportErrorSummary(bug_type, stack);
}
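
// The main HWASan error: an access through a pointer whose tag does not match
// the memory tag. __hwasan_test_shadow is re-run here to find the offset of
// the first failing byte within the access; the short-granule branch below
// refines that offset when the mismatch lands inside a partially used
// granule.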
void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
                       bool is_store, bool fatal, uptr *registers_frame) {
  ScopedReport R(fatal);
  SavedStackAllocations current_stack_allocations(
      GetCurrentThread()->stack_allocations());

  Decorator d;
  uptr untagged_addr = UntagAddr(tagged_addr);
  // TODO: when possible, try to print heap-use-after-free, etc.
  const char *bug_type = "tag-mismatch";
  uptr pc = GetTopPc(stack);
  Printf("%s", d.Error());
  Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
         untagged_addr, pc);

  Thread *t = GetCurrentThread();

  sptr offset =
      __hwasan_test_shadow(reinterpret_cast<void *>(tagged_addr), access_size);
  CHECK(offset >= 0 && offset < static_cast<sptr>(access_size));
  tag_t ptr_tag = GetTagFromPointer(tagged_addr);
  tag_t *tag_ptr =
      reinterpret_cast<tag_t *>(MemToShadow(untagged_addr + offset));
  tag_t mem_tag = *tag_ptr;

  Printf("%s", d.Access());
  if (mem_tag && mem_tag < kShadowAlignment) {
    tag_t *granule_ptr = reinterpret_cast<tag_t *>((untagged_addr + offset) &
                                                   ~(kShadowAlignment - 1));
    // If offset is 0, (untagged_addr + offset) is not necessarily aligned to
    // granules. This is the offset of the leftmost accessed byte within the
    // bad granule.
    u8 in_granule_offset = (untagged_addr + offset) & (kShadowAlignment - 1);
    tag_t short_tag = granule_ptr[kShadowAlignment - 1];
    // The first mismatch was a short granule that matched the ptr_tag.
    if (short_tag == ptr_tag) {
      // If the access starts after the end of the short granule, then the
      // first bad byte is the first byte of the access; otherwise it is the
      // first byte past the end of the short granule.
      if (mem_tag > in_granule_offset) {
        offset += mem_tag - in_granule_offset;
      }
    }
    Printf(
        "%s of size %zu at %p tags: %02x/%02x(%02x) (ptr/mem) in thread T%zd\n",
        is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
        mem_tag, short_tag, t->unique_id());
  } else {
    Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n",
           is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
           mem_tag, t->unique_id());
  }
  if (offset != 0)
    Printf("Invalid access starting at offset %zu\n", offset);
  Printf("%s", d.Default());

  stack->Print();

  PrintAddressDescription(tagged_addr, access_size,
                          current_stack_allocations.get());
  t->Announce();

  PrintTagsAroundAddr(tag_ptr);

  if (registers_frame)
    ReportRegisters(registers_frame, pc);

  ReportErrorSummary(bug_type, stack);
}

// See the frame breakdown defined in __hwasan_tag_mismatch (from
// hwasan_tag_mismatch_aarch64.S).
void ReportRegisters(uptr *frame, uptr pc) {
  Printf("Registers where the failure occurred (pc %p):\n", pc);

  // We explicitly print a single line (4 registers/line) each iteration to
  // reduce the amount of logcat error messages printed. Each Printf() will
  // result in a new logcat line, irrespective of whether a newline is present,
  // and so we wish to reduce the number of Printf() calls we have to make.
  Printf("    x0  %016llx  x1  %016llx  x2  %016llx  x3  %016llx\n",
         frame[0], frame[1], frame[2], frame[3]);
  Printf("    x4  %016llx  x5  %016llx  x6  %016llx  x7  %016llx\n",
         frame[4], frame[5], frame[6], frame[7]);
  Printf("    x8  %016llx  x9  %016llx  x10 %016llx  x11 %016llx\n",
         frame[8], frame[9], frame[10], frame[11]);
  Printf("    x12 %016llx  x13 %016llx  x14 %016llx  x15 %016llx\n",
         frame[12], frame[13], frame[14], frame[15]);
  Printf("    x16 %016llx  x17 %016llx  x18 %016llx  x19 %016llx\n",
         frame[16], frame[17], frame[18], frame[19]);
  Printf("    x20 %016llx  x21 %016llx  x22 %016llx  x23 %016llx\n",
         frame[20], frame[21], frame[22], frame[23]);
  Printf("    x24 %016llx  x25 %016llx  x26 %016llx  x27 %016llx\n",
         frame[24], frame[25], frame[26], frame[27]);
  // hwasan_check* reduces the stack pointer by 256, then __hwasan_tag_mismatch
  // passes it to this function.
  Printf("    x28 %016llx  x29 %016llx  x30 %016llx   sp %016llx\n", frame[28],
         frame[29], frame[30], reinterpret_cast<u8 *>(frame) + 256);
}

}  // namespace __hwasan
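
// Interface function for embedders that want the text of each report, e.g. to
// attach it to a crash upload. Note that only fatal reports accumulate text
// (see ScopedReport), so the callback receives an empty string for non-fatal
// ones.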
void __hwasan_set_error_report_callback(void (*callback)(const char *)) {
  __hwasan::ScopedReport::SetErrorReportCallback(callback);
}