//===-- hwasan_report.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// Error reporting.
//===----------------------------------------------------------------------===//

#include "hwasan_report.h"

#include <dlfcn.h>

#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_globals.h"
#include "hwasan_mapping.h"
#include "hwasan_thread.h"
#include "hwasan_thread_list.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace_printer.h"
#include "sanitizer_common/sanitizer_symbolizer.h"

using namespace __sanitizer;

namespace __hwasan {
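
// RAII object representing one error report. For fatal reports, report text
// printed while it is alive is accumulated in error_message_ (via
// AppendToErrorMessageBuffer below); the destructor forwards the accumulated
// text to the registered callback, sets it as the abort message, and dies.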
class ScopedReport {
 public:
  ScopedReport(bool fatal = false) : error_message_(1), fatal(fatal) {
    Lock lock(&error_message_lock_);
    error_message_ptr_ = fatal ? &error_message_ : nullptr;
    ++hwasan_report_count;
  }

  ~ScopedReport() {
    void (*report_cb)(const char *);
    {
      Lock lock(&error_message_lock_);
      report_cb = error_report_callback_;
      error_message_ptr_ = nullptr;
    }
    if (report_cb)
      report_cb(error_message_.data());
    if (fatal)
      SetAbortMessage(error_message_.data());
    if (common_flags()->print_module_map >= 2 ||
        (fatal && common_flags()->print_module_map))
      DumpProcessMap();
    if (fatal)
      Die();
  }

  static void MaybeAppendToErrorMessage(const char *msg) {
    Lock lock(&error_message_lock_);
    if (!error_message_ptr_)
      return;
    uptr len = internal_strlen(msg);
    uptr old_size = error_message_ptr_->size();
    error_message_ptr_->resize(old_size + len);
    // overwrite old trailing '\0', keep new trailing '\0' untouched.
    internal_memcpy(&(*error_message_ptr_)[old_size - 1], msg, len);
  }

  static void SetErrorReportCallback(void (*callback)(const char *)) {
    Lock lock(&error_message_lock_);
    error_report_callback_ = callback;
  }

 private:
  ScopedErrorReportLock error_report_lock_;
  InternalMmapVector<char> error_message_;
  bool fatal;

  static InternalMmapVector<char> *error_message_ptr_;
  static Mutex error_message_lock_;
  static void (*error_report_callback_)(const char *);
};

InternalMmapVector<char> *ScopedReport::error_message_ptr_;
Mutex ScopedReport::error_message_lock_;
void (*ScopedReport::error_report_callback_)(const char *);

// If there is an active ScopedReport, append to its error message.
void AppendToErrorMessageBuffer(const char *buffer) {
  ScopedReport::MaybeAppendToErrorMessage(buffer);
}

static StackTrace GetStackTraceFromId(u32 id) {
  CHECK(id);
  StackTrace res = StackDepotGet(id);
  CHECK(res.trace);
  return res;
}

static void MaybePrintAndroidHelpUrl() {
#if SANITIZER_ANDROID
  Printf(
      "Learn more about HWASan reports: "
      "https://source.android.com/docs/security/test/memory-safety/"
      "hwasan-reports\n");
#endif
}

// A RAII object that holds a copy of the current thread stack ring buffer.
// The actual stack buffer may change while we are iterating over it (for
// example, Printf may call syslog() which can itself be built with hwasan).
class SavedStackAllocations {
 public:
  SavedStackAllocations(StackAllocationsRingBuffer *rb) {
    uptr size = rb->size() * sizeof(uptr);
    void *storage =
        MmapAlignedOrDieOnFatalError(size, size * 2, "saved stack allocations");
    new (&rb_) StackAllocationsRingBuffer(*rb, storage);
  }

  ~SavedStackAllocations() {
    StackAllocationsRingBuffer *rb = get();
    UnmapOrDie(rb->StartOfStorage(), rb->size() * sizeof(uptr));
  }

  StackAllocationsRingBuffer *get() {
    return (StackAllocationsRingBuffer *)&rb_;
  }

 private:
  uptr rb_;
};

class Decorator: public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() { }
  const char *Access() { return Blue(); }
  const char *Allocation() const { return Magenta(); }
  const char *Origin() const { return Magenta(); }
  const char *Name() const { return Green(); }
  const char *Location() { return Green(); }
  const char *Thread() { return Green(); }
};

static bool FindHeapAllocation(HeapAllocationsRingBuffer *rb, uptr tagged_addr,
                               HeapAllocationRecord *har, uptr *ring_index,
                               uptr *num_matching_addrs,
                               uptr *num_matching_addrs_4b) {
  if (!rb)
    return false;
  *num_matching_addrs = 0;
  *num_matching_addrs_4b = 0;
  for (uptr i = 0, size = rb->size(); i < size; i++) {
    auto h = (*rb)[i];
    if (h.tagged_addr <= tagged_addr &&
        h.tagged_addr + h.requested_size > tagged_addr) {
      *har = h;
      *ring_index = i;
      return true;
    }

    // Measure the number of heap ring buffer entries that would have matched
    // if we had only one entry per address (e.g. if the ring buffer data was
    // stored at the address itself). This will help us tune the allocator
    // implementation for MTE.
    if (UntagAddr(h.tagged_addr) <= UntagAddr(tagged_addr) &&
        UntagAddr(h.tagged_addr) + h.requested_size > UntagAddr(tagged_addr)) {
      ++*num_matching_addrs;
    }

    // Measure the number of heap ring buffer entries that would have matched
    // if we only had 4 tag bits, which is the case for MTE.
    auto untag_4b = [](uptr p) {
      return p & ((1ULL << 60) - 1);
    };
    if (untag_4b(h.tagged_addr) <= untag_4b(tagged_addr) &&
        untag_4b(h.tagged_addr) + h.requested_size > untag_4b(tagged_addr)) {
      ++*num_matching_addrs_4b;
    }
  }
  return false;
}
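
// Record layout note (inferred from the kRecord* constants used below, not
// from a spec comment in this file): each stack ring buffer record packs the
// return PC in its low kRecordFPShift bits and bits of the frame pointer
// above them, while the top bits of the record's own *address* carry the
// frame's base tag.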
static void PrintStackAllocations(StackAllocationsRingBuffer *sa,
                                  tag_t addr_tag, uptr untagged_addr) {
  uptr frames = Min((uptr)flags()->stack_history_size, sa->size());
  bool found_local = false;
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    if (!record)
      break;
    tag_t base_tag =
        reinterpret_cast<uptr>(record_addr) >> kRecordAddrBaseTagShift;
    uptr fp = (record >> kRecordFPShift) << kRecordFPLShift;
    uptr pc_mask = (1ULL << kRecordFPShift) - 1;
    uptr pc = record & pc_mask;
    FrameInfo frame;
    if (Symbolizer::GetOrInit()->SymbolizeFrame(pc, &frame)) {
      for (LocalInfo &local : frame.locals) {
        if (!local.has_frame_offset || !local.has_size || !local.has_tag_offset)
          continue;
        tag_t obj_tag = base_tag ^ local.tag_offset;
        if (obj_tag != addr_tag)
          continue;
        // Calculate the offset from the object address to the faulting
        // address. Because we only store bits 4-19 of FP (bits 0-3 are
        // guaranteed to be zero), the calculation is performed mod 2^20 and
        // may harmlessly underflow if the address mod 2^20 is below the
        // object address.
        uptr obj_offset =
            (untagged_addr - fp - local.frame_offset) & (kRecordFPModulus - 1);
        if (obj_offset >= local.size)
          continue;
        if (!found_local) {
          Printf("Potentially referenced stack objects:\n");
          found_local = true;
        }
        Printf("  %s in %s %s:%d\n", local.name, local.function_name,
               local.decl_file, local.decl_line);
      }
      frame.Clear();
    }
  }

  if (found_local)
    return;

  // We didn't find any locals. Most likely we don't have symbols, so dump
  // the information that we have for offline analysis.
  InternalScopedString frame_desc;
  Printf("Previously allocated frames:\n");
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    if (!record)
      break;
    uptr pc_mask = (1ULL << 48) - 1;
    uptr pc = record & pc_mask;
    frame_desc.append("  record_addr:0x%zx record:0x%zx",
                      reinterpret_cast<uptr>(record_addr), record);
    if (SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc)) {
      RenderFrame(&frame_desc, " %F %L", 0, frame->info.address, &frame->info,
                  common_flags()->symbolize_vs_style,
                  common_flags()->strip_path_prefix);
      frame->ClearAll();
    }
    Printf("%s\n", frame_desc.data());
    frame_desc.clear();
  }
}

// Returns true if tag == *tag_ptr, reading tags from short granules if
// necessary. This may return a false positive if tags 1-15 are used as a
// regular tag rather than a short granule marker.
static bool TagsEqual(tag_t tag, tag_t *tag_ptr) {
  if (tag == *tag_ptr)
    return true;
  if (*tag_ptr == 0 || *tag_ptr > kShadowAlignment - 1)
    return false;
  uptr mem = ShadowToMem(reinterpret_cast<uptr>(tag_ptr));
  tag_t inline_tag = *reinterpret_cast<tag_t *>(mem + kShadowAlignment - 1);
  return tag == inline_tag;
}
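
// Short granule example (made-up tag values): a 13-byte object tagged 0xa4
// in a 16-byte granule stores 13 (0x0d) in its shadow byte and stashes the
// real tag 0xa4 in the granule's last byte, which is what TagsEqual() reads
// above.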

// HWASan globals store the size of the global in the descriptor. In cases
// where we don't have a binary with symbols, we can't grab the size of the
// global from the debug info - but we might be able to retrieve it from the
// descriptor. Returns zero if the lookup failed.
static uptr GetGlobalSizeFromDescriptor(uptr ptr) {
  // Find the ELF object that this global resides in.
  Dl_info info;
  if (dladdr(reinterpret_cast<void *>(ptr), &info) == 0)
    return 0;
  auto *ehdr = reinterpret_cast<const ElfW(Ehdr) *>(info.dli_fbase);
  auto *phdr_begin = reinterpret_cast<const ElfW(Phdr) *>(
      reinterpret_cast<const u8 *>(ehdr) + ehdr->e_phoff);

  // Get the load bias. This is normally the same as the dli_fbase address on
  // position-independent code, but can be different on non-PIE executables,
  // binaries using LLD's partitioning feature, or binaries compiled with a
  // linker script.
  ElfW(Addr) load_bias = 0;
  for (const auto &phdr :
       ArrayRef<const ElfW(Phdr)>(phdr_begin, phdr_begin + ehdr->e_phnum)) {
    if (phdr.p_type != PT_LOAD || phdr.p_offset != 0)
      continue;
    load_bias = reinterpret_cast<ElfW(Addr)>(ehdr) - phdr.p_vaddr;
    break;
  }

  // Walk all globals in this ELF object, looking for the one that contains
  // ptr; once found, return its size.
  for (const hwasan_global &global :
       HwasanGlobalsFor(load_bias, phdr_begin, ehdr->e_phnum))
    if (global.addr() <= ptr && ptr < global.addr() + global.size())
      return global.size();

  return 0;
}

static void ShowHeapOrGlobalCandidate(uptr untagged_addr, tag_t *candidate,
                                      tag_t *left, tag_t *right) {
  Decorator d;
  uptr mem = ShadowToMem(reinterpret_cast<uptr>(candidate));
  HwasanChunkView chunk = FindHeapChunkByAddress(mem);
  if (chunk.IsAllocated()) {
    uptr offset;
    const char *whence;
    if (untagged_addr < chunk.End() && untagged_addr >= chunk.Beg()) {
      offset = untagged_addr - chunk.Beg();
      whence = "inside";
    } else if (candidate == left) {
      offset = untagged_addr - chunk.End();
      whence = "after";
    } else {
      offset = chunk.Beg() - untagged_addr;
      whence = "before";
    }
    Printf("%s", d.Error());
    Printf("\nCause: heap-buffer-overflow\n");
    Printf("%s", d.Default());
    Printf("%s", d.Location());
    Printf("%p is located %zd bytes %s a %zd-byte region [%p,%p)\n",
           untagged_addr, offset, whence, chunk.UsedSize(), chunk.Beg(),
           chunk.End());
    Printf("%s", d.Allocation());
    Printf("allocated here:\n");
    Printf("%s", d.Default());
    GetStackTraceFromId(chunk.GetAllocStackId()).Print();
    return;
  }
  // Check whether the address points into a loaded library. If so, this is
  // most likely a global variable.
  const char *module_name;
  uptr module_address;
  Symbolizer *sym = Symbolizer::GetOrInit();
  if (sym->GetModuleNameAndOffsetForPC(mem, &module_name, &module_address)) {
    Printf("%s", d.Error());
    Printf("\nCause: global-overflow\n");
    Printf("%s", d.Default());
    DataInfo info;
    Printf("%s", d.Location());
    if (sym->SymbolizeData(mem, &info) && info.start) {
      Printf(
          "%p is located %zd bytes %s a %zd-byte global variable "
          "%s [%p,%p) in %s\n",
          untagged_addr,
          candidate == left ? untagged_addr - (info.start + info.size)
                            : info.start - untagged_addr,
          candidate == left ? "after" : "before", info.size, info.name,
          info.start, info.start + info.size, module_name);
    } else {
      uptr size = GetGlobalSizeFromDescriptor(mem);
      if (size == 0)
        // We couldn't find the size of the global from the descriptors.
        Printf(
            "%p is located %s a global variable in "
            "\n    #0 0x%x (%s+0x%x)\n",
            untagged_addr, candidate == left ? "after" : "before", mem,
            module_name, module_address);
      else
        Printf(
            "%p is located %s a %zd-byte global variable in "
            "\n    #0 0x%x (%s+0x%x)\n",
            untagged_addr, candidate == left ? "after" : "before", size, mem,
            module_name, module_address);
    }
    Printf("%s", d.Default());
  }
}
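
// Describes tagged_addr in report output. Candidate causes are consulted in
// order: shadow memory itself, allocator chunk metadata, live thread stacks
// (stack tag-mismatch), nearby shadow tags (heap/global overflow candidates),
// and per-thread heap deallocation ring buffers (use-after-free).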
void PrintAddressDescription(
    uptr tagged_addr, uptr access_size,
    StackAllocationsRingBuffer *current_stack_allocations) {
  Decorator d;
  int num_descriptions_printed = 0;
  uptr untagged_addr = UntagAddr(tagged_addr);

  if (MemIsShadow(untagged_addr)) {
    Printf("%s%p is HWAsan shadow memory.\n%s", d.Location(), untagged_addr,
           d.Default());
    return;
  }

  // Print some very basic information about the address, if it's a heap
  // address.
  HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
  if (uptr beg = chunk.Beg()) {
    uptr size = chunk.ActualSize();
    Printf("%s[%p,%p) is a %s %s heap chunk; "
           "size: %zd offset: %zd\n%s",
           d.Location(),
           beg, beg + size,
           chunk.FromSmallHeap() ? "small" : "large",
           chunk.IsAllocated() ? "allocated" : "unallocated",
           size, untagged_addr - beg,
           d.Default());
  }

  tag_t addr_tag = GetTagFromPointer(tagged_addr);

  bool on_stack = false;
  // Check stack first. If the address is on the stack of a live thread, we
  // know it cannot be a heap / global overflow.
  hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
    if (t->AddrIsInStack(untagged_addr)) {
      on_stack = true;
      // TODO(fmayer): figure out how to distinguish use-after-return and
      // stack-buffer-overflow.
      Printf("%s", d.Error());
      Printf("\nCause: stack tag-mismatch\n");
      Printf("%s", d.Location());
      Printf("Address %p is located in stack of thread T%zd\n", untagged_addr,
             t->unique_id());
      Printf("%s", d.Default());
      t->Announce();

      auto *sa = (t == GetCurrentThread() && current_stack_allocations)
                     ? current_stack_allocations
                     : t->stack_allocations();
      PrintStackAllocations(sa, addr_tag, untagged_addr);
      num_descriptions_printed++;
    }
  });

  // Check if this looks like a heap buffer overflow by scanning
  // the shadow left and right and looking for the first adjacent
  // object with a different memory tag. If that tag matches addr_tag,
  // ask the allocator whether it has a live chunk there.
  tag_t *tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr));
  tag_t *candidate = nullptr, *left = tag_ptr, *right = tag_ptr;
  uptr candidate_distance = 0;
  for (; candidate_distance < 1000; candidate_distance++) {
    if (MemIsShadow(reinterpret_cast<uptr>(left)) &&
        TagsEqual(addr_tag, left)) {
      candidate = left;
      break;
    }
    --left;
    if (MemIsShadow(reinterpret_cast<uptr>(right)) &&
        TagsEqual(addr_tag, right)) {
      candidate = right;
      break;
    }
    ++right;
  }

  constexpr auto kCloseCandidateDistance = 1;

  if (!on_stack && candidate &&
      candidate_distance <= kCloseCandidateDistance) {
    ShowHeapOrGlobalCandidate(untagged_addr, candidate, left, right);
    num_descriptions_printed++;
  }

  hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
    // Scan all threads' ring buffers to find if it's a heap-use-after-free.
    HeapAllocationRecord har;
    uptr ring_index, num_matching_addrs, num_matching_addrs_4b;
    if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har,
                           &ring_index, &num_matching_addrs,
                           &num_matching_addrs_4b)) {
      Printf("%s", d.Error());
      Printf("\nCause: use-after-free\n");
      Printf("%s", d.Location());
      Printf("%p is located %zd bytes inside a %zd-byte region [%p,%p)\n",
             untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
             har.requested_size, UntagAddr(har.tagged_addr),
             UntagAddr(har.tagged_addr) + har.requested_size);
      Printf("%s", d.Allocation());
      Printf("freed by thread T%zd here:\n", t->unique_id());
      Printf("%s", d.Default());
      GetStackTraceFromId(har.free_context_id).Print();

      Printf("%s", d.Allocation());
      Printf("previously allocated here:\n");
      Printf("%s", d.Default());
      GetStackTraceFromId(har.alloc_context_id).Print();

      // Print a developer note: the index of this heap object
      // in the thread's deallocation ring buffer.
      Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ring_index + 1,
             flags()->heap_history_size);
      Printf("hwasan_dev_note_num_matching_addrs: %zd\n", num_matching_addrs);
      Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n",
             num_matching_addrs_4b);

      t->Announce();
      num_descriptions_printed++;
    }
  });

  if (candidate && num_descriptions_printed == 0) {
    ShowHeapOrGlobalCandidate(untagged_addr, candidate, left, right);
    num_descriptions_printed++;
  }

  // Print the remaining threads as extra information, one line per thread.
  hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });

  if (!num_descriptions_printed)
    // We exhausted our possibilities. Bail out.
    Printf("HWAddressSanitizer can not describe address in more detail.\n");
  if (num_descriptions_printed > 1) {
    Printf(
        "There are %d potential causes, printed above in order "
        "of likeliness.\n",
        num_descriptions_printed);
  }
}

void ReportStats() {}

static void PrintTagInfoAroundAddr(tag_t *tag_ptr, uptr num_rows,
                                   void (*print_tag)(InternalScopedString &s,
                                                     tag_t *tag)) {
  const uptr row_len = 16;  // Should be a power of two.
  tag_t *center_row_beg = reinterpret_cast<tag_t *>(
      RoundDownTo(reinterpret_cast<uptr>(tag_ptr), row_len));
  tag_t *beg_row = center_row_beg - row_len * (num_rows / 2);
  tag_t *end_row = center_row_beg + row_len * ((num_rows + 1) / 2);
  InternalScopedString s;
  for (tag_t *row = beg_row; row < end_row; row += row_len) {
    s.append("%s", row == center_row_beg ? "=>" : "  ");
    s.append("%p:", (void *)ShadowToMem(reinterpret_cast<uptr>(row)));
    for (uptr i = 0; i < row_len; i++) {
      s.append("%s", row + i == tag_ptr ? "[" : " ");
      print_tag(s, &row[i]);
      s.append("%s", row + i == tag_ptr ? "]" : " ");
    }
    s.append("\n");
  }
  Printf("%s", s.data());
}
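
// A sample row pair (made-up addresses and tags) as rendered with the plain
// "%02x" printer below; '=>' marks the center row and [..] the buggy tag:
//     0x004000007000: 00  00  a4  a4 ...
//   =>0x004000007010: a4  a4 [a5] a4 ...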
static void PrintTagsAroundAddr(tag_t *tag_ptr) {
  Printf(
      "Memory tags around the buggy address (one tag corresponds to %zd "
      "bytes):\n", kShadowAlignment);
  PrintTagInfoAroundAddr(tag_ptr, 17, [](InternalScopedString &s, tag_t *tag) {
    s.append("%02x", *tag);
  });

  Printf(
      "Tags for short granules around the buggy address (one tag corresponds "
      "to %zd bytes):\n",
      kShadowAlignment);
  PrintTagInfoAroundAddr(tag_ptr, 3, [](InternalScopedString &s, tag_t *tag) {
    if (*tag >= 1 && *tag <= kShadowAlignment) {
      uptr granule_addr = ShadowToMem(reinterpret_cast<uptr>(tag));
      s.append("%02x",
               *reinterpret_cast<u8 *>(granule_addr + kShadowAlignment - 1));
    } else {
      s.append("..");
    }
  });
  Printf(
      "See "
      "https://clang.llvm.org/docs/"
      "HardwareAssistedAddressSanitizerDesign.html#short-granules for a "
      "description of short granule tags\n");
}

uptr GetTopPc(StackTrace *stack) {
  return stack->size ? StackTrace::GetPreviousInstructionPc(stack->trace[0])
                     : 0;
}

void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
  ScopedReport R(flags()->halt_on_error);

  uptr untagged_addr = UntagAddr(tagged_addr);
  tag_t ptr_tag = GetTagFromPointer(tagged_addr);
  tag_t *tag_ptr = nullptr;
  tag_t mem_tag = 0;
  if (MemIsApp(untagged_addr)) {
    tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr));
    if (MemIsShadow(reinterpret_cast<uptr>(tag_ptr)))
      mem_tag = *tag_ptr;
    else
      tag_ptr = nullptr;
  }
  Decorator d;
  Printf("%s", d.Error());
  uptr pc = GetTopPc(stack);
  const char *bug_type = "invalid-free";
  const Thread *thread = GetCurrentThread();
  if (thread) {
    Report("ERROR: %s: %s on address %p at pc %p on thread T%zd\n",
           SanitizerToolName, bug_type, untagged_addr, pc, thread->unique_id());
  } else {
    Report("ERROR: %s: %s on address %p at pc %p on unknown thread\n",
           SanitizerToolName, bug_type, untagged_addr, pc);
  }
  Printf("%s", d.Access());
  if (tag_ptr)
    Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag, mem_tag);
  Printf("%s", d.Default());

  stack->Print();

  PrintAddressDescription(tagged_addr, 0, nullptr);

  if (tag_ptr)
    PrintTagsAroundAddr(tag_ptr);

  MaybePrintAndroidHelpUrl();
  ReportErrorSummary(bug_type, stack);
}

void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
                           const u8 *expected) {
  uptr tail_size = kShadowAlignment - (orig_size % kShadowAlignment);
  u8 actual_expected[kShadowAlignment];
  internal_memcpy(actual_expected, expected, tail_size);
  tag_t ptr_tag = GetTagFromPointer(tagged_addr);
  // Short granule is stashed in the last byte of the magic string. To avoid
  // confusion, make the expected magic string contain the short granule tag.
  if (orig_size % kShadowAlignment != 0) {
    actual_expected[tail_size - 1] = ptr_tag;
  }

  ScopedReport R(flags()->halt_on_error);
  Decorator d;
  uptr untagged_addr = UntagAddr(tagged_addr);
  Printf("%s", d.Error());
  const char *bug_type = "allocation-tail-overwritten";
  Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName,
         bug_type, untagged_addr, untagged_addr + orig_size, orig_size);
  Printf("\n%s", d.Default());
  Printf(
      "Stack of invalid access unknown. Issue detected at deallocation "
      "time.\n");
  Printf("%s", d.Allocation());
  Printf("deallocated here:\n");
  Printf("%s", d.Default());
  stack->Print();
  HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
  if (chunk.Beg()) {
    Printf("%s", d.Allocation());
    Printf("allocated here:\n");
    Printf("%s", d.Default());
    GetStackTraceFromId(chunk.GetAllocStackId()).Print();
  }

  InternalScopedString s;
  CHECK_GT(tail_size, 0U);
  CHECK_LT(tail_size, kShadowAlignment);
  u8 *tail = reinterpret_cast<u8 *>(untagged_addr + orig_size);
  s.append("Tail contains: ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
    s.append(".. ");
  for (uptr i = 0; i < tail_size; i++)
    s.append("%02x ", tail[i]);
  s.append("\n");
  s.append("Expected:      ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
    s.append(".. ");
  for (uptr i = 0; i < tail_size; i++)
    s.append("%02x ", actual_expected[i]);
  s.append("\n");
  s.append("               ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
    s.append("   ");
  for (uptr i = 0; i < tail_size; i++)
    s.append("%s ", actual_expected[i] != tail[i] ? "^^" : "  ");

  s.append("\nThis error occurs when a buffer overflow overwrites memory\n"
           "after a heap object, but within the %zd-byte granule, e.g.\n"
           "   char *x = new char[20];\n"
           "   x[25] = 42;\n"
           "%s does not detect such bugs in uninstrumented code at the time of write,"
           "\nbut can detect them at the time of free/delete.\n"
           "To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0\n",
           kShadowAlignment, SanitizerToolName);
  Printf("%s", s.data());
  GetCurrentThread()->Announce();

  tag_t *tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr));
  PrintTagsAroundAddr(tag_ptr);

  MaybePrintAndroidHelpUrl();
  ReportErrorSummary(bug_type, stack);
}
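
// A schematic example (made-up values) of the tail report assembled above,
// with '^^' marking the byte where the tail differs from the expected magic:
//   Tail contains: .. .. .. 2f 4c 99 8e 3f
//   Expected:      .. .. .. 2f 4c 66 8e 3f
//                                 ^^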
void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
                       bool is_store, bool fatal, uptr *registers_frame) {
  ScopedReport R(fatal);
  SavedStackAllocations current_stack_allocations(
      GetCurrentThread()->stack_allocations());

  Decorator d;
  uptr untagged_addr = UntagAddr(tagged_addr);
  // TODO: when possible, try to print heap-use-after-free, etc.
  const char *bug_type = "tag-mismatch";
  uptr pc = GetTopPc(stack);
  Printf("%s", d.Error());
  Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
         untagged_addr, pc);

  Thread *t = GetCurrentThread();

  sptr offset =
      __hwasan_test_shadow(reinterpret_cast<void *>(tagged_addr), access_size);
  CHECK(offset >= 0 && offset < static_cast<sptr>(access_size));
  tag_t ptr_tag = GetTagFromPointer(tagged_addr);
  tag_t *tag_ptr =
      reinterpret_cast<tag_t *>(MemToShadow(untagged_addr + offset));
  tag_t mem_tag = *tag_ptr;

  Printf("%s", d.Access());
  if (mem_tag && mem_tag < kShadowAlignment) {
    tag_t *granule_ptr = reinterpret_cast<tag_t *>((untagged_addr + offset) &
                                                   ~(kShadowAlignment - 1));
    // Even if offset is 0, (untagged_addr + offset) may not be aligned to
    // granules. in_granule_offset is the offset of the leftmost accessed
    // byte within the bad granule.
    u8 in_granule_offset = (untagged_addr + offset) & (kShadowAlignment - 1);
    tag_t short_tag = granule_ptr[kShadowAlignment - 1];
    // The first mismatch was a short granule that matched the ptr_tag.
    if (short_tag == ptr_tag) {
      // If the access starts after the end of the short granule, then the
      // first bad byte is the first byte of the access; otherwise it is the
      // first byte past the end of the short granule.
      if (mem_tag > in_granule_offset) {
        offset += mem_tag - in_granule_offset;
      }
    }
    Printf(
        "%s of size %zu at %p tags: %02x/%02x(%02x) (ptr/mem) in thread T%zd\n",
        is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
        mem_tag, short_tag, t->unique_id());
  } else {
    Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n",
           is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
           mem_tag, t->unique_id());
  }
  if (offset != 0)
    Printf("Invalid access starting at offset %zu\n", offset);
  Printf("%s", d.Default());

  stack->Print();

  PrintAddressDescription(tagged_addr, access_size,
                          current_stack_allocations.get());
  t->Announce();

  PrintTagsAroundAddr(tag_ptr);

  if (registers_frame)
    ReportRegisters(registers_frame, pc);

  MaybePrintAndroidHelpUrl();
  ReportErrorSummary(bug_type, stack);
}
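
// Worked example of the short-granule offset adjustment in ReportTagMismatch
// above (made-up numbers): for an access at in_granule_offset 1 of a granule
// whose shadow holds mem_tag 3 (a 3-byte short granule), bytes at granule
// offsets 1 and 2 are still in bounds, so the reported offset is advanced by
// mem_tag - in_granule_offset = 2 to point at the first truly bad byte.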

// See the frame breakdown defined in __hwasan_tag_mismatch (from
// hwasan_tag_mismatch_{aarch64,riscv64}.S).
void ReportRegisters(uptr *frame, uptr pc) {
  Printf("Registers where the failure occurred (pc %p):\n", pc);

  // We explicitly print a single line (4 registers/line) each iteration to
  // reduce the amount of logcat error messages printed. Each Printf() will
  // result in a new logcat line, irrespective of whether a newline is present,
  // and so we wish to reduce the number of Printf() calls we have to make.
#if defined(__aarch64__)
  Printf("    x0  %016llx  x1  %016llx  x2  %016llx  x3  %016llx\n",
         frame[0], frame[1], frame[2], frame[3]);
#elif SANITIZER_RISCV64
  Printf("    sp  %016llx  x1  %016llx  x2  %016llx  x3  %016llx\n",
         reinterpret_cast<u8 *>(frame) + 256, frame[1], frame[2], frame[3]);
#endif
  Printf("    x4  %016llx  x5  %016llx  x6  %016llx  x7  %016llx\n",
         frame[4], frame[5], frame[6], frame[7]);
  Printf("    x8  %016llx  x9  %016llx  x10 %016llx  x11 %016llx\n",
         frame[8], frame[9], frame[10], frame[11]);
  Printf("    x12 %016llx  x13 %016llx  x14 %016llx  x15 %016llx\n",
         frame[12], frame[13], frame[14], frame[15]);
  Printf("    x16 %016llx  x17 %016llx  x18 %016llx  x19 %016llx\n",
         frame[16], frame[17], frame[18], frame[19]);
  Printf("    x20 %016llx  x21 %016llx  x22 %016llx  x23 %016llx\n",
         frame[20], frame[21], frame[22], frame[23]);
  Printf("    x24 %016llx  x25 %016llx  x26 %016llx  x27 %016llx\n",
         frame[24], frame[25], frame[26], frame[27]);
  // hwasan_check* reduces the stack pointer by 256, then __hwasan_tag_mismatch
  // passes it to this function.
#if defined(__aarch64__)
  Printf("    x28 %016llx  x29 %016llx  x30 %016llx   sp %016llx\n", frame[28],
         frame[29], frame[30], reinterpret_cast<u8 *>(frame) + 256);
#elif SANITIZER_RISCV64
  Printf("    x28 %016llx  x29 %016llx  x30 %016llx  x31 %016llx\n", frame[28],
         frame[29], frame[30], frame[31]);
#endif
}

}  // namespace __hwasan

void __hwasan_set_error_report_callback(void (*callback)(const char *)) {
  __hwasan::ScopedReport::SetErrorReportCallback(callback);
}
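
// Example (hypothetical client code) of hooking the callback above; the
// callback receives the accumulated report text when a report finishes
// (empty for non-fatal reports, since only fatal reports buffer their text):
//   static void MyReportSink(const char *report) {
//     fputs(report, stderr);  // or forward to a crash-reporting service
//   }
//   __hwasan_set_error_report_callback(MyReportSink);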