//===-- hwasan_report.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// Error reporting.
//===----------------------------------------------------------------------===//

#include "hwasan_report.h"

#include <dlfcn.h>

#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_globals.h"
#include "hwasan_mapping.h"
#include "hwasan_thread.h"
#include "hwasan_thread_list.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_array_ref.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace_printer.h"
#include "sanitizer_common/sanitizer_symbolizer.h"

using namespace __sanitizer;

namespace __hwasan {
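// Collects the text of a single report. Output is appended here through
// AppendToErrorMessageBuffer(); when the report is complete, the destructor
// hands the collected text to the callback registered with
// __hwasan_set_error_report_callback() and, for fatal errors, to
// SetAbortMessage() before terminating the process.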
class ScopedReport {
 public:
  explicit ScopedReport(bool fatal) : fatal(fatal) {
    Lock lock(&error_message_lock_);
    error_message_ptr_ = fatal ? &error_message_ : nullptr;
    ++hwasan_report_count;
  }

  ~ScopedReport() {
    void (*report_cb)(const char *);
    {
      Lock lock(&error_message_lock_);
      report_cb = error_report_callback_;
      error_message_ptr_ = nullptr;
    }
    if (report_cb)
      report_cb(error_message_.data());
    if (fatal)
      SetAbortMessage(error_message_.data());
    if (common_flags()->print_module_map >= 2 ||
        (fatal && common_flags()->print_module_map))
      DumpProcessMap();
    if (fatal)
      Die();
  }

  static void MaybeAppendToErrorMessage(const char *msg) {
    Lock lock(&error_message_lock_);
    if (!error_message_ptr_)
      return;
    error_message_ptr_->Append(msg);
  }

  static void SetErrorReportCallback(void (*callback)(const char *)) {
    Lock lock(&error_message_lock_);
    error_report_callback_ = callback;
  }

 private:
  InternalScopedString error_message_;
  bool fatal;

  static Mutex error_message_lock_;
  static InternalScopedString *error_message_ptr_
      SANITIZER_GUARDED_BY(error_message_lock_);
  static void (*error_report_callback_)(const char *);
};

Mutex ScopedReport::error_message_lock_;
InternalScopedString *ScopedReport::error_message_ptr_;
void (*ScopedReport::error_report_callback_)(const char *);

// If there is an active ScopedReport, append to its error message.
void AppendToErrorMessageBuffer(const char *buffer) {
  ScopedReport::MaybeAppendToErrorMessage(buffer);
}

static StackTrace GetStackTraceFromId(u32 id) {
  CHECK(id);
  StackTrace res = StackDepotGet(id);
  CHECK(res.trace);
  return res;
}

static void MaybePrintAndroidHelpUrl() {
#if SANITIZER_ANDROID
  Printf(
      "Learn more about HWASan reports: "
      "https://source.android.com/docs/security/test/memory-safety/"
      "hwasan-reports\n");
#endif
}

namespace {
// A RAII object that holds a copy of the current thread stack ring buffer.
// The actual stack buffer may change while we are iterating over it (for
// example, Printf may call syslog() which can itself be built with hwasan).
class SavedStackAllocations {
 public:
  SavedStackAllocations() = default;

  explicit SavedStackAllocations(Thread *t) { CopyFrom(t); }

  void CopyFrom(Thread *t) {
    StackAllocationsRingBuffer *rb = t->stack_allocations();
    uptr size = rb->size() * sizeof(uptr);
    void *storage =
        MmapAlignedOrDieOnFatalError(size, size * 2, "saved stack allocations");
    new (&rb_) StackAllocationsRingBuffer(*rb, storage);
    thread_id_ = t->unique_id();
  }

  ~SavedStackAllocations() {
    if (rb_) {
      StackAllocationsRingBuffer *rb = get();
      UnmapOrDie(rb->StartOfStorage(), rb->size() * sizeof(uptr));
    }
  }

  const StackAllocationsRingBuffer *get() const {
    return (const StackAllocationsRingBuffer *)&rb_;
  }

  StackAllocationsRingBuffer *get() {
    return (StackAllocationsRingBuffer *)&rb_;
  }

  u32 thread_id() const { return thread_id_; }

 private:
  uptr rb_ = 0;
  u32 thread_id_;
};

class Decorator: public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() { }
  const char *Access() { return Blue(); }
  const char *Allocation() const { return Magenta(); }
  const char *Origin() const { return Magenta(); }
  const char *Name() const { return Green(); }
  const char *Location() { return Green(); }
  const char *Thread() { return Green(); }
};
}  // namespace
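// Searches one thread's heap allocations ring buffer for a record whose
// [tagged_addr, tagged_addr + requested_size) interval contains the faulting
// tagged address. The two num_matching_addrs* counters record how many
// entries would have matched with tags ignored entirely or truncated to 4
// bits; they only feed the hwasan_dev_note_* lines printed with
// use-after-free reports.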
static bool FindHeapAllocation(HeapAllocationsRingBuffer *rb, uptr tagged_addr,
                               HeapAllocationRecord *har, uptr *ring_index,
                               uptr *num_matching_addrs,
                               uptr *num_matching_addrs_4b) {
  if (!rb) return false;
  *num_matching_addrs = 0;
  *num_matching_addrs_4b = 0;
  for (uptr i = 0, size = rb->size(); i < size; i++) {
    auto h = (*rb)[i];
    if (h.tagged_addr <= tagged_addr &&
        h.tagged_addr + h.requested_size > tagged_addr) {
      *har = h;
      *ring_index = i;
      return true;
    }

    // Measure the number of heap ring buffer entries that would have matched
    // if we had only one entry per address (e.g. if the ring buffer data was
    // stored at the address itself). This will help us tune the allocator
    // implementation for MTE.
    if (UntagAddr(h.tagged_addr) <= UntagAddr(tagged_addr) &&
        UntagAddr(h.tagged_addr) + h.requested_size > UntagAddr(tagged_addr)) {
      ++*num_matching_addrs;
    }

    // Measure the number of heap ring buffer entries that would have matched
    // if we only had 4 tag bits, which is the case for MTE.
    auto untag_4b = [](uptr p) {
      return p & ((1ULL << 60) - 1);
    };
    if (untag_4b(h.tagged_addr) <= untag_4b(tagged_addr) &&
        untag_4b(h.tagged_addr) + h.requested_size > untag_4b(tagged_addr)) {
      ++*num_matching_addrs_4b;
    }
  }
  return false;
}
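// Prints the stack objects that might be referenced by the faulting address,
// using a saved copy of the thread's stack ring buffer. Each record packs a
// frame's PC in the low bits and part of its FP in the high bits (see the
// kRecord* constants); local variable bounds and tags come from
// SymbolizeFrame(). If no matching local is found, the raw records are dumped
// instead for offline analysis.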
static void PrintStackAllocations(const StackAllocationsRingBuffer *sa,
                                  tag_t addr_tag, uptr untagged_addr) {
  uptr frames = Min((uptr)flags()->stack_history_size, sa->size());
  bool found_local = false;
  InternalScopedString location;
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    if (!record)
      break;
    tag_t base_tag =
        reinterpret_cast<uptr>(record_addr) >> kRecordAddrBaseTagShift;
    uptr fp = (record >> kRecordFPShift) << kRecordFPLShift;
    uptr pc_mask = (1ULL << kRecordFPShift) - 1;
    uptr pc = record & pc_mask;
    FrameInfo frame;
    if (Symbolizer::GetOrInit()->SymbolizeFrame(pc, &frame)) {
      for (LocalInfo &local : frame.locals) {
        if (!local.has_frame_offset || !local.has_size || !local.has_tag_offset)
          continue;
        if (!(local.name && internal_strlen(local.name)) &&
            !(local.function_name && internal_strlen(local.function_name)) &&
            !(local.decl_file && internal_strlen(local.decl_file)))
          continue;
        tag_t obj_tag = base_tag ^ local.tag_offset;
        if (obj_tag != addr_tag)
          continue;

        // Guess top bits of local variable from the faulting address, because
        // we only store bits 4-19 of FP (bits 0-3 are guaranteed to be zero).
        uptr local_beg = (fp + local.frame_offset) |
                         (untagged_addr & ~(uptr(kRecordFPModulus) - 1));
        uptr local_end = local_beg + local.size;

        if (!found_local) {
          Printf("\nPotentially referenced stack objects:\n");
          found_local = true;
        }

        uptr offset;
        const char *whence;
        const char *cause;
        if (local_beg <= untagged_addr && untagged_addr < local_end) {
          offset = untagged_addr - local_beg;
          whence = "inside";
          cause = "use-after-scope";
        } else if (untagged_addr >= local_end) {
          offset = untagged_addr - local_end;
          whence = "after";
          cause = "stack-buffer-overflow";
        } else {
          offset = local_beg - untagged_addr;
          whence = "before";
          cause = "stack-buffer-overflow";
        }
        Decorator d;
        Printf("%s", d.Error());
        Printf("Cause: %s\n", cause);
        Printf("%s", d.Default());
        Printf("%s", d.Location());
        StackTracePrinter::GetOrInit()->RenderSourceLocation(
            &location, local.decl_file, local.decl_line, /* column= */ 0,
            common_flags()->symbolize_vs_style,
            common_flags()->strip_path_prefix);
        Printf(
            "%p is located %zd bytes %s a %zd-byte local variable %s [%p,%p) "
            "in %s %s\n",
            untagged_addr, offset, whence, local_end - local_beg, local.name,
            local_beg, local_end, local.function_name, location.data());
        location.clear();
        Printf("%s\n", d.Default());
      }
      frame.Clear();
    }
  }

  if (found_local)
    return;

  // We didn't find any locals. Most likely we don't have symbols, so dump
  // the information that we have for offline analysis.
  InternalScopedString frame_desc;
  Printf("Previously allocated frames:\n");
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    if (!record)
      break;
    uptr pc_mask = (1ULL << 48) - 1;
    uptr pc = record & pc_mask;
    frame_desc.AppendF("  record_addr:0x%zx record:0x%zx",
                       reinterpret_cast<uptr>(record_addr), record);
    SymbolizedStackHolder symbolized_stack(
        Symbolizer::GetOrInit()->SymbolizePC(pc));
    const SymbolizedStack *frame = symbolized_stack.get();
    if (frame) {
      StackTracePrinter::GetOrInit()->RenderFrame(
          &frame_desc, " %F %L", 0, frame->info.address, &frame->info,
          common_flags()->symbolize_vs_style,
          common_flags()->strip_path_prefix);
    }
    Printf("%s\n", frame_desc.data());
    frame_desc.clear();
  }
}

// Returns true if tag == *tag_ptr, reading tags from short granules if
// necessary. This may return a false positive if tags 1-15 are used as a
// regular tag rather than a short granule marker.
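// For illustration (made-up values, not from a real report): a 20-byte
// allocation tagged 0xab covers one full granule plus a 4-byte short granule;
// the shadow of the second granule stores the size 0x04, and the real tag
// 0xab is stashed in that granule's last byte, which is where this function
// looks.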
static bool TagsEqual(tag_t tag, tag_t *tag_ptr) {
  if (tag == *tag_ptr)
    return true;
  if (*tag_ptr == 0 || *tag_ptr > kShadowAlignment - 1)
    return false;
  uptr mem = ShadowToMem(reinterpret_cast<uptr>(tag_ptr));
  tag_t inline_tag = *reinterpret_cast<tag_t *>(mem + kShadowAlignment - 1);
  return tag == inline_tag;
}

// HWASan globals store the size of the global in the descriptor. In cases
// where we don't have a binary with symbols, we can't grab the size of the
// global from the debug info - but we might be able to retrieve it from the
// descriptor. Returns zero if the lookup failed.
static uptr GetGlobalSizeFromDescriptor(uptr ptr) {
  // Find the ELF object that this global resides in.
  Dl_info info;
  if (dladdr(reinterpret_cast<void *>(ptr), &info) == 0)
    return 0;
  auto *ehdr = reinterpret_cast<const ElfW(Ehdr) *>(info.dli_fbase);
  auto *phdr_begin = reinterpret_cast<const ElfW(Phdr) *>(
      reinterpret_cast<const u8 *>(ehdr) + ehdr->e_phoff);

  // Get the load bias. This is normally the same as the dli_fbase address on
  // position-independent code, but can be different on non-PIE executables,
  // binaries using LLD's partitioning feature, or binaries compiled with a
  // linker script.
  ElfW(Addr) load_bias = 0;
  for (const auto &phdr :
       ArrayRef<const ElfW(Phdr)>(phdr_begin, phdr_begin + ehdr->e_phnum)) {
    if (phdr.p_type != PT_LOAD || phdr.p_offset != 0)
      continue;
    load_bias = reinterpret_cast<ElfW(Addr)>(ehdr) - phdr.p_vaddr;
    break;
  }

  // Walk all globals in this ELF object, looking for the one we're interested
  // in. Once we find it, we can stop iterating and return the size of the
  // global we're interested in.
  for (const hwasan_global &global :
       HwasanGlobalsFor(load_bias, phdr_begin, ehdr->e_phnum))
    if (global.addr() <= ptr && ptr < global.addr() + global.size())
      return global.size();

  return 0;
}

void ReportStats() {}
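// Geometry of the shadow dump printed with each report: kShadowLines rows of
// kDumpWidth tags centered on the faulting granule, plus a narrower
// kShortLines-row window for which the short granule tags are printed as
// well.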
constexpr uptr kDumpWidth = 16;
constexpr uptr kShadowLines = 17;
constexpr uptr kShadowDumpSize = kShadowLines * kDumpWidth;

constexpr uptr kShortLines = 3;
constexpr uptr kShortDumpSize = kShortLines * kDumpWidth;
constexpr uptr kShortDumpOffset = (kShadowLines - kShortLines) / 2 * kDumpWidth;

static uptr GetPrintTagStart(uptr addr) {
  addr = MemToShadow(addr);
  addr = RoundDownTo(addr, kDumpWidth);
  addr -= kDumpWidth * (kShadowLines / 2);
  return addr;
}

template <typename PrintTag>
static void PrintTagInfoAroundAddr(uptr addr, uptr num_rows,
                                   InternalScopedString &s,
                                   PrintTag print_tag) {
  uptr center_row_beg = RoundDownTo(addr, kDumpWidth);
  uptr beg_row = center_row_beg - kDumpWidth * (num_rows / 2);
  uptr end_row = center_row_beg + kDumpWidth * ((num_rows + 1) / 2);
  for (uptr row = beg_row; row < end_row; row += kDumpWidth) {
    s.Append(row == center_row_beg ? "=>" : "  ");
    s.AppendF("%p:", (void *)ShadowToMem(row));
    for (uptr i = 0; i < kDumpWidth; i++) {
      s.Append(row + i == addr ? "[" : " ");
      print_tag(s, row + i);
      s.Append(row + i == addr ? "]" : " ");
    }
    s.AppendF("\n");
  }
}

template <typename GetTag, typename GetShortTag>
static void PrintTagsAroundAddr(uptr addr, GetTag get_tag,
                                GetShortTag get_short_tag) {
  InternalScopedString s;
  addr = MemToShadow(addr);
  s.AppendF(
      "\nMemory tags around the buggy address (one tag corresponds to %zd "
      "bytes):\n",
      kShadowAlignment);
  PrintTagInfoAroundAddr(addr, kShadowLines, s,
                         [&](InternalScopedString &s, uptr tag_addr) {
                           tag_t tag = get_tag(tag_addr);
                           s.AppendF("%02x", tag);
                         });

  s.AppendF(
      "Tags for short granules around the buggy address (one tag corresponds "
      "to %zd bytes):\n",
      kShadowAlignment);
  PrintTagInfoAroundAddr(addr, kShortLines, s,
                         [&](InternalScopedString &s, uptr tag_addr) {
                           tag_t tag = get_tag(tag_addr);
                           if (tag >= 1 && tag <= kShadowAlignment) {
                             tag_t short_tag = get_short_tag(tag_addr);
                             s.AppendF("%02x", short_tag);
                           } else {
                             s.AppendF("..");
                           }
                         });
  s.AppendF(
      "See "
      "https://clang.llvm.org/docs/"
      "HardwareAssistedAddressSanitizerDesign.html#short-granules for a "
      "description of short granule tags\n");
  Printf("%s", s.data());
}

static uptr GetTopPc(const StackTrace *stack) {
  return stack->size ? StackTrace::GetPreviousInstructionPc(stack->trace[0])
                     : 0;
}

namespace {
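// Common state shared by all report kinds. The constructor snapshots
// everything a report may need (shadow tags, the surrounding heap chunk,
// matching stack and heap allocation records, and a buffer-overflow
// candidate) so that the derived classes can do all of their printing from
// these copies in their destructors instead of re-reading live data.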
class BaseReport {
 public:
  BaseReport(StackTrace *stack, bool fatal, uptr tagged_addr, uptr access_size)
      : scoped_report(fatal),
        stack(stack),
        tagged_addr(tagged_addr),
        access_size(access_size),
        untagged_addr(UntagAddr(tagged_addr)),
        ptr_tag(GetTagFromPointer(tagged_addr)),
        mismatch_offset(FindMismatchOffset()),
        heap(CopyHeapChunk()),
        allocations(CopyAllocations()),
        candidate(FindBufferOverflowCandidate()),
        shadow(CopyShadow()) {}

 protected:
  struct OverflowCandidate {
    uptr untagged_addr = 0;
    bool after = false;
    bool is_close = false;

    struct {
      uptr begin = 0;
      uptr end = 0;
      u32 thread_id = 0;
      u32 stack_id = 0;
      bool is_allocated = false;
    } heap;
  };

  struct HeapAllocation {
    HeapAllocationRecord har = {};
    uptr ring_index = 0;
    uptr num_matching_addrs = 0;
    uptr num_matching_addrs_4b = 0;
    u32 free_thread_id = 0;
  };

  struct Allocations {
    ArrayRef<SavedStackAllocations> stack;
    ArrayRef<HeapAllocation> heap;
  };

  struct HeapChunk {
    uptr begin = 0;
    uptr size = 0;
    u32 stack_id = 0;
    bool from_small_heap = false;
    bool is_allocated = false;
  };

  struct Shadow {
    uptr addr = 0;
    tag_t tags[kShadowDumpSize] = {};
    tag_t short_tags[kShortDumpSize] = {};
  };

  sptr FindMismatchOffset() const;
  Shadow CopyShadow() const;
  tag_t GetTagCopy(uptr addr) const;
  tag_t GetShortTagCopy(uptr addr) const;
  HeapChunk CopyHeapChunk() const;
  Allocations CopyAllocations();
  OverflowCandidate FindBufferOverflowCandidate() const;
  void PrintAddressDescription() const;
  void PrintHeapOrGlobalCandidate() const;
  void PrintTags(uptr addr) const;

  SavedStackAllocations stack_allocations_storage[16];
  HeapAllocation heap_allocations_storage[256];

  const ScopedReport scoped_report;
  const StackTrace *stack = nullptr;
  const uptr tagged_addr = 0;
  const uptr access_size = 0;
  const uptr untagged_addr = 0;
  const tag_t ptr_tag = 0;
  const sptr mismatch_offset = 0;

  const HeapChunk heap;
  const Allocations allocations;
  const OverflowCandidate candidate;
  const Shadow shadow;
};
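// Returns the offset within the access of the first byte that actually
// faults, i.e. the first byte whose granule tag differs from the pointer tag.
// If that granule is a short granule whose stored tag matches the pointer
// tag, the mismatch begins just past the used part of the granule.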
sptr BaseReport::FindMismatchOffset() const {
  if (!access_size)
    return 0;
  sptr offset =
      __hwasan_test_shadow(reinterpret_cast<void *>(tagged_addr), access_size);
  CHECK_GE(offset, 0);
  CHECK_LT(offset, static_cast<sptr>(access_size));
  tag_t *tag_ptr =
      reinterpret_cast<tag_t *>(MemToShadow(untagged_addr + offset));
  tag_t mem_tag = *tag_ptr;

  if (mem_tag && mem_tag < kShadowAlignment) {
    tag_t *granule_ptr = reinterpret_cast<tag_t *>((untagged_addr + offset) &
                                                   ~(kShadowAlignment - 1));
    // If offset is 0, (untagged_addr + offset) is not necessarily aligned to
    // granules. This is the offset of the leftmost accessed byte within the
    // bad granule.
    u8 in_granule_offset = (untagged_addr + offset) & (kShadowAlignment - 1);
    tag_t short_tag = granule_ptr[kShadowAlignment - 1];
    // The first mismatch was a short granule that matched the ptr_tag.
    if (short_tag == ptr_tag) {
      // If the access starts after the end of the short granule, then the
      // first bad byte is the first byte of the access; otherwise it is the
      // first byte past the end of the short granule.
      if (mem_tag > in_granule_offset) {
        offset += mem_tag - in_granule_offset;
      }
    }
  }
  return offset;
}
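// Copies the shadow tags (and, for the short dump window, the short granule
// tags) around the faulting granule into the report, so that printing later
// works on a stable snapshot rather than on shadow memory that may change
// underneath us.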
BaseReport::Shadow BaseReport::CopyShadow() const {
  Shadow result;
  if (!MemIsApp(untagged_addr))
    return result;

  result.addr = GetPrintTagStart(untagged_addr + mismatch_offset);
  uptr tag_addr = result.addr;
  uptr short_end = kShortDumpOffset + ARRAY_SIZE(shadow.short_tags);
  for (uptr i = 0; i < ARRAY_SIZE(result.tags); ++i, ++tag_addr) {
    if (!MemIsShadow(tag_addr))
      continue;
    result.tags[i] = *reinterpret_cast<tag_t *>(tag_addr);
    if (i < kShortDumpOffset || i >= short_end)
      continue;
    uptr granule_addr = ShadowToMem(tag_addr);
    if (1 <= result.tags[i] && result.tags[i] <= kShadowAlignment &&
        IsAccessibleMemoryRange(granule_addr, kShadowAlignment)) {
      result.short_tags[i - kShortDumpOffset] =
          *reinterpret_cast<tag_t *>(granule_addr + kShadowAlignment - 1);
    }
  }
  return result;
}

tag_t BaseReport::GetTagCopy(uptr addr) const {
  CHECK_GE(addr, shadow.addr);
  uptr idx = addr - shadow.addr;
  CHECK_LT(idx, ARRAY_SIZE(shadow.tags));
  return shadow.tags[idx];
}

tag_t BaseReport::GetShortTagCopy(uptr addr) const {
  CHECK_GE(addr, shadow.addr + kShortDumpOffset);
  uptr idx = addr - shadow.addr - kShortDumpOffset;
  CHECK_LT(idx, ARRAY_SIZE(shadow.short_tags));
  return shadow.short_tags[idx];
}

BaseReport::HeapChunk BaseReport::CopyHeapChunk() const {
  HeapChunk result = {};
  if (MemIsShadow(untagged_addr))
    return result;
  HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
  result.begin = chunk.Beg();
  if (result.begin) {
    result.size = chunk.ActualSize();
    result.from_small_heap = chunk.FromSmallHeap();
    result.is_allocated = chunk.IsAllocated();
    result.stack_id = chunk.GetAllocStackId();
  }
  return result;
}

BaseReport::Allocations BaseReport::CopyAllocations() {
  if (MemIsShadow(untagged_addr))
    return {};
  uptr stack_allocations_count = 0;
  uptr heap_allocations_count = 0;
  hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
    if (stack_allocations_count < ARRAY_SIZE(stack_allocations_storage) &&
        t->AddrIsInStack(untagged_addr)) {
      stack_allocations_storage[stack_allocations_count++].CopyFrom(t);
    }

    if (heap_allocations_count < ARRAY_SIZE(heap_allocations_storage)) {
      // Scan all threads' ring buffers to find if it's a heap-use-after-free.
      HeapAllocationRecord har;
      uptr ring_index, num_matching_addrs, num_matching_addrs_4b;
      if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har,
                             &ring_index, &num_matching_addrs,
                             &num_matching_addrs_4b)) {
        auto &ha = heap_allocations_storage[heap_allocations_count++];
        ha.har = har;
        ha.ring_index = ring_index;
        ha.num_matching_addrs = num_matching_addrs;
        ha.num_matching_addrs_4b = num_matching_addrs_4b;
        ha.free_thread_id = t->unique_id();
      }
    }
  });

  return {{stack_allocations_storage, stack_allocations_count},
          {heap_allocations_storage, heap_allocations_count}};
}

BaseReport::OverflowCandidate BaseReport::FindBufferOverflowCandidate() const {
  OverflowCandidate result = {};
  if (MemIsShadow(untagged_addr))
    return result;
  // Check if this looks like a heap buffer overflow by scanning
  // the shadow left and right and looking for the first adjacent
  // object with a different memory tag. If that tag matches ptr_tag,
  // check the allocator if it has a live chunk there.
  tag_t *tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr));
  tag_t *candidate_tag_ptr = nullptr, *left = tag_ptr, *right = tag_ptr;
  uptr candidate_distance = 0;
  for (; candidate_distance < 1000; candidate_distance++) {
    if (MemIsShadow(reinterpret_cast<uptr>(left)) && TagsEqual(ptr_tag, left)) {
      candidate_tag_ptr = left;
      break;
    }
    --left;
    if (MemIsShadow(reinterpret_cast<uptr>(right)) &&
        TagsEqual(ptr_tag, right)) {
      candidate_tag_ptr = right;
      break;
    }
    ++right;
  }

  constexpr auto kCloseCandidateDistance = 1;
  result.is_close = candidate_distance <= kCloseCandidateDistance;
  result.after = candidate_tag_ptr == left;
  result.untagged_addr = ShadowToMem(reinterpret_cast<uptr>(candidate_tag_ptr));
  HwasanChunkView chunk = FindHeapChunkByAddress(result.untagged_addr);
  if (chunk.IsAllocated()) {
    result.heap.is_allocated = true;
    result.heap.begin = chunk.Beg();
    result.heap.end = chunk.End();
    result.heap.thread_id = chunk.GetAllocThreadId();
    result.heap.stack_id = chunk.GetAllocStackId();
  }
  return result;
}
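// Describes the buffer-overflow candidate found above. If it falls within a
// live heap chunk, report a heap-buffer-overflow with the allocation stack;
// otherwise, if it points into a loaded module, report a global-overflow,
// taking the global's bounds from the symbolizer or, failing that, from the
// HWASan global descriptor.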
void BaseReport::PrintHeapOrGlobalCandidate() const {
  Decorator d;
  if (candidate.heap.is_allocated) {
    uptr offset;
    const char *whence;
    if (candidate.heap.begin <= untagged_addr &&
        untagged_addr < candidate.heap.end) {
      offset = untagged_addr - candidate.heap.begin;
      whence = "inside";
    } else if (candidate.after) {
      offset = untagged_addr - candidate.heap.end;
      whence = "after";
    } else {
      offset = candidate.heap.begin - untagged_addr;
      whence = "before";
    }
    Printf("%s", d.Error());
    Printf("\nCause: heap-buffer-overflow\n");
    Printf("%s", d.Default());
    Printf("%s", d.Location());
    Printf("%p is located %zd bytes %s a %zd-byte region [%p,%p)\n",
           untagged_addr, offset, whence,
           candidate.heap.end - candidate.heap.begin, candidate.heap.begin,
           candidate.heap.end);
    Printf("%s", d.Allocation());
    Printf("allocated by thread T%u here:\n", candidate.heap.thread_id);
    Printf("%s", d.Default());
    GetStackTraceFromId(candidate.heap.stack_id).Print();
    return;
  }
  // Check whether the address points into a loaded library. If so, this is
  // most likely a global variable.
  const char *module_name;
  uptr module_address;
  Symbolizer *sym = Symbolizer::GetOrInit();
  if (sym->GetModuleNameAndOffsetForPC(candidate.untagged_addr, &module_name,
                                       &module_address)) {
    Printf("%s", d.Error());
    Printf("\nCause: global-overflow\n");
    Printf("%s", d.Default());
    DataInfo info;
    Printf("%s", d.Location());
    if (sym->SymbolizeData(candidate.untagged_addr, &info) && info.start) {
      Printf(
          "%p is located %zd bytes %s a %zd-byte global variable "
          "%s [%p,%p) in %s\n",
          untagged_addr,
          candidate.after ? untagged_addr - (info.start + info.size)
                          : info.start - untagged_addr,
          candidate.after ? "after" : "before", info.size, info.name,
          info.start, info.start + info.size, module_name);
    } else {
      uptr size = GetGlobalSizeFromDescriptor(candidate.untagged_addr);
      if (size == 0)
        // We couldn't find the size of the global from the descriptors.
        Printf(
            "%p is located %s a global variable in "
            "\n    #0 0x%x (%s+0x%x)\n",
            untagged_addr, candidate.after ? "after" : "before",
            candidate.untagged_addr, module_name, module_address);
      else
        Printf(
            "%p is located %s a %zd-byte global variable in "
            "\n    #0 0x%x (%s+0x%x)\n",
            untagged_addr, candidate.after ? "after" : "before", size,
            candidate.untagged_addr, module_name, module_address);
    }
    Printf("%s", d.Default());
  }
}
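// Prints every plausible explanation for the faulting address supported by
// the snapshot: stack objects of live threads, a nearby heap or global
// overflow candidate, and freed allocations from the heap ring buffers, in
// decreasing order of likelihood.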
void BaseReport::PrintAddressDescription() const {
  Decorator d;
  int num_descriptions_printed = 0;

  if (MemIsShadow(untagged_addr)) {
    Printf("%s%p is HWAsan shadow memory.\n%s", d.Location(), untagged_addr,
           d.Default());
    return;
  }

  // Print some very basic information about the address, if it's a heap.
  if (heap.begin) {
    Printf(
        "%s[%p,%p) is a %s %s heap chunk; "
        "size: %zd offset: %zd\n%s",
        d.Location(), heap.begin, heap.begin + heap.size,
        heap.from_small_heap ? "small" : "large",
        heap.is_allocated ? "allocated" : "unallocated", heap.size,
        untagged_addr - heap.begin, d.Default());
  }

  auto announce_by_id = [](u32 thread_id) {
    hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
      if (thread_id == t->unique_id())
        t->Announce();
    });
  };

  // Check stack first. If the address is on the stack of a live thread, we
  // know it cannot be a heap / global overflow.
  for (const auto &sa : allocations.stack) {
    Printf("%s", d.Error());
    Printf("\nCause: stack tag-mismatch\n");
    Printf("%s", d.Location());
    Printf("Address %p is located in stack of thread T%zd\n", untagged_addr,
           sa.thread_id());
    Printf("%s", d.Default());
    announce_by_id(sa.thread_id());
    PrintStackAllocations(sa.get(), ptr_tag, untagged_addr);
    num_descriptions_printed++;
  }

  if (allocations.stack.empty() && candidate.untagged_addr &&
      candidate.is_close) {
    PrintHeapOrGlobalCandidate();
    num_descriptions_printed++;
  }

  for (const auto &ha : allocations.heap) {
    const HeapAllocationRecord har = ha.har;

    Printf("%s", d.Error());
    Printf("\nCause: use-after-free\n");
    Printf("%s", d.Location());
    Printf("%p is located %zd bytes inside a %zd-byte region [%p,%p)\n",
           untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
           har.requested_size, UntagAddr(har.tagged_addr),
           UntagAddr(har.tagged_addr) + har.requested_size);
    Printf("%s", d.Allocation());
    Printf("freed by thread T%u here:\n", ha.free_thread_id);
    Printf("%s", d.Default());
    GetStackTraceFromId(har.free_context_id).Print();

    Printf("%s", d.Allocation());
    Printf("previously allocated by thread T%u here:\n", har.alloc_thread_id);
    Printf("%s", d.Default());
    GetStackTraceFromId(har.alloc_context_id).Print();

    // Print a developer note: the index of this heap object
    // in the thread's deallocation ring buffer.
    Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ha.ring_index + 1,
           flags()->heap_history_size);
    Printf("hwasan_dev_note_num_matching_addrs: %zd\n", ha.num_matching_addrs);
    Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n",
           ha.num_matching_addrs_4b);

    announce_by_id(ha.free_thread_id);
    // TODO: announce_by_id(har.alloc_thread_id);
    num_descriptions_printed++;
  }

  if (candidate.untagged_addr && num_descriptions_printed == 0) {
    PrintHeapOrGlobalCandidate();
    num_descriptions_printed++;
  }

  // Print the remaining threads, as extra information, 1 line per thread.
  if (flags()->print_live_threads_info) {
    Printf("\n");
    hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });
  }

  if (!num_descriptions_printed)
    // We exhausted our possibilities. Bail out.
    Printf("HWAddressSanitizer can not describe address in more detail.\n");

  if (num_descriptions_printed > 1) {
    Printf(
        "There are %d potential causes, printed above in order "
        "of likeliness.\n",
        num_descriptions_printed);
  }
}

void BaseReport::PrintTags(uptr addr) const {
  if (shadow.addr) {
    PrintTagsAroundAddr(
        addr, [&](uptr addr) { return GetTagCopy(addr); },
        [&](uptr addr) { return GetShortTagCopy(addr); });
  }
}

class InvalidFreeReport : public BaseReport {
 public:
  InvalidFreeReport(StackTrace *stack, uptr tagged_addr)
      : BaseReport(stack, flags()->halt_on_error, tagged_addr, 0) {}
  ~InvalidFreeReport();

 private:
};

InvalidFreeReport::~InvalidFreeReport() {
  Decorator d;
  Printf("%s", d.Error());
  uptr pc = GetTopPc(stack);
  const char *bug_type = "invalid-free";
  const Thread *thread = GetCurrentThread();
  if (thread) {
    Report("ERROR: %s: %s on address %p at pc %p on thread T%zd\n",
           SanitizerToolName, bug_type, untagged_addr, pc, thread->unique_id());
  } else {
    Report("ERROR: %s: %s on address %p at pc %p on unknown thread\n",
           SanitizerToolName, bug_type, untagged_addr, pc);
  }
  Printf("%s", d.Access());
  if (shadow.addr) {
    Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag,
           GetTagCopy(MemToShadow(untagged_addr)));
  }
  Printf("%s", d.Default());

  stack->Print();

  PrintAddressDescription();
  PrintTags(untagged_addr);
  MaybePrintAndroidHelpUrl();
  ReportErrorSummary(bug_type, stack);
}

class TailOverwrittenReport : public BaseReport {
 public:
  explicit TailOverwrittenReport(StackTrace *stack, uptr tagged_addr,
                                 uptr orig_size, const u8 *expected)
      : BaseReport(stack, flags()->halt_on_error, tagged_addr, 0),
        orig_size(orig_size),
        tail_size(kShadowAlignment - (orig_size % kShadowAlignment)) {
    CHECK_GT(tail_size, 0U);
    CHECK_LT(tail_size, kShadowAlignment);
    internal_memcpy(tail_copy,
                    reinterpret_cast<u8 *>(untagged_addr + orig_size),
                    tail_size);
    internal_memcpy(actual_expected, expected, tail_size);
    // Short granule is stashed in the last byte of the magic string. To avoid
    // confusion, make the expected magic string contain the short granule tag.
    if (orig_size % kShadowAlignment != 0)
      actual_expected[tail_size - 1] = ptr_tag;
  }
  ~TailOverwrittenReport();

 private:
  const uptr orig_size = 0;
  const uptr tail_size = 0;
  u8 actual_expected[kShadowAlignment] = {};
  u8 tail_copy[kShadowAlignment] = {};
};

TailOverwrittenReport::~TailOverwrittenReport() {
  Decorator d;
  Printf("%s", d.Error());
  const char *bug_type = "allocation-tail-overwritten";
  Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName,
         bug_type, untagged_addr, untagged_addr + orig_size, orig_size);
  Printf("\n%s", d.Default());
  Printf(
      "Stack of invalid access unknown. Issue detected at deallocation "
      "time.\n");
  Printf("%s", d.Allocation());
  Printf("deallocated here:\n");
  Printf("%s", d.Default());
  stack->Print();
  if (heap.begin) {
    Printf("%s", d.Allocation());
    Printf("allocated here:\n");
    Printf("%s", d.Default());
    GetStackTraceFromId(heap.stack_id).Print();
  }

  InternalScopedString s;
  u8 *tail = tail_copy;
  s.AppendF("Tail contains: ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.AppendF(".. ");
  for (uptr i = 0; i < tail_size; i++) s.AppendF("%02x ", tail[i]);
  s.AppendF("\n");
  s.AppendF("Expected:      ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.AppendF(".. ");
  for (uptr i = 0; i < tail_size; i++) s.AppendF("%02x ", actual_expected[i]);
  s.AppendF("\n");
  s.AppendF("               ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.AppendF("   ");
  for (uptr i = 0; i < tail_size; i++)
    s.AppendF("%s ", actual_expected[i] != tail[i] ? "^^" : "  ");
  s.AppendF(
      "\nThis error occurs when a buffer overflow overwrites memory\n"
      "after a heap object, but within the %zd-byte granule, e.g.\n"
      "   char *x = new char[20];\n"
      "   x[25] = 42;\n"
      "%s does not detect such bugs in uninstrumented code at the time of "
      "write,"
      "\nbut can detect them at the time of free/delete.\n"
      "To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0\n",
      kShadowAlignment, SanitizerToolName);
  Printf("%s", s.data());

  GetCurrentThread()->Announce();

  PrintTags(untagged_addr);
  MaybePrintAndroidHelpUrl();
  ReportErrorSummary(bug_type, stack);
}

class TagMismatchReport : public BaseReport {
 public:
  explicit TagMismatchReport(StackTrace *stack, uptr tagged_addr,
                             uptr access_size, bool is_store, bool fatal,
                             uptr *registers_frame)
      : BaseReport(stack, fatal, tagged_addr, access_size),
        is_store(is_store),
        registers_frame(registers_frame) {}
  ~TagMismatchReport();

 private:
  const bool is_store;
  const uptr *registers_frame;
};

TagMismatchReport::~TagMismatchReport() {
  Decorator d;
  // TODO: when possible, try to print heap-use-after-free, etc.
  const char *bug_type = "tag-mismatch";
  uptr pc = GetTopPc(stack);
  Printf("%s", d.Error());
  Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
         untagged_addr, pc);

  Thread *t = GetCurrentThread();

  tag_t mem_tag = GetTagCopy(MemToShadow(untagged_addr + mismatch_offset));

  Printf("%s", d.Access());
  if (mem_tag && mem_tag < kShadowAlignment) {
    tag_t short_tag =
        GetShortTagCopy(MemToShadow(untagged_addr + mismatch_offset));
    Printf(
        "%s of size %zu at %p tags: %02x/%02x(%02x) (ptr/mem) in thread T%zd\n",
        is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
        mem_tag, short_tag, t->unique_id());
  } else {
    Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n",
           is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
           mem_tag, t->unique_id());
  }
  if (mismatch_offset)
    Printf("Invalid access starting at offset %zu\n", mismatch_offset);
  Printf("%s", d.Default());

  stack->Print();

  PrintAddressDescription();
  t->Announce();

  PrintTags(untagged_addr + mismatch_offset);

  if (registers_frame)
    ReportRegisters(registers_frame, pc);

  MaybePrintAndroidHelpUrl();
  ReportErrorSummary(bug_type, stack);
}

}  // namespace

void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
  InvalidFreeReport R(stack, tagged_addr);
}

void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
                           const u8 *expected) {
  TailOverwrittenReport R(stack, tagged_addr, orig_size, expected);
}

void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
                       bool is_store, bool fatal, uptr *registers_frame) {
  TagMismatchReport R(stack, tagged_addr, access_size, is_store, fatal,
                      registers_frame);
}

// See the frame breakdown defined in __hwasan_tag_mismatch (from
// hwasan_tag_mismatch_{aarch64,riscv64}.S).
void ReportRegisters(const uptr *frame, uptr pc) {
  Printf("\nRegisters where the failure occurred (pc %p):\n", pc);

  // We explicitly print a single line (4 registers/line) each iteration to
  // reduce the amount of logcat error messages printed. Each Printf() will
  // result in a new logcat line, irrespective of whether a newline is present,
  // and so we wish to reduce the number of Printf() calls we have to make.
#if defined(__aarch64__)
  Printf(" x0 %016llx x1 %016llx x2 %016llx x3 %016llx\n",
         frame[0], frame[1], frame[2], frame[3]);
#elif SANITIZER_RISCV64
  Printf(" sp %016llx x1 %016llx x2 %016llx x3 %016llx\n",
         reinterpret_cast<const u8 *>(frame) + 256, frame[1], frame[2],
         frame[3]);
#endif
  Printf(" x4 %016llx x5 %016llx x6 %016llx x7 %016llx\n",
         frame[4], frame[5], frame[6], frame[7]);
  Printf(" x8 %016llx x9 %016llx x10 %016llx x11 %016llx\n",
         frame[8], frame[9], frame[10], frame[11]);
  Printf(" x12 %016llx x13 %016llx x14 %016llx x15 %016llx\n",
         frame[12], frame[13], frame[14], frame[15]);
  Printf(" x16 %016llx x17 %016llx x18 %016llx x19 %016llx\n",
         frame[16], frame[17], frame[18], frame[19]);
  Printf(" x20 %016llx x21 %016llx x22 %016llx x23 %016llx\n",
         frame[20], frame[21], frame[22], frame[23]);
  Printf(" x24 %016llx x25 %016llx x26 %016llx x27 %016llx\n",
         frame[24], frame[25], frame[26], frame[27]);
  // hwasan_check* reduces the stack pointer by 256, then __hwasan_tag_mismatch
  // passes it to this function.
#if defined(__aarch64__)
  Printf(" x28 %016llx x29 %016llx x30 %016llx sp %016llx\n", frame[28],
         frame[29], frame[30], reinterpret_cast<const u8 *>(frame) + 256);
#elif SANITIZER_RISCV64
  Printf(" x28 %016llx x29 %016llx x30 %016llx x31 %016llx\n", frame[28],
         frame[29], frame[30], frame[31]);
#else
#endif
}

}  // namespace __hwasan

void __hwasan_set_error_report_callback(void (*callback)(const char *)) {
  __hwasan::ScopedReport::SetErrorReportCallback(callback);
}
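
// A minimal usage sketch (not part of the runtime): a program built with
// -fsanitize=hwaddress can register a callback to capture the report text,
// for example to forward it to its own crash-logging pipeline. The logging
// sink below is hypothetical; only __hwasan_set_error_report_callback()
// itself is part of the public interface (declared in
// sanitizer/hwasan_interface.h).
//
//   #include <sanitizer/hwasan_interface.h>
//   #include <stdio.h>
//
//   static void SaveHwasanReport(const char *report) {
//     // Invoked with the full report text before the process aborts.
//     fprintf(stderr, "captured HWASan report:\n%s", report);
//   }
//
//   int main(int argc, char **argv) {
//     __hwasan_set_error_report_callback(SaveHwasanReport);
//     // ... rest of the program ...
//   }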