sanitizer_stacktrace.h

//===-- sanitizer_stacktrace.h ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_STACKTRACE_H
#define SANITIZER_STACKTRACE_H

#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform.h"

namespace __sanitizer {

struct BufferedStackTrace;

static const u32 kStackTraceMax = 255;

#if SANITIZER_LINUX && defined(__mips__)
# define SANITIZER_CAN_FAST_UNWIND 0
#elif SANITIZER_WINDOWS
# define SANITIZER_CAN_FAST_UNWIND 0
#else
# define SANITIZER_CAN_FAST_UNWIND 1
#endif

// Fast unwind is the only option on Mac for now; we will need to
// revisit this macro when slow unwind works on Mac, see
// https://github.com/google/sanitizers/issues/137
#if SANITIZER_MAC
# define SANITIZER_CAN_SLOW_UNWIND 0
#else
# define SANITIZER_CAN_SLOW_UNWIND 1
#endif

struct StackTrace {
  const uptr *trace;
  u32 size;
  u32 tag;

  static const int TAG_UNKNOWN = 0;
  static const int TAG_ALLOC = 1;
  static const int TAG_DEALLOC = 2;
  static const int TAG_CUSTOM = 100;  // Tool specific tags start here.

  StackTrace() : trace(nullptr), size(0), tag(0) {}
  StackTrace(const uptr *trace, u32 size) : trace(trace), size(size), tag(0) {}
  StackTrace(const uptr *trace, u32 size, u32 tag)
      : trace(trace), size(size), tag(tag) {}

  // Prints a symbolized stacktrace, followed by an empty line.
  void Print() const;

  // Prints a symbolized stacktrace to the output string, followed by an empty
  // line.
  void PrintTo(InternalScopedString *output) const;

  // Prints a symbolized stacktrace to the output buffer, followed by an empty
  // line. Returns the number of symbols that should have been written to
  // buffer (not including trailing '\0'). Thus, the string is truncated iff
  // return value is not less than "out_buf_size".
  uptr PrintTo(char *out_buf, uptr out_buf_size) const;
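
  // Decides whether the frame-pointer-based (fast) unwinder will be used.
  // If only one of the two unwinders is available on this platform, that one
  // wins; otherwise the caller's request is honored.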
  static bool WillUseFastUnwind(bool request_fast_unwind) {
    if (!SANITIZER_CAN_FAST_UNWIND)
      return false;
    if (!SANITIZER_CAN_SLOW_UNWIND)
      return true;
    return request_fast_unwind;
  }

  static uptr GetCurrentPc();
  static inline uptr GetPreviousInstructionPc(uptr pc);
  static uptr GetNextInstructionPc(uptr pc);
};
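
// Unwinders collect return addresses, which point to the instruction after
// the call; stepping back into the call instruction itself lets symbolization
// report the correct source line for each frame.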
// Performance-critical, must be in the header.
ALWAYS_INLINE
uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
#if defined(__arm__)
  // T32 (Thumb) branch instructions might be 16 or 32 bit long,
  // so we return (pc-2) in that case in order to be safe.
  // For A32 mode we return (pc-4) because all instructions are 32 bit long.
  return (pc - 3) & (~1);
#elif defined(__powerpc__) || defined(__powerpc64__) || defined(__aarch64__)
  // PCs are always 4 byte aligned.
  return pc - 4;
#elif defined(__sparc__) || defined(__mips__)
  return pc - 8;
#elif SANITIZER_RISCV64
  // RV-64 has variable instruction length: the C extension gives us 2-byte
  // instructions, base RV-64 has 4-byte instructions, and the RISC-V
  // architecture allows instructions up to 8 bytes. It seems difficult to
  // figure out the exact instruction length, so pc - 2 seems like a safe
  // option for the purposes of stack tracing.
  return pc - 2;
#else
  return pc - 1;
#endif
}

// StackTrace that owns the buffer used to store the addresses.
struct BufferedStackTrace : public StackTrace {
  uptr trace_buffer[kStackTraceMax];
  uptr top_frame_bp;  // Optional bp of a top frame.

  BufferedStackTrace() : StackTrace(trace_buffer, 0), top_frame_bp(0) {}

  void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0);

  // Get the stack trace with the given pc and bp.
  // The pc will be in the position 0 of the resulting stack trace.
  // The bp may refer to the current frame or to the caller's frame.
  void Unwind(uptr pc, uptr bp, void *context, bool request_fast,
              u32 max_depth = kStackTraceMax) {
    top_frame_bp = (max_depth > 0) ? bp : 0;
    // Small max_depth optimization.
    if (max_depth <= 1) {
      if (max_depth == 1)
        trace_buffer[0] = pc;
      size = max_depth;
      return;
    }
    UnwindImpl(pc, bp, context, request_fast, max_depth);
  }
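
  // Unwind using explicitly provided stack bounds; dispatches to the fast or
  // slow unwinder depending on request_fast_unwind.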
  void Unwind(u32 max_depth, uptr pc, uptr bp, void *context, uptr stack_top,
              uptr stack_bottom, bool request_fast_unwind);

  void Reset() {
    *static_cast<StackTrace *>(this) = StackTrace(trace_buffer, 0);
    top_frame_bp = 0;
  }

 private:
  // Every runtime defines its own implementation of this method.
  void UnwindImpl(uptr pc, uptr bp, void *context, bool request_fast,
                  u32 max_depth);

  // UnwindFast/Slow have platform-specific implementations.
  void UnwindFast(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom,
                  u32 max_depth);
  void UnwindSlow(uptr pc, u32 max_depth);
  void UnwindSlow(uptr pc, void *context, u32 max_depth);

  void PopStackFrames(uptr count);
  uptr LocatePcInTrace(uptr pc);

  BufferedStackTrace(const BufferedStackTrace &) = delete;
  void operator=(const BufferedStackTrace &) = delete;

  friend class FastUnwindTest;
};
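
// Minimal frame size assumed by IsValidFrame() below: the s390 ABIs reserve
// a fixed register save area (160 bytes on 64-bit, 96 bytes on 31-bit);
// elsewhere two pointer-sized slots (saved frame pointer and return address)
// are assumed.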
#if defined(__s390x__)
static const uptr kFrameSize = 160;
#elif defined(__s390__)
static const uptr kFrameSize = 96;
#else
static const uptr kFrameSize = 2 * sizeof(uhwptr);
#endif

// Check if given pointer points into allocated stack area.
static inline bool IsValidFrame(uptr frame, uptr stack_top, uptr stack_bottom) {
  return frame > stack_bottom && frame < stack_top - kFrameSize;
}

}  // namespace __sanitizer

// Use this macro if you want to print stack trace with the caller
// of the current function in the top frame.
#define GET_CALLER_PC_BP         \
  uptr bp = GET_CURRENT_FRAME(); \
  uptr pc = GET_CALLER_PC();

#define GET_CALLER_PC_BP_SP \
  GET_CALLER_PC_BP;         \
  uptr local_stack;         \
  uptr sp = (uptr)&local_stack

// Use this macro if you want to print stack trace with the current
// function in the top frame.
#define GET_CURRENT_PC_BP        \
  uptr bp = GET_CURRENT_FRAME(); \
  uptr pc = StackTrace::GetCurrentPc()

#define GET_CURRENT_PC_BP_SP \
  GET_CURRENT_PC_BP;         \
  uptr local_stack;          \
  uptr sp = (uptr)&local_stack
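
// Illustrative sketch of how a sanitizer runtime typically combines these
// pieces (exact call sites vary per tool):
//   GET_CALLER_PC_BP;
//   BufferedStackTrace stack;
//   stack.Unwind(pc, bp, /*context=*/nullptr, /*request_fast=*/true);
//   stack.Print();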

// GET_CURRENT_PC() is equivalent to StackTrace::GetCurrentPc().
// Optimized x86 version is faster than GetCurrentPc because
// it does not involve a function call, instead it reads the RIP register.
// Reads of RIP by an instruction return RIP pointing to the next
// instruction, which is exactly what we want here, thus 0 offset.
// It needs to be a macro because otherwise we will get the name
// of this function on the top of most stacks. Attribute artificial
// does not do what it claims to do, unfortunately. And attribute
// __nodebug__ is clang-only. If we had an attribute that removed this
// function from debug info, we could simply make
// StackTrace::GetCurrentPc() faster.
#if defined(__x86_64__)
# define GET_CURRENT_PC()                 \
  (__extension__({                        \
    uptr pc;                              \
    asm("lea 0(%%rip), %0" : "=r"(pc));   \
    pc;                                   \
  }))
#else
# define GET_CURRENT_PC() StackTrace::GetCurrentPc()
#endif

#endif  // SANITIZER_STACKTRACE_H