// stacktrace_aarch64-inl.inc
  1. #ifndef Y_ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_
  2. #define Y_ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_
  3. // Generate stack tracer for aarch64
  4. #if defined(__linux__)
  5. #include <signal.h>
  6. #include <sys/mman.h>
  7. #include <ucontext.h>
  8. #include <unistd.h>
  9. #endif
  10. #include <atomic>
  11. #include <cassert>
  12. #include <cstdint>
  13. #include <iostream>
  14. #include <limits>
  15. #include "y_absl/base/attributes.h"
  16. #include "y_absl/debugging/internal/address_is_readable.h"
  17. #include "y_absl/debugging/internal/vdso_support.h" // a no-op on non-elf or non-glibc systems
  18. #include "y_absl/debugging/stacktrace.h"
  19. static const size_t kUnknownFrameSize = 0;
  20. // Stack end to use when we don't know the actual stack end
  21. // (effectively just the end of address space).
  22. constexpr uintptr_t kUnknownStackEnd =
  23. std::numeric_limits<size_t>::max() - sizeof(void *);
  24. #if defined(__linux__)
// Returns the address of the VDSO __kernel_rt_sigreturn function, if present.
// Returns nullptr when the VDSO or the symbol is unavailable. The result is
// memoized in a relaxed atomic: the lookup is idempotent, so a benign race
// where two threads both perform it stores the same value.
static const unsigned char* GetKernelRtSigreturnAddress() {
  // 1 is never a valid code address; it marks "not yet computed".
  constexpr uintptr_t kImpossibleAddress = 1;
  Y_ABSL_CONST_INIT static std::atomic<uintptr_t> memoized{kImpossibleAddress};
  uintptr_t address = memoized.load(std::memory_order_relaxed);
  if (address != kImpossibleAddress) {
    // Fast path: already computed (possibly to nullptr).
    return reinterpret_cast<const unsigned char*>(address);
  }
  address = reinterpret_cast<uintptr_t>(nullptr);
#ifdef Y_ABSL_HAVE_VDSO_SUPPORT
  y_absl::debugging_internal::VDSOSupport vdso;
  if (vdso.IsPresent()) {
    y_absl::debugging_internal::VDSOSupport::SymbolInfo symbol_info;
    // The symbol may be exported with either type; try both.
    auto lookup = [&](int type) {
      return vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.6.39", type,
                               &symbol_info);
    };
    if ((!lookup(STT_FUNC) && !lookup(STT_NOTYPE)) ||
        symbol_info.address == nullptr) {
      // Unexpected: VDSO is present, yet the expected symbol is missing
      // or null.
      assert(false && "VDSO is present, but doesn't have expected symbol");
    } else {
      // Guard against the sentinel value colliding with a real address.
      if (reinterpret_cast<uintptr_t>(symbol_info.address) !=
          kImpossibleAddress) {
        address = reinterpret_cast<uintptr_t>(symbol_info.address);
      } else {
        assert(false && "VDSO returned invalid address");
      }
    }
  }
#endif
  memoized.store(address, std::memory_order_relaxed);
  return reinterpret_cast<const unsigned char*>(address);
}
  60. #endif // __linux__
  61. // Compute the size of a stack frame in [low..high). We assume that
  62. // low < high. Return size of kUnknownFrameSize.
  63. template<typename T>
  64. static size_t ComputeStackFrameSize(const T* low,
  65. const T* high) {
  66. const char* low_char_ptr = reinterpret_cast<const char *>(low);
  67. const char* high_char_ptr = reinterpret_cast<const char *>(high);
  68. return low < high ? static_cast<size_t>(high_char_ptr - low_char_ptr)
  69. : kUnknownFrameSize;
  70. }
// Saves stack info that is expensive to calculate to avoid recalculating
// per frame. All bounds are half-open ranges [low, high); `high` is
// kUnknownStackEnd when the actual upper bound is not known.
struct StackInfo {
  uintptr_t stack_low;       // low bound of the regular stack
  uintptr_t stack_high;      // high bound of the regular stack
  uintptr_t sig_stack_low;   // low bound of the signal stack
  uintptr_t sig_stack_high;  // high bound of the signal stack
};
  78. static bool InsideSignalStack(void** ptr, const StackInfo* stack_info) {
  79. uintptr_t comparable_ptr = reinterpret_cast<uintptr_t>(ptr);
  80. return (comparable_ptr >= stack_info->sig_stack_low &&
  81. comparable_ptr < stack_info->sig_stack_high);
  82. }
// Given a pointer to a stack frame, locate and return the calling
// stackframe, or return null if no stackframe can be found. Perform sanity
// checks (the strictness of which is controlled by the boolean parameter
// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
//
// `old_frame_pointer` is the current frame pointer; `uc` is the signal
// ucontext (consulted only when WITH_CONTEXT); `stack_info` carries the
// precomputed stack bounds used for validation.
template<bool STRICT_UNWINDING, bool WITH_CONTEXT>
Y_ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
Y_ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
static void **NextStackFrame(void **old_frame_pointer, const void *uc,
                             const StackInfo *stack_info) {
  // The first word of a frame record is the saved caller frame pointer
  // (see the layout comment in UnwindImpl).
  void **new_frame_pointer = reinterpret_cast<void**>(*old_frame_pointer);
#if defined(__linux__)
  if (WITH_CONTEXT && uc != nullptr) {
    // Check to see if next frame's return address is __kernel_rt_sigreturn.
    if (old_frame_pointer[1] == GetKernelRtSigreturnAddress()) {
      const ucontext_t *ucv = static_cast<const ucontext_t *>(uc);
      // old_frame_pointer[0] is not suitable for unwinding, look at
      // ucontext to discover frame pointer before signal (x29 is saved in
      // uc_mcontext.regs[29]).
      void **const pre_signal_frame_pointer =
          reinterpret_cast<void **>(ucv->uc_mcontext.regs[29]);
      // The most recent signal always needs special handling to find the frame
      // pointer, but a nested signal does not. If pre_signal_frame_pointer is
      // earlier in the stack than the old_frame_pointer, then use it. If it is
      // later, then we have already unwound through it and it needs no special
      // handling.
      if (pre_signal_frame_pointer >= old_frame_pointer) {
        new_frame_pointer = pre_signal_frame_pointer;
      }
      // Check that alleged frame pointer is actually readable. This is to
      // prevent "double fault" in case we hit the first fault due to e.g.
      // stack corruption.
      if (!y_absl::debugging_internal::AddressIsReadable(
              new_frame_pointer))
        return nullptr;
    }
  }
#endif
  // The frame pointer should be 8-byte aligned.
  if ((reinterpret_cast<uintptr_t>(new_frame_pointer) & 7) != 0)
    return nullptr;
  // Only check the size if both frames are in the same stack.
  if (InsideSignalStack(new_frame_pointer, stack_info) ==
      InsideSignalStack(old_frame_pointer, stack_info)) {
    // Check frame size. In strict mode, we assume frames to be under
    // 100,000 bytes. In non-strict mode, we relax the limit to 1MB.
    const size_t max_size = STRICT_UNWINDING ? 100000 : 1000000;
    const size_t frame_size =
        ComputeStackFrameSize(old_frame_pointer, new_frame_pointer);
    if (frame_size == kUnknownFrameSize)
      return nullptr;
    // A very large frame may mean corrupt memory or an erroneous frame
    // pointer. But also maybe just a plain-old large frame. Assume that if the
    // frame is within a known stack, then it is valid.
    if (frame_size > max_size) {
      // Pick the bounds of whichever stack the candidate frame is on.
      size_t stack_low = stack_info->stack_low;
      size_t stack_high = stack_info->stack_high;
      if (InsideSignalStack(new_frame_pointer, stack_info)) {
        stack_low = stack_info->sig_stack_low;
        stack_high = stack_info->sig_stack_high;
      }
      if (stack_high < kUnknownStackEnd &&
          static_cast<size_t>(getpagesize()) < stack_low) {
        const uintptr_t new_fp_u =
            reinterpret_cast<uintptr_t>(new_frame_pointer);
        // Stack bounds are known.
        if (!(stack_low < new_fp_u && new_fp_u <= stack_high)) {
          // new_frame_pointer is not within a known stack.
          return nullptr;
        }
      } else {
        // Stack bounds are unknown, prefer truncated stack to possible crash.
        return nullptr;
      }
    }
  }
  return new_frame_pointer;
}
template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
// We count on the bottom frame being this one. See the comment
// at prev_return_address
Y_ABSL_ATTRIBUTE_NOINLINE
Y_ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
Y_ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
// Walks the frame-pointer chain starting at this function's own frame.
// `result` receives up to `max_depth` return addresses after skipping
// `skip_count` caller frames (plus this function's own frame). When
// IS_STACK_FRAMES is set, `sizes[i]` receives the size of the frame for
// `result[i]`. `ucp` is the signal ucontext, used only when IS_WITH_CONTEXT.
// If `min_dropped_frames` is non-null it receives a lower bound on the
// number of frames that did not fit in `result` (counting is capped below).
// Returns the number of entries stored in `result`.
static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
                      const void *ucp, int *min_dropped_frames) {
#ifdef __GNUC__
  void **frame_pointer = reinterpret_cast<void**>(__builtin_frame_address(0));
#else
# error reading stack point not yet supported on this platform.
#endif
  skip_count++;  // Skip the frame for this function.
  int n = 0;
  // Assume that the first page is not stack.
  StackInfo stack_info;
  stack_info.stack_low = static_cast<uintptr_t>(getpagesize());
  stack_info.stack_high = kUnknownStackEnd;
  stack_info.sig_stack_low = stack_info.stack_low;
  stack_info.sig_stack_high = kUnknownStackEnd;
  // The frame pointer points to low address of a frame. The first 64-bit
  // word of a frame points to the next frame up the call chain, which normally
  // is just after the high address of the current frame. The second word of
  // a frame contains return address of to the caller. To find a pc value
  // associated with the current frame, we need to go down a level in the call
  // chain. So we remember return the address of the last frame seen. This
  // does not work for the first stack frame, which belongs to UnwindImp() but
  // we skip the frame for UnwindImp() anyway.
  void* prev_return_address = nullptr;
  // The nth frame size is the difference between the nth frame pointer and the
  // the frame pointer below it in the call chain. There is no frame below the
  // leaf frame, but this function is the leaf anyway, and we skip it.
  void** prev_frame_pointer = nullptr;
  while (frame_pointer && n < max_depth) {
    if (skip_count > 0) {
      skip_count--;
    } else {
      // Record the PC of the frame one level down (see comment above).
      result[n] = prev_return_address;
      if (IS_STACK_FRAMES) {
        sizes[n] = static_cast<int>(
            ComputeStackFrameSize(prev_frame_pointer, frame_pointer));
      }
      n++;
    }
    prev_return_address = frame_pointer[1];
    prev_frame_pointer = frame_pointer;
    // The y_absl::GetStackFrames routine is called when we are in some
    // informational context (the failure signal handler for example).
    // Use the non-strict unwinding rules to produce a stack trace
    // that is as complete as possible (even if it contains a few bogus
    // entries in some rare cases).
    frame_pointer = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(
        frame_pointer, ucp, &stack_info);
  }
  if (min_dropped_frames != nullptr) {
    // Implementation detail: we clamp the max of frames we are willing to
    // count, so as not to spend too much time in the loop below.
    const int kMaxUnwind = 200;
    int num_dropped_frames = 0;
    // Keep unwinding past max_depth only to count what was dropped; frames
    // still owed to skip_count are not counted as dropped.
    for (int j = 0; frame_pointer != nullptr && j < kMaxUnwind; j++) {
      if (skip_count > 0) {
        skip_count--;
      } else {
        num_dropped_frames++;
      }
      frame_pointer = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(
          frame_pointer, ucp, &stack_info);
    }
    *min_dropped_frames = num_dropped_frames;
  }
  return n;
}
  232. namespace y_absl {
  233. Y_ABSL_NAMESPACE_BEGIN
  234. namespace debugging_internal {
  235. bool StackTraceWorksForTest() {
  236. return true;
  237. }
  238. } // namespace debugging_internal
  239. Y_ABSL_NAMESPACE_END
  240. } // namespace y_absl
  241. #endif // Y_ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_