stacktrace_generic-inl.inc 3.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108
  1. // Copyright 2017 The Abseil Authors.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // https://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. //
  15. // Portable implementation - just use glibc
  16. //
  17. // Note: The glibc implementation may cause a call to malloc.
  18. // This can cause a deadlock in HeapProfiler.
  19. #ifndef Y_ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_
  20. #define Y_ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_
  21. #include <execinfo.h>
  22. #include <atomic>
  23. #include <cstring>
  24. #include "y_absl/debugging/stacktrace.h"
  25. #include "y_absl/base/attributes.h"
// Sometimes, we can try to get a stack trace from within a stack
// trace, because we don't block signals inside this code (which would be too
// expensive: the two extra system calls per stack trace do matter here).
// That can cause a self-deadlock.
// Protect against such reentrant call by failing to get a stack trace.
//
// We use __thread here because the code here is extremely low level -- it is
// called while collecting stack traces from within malloc and mmap, and thus
// can not call anything which might call malloc or mmap itself.
// Nonzero while the current thread is already inside UnwindImpl below;
// checked there to bail out instead of unwinding recursively.
static __thread int recursive = 0;
// The stack trace function might be invoked very early in the program's
// execution (e.g. from the very first malloc if using tcmalloc). Also, the
// glibc implementation itself will trigger malloc the first time it is called.
// As such, we suppress usage of backtrace during this early stage of execution.
// Cleared by the static initializer below once backtrace() has been warmed up.
static std::atomic<bool> disable_stacktraces(true);  // Disabled until healthy.
  41. // Waiting until static initializers run seems to be late enough.
  42. // This file is included into stacktrace.cc so this will only run once.
  43. Y_ABSL_ATTRIBUTE_UNUSED static int stacktraces_enabler = []() {
  44. void* unused_stack[1];
  45. // Force the first backtrace to happen early to get the one-time shared lib
  46. // loading (allocation) out of the way. After the first call it is much safer
  47. // to use backtrace from a signal handler if we crash somewhere later.
  48. backtrace(unused_stack, 1);
  49. disable_stacktraces.store(false, std::memory_order_relaxed);
  50. return 0;
  51. }();
  52. template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
  53. static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
  54. const void *ucp, int *min_dropped_frames) {
  55. if (recursive || disable_stacktraces.load(std::memory_order_relaxed)) {
  56. return 0;
  57. }
  58. ++recursive;
  59. static_cast<void>(ucp); // Unused.
  60. static const int kStackLength = 64;
  61. void * stack[kStackLength];
  62. int size;
  63. size = backtrace(stack, kStackLength);
  64. skip_count++; // we want to skip the current frame as well
  65. int result_count = size - skip_count;
  66. if (result_count < 0)
  67. result_count = 0;
  68. if (result_count > max_depth)
  69. result_count = max_depth;
  70. for (int i = 0; i < result_count; i++)
  71. result[i] = stack[i + skip_count];
  72. if (IS_STACK_FRAMES) {
  73. // No implementation for finding out the stack frame sizes yet.
  74. memset(sizes, 0, sizeof(*sizes) * static_cast<size_t>(result_count));
  75. }
  76. if (min_dropped_frames != nullptr) {
  77. if (size - skip_count - max_depth > 0) {
  78. *min_dropped_frames = size - skip_count - max_depth;
  79. } else {
  80. *min_dropped_frames = 0;
  81. }
  82. }
  83. --recursive;
  84. return result_count;
  85. }
  86. namespace y_absl {
  87. Y_ABSL_NAMESPACE_BEGIN
  88. namespace debugging_internal {
  89. bool StackTraceWorksForTest() {
  90. return true;
  91. }
  92. } // namespace debugging_internal
  93. Y_ABSL_NAMESPACE_END
  94. } // namespace y_absl
  95. #endif // Y_ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_