@@ -1,220 +1,220 @@
---- contrib/libs/tcmalloc/tcmalloc/internal/logging.h (index)
-+++ contrib/libs/tcmalloc/tcmalloc/internal/logging.h (working tree)
-@@ -67,6 +67,8 @@ struct StackTrace {
- // between the previous sample and this one
- size_t weight;
-
-+ void* user_data;
-+
- template <typename H>
- friend H AbslHashValue(H h, const StackTrace& t) {
- // As we use StackTrace as a key-value node in StackTraceTable, we only
---- contrib/libs/tcmalloc/tcmalloc/internal_malloc_extension.h (index)
-+++ contrib/libs/tcmalloc/tcmalloc/internal_malloc_extension.h (working tree)
-@@ -120,6 +120,12 @@ ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_SetMaxTotalThreadCacheBytes(
- ABSL_ATTRIBUTE_WEAK void
- MallocExtension_EnableForkSupport();
-
-+ABSL_ATTRIBUTE_WEAK void
-+MallocExtension_SetSampleUserDataCallbacks(
-+ tcmalloc::MallocExtension::CreateSampleUserDataCallback create,
-+ tcmalloc::MallocExtension::CopySampleUserDataCallback copy,
-+ tcmalloc::MallocExtension::DestroySampleUserDataCallback destroy);
-+
- }
-
- #endif
---- contrib/libs/tcmalloc/tcmalloc/malloc_extension.cc (index)
-+++ contrib/libs/tcmalloc/tcmalloc/malloc_extension.cc (working tree)
-@@ -468,6 +468,21 @@ void MallocExtension::EnableForkSupport() {
- #endif
- }
-
-+void MallocExtension::SetSampleUserDataCallbacks(
-+ CreateSampleUserDataCallback create,
-+ CopySampleUserDataCallback copy,
-+ DestroySampleUserDataCallback destroy) {
-+#if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
-+ if (&MallocExtension_SetSampleUserDataCallbacks != nullptr) {
-+ MallocExtension_SetSampleUserDataCallbacks(create, copy, destroy);
-+ }
-+#else
-+ (void)create;
-+ (void)copy;
-+ (void)destroy;
-+#endif
-+}
-+
- } // namespace tcmalloc
-
- // Default implementation just returns size. The expectation is that
---- contrib/libs/tcmalloc/tcmalloc/malloc_extension.h (index)
-+++ contrib/libs/tcmalloc/tcmalloc/malloc_extension.h (working tree)
-@@ -94,6 +94,8 @@ class Profile final {
-
- int depth;
- void* stack[kMaxStackDepth];
-+
-+ void* user_data;
- };
-
- void Iterate(absl::FunctionRef<void(const Sample&)> f) const;
-@@ -472,6 +474,16 @@ class MallocExtension final {
- // Enables fork support.
- // Allocator will continue to function correctly in the child, after calling fork().
- static void EnableForkSupport();
-+
-+ using CreateSampleUserDataCallback = void*();
-+ using CopySampleUserDataCallback = void*(void*);
-+ using DestroySampleUserDataCallback = void(void*);
-+
-+ // Sets callbacks for lifetime control of custom user data attached to allocation samples
-+ static void SetSampleUserDataCallbacks(
-+ CreateSampleUserDataCallback create,
-+ CopySampleUserDataCallback copy,
-+ DestroySampleUserDataCallback destroy);
- };
-
- } // namespace tcmalloc
---- contrib/libs/tcmalloc/tcmalloc/peak_heap_tracker.cc (index)
-+++ contrib/libs/tcmalloc/tcmalloc/peak_heap_tracker.cc (working tree)
-@@ -55,6 +55,7 @@ void PeakHeapTracker::MaybeSaveSample() {
- StackTrace *t = peak_sampled_span_stacks_, *next = nullptr;
- while (t != nullptr) {
- next = reinterpret_cast<StackTrace*>(t->stack[kMaxStackDepth - 1]);
-+ Static::DestroySampleUserData(t->user_data);
- Static::stacktrace_allocator().Delete(t);
- t = next;
- }
-@@ -63,7 +64,9 @@ void PeakHeapTracker::MaybeSaveSample() {
- for (Span* s : Static::sampled_objects_) {
- t = Static::stacktrace_allocator().New();
-
-- *t = *s->sampled_stack();
-+ StackTrace* sampled_stack = s->sampled_stack();
-+ *t = *sampled_stack;
-+ t->user_data = Static::CopySampleUserData(sampled_stack->user_data);
- if (t->depth == kMaxStackDepth) {
- t->depth = kMaxStackDepth - 1;
- }
---- contrib/libs/tcmalloc/tcmalloc/stack_trace_table.cc (index)
-+++ contrib/libs/tcmalloc/tcmalloc/stack_trace_table.cc (working tree)
-@@ -73,6 +73,7 @@ StackTraceTable::~StackTraceTable() {
- Bucket* b = table_[i];
- while (b != nullptr) {
- Bucket* next = b->next;
-+ Static::DestroySampleUserData(b->trace.user_data);
- Static::bucket_allocator().Delete(b);
- b = next;
- }
-@@ -104,6 +105,7 @@ void StackTraceTable::AddTrace(double count, const StackTrace& t) {
- b = Static::bucket_allocator().New();
- b->hash = h;
- b->trace = t;
-+ b->trace.user_data = Static::CopySampleUserData(t.user_data);
- b->count = count;
- b->total_weight = t.weight * count;
- b->next = table_[idx];
-@@ -135,6 +137,8 @@ void StackTraceTable::Iterate(
- e.requested_alignment = b->trace.requested_alignment;
- e.allocated_size = allocated_size;
-
-+ e.user_data = b->trace.user_data;
-+
- e.depth = b->trace.depth;
- static_assert(kMaxStackDepth <= Profile::Sample::kMaxStackDepth,
- "Profile stack size smaller than internal stack sizes");
---- contrib/libs/tcmalloc/tcmalloc/static_vars.cc (index)
-+++ contrib/libs/tcmalloc/tcmalloc/static_vars.cc (working tree)
-@@ -60,6 +60,12 @@ ABSL_CONST_INIT PageHeapAllocator<StackTraceTable::Bucket>
- ABSL_CONST_INIT std::atomic<bool> Static::inited_{false};
- ABSL_CONST_INIT bool Static::cpu_cache_active_ = false;
- ABSL_CONST_INIT bool Static::fork_support_enabled_ = false;
-+ABSL_CONST_INIT Static::CreateSampleUserDataCallback*
-+ Static::create_sample_user_data_callback_ = nullptr;
-+ABSL_CONST_INIT Static::CopySampleUserDataCallback*
-+ Static::copy_sample_user_data_callback_ = nullptr;
-+ABSL_CONST_INIT Static::DestroySampleUserDataCallback*
-+ Static::destroy_sample_user_data_callback_ = nullptr;
- ABSL_CONST_INIT Static::PageAllocatorStorage Static::page_allocator_;
- ABSL_CONST_INIT PageMap Static::pagemap_;
- ABSL_CONST_INIT absl::base_internal::SpinLock guarded_page_lock(
---- contrib/libs/tcmalloc/tcmalloc/static_vars.h (index)
-+++ contrib/libs/tcmalloc/tcmalloc/static_vars.h (working tree)
-@@ -130,6 +130,34 @@ class Static {
- static bool ForkSupportEnabled() { return fork_support_enabled_; }
- static void EnableForkSupport() { fork_support_enabled_ = true; }
-
-+ using CreateSampleUserDataCallback = void*();
-+ using CopySampleUserDataCallback = void*(void*);
-+ using DestroySampleUserDataCallback = void(void*);
-+
-+ static void SetSampleUserDataCallbacks(
-+ CreateSampleUserDataCallback create,
-+ CopySampleUserDataCallback copy,
-+ DestroySampleUserDataCallback destroy) {
-+ create_sample_user_data_callback_ = create;
-+ copy_sample_user_data_callback_ = copy;
-+ destroy_sample_user_data_callback_ = destroy;
-+ }
-+
-+ static void* CreateSampleUserData() {
-+ if (create_sample_user_data_callback_)
-+ return create_sample_user_data_callback_();
-+ return nullptr;
-+ }
-+ static void* CopySampleUserData(void* user_data) {
-+ if (copy_sample_user_data_callback_)
-+ return copy_sample_user_data_callback_(user_data);
-+ return nullptr;
-+ }
-+ static void DestroySampleUserData(void* user_data) {
-+ if (destroy_sample_user_data_callback_)
-+ destroy_sample_user_data_callback_(user_data);
-+ }
-+
- static bool ABSL_ATTRIBUTE_ALWAYS_INLINE IsOnFastPath() {
- return
- #ifndef TCMALLOC_DEPRECATED_PERTHREAD
-@@ -176,6 +204,9 @@ class Static {
- ABSL_CONST_INIT static std::atomic<bool> inited_;
- static bool cpu_cache_active_;
- static bool fork_support_enabled_;
-+ static CreateSampleUserDataCallback* create_sample_user_data_callback_;
-+ static CopySampleUserDataCallback* copy_sample_user_data_callback_;
-+ static DestroySampleUserDataCallback* destroy_sample_user_data_callback_;
- ABSL_CONST_INIT static PeakHeapTracker peak_heap_tracker_;
- ABSL_CONST_INIT static NumaTopology<kNumaPartitions, kNumBaseClasses>
- numa_topology_;
---- contrib/libs/tcmalloc/tcmalloc/tcmalloc.cc (index)
-+++ contrib/libs/tcmalloc/tcmalloc/tcmalloc.cc (working tree)
-@@ -1151,6 +1151,13 @@ void TCMallocPostFork() {
- }
- }
-
-+extern "C" void MallocExtension_SetSampleUserDataCallbacks(
-+ MallocExtension::CreateSampleUserDataCallback create,
-+ MallocExtension::CopySampleUserDataCallback copy,
-+ MallocExtension::DestroySampleUserDataCallback destroy) {
-+ Static::SetSampleUserDataCallbacks(create, copy, destroy);
-+}
-+
- // nallocx slow path.
- // Moved to a separate function because size_class_with_alignment is not inlined
- // which would cause nallocx to become non-leaf function with stack frame and
-@@ -1500,6 +1507,7 @@ static void* SampleifyAllocation(size_t requested_size, size_t weight,
- tmp.requested_alignment = requested_alignment;
- tmp.allocated_size = allocated_size;
- tmp.weight = weight;
-+ tmp.user_data = Static::CreateSampleUserData();
-
- {
- absl::base_internal::SpinLockHolder h(&pageheap_lock);
-@@ -1629,6 +1637,7 @@ static void do_free_pages(void* ptr, const PageId p) {
- 1);
- }
- notify_sampled_alloc = true;
-+ Static::DestroySampleUserData(st->user_data);
- Static::stacktrace_allocator().Delete(st);
- }
- if (IsSampledMemory(ptr)) {
+--- contrib/libs/tcmalloc/tcmalloc/internal/logging.h (index)
++++ contrib/libs/tcmalloc/tcmalloc/internal/logging.h (working tree)
+@@ -67,6 +67,8 @@ struct StackTrace {
+ // between the previous sample and this one
+ size_t weight;
+
++ void* user_data;
++
+ template <typename H>
+ friend H AbslHashValue(H h, const StackTrace& t) {
+ // As we use StackTrace as a key-value node in StackTraceTable, we only
+--- contrib/libs/tcmalloc/tcmalloc/internal_malloc_extension.h (index)
++++ contrib/libs/tcmalloc/tcmalloc/internal_malloc_extension.h (working tree)
+@@ -120,6 +120,12 @@ ABSL_ATTRIBUTE_WEAK void MallocExtension_Internal_SetMaxTotalThreadCacheBytes(
+ ABSL_ATTRIBUTE_WEAK void
+ MallocExtension_EnableForkSupport();
+
++ABSL_ATTRIBUTE_WEAK void
++MallocExtension_SetSampleUserDataCallbacks(
++ tcmalloc::MallocExtension::CreateSampleUserDataCallback create,
++ tcmalloc::MallocExtension::CopySampleUserDataCallback copy,
++ tcmalloc::MallocExtension::DestroySampleUserDataCallback destroy);
++
+ }
+
+ #endif
+--- contrib/libs/tcmalloc/tcmalloc/malloc_extension.cc (index)
++++ contrib/libs/tcmalloc/tcmalloc/malloc_extension.cc (working tree)
+@@ -468,6 +468,21 @@ void MallocExtension::EnableForkSupport() {
+ #endif
+ }
+
++void MallocExtension::SetSampleUserDataCallbacks(
++ CreateSampleUserDataCallback create,
++ CopySampleUserDataCallback copy,
++ DestroySampleUserDataCallback destroy) {
++#if ABSL_INTERNAL_HAVE_WEAK_MALLOCEXTENSION_STUBS
++ if (&MallocExtension_SetSampleUserDataCallbacks != nullptr) {
++ MallocExtension_SetSampleUserDataCallbacks(create, copy, destroy);
++ }
++#else
++ (void)create;
++ (void)copy;
++ (void)destroy;
++#endif
++}
++
+ } // namespace tcmalloc
+
+ // Default implementation just returns size. The expectation is that
+--- contrib/libs/tcmalloc/tcmalloc/malloc_extension.h (index)
++++ contrib/libs/tcmalloc/tcmalloc/malloc_extension.h (working tree)
+@@ -94,6 +94,8 @@ class Profile final {
+
+ int depth;
+ void* stack[kMaxStackDepth];
++
++ void* user_data;
+ };
+
+ void Iterate(absl::FunctionRef<void(const Sample&)> f) const;
+@@ -472,6 +474,16 @@ class MallocExtension final {
+ // Enables fork support.
+ // Allocator will continue to function correctly in the child, after calling fork().
+ static void EnableForkSupport();
++
++ using CreateSampleUserDataCallback = void*();
++ using CopySampleUserDataCallback = void*(void*);
++ using DestroySampleUserDataCallback = void(void*);
++
++ // Sets callbacks for lifetime control of custom user data attached to allocation samples
++ static void SetSampleUserDataCallbacks(
++ CreateSampleUserDataCallback create,
++ CopySampleUserDataCallback copy,
++ DestroySampleUserDataCallback destroy);
+ };
+
+ } // namespace tcmalloc
+--- contrib/libs/tcmalloc/tcmalloc/peak_heap_tracker.cc (index)
++++ contrib/libs/tcmalloc/tcmalloc/peak_heap_tracker.cc (working tree)
+@@ -55,6 +55,7 @@ void PeakHeapTracker::MaybeSaveSample() {
+ StackTrace *t = peak_sampled_span_stacks_, *next = nullptr;
+ while (t != nullptr) {
+ next = reinterpret_cast<StackTrace*>(t->stack[kMaxStackDepth - 1]);
++ Static::DestroySampleUserData(t->user_data);
+ Static::stacktrace_allocator().Delete(t);
+ t = next;
+ }
+@@ -63,7 +64,9 @@ void PeakHeapTracker::MaybeSaveSample() {
+ for (Span* s : Static::sampled_objects_) {
+ t = Static::stacktrace_allocator().New();
+
+- *t = *s->sampled_stack();
++ StackTrace* sampled_stack = s->sampled_stack();
++ *t = *sampled_stack;
++ t->user_data = Static::CopySampleUserData(sampled_stack->user_data);
+ if (t->depth == kMaxStackDepth) {
+ t->depth = kMaxStackDepth - 1;
+ }
+--- contrib/libs/tcmalloc/tcmalloc/stack_trace_table.cc (index)
++++ contrib/libs/tcmalloc/tcmalloc/stack_trace_table.cc (working tree)
+@@ -73,6 +73,7 @@ StackTraceTable::~StackTraceTable() {
+ Bucket* b = table_[i];
+ while (b != nullptr) {
+ Bucket* next = b->next;
++ Static::DestroySampleUserData(b->trace.user_data);
+ Static::bucket_allocator().Delete(b);
+ b = next;
+ }
+@@ -104,6 +105,7 @@ void StackTraceTable::AddTrace(double count, const StackTrace& t) {
+ b = Static::bucket_allocator().New();
+ b->hash = h;
+ b->trace = t;
++ b->trace.user_data = Static::CopySampleUserData(t.user_data);
+ b->count = count;
+ b->total_weight = t.weight * count;
+ b->next = table_[idx];
+@@ -135,6 +137,8 @@ void StackTraceTable::Iterate(
+ e.requested_alignment = b->trace.requested_alignment;
+ e.allocated_size = allocated_size;
+
++ e.user_data = b->trace.user_data;
++
+ e.depth = b->trace.depth;
+ static_assert(kMaxStackDepth <= Profile::Sample::kMaxStackDepth,
+ "Profile stack size smaller than internal stack sizes");
+--- contrib/libs/tcmalloc/tcmalloc/static_vars.cc (index)
++++ contrib/libs/tcmalloc/tcmalloc/static_vars.cc (working tree)
+@@ -60,6 +60,12 @@ ABSL_CONST_INIT PageHeapAllocator<StackTraceTable::Bucket>
+ ABSL_CONST_INIT std::atomic<bool> Static::inited_{false};
+ ABSL_CONST_INIT bool Static::cpu_cache_active_ = false;
+ ABSL_CONST_INIT bool Static::fork_support_enabled_ = false;
++ABSL_CONST_INIT Static::CreateSampleUserDataCallback*
++ Static::create_sample_user_data_callback_ = nullptr;
++ABSL_CONST_INIT Static::CopySampleUserDataCallback*
++ Static::copy_sample_user_data_callback_ = nullptr;
++ABSL_CONST_INIT Static::DestroySampleUserDataCallback*
++ Static::destroy_sample_user_data_callback_ = nullptr;
+ ABSL_CONST_INIT Static::PageAllocatorStorage Static::page_allocator_;
+ ABSL_CONST_INIT PageMap Static::pagemap_;
+ ABSL_CONST_INIT absl::base_internal::SpinLock guarded_page_lock(
+--- contrib/libs/tcmalloc/tcmalloc/static_vars.h (index)
++++ contrib/libs/tcmalloc/tcmalloc/static_vars.h (working tree)
+@@ -130,6 +130,34 @@ class Static {
+ static bool ForkSupportEnabled() { return fork_support_enabled_; }
+ static void EnableForkSupport() { fork_support_enabled_ = true; }
+
++ using CreateSampleUserDataCallback = void*();
++ using CopySampleUserDataCallback = void*(void*);
++ using DestroySampleUserDataCallback = void(void*);
++
++ static void SetSampleUserDataCallbacks(
++ CreateSampleUserDataCallback create,
++ CopySampleUserDataCallback copy,
++ DestroySampleUserDataCallback destroy) {
++ create_sample_user_data_callback_ = create;
++ copy_sample_user_data_callback_ = copy;
++ destroy_sample_user_data_callback_ = destroy;
++ }
++
++ static void* CreateSampleUserData() {
++ if (create_sample_user_data_callback_)
++ return create_sample_user_data_callback_();
++ return nullptr;
++ }
++ static void* CopySampleUserData(void* user_data) {
++ if (copy_sample_user_data_callback_)
++ return copy_sample_user_data_callback_(user_data);
++ return nullptr;
++ }
++ static void DestroySampleUserData(void* user_data) {
++ if (destroy_sample_user_data_callback_)
++ destroy_sample_user_data_callback_(user_data);
++ }
++
+ static bool ABSL_ATTRIBUTE_ALWAYS_INLINE IsOnFastPath() {
+ return
+ #ifndef TCMALLOC_DEPRECATED_PERTHREAD
+@@ -176,6 +204,9 @@ class Static {
+ ABSL_CONST_INIT static std::atomic<bool> inited_;
+ static bool cpu_cache_active_;
+ static bool fork_support_enabled_;
++ static CreateSampleUserDataCallback* create_sample_user_data_callback_;
++ static CopySampleUserDataCallback* copy_sample_user_data_callback_;
++ static DestroySampleUserDataCallback* destroy_sample_user_data_callback_;
+ ABSL_CONST_INIT static PeakHeapTracker peak_heap_tracker_;
+ ABSL_CONST_INIT static NumaTopology<kNumaPartitions, kNumBaseClasses>
+ numa_topology_;
+--- contrib/libs/tcmalloc/tcmalloc/tcmalloc.cc (index)
++++ contrib/libs/tcmalloc/tcmalloc/tcmalloc.cc (working tree)
+@@ -1151,6 +1151,13 @@ void TCMallocPostFork() {
+ }
+ }
+
++extern "C" void MallocExtension_SetSampleUserDataCallbacks(
++ MallocExtension::CreateSampleUserDataCallback create,
++ MallocExtension::CopySampleUserDataCallback copy,
++ MallocExtension::DestroySampleUserDataCallback destroy) {
++ Static::SetSampleUserDataCallbacks(create, copy, destroy);
++}
++
+ // nallocx slow path.
+ // Moved to a separate function because size_class_with_alignment is not inlined
+ // which would cause nallocx to become non-leaf function with stack frame and
+@@ -1500,6 +1507,7 @@ static void* SampleifyAllocation(size_t requested_size, size_t weight,
+ tmp.requested_alignment = requested_alignment;
+ tmp.allocated_size = allocated_size;
+ tmp.weight = weight;
++ tmp.user_data = Static::CreateSampleUserData();
+
+ {
+ absl::base_internal::SpinLockHolder h(&pageheap_lock);
+@@ -1629,6 +1637,7 @@ static void do_free_pages(void* ptr, const PageId p) {
+ 1);
+ }
+ notify_sampled_alloc = true;
++ Static::DestroySampleUserData(st->user_data);
+ Static::stacktrace_allocator().Delete(st);
+ }
+ if (IsSampledMemory(ptr)) {
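
Usage sketch (not part of the patch): a minimal example of how a client might register the callbacks exposed by the public API added above. Only MallocExtension::SetSampleUserDataCallbacks and the three callback signatures come from the diff; the include path, the SampleTag type, and its contents are hypothetical.

#include "tcmalloc/malloc_extension.h"  // assumed include path

// Hypothetical payload attached to each sampled allocation.
struct SampleTag {
  int request_id;
};

// Matches CreateSampleUserDataCallback = void*(): called when a new sample is recorded.
static void* CreateTag() { return new SampleTag{0}; }

// Matches CopySampleUserDataCallback = void*(void*): called when a sample's StackTrace is
// duplicated (e.g. into StackTraceTable or the peak-heap snapshot).
static void* CopyTag(void* p) {
  return p ? new SampleTag(*static_cast<SampleTag*>(p)) : nullptr;
}

// Matches DestroySampleUserDataCallback = void(void*): called when a sample is released.
static void DestroyTag(void* p) { delete static_cast<SampleTag*>(p); }

int main() {
  tcmalloc::MallocExtension::SetSampleUserDataCallbacks(CreateTag, CopyTag, DestroyTag);
  // Sampled allocations made after this point carry a SampleTag, surfaced as
  // Profile::Sample::user_data when a heap profile is iterated.
  return 0;
}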