memtrace.c

/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */
#include <aws/common/atomics.h>
#include <aws/common/byte_buf.h>
#include <aws/common/clock.h>
#include <aws/common/hash_table.h>
#include <aws/common/logging.h>
#include <aws/common/mutex.h>
#include <aws/common/priority_queue.h>
#include <aws/common/string.h>
#include <aws/common/system_info.h>

/* describes a single live allocation.
 * allocated by aws_default_allocator() */
struct alloc_info {
    size_t size;
    uint64_t time;
    uint64_t stack; /* hash of stack frame pointers */
};

/* Using a flexible array member is the C99 compliant way to have the frames immediately follow the header.
 *
 * MSVC doesn't know this for some reason so we need to use a pragma to make
 * it happy.
 */
#ifdef _MSC_VER
#    pragma warning(push)
#    pragma warning(disable : 4200) /* nonstandard extension used: zero-sized array in struct/union */
#endif

/* one of these is stored per unique stack
 * allocated by aws_default_allocator() */
struct stack_trace {
    size_t depth;         /* length of frames[] */
    void *const frames[]; /* rest of frames are allocated after */
};

#ifdef _MSC_VER
#    pragma warning(pop)
#endif
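
/* Illustrative sketch (not part of the build): a stack_trace and its flexible array
 * member are carved out of a single allocation sized as header + N frame pointers,
 * matching the allocation made in s_alloc_tracer_track() below. The frame count here
 * is an arbitrary example value.
 *
 *     size_t frames_per_stack = 8;
 *     struct stack_trace *example = aws_mem_calloc(
 *         aws_default_allocator(), 1, sizeof(struct stack_trace) + sizeof(void *) * frames_per_stack);
 *     // example->frames[0 .. frames_per_stack-1] now immediately follow the header
 *     aws_mem_release(aws_default_allocator(), example);
 */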
/* Tracking structure, used as the allocator impl.
 * This structure, and all its bookkeeping data structures, are created with the aws_default_allocator().
 * This is not customizable because it's too expensive for every little allocation to store
 * a pointer back to its original allocator. */
struct alloc_tracer {
    struct aws_allocator *traced_allocator; /* underlying allocator */
    enum aws_mem_trace_level level;         /* level to trace at */
    size_t frames_per_stack;                /* how many frames to keep per stack */
    struct aws_atomic_var allocated;        /* bytes currently allocated */
    struct aws_mutex mutex;                 /* protects everything below */
    struct aws_hash_table allocs;           /* live allocations, maps address -> alloc_info */
    struct aws_hash_table stacks;           /* unique stack traces, maps hash -> stack_trace */
};

/* number of frames to skip in call stacks (s_alloc_tracer_track, and the vtable function) */
#define FRAMES_TO_SKIP 2

static void *s_trace_mem_acquire(struct aws_allocator *allocator, size_t size);
static void s_trace_mem_release(struct aws_allocator *allocator, void *ptr);
static void *s_trace_mem_realloc(struct aws_allocator *allocator, void *old_ptr, size_t old_size, size_t new_size);
static void *s_trace_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size);

static struct aws_allocator s_trace_allocator = {
    .mem_acquire = s_trace_mem_acquire,
    .mem_release = s_trace_mem_release,
    .mem_realloc = s_trace_mem_realloc,
    .mem_calloc = s_trace_mem_calloc,
};

/* for the hash table, to destroy elements */
static void s_destroy_alloc(void *data) {
    struct alloc_info *alloc = data;
    aws_mem_release(aws_default_allocator(), alloc);
}

static void s_destroy_stacktrace(void *data) {
    struct stack_trace *stack = data;
    aws_mem_release(aws_default_allocator(), stack);
}

static void s_alloc_tracer_init(
    struct alloc_tracer *tracer,
    struct aws_allocator *traced_allocator,
    enum aws_mem_trace_level level,
    size_t frames_per_stack) {

    void *stack[1];
    if (!aws_backtrace(stack, 1)) {
        /* clamp level if tracing isn't available */
        level = level > AWS_MEMTRACE_BYTES ? AWS_MEMTRACE_BYTES : level;
    }

    tracer->traced_allocator = traced_allocator;
    tracer->level = level;

    if (tracer->level >= AWS_MEMTRACE_BYTES) {
        aws_atomic_init_int(&tracer->allocated, 0);
        AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_mutex_init(&tracer->mutex));
        AWS_FATAL_ASSERT(
            AWS_OP_SUCCESS ==
            aws_hash_table_init(
                &tracer->allocs, aws_default_allocator(), 1024, aws_hash_ptr, aws_ptr_eq, NULL, s_destroy_alloc));
    }

    if (tracer->level == AWS_MEMTRACE_STACKS) {
        if (frames_per_stack > 128) {
            frames_per_stack = 128;
        }
        tracer->frames_per_stack = frames_per_stack ? frames_per_stack : 8;
        AWS_FATAL_ASSERT(
            AWS_OP_SUCCESS ==
            aws_hash_table_init(
                &tracer->stacks, aws_default_allocator(), 1024, aws_hash_ptr, aws_ptr_eq, NULL, s_destroy_stacktrace));
    }
}
static void s_alloc_tracer_track(struct alloc_tracer *tracer, void *ptr, size_t size) {
    if (tracer->level == AWS_MEMTRACE_NONE) {
        return;
    }

    aws_atomic_fetch_add(&tracer->allocated, size);

    struct alloc_info *alloc = aws_mem_calloc(aws_default_allocator(), 1, sizeof(struct alloc_info));
    AWS_FATAL_ASSERT(alloc);
    alloc->size = size;
    aws_high_res_clock_get_ticks(&alloc->time);

    if (tracer->level == AWS_MEMTRACE_STACKS) {
        /* capture stack frames, skip 2 for this function and the allocation vtable function */
        AWS_VARIABLE_LENGTH_ARRAY(void *, stack_frames, (FRAMES_TO_SKIP + tracer->frames_per_stack));
        size_t stack_depth = aws_backtrace(stack_frames, FRAMES_TO_SKIP + tracer->frames_per_stack);
        if (stack_depth) {
            /* hash the stack pointers */
            struct aws_byte_cursor stack_cursor =
                aws_byte_cursor_from_array(stack_frames, stack_depth * sizeof(void *));
            uint64_t stack_id = aws_hash_byte_cursor_ptr(&stack_cursor);
            alloc->stack = stack_id; /* associate the stack with the alloc */
            aws_mutex_lock(&tracer->mutex);
            struct aws_hash_element *item = NULL;
            int was_created = 0;
            AWS_FATAL_ASSERT(
                AWS_OP_SUCCESS ==
                aws_hash_table_create(&tracer->stacks, (void *)(uintptr_t)stack_id, &item, &was_created));
            /* If this is a new stack, save it to the hash */
            if (was_created) {
                struct stack_trace *stack = aws_mem_calloc(
                    aws_default_allocator(),
                    1,
                    sizeof(struct stack_trace) + (sizeof(void *) * tracer->frames_per_stack));
                AWS_FATAL_ASSERT(stack);
                memcpy(
                    (void **)&stack->frames[0],
                    &stack_frames[FRAMES_TO_SKIP],
                    (stack_depth - FRAMES_TO_SKIP) * sizeof(void *));
                stack->depth = stack_depth - FRAMES_TO_SKIP;
                item->value = stack;
            }
            aws_mutex_unlock(&tracer->mutex);
        }
    }

    aws_mutex_lock(&tracer->mutex);
    AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_put(&tracer->allocs, ptr, alloc, NULL));
    aws_mutex_unlock(&tracer->mutex);
}
static void s_alloc_tracer_untrack(struct alloc_tracer *tracer, void *ptr) {
    if (tracer->level == AWS_MEMTRACE_NONE) {
        return;
    }

    aws_mutex_lock(&tracer->mutex);
    struct aws_hash_element *item;
    AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_find(&tracer->allocs, ptr, &item));
    /* because the tracer can be installed at any time, it is possible for an allocation to not
     * be tracked. Therefore, we make sure the find succeeds, but then check the returned
     * value */
    if (item) {
        AWS_FATAL_ASSERT(item->key == ptr && item->value);
        struct alloc_info *alloc = item->value;
        aws_atomic_fetch_sub(&tracer->allocated, alloc->size);
        s_destroy_alloc(item->value);
        AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_remove_element(&tracer->allocs, item));
    }
    aws_mutex_unlock(&tracer->mutex);
}
/* used only to resolve stacks -> trace, count, size at dump time */
struct stack_metadata {
    struct aws_string *trace;
    size_t count;
    size_t size;
};

static int s_collect_stack_trace(void *context, struct aws_hash_element *item) {
    struct alloc_tracer *tracer = context;
    struct aws_hash_table *all_stacks = &tracer->stacks;
    struct stack_metadata *stack_info = item->value;
    struct aws_hash_element *stack_item = NULL;
    AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_find(all_stacks, item->key, &stack_item));
    AWS_FATAL_ASSERT(stack_item);
    struct stack_trace *stack = stack_item->value;
    void *const *stack_frames = &stack->frames[0];

    /* convert the frame pointers to symbols, and concat into a buffer */
    char buf[4096] = {0};
    struct aws_byte_buf stacktrace = aws_byte_buf_from_empty_array(buf, AWS_ARRAY_SIZE(buf));
    struct aws_byte_cursor newline = aws_byte_cursor_from_c_str("\n");
    char **symbols = aws_backtrace_symbols(stack_frames, stack->depth);
    for (size_t idx = 0; idx < stack->depth; ++idx) {
        if (idx > 0) {
            aws_byte_buf_append(&stacktrace, &newline);
        }
        const char *caller = symbols[idx];
        if (!caller || !caller[0]) {
            break;
        }
        struct aws_byte_cursor cursor = aws_byte_cursor_from_c_str(caller);
        aws_byte_buf_append(&stacktrace, &cursor);
    }
    aws_mem_release(aws_default_allocator(), symbols);

    /* record the resultant buffer as a string */
    stack_info->trace = aws_string_new_from_array(aws_default_allocator(), stacktrace.buffer, stacktrace.len);
    AWS_FATAL_ASSERT(stack_info->trace);
    aws_byte_buf_clean_up(&stacktrace);
    return AWS_COMMON_HASH_TABLE_ITER_CONTINUE;
}

static int s_stack_info_compare_size(const void *a, const void *b) {
    const struct stack_metadata *stack_a = *(const struct stack_metadata **)a;
    const struct stack_metadata *stack_b = *(const struct stack_metadata **)b;
    return stack_b->size > stack_a->size;
}

static int s_stack_info_compare_count(const void *a, const void *b) {
    const struct stack_metadata *stack_a = *(const struct stack_metadata **)a;
    const struct stack_metadata *stack_b = *(const struct stack_metadata **)b;
    return stack_b->count > stack_a->count;
}

static void s_stack_info_destroy(void *data) {
    struct stack_metadata *stack = data;
    struct aws_allocator *allocator = stack->trace->allocator;
    aws_string_destroy(stack->trace);
    aws_mem_release(allocator, stack);
}

/* tally up count/size per stack from all allocs */
static int s_collect_stack_stats(void *context, struct aws_hash_element *item) {
    struct aws_hash_table *stack_info = context;
    struct alloc_info *alloc = item->value;
    struct aws_hash_element *stack_item = NULL;
    int was_created = 0;
    AWS_FATAL_ASSERT(
        AWS_OP_SUCCESS ==
        aws_hash_table_create(stack_info, (void *)(uintptr_t)alloc->stack, &stack_item, &was_created));
    if (was_created) {
        stack_item->value = aws_mem_calloc(aws_default_allocator(), 1, sizeof(struct stack_metadata));
        AWS_FATAL_ASSERT(stack_item->value);
    }
    struct stack_metadata *stack = stack_item->value;
    stack->count++;
    stack->size += alloc->size;
    return AWS_COMMON_HASH_TABLE_ITER_CONTINUE;
}

static int s_insert_stacks(void *context, struct aws_hash_element *item) {
    struct aws_priority_queue *pq = context;
    struct stack_metadata *stack = item->value;
    AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_priority_queue_push(pq, &stack));
    return AWS_COMMON_HASH_TABLE_ITER_CONTINUE;
}

static int s_insert_allocs(void *context, struct aws_hash_element *item) {
    struct aws_priority_queue *allocs = context;
    struct alloc_info *alloc = item->value;
    AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_priority_queue_push(allocs, &alloc));
    return AWS_COMMON_HASH_TABLE_ITER_CONTINUE;
}

static int s_alloc_compare(const void *a, const void *b) {
    const struct alloc_info *alloc_a = *(const struct alloc_info **)a;
    const struct alloc_info *alloc_b = *(const struct alloc_info **)b;
    return alloc_a->time > alloc_b->time;
}
void aws_mem_tracer_dump(struct aws_allocator *trace_allocator) {
    struct alloc_tracer *tracer = trace_allocator->impl;
    if (tracer->level == AWS_MEMTRACE_NONE || aws_atomic_load_int(&tracer->allocated) == 0) {
        return;
    }

    aws_mutex_lock(&tracer->mutex);

    size_t num_allocs = aws_hash_table_get_entry_count(&tracer->allocs);
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "################################################################################");
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "# BEGIN MEMTRACE DUMP #");
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "################################################################################");
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE,
        "tracer: %zu bytes still allocated in %zu allocations",
        aws_atomic_load_int(&tracer->allocated),
        num_allocs);

    /* convert stacks from pointers -> symbols */
    struct aws_hash_table stack_info;
    AWS_ZERO_STRUCT(stack_info);
    if (tracer->level == AWS_MEMTRACE_STACKS) {
        AWS_FATAL_ASSERT(
            AWS_OP_SUCCESS ==
            aws_hash_table_init(
                &stack_info, aws_default_allocator(), 64, aws_hash_ptr, aws_ptr_eq, NULL, s_stack_info_destroy));
        /* collect active stacks, tally up sizes and counts */
        aws_hash_table_foreach(&tracer->allocs, s_collect_stack_stats, &stack_info);
        /* collect stack traces for active stacks */
        aws_hash_table_foreach(&stack_info, s_collect_stack_trace, tracer);
    }

    /* sort allocs by time */
    struct aws_priority_queue allocs;
    AWS_FATAL_ASSERT(
        AWS_OP_SUCCESS ==
        aws_priority_queue_init_dynamic(
            &allocs, aws_default_allocator(), num_allocs, sizeof(struct alloc_info *), s_alloc_compare));
    aws_hash_table_foreach(&tracer->allocs, s_insert_allocs, &allocs);

    /* dump allocs by time */
    AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
    AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Leaks in order of allocation:");
    AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
    while (aws_priority_queue_size(&allocs)) {
        struct alloc_info *alloc = NULL;
        aws_priority_queue_pop(&allocs, &alloc);
        if (alloc->stack) {
            struct aws_hash_element *item = NULL;
            AWS_FATAL_ASSERT(
                AWS_OP_SUCCESS == aws_hash_table_find(&stack_info, (void *)(uintptr_t)alloc->stack, &item));
            struct stack_metadata *stack = item->value;
            AWS_LOGF_TRACE(
                AWS_LS_COMMON_MEMTRACE,
                "ALLOC %zu bytes, stacktrace:\n%s\n",
                alloc->size,
                aws_string_c_str(stack->trace));
        } else {
            AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "ALLOC %zu bytes", alloc->size);
        }
    }

    aws_priority_queue_clean_up(&allocs);

    if (tracer->level == AWS_MEMTRACE_STACKS) {
        size_t num_stacks = aws_hash_table_get_entry_count(&stack_info);
        /* sort stacks by total size leaked */
        struct aws_priority_queue stacks_by_size;
        AWS_FATAL_ASSERT(
            AWS_OP_SUCCESS == aws_priority_queue_init_dynamic(
                                  &stacks_by_size,
                                  aws_default_allocator(),
                                  num_stacks,
                                  sizeof(struct stack_metadata *),
                                  s_stack_info_compare_size));
        aws_hash_table_foreach(&stack_info, s_insert_stacks, &stacks_by_size);
        AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
        AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Stacks by bytes leaked:");
        AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
        while (aws_priority_queue_size(&stacks_by_size) > 0) {
            struct stack_metadata *stack = NULL;
            aws_priority_queue_pop(&stacks_by_size, &stack);
            AWS_LOGF_TRACE(
                AWS_LS_COMMON_MEMTRACE,
                "%zu bytes in %zu allocations:\n%s\n",
                stack->size,
                stack->count,
                aws_string_c_str(stack->trace));
        }
        aws_priority_queue_clean_up(&stacks_by_size);

        /* sort stacks by number of leaks */
        struct aws_priority_queue stacks_by_count;
        AWS_FATAL_ASSERT(
            AWS_OP_SUCCESS == aws_priority_queue_init_dynamic(
                                  &stacks_by_count,
                                  aws_default_allocator(),
                                  num_stacks,
                                  sizeof(struct stack_metadata *),
                                  s_stack_info_compare_count));
        AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
        AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Stacks by number of leaks:");
        AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
        aws_hash_table_foreach(&stack_info, s_insert_stacks, &stacks_by_count);
        while (aws_priority_queue_size(&stacks_by_count) > 0) {
            struct stack_metadata *stack = NULL;
            aws_priority_queue_pop(&stacks_by_count, &stack);
            AWS_LOGF_TRACE(
                AWS_LS_COMMON_MEMTRACE,
                "%zu allocations leaking %zu bytes:\n%s\n",
                stack->count,
                stack->size,
                aws_string_c_str(stack->trace));
        }
        aws_priority_queue_clean_up(&stacks_by_count);
        aws_hash_table_clean_up(&stack_info);
    }

    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "################################################################################");
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "# END MEMTRACE DUMP #");
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "################################################################################");

    aws_mutex_unlock(&tracer->mutex);
}
static void *s_trace_mem_acquire(struct aws_allocator *allocator, size_t size) {
    struct alloc_tracer *tracer = allocator->impl;
    void *ptr = aws_mem_acquire(tracer->traced_allocator, size);
    if (ptr) {
        s_alloc_tracer_track(tracer, ptr, size);
    }
    return ptr;
}

static void s_trace_mem_release(struct aws_allocator *allocator, void *ptr) {
    struct alloc_tracer *tracer = allocator->impl;
    s_alloc_tracer_untrack(tracer, ptr);
    aws_mem_release(tracer->traced_allocator, ptr);
}

static void *s_trace_mem_realloc(struct aws_allocator *allocator, void *old_ptr, size_t old_size, size_t new_size) {
    struct alloc_tracer *tracer = allocator->impl;
    void *new_ptr = old_ptr;
    if (aws_mem_realloc(tracer->traced_allocator, &new_ptr, old_size, new_size)) {
        return NULL;
    }

    s_alloc_tracer_untrack(tracer, old_ptr);
    s_alloc_tracer_track(tracer, new_ptr, new_size);

    return new_ptr;
}

static void *s_trace_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size) {
    struct alloc_tracer *tracer = allocator->impl;
    void *ptr = aws_mem_calloc(tracer->traced_allocator, num, size);
    if (ptr) {
        s_alloc_tracer_track(tracer, ptr, num * size);
    }
    return ptr;
}
struct aws_allocator *aws_mem_tracer_new(
    struct aws_allocator *allocator,
    struct aws_allocator *deprecated,
    enum aws_mem_trace_level level,
    size_t frames_per_stack) {

    /* deprecated customizable bookkeeping allocator */
    (void)deprecated;

    struct alloc_tracer *tracer = NULL;
    struct aws_allocator *trace_allocator = NULL;
    aws_mem_acquire_many(
        aws_default_allocator(),
        2,
        &tracer,
        sizeof(struct alloc_tracer),
        &trace_allocator,
        sizeof(struct aws_allocator));

    AWS_FATAL_ASSERT(trace_allocator);
    AWS_FATAL_ASSERT(tracer);

    AWS_ZERO_STRUCT(*trace_allocator);
    AWS_ZERO_STRUCT(*tracer);

    /* copy the template vtable */
    *trace_allocator = s_trace_allocator;
    trace_allocator->impl = tracer;

    s_alloc_tracer_init(tracer, allocator, level, frames_per_stack);
    return trace_allocator;
}
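
/* Illustrative usage sketch (not part of this file's build): wrap an allocator with a
 * tracer, route allocations through the returned allocator, then dump and tear down.
 * Every function referenced here is part of the public API defined in this file; the
 * wrapped allocator, trace level, and frame count are arbitrary example choices.
 *
 *     struct aws_allocator *tracer =
 *         aws_mem_tracer_new(aws_default_allocator(), NULL, AWS_MEMTRACE_STACKS, 16);
 *
 *     void *leak = aws_mem_acquire(tracer, 64);   // tracked allocation
 *     (void)leak;                                 // intentionally not released
 *
 *     aws_mem_tracer_dump(tracer);                // logs outstanding allocations at TRACE level
 *     aws_mem_tracer_destroy(tracer);             // returns the wrapped allocator
 */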
struct aws_allocator *aws_mem_tracer_destroy(struct aws_allocator *trace_allocator) {
    struct alloc_tracer *tracer = trace_allocator->impl;
    struct aws_allocator *allocator = tracer->traced_allocator;

    if (tracer->level != AWS_MEMTRACE_NONE) {
        aws_mutex_lock(&tracer->mutex);
        aws_hash_table_clean_up(&tracer->allocs);
        aws_hash_table_clean_up(&tracer->stacks);
        aws_mutex_unlock(&tracer->mutex);
        aws_mutex_clean_up(&tracer->mutex);
    }

    aws_mem_release(aws_default_allocator(), tracer);
    /* trace_allocator is freed as part of the block tracer was allocated in */
    return allocator;
}
size_t aws_mem_tracer_bytes(struct aws_allocator *trace_allocator) {
    struct alloc_tracer *tracer = trace_allocator->impl;
    if (tracer->level == AWS_MEMTRACE_NONE) {
        return 0;
    }

    return aws_atomic_load_int(&tracer->allocated);
}

size_t aws_mem_tracer_count(struct aws_allocator *trace_allocator) {
    struct alloc_tracer *tracer = trace_allocator->impl;
    if (tracer->level == AWS_MEMTRACE_NONE) {
        return 0;
    }

    aws_mutex_lock(&tracer->mutex);
    size_t count = aws_hash_table_get_entry_count(&tracer->allocs);
    aws_mutex_unlock(&tracer->mutex);
    return count;
}
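
/* Illustrative leak-check sketch (not part of this file's build): a test can compare the
 * tracer's live-byte and live-allocation counters before and after the code under test.
 * run_code_under_test() is a hypothetical placeholder; AWS_FATAL_ASSERT stands in for
 * whatever assertion macro the test framework provides.
 *
 *     struct aws_allocator *tracer =
 *         aws_mem_tracer_new(aws_default_allocator(), NULL, AWS_MEMTRACE_BYTES, 0);
 *
 *     run_code_under_test(tracer);   // hypothetical: uses the tracer as its allocator
 *
 *     AWS_FATAL_ASSERT(aws_mem_tracer_bytes(tracer) == 0);
 *     AWS_FATAL_ASSERT(aws_mem_tracer_count(tracer) == 0);
 *     aws_mem_tracer_destroy(tracer);
 */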