hook.c

#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/seq.h"

typedef struct hooks_internal_s hooks_internal_t;
struct hooks_internal_s {
	hooks_t hooks;
	bool in_use;
};
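
/*
 * Each slot in hooks is published via the seq_* helpers, so invocation can
 * snapshot a slot without holding a lock; installation and removal serialize
 * on hooks_mu.  nhooks counts the slots currently in use so the common
 * no-hooks case can be detected with a single relaxed load.
 */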
seq_define(hooks_internal_t, hooks)

static atomic_u_t nhooks = ATOMIC_INIT(0);
static seq_hooks_t hooks[HOOK_MAX];
static malloc_mutex_t hooks_mu;

bool
hook_boot() {
	return malloc_mutex_init(&hooks_mu, "hooks", WITNESS_RANK_HOOK,
	    malloc_mutex_rank_exclusive);
}
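
/*
 * Find an unused slot, copy the caller's hooks into it, and mark it in use.
 * The slot address doubles as the opaque handle handed back to the caller
 * for later removal.  Caller must hold hooks_mu.
 */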
static void *
hook_install_locked(hooks_t *to_install) {
	hooks_internal_t hooks_internal;
	for (int i = 0; i < HOOK_MAX; i++) {
		bool success = seq_try_load_hooks(&hooks_internal, &hooks[i]);
		/* We hold mu; no concurrent access. */
		assert(success);
		if (!hooks_internal.in_use) {
			hooks_internal.hooks = *to_install;
			hooks_internal.in_use = true;
			seq_store_hooks(&hooks[i], &hooks_internal);
			atomic_store_u(&nhooks,
			    atomic_load_u(&nhooks, ATOMIC_RELAXED) + 1,
			    ATOMIC_RELAXED);
			return &hooks[i];
		}
	}
	return NULL;
}

void *
hook_install(tsdn_t *tsdn, hooks_t *to_install) {
	malloc_mutex_lock(tsdn, &hooks_mu);
	void *ret = hook_install_locked(to_install);
	if (ret != NULL) {
		tsd_global_slow_inc(tsdn);
	}
	malloc_mutex_unlock(tsdn, &hooks_mu);
	return ret;
}
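
/*
 * Mark a previously installed slot unused again.  to_remove is the opaque
 * handle that hook_install() returned.  Caller must hold hooks_mu.
 */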
static void
hook_remove_locked(seq_hooks_t *to_remove) {
	hooks_internal_t hooks_internal;
	bool success = seq_try_load_hooks(&hooks_internal, to_remove);
	/* We hold mu; no concurrent access. */
	assert(success);

	/* Should only remove hooks that were added. */
	assert(hooks_internal.in_use);
	hooks_internal.in_use = false;
	seq_store_hooks(to_remove, &hooks_internal);
	atomic_store_u(&nhooks, atomic_load_u(&nhooks, ATOMIC_RELAXED) - 1,
	    ATOMIC_RELAXED);
}

void
hook_remove(tsdn_t *tsdn, void *opaque) {
	if (config_debug) {
		char *hooks_begin = (char *)&hooks[0];
		char *hooks_end = (char *)&hooks[HOOK_MAX];
		char *hook = (char *)opaque;
		assert(hooks_begin <= hook && hook < hooks_end
		    && (hook - hooks_begin) % sizeof(seq_hooks_t) == 0);
	}
	malloc_mutex_lock(tsdn, &hooks_mu);
	hook_remove_locked((seq_hooks_t *)opaque);
	tsd_global_slow_dec(tsdn);
	malloc_mutex_unlock(tsdn, &hooks_mu);
}
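
/*
 * Iterate over every installed hook without taking hooks_mu.  Slots whose
 * seq_try_load fails (a concurrent install/remove is in progress) or that
 * are not in use are skipped.
 */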
#define FOR_EACH_HOOK_BEGIN(hooks_internal_ptr)				\
for (int for_each_hook_counter = 0;					\
    for_each_hook_counter < HOOK_MAX;					\
    for_each_hook_counter++) {						\
	bool for_each_hook_success = seq_try_load_hooks(		\
	    (hooks_internal_ptr), &hooks[for_each_hook_counter]);	\
	if (!for_each_hook_success) {					\
		continue;						\
	}								\
	if (!(hooks_internal_ptr)->in_use) {				\
		continue;						\
	}

#define FOR_EACH_HOOK_END						\
}

static bool *
hook_reentrantp() {
	/*
	 * We prevent user reentrancy within hooks.  This is basically just a
	 * thread-local bool that triggers an early-exit.
	 *
	 * We don't fold in_hook into reentrancy.  There are two reasons for
	 * this:
	 * - Right now, we turn on reentrancy during things like extent hook
	 *   execution.  Allocating during extent hooks is not officially
	 *   supported, but we don't want to break it for the time being.
	 *   These sorts of allocations should probably still be hooked,
	 *   though.
	 * - If a hook allocates, we may want it to be relatively fast (after
	 *   all, it executes on every allocator operation).  Turning on
	 *   reentrancy is a fairly heavyweight mode (disabling tcache,
	 *   redirecting to arena 0, etc.).  It's possible we may one day want
	 *   to turn on reentrant mode here, if it proves too difficult to
	 *   keep this working.  But that's fairly easy for us to see; OTOH,
	 *   people not using hooks because they're too slow is easy for us
	 *   to miss.
	 *
	 * The tricky part is that this code might get invoked even if we
	 * don't have access to tsd.  This function mimics getting a pointer
	 * to thread-local data, except that it might secretly return a
	 * pointer to some global data if we know that the caller will take
	 * the early-exit path.  If we return a bool that indicates that we
	 * are reentrant, then the caller will go down the early exit path,
	 * leaving the global untouched.
	 */
	static bool in_hook_global = true;
	tsdn_t *tsdn = tsdn_fetch();
	bool *in_hook = tsdn_in_hookp_get(tsdn);
	if (in_hook != NULL) {
		return in_hook;
	}
	return &in_hook_global;
}
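
/*
 * Shared entry/exit for the invoke functions: return immediately if no hooks
 * are installed or if this thread is already inside a hook; otherwise set the
 * reentrancy flag for the duration of the call.
 */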
#define HOOK_PROLOGUE							\
	if (likely(atomic_load_u(&nhooks, ATOMIC_RELAXED) == 0)) {	\
		return;							\
	}								\
	bool *in_hook = hook_reentrantp();				\
	if (*in_hook) {							\
		return;							\
	}								\
	*in_hook = true;

#define HOOK_EPILOGUE							\
	*in_hook = false;
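
/*
 * Invoke each installed hook of the relevant kind.  These run on every
 * allocator operation, so the no-hook case exits early via HOOK_PROLOGUE.
 */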
void
hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
    uintptr_t args_raw[3]) {
	HOOK_PROLOGUE

	hooks_internal_t hook;
	FOR_EACH_HOOK_BEGIN(&hook)
		hook_alloc h = hook.hooks.alloc_hook;
		if (h != NULL) {
			h(hook.hooks.extra, type, result, result_raw, args_raw);
		}
	FOR_EACH_HOOK_END

	HOOK_EPILOGUE
}

void
hook_invoke_dalloc(hook_dalloc_t type, void *address, uintptr_t args_raw[3]) {
	HOOK_PROLOGUE

	hooks_internal_t hook;
	FOR_EACH_HOOK_BEGIN(&hook)
		hook_dalloc h = hook.hooks.dalloc_hook;
		if (h != NULL) {
			h(hook.hooks.extra, type, address, args_raw);
		}
	FOR_EACH_HOOK_END

	HOOK_EPILOGUE
}

void
hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize,
    size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]) {
	HOOK_PROLOGUE

	hooks_internal_t hook;
	FOR_EACH_HOOK_BEGIN(&hook)
		hook_expand h = hook.hooks.expand_hook;
		if (h != NULL) {
			h(hook.hooks.extra, type, address, old_usize, new_usize,
			    result_raw, args_raw);
		}
	FOR_EACH_HOOK_END

	HOOK_EPILOGUE
}