  1. // SPDX-License-Identifier: GPL-3.0-or-later
  2. #include "ebpf.h"
  3. #include "ebpf_softirq.h"
// Module configuration, initialized empty.  Sections are indexed by an AVL
// tree (keyed with appconfig_section_compare) and guarded by the embedded
// mutex/rwlock pair.
struct config softirq_config = { .first_section = NULL,
                                 .last_section = NULL,
                                 .mutex = NETDATA_MUTEX_INITIALIZER,
                                 .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
                                            .rwlock = AVL_LOCK_INITIALIZER } };
// Index of the latency table inside `softirq_maps`.
#define SOFTIRQ_MAP_LATENCY 0

// Maps shared between the kernel eBPF program and this collector.  The first
// entry holds one latency slot per soft IRQ vector; the NULL-named entry both
// terminates the list and is typed as the controller map.
static ebpf_local_maps_t softirq_maps[] = {
    {
        .name = "tbl_softirq",
        .internal_input = NETDATA_SOFTIRQ_MAX_IRQS,
        .user_input = 0,
        .type = NETDATA_EBPF_MAP_STATIC,
        .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
#ifdef LIBBPF_MAJOR_VERSION
        .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
#endif
    },
    /* end */
    {
        .name = NULL,
        .internal_input = 0,
        .user_input = 0,
        .type = NETDATA_EBPF_MAP_CONTROLLER,
        .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
#ifdef LIBBPF_MAJOR_VERSION
        .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
#endif
    }
};
// Tracepoint class shared by both events below.
#define SOFTIRQ_TP_CLASS_IRQ "irq"

// Tracepoints this module enables (soft IRQ entry/exit in the `irq` class);
// presumably the kernel program timestamps these to derive latency — confirm
// against the eBPF side.  A NULL class/event pair terminates the list.
static ebpf_tracepoint_t softirq_tracepoints[] = {
    {.enabled = false, .class = SOFTIRQ_TP_CLASS_IRQ, .event = "softirq_entry"},
    {.enabled = false, .class = SOFTIRQ_TP_CLASS_IRQ, .event = "softirq_exit"},
    /* end */
    {.enabled = false, .class = NULL, .event = NULL}
};
// Per-vector accumulated latency, published as chart dimensions.
// These must be in the order defined by the kernel:
// https://elixir.bootlin.com/linux/v5.12.19/source/include/trace/events/irq.h#L13
static softirq_val_t softirq_vals[] = {
    {.name = "HI", .latency = 0},
    {.name = "TIMER", .latency = 0},
    {.name = "NET_TX", .latency = 0},
    {.name = "NET_RX", .latency = 0},
    {.name = "BLOCK", .latency = 0},
    {.name = "IRQ_POLL", .latency = 0},
    {.name = "TASKLET", .latency = 0},
    {.name = "SCHED", .latency = 0},
    {.name = "HRTIMER", .latency = 0},
    {.name = "RCU", .latency = 0},
};

// tmp store for soft IRQ values we get from a per-CPU eBPF map.
// Allocated in softirq_collector() (one slot per CPU) and freed in
// softirq_cleanup().
static softirq_ebpf_val_t *softirq_ebpf_vals = NULL;
  56. /**
  57. * Obsolete global
  58. *
  59. * Obsolete global charts created by thread.
  60. *
  61. * @param em a pointer to `struct ebpf_module`
  62. */
  63. static void ebpf_obsolete_softirq_global(ebpf_module_t *em)
  64. {
  65. ebpf_write_chart_obsolete(NETDATA_EBPF_SYSTEM_GROUP,
  66. "softirq_latency",
  67. "Software IRQ latency",
  68. EBPF_COMMON_DIMENSION_MILLISECONDS,
  69. "softirqs",
  70. NETDATA_EBPF_CHART_TYPE_STACKED,
  71. NULL,
  72. NETDATA_CHART_PRIO_SYSTEM_SOFTIRQS+1,
  73. em->update_every);
  74. }
/**
 * Cleanup
 *
 * Clean up allocated memory and detach from the kernel: retire the chart,
 * unload the eBPF program, disable tracepoints, and publish the final
 * thread state.
 *
 * @param ptr thread data (an `ebpf_module_t *`).
 */
static void softirq_cleanup(void *ptr)
{
    ebpf_module_t *em = (ebpf_module_t *)ptr;

    // Only retire the chart if the thread actually reached its running state.
    if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
        pthread_mutex_lock(&lock);
        ebpf_obsolete_softirq_global(em);
        pthread_mutex_unlock(&lock);
        fflush(stdout);
    }

    // Subtract this module's kernel-map memory from the plugin statistics.
    ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_REMOVE);

    // Unload the legacy eBPF program and its probe links, if they were loaded.
    if (em->objects) {
        ebpf_unload_legacy_code(em->objects, em->probe_links);
        em->objects = NULL;
        em->probe_links = NULL;
    }

    // Disable every tracepoint in the NULL-terminated list.
    for (int i = 0; softirq_tracepoints[i].class != NULL; i++) {
        ebpf_disable_tracepoint(&softirq_tracepoints[i]);
    }

    freez(softirq_ebpf_vals);
    softirq_ebpf_vals = NULL;

    // Publish the stopped state under the exit-cleanup lock.
    pthread_mutex_lock(&ebpf_exit_cleanup);
    em->enabled = NETDATA_THREAD_EBPF_STOPPED;
    ebpf_update_stats(&plugin_statistics, em);
    pthread_mutex_unlock(&ebpf_exit_cleanup);
}
  107. /*****************************************************************
  108. * MAIN LOOP
  109. *****************************************************************/
  110. /**
  111. * Read Latency Map
  112. *
  113. * Read data from kernel ring to plot for users.
  114. *
  115. * @param maps_per_core do I need to read all cores?
  116. */
  117. static void softirq_read_latency_map(int maps_per_core)
  118. {
  119. int fd = softirq_maps[SOFTIRQ_MAP_LATENCY].map_fd;
  120. int i;
  121. size_t length = sizeof(softirq_ebpf_val_t);
  122. if (maps_per_core)
  123. length *= ebpf_nprocs;
  124. for (i = 0; i < NETDATA_SOFTIRQ_MAX_IRQS; i++) {
  125. int test = bpf_map_lookup_elem(fd, &i, softirq_ebpf_vals);
  126. if (unlikely(test < 0)) {
  127. continue;
  128. }
  129. uint64_t total_latency = 0;
  130. int cpu_i;
  131. int end = (maps_per_core) ? ebpf_nprocs : 1;
  132. for (cpu_i = 0; cpu_i < end; cpu_i++) {
  133. total_latency += softirq_ebpf_vals[cpu_i].latency/1000;
  134. }
  135. softirq_vals[i].latency = total_latency;
  136. memset(softirq_ebpf_vals, 0, length);
  137. }
  138. }
  139. static void softirq_create_charts(int update_every)
  140. {
  141. ebpf_create_chart(
  142. NETDATA_EBPF_SYSTEM_GROUP,
  143. "softirq_latency",
  144. "Software IRQ latency",
  145. EBPF_COMMON_DIMENSION_MILLISECONDS,
  146. "softirqs",
  147. NULL,
  148. NETDATA_EBPF_CHART_TYPE_STACKED,
  149. NETDATA_CHART_PRIO_SYSTEM_SOFTIRQS+1,
  150. NULL, NULL, 0, update_every,
  151. NETDATA_EBPF_MODULE_NAME_SOFTIRQ
  152. );
  153. fflush(stdout);
  154. }
  155. static void softirq_create_dims()
  156. {
  157. uint32_t i;
  158. for (i = 0; i < NETDATA_SOFTIRQ_MAX_IRQS; i++) {
  159. ebpf_write_global_dimension(
  160. softirq_vals[i].name, softirq_vals[i].name,
  161. ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]
  162. );
  163. }
  164. }
  165. static inline void softirq_write_dims()
  166. {
  167. uint32_t i;
  168. for (i = 0; i < NETDATA_SOFTIRQ_MAX_IRQS; i++) {
  169. write_chart_dimension(softirq_vals[i].name, softirq_vals[i].latency);
  170. }
  171. }
/**
 * Main loop for this collector.
 *
 * Allocates the per-CPU read buffer, creates the chart and its dimensions,
 * then once every `update_every` seconds reads the kernel map and publishes
 * the values until the plugin exits or the module lifetime is reached.
 *
 * @param em a pointer to `struct ebpf_module`
 */
static void softirq_collector(ebpf_module_t *em)
{
    // One slot per CPU so a per-core map lookup can fill the whole buffer.
    softirq_ebpf_vals = callocz(ebpf_nprocs, sizeof(softirq_ebpf_val_t));

    // create chart and static dims.
    pthread_mutex_lock(&lock);
    softirq_create_charts(em->update_every);
    softirq_create_dims();
    ebpf_update_stats(&plugin_statistics, em);
    ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_ADD);
    pthread_mutex_unlock(&lock);

    // loop and read from published data until ebpf plugin is closed.
    heartbeat_t hb;
    heartbeat_init(&hb);
    int update_every = em->update_every;
    // Start one tick early so the first iteration publishes immediately.
    int counter = update_every - 1;
    int maps_per_core = em->maps_per_core;
    //This will be cancelled by its parent
    uint32_t running_time = 0;
    uint32_t lifetime = em->lifetime;
    while (!ebpf_exit_plugin && running_time < lifetime) {
        (void)heartbeat_next(&hb, USEC_PER_SEC);
        // Only act once per update interval; bail fast on shutdown.
        if (ebpf_exit_plugin || ++counter != update_every)
            continue;

        counter = 0;
        softirq_read_latency_map(maps_per_core);

        pthread_mutex_lock(&lock);
        // write dims now for all hitherto discovered IRQs.
        write_begin_chart(NETDATA_EBPF_SYSTEM_GROUP, "softirq_latency");
        softirq_write_dims();
        write_end_chart();
        pthread_mutex_unlock(&lock);

        // Advance running time; restart from one interval when an external
        // reset cleared em->running_time.
        pthread_mutex_lock(&ebpf_exit_cleanup);
        if (running_time && !em->running_time)
            running_time = update_every;
        else
            running_time += update_every;
        em->running_time = running_time;
        pthread_mutex_unlock(&ebpf_exit_cleanup);
    }
}
  215. /*****************************************************************
  216. * EBPF SOFTIRQ THREAD
  217. *****************************************************************/
/**
 * Soft IRQ latency thread.
 *
 * Entry point: enables the tracepoints, loads the eBPF program, and runs
 * the collector loop.  `softirq_cleanup` is registered as the cancellation
 * handler and always runs on exit (cleanup_pop(1)).
 *
 * @param ptr a `ebpf_module_t *`.
 * @return always NULL.
 */
void *ebpf_softirq_thread(void *ptr)
{
    netdata_thread_cleanup_push(softirq_cleanup, ptr);

    ebpf_module_t *em = (ebpf_module_t *)ptr;
    em->maps = softirq_maps;

    // Without the tracepoints the kernel program has nothing to hook.
    if (ebpf_enable_tracepoints(softirq_tracepoints) == 0) {
        goto endsoftirq;
    }

#ifdef LIBBPF_MAJOR_VERSION
    ebpf_define_map_type(em->maps, em->maps_per_core, running_on_kernel);
#endif
    em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
    if (!em->probe_links) {
        goto endsoftirq;
    }

    softirq_collector(em);

endsoftirq:
    // Record that this module is (now) disabled before the cleanup handler runs.
    ebpf_update_disabled_plugin_stats(em);

    netdata_thread_cleanup_pop(1);
    return NULL;
}