ebpf_softirq.c

// SPDX-License-Identifier: GPL-3.0-or-later

#include "ebpf.h"
#include "ebpf_softirq.h"

struct config softirq_config = { .first_section = NULL,
    .last_section = NULL,
    .mutex = NETDATA_MUTEX_INITIALIZER,
    .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
               .rwlock = AVL_LOCK_INITIALIZER } };
#define SOFTIRQ_MAP_LATENCY 0

static ebpf_local_maps_t softirq_maps[] = {
    {
        .name = "tbl_softirq",
        .internal_input = NETDATA_SOFTIRQ_MAX_IRQS,
        .user_input = 0,
        .type = NETDATA_EBPF_MAP_STATIC,
        .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
#ifdef LIBBPF_MAJOR_VERSION
        .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
#endif
    },
    /* end */
    {
        .name = NULL,
        .internal_input = 0,
        .user_input = 0,
        .type = NETDATA_EBPF_MAP_CONTROLLER,
        .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
#ifdef LIBBPF_MAJOR_VERSION
        .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
#endif
    }
};
#define SOFTIRQ_TP_CLASS_IRQ "irq"

static ebpf_tracepoint_t softirq_tracepoints[] = {
    {.enabled = false, .class = SOFTIRQ_TP_CLASS_IRQ, .event = "softirq_entry"},
    {.enabled = false, .class = SOFTIRQ_TP_CLASS_IRQ, .event = "softirq_exit"},
    /* end */
    {.enabled = false, .class = NULL, .event = NULL}
};
// these must be in the order defined by the kernel:
// https://elixir.bootlin.com/linux/v5.12.19/source/include/trace/events/irq.h#L13
static softirq_val_t softirq_vals[] = {
    {.name = "HI", .latency = 0},
    {.name = "TIMER", .latency = 0},
    {.name = "NET_TX", .latency = 0},
    {.name = "NET_RX", .latency = 0},
    {.name = "BLOCK", .latency = 0},
    {.name = "IRQ_POLL", .latency = 0},
    {.name = "TASKLET", .latency = 0},
    {.name = "SCHED", .latency = 0},
    {.name = "HRTIMER", .latency = 0},
    {.name = "RCU", .latency = 0},
};
// tmp store for soft IRQ values we get from a per-CPU eBPF map.
static softirq_ebpf_val_t *softirq_ebpf_vals = NULL;
/**
 * Softirq Free
 *
 * Cleanup variables after child threads to stop
 *
 * @param em the module structure for this thread.
 */
static void ebpf_softirq_free(ebpf_module_t *em)
{
    pthread_mutex_lock(&ebpf_exit_cleanup);
    em->enabled = NETDATA_THREAD_EBPF_STOPPING;
    pthread_mutex_unlock(&ebpf_exit_cleanup);

    for (int i = 0; softirq_tracepoints[i].class != NULL; i++) {
        ebpf_disable_tracepoint(&softirq_tracepoints[i]);
    }
    freez(softirq_ebpf_vals);

    pthread_mutex_lock(&ebpf_exit_cleanup);
    em->enabled = NETDATA_THREAD_EBPF_STOPPED;
    pthread_mutex_unlock(&ebpf_exit_cleanup);
}
/**
 * Cleanup
 *
 * Clean up allocated memory.
 *
 * @param ptr thread data.
 */
static void softirq_cleanup(void *ptr)
{
    ebpf_module_t *em = (ebpf_module_t *)ptr;
    ebpf_softirq_free(em);
}
/*****************************************************************
 * MAIN LOOP
 *****************************************************************/

/**
 * Read Latency Map
 *
 * Read data from the kernel map to plot for users.
 *
 * @param maps_per_core do I need to read all cores?
 */
static void softirq_read_latency_map(int maps_per_core)
{
    int fd = softirq_maps[SOFTIRQ_MAP_LATENCY].map_fd;
    int i;
    size_t length = sizeof(softirq_ebpf_val_t);
    if (maps_per_core)
        length *= ebpf_nprocs;
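
    // When the map is per-CPU, a single bpf_map_lookup_elem() call fills one
    // softirq_ebpf_val_t per CPU, so the buffer reset below must cover `length` bytes.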
    for (i = 0; i < NETDATA_SOFTIRQ_MAX_IRQS; i++) {
        int test = bpf_map_lookup_elem(fd, &i, softirq_ebpf_vals);
        if (unlikely(test < 0)) {
            continue;
        }

        uint64_t total_latency = 0;
        int cpu_i;
        int end = (maps_per_core) ? ebpf_nprocs : 1;
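        // Aggregate the per-CPU latencies into a single value for this soft IRQ
        // (only slot 0 is read when the data is not kept per core).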
        for (cpu_i = 0; cpu_i < end; cpu_i++) {
            total_latency += softirq_ebpf_vals[cpu_i].latency / 1000;
        }

        softirq_vals[i].latency = total_latency;
        memset(softirq_ebpf_vals, 0, length);
    }
}
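
/**
 * Create charts
 *
 * Create the softirq_latency chart on the system group.
 *
 * @param update_every data collection interval, in seconds.
 */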
static void softirq_create_charts(int update_every)
{
    ebpf_create_chart(
        NETDATA_EBPF_SYSTEM_GROUP,
        "softirq_latency",
        "Software IRQ latency",
        EBPF_COMMON_DIMENSION_MILLISECONDS,
        "softirqs",
        NULL,
        NETDATA_EBPF_CHART_TYPE_STACKED,
        NETDATA_CHART_PRIO_SYSTEM_SOFTIRQS + 1,
        NULL, NULL, 0, update_every,
        NETDATA_EBPF_MODULE_NAME_SOFTIRQ
    );

    fflush(stdout);
}
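
/**
 * Create dimensions
 *
 * Add one incremental dimension per soft IRQ type to the latency chart.
 */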
static void softirq_create_dims()
{
    uint32_t i;
    for (i = 0; i < NETDATA_SOFTIRQ_MAX_IRQS; i++) {
        ebpf_write_global_dimension(
            softirq_vals[i].name, softirq_vals[i].name,
            ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]
        );
    }
}
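
/**
 * Write dimensions
 *
 * Send the latest latency value of every soft IRQ type to the chart.
 */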
static inline void softirq_write_dims()
{
    uint32_t i;
    for (i = 0; i < NETDATA_SOFTIRQ_MAX_IRQS; i++) {
        write_chart_dimension(softirq_vals[i].name, softirq_vals[i].latency);
    }
}
/**
 * Main loop for this collector.
 */
static void softirq_collector(ebpf_module_t *em)
{
    softirq_ebpf_vals = callocz(ebpf_nprocs, sizeof(softirq_ebpf_val_t));

    // create chart and static dims.
    pthread_mutex_lock(&lock);
    softirq_create_charts(em->update_every);
    softirq_create_dims();
    ebpf_update_stats(&plugin_statistics, em);
    ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps);
    pthread_mutex_unlock(&lock);

    // loop and read from published data until ebpf plugin is closed.
    heartbeat_t hb;
    heartbeat_init(&hb);
    int update_every = em->update_every;
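    // Start one tick below update_every so the first heartbeat already publishes a data point.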
    int counter = update_every - 1;
    int maps_per_core = em->maps_per_core;
    // This will be cancelled by its parent
    while (!ebpf_exit_plugin) {
        (void)heartbeat_next(&hb, USEC_PER_SEC);
        if (ebpf_exit_plugin || ++counter != update_every)
            continue;

        counter = 0;
        softirq_read_latency_map(maps_per_core);
        pthread_mutex_lock(&lock);

        // write dims now for all hitherto discovered IRQs.
        write_begin_chart(NETDATA_EBPF_SYSTEM_GROUP, "softirq_latency");
        softirq_write_dims();
        write_end_chart();

        pthread_mutex_unlock(&lock);
    }
}
/*****************************************************************
 * EBPF SOFTIRQ THREAD
 *****************************************************************/

/**
 * Soft IRQ latency thread.
 *
 * @param ptr a `ebpf_module_t *`.
 * @return always NULL.
 */
void *ebpf_softirq_thread(void *ptr)
{
    netdata_thread_cleanup_push(softirq_cleanup, ptr);

    ebpf_module_t *em = (ebpf_module_t *)ptr;
    em->maps = softirq_maps;
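
    // The softirq_entry/softirq_exit tracepoints must be enabled before the
    // eBPF program is loaded; without them there is nothing to measure.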
    if (ebpf_enable_tracepoints(softirq_tracepoints) == 0) {
        goto endsoftirq;
    }

#ifdef LIBBPF_MAJOR_VERSION
    ebpf_define_map_type(em->maps, em->maps_per_core, running_on_kernel);
#endif
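
    // Load and attach the eBPF program; bail out if no probe links were created.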
    em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
    if (!em->probe_links) {
        goto endsoftirq;
    }

    softirq_collector(em);

endsoftirq:
    ebpf_update_disabled_plugin_stats(em);

    netdata_thread_cleanup_pop(1);
    return NULL;
}