ebpf_softirq.c

// SPDX-License-Identifier: GPL-3.0-or-later

#include "ebpf.h"
#include "ebpf_softirq.h"

struct config softirq_config = { .first_section = NULL,
    .last_section = NULL,
    .mutex = NETDATA_MUTEX_INITIALIZER,
    .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
               .rwlock = AVL_LOCK_INITIALIZER } };
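
// Index of the latency map inside the softirq_maps array below.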
#define SOFTIRQ_MAP_LATENCY 0
static ebpf_local_maps_t softirq_maps[] = {
    {
        .name = "tbl_softirq",
        .internal_input = NETDATA_SOFTIRQ_MAX_IRQS,
        .user_input = 0,
        .type = NETDATA_EBPF_MAP_STATIC,
        .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
#ifdef LIBBPF_MAJOR_VERSION
        .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
#endif
    },
    /* end */
    {
        .name = NULL,
        .internal_input = 0,
        .user_input = 0,
        .type = NETDATA_EBPF_MAP_CONTROLLER,
        .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
#ifdef LIBBPF_MAJOR_VERSION
        .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
#endif
    }
};
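
/*
 * Sketch of the kernel-side map the "tbl_softirq" entry above is matched
 * against. This is an assumption for illustration only (the real definition
 * lives in the eBPF collector sources), but it shows the shape user space
 * expects: a per-CPU array with NETDATA_SOFTIRQ_MAX_IRQS slots holding
 * softirq_ebpf_val_t values.
 *
 *   struct {
 *       __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
 *       __uint(max_entries, NETDATA_SOFTIRQ_MAX_IRQS);
 *       __type(key, __u32);
 *       __type(value, softirq_ebpf_val_t);
 *   } tbl_softirq SEC(".maps");
 */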

#define SOFTIRQ_TP_CLASS_IRQ "irq"
static ebpf_tracepoint_t softirq_tracepoints[] = {
    {.enabled = false, .class = SOFTIRQ_TP_CLASS_IRQ, .event = "softirq_entry"},
    {.enabled = false, .class = SOFTIRQ_TP_CLASS_IRQ, .event = "softirq_exit"},
    /* end */
    {.enabled = false, .class = NULL, .event = NULL}
};
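// Both tracepoints are needed: latency is presumably measured inside the eBPF
// program as the time between softirq_entry and softirq_exit. The all-NULL
// element terminates the list.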

// these must be in the order defined by the kernel:
// https://elixir.bootlin.com/linux/v5.12.19/source/include/trace/events/irq.h#L13
static softirq_val_t softirq_vals[] = {
    {.name = "HI", .latency = 0},
    {.name = "TIMER", .latency = 0},
    {.name = "NET_TX", .latency = 0},
    {.name = "NET_RX", .latency = 0},
    {.name = "BLOCK", .latency = 0},
    {.name = "IRQ_POLL", .latency = 0},
    {.name = "TASKLET", .latency = 0},
    {.name = "SCHED", .latency = 0},
    {.name = "HRTIMER", .latency = 0},
    {.name = "RCU", .latency = 0},
};

// Temporary store for the soft IRQ values read from the per-CPU eBPF map.
static softirq_ebpf_val_t *softirq_ebpf_vals = NULL;
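// It is allocated in softirq_collector() with one slot per possible CPU (ebpf_nprocs).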

/**
 * Cleanup
 *
 * Unload the eBPF program, disable the tracepoints and free allocated memory.
 *
 * @param ptr thread data (an `ebpf_module_t *`).
 */
static void softirq_cleanup(void *ptr)
{
    ebpf_module_t *em = (ebpf_module_t *)ptr;

    if (em->objects)
        ebpf_unload_legacy_code(em->objects, em->probe_links);

    for (int i = 0; softirq_tracepoints[i].class != NULL; i++) {
        ebpf_disable_tracepoint(&softirq_tracepoints[i]);
    }
    freez(softirq_ebpf_vals);

    pthread_mutex_lock(&ebpf_exit_cleanup);
    em->enabled = NETDATA_THREAD_EBPF_STOPPED;
    pthread_mutex_unlock(&ebpf_exit_cleanup);
}

/*****************************************************************
 *  MAIN LOOP
 *****************************************************************/

/**
 * Read Latency Map
 *
 * Read data from the kernel map to plot it for users.
 *
 * @param maps_per_core set when the map keeps one value per core, so the
 *                      values of every core have to be summed.
 */
static void softirq_read_latency_map(int maps_per_core)
{
    int fd = softirq_maps[SOFTIRQ_MAP_LATENCY].map_fd;
    int i;
    size_t length = sizeof(softirq_ebpf_val_t);
    if (maps_per_core)
        length *= ebpf_nprocs;

    for (i = 0; i < NETDATA_SOFTIRQ_MAX_IRQS; i++) {
        int test = bpf_map_lookup_elem(fd, &i, softirq_ebpf_vals);
        if (unlikely(test < 0)) {
            continue;
        }

        // Sum the latency reported by every core, or read the single
        // aggregated value when the kernel does not keep per-core data.
        uint64_t total_latency = 0;
        int cpu_i;
        int end = (maps_per_core) ? ebpf_nprocs : 1;
        for (cpu_i = 0; cpu_i < end; cpu_i++) {
            total_latency += softirq_ebpf_vals[cpu_i].latency / 1000;
        }

        softirq_vals[i].latency = total_latency;
        memset(softirq_ebpf_vals, 0, length);
    }
}
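
/**
 * Create charts
 *
 * Create the chart used to publish software IRQ latency.
 *
 * @param update_every data collection interval.
 */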
static void softirq_create_charts(int update_every)
{
    ebpf_create_chart(
        NETDATA_EBPF_SYSTEM_GROUP,
        "softirq_latency",
        "Software IRQ latency",
        EBPF_COMMON_DIMENSION_MILLISECONDS,
        "softirqs",
        NULL,
        NETDATA_EBPF_CHART_TYPE_STACKED,
        NETDATA_CHART_PRIO_SYSTEM_SOFTIRQS + 1,
        NULL, NULL, 0, update_every,
        NETDATA_EBPF_MODULE_NAME_SOFTIRQ
    );

    fflush(stdout);
}
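
/**
 * Create dimensions
 *
 * Create one dimension per software IRQ listed in softirq_vals.
 */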
static void softirq_create_dims(void)
{
    uint32_t i;
    for (i = 0; i < NETDATA_SOFTIRQ_MAX_IRQS; i++) {
        ebpf_write_global_dimension(
            softirq_vals[i].name, softirq_vals[i].name,
            ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]
        );
    }
}
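
/**
 * Write dimensions
 *
 * Write the latest latency value of every software IRQ dimension.
 */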
static inline void softirq_write_dims(void)
{
    uint32_t i;
    for (i = 0; i < NETDATA_SOFTIRQ_MAX_IRQS; i++) {
        write_chart_dimension(softirq_vals[i].name, softirq_vals[i].latency);
    }
}
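
/*
 * Illustrative fragment of the text the collector below writes to stdout via
 * write_begin_chart()/softirq_write_dims()/write_end_chart(). The dimension
 * values are examples only and the chart type is whatever
 * NETDATA_EBPF_SYSTEM_GROUP expands to:
 *
 *   BEGIN <NETDATA_EBPF_SYSTEM_GROUP>.softirq_latency
 *   SET TIMER = 12
 *   SET NET_RX = 3
 *   ...
 *   END
 */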

/**
 * Main loop for this collector.
 *
 * @param em the structure with information about this thread.
 */
static void softirq_collector(ebpf_module_t *em)
{
    softirq_ebpf_vals = callocz(ebpf_nprocs, sizeof(softirq_ebpf_val_t));

    // Create the chart and its static dimensions.
    pthread_mutex_lock(&lock);
    softirq_create_charts(em->update_every);
    softirq_create_dims();
    ebpf_update_stats(&plugin_statistics, em);
    ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps);
    pthread_mutex_unlock(&lock);

    // Loop and read the published data until the eBPF plugin is closed.
    heartbeat_t hb;
    heartbeat_init(&hb);
    int update_every = em->update_every;
    int counter = update_every - 1;
    int maps_per_core = em->maps_per_core;
    // The parent thread cancels this one when the plugin exits.
    while (!ebpf_exit_plugin) {
        (void)heartbeat_next(&hb, USEC_PER_SEC);
        if (ebpf_exit_plugin || ++counter != update_every)
            continue;

        counter = 0;
        softirq_read_latency_map(maps_per_core);

        pthread_mutex_lock(&lock);
        // Write the dimensions for every soft IRQ.
        write_begin_chart(NETDATA_EBPF_SYSTEM_GROUP, "softirq_latency");
        softirq_write_dims();
        write_end_chart();
        pthread_mutex_unlock(&lock);
    }
}

/*****************************************************************
 *  EBPF SOFTIRQ THREAD
 *****************************************************************/

/**
 * Soft IRQ latency thread.
 *
 * @param ptr an `ebpf_module_t *`.
 * @return always NULL.
 */
void *ebpf_softirq_thread(void *ptr)
{
    netdata_thread_cleanup_push(softirq_cleanup, ptr);

    ebpf_module_t *em = (ebpf_module_t *)ptr;
    em->maps = softirq_maps;

    if (ebpf_enable_tracepoints(softirq_tracepoints) == 0) {
        goto endsoftirq;
    }

#ifdef LIBBPF_MAJOR_VERSION
    ebpf_define_map_type(em->maps, em->maps_per_core, running_on_kernel);
#endif
    em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
    if (!em->probe_links) {
        goto endsoftirq;
    }

    softirq_collector(em);

endsoftirq:
    ebpf_update_disabled_plugin_stats(em);

    netdata_thread_cleanup_pop(1);
    return NULL;
}