// SPDX-License-Identifier: GPL-3.0-or-later

#include <sys/resource.h>

#include "ebpf.h"
#include "ebpf_cgroup.h"

ebpf_cgroup_target_t *ebpf_cgroup_pids = NULL;
static void *ebpf_mapped_memory = NULL;
int send_cgroup_chart = 0;
// --------------------------------------------------------------------------------------------------------------------
// Map shared memory

/**
 * Map Shared Memory locally
 *
 * Map the shared memory for the current process.
 *
 * @param fd     file descriptor returned after shm_open was called.
 * @param length length of the shared memory
 *
 * @return It returns a pointer to the mapped region on success and MAP_FAILED otherwise.
 */
static inline void *ebpf_cgroup_map_shm_locally(int fd, size_t length)
{
    void *value;

    value = mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (value == MAP_FAILED) {
        netdata_log_error("Cannot map shared memory used between eBPF and cgroup, integration between processes won't happen");
        close(shm_fd_ebpf_cgroup);
        shm_fd_ebpf_cgroup = -1;
        shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME);
    }

    return value;
}
/**
 * Unmap Shared Memory
 *
 * Unmap shared memory used to integrate eBPF and cgroup plugin.
 */
void ebpf_unmap_cgroup_shared_memory()
{
    munmap(ebpf_mapped_memory, shm_ebpf_cgroup.header->body_length);
}
/**
 * Map cgroup shared memory
 *
 * Map cgroup shared memory from cgroup to plugin.
 */
void ebpf_map_cgroup_shared_memory()
{
    static int limit_try = 0;
    static time_t next_try = 0;
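
    // Attempts are throttled: at most NETDATA_EBPF_CGROUP_MAX_TRIES tries, spaced at least
    // NETDATA_EBPF_CGROUP_NEXT_TRY_SEC seconds apart, so a missing cgroup plugin does not keep
    // this thread retrying shm_open on every heartbeat.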
    if (shm_ebpf_cgroup.header || limit_try > NETDATA_EBPF_CGROUP_MAX_TRIES)
        return;

    time_t curr_time = time(NULL);
    if (curr_time < next_try)
        return;

    limit_try++;
    next_try = curr_time + NETDATA_EBPF_CGROUP_NEXT_TRY_SEC;

    if (shm_fd_ebpf_cgroup < 0) {
        shm_fd_ebpf_cgroup = shm_open(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME, O_RDWR, 0660);
        if (shm_fd_ebpf_cgroup < 0) {
            if (limit_try == NETDATA_EBPF_CGROUP_MAX_TRIES)
                netdata_log_error("Shared memory was not initialized, integration between processes won't happen.");

            return;
        }
    }
    // Map only the header
    void *mapped = ebpf_cgroup_map_shm_locally(shm_fd_ebpf_cgroup, sizeof(netdata_ebpf_cgroup_shm_header_t));
    if (unlikely(mapped == MAP_FAILED)) {
        return;
    }
    netdata_ebpf_cgroup_shm_header_t *header = mapped;

    size_t length = header->body_length;

    munmap(header, sizeof(netdata_ebpf_cgroup_shm_header_t));

    if (length <= (sizeof(netdata_ebpf_cgroup_shm_header_t) + sizeof(netdata_ebpf_cgroup_shm_body_t))) {
        return;
    }
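
    // The header was mapped first only to learn body_length; now map the whole region
    // (header plus cgroup bodies) in a single mapping that stays around until
    // ebpf_unmap_cgroup_shared_memory() is called.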
    ebpf_mapped_memory = ebpf_cgroup_map_shm_locally(shm_fd_ebpf_cgroup, length);
    if (unlikely(ebpf_mapped_memory == MAP_FAILED)) {
        return;
    }
    shm_ebpf_cgroup.header = ebpf_mapped_memory;
    shm_ebpf_cgroup.body = ebpf_mapped_memory + sizeof(netdata_ebpf_cgroup_shm_header_t);

    shm_sem_ebpf_cgroup = sem_open(NETDATA_NAMED_SEMAPHORE_EBPF_CGROUP_NAME, O_CREAT, 0660, 1);

    if (shm_sem_ebpf_cgroup == SEM_FAILED) {
        netdata_log_error("Cannot create semaphore, integration between eBPF and cgroup won't happen");
        limit_try = NETDATA_EBPF_CGROUP_MAX_TRIES + 1;
        munmap(ebpf_mapped_memory, length);
        shm_ebpf_cgroup.header = NULL;
        shm_ebpf_cgroup.body = NULL;
        close(shm_fd_ebpf_cgroup);
        shm_fd_ebpf_cgroup = -1;
        shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME);
    }
}
// --------------------------------------------------------------------------------------------------------------------
// Close and Cleanup

/**
 * Clean Specific cgroup pid
 *
 * Clean all PIDs associated with a cgroup.
 *
 * @param pt the pid_on_target2 list whose PIDs will be removed.
 */
static inline void ebpf_clean_specific_cgroup_pids(struct pid_on_target2 *pt)
{
    while (pt) {
        struct pid_on_target2 *next_pid = pt->next;

        freez(pt);
        pt = next_pid;
    }
}
/**
 * Remove Cgroup Target from Update List
 *
 * Remove every target that was not updated from the cgroup target list and relink the list.
 */
static void ebpf_remove_cgroup_target_update_list()
{
    ebpf_cgroup_target_t *next, *ect = ebpf_cgroup_pids;
    ebpf_cgroup_target_t *prev = ebpf_cgroup_pids;
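
    // Targets whose `updated` flag was not set again during the previous parse no longer exist
    // in the shared memory, so they are unlinked here and their PID lists are freed.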
    while (ect) {
        next = ect->next;
        if (!ect->updated) {
            if (ect == ebpf_cgroup_pids) {
                ebpf_cgroup_pids = next;
                prev = next;
            } else {
                prev->next = next;
            }

            ebpf_clean_specific_cgroup_pids(ect->pids);
            freez(ect);
        } else {
            prev = ect;
        }

        ect = next;
    }
}
// --------------------------------------------------------------------------------------------------------------------
// Fill variables

/**
 * Set Target Data
 *
 * Set local variable values according to the shared memory information.
 *
 * @param out local output variable.
 * @param ptr input from shared memory.
 */
static inline void ebpf_cgroup_set_target_data(ebpf_cgroup_target_t *out, netdata_ebpf_cgroup_shm_body_t *ptr)
{
    out->hash = ptr->hash;
    snprintfz(out->name, 255, "%s", ptr->name);
    out->systemd = ptr->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE;
    out->updated = 1;
}
/**
 * Find or create
 *
 * Find the structure inside the linked list, or allocate and link it when it is not present.
 *
 * @param ptr Input from shared memory.
 *
 * @return It returns a pointer to the structure associated with the input.
 */
static ebpf_cgroup_target_t *ebpf_cgroup_find_or_create(netdata_ebpf_cgroup_shm_body_t *ptr)
{
    ebpf_cgroup_target_t *ect, *prev;
    for (ect = ebpf_cgroup_pids, prev = ebpf_cgroup_pids; ect; prev = ect, ect = ect->next) {
        if (ect->hash == ptr->hash && !strcmp(ect->name, ptr->name)) {
            ect->updated = 1;
            return ect;
        }
    }

    ebpf_cgroup_target_t *new_ect = callocz(1, sizeof(ebpf_cgroup_target_t));

    ebpf_cgroup_set_target_data(new_ect, ptr);
    if (!ebpf_cgroup_pids) {
        ebpf_cgroup_pids = new_ect;
    } else {
        prev->next = new_ect;
    }

    return new_ect;
}
/**
 * Update PID linked list
 *
 * Update the list of PIDs associated with a specific cgroup.
 *
 * @param ect  cgroup structure where PIDs will be stored.
 * @param path file with the PIDs associated with the cgroup.
 */
static void ebpf_update_pid_link_list(ebpf_cgroup_target_t *ect, char *path)
{
    procfile *ff = procfile_open_no_log(path, " \t:", PROCFILE_FLAG_DEFAULT);
    if (!ff)
        return;

    ff = procfile_readall(ff);
    if (!ff)
        return;

    size_t lines = procfile_lines(ff), l;
    for (l = 0; l < lines; l++) {
        int pid = (int)str2l(procfile_lineword(ff, l, 0));
        if (pid) {
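            // Walk the current PID list first, so the same PID is not appended twice when the
            // cgroup file is re-read on a later pass.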
            struct pid_on_target2 *pt, *prev;
            for (pt = ect->pids, prev = ect->pids; pt; prev = pt, pt = pt->next) {
                if (pt->pid == pid)
                    break;
            }

            if (!pt) {
                struct pid_on_target2 *w = callocz(1, sizeof(struct pid_on_target2));
                w->pid = pid;

                if (!ect->pids)
                    ect->pids = w;
                else
                    prev->next = w;
            }
        }
    }

    procfile_close(ff);
}
/**
 * Reset updated variable
 *
 * Reset the `updated` flag for every cgroup target. If the flag is not set again during the next
 * parse, the structure will be removed from the linked list.
 */
void ebpf_reset_updated_var()
{
    ebpf_cgroup_target_t *ect;
    for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
        ect->updated = 0;
    }
}
/**
 * Parse cgroup shared memory
 *
 * This function is responsible for copying the necessary data from shared memory to local memory.
 */
void ebpf_parse_cgroup_shm_data()
{
    static int previous = 0;
    if (!shm_ebpf_cgroup.header || shm_sem_ebpf_cgroup == SEM_FAILED)
        return;

    sem_wait(shm_sem_ebpf_cgroup);
    int i, end = shm_ebpf_cgroup.header->cgroup_root_count;
    if (end <= 0) {
        sem_post(shm_sem_ebpf_cgroup);
        return;
    }

    pthread_mutex_lock(&mutex_cgroup_shm);
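    // The named semaphore serializes access to the shared memory with the cgroup plugin, while
    // mutex_cgroup_shm protects the local ebpf_cgroup_pids list while it is rebuilt.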
    ebpf_remove_cgroup_target_update_list();

    ebpf_reset_updated_var();

    for (i = 0; i < end; i++) {
        netdata_ebpf_cgroup_shm_body_t *ptr = &shm_ebpf_cgroup.body[i];
        if (ptr->enabled) {
            ebpf_cgroup_target_t *ect = ebpf_cgroup_find_or_create(ptr);
            ebpf_update_pid_link_list(ect, ptr->path);
        }
    }
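
    // Charts only need to be recreated when the number of cgroups changed since the last pass.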
    send_cgroup_chart = previous != shm_ebpf_cgroup.header->cgroup_root_count;
    previous = shm_ebpf_cgroup.header->cgroup_root_count;
    pthread_mutex_unlock(&mutex_cgroup_shm);

#ifdef NETDATA_DEV_MODE
    netdata_log_info("Updating cgroup %d (Previous: %d, Current: %d)",
                     send_cgroup_chart, previous, shm_ebpf_cgroup.header->cgroup_root_count);
#endif

    sem_post(shm_sem_ebpf_cgroup);
}
// --------------------------------------------------------------------------------------------------------------------
// Create charts

/**
 * Create charts on systemd submenu
 *
 * @param id           the chart id
 * @param title        the chart title.
 * @param units        the units displayed on the vertical axis.
 * @param family       submenu that the chart will be attached to on the dashboard.
 * @param charttype    chart type
 * @param order        the chart order
 * @param algorithm    the algorithm used by the dimensions
 * @param context      add context for chart
 * @param module       chart module name, this is the eBPF thread.
 * @param update_every value to overwrite the update frequency set by the server.
 */
void ebpf_create_charts_on_systemd(char *id, char *title, char *units, char *family, char *charttype, int order,
                                   char *algorithm, char *context, char *module, int update_every)
{
    ebpf_cgroup_target_t *w;
    ebpf_write_chart_cmd(NETDATA_SERVICE_FAMILY, id, title, units, family, charttype, context,
                         order, update_every, module);
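
    // One dimension is emitted per systemd service that was still marked as updated after the
    // last shared memory parse.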
    for (w = ebpf_cgroup_pids; w; w = w->next) {
        if (unlikely(w->systemd) && unlikely(w->updated))
            fprintf(stdout, "DIMENSION %s '' %s 1 1\n", w->name, algorithm);
    }
}
// --------------------------------------------------------------------------------------------------------------------
// Cgroup main thread

/**
 * CGROUP exit
 *
 * Clean up the main thread.
 *
 * @param ptr thread data.
 */
static void ebpf_cgroup_exit(void *ptr)
{
    UNUSED(ptr);
}
/**
 * Cgroup integration
 *
 * Thread responsible for calling the functions that sync data between the plugins.
 *
 * @param ptr It is a NULL value for this thread.
 *
 * @return It always returns NULL.
 */
void *ebpf_cgroup_integration(void *ptr)
{
    netdata_thread_cleanup_push(ebpf_cgroup_exit, ptr);

    usec_t step = USEC_PER_SEC;
    int counter = NETDATA_EBPF_CGROUP_UPDATE - 1;
    heartbeat_t hb;
    heartbeat_init(&hb);
    // Plugin will be killed when it receives a signal
    while (!ebpf_plugin_exit) {
        (void)heartbeat_next(&hb, step);

        // We are using a small heartbeat time to wake up the thread,
        // but we should not update the shared memory data so frequently
        if (++counter >= NETDATA_EBPF_CGROUP_UPDATE) {
            counter = 0;
            if (!shm_ebpf_cgroup.header)
                ebpf_map_cgroup_shared_memory();
            else
                ebpf_parse_cgroup_shm_data();
        }
    }

    netdata_thread_cleanup_pop(1);
    return NULL;
}