/* ebpf_cgroup.c */
// SPDX-License-Identifier: GPL-3.0-or-later

#include <sys/resource.h>

#include "ebpf.h"
#include "ebpf_cgroup.h"

// Head of the linked list of cgroup targets built from the shared memory
// published by the cgroup side of the integration.
ebpf_cgroup_target_t *ebpf_cgroup_pids = NULL;

// Set when the cgroup count changed since the last parse, telling chart
// writers that chart definitions must be (re)sent.
int send_cgroup_chart = 0;
  7. // --------------------------------------------------------------------------------------------------------------------
  8. // Map shared memory
  9. /**
  10. * Map Shared Memory locally
  11. *
  12. * Map the shared memory for current process
  13. *
  14. * @param fd file descriptor returned after shm_open was called.
  15. * @param length length of the shared memory
  16. *
  17. * @return It returns a pointer to the region mapped.
  18. */
  19. static inline void *ebpf_cgroup_map_shm_locally(int fd, size_t length)
  20. {
  21. void *value;
  22. value = mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  23. if (!value) {
  24. error("Cannot map shared memory used between eBPF and cgroup, integration between processes won't happen");
  25. close(shm_fd_ebpf_cgroup);
  26. shm_fd_ebpf_cgroup = -1;
  27. shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME);
  28. }
  29. return value;
  30. }
/**
 * Map cgroup shared memory
 *
 * Map cgroup shared memory from cgroup to plugin
 */
void ebpf_map_cgroup_shared_memory()
{
    // Retry bookkeeping persists across calls: give up for good after
    // NETDATA_EBPF_CGROUP_MAX_TRIES attempts, and wait at least
    // NETDATA_EBPF_CGROUP_NEXT_TRY_SEC seconds between attempts.
    static int limit_try = 0;
    static time_t next_try = 0;

    // Already mapped, or retries exhausted: nothing to do.
    if (shm_ebpf_cgroup.header || limit_try > NETDATA_EBPF_CGROUP_MAX_TRIES)
        return;

    time_t curr_time = time(NULL);
    if (curr_time < next_try)
        return;

    limit_try++;
    next_try = curr_time + NETDATA_EBPF_CGROUP_NEXT_TRY_SEC;

    // The shared memory object is created by the other process; we only open it.
    shm_fd_ebpf_cgroup = shm_open(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME, O_RDWR, 0660);
    if (shm_fd_ebpf_cgroup < 0) {
        if (limit_try == NETDATA_EBPF_CGROUP_MAX_TRIES)
            error("Shared memory was not initialized, integration between processes won't happen.");

        return;
    }

    // Map only header
    shm_ebpf_cgroup.header = (netdata_ebpf_cgroup_shm_header_t *) ebpf_cgroup_map_shm_locally(shm_fd_ebpf_cgroup,
                                                                                              sizeof(netdata_ebpf_cgroup_shm_header_t));
    if (!shm_ebpf_cgroup.header) {
        // The helper already closed the fd and unlinked the object on failure
        // (see ebpf_cgroup_map_shm_locally); disable further retries.
        limit_try = NETDATA_EBPF_CGROUP_MAX_TRIES + 1;
        return;
    }

    // The header tells us the full region size; unmap the header-only view
    // and remap header + body in a single mapping.
    size_t length = shm_ebpf_cgroup.header->body_length;

    munmap(shm_ebpf_cgroup.header, sizeof(netdata_ebpf_cgroup_shm_header_t));
    shm_ebpf_cgroup.header = (netdata_ebpf_cgroup_shm_header_t *)ebpf_cgroup_map_shm_locally(shm_fd_ebpf_cgroup, length);
    if (!shm_ebpf_cgroup.header) {
        limit_try = NETDATA_EBPF_CGROUP_MAX_TRIES + 1;
        return;
    }

    // The body starts right after the header inside the same mapping.
    shm_ebpf_cgroup.body = (netdata_ebpf_cgroup_shm_body_t *) ((char *)shm_ebpf_cgroup.header +
                                                               sizeof(netdata_ebpf_cgroup_shm_header_t));

    // Named semaphore serializes access to the shared region with the writer.
    shm_sem_ebpf_cgroup = sem_open(NETDATA_NAMED_SEMAPHORE_EBPF_CGROUP_NAME, O_CREAT, 0660, 1);

    if (shm_sem_ebpf_cgroup == SEM_FAILED) {
        // Without the semaphore the region cannot be used safely: tear down
        // the mapping and the shared memory object entirely.
        error("Cannot create semaphore, integration between eBPF and cgroup won't happen");
        munmap(shm_ebpf_cgroup.header, length);
        shm_ebpf_cgroup.header = NULL;
        close(shm_fd_ebpf_cgroup);
        shm_fd_ebpf_cgroup = -1;
        shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME);
    }
}
  79. // --------------------------------------------------------------------------------------------------------------------
  80. // Close and Cleanup
  81. /**
  82. * Clean Specific cgroup pid
  83. *
  84. * Clean all PIDs associated with cgroup.
  85. *
  86. * @param pt structure pid on target that will have your PRs removed
  87. */
  88. static inline void ebpf_clean_specific_cgroup_pids(struct pid_on_target2 *pt)
  89. {
  90. while (pt) {
  91. struct pid_on_target2 *next_pid = pt->next;
  92. freez(pt);
  93. pt = next_pid;
  94. }
  95. }
/**
 * Remove Cgroup Update Target Update List
 *
 * Walk the cgroup target list removing every entry whose `updated` flag is
 * unset, freeing its PID list and the entry itself, and relinking the list.
 */
static void ebpf_remove_cgroup_target_update_list()
{
    ebpf_cgroup_target_t *next, *ect = ebpf_cgroup_pids;
    ebpf_cgroup_target_t *prev = ebpf_cgroup_pids;
    while (ect) {
        next = ect->next;
        if (!ect->updated) {
            if (ect == ebpf_cgroup_pids) {
                // Removing the head: advance both the global head and prev.
                ebpf_cgroup_pids = next;
                prev = next;
            } else {
                // Removing an inner node: bypass it from its predecessor.
                prev->next = next;
            }

            ebpf_clean_specific_cgroup_pids(ect->pids);
            freez(ect);
        } else {
            // Node survives; it becomes the predecessor of the next node.
            prev = ect;
        }

        ect = next;
    }
}
  122. // --------------------------------------------------------------------------------------------------------------------
  123. // Fill variables
  124. /**
  125. * Set Target Data
  126. *
  127. * Set local variable values according shared memory information.
  128. *
  129. * @param out local output variable.
  130. * @param ptr input from shared memory.
  131. */
  132. static inline void ebpf_cgroup_set_target_data(ebpf_cgroup_target_t *out, netdata_ebpf_cgroup_shm_body_t *ptr)
  133. {
  134. out->hash = ptr->hash;
  135. snprintfz(out->name, 255, "%s", ptr->name);
  136. out->systemd = ptr->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE;
  137. out->updated = 1;
  138. }
  139. /**
  140. * Find or create
  141. *
  142. * Find the structure inside the link list or allocate and link when it is not present.
  143. *
  144. * @param ptr Input from shared memory.
  145. *
  146. * @return It returns a pointer for the structure associated with the input.
  147. */
  148. static ebpf_cgroup_target_t * ebpf_cgroup_find_or_create(netdata_ebpf_cgroup_shm_body_t *ptr)
  149. {
  150. ebpf_cgroup_target_t *ect, *prev;
  151. for (ect = ebpf_cgroup_pids, prev = ebpf_cgroup_pids; ect; prev = ect, ect = ect->next) {
  152. if (ect->hash == ptr->hash && !strcmp(ect->name, ptr->name)) {
  153. ect->updated = 1;
  154. return ect;
  155. }
  156. }
  157. ebpf_cgroup_target_t *new_ect = callocz(1, sizeof(ebpf_cgroup_target_t));
  158. ebpf_cgroup_set_target_data(new_ect, ptr);
  159. if (!ebpf_cgroup_pids) {
  160. ebpf_cgroup_pids = new_ect;
  161. } else {
  162. prev->next = new_ect;
  163. }
  164. return new_ect;
  165. }
/**
 * Update pid link list
 *
 * Update PIDs list associated with specific cgroup.
 *
 * @param ect cgroup structure where pids will be stored
 * @param path file with PIDs associated to cgroup.
 */
static void ebpf_update_pid_link_list(ebpf_cgroup_target_t *ect, char *path)
{
    procfile *ff = procfile_open_no_log(path, " \t:", PROCFILE_FLAG_DEFAULT);
    if (!ff)
        return;

    ff = procfile_readall(ff);
    if (!ff)
        return; // NOTE(review): assumes procfile_readall() releases ff on failure — confirm

    size_t lines = procfile_lines(ff), l;
    for (l = 0; l < lines ;l++) {
        // First word of each line is expected to be a PID; 0 means no parse.
        int pid = (int)str2l(procfile_lineword(ff, l, 0));
        if (pid) {
            struct pid_on_target2 *pt, *prev;
            // Scan for the PID in the current list, remembering the tail.
            for (pt = ect->pids, prev = ect->pids; pt; prev = pt, pt = pt->next) {
                if (pt->pid == pid)
                    break;
            }

            // Not found: append a new node at the end of the list.
            if (!pt) {
                struct pid_on_target2 *w = callocz(1, sizeof(struct pid_on_target2));
                w->pid = pid;

                if (!ect->pids)
                    ect->pids = w;
                else
                    prev->next = w;
            }
        }
    }

    procfile_close(ff);
}
  203. /**
  204. * Set remove var
  205. *
  206. * Set variable remove. If this variable is not reset, the structure will be removed from link list.
  207. */
  208. void ebpf_reset_updated_var()
  209. {
  210. ebpf_cgroup_target_t *ect;
  211. for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
  212. ect->updated = 0;
  213. }
  214. }
  215. /**
  216. * Parse cgroup shared memory
  217. *
  218. * This function is responsible to copy necessary data from shared memory to local memory.
  219. */
  220. void ebpf_parse_cgroup_shm_data()
  221. {
  222. static int previous = 0;
  223. if (shm_ebpf_cgroup.header) {
  224. sem_wait(shm_sem_ebpf_cgroup);
  225. int i, end = shm_ebpf_cgroup.header->cgroup_root_count;
  226. pthread_mutex_lock(&mutex_cgroup_shm);
  227. ebpf_remove_cgroup_target_update_list();
  228. ebpf_reset_updated_var();
  229. for (i = 0; i < end; i++) {
  230. netdata_ebpf_cgroup_shm_body_t *ptr = &shm_ebpf_cgroup.body[i];
  231. if (ptr->enabled) {
  232. ebpf_cgroup_target_t *ect = ebpf_cgroup_find_or_create(ptr);
  233. ebpf_update_pid_link_list(ect, ptr->path);
  234. }
  235. }
  236. send_cgroup_chart = previous != shm_ebpf_cgroup.header->cgroup_root_count;
  237. previous = shm_ebpf_cgroup.header->cgroup_root_count;
  238. #ifdef NETDATA_DEV_MODE
  239. error("Updating cgroup %d (Previous: %d, Current: %d)", send_cgroup_chart, previous, shm_ebpf_cgroup.header->cgroup_root_count);
  240. #endif
  241. pthread_mutex_unlock(&mutex_cgroup_shm);
  242. sem_post(shm_sem_ebpf_cgroup);
  243. }
  244. }
  245. // --------------------------------------------------------------------------------------------------------------------
  246. // Create charts
  247. /**
  248. * Create charts on systemd submenu
  249. *
  250. * @param id the chart id
  251. * @param title the value displayed on vertical axis.
  252. * @param units the value displayed on vertical axis.
  253. * @param family Submenu that the chart will be attached on dashboard.
  254. * @param charttype chart type
  255. * @param order the chart order
  256. * @param algorithm the algorithm used by dimension
  257. * @param context add context for chart
  258. * @param module chart module name, this is the eBPF thread.
  259. * @param update_every value to overwrite the update frequency set by the server.
  260. */
  261. void ebpf_create_charts_on_systemd(char *id, char *title, char *units, char *family, char *charttype, int order,
  262. char *algorithm, char *context, char *module, int update_every)
  263. {
  264. ebpf_cgroup_target_t *w;
  265. ebpf_write_chart_cmd(NETDATA_SERVICE_FAMILY, id, title, units, family, charttype, context,
  266. order, update_every, module);
  267. for (w = ebpf_cgroup_pids; w; w = w->next) {
  268. if (unlikely(w->systemd) && unlikely(w->updated))
  269. fprintf(stdout, "DIMENSION %s '' %s 1 1\n", w->name, algorithm);
  270. }
  271. }