// ebpf_cgroup.c — shared-memory integration between the eBPF plugin and cgroups
  1. // SPDX-License-Identifier: GPL-3.0-or-later
  2. #include <sys/resource.h>
  3. #include "ebpf.h"
  4. #include "ebpf_cgroup.h"
  5. ebpf_cgroup_target_t *ebpf_cgroup_pids = NULL;
  6. // --------------------------------------------------------------------------------------------------------------------
  7. // Map shared memory
  8. /**
  9. * Map Shared Memory locally
  10. *
  11. * Map the shared memory for current process
  12. *
  13. * @param fd file descriptor returned after shm_open was called.
  14. * @param length length of the shared memory
  15. *
  16. * @return It returns a pointer to the region mapped.
  17. */
  18. static inline void *ebpf_cgroup_map_shm_locally(int fd, size_t length)
  19. {
  20. void *value;
  21. value = mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  22. if (!value) {
  23. error("Cannot map shared memory used between eBPF and cgroup, integration between processes won't happen");
  24. close(shm_fd_ebpf_cgroup);
  25. shm_fd_ebpf_cgroup = -1;
  26. shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME);
  27. }
  28. return value;
  29. }
/**
 * Map cgroup shared memory
 *
 * Map cgroup shared memory from cgroup to plugin
 */
void ebpf_map_cgroup_shared_memory()
{
    // Retry bookkeeping persists across calls: give up for good once
    // limit_try exceeds NETDATA_EBPF_CGROUP_MAX_TRIES, and wait at least
    // NETDATA_EBPF_CGROUP_NEXT_TRY_SEC seconds between attempts.
    static int limit_try = 0;
    static time_t next_try = 0;

    // Already mapped, or attempts exhausted: nothing to do.
    if (shm_ebpf_cgroup.header || limit_try > NETDATA_EBPF_CGROUP_MAX_TRIES)
        return;

    time_t curr_time = time(NULL);
    if (curr_time < next_try)
        return;

    limit_try++;
    next_try = curr_time + NETDATA_EBPF_CGROUP_NEXT_TRY_SEC;

    // The cgroup side creates the shared memory object; we only open it.
    shm_fd_ebpf_cgroup = shm_open(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME, O_RDWR, 0660);
    if (shm_fd_ebpf_cgroup < 0) {
        if (limit_try == NETDATA_EBPF_CGROUP_MAX_TRIES)
            error("Shared memory was not initialized, integration between processes won't happen.");

        return;
    }

    // Map only header
    shm_ebpf_cgroup.header = (netdata_ebpf_cgroup_shm_header_t *) ebpf_cgroup_map_shm_locally(shm_fd_ebpf_cgroup,
                                                                                              sizeof(netdata_ebpf_cgroup_shm_header_t));
    if (!shm_ebpf_cgroup.header) {
        // Mapping failed; stop retrying permanently.
        limit_try = NETDATA_EBPF_CGROUP_MAX_TRIES + 1;
        return;
    }

    // The header carries the size of the whole region; drop the header-only
    // mapping and remap with the full length.
    size_t length = shm_ebpf_cgroup.header->body_length;

    munmap(shm_ebpf_cgroup.header, sizeof(netdata_ebpf_cgroup_shm_header_t));

    shm_ebpf_cgroup.header = (netdata_ebpf_cgroup_shm_header_t *)ebpf_cgroup_map_shm_locally(shm_fd_ebpf_cgroup, length);
    if (!shm_ebpf_cgroup.header) {
        limit_try = NETDATA_EBPF_CGROUP_MAX_TRIES + 1;
        return;
    }

    // The body array starts immediately after the header inside the same mapping.
    shm_ebpf_cgroup.body = (netdata_ebpf_cgroup_shm_body_t *) ((char *)shm_ebpf_cgroup.header +
                                                               sizeof(netdata_ebpf_cgroup_shm_header_t));

    // Named semaphore shared with the cgroup process to serialize access to the region.
    shm_sem_ebpf_cgroup = sem_open(NETDATA_NAMED_SEMAPHORE_EBPF_CGROUP_NAME, O_CREAT, 0660, 1);

    if (shm_sem_ebpf_cgroup == SEM_FAILED) {
        // Without the semaphore the region cannot be used safely; undo everything.
        error("Cannot create semaphore, integration between eBPF and cgroup won't happen");
        munmap(shm_ebpf_cgroup.header, length);
        shm_ebpf_cgroup.header = NULL;
        close(shm_fd_ebpf_cgroup);
        shm_fd_ebpf_cgroup = -1;
        shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME);
    }
}
  78. // --------------------------------------------------------------------------------------------------------------------
  79. // Close and Cleanup
  80. /**
  81. * Close shared memory
  82. */
  83. void ebpf_close_cgroup_shm()
  84. {
  85. if (shm_sem_ebpf_cgroup != SEM_FAILED) {
  86. sem_close(shm_sem_ebpf_cgroup);
  87. sem_unlink(NETDATA_NAMED_SEMAPHORE_EBPF_CGROUP_NAME);
  88. shm_sem_ebpf_cgroup = SEM_FAILED;
  89. }
  90. if (shm_fd_ebpf_cgroup > 0) {
  91. close(shm_fd_ebpf_cgroup);
  92. shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME);
  93. shm_fd_ebpf_cgroup = -1;
  94. }
  95. }
  96. /**
  97. * Clean Specific cgroup pid
  98. *
  99. * Clean all PIDs associated with cgroup.
  100. *
  101. * @param pt structure pid on target that will have your PRs removed
  102. */
  103. static inline void ebpf_clean_specific_cgroup_pids(struct pid_on_target2 *pt)
  104. {
  105. while (pt) {
  106. struct pid_on_target2 *next_pid = pt->next;
  107. freez(pt);
  108. pt = next_pid;
  109. }
  110. }
/**
 * Remove Cgroup Update Target Update List
 *
 * Remove from cgroup target and update the link list
 *
 * Walks ebpf_cgroup_pids and frees every node whose `updated` flag is zero
 * (cgroups not seen in the last shared-memory snapshot), relinking the list
 * around the removed nodes.
 */
static void ebpf_remove_cgroup_target_update_list()
{
    ebpf_cgroup_target_t *next, *ect = ebpf_cgroup_pids;
    ebpf_cgroup_target_t *prev = ebpf_cgroup_pids;
    while (ect) {
        next = ect->next;
        if (!ect->updated) {
            if (ect == ebpf_cgroup_pids) {
                // Removing the head: both the list head and the trailing
                // pointer advance to the next node.
                ebpf_cgroup_pids = next;
                prev = next;
            } else {
                // Removing an interior node: bypass it from its predecessor.
                prev->next = next;
            }

            // Release the node's PID list before the node itself.
            ebpf_clean_specific_cgroup_pids(ect->pids);
            freez(ect);
        } else {
            // Node is kept; it becomes the predecessor for the next iteration.
            prev = ect;
        }

        ect = next;
    }
}
  137. // --------------------------------------------------------------------------------------------------------------------
  138. // Fill variables
  139. /**
  140. * Set Target Data
  141. *
  142. * Set local variable values according shared memory information.
  143. *
  144. * @param out local output variable.
  145. * @param ptr input from shared memory.
  146. */
  147. static inline void ebpf_cgroup_set_target_data(ebpf_cgroup_target_t *out, netdata_ebpf_cgroup_shm_body_t *ptr)
  148. {
  149. out->hash = ptr->hash;
  150. snprintfz(out->name, 255, "%s", ptr->name);
  151. out->systemd = ptr->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE;
  152. out->updated = 1;
  153. }
  154. /**
  155. * Find or create
  156. *
  157. * Find the structure inside the link list or allocate and link when it is not present.
  158. *
  159. * @param ptr Input from shared memory.
  160. *
  161. * @return It returns a pointer for the structure associated with the input.
  162. */
  163. static ebpf_cgroup_target_t * ebpf_cgroup_find_or_create(netdata_ebpf_cgroup_shm_body_t *ptr)
  164. {
  165. ebpf_cgroup_target_t *ect, *prev;
  166. for (ect = ebpf_cgroup_pids, prev = ebpf_cgroup_pids; ect; prev = ect, ect = ect->next) {
  167. if (ect->hash == ptr->hash && !strcmp(ect->name, ptr->name)) {
  168. ect->updated = 1;
  169. return ect;
  170. }
  171. }
  172. ebpf_cgroup_target_t *new_ect = callocz(1, sizeof(ebpf_cgroup_target_t));
  173. ebpf_cgroup_set_target_data(new_ect, ptr);
  174. if (!ebpf_cgroup_pids) {
  175. ebpf_cgroup_pids = new_ect;
  176. } else {
  177. prev->next = new_ect;
  178. }
  179. return new_ect;
  180. }
/**
 * Update pid link list
 *
 * Update PIDs list associated with specific cgroup.
 *
 * @param ect cgroup structure where pids will be stored
 * @param path file with PIDs associated to cgroup.
 */
static void ebpf_update_pid_link_list(ebpf_cgroup_target_t *ect, char *path)
{
    procfile *ff = procfile_open_no_log(path, " \t:", PROCFILE_FLAG_DEFAULT);
    if (!ff)
        return;

    // NOTE(review): assumes procfile_readall() releases `ff` itself when it
    // fails and returns NULL — confirm against the procfile implementation,
    // otherwise this early return leaks the handle.
    ff = procfile_readall(ff);
    if (!ff)
        return;

    size_t lines = procfile_lines(ff), l;
    for (l = 0; l < lines ;l++) {
        // One PID per line; str2l() yields 0 for blank/non-numeric lines,
        // which are skipped.
        int pid = (int)str2l(procfile_lineword(ff, l, 0));
        if (pid) {
            struct pid_on_target2 *pt, *prev;
            // Scan for the pid; `prev` ends up at the list tail so a new
            // node can be appended when the pid is not present.
            for (pt = ect->pids, prev = ect->pids; pt; prev = pt, pt = pt->next) {
                if (pt->pid == pid)
                    break;
            }

            if (!pt) {
                struct pid_on_target2 *w = callocz(1, sizeof(struct pid_on_target2));
                w->pid = pid;

                if (!ect->pids)
                    ect->pids = w;
                else
                    prev->next = w;
            }
        }
    }

    procfile_close(ff);
}
  218. /**
  219. * Set remove var
  220. *
  221. * Set variable remove. If this variable is not reset, the structure will be removed from link list.
  222. */
  223. void ebpf_reset_updated_var()
  224. {
  225. ebpf_cgroup_target_t *ect;
  226. for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
  227. ect->updated = 0;
  228. }
  229. }
/**
 * Parse cgroup shared memory
 *
 * This function is responsible to copy necessary data from shared memory to local memory.
 */
void ebpf_parse_cgroup_shm_data()
{
    // Only run once ebpf_map_cgroup_shared_memory() mapped the region; on
    // that success path the semaphore is also guaranteed to be open.
    if (shm_ebpf_cgroup.header) {
        // Lock order: inter-process semaphore first (shared with the cgroup
        // process), then the intra-process mutex guarding ebpf_cgroup_pids.
        sem_wait(shm_sem_ebpf_cgroup);
        int i, end = shm_ebpf_cgroup.header->cgroup_root_count;

        pthread_mutex_lock(&mutex_cgroup_shm);

        // Drop cgroups that disappeared since the previous snapshot, then
        // clear the flags so this pass re-marks the ones still alive.
        ebpf_remove_cgroup_target_update_list();

        ebpf_reset_updated_var();

        for (i = 0; i < end; i++) {
            netdata_ebpf_cgroup_shm_body_t *ptr = &shm_ebpf_cgroup.body[i];
            if (ptr->enabled) {
                ebpf_cgroup_target_t *ect = ebpf_cgroup_find_or_create(ptr);
                ebpf_update_pid_link_list(ect, ptr->path);
            }
        }
        pthread_mutex_unlock(&mutex_cgroup_shm);

        sem_post(shm_sem_ebpf_cgroup);
    }
}
  254. // --------------------------------------------------------------------------------------------------------------------
  255. // Create charts
  256. /**
  257. * Create charts on systemd submenu
  258. *
  259. * @param id the chart id
  260. * @param title the value displayed on vertical axis.
  261. * @param units the value displayed on vertical axis.
  262. * @param family Submenu that the chart will be attached on dashboard.
  263. * @param charttype chart type
  264. * @param order the chart order
  265. * @param algorithm the algorithm used by dimension
  266. * @param context add context for chart
  267. * @param module chart module name, this is the eBPF thread.
  268. * @param update_every value to overwrite the update frequency set by the server.
  269. */
  270. void ebpf_create_charts_on_systemd(char *id, char *title, char *units, char *family, char *charttype, int order,
  271. char *algorithm, char *context, char *module, int update_every)
  272. {
  273. ebpf_cgroup_target_t *w;
  274. ebpf_write_chart_cmd(NETDATA_SERVICE_FAMILY, id, title, units, family, charttype, context,
  275. order, update_every, module);
  276. for (w = ebpf_cgroup_pids; w; w = w->next) {
  277. if (unlikely(w->systemd) && unlikely(w->updated))
  278. fprintf(stdout, "DIMENSION %s '' %s 1 1\n", w->name, algorithm);
  279. }
  280. }