ebpf_cachestat.c
// SPDX-License-Identifier: GPL-3.0-or-later

#include "ebpf.h"
#include "ebpf_cachestat.h"

static ebpf_data_t cachestat_data;
netdata_publish_cachestat_t **cachestat_pid;

static struct bpf_link **probe_links = NULL;
static struct bpf_object *objects = NULL;

static char *cachestat_counter_dimension_name[NETDATA_CACHESTAT_END] = { "ratio", "dirty", "hit",
                                                                          "miss" };
static netdata_syscall_stat_t cachestat_counter_aggregated_data[NETDATA_CACHESTAT_END];
static netdata_publish_syscall_t cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_END];

netdata_cachestat_pid_t *cachestat_vector = NULL;

static netdata_idx_t cachestat_hash_values[NETDATA_CACHESTAT_END];
static netdata_idx_t *cachestat_values = NULL;

static int read_thread_closed = 1;

struct netdata_static_thread cachestat_threads = {"CACHESTAT KERNEL",
                                                  NULL, NULL, 1, NULL,
                                                  NULL, NULL};

static ebpf_local_maps_t cachestat_maps[] = {{.name = "cstat_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
                                              .user_input = 0},
                                             {.name = NULL, .internal_input = 0, .user_input = 0}};

static int *map_fd = NULL;

struct config cachestat_config = { .first_section = NULL,
                                   .last_section = NULL,
                                   .mutex = NETDATA_MUTEX_INITIALIZER,
                                   .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
                                              .rwlock = AVL_LOCK_INITIALIZER } };
/*****************************************************************
 *
 *  FUNCTIONS TO CLOSE THE THREAD
 *
 *****************************************************************/

/**
 * Clean PID structures
 *
 * Clean the allocated structures.
 */
void clean_cachestat_pid_structures() {
    struct pid_stat *pids = root_of_pids;
    while (pids) {
        freez(cachestat_pid[pids->pid]);

        pids = pids->next;
    }
}
/**
 * Clean up the main thread.
 *
 * @param ptr thread data.
 */
static void ebpf_cachestat_cleanup(void *ptr)
{
    ebpf_module_t *em = (ebpf_module_t *)ptr;
    if (!em->enabled)
        return;

    heartbeat_t hb;
    heartbeat_init(&hb);
    uint32_t tick = 2 * USEC_PER_MS;
    while (!read_thread_closed) {
        usec_t dt = heartbeat_next(&hb, tick);
        UNUSED(dt);
    }

    ebpf_cleanup_publish_syscall(cachestat_counter_publish_aggregated);

    freez(cachestat_vector);
    freez(cachestat_values);

    if (probe_links) {
        struct bpf_program *prog;
        size_t i = 0;
        bpf_object__for_each_program(prog, objects) {
            bpf_link__destroy(probe_links[i]);
            i++;
        }
        bpf_object__close(objects);
    }
}
/*****************************************************************
 *
 *  COMMON FUNCTIONS
 *
 *****************************************************************/

/**
 * Update publish
 *
 * Update publish values before writing the dimensions.
 *
 * @param out  structure that will receive the data.
 * @param mpa  calls to mark_page_accessed during the last second.
 * @param mbd  calls to mark_buffer_dirty during the last second.
 * @param apcl calls to add_to_page_cache_lru during the last second.
 * @param apd  calls to account_page_dirtied during the last second.
 */
void cachestat_update_publish(netdata_publish_cachestat_t *out, uint64_t mpa, uint64_t mbd,
                              uint64_t apcl, uint64_t apd)
{
    // Adapted algorithm from https://github.com/iovisor/bcc/blob/master/tools/cachestat.py#L126-L138
    calculated_number total = (calculated_number) (((long long)mpa) - ((long long)mbd));
    if (total < 0)
        total = 0;

    calculated_number misses = (calculated_number) (((long long)apcl) - ((long long)apd));
    if (misses < 0)
        misses = 0;

    // If hits are < 0, misses are probably overestimated because page cache readahead added
    // more pages than needed. In this case, just report misses as total and reset hits.
    calculated_number hits = total - misses;
    if (hits < 0) {
        misses = total;
        hits = 0;
    }

    calculated_number ratio = (total > 0) ? hits/total : 1;

    out->ratio = (long long)(ratio * 100);
    out->hit = (long long)hits;
    out->miss = (long long)misses;
}
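
/*
 * Worked example (hypothetical numbers) for the algorithm above: with
 * mpa = 1000, mbd = 100, apcl = 300 and apd = 50 during the last second:
 *
 *   total  = 1000 - 100 = 900    accesses, excluding buffer dirties
 *   misses =  300 -  50 = 250    pages added to the LRU minus dirtied pages
 *   hits   =  900 - 250 = 650
 *   ratio  =  650 / 900 ~ 0.72   ->  out->ratio = 72
 *
 * If readahead inflated misses above total, hits would go negative, so the
 * function clamps hits to 0 and reports misses = total instead.
 */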
/**
 * Save previous values
 *
 * Save the values used this time.
 *
 * @param publish the structure where the current counters are stored.
 */
static void save_previous_values(netdata_publish_cachestat_t *publish) {
    publish->prev.mark_page_accessed = cachestat_hash_values[NETDATA_KEY_CALLS_MARK_PAGE_ACCESSED];
    publish->prev.account_page_dirtied = cachestat_hash_values[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED];
    publish->prev.add_to_page_cache_lru = cachestat_hash_values[NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU];
    publish->prev.mark_buffer_dirty = cachestat_hash_values[NETDATA_KEY_CALLS_MARK_BUFFER_DIRTY];
}

/**
 * Calculate statistics
 *
 * @param publish the structure where we will store the data.
 */
static void calculate_stats(netdata_publish_cachestat_t *publish) {
    if (!publish->prev.mark_page_accessed) {
        save_previous_values(publish);
        return;
    }

    uint64_t mpa = cachestat_hash_values[NETDATA_KEY_CALLS_MARK_PAGE_ACCESSED] - publish->prev.mark_page_accessed;
    uint64_t mbd = cachestat_hash_values[NETDATA_KEY_CALLS_MARK_BUFFER_DIRTY] - publish->prev.mark_buffer_dirty;
    uint64_t apcl = cachestat_hash_values[NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU] - publish->prev.add_to_page_cache_lru;
    uint64_t apd = cachestat_hash_values[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED] - publish->prev.account_page_dirtied;

    save_previous_values(publish);

    // We are changing the original algorithm to have a smooth ratio.
    cachestat_update_publish(publish, mpa, mbd, apcl, apd);
}
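
/*
 * Note: on the very first call the previous counters are still zero, so the
 * function above only stores the current values and returns; the published
 * ratio/hit/miss fields keep their initial (zeroed) values until the next
 * iteration, when real deltas become available.
 */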
/*****************************************************************
 *
 *  APPS
 *
 *****************************************************************/

/**
 * Apps Accumulator
 *
 * Sum all values read from the kernel and store them in the first address.
 *
 * @param out the vector with read values.
 */
static void cachestat_apps_accumulator(netdata_cachestat_pid_t *out)
{
    int i, end = (running_on_kernel >= NETDATA_KERNEL_V4_15) ? ebpf_nprocs : 1;
    netdata_cachestat_pid_t *total = &out[0];
    for (i = 1; i < end; i++) {
        netdata_cachestat_pid_t *w = &out[i];
        total->account_page_dirtied += w->account_page_dirtied;
        total->add_to_page_cache_lru += w->add_to_page_cache_lru;
        total->mark_buffer_dirty += w->mark_buffer_dirty;
        total->mark_page_accessed += w->mark_page_accessed;
    }
}
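
/*
 * On kernels >= 4.15 the per-PID table is expected to hold one
 * netdata_cachestat_pid_t slot per CPU, so a single lookup returns
 * ebpf_nprocs entries and the accumulator above folds them into out[0].
 * On older kernels only index 0 is populated, hence end = 1.
 */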
/**
 * Save Pid values
 *
 * Save the current values inside the structure
 *
 * @param out     vector used to plot charts
 * @param publish vector with values read from hash tables.
 */
static inline void cachestat_save_pid_values(netdata_publish_cachestat_t *out, netdata_cachestat_pid_t *publish)
{
    if (!out->current.mark_page_accessed) {
        memcpy(&out->current, &publish[0], sizeof(netdata_cachestat_pid_t));
        return;
    }

    memcpy(&out->prev, &out->current, sizeof(netdata_cachestat_pid_t));
    memcpy(&out->current, &publish[0], sizeof(netdata_cachestat_pid_t));
}
/**
 * Fill PID
 *
 * Fill PID structures
 *
 * @param current_pid pid for which we are collecting data
 * @param publish     values read from hash tables;
 */
static void cachestat_fill_pid(uint32_t current_pid, netdata_cachestat_pid_t *publish)
{
    netdata_publish_cachestat_t *curr = cachestat_pid[current_pid];
    if (!curr) {
        curr = callocz(1, sizeof(netdata_publish_cachestat_t));
        cachestat_pid[current_pid] = curr;

        cachestat_save_pid_values(curr, publish);
        return;
    }

    cachestat_save_pid_values(curr, publish);
}
/**
 * Read APPS table
 *
 * Read the apps table and store data inside the structure.
 */
static void read_apps_table()
{
    netdata_cachestat_pid_t *cv = cachestat_vector;
    uint32_t key;
    struct pid_stat *pids = root_of_pids;
    int fd = map_fd[NETDATA_CACHESTAT_PID_STATS];
    size_t length = sizeof(netdata_cachestat_pid_t) * ebpf_nprocs;
    while (pids) {
        key = pids->pid;

        if (bpf_map_lookup_elem(fd, &key, cv)) {
            pids = pids->next;
            continue;
        }

        cachestat_apps_accumulator(cv);

        cachestat_fill_pid(key, cv);

        // We are cleaning to avoid passing data read from one process to another.
        memset(cv, 0, length);

        pids = pids->next;
    }
}
/**
 * Create apps charts
 *
 * Call ebpf_create_chart to create the charts on the apps submenu.
 *
 * @param em a pointer to the structure with the default values.
 */
void ebpf_cachestat_create_apps_charts(struct ebpf_module *em, void *ptr)
{
    UNUSED(em);
    struct target *root = ptr;
    ebpf_create_charts_on_apps(NETDATA_CACHESTAT_HIT_RATIO_CHART,
                               "The ratio is calculated by dividing hit pages by total cache accesses, without counting dirty pages.",
                               EBPF_COMMON_DIMENSION_PERCENTAGE,
                               NETDATA_APPS_CACHESTAT_GROUP,
                               NETDATA_EBPF_CHART_TYPE_LINE,
                               20090,
                               ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
                               root);

    ebpf_create_charts_on_apps(NETDATA_CACHESTAT_DIRTY_CHART,
                               "Number of pages marked as dirty. When a page is marked dirty, the data stored inside it still needs to be written to the device.",
                               EBPF_CACHESTAT_DIMENSION_PAGE,
                               NETDATA_APPS_CACHESTAT_GROUP,
                               NETDATA_EBPF_CHART_TYPE_STACKED,
                               20091,
                               ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
                               root);

    ebpf_create_charts_on_apps(NETDATA_CACHESTAT_HIT_CHART,
                               "Number of cache accesses, without counting dirty pages and page additions.",
                               EBPF_CACHESTAT_DIMENSION_HITS,
                               NETDATA_APPS_CACHESTAT_GROUP,
                               NETDATA_EBPF_CHART_TYPE_STACKED,
                               20092,
                               ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
                               root);

    ebpf_create_charts_on_apps(NETDATA_CACHESTAT_MISSES_CHART,
                               "Pages added to the page cache, without counting dirty pages.",
                               EBPF_CACHESTAT_DIMENSION_MISSES,
                               NETDATA_APPS_CACHESTAT_GROUP,
                               NETDATA_EBPF_CHART_TYPE_STACKED,
                               20093,
                               ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
                               root);
}
/*****************************************************************
 *
 *  MAIN LOOP
 *
 *****************************************************************/

/**
 * Read global counter
 *
 * Read the table with the number of calls for all functions
 */
static void read_global_table()
{
    uint32_t idx;
    netdata_idx_t *val = cachestat_hash_values;
    netdata_idx_t *stored = cachestat_values;
    int fd = map_fd[NETDATA_CACHESTAT_GLOBAL_STATS];

    for (idx = NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU; idx < NETDATA_CACHESTAT_END; idx++) {
        if (!bpf_map_lookup_elem(fd, &idx, stored)) {
            int i;
            int end = ebpf_nprocs;
            netdata_idx_t total = 0;
            for (i = 0; i < end; i++)
                total += stored[i];

            val[idx] = total;
        }
    }
}
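
/*
 * The global table is read the same way as the per-PID table: each key in
 * the NETDATA_KEY_CALLS_* range is expected to map to one counter per CPU,
 * and the loop above sums the per-CPU values into cachestat_hash_values.
 */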
/**
 * Cachestat read hash
 *
 * This is the thread callback.
 * This thread is necessary because we cannot freeze the whole plugin to read the data from the kernel.
 *
 * @param ptr a pointer to `ebpf_module_t`.
 *
 * @return It always returns NULL.
 */
void *ebpf_cachestat_read_hash(void *ptr)
{
    read_thread_closed = 0;

    heartbeat_t hb;
    heartbeat_init(&hb);

    ebpf_module_t *em = (ebpf_module_t *)ptr;

    usec_t step = NETDATA_LATENCY_CACHESTAT_SLEEP_MS * em->update_time;
    while (!close_ebpf_plugin) {
        usec_t dt = heartbeat_next(&hb, step);
        (void)dt;

        read_global_table();
    }

    read_thread_closed = 1;
    return NULL;
}
/**
 * Send global
 *
 * Send global charts to Netdata
 */
static void cachestat_send_global(netdata_publish_cachestat_t *publish)
{
    calculate_stats(publish);

    netdata_publish_syscall_t *ptr = cachestat_counter_publish_aggregated;
    ebpf_one_dimension_write_charts(
        NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_HIT_RATIO_CHART, ptr[NETDATA_CACHESTAT_IDX_RATIO].dimension,
        publish->ratio);

    ebpf_one_dimension_write_charts(
        NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_DIRTY_CHART, ptr[NETDATA_CACHESTAT_IDX_DIRTY].dimension,
        cachestat_hash_values[NETDATA_KEY_CALLS_MARK_BUFFER_DIRTY]);

    ebpf_one_dimension_write_charts(
        NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_HIT_CHART, ptr[NETDATA_CACHESTAT_IDX_HIT].dimension, publish->hit);

    ebpf_one_dimension_write_charts(
        NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_MISSES_CHART, ptr[NETDATA_CACHESTAT_IDX_MISS].dimension,
        publish->miss);
}
/**
 * Cachestat sum PIDs
 *
 * Sum values for all PIDs associated with a group
 *
 * @param publish output structure.
 * @param root    linked list with the PIDs of the target.
 */
void ebpf_cachestat_sum_pids(netdata_publish_cachestat_t *publish, struct pid_on_target *root)
{
    memcpy(&publish->prev, &publish->current, sizeof(publish->current));
    memset(&publish->current, 0, sizeof(publish->current));

    netdata_cachestat_pid_t *dst = &publish->current;
    while (root) {
        int32_t pid = root->pid;
        netdata_publish_cachestat_t *w = cachestat_pid[pid];
        if (w) {
            netdata_cachestat_pid_t *src = &w->current;
            dst->account_page_dirtied += src->account_page_dirtied;
            dst->add_to_page_cache_lru += src->add_to_page_cache_lru;
            dst->mark_buffer_dirty += src->mark_buffer_dirty;
            dst->mark_page_accessed += src->mark_page_accessed;
        }

        root = root->next;
    }
}
/**
 * Send data to Netdata calling auxiliary functions.
 *
 * @param root the target list.
 */
void ebpf_cache_send_apps_data(struct target *root)
{
    struct target *w;
    collected_number value;

    write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_CACHESTAT_HIT_RATIO_CHART);
    for (w = root; w; w = w->next) {
        if (unlikely(w->exposed && w->processes)) {
            ebpf_cachestat_sum_pids(&w->cachestat, w->root_pid);
            netdata_cachestat_pid_t *current = &w->cachestat.current;
            netdata_cachestat_pid_t *prev = &w->cachestat.prev;

            uint64_t mpa = current->mark_page_accessed - prev->mark_page_accessed;
            uint64_t mbd = current->mark_buffer_dirty - prev->mark_buffer_dirty;
            w->cachestat.dirty = current->mark_buffer_dirty;
            uint64_t apcl = current->add_to_page_cache_lru - prev->add_to_page_cache_lru;
            uint64_t apd = current->account_page_dirtied - prev->account_page_dirtied;

            cachestat_update_publish(&w->cachestat, mpa, mbd, apcl, apd);
            value = (collected_number) w->cachestat.ratio;
            // Here we are using a different approach to have a smoother chart
            write_chart_dimension(w->name, value);
        }
    }
    write_end_chart();

    write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_CACHESTAT_DIRTY_CHART);
    for (w = root; w; w = w->next) {
        if (unlikely(w->exposed && w->processes)) {
            value = (collected_number) w->cachestat.dirty;
            write_chart_dimension(w->name, value);
        }
    }
    write_end_chart();

    write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_CACHESTAT_HIT_CHART);
    for (w = root; w; w = w->next) {
        if (unlikely(w->exposed && w->processes)) {
            value = (collected_number) w->cachestat.hit;
            write_chart_dimension(w->name, value);
        }
    }
    write_end_chart();

    write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_CACHESTAT_MISSES_CHART);
    for (w = root; w; w = w->next) {
        if (unlikely(w->exposed && w->processes)) {
            value = (collected_number) w->cachestat.miss;
            write_chart_dimension(w->name, value);
        }
    }
    write_end_chart();
}
/**
 * Main loop for this collector.
 */
static void cachestat_collector(ebpf_module_t *em)
{
    cachestat_threads.thread = mallocz(sizeof(netdata_thread_t));
    cachestat_threads.start_routine = ebpf_cachestat_read_hash;

    map_fd = cachestat_data.map_fd;

    netdata_thread_create(cachestat_threads.thread, cachestat_threads.name, NETDATA_THREAD_OPTION_JOINABLE,
                          ebpf_cachestat_read_hash, em);

    netdata_publish_cachestat_t publish;
    memset(&publish, 0, sizeof(publish));
    int apps = em->apps_charts;
    while (!close_ebpf_plugin) {
        pthread_mutex_lock(&collect_data_mutex);
        pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex);

        if (apps)
            read_apps_table();

        pthread_mutex_lock(&lock);

        cachestat_send_global(&publish);

        if (apps)
            ebpf_cache_send_apps_data(apps_groups_root_target);

        pthread_mutex_unlock(&lock);
        pthread_mutex_unlock(&collect_data_mutex);
    }
}
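
/*
 * Locking sketch for the loop above (assuming the usual eBPF plugin setup):
 * collect_data_cond_var is signaled by the process-collection thread once
 * fresh PID data is available, collect_data_mutex protects that shared PID
 * data while we read it, and the global `lock` serializes chart output with
 * the other collectors writing to stdout.
 */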
/*****************************************************************
 *
 *  INITIALIZE THREAD
 *
 *****************************************************************/

/**
 * Create global charts
 *
 * Call ebpf_create_chart to create the charts for the collector.
 */
static void ebpf_create_memory_charts()
{
    ebpf_create_chart(NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_HIT_RATIO_CHART,
                      "The hit ratio is calculated using total cache accesses without dirties divided by total accesses, accounting for read misses.",
                      EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_CACHESTAT_SUBMENU,
                      NULL,
                      NETDATA_EBPF_CHART_TYPE_LINE,
                      21100,
                      ebpf_create_global_dimension,
                      cachestat_counter_publish_aggregated, 1);

    ebpf_create_chart(NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_DIRTY_CHART,
                      "Number of dirty pages added to the page cache.",
                      EBPF_CACHESTAT_DIMENSION_PAGE, NETDATA_CACHESTAT_SUBMENU,
                      NULL,
                      NETDATA_EBPF_CHART_TYPE_LINE,
                      21101,
                      ebpf_create_global_dimension,
                      &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_DIRTY], 1);

    ebpf_create_chart(NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_HIT_CHART,
                      "Function calls that Netdata counts as page cache hits.",
                      EBPF_CACHESTAT_DIMENSION_HITS, NETDATA_CACHESTAT_SUBMENU,
                      NULL,
                      NETDATA_EBPF_CHART_TYPE_LINE,
                      21102,
                      ebpf_create_global_dimension,
                      &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_HIT], 1);

    ebpf_create_chart(NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_MISSES_CHART,
                      "Function calls that Netdata counts as page cache misses.",
                      EBPF_CACHESTAT_DIMENSION_MISSES, NETDATA_CACHESTAT_SUBMENU,
                      NULL,
                      NETDATA_EBPF_CHART_TYPE_LINE,
                      21103,
                      ebpf_create_global_dimension,
                      &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_MISS], 1);

    fflush(stdout);
}
/**
 * Allocate vectors used with this thread.
 *
 * We do not test the return value, because callocz does that and shuts down the
 * software in case it was not possible to allocate.
 *
 * @param length is the length of the vectors used inside the collector.
 */
static void ebpf_cachestat_allocate_global_vectors(size_t length)
{
    cachestat_pid = callocz((size_t)pid_max, sizeof(netdata_publish_cachestat_t *));
    cachestat_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_cachestat_pid_t));

    cachestat_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));

    memset(cachestat_hash_values, 0, length * sizeof(netdata_idx_t));
    memset(cachestat_counter_aggregated_data, 0, length * sizeof(netdata_syscall_stat_t));
    memset(cachestat_counter_publish_aggregated, 0, length * sizeof(netdata_publish_syscall_t));
}
/*****************************************************************
 *
 *  MAIN THREAD
 *
 *****************************************************************/

/**
 * Cachestat thread
 *
 * Thread used to run the cachestat collector.
 *
 * @param ptr a pointer to `struct ebpf_module`
 *
 * @return It always returns NULL
 */
void *ebpf_cachestat_thread(void *ptr)
{
    netdata_thread_cleanup_push(ebpf_cachestat_cleanup, ptr);

    ebpf_module_t *em = (ebpf_module_t *)ptr;
    em->maps = cachestat_maps;
    fill_ebpf_data(&cachestat_data);

    ebpf_update_pid_table(&cachestat_maps[0], em);

    if (!em->enabled)
        goto endcachestat;

    pthread_mutex_lock(&lock);
    ebpf_cachestat_allocate_global_vectors(NETDATA_CACHESTAT_END);
    if (ebpf_update_kernel(&cachestat_data)) {
        pthread_mutex_unlock(&lock);
        goto endcachestat;
    }

    probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects, cachestat_data.map_fd);
    if (!probe_links) {
        pthread_mutex_unlock(&lock);
        goto endcachestat;
    }

    int algorithms[NETDATA_CACHESTAT_END] = {
        NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX
    };

    ebpf_global_labels(cachestat_counter_aggregated_data, cachestat_counter_publish_aggregated,
                       cachestat_counter_dimension_name, cachestat_counter_dimension_name,
                       algorithms, NETDATA_CACHESTAT_END);

    ebpf_create_memory_charts();

    pthread_mutex_unlock(&lock);

    cachestat_collector(em);

endcachestat:
    netdata_thread_cleanup_pop(1);
    return NULL;
}