// SPDX-License-Identifier: GPL-3.0-or-later

#include "ebpf.h"
#include "ebpf_cachestat.h"

static ebpf_data_t cachestat_data;
netdata_publish_cachestat_t **cachestat_pid;

static struct bpf_link **probe_links = NULL;
static struct bpf_object *objects = NULL;

static char *cachestat_counter_dimension_name[NETDATA_CACHESTAT_END] = { "ratio", "dirty", "hit",
                                                                         "miss" };
static netdata_syscall_stat_t cachestat_counter_aggregated_data[NETDATA_CACHESTAT_END];
static netdata_publish_syscall_t cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_END];

netdata_cachestat_pid_t *cachestat_vector = NULL;

static netdata_idx_t cachestat_hash_values[NETDATA_CACHESTAT_END];
static netdata_idx_t *cachestat_values = NULL;

static int read_thread_closed = 1;

struct netdata_static_thread cachestat_threads = {"CACHESTAT KERNEL", NULL, NULL, 1,
                                                  NULL, NULL, NULL};

static ebpf_local_maps_t cachestat_maps[] = {{.name = "cstat_global", .internal_input = NETDATA_CACHESTAT_END,
                                              .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
                                              .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
                                             {.name = "cstat_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
                                              .user_input = 0,
                                              .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
                                              .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
                                             {.name = "cstat_ctrl", .internal_input = NETDATA_CONTROLLER_END,
                                              .user_input = 0,
                                              .type = NETDATA_EBPF_MAP_CONTROLLER,
                                              .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
                                             {.name = NULL, .internal_input = 0, .user_input = 0,
                                              .type = NETDATA_EBPF_MAP_CONTROLLER,
                                              .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}};
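
/*
 * The maps above are expected to mirror the tables defined by the kernel-side program:
 * `cstat_global` keeps one slot per global counter, `cstat_pid` keeps one
 * netdata_cachestat_pid_t entry per PID (and is resizable, see ebpf_update_pid_table()
 * further below), and `cstat_ctrl` carries controller values used to steer the kernel
 * program. The entry with a NULL name appears to act as the list terminator. The
 * kernel-side layout itself is an assumption here; only the names, sizes and types
 * come from this file.
 */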

struct config cachestat_config = { .first_section = NULL,
                                   .last_section = NULL,
                                   .mutex = NETDATA_MUTEX_INITIALIZER,
                                   .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
                                              .rwlock = AVL_LOCK_INITIALIZER } };

/*****************************************************************
 *
 *  FUNCTIONS TO CLOSE THE THREAD
 *
 *****************************************************************/

/**
 * Clean PID structures
 *
 * Clean the allocated structures.
 */
void clean_cachestat_pid_structures() {
    struct pid_stat *pids = root_of_pids;
    while (pids) {
        freez(cachestat_pid[pids->pid]);

        pids = pids->next;
    }
}

/**
 * Clean up the main thread.
 *
 * @param ptr thread data.
 */
static void ebpf_cachestat_cleanup(void *ptr)
{
    ebpf_module_t *em = (ebpf_module_t *)ptr;
    if (!em->enabled)
        return;

    heartbeat_t hb;
    heartbeat_init(&hb);
    uint32_t tick = 2 * USEC_PER_MS;
    while (!read_thread_closed) {
        usec_t dt = heartbeat_next(&hb, tick);
        UNUSED(dt);
    }

    ebpf_cleanup_publish_syscall(cachestat_counter_publish_aggregated);

    freez(cachestat_vector);
    freez(cachestat_values);

    if (probe_links) {
        struct bpf_program *prog;
        size_t i = 0;
        bpf_object__for_each_program(prog, objects) {
            bpf_link__destroy(probe_links[i]);
            i++;
        }
        bpf_object__close(objects);
    }
}

/*****************************************************************
 *
 *  COMMON FUNCTIONS
 *
 *****************************************************************/

/**
 * Update publish
 *
 * Update publish values before writing the dimensions.
 *
 * @param out  structure that will receive the data.
 * @param mpa  calls to mark_page_accessed during the last second.
 * @param mbd  calls to mark_buffer_dirty during the last second.
 * @param apcl calls to add_to_page_cache_lru during the last second.
 * @param apd  calls to account_page_dirtied during the last second.
 */
void cachestat_update_publish(netdata_publish_cachestat_t *out, uint64_t mpa, uint64_t mbd,
                              uint64_t apcl, uint64_t apd)
{
    // Adapted algorithm from https://github.com/iovisor/bcc/blob/master/tools/cachestat.py#L126-L138
    calculated_number total = (calculated_number) (((long long)mpa) - ((long long)mbd));
    if (total < 0)
        total = 0;

    calculated_number misses = (calculated_number) (((long long)apcl) - ((long long)apd));
    if (misses < 0)
        misses = 0;

    // If hits < 0, the misses are probably overestimated because page cache read-ahead
    // added more pages than needed. In this case, assume misses equal total and reset hits.
    calculated_number hits = total - misses;
    if (hits < 0) {
        misses = total;
        hits = 0;
    }

    calculated_number ratio = (total > 0) ? hits/total : 1;

    out->ratio = (long long)(ratio*100);
    out->hit = (long long)hits;
    out->miss = (long long)misses;
}
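
/*
 * Worked example for cachestat_update_publish() above, using made-up numbers:
 * with mpa = 1000, mbd = 100, apcl = 300 and apd = 50 during the last second,
 *
 *   total  = 1000 - 100 = 900 cache accesses,
 *   misses =  300 -  50 = 250,
 *   hits   =  900 - 250 = 650,
 *   ratio  =  650 / 900 ~= 0.72, published as 72 because out->ratio stores a percentage.
 *
 * When read-ahead inflates apcl so that misses exceed total, hits would go negative,
 * which is why the function clamps hits to zero and treats everything as misses.
 */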

/**
 * Save previous values
 *
 * Save the values used in this iteration.
 *
 * @param publish the structure where the previous values are stored.
 */
static void save_previous_values(netdata_publish_cachestat_t *publish) {
    publish->prev.mark_page_accessed = cachestat_hash_values[NETDATA_KEY_CALLS_MARK_PAGE_ACCESSED];
    publish->prev.account_page_dirtied = cachestat_hash_values[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED];
    publish->prev.add_to_page_cache_lru = cachestat_hash_values[NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU];
    publish->prev.mark_buffer_dirty = cachestat_hash_values[NETDATA_KEY_CALLS_MARK_BUFFER_DIRTY];
}

/**
 * Calculate statistics
 *
 * @param publish the structure where we will store the data.
 */
static void calculate_stats(netdata_publish_cachestat_t *publish) {
    if (!publish->prev.mark_page_accessed) {
        save_previous_values(publish);
        return;
    }

    uint64_t mpa = cachestat_hash_values[NETDATA_KEY_CALLS_MARK_PAGE_ACCESSED] - publish->prev.mark_page_accessed;
    uint64_t mbd = cachestat_hash_values[NETDATA_KEY_CALLS_MARK_BUFFER_DIRTY] - publish->prev.mark_buffer_dirty;
    uint64_t apcl = cachestat_hash_values[NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU] - publish->prev.add_to_page_cache_lru;
    uint64_t apd = cachestat_hash_values[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED] - publish->prev.account_page_dirtied;

    save_previous_values(publish);

    // We are changing the original algorithm to have a smoother ratio.
    cachestat_update_publish(publish, mpa, mbd, apcl, apd);
}
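
/*
 * calculate_stats() works on deltas because the kernel tables hold monotonically
 * increasing call counters. On the first iteration there is no previous sample,
 * so it only stores the current counters and returns; from the second iteration
 * on, every call publishes "calls during the last interval".
 */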

/*****************************************************************
 *
 *  APPS
 *
 *****************************************************************/

/**
 * Apps Accumulator
 *
 * Sum all values read from the kernel and store the total in the first position of the vector.
 *
 * @param out the vector with the values read.
 */
static void cachestat_apps_accumulator(netdata_cachestat_pid_t *out)
{
    int i, end = (running_on_kernel >= NETDATA_KERNEL_V4_15) ? ebpf_nprocs : 1;
    netdata_cachestat_pid_t *total = &out[0];
    for (i = 1; i < end; i++) {
        netdata_cachestat_pid_t *w = &out[i];
        total->account_page_dirtied += w->account_page_dirtied;
        total->add_to_page_cache_lru += w->add_to_page_cache_lru;
        total->mark_buffer_dirty += w->mark_buffer_dirty;
        total->mark_page_accessed += w->mark_page_accessed;
    }
}
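
/*
 * cachestat_apps_accumulator() assumes that on kernels >= 4.15 the PID table is
 * per-CPU, so a single lookup fills one netdata_cachestat_pid_t slot per processor
 * and the loop folds them into out[0]; on older kernels only out[0] is populated.
 * A minimal sketch of the reduction, with hypothetical values for a 2-CPU box:
 *
 *   out[0].mark_page_accessed = 10;   // copy read for CPU 0
 *   out[1].mark_page_accessed = 4;    // copy read for CPU 1
 *   cachestat_apps_accumulator(out);  // out[0].mark_page_accessed == 14
 */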

/**
 * Save PID values
 *
 * Save the current values inside the structure.
 *
 * @param out     vector used to plot charts.
 * @param publish vector with the values read from the hash tables.
 */
static inline void cachestat_save_pid_values(netdata_publish_cachestat_t *out, netdata_cachestat_pid_t *publish)
{
    if (!out->current.mark_page_accessed) {
        memcpy(&out->current, &publish[0], sizeof(netdata_cachestat_pid_t));
        return;
    }

    memcpy(&out->prev, &out->current, sizeof(netdata_cachestat_pid_t));
    memcpy(&out->current, &publish[0], sizeof(netdata_cachestat_pid_t));
}

/**
 * Fill PID
 *
 * Fill PID structures.
 *
 * @param current_pid the PID we are collecting data for.
 * @param publish     the values read from the hash tables.
 */
static void cachestat_fill_pid(uint32_t current_pid, netdata_cachestat_pid_t *publish)
{
    netdata_publish_cachestat_t *curr = cachestat_pid[current_pid];
    if (!curr) {
        curr = callocz(1, sizeof(netdata_publish_cachestat_t));
        cachestat_pid[current_pid] = curr;

        cachestat_save_pid_values(curr, publish);
        return;
    }

    cachestat_save_pid_values(curr, publish);
}

/**
 * Read APPS table
 *
 * Read the apps table and store the data inside the structure.
 */
static void read_apps_table()
{
    netdata_cachestat_pid_t *cv = cachestat_vector;
    uint32_t key;
    struct pid_stat *pids = root_of_pids;
    int fd = cachestat_maps[NETDATA_CACHESTAT_PID_STATS].map_fd;
    size_t length = sizeof(netdata_cachestat_pid_t)*ebpf_nprocs;
    while (pids) {
        key = pids->pid;

        if (bpf_map_lookup_elem(fd, &key, cv)) {
            pids = pids->next;
            continue;
        }

        cachestat_apps_accumulator(cv);

        cachestat_fill_pid(key, cv);

        // We zero the vector to avoid passing data read from one process to another.
        memset(cv, 0, length);

        pids = pids->next;
    }
}
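
/*
 * In read_apps_table() a non-zero return from bpf_map_lookup_elem() means the PID
 * has no entry in `cstat_pid` (the process never hit the monitored functions), so
 * it is simply skipped. The vector is sized ebpf_nprocs because, as assumed in
 * cachestat_apps_accumulator(), a per-CPU map returns one copy per processor, and
 * it is zeroed after every PID so leftovers never leak into the next lookup.
 */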

/**
 * Create apps charts
 *
 * Call ebpf_create_charts_on_apps to create the charts on the apps submenu.
 *
 * @param em  a pointer to the structure with the default values.
 * @param ptr a pointer to the targets list.
 */
void ebpf_cachestat_create_apps_charts(struct ebpf_module *em, void *ptr)
{
    UNUSED(em);
    struct target *root = ptr;
    ebpf_create_charts_on_apps(NETDATA_CACHESTAT_HIT_RATIO_CHART,
                               "Ratio of hit pages to total cache accesses, excluding dirty pages.",
                               EBPF_COMMON_DIMENSION_PERCENTAGE,
                               NETDATA_APPS_CACHESTAT_GROUP,
                               NETDATA_EBPF_CHART_TYPE_LINE,
                               20090,
                               ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
                               root);

    ebpf_create_charts_on_apps(NETDATA_CACHESTAT_DIRTY_CHART,
                               "Number of pages marked as dirty. A dirty page holds data that still needs to be written back to the device.",
                               EBPF_CACHESTAT_DIMENSION_PAGE,
                               NETDATA_APPS_CACHESTAT_GROUP,
                               NETDATA_EBPF_CHART_TYPE_STACKED,
                               20091,
                               ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
                               root);

    ebpf_create_charts_on_apps(NETDATA_CACHESTAT_HIT_CHART,
                               "Number of cache accesses, excluding dirty pages and page additions.",
                               EBPF_CACHESTAT_DIMENSION_HITS,
                               NETDATA_APPS_CACHESTAT_GROUP,
                               NETDATA_EBPF_CHART_TYPE_STACKED,
                               20092,
                               ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
                               root);

    ebpf_create_charts_on_apps(NETDATA_CACHESTAT_MISSES_CHART,
                               "Pages added to the page cache, excluding dirty pages.",
                               EBPF_CACHESTAT_DIMENSION_MISSES,
                               NETDATA_APPS_CACHESTAT_GROUP,
                               NETDATA_EBPF_CHART_TYPE_STACKED,
                               20093,
                               ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
                               root);
}

/*****************************************************************
 *
 *  MAIN LOOP
 *
 *****************************************************************/

/**
 * Read global counter
 *
 * Read the table with the number of calls for all functions.
 */
static void read_global_table()
{
    uint32_t idx;
    netdata_idx_t *val = cachestat_hash_values;
    netdata_idx_t *stored = cachestat_values;
    int fd = cachestat_maps[NETDATA_CACHESTAT_GLOBAL_STATS].map_fd;

    for (idx = NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU; idx < NETDATA_CACHESTAT_END; idx++) {
        if (!bpf_map_lookup_elem(fd, &idx, stored)) {
            int i;
            int end = ebpf_nprocs;
            netdata_idx_t total = 0;
            for (i = 0; i < end; i++)
                total += stored[i];

            val[idx] = total;
        }
    }
}
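
/*
 * read_global_table() applies the same per-CPU reduction to the global counters:
 * each key between NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU and NETDATA_CACHESTAT_END
 * is looked up once, the per-CPU copies stored in `cachestat_values` are summed,
 * and the total lands in `cachestat_hash_values[idx]` for calculate_stats() to use.
 */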

/**
 * Cachestat read hash
 *
 * This is the thread callback.
 * This thread is necessary, because we cannot freeze the whole plugin while reading the data from the kernel.
 *
 * @param ptr a pointer to `struct ebpf_module`.
 *
 * @return It always returns NULL.
 */
void *ebpf_cachestat_read_hash(void *ptr)
{
    read_thread_closed = 0;

    heartbeat_t hb;
    heartbeat_init(&hb);

    ebpf_module_t *em = (ebpf_module_t *)ptr;

    usec_t step = NETDATA_LATENCY_CACHESTAT_SLEEP_MS * em->update_time;
    while (!close_ebpf_plugin) {
        usec_t dt = heartbeat_next(&hb, step);
        (void)dt;

        read_global_table();
    }

    read_thread_closed = 1;
    return NULL;
}
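
/*
 * The reader thread and ebpf_cachestat_cleanup() coordinate through
 * `read_thread_closed`: the reader clears it on start and sets it again after it
 * observes `close_ebpf_plugin`, while the cleanup handler spins on a short
 * heartbeat until the flag is set before freeing the shared vectors. The polling
 * period is NETDATA_LATENCY_CACHESTAT_SLEEP_MS scaled by the module update time.
 */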

/**
 * Send global
 *
 * Send global charts to Netdata.
 *
 * @param publish the structure used to store the calculated values.
 */
static void cachestat_send_global(netdata_publish_cachestat_t *publish)
{
    calculate_stats(publish);

    netdata_publish_syscall_t *ptr = cachestat_counter_publish_aggregated;
    ebpf_one_dimension_write_charts(
        NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_HIT_RATIO_CHART, ptr[NETDATA_CACHESTAT_IDX_RATIO].dimension,
        publish->ratio);

    ebpf_one_dimension_write_charts(
        NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_DIRTY_CHART, ptr[NETDATA_CACHESTAT_IDX_DIRTY].dimension,
        cachestat_hash_values[NETDATA_KEY_CALLS_MARK_BUFFER_DIRTY]);

    ebpf_one_dimension_write_charts(
        NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_HIT_CHART, ptr[NETDATA_CACHESTAT_IDX_HIT].dimension, publish->hit);

    ebpf_one_dimension_write_charts(
        NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_MISSES_CHART, ptr[NETDATA_CACHESTAT_IDX_MISS].dimension,
        publish->miss);
}

/**
 * Cachestat sum PIDs
 *
 * Sum values for all PIDs associated with a group.
 *
 * @param publish output structure.
 * @param root    structure with the list of PIDs.
 */
void ebpf_cachestat_sum_pids(netdata_publish_cachestat_t *publish, struct pid_on_target *root)
{
    memcpy(&publish->prev, &publish->current, sizeof(publish->current));
    memset(&publish->current, 0, sizeof(publish->current));

    netdata_cachestat_pid_t *dst = &publish->current;
    while (root) {
        int32_t pid = root->pid;
        netdata_publish_cachestat_t *w = cachestat_pid[pid];
        if (w) {
            netdata_cachestat_pid_t *src = &w->current;
            dst->account_page_dirtied += src->account_page_dirtied;
            dst->add_to_page_cache_lru += src->add_to_page_cache_lru;
            dst->mark_buffer_dirty += src->mark_buffer_dirty;
            dst->mark_page_accessed += src->mark_page_accessed;
        }

        root = root->next;
    }
}

/**
 * Send data to Netdata calling auxiliary functions.
 *
 * @param root the target list.
 */
void ebpf_cache_send_apps_data(struct target *root)
{
    struct target *w;
    collected_number value;

    write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_CACHESTAT_HIT_RATIO_CHART);
    for (w = root; w; w = w->next) {
        if (unlikely(w->exposed && w->processes)) {
            ebpf_cachestat_sum_pids(&w->cachestat, w->root_pid);
            netdata_cachestat_pid_t *current = &w->cachestat.current;
            netdata_cachestat_pid_t *prev = &w->cachestat.prev;

            uint64_t mpa = current->mark_page_accessed - prev->mark_page_accessed;
            uint64_t mbd = current->mark_buffer_dirty - prev->mark_buffer_dirty;
            w->cachestat.dirty = current->mark_buffer_dirty;
            uint64_t apcl = current->add_to_page_cache_lru - prev->add_to_page_cache_lru;
            uint64_t apd = current->account_page_dirtied - prev->account_page_dirtied;

            cachestat_update_publish(&w->cachestat, mpa, mbd, apcl, apd);
            value = (collected_number) w->cachestat.ratio;
            // Here we use a different approach to get a smoother chart.
            write_chart_dimension(w->name, value);
        }
    }
    write_end_chart();

    write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_CACHESTAT_DIRTY_CHART);
    for (w = root; w; w = w->next) {
        if (unlikely(w->exposed && w->processes)) {
            value = (collected_number) w->cachestat.dirty;
            write_chart_dimension(w->name, value);
        }
    }
    write_end_chart();

    write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_CACHESTAT_HIT_CHART);
    for (w = root; w; w = w->next) {
        if (unlikely(w->exposed && w->processes)) {
            value = (collected_number) w->cachestat.hit;
            write_chart_dimension(w->name, value);
        }
    }
    write_end_chart();

    write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_CACHESTAT_MISSES_CHART);
    for (w = root; w; w = w->next) {
        if (unlikely(w->exposed && w->processes)) {
            value = (collected_number) w->cachestat.miss;
            write_chart_dimension(w->name, value);
        }
    }
    write_end_chart();
}
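
/*
 * The write_begin_chart()/write_chart_dimension()/write_end_chart() trio is assumed
 * to emit Netdata's plain-text external plugin protocol on stdout, roughly:
 *
 *   BEGIN apps.cachestat_ratio
 *   SET firefox = 72
 *   END
 *
 * The chart and dimension names here are only illustrative; the real identifiers
 * come from NETDATA_APPS_FAMILY, the chart constants and each target's name.
 */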

/**
 * Main loop for this collector.
 */
static void cachestat_collector(ebpf_module_t *em)
{
    cachestat_threads.thread = mallocz(sizeof(netdata_thread_t));
    cachestat_threads.start_routine = ebpf_cachestat_read_hash;

    netdata_thread_create(cachestat_threads.thread, cachestat_threads.name, NETDATA_THREAD_OPTION_JOINABLE,
                          ebpf_cachestat_read_hash, em);

    netdata_publish_cachestat_t publish;
    memset(&publish, 0, sizeof(publish));
    int apps = em->apps_charts;
    while (!close_ebpf_plugin) {
        pthread_mutex_lock(&collect_data_mutex);
        pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex);

        if (apps)
            read_apps_table();

        pthread_mutex_lock(&lock);

        cachestat_send_global(&publish);

        if (apps)
            ebpf_cache_send_apps_data(apps_groups_root_target);

        pthread_mutex_unlock(&lock);
        pthread_mutex_unlock(&collect_data_mutex);
    }
}
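
/*
 * cachestat_collector() appears to rely on two layers of synchronization:
 * collect_data_mutex plus collect_data_cond_var pace the loop to the thread that
 * refreshes root_of_pids, so read_apps_table() sees a consistent PID list, while
 * the global `lock` serializes chart output with the other eBPF collectors.
 */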

/*****************************************************************
 *
 *  INITIALIZE THREAD
 *
 *****************************************************************/

/**
 * Create global charts
 *
 * Call ebpf_create_chart to create the charts for the collector.
 */
static void ebpf_create_memory_charts()
{
    ebpf_create_chart(NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_HIT_RATIO_CHART,
                      "Ratio of page cache hits to total cache accesses, excluding dirty pages.",
                      EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_CACHESTAT_SUBMENU,
                      NULL,
                      NETDATA_EBPF_CHART_TYPE_LINE,
                      21100,
                      ebpf_create_global_dimension,
                      cachestat_counter_publish_aggregated, 1);

    ebpf_create_chart(NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_DIRTY_CHART,
                      "Number of dirty pages added to the page cache.",
                      EBPF_CACHESTAT_DIMENSION_PAGE, NETDATA_CACHESTAT_SUBMENU,
                      NULL,
                      NETDATA_EBPF_CHART_TYPE_LINE,
                      21101,
                      ebpf_create_global_dimension,
                      &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_DIRTY], 1);

    ebpf_create_chart(NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_HIT_CHART,
                      "Cache accesses that did not require adding a new page (hits).",
                      EBPF_CACHESTAT_DIMENSION_HITS, NETDATA_CACHESTAT_SUBMENU,
                      NULL,
                      NETDATA_EBPF_CHART_TYPE_LINE,
                      21102,
                      ebpf_create_global_dimension,
                      &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_HIT], 1);

    ebpf_create_chart(NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_MISSES_CHART,
                      "Cache accesses that required adding a page to the page cache (misses).",
                      EBPF_CACHESTAT_DIMENSION_MISSES, NETDATA_CACHESTAT_SUBMENU,
                      NULL,
                      NETDATA_EBPF_CHART_TYPE_LINE,
                      21103,
                      ebpf_create_global_dimension,
                      &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_MISS], 1);

    fflush(stdout);
}

/**
 * Allocate vectors used with this thread.
 *
 * We do not check the return value, because callocz shuts the software down
 * when it cannot allocate memory.
 *
 * @param length is the length of the vectors used inside the collector.
 */
static void ebpf_cachestat_allocate_global_vectors(size_t length)
{
    cachestat_pid = callocz((size_t)pid_max, sizeof(netdata_publish_cachestat_t *));
    cachestat_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_cachestat_pid_t));

    cachestat_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));

    memset(cachestat_hash_values, 0, length * sizeof(netdata_idx_t));
    memset(cachestat_counter_aggregated_data, 0, length * sizeof(netdata_syscall_stat_t));
    memset(cachestat_counter_publish_aggregated, 0, length * sizeof(netdata_publish_syscall_t));
}
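
/*
 * Sizing rationale for the vectors above: `cachestat_pid` holds one pointer per
 * possible PID so a lookup is a direct index by PID, while `cachestat_vector` and
 * `cachestat_values` hold one slot per processor because per-CPU map lookups are
 * assumed to return ebpf_nprocs copies of each value (see cachestat_apps_accumulator()).
 */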

/*****************************************************************
 *
 *  MAIN THREAD
 *
 *****************************************************************/

/**
 * Cachestat thread
 *
 * Thread used to run the cachestat collector.
 *
 * @param ptr a pointer to `struct ebpf_module`
 *
 * @return It always returns NULL
 */
void *ebpf_cachestat_thread(void *ptr)
{
    netdata_thread_cleanup_push(ebpf_cachestat_cleanup, ptr);

    ebpf_module_t *em = (ebpf_module_t *)ptr;
    em->maps = cachestat_maps;
    fill_ebpf_data(&cachestat_data);

    ebpf_update_pid_table(&cachestat_maps[NETDATA_CACHESTAT_PID_STATS], em);

    if (!em->enabled)
        goto endcachestat;

    pthread_mutex_lock(&lock);
    ebpf_cachestat_allocate_global_vectors(NETDATA_CACHESTAT_END);
    if (ebpf_update_kernel(&cachestat_data)) {
        pthread_mutex_unlock(&lock);
        goto endcachestat;
    }

    probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects, cachestat_data.map_fd);
    if (!probe_links) {
        pthread_mutex_unlock(&lock);
        goto endcachestat;
    }

    int algorithms[NETDATA_CACHESTAT_END] = {
        NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX
    };

    ebpf_global_labels(cachestat_counter_aggregated_data, cachestat_counter_publish_aggregated,
                       cachestat_counter_dimension_name, cachestat_counter_dimension_name,
                       algorithms, NETDATA_CACHESTAT_END);

    ebpf_create_memory_charts();

    pthread_mutex_unlock(&lock);

    cachestat_collector(em);

endcachestat:
    netdata_thread_cleanup_pop(1);
    return NULL;
}