proc_vmstat.c

// SPDX-License-Identifier: GPL-3.0-or-later

#include "plugin_proc.h"

#define PLUGIN_PROC_MODULE_VMSTAT_NAME "/proc/vmstat"

#define OOM_KILL_STRING "oom_kill"

int do_proc_vmstat(int update_every, usec_t dt) {
    (void)dt;

    static procfile *ff = NULL;
    static int do_swapio = -1, do_io = -1, do_pgfaults = -1, do_oom_kill = -1, do_numa = -1, do_thp = -1,
               do_zswapio = -1, do_balloon = -1, do_ksm = -1;
    static int has_numa = -1;

    static ARL_BASE *arl_base = NULL;
    static unsigned long long numa_foreign = 0ULL;
    static unsigned long long numa_hint_faults = 0ULL;
    static unsigned long long numa_hint_faults_local = 0ULL;
    static unsigned long long numa_huge_pte_updates = 0ULL;
    static unsigned long long numa_interleave = 0ULL;
    static unsigned long long numa_local = 0ULL;
    static unsigned long long numa_other = 0ULL;
    static unsigned long long numa_pages_migrated = 0ULL;
    static unsigned long long numa_pte_updates = 0ULL;
    static unsigned long long pgfault = 0ULL;
    static unsigned long long pgmajfault = 0ULL;
    static unsigned long long pgpgin = 0ULL;
    static unsigned long long pgpgout = 0ULL;
    static unsigned long long pswpin = 0ULL;
    static unsigned long long pswpout = 0ULL;
    static unsigned long long oom_kill = 0ULL;

    // THP page migration
    // static unsigned long long pgmigrate_success = 0ULL;
    // static unsigned long long pgmigrate_fail = 0ULL;
    // static unsigned long long thp_migration_success = 0ULL;
    // static unsigned long long thp_migration_fail = 0ULL;
    // static unsigned long long thp_migration_split = 0ULL;

    // Compaction cost model
    // https://lore.kernel.org/lkml/20121022080525.GB2198@suse.de/
    // static unsigned long long compact_migrate_scanned = 0ULL;
    // static unsigned long long compact_free_scanned = 0ULL;
    // static unsigned long long compact_isolated = 0ULL;

    // THP defragmentation
    static unsigned long long compact_stall = 0ULL;   // incremented when an application stalls allocating a THP
    static unsigned long long compact_fail = 0ULL;    // defragmentation events that failed
    static unsigned long long compact_success = 0ULL; // defragmentation events that succeeded

    // ?
    // static unsigned long long compact_daemon_wake = 0ULL;
    // static unsigned long long compact_daemon_migrate_scanned = 0ULL;
    // static unsigned long long compact_daemon_free_scanned = 0ULL;

    // ?
    // static unsigned long long htlb_buddy_alloc_success = 0ULL;
    // static unsigned long long htlb_buddy_alloc_fail = 0ULL;

    // ?
    // static unsigned long long cma_alloc_success = 0ULL;
    // static unsigned long long cma_alloc_fail = 0ULL;

    // ?
    // static unsigned long long unevictable_pgs_culled = 0ULL;
    // static unsigned long long unevictable_pgs_scanned = 0ULL;
    // static unsigned long long unevictable_pgs_rescued = 0ULL;
    // static unsigned long long unevictable_pgs_mlocked = 0ULL;
    // static unsigned long long unevictable_pgs_munlocked = 0ULL;
    // static unsigned long long unevictable_pgs_cleared = 0ULL;
    // static unsigned long long unevictable_pgs_stranded = 0ULL;

    // THP handling of page faults
    static unsigned long long thp_fault_alloc = 0ULL;           // is incremented every time a huge page is successfully allocated to handle a page fault; this applies both to the first time a page is faulted and to COW faults
    static unsigned long long thp_fault_fallback = 0ULL;        // is incremented if a page fault fails to allocate a huge page and instead falls back to using small pages
    static unsigned long long thp_fault_fallback_charge = 0ULL; // is incremented if a page fault fails to charge a huge page and instead falls back to using small pages, even though the allocation was successful

    // khugepaged collapsing of small pages into huge pages
    static unsigned long long thp_collapse_alloc = 0ULL;        // is incremented by khugepaged when it has found a range of pages to collapse into one huge page and has successfully allocated a new huge page to store the data
    static unsigned long long thp_collapse_alloc_failed = 0ULL; // is incremented if khugepaged found a range of pages that should be collapsed into one huge page but the allocation failed

    // THP handling of file allocations
    static unsigned long long thp_file_alloc = 0ULL;           // is incremented every time a file huge page is successfully allocated
    static unsigned long long thp_file_fallback = 0ULL;        // is incremented if a file huge page allocation is attempted but fails, falling back to using small pages
    static unsigned long long thp_file_fallback_charge = 0ULL; // is incremented if a file huge page cannot be charged and instead falls back to using small pages, even though the allocation was successful
    static unsigned long long thp_file_mapped = 0ULL;          // is incremented every time a file huge page is mapped into user address space

    // THP splitting of huge pages into small pages
    static unsigned long long thp_split_page = 0ULL;
    static unsigned long long thp_split_page_failed = 0ULL;
    static unsigned long long thp_deferred_split_page = 0ULL; // is incremented when a huge page is put onto the split queue. This happens when a huge page is partially unmapped and splitting it would free up some memory. Pages on the split queue are going to be split under memory pressure
    static unsigned long long thp_split_pmd = 0ULL;           // is incremented every time a PMD is split into a table of PTEs. This can happen, for instance, when an application calls mprotect() or munmap() on part of a huge page. It doesn't split the huge page, only the page table entry

    // ?
    // static unsigned long long thp_scan_exceed_none_pte = 0ULL;
    // static unsigned long long thp_scan_exceed_swap_pte = 0ULL;
    // static unsigned long long thp_scan_exceed_share_pte = 0ULL;
    // static unsigned long long thp_split_pud = 0ULL;

    // THP Zero Huge Page
    static unsigned long long thp_zero_page_alloc = 0ULL;        // is incremented every time a huge zero page used for THP is successfully allocated. Note that it doesn't count every map of the huge zero page, only its allocation
    static unsigned long long thp_zero_page_alloc_failed = 0ULL; // is incremented if the kernel fails to allocate a huge zero page and falls back to using small pages

    // THP Swap Out
    static unsigned long long thp_swpout = 0ULL;          // is incremented every time a huge page is swapped out in one piece, without splitting
    static unsigned long long thp_swpout_fallback = 0ULL; // is incremented if a huge page has to be split before swap-out, usually because the kernel failed to allocate contiguous swap space for it

    // memory ballooning
    // Current size of balloon is (balloon_inflate - balloon_deflate) pages
    static unsigned long long balloon_inflate = 0ULL;
    static unsigned long long balloon_deflate = 0ULL;
    static unsigned long long balloon_migrate = 0ULL;

    // ?
    // static unsigned long long swap_ra = 0ULL;
    // static unsigned long long swap_ra_hit = 0ULL;

    static unsigned long long ksm_swpin_copy = 0ULL; // is incremented every time a KSM page is copied when swapping in
    static unsigned long long cow_ksm = 0ULL;        // is incremented every time a KSM page triggers copy on write (COW): when a user writes to a KSM page, the kernel must make a private copy

    // zswap
    static unsigned long long zswpin = 0ULL;
    static unsigned long long zswpout = 0ULL;

    // ?
    // static unsigned long long direct_map_level2_splits = 0ULL;
    // static unsigned long long direct_map_level3_splits = 0ULL;
    // static unsigned long long nr_unstable = 0ULL;

    if(unlikely(!ff)) {
        char filename[FILENAME_MAX + 1];
        snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/vmstat");
        ff = procfile_open(config_get("plugin:proc:/proc/vmstat", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
        if(unlikely(!ff)) return 1;
    }

    ff = procfile_readall(ff);
    if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time

    size_t lines = procfile_lines(ff), l;

    if(unlikely(!arl_base)) {
        do_swapio = config_get_boolean_ondemand("plugin:proc:/proc/vmstat", "swap i/o", CONFIG_BOOLEAN_AUTO);
        do_io = config_get_boolean("plugin:proc:/proc/vmstat", "disk i/o", CONFIG_BOOLEAN_YES);
        do_pgfaults = config_get_boolean("plugin:proc:/proc/vmstat", "memory page faults", CONFIG_BOOLEAN_YES);
        do_oom_kill = config_get_boolean("plugin:proc:/proc/vmstat", "out of memory kills", CONFIG_BOOLEAN_AUTO);
        do_numa = config_get_boolean_ondemand("plugin:proc:/proc/vmstat", "system-wide numa metric summary", CONFIG_BOOLEAN_AUTO);
        do_thp = config_get_boolean_ondemand("plugin:proc:/proc/vmstat", "transparent huge pages", CONFIG_BOOLEAN_AUTO);
        do_zswapio = config_get_boolean_ondemand("plugin:proc:/proc/vmstat", "zswap i/o", CONFIG_BOOLEAN_AUTO);
        do_balloon = config_get_boolean_ondemand("plugin:proc:/proc/vmstat", "memory ballooning", CONFIG_BOOLEAN_AUTO);
        do_ksm = config_get_boolean_ondemand("plugin:proc:/proc/vmstat", "kernel same memory", CONFIG_BOOLEAN_AUTO);
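
        // Note on arl_create(name, processor, rechecks): with a NULL
        // processor, each matched value is parsed straight into the variable
        // registered for it with arl_expect(); as far as we can tell from
        // libnetdata's adaptive re-sortable list API, the third argument
        // tunes how aggressively the ARL re-sorts its expectations to match
        // the file's line order.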
        arl_base = arl_create("vmstat", NULL, 60);
        arl_expect(arl_base, "pgfault", &pgfault);
        arl_expect(arl_base, "pgmajfault", &pgmajfault);
        arl_expect(arl_base, "pgpgin", &pgpgin);
        arl_expect(arl_base, "pgpgout", &pgpgout);
        arl_expect(arl_base, "pswpin", &pswpin);
        arl_expect(arl_base, "pswpout", &pswpout);
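
        // Not every kernel exposes "oom_kill" in /proc/vmstat (the counter
        // is relatively recent), so scan the file once for the keyword and
        // disable the chart when it is absent.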
        int has_oom_kill = 0;

        for(l = 0; l < lines; l++) {
            if(!strcmp(procfile_lineword(ff, l, 0), OOM_KILL_STRING)) {
                has_oom_kill = 1;
                break;
            }
        }

        if(has_oom_kill)
            arl_expect(arl_base, OOM_KILL_STRING, &oom_kill);
        else
            do_oom_kill = CONFIG_BOOLEAN_NO;

        if(do_numa == CONFIG_BOOLEAN_YES || (do_numa == CONFIG_BOOLEAN_AUTO &&
                                             (get_numa_node_count() >= 2 ||
                                              netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
            arl_expect(arl_base, "numa_foreign", &numa_foreign);
            arl_expect(arl_base, "numa_hint_faults_local", &numa_hint_faults_local);
            arl_expect(arl_base, "numa_hint_faults", &numa_hint_faults);
            arl_expect(arl_base, "numa_huge_pte_updates", &numa_huge_pte_updates);
            arl_expect(arl_base, "numa_interleave", &numa_interleave);
            arl_expect(arl_base, "numa_local", &numa_local);
            arl_expect(arl_base, "numa_other", &numa_other);
            arl_expect(arl_base, "numa_pages_migrated", &numa_pages_migrated);
            arl_expect(arl_base, "numa_pte_updates", &numa_pte_updates);
        }
        else {
            // Do not expect numa metrics when they are not needed.
            // By not adding them, the ARL will stop processing the file
            // when all the expected metrics are collected.
            // Also ARL will not parse their values.
            has_numa = 0;
            do_numa = CONFIG_BOOLEAN_NO;
        }

        if(do_thp == CONFIG_BOOLEAN_YES || do_thp == CONFIG_BOOLEAN_AUTO) {
            // arl_expect(arl_base, "pgmigrate_success", &pgmigrate_success);
            // arl_expect(arl_base, "pgmigrate_fail", &pgmigrate_fail);
            // arl_expect(arl_base, "thp_migration_success", &thp_migration_success);
            // arl_expect(arl_base, "thp_migration_fail", &thp_migration_fail);
            // arl_expect(arl_base, "thp_migration_split", &thp_migration_split);
            // arl_expect(arl_base, "compact_migrate_scanned", &compact_migrate_scanned);
            // arl_expect(arl_base, "compact_free_scanned", &compact_free_scanned);
            // arl_expect(arl_base, "compact_isolated", &compact_isolated);
            arl_expect(arl_base, "compact_stall", &compact_stall);
            arl_expect(arl_base, "compact_fail", &compact_fail);
            arl_expect(arl_base, "compact_success", &compact_success);
            // arl_expect(arl_base, "compact_daemon_wake", &compact_daemon_wake);
            // arl_expect(arl_base, "compact_daemon_migrate_scanned", &compact_daemon_migrate_scanned);
            // arl_expect(arl_base, "compact_daemon_free_scanned", &compact_daemon_free_scanned);
            arl_expect(arl_base, "thp_fault_alloc", &thp_fault_alloc);
            arl_expect(arl_base, "thp_fault_fallback", &thp_fault_fallback);
            arl_expect(arl_base, "thp_fault_fallback_charge", &thp_fault_fallback_charge);
            arl_expect(arl_base, "thp_collapse_alloc", &thp_collapse_alloc);
            arl_expect(arl_base, "thp_collapse_alloc_failed", &thp_collapse_alloc_failed);
            arl_expect(arl_base, "thp_file_alloc", &thp_file_alloc);
            arl_expect(arl_base, "thp_file_fallback", &thp_file_fallback);
            arl_expect(arl_base, "thp_file_fallback_charge", &thp_file_fallback_charge);
            arl_expect(arl_base, "thp_file_mapped", &thp_file_mapped);
            arl_expect(arl_base, "thp_split_page", &thp_split_page);
            arl_expect(arl_base, "thp_split_page_failed", &thp_split_page_failed);
            arl_expect(arl_base, "thp_deferred_split_page", &thp_deferred_split_page);
            arl_expect(arl_base, "thp_split_pmd", &thp_split_pmd);
            arl_expect(arl_base, "thp_zero_page_alloc", &thp_zero_page_alloc);
            arl_expect(arl_base, "thp_zero_page_alloc_failed", &thp_zero_page_alloc_failed);
            arl_expect(arl_base, "thp_swpout", &thp_swpout);
            arl_expect(arl_base, "thp_swpout_fallback", &thp_swpout_fallback);
        }

        if(do_balloon == CONFIG_BOOLEAN_YES || do_balloon == CONFIG_BOOLEAN_AUTO) {
            arl_expect(arl_base, "balloon_inflate", &balloon_inflate);
            arl_expect(arl_base, "balloon_deflate", &balloon_deflate);
            arl_expect(arl_base, "balloon_migrate", &balloon_migrate);
        }

        if(do_ksm == CONFIG_BOOLEAN_YES || do_ksm == CONFIG_BOOLEAN_AUTO) {
            arl_expect(arl_base, "ksm_swpin_copy", &ksm_swpin_copy);
            arl_expect(arl_base, "cow_ksm", &cow_ksm);
        }

        if(do_zswapio == CONFIG_BOOLEAN_YES || do_zswapio == CONFIG_BOOLEAN_AUTO) {
            arl_expect(arl_base, "zswpin", &zswpin);
            arl_expect(arl_base, "zswpout", &zswpout);
        }
    }
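
    // arl_begin()/arl_check() below walk the keyword/value pairs of
    // /proc/vmstat against the expectations registered above; arl_check()
    // returns non-zero once every expected keyword has been seen, so the
    // loop can stop without scanning the rest of the file.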
    arl_begin(arl_base);
    for(l = 0; l < lines; l++) {
        size_t words = procfile_linewords(ff, l);
        if(unlikely(words < 2)) {
            if(unlikely(words)) collector_error("Cannot read /proc/vmstat line %zu. Expected 2 params, read %zu.", l, words);
            continue;
        }

        if(unlikely(arl_check(arl_base,
                              procfile_lineword(ff, l, 0),
                              procfile_lineword(ff, l, 1)))) break;
    }

    // --------------------------------------------------------------------
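
    // Charts configured as CONFIG_BOOLEAN_AUTO are created only once their
    // counters become non-zero (or when netdata_zero_metrics_enabled forces
    // them); the corresponding do_* flag is then latched to YES so the
    // check is not repeated on subsequent iterations.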
    if(do_swapio == CONFIG_BOOLEAN_YES || (do_swapio == CONFIG_BOOLEAN_AUTO &&
                                           (pswpin || pswpout ||
                                            netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
        do_swapio = CONFIG_BOOLEAN_YES;

        static RRDSET *st_swapio = NULL;
        static RRDDIM *rd_in = NULL, *rd_out = NULL;

        if(unlikely(!st_swapio)) {
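            // rrdset_create_localhost() arguments, in order (as used at
            // every call site in this file): type, id, name, family,
            // context, title, units, plugin, module, priority,
            // update_every, chart type.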
            st_swapio = rrdset_create_localhost(
                    "system"
                    , "swapio"
                    , NULL
                    , "swap"
                    , NULL
                    , "Swap I/O"
                    , "KiB/s"
                    , PLUGIN_PROC_NAME
                    , PLUGIN_PROC_MODULE_VMSTAT_NAME
                    , NETDATA_CHART_PRIO_SYSTEM_SWAPIO
                    , update_every
                    , RRDSET_TYPE_AREA
            );
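
            // pswpin/pswpout count pages, so the page-size multiplier and
            // the 1024 divider convert them to KiB; RRD_ALGORITHM_INCREMENTAL
            // charts the per-second rate of these monotonic counters, and
            // the negative multiplier draws "out" below the zero line.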
            rd_in = rrddim_add(st_swapio, "in", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL);
            rd_out = rrddim_add(st_swapio, "out", NULL, -sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL);
        }

        rrddim_set_by_pointer(st_swapio, rd_in, pswpin);
        rrddim_set_by_pointer(st_swapio, rd_out, pswpout);
        rrdset_done(st_swapio);
    }

    // --------------------------------------------------------------------

    if(do_io) {
        static RRDSET *st_io = NULL;
        static RRDDIM *rd_in = NULL, *rd_out = NULL;

        if(unlikely(!st_io)) {
            st_io = rrdset_create_localhost(
                    "system"
                    , "pgpgio"
                    , NULL
                    , "disk"
                    , NULL
                    , "Memory Paged from/to disk"
                    , "KiB/s"
                    , PLUGIN_PROC_NAME
                    , PLUGIN_PROC_MODULE_VMSTAT_NAME
                    , NETDATA_CHART_PRIO_SYSTEM_PGPGIO
                    , update_every
                    , RRDSET_TYPE_AREA
            );
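
            // pgpgin/pgpgout are commonly documented as KiB counts, so these
            // dimensions pass the values through unscaled (an assumption
            // reflected in the chart's KiB/s units, not verified here).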
            rd_in = rrddim_add(st_io, "in", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
            rd_out = rrddim_add(st_io, "out", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
        }

        rrddim_set_by_pointer(st_io, rd_in, pgpgin);
        rrddim_set_by_pointer(st_io, rd_out, pgpgout);
        rrdset_done(st_io);
    }

    // --------------------------------------------------------------------

    if(do_pgfaults) {
        static RRDSET *st_pgfaults = NULL;
        static RRDDIM *rd_minor = NULL, *rd_major = NULL;

        if(unlikely(!st_pgfaults)) {
            st_pgfaults = rrdset_create_localhost(
                    "mem"
                    , "pgfaults"
                    , NULL
                    , "system"
                    , NULL
                    , "Memory Page Faults"
                    , "faults/s"
                    , PLUGIN_PROC_NAME
                    , PLUGIN_PROC_MODULE_VMSTAT_NAME
                    , NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS
                    , update_every
                    , RRDSET_TYPE_LINE
            );

            rrdset_flag_set(st_pgfaults, RRDSET_FLAG_DETAIL);

            rd_minor = rrddim_add(st_pgfaults, "minor", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
            rd_major = rrddim_add(st_pgfaults, "major", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
        }

        rrddim_set_by_pointer(st_pgfaults, rd_minor, pgfault);
        rrddim_set_by_pointer(st_pgfaults, rd_major, pgmajfault);
        rrdset_done(st_pgfaults);
    }

    // --------------------------------------------------------------------

    if(do_oom_kill == CONFIG_BOOLEAN_YES ||
       (do_oom_kill == CONFIG_BOOLEAN_AUTO && (oom_kill || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
        static RRDSET *st_oom_kill = NULL;
        static RRDDIM *rd_oom_kill = NULL;

        do_oom_kill = CONFIG_BOOLEAN_YES;

        if(unlikely(!st_oom_kill)) {
            st_oom_kill = rrdset_create_localhost(
                    "mem"
                    , "oom_kill"
                    , NULL
                    , "system"
                    , NULL
                    , "Out of Memory Kills"
                    , "kills/s"
                    , PLUGIN_PROC_NAME
                    , PLUGIN_PROC_MODULE_VMSTAT_NAME
                    , NETDATA_CHART_PRIO_MEM_SYSTEM_OOM_KILL
                    , update_every
                    , RRDSET_TYPE_LINE
            );

            rrdset_flag_set(st_oom_kill, RRDSET_FLAG_DETAIL);

            rd_oom_kill = rrddim_add(st_oom_kill, "kills", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
        }

        rrddim_set_by_pointer(st_oom_kill, rd_oom_kill, oom_kill);
        rrdset_done(st_oom_kill);
    }

    // --------------------------------------------------------------------

    // Ondemand criteria for NUMA. Since this won't change at run time, we
    // check it only once. We check whether the node count is >= 2 because
    // single-node systems have uninteresting statistics (since all accesses
    // are local).
    if(unlikely(has_numa == -1))
        has_numa = (numa_local || numa_foreign || numa_interleave || numa_other || numa_pte_updates ||
                    numa_huge_pte_updates || numa_hint_faults || numa_hint_faults_local || numa_pages_migrated) ? 1 : 0;

    if(do_numa == CONFIG_BOOLEAN_YES || (do_numa == CONFIG_BOOLEAN_AUTO && has_numa)) {
        do_numa = CONFIG_BOOLEAN_YES;

        static RRDSET *st_numa = NULL;
        static RRDDIM *rd_local = NULL, *rd_foreign = NULL, *rd_interleave = NULL, *rd_other = NULL,
                      *rd_pte_updates = NULL, *rd_huge_pte_updates = NULL, *rd_hint_faults = NULL,
                      *rd_hint_faults_local = NULL, *rd_pages_migrated = NULL;

        if(unlikely(!st_numa)) {
            st_numa = rrdset_create_localhost(
                    "mem"
                    , "numa"
                    , NULL
                    , "numa"
                    , NULL
                    , "NUMA events"
                    , "events/s"
                    , PLUGIN_PROC_NAME
                    , PLUGIN_PROC_MODULE_VMSTAT_NAME
                    , NETDATA_CHART_PRIO_MEM_NUMA
                    , update_every
                    , RRDSET_TYPE_LINE
            );

            rrdset_flag_set(st_numa, RRDSET_FLAG_DETAIL);

            // These depend on CONFIG_NUMA in the kernel.
            rd_local = rrddim_add(st_numa, "local", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
            rd_foreign = rrddim_add(st_numa, "foreign", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
            rd_interleave = rrddim_add(st_numa, "interleave", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
            rd_other = rrddim_add(st_numa, "other", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);

            // The following stats depend on CONFIG_NUMA_BALANCING in the kernel.
            rd_pte_updates = rrddim_add(st_numa, "pte_updates", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
            rd_huge_pte_updates = rrddim_add(st_numa, "huge_pte_updates", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
            rd_hint_faults = rrddim_add(st_numa, "hint_faults", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
            rd_hint_faults_local = rrddim_add(st_numa, "hint_faults_local", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
            rd_pages_migrated = rrddim_add(st_numa, "pages_migrated", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
        }

        rrddim_set_by_pointer(st_numa, rd_local, numa_local);
        rrddim_set_by_pointer(st_numa, rd_foreign, numa_foreign);
        rrddim_set_by_pointer(st_numa, rd_interleave, numa_interleave);
        rrddim_set_by_pointer(st_numa, rd_other, numa_other);
        rrddim_set_by_pointer(st_numa, rd_pte_updates, numa_pte_updates);
        rrddim_set_by_pointer(st_numa, rd_huge_pte_updates, numa_huge_pte_updates);
        rrddim_set_by_pointer(st_numa, rd_hint_faults, numa_hint_faults);
        rrddim_set_by_pointer(st_numa, rd_hint_faults_local, numa_hint_faults_local);
        rrddim_set_by_pointer(st_numa, rd_pages_migrated, numa_pages_migrated);
        rrdset_done(st_numa);
    }

    // --------------------------------------------------------------------

    if(do_balloon == CONFIG_BOOLEAN_YES || (do_balloon == CONFIG_BOOLEAN_AUTO &&
                                            (balloon_inflate || balloon_deflate || balloon_migrate ||
                                             netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
        do_balloon = CONFIG_BOOLEAN_YES;

        static RRDSET *st_balloon = NULL;
        static RRDDIM *rd_inflate = NULL, *rd_deflate = NULL, *rd_migrate = NULL;

        if(unlikely(!st_balloon)) {
            st_balloon = rrdset_create_localhost(
                    "mem"
                    , "balloon"
                    , NULL
                    , "balloon"
                    , NULL
                    , "Memory Ballooning Operations"
                    , "KiB/s"
                    , PLUGIN_PROC_NAME
                    , PLUGIN_PROC_MODULE_VMSTAT_NAME
                    , NETDATA_CHART_PRIO_MEM_BALLOON
                    , update_every
                    , RRDSET_TYPE_LINE
            );

            rd_inflate = rrddim_add(st_balloon, "inflate", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL);
            rd_deflate = rrddim_add(st_balloon, "deflate", NULL, -sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL);
            rd_migrate = rrddim_add(st_balloon, "migrate", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL);
        }

        rrddim_set_by_pointer(st_balloon, rd_inflate, balloon_inflate);
        rrddim_set_by_pointer(st_balloon, rd_deflate, balloon_deflate);
        rrddim_set_by_pointer(st_balloon, rd_migrate, balloon_migrate);
        rrdset_done(st_balloon);
    }

    // --------------------------------------------------------------------

    if(do_zswapio == CONFIG_BOOLEAN_YES || (do_zswapio == CONFIG_BOOLEAN_AUTO &&
                                            (zswpin || zswpout ||
                                             netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
        do_zswapio = CONFIG_BOOLEAN_YES;

        static RRDSET *st_zswapio = NULL;
        static RRDDIM *rd_in = NULL, *rd_out = NULL;

        if(unlikely(!st_zswapio)) {
            st_zswapio = rrdset_create_localhost(
                    "system"
                    , "zswapio"
                    , NULL
                    , "zswap"
                    , NULL
                    , "ZSwap I/O"
                    , "KiB/s"
                    , PLUGIN_PROC_NAME
                    , PLUGIN_PROC_MODULE_VMSTAT_NAME
                    , NETDATA_CHART_PRIO_SYSTEM_ZSWAPIO
                    , update_every
                    , RRDSET_TYPE_AREA
            );

            rd_in = rrddim_add(st_zswapio, "in", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL);
            rd_out = rrddim_add(st_zswapio, "out", NULL, -sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL);
        }

        rrddim_set_by_pointer(st_zswapio, rd_in, zswpin);
        rrddim_set_by_pointer(st_zswapio, rd_out, zswpout);
        rrdset_done(st_zswapio);
    }

    // --------------------------------------------------------------------

    if(do_ksm == CONFIG_BOOLEAN_YES || (do_ksm == CONFIG_BOOLEAN_AUTO &&
                                        (cow_ksm || ksm_swpin_copy ||
                                         netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
        do_ksm = CONFIG_BOOLEAN_YES;

        static RRDSET *st_ksm_cow = NULL;
        static RRDDIM *rd_swapin = NULL, *rd_write = NULL;

        if(unlikely(!st_ksm_cow)) {
            st_ksm_cow = rrdset_create_localhost(
                    "mem"
                    , "ksm_cow"
                    , NULL
                    , "ksm"
                    , NULL
                    , "KSM Copy On Write Operations"
                    , "KiB/s"
                    , PLUGIN_PROC_NAME
                    , PLUGIN_PROC_MODULE_VMSTAT_NAME
                    , NETDATA_CHART_PRIO_MEM_KSM_COW
                    , update_every
                    , RRDSET_TYPE_LINE
            );

            rd_swapin = rrddim_add(st_ksm_cow, "swapin", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL);
            rd_write = rrddim_add(st_ksm_cow, "write", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL);
        }

        rrddim_set_by_pointer(st_ksm_cow, rd_swapin, ksm_swpin_copy);
        rrddim_set_by_pointer(st_ksm_cow, rd_write, cow_ksm);
        rrdset_done(st_ksm_cow);
    }

    // --------------------------------------------------------------------

    if(do_thp == CONFIG_BOOLEAN_YES || do_thp == CONFIG_BOOLEAN_AUTO) {
        if(do_thp == CONFIG_BOOLEAN_YES || (do_thp == CONFIG_BOOLEAN_AUTO &&
                                            (netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES ||
                                             thp_fault_alloc || thp_fault_fallback || thp_fault_fallback_charge))) {
            static RRDSET *st_thp_fault = NULL;
            static RRDDIM *rd_alloc = NULL, *rd_fallback = NULL, *rd_fallback_charge = NULL;

            if(unlikely(!st_thp_fault)) {
                st_thp_fault = rrdset_create_localhost(
                        "mem"
                        , "thp_faults"
                        , NULL
                        , "hugepages"
                        , NULL
                        , "Transparent Huge Page Fault Allocations"
                        , "events/s"
                        , PLUGIN_PROC_NAME
                        , PLUGIN_PROC_MODULE_VMSTAT_NAME
                        , NETDATA_CHART_PRIO_MEM_HUGEPAGES_FAULTS
                        , update_every
                        , RRDSET_TYPE_LINE
                );

                rd_alloc = rrddim_add(st_thp_fault, "alloc", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
                rd_fallback = rrddim_add(st_thp_fault, "fallback", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
                rd_fallback_charge = rrddim_add(st_thp_fault, "fallback_charge", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
            }

            rrddim_set_by_pointer(st_thp_fault, rd_alloc, thp_fault_alloc);
            rrddim_set_by_pointer(st_thp_fault, rd_fallback, thp_fault_fallback);
            rrddim_set_by_pointer(st_thp_fault, rd_fallback_charge, thp_fault_fallback_charge);
            rrdset_done(st_thp_fault);
        }
        if(do_thp == CONFIG_BOOLEAN_YES || (do_thp == CONFIG_BOOLEAN_AUTO &&
                                            (netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES ||
                                             thp_file_alloc || thp_file_fallback || thp_file_fallback_charge || thp_file_mapped))) {
            static RRDSET *st_thp_file = NULL;
            static RRDDIM *rd_alloc = NULL, *rd_fallback = NULL, *rd_fallback_charge = NULL, *rd_mapped = NULL;

            if(unlikely(!st_thp_file)) {
                st_thp_file = rrdset_create_localhost(
                        "mem"
                        , "thp_file"
                        , NULL
                        , "hugepages"
                        , NULL
                        , "Transparent Huge Page File Allocations"
                        , "events/s"
                        , PLUGIN_PROC_NAME
                        , PLUGIN_PROC_MODULE_VMSTAT_NAME
                        , NETDATA_CHART_PRIO_MEM_HUGEPAGES_FILE
                        , update_every
                        , RRDSET_TYPE_LINE
                );

                rd_alloc = rrddim_add(st_thp_file, "alloc", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
                rd_fallback = rrddim_add(st_thp_file, "fallback", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
                rd_mapped = rrddim_add(st_thp_file, "mapped", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
                rd_fallback_charge = rrddim_add(st_thp_file, "fallback_charge", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
            }

            rrddim_set_by_pointer(st_thp_file, rd_alloc, thp_file_alloc);
            rrddim_set_by_pointer(st_thp_file, rd_fallback, thp_file_fallback);
            rrddim_set_by_pointer(st_thp_file, rd_mapped, thp_file_mapped);
            rrddim_set_by_pointer(st_thp_file, rd_fallback_charge, thp_file_fallback_charge);
            rrdset_done(st_thp_file);
        }

        if(do_thp == CONFIG_BOOLEAN_YES || (do_thp == CONFIG_BOOLEAN_AUTO &&
                                            (netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES ||
                                             thp_zero_page_alloc || thp_zero_page_alloc_failed))) {
            static RRDSET *st_thp_zero = NULL;
            static RRDDIM *rd_alloc = NULL, *rd_failed = NULL;

            if(unlikely(!st_thp_zero)) {
                st_thp_zero = rrdset_create_localhost(
                        "mem"
                        , "thp_zero"
                        , NULL
                        , "hugepages"
                        , NULL
                        , "Transparent Huge Zero Page Allocations"
                        , "events/s"
                        , PLUGIN_PROC_NAME
                        , PLUGIN_PROC_MODULE_VMSTAT_NAME
                        , NETDATA_CHART_PRIO_MEM_HUGEPAGES_ZERO
                        , update_every
                        , RRDSET_TYPE_LINE
                );

                rd_alloc = rrddim_add(st_thp_zero, "alloc", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
                rd_failed = rrddim_add(st_thp_zero, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
            }

            rrddim_set_by_pointer(st_thp_zero, rd_alloc, thp_zero_page_alloc);
            rrddim_set_by_pointer(st_thp_zero, rd_failed, thp_zero_page_alloc_failed);
            rrdset_done(st_thp_zero);
        }

        if(do_thp == CONFIG_BOOLEAN_YES || (do_thp == CONFIG_BOOLEAN_AUTO &&
                                            (netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES ||
                                             thp_collapse_alloc || thp_collapse_alloc_failed))) {
            static RRDSET *st_khugepaged = NULL;
            static RRDDIM *rd_alloc = NULL, *rd_failed = NULL;

            if(unlikely(!st_khugepaged)) {
                st_khugepaged = rrdset_create_localhost(
                        "mem"
                        , "thp_collapse"
                        , NULL
                        , "hugepages"
                        , NULL
                        , "Transparent Huge Pages Collapsed by khugepaged"
                        , "events/s"
                        , PLUGIN_PROC_NAME
                        , PLUGIN_PROC_MODULE_VMSTAT_NAME
                        , NETDATA_CHART_PRIO_MEM_HUGEPAGES_KHUGEPAGED
                        , update_every
                        , RRDSET_TYPE_LINE
                );

                rd_alloc = rrddim_add(st_khugepaged, "alloc", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
                rd_failed = rrddim_add(st_khugepaged, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
            }

            rrddim_set_by_pointer(st_khugepaged, rd_alloc, thp_collapse_alloc);
            rrddim_set_by_pointer(st_khugepaged, rd_failed, thp_collapse_alloc_failed);
            rrdset_done(st_khugepaged);
        }

        if(do_thp == CONFIG_BOOLEAN_YES || (do_thp == CONFIG_BOOLEAN_AUTO &&
                                            (netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES ||
                                             thp_split_page || thp_split_page_failed || thp_deferred_split_page || thp_split_pmd))) {
            static RRDSET *st_thp_split = NULL;
            static RRDDIM *rd_split = NULL, *rd_failed = NULL, *rd_deferred_split = NULL, *rd_split_pmd = NULL;

            if(unlikely(!st_thp_split)) {
                st_thp_split = rrdset_create_localhost(
                        "mem"
                        , "thp_split"
                        , NULL
                        , "hugepages"
                        , NULL
                        , "Transparent Huge Page Splits"
                        , "events/s"
                        , PLUGIN_PROC_NAME
                        , PLUGIN_PROC_MODULE_VMSTAT_NAME
                        , NETDATA_CHART_PRIO_MEM_HUGEPAGES_SPLITS
                        , update_every
                        , RRDSET_TYPE_LINE
                );

                rd_split = rrddim_add(st_thp_split, "split", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
                rd_failed = rrddim_add(st_thp_split, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
                rd_split_pmd = rrddim_add(st_thp_split, "split_pmd", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
                rd_deferred_split = rrddim_add(st_thp_split, "split_deferred", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
            }

            rrddim_set_by_pointer(st_thp_split, rd_split, thp_split_page);
            rrddim_set_by_pointer(st_thp_split, rd_failed, thp_split_page_failed);
            rrddim_set_by_pointer(st_thp_split, rd_split_pmd, thp_split_pmd);
            rrddim_set_by_pointer(st_thp_split, rd_deferred_split, thp_deferred_split_page);
            rrdset_done(st_thp_split);
        }
        if(do_thp == CONFIG_BOOLEAN_YES || (do_thp == CONFIG_BOOLEAN_AUTO &&
                                            (netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES ||
                                             thp_swpout || thp_swpout_fallback))) {
            static RRDSET *st_thp_swapout = NULL;
            static RRDDIM *rd_swapout = NULL, *rd_fallback = NULL;

            if(unlikely(!st_thp_swapout)) {
                st_thp_swapout = rrdset_create_localhost(
                        "mem"
                        , "thp_swapout"
                        , NULL
                        , "hugepages"
                        , NULL
                        , "Transparent Huge Pages Swap Out"
                        , "events/s"
                        , PLUGIN_PROC_NAME
                        , PLUGIN_PROC_MODULE_VMSTAT_NAME
                        , NETDATA_CHART_PRIO_MEM_HUGEPAGES_SWAPOUT
                        , update_every
                        , RRDSET_TYPE_LINE
                );

                rd_swapout = rrddim_add(st_thp_swapout, "swapout", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
                rd_fallback = rrddim_add(st_thp_swapout, "fallback", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
            }

            rrddim_set_by_pointer(st_thp_swapout, rd_swapout, thp_swpout);
            rrddim_set_by_pointer(st_thp_swapout, rd_fallback, thp_swpout_fallback);
            rrdset_done(st_thp_swapout);
        }
        if(do_thp == CONFIG_BOOLEAN_YES || (do_thp == CONFIG_BOOLEAN_AUTO &&
                                            (netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES ||
                                             compact_stall || compact_fail || compact_success))) {
            static RRDSET *st_thp_compact = NULL;
            static RRDDIM *rd_success = NULL, *rd_fail = NULL, *rd_stall = NULL;

            if(unlikely(!st_thp_compact)) {
                st_thp_compact = rrdset_create_localhost(
                        "mem"
                        , "thp_compact"
                        , NULL
                        , "hugepages"
                        , NULL
                        , "Transparent Huge Pages Compaction"
                        , "events/s"
                        , PLUGIN_PROC_NAME
                        , PLUGIN_PROC_MODULE_VMSTAT_NAME
                        , NETDATA_CHART_PRIO_MEM_HUGEPAGES_COMPACT
                        , update_every
                        , RRDSET_TYPE_LINE
                );

                rd_success = rrddim_add(st_thp_compact, "success", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
                rd_fail = rrddim_add(st_thp_compact, "fail", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
                rd_stall = rrddim_add(st_thp_compact, "stall", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
            }

            rrddim_set_by_pointer(st_thp_compact, rd_success, compact_success);
            rrddim_set_by_pointer(st_thp_compact, rd_fail, compact_fail);
            rrddim_set_by_pointer(st_thp_compact, rd_stall, compact_stall);
            rrdset_done(st_thp_compact);
        }
    }

    return 0;
}