pagecache.c 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129
  1. // SPDX-License-Identifier: GPL-3.0-or-later
  2. #define NETDATA_RRD_INTERNALS
  3. #include "rrdengine.h"
  4. MRG *main_mrg = NULL;
  5. PGC *main_cache = NULL;
  6. PGC *open_cache = NULL;
  7. PGC *extent_cache = NULL;
  8. struct rrdeng_cache_efficiency_stats rrdeng_cache_efficiency_stats = {};
// Main cache eviction callback: a clean page is being removed from the cache,
// so free the memory holding its data.
static void main_cache_free_clean_page_callback(PGC *cache __maybe_unused, PGC_ENTRY entry __maybe_unused)
{
    // Release storage associated with the page
    dbengine_page_free(entry.data, entry.size);
}
// Main cache flush-init callback: a batch of dirty pages of the given section
// is about to be flushed. The section encodes the dbengine instance (ctx).
static void main_cache_flush_dirty_page_init_callback(PGC *cache __maybe_unused, Word_t section) {
    struct rrdengine_instance *ctx = (struct rrdengine_instance *) section;

    // mark ctx as having flushing in progress
    __atomic_add_fetch(&ctx->atomic.extents_currently_being_flushed, 1, __ATOMIC_RELAXED);
}
  19. static void main_cache_flush_dirty_page_callback(PGC *cache __maybe_unused, PGC_ENTRY *entries_array __maybe_unused, PGC_PAGE **pages_array __maybe_unused, size_t entries __maybe_unused)
  20. {
  21. if(!entries)
  22. return;
  23. struct rrdengine_instance *ctx = (struct rrdengine_instance *) entries_array[0].section;
  24. size_t bytes_per_point = CTX_POINT_SIZE_BYTES(ctx);
  25. struct page_descr_with_data *base = NULL;
  26. for (size_t Index = 0 ; Index < entries; Index++) {
  27. time_t start_time_s = entries_array[Index].start_time_s;
  28. time_t end_time_s = entries_array[Index].end_time_s;
  29. struct page_descr_with_data *descr = page_descriptor_get();
  30. descr->id = mrg_metric_uuid(main_mrg, (METRIC *) entries_array[Index].metric_id);
  31. descr->metric_id = entries_array[Index].metric_id;
  32. descr->start_time_ut = start_time_s * USEC_PER_SEC;
  33. descr->end_time_ut = end_time_s * USEC_PER_SEC;
  34. descr->update_every_s = entries_array[Index].update_every_s;
  35. descr->type = ctx->config.page_type;
  36. descr->page_length = (end_time_s - (start_time_s - descr->update_every_s)) / descr->update_every_s * bytes_per_point;
  37. if(descr->page_length > entries_array[Index].size) {
  38. descr->page_length = entries_array[Index].size;
  39. error_limit_static_global_var(erl, 1, 0);
  40. error_limit(&erl, "DBENGINE: page exceeds the maximum size, adjusting it to max.");
  41. }
  42. descr->page = pgc_page_data(pages_array[Index]);
  43. DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(base, descr, link.prev, link.next);
  44. internal_fatal(descr->page_length > RRDENG_BLOCK_SIZE, "DBENGINE: faulty page length calculation");
  45. }
  46. struct completion completion;
  47. completion_init(&completion);
  48. rrdeng_enq_cmd(ctx, RRDENG_OPCODE_EXTENT_WRITE, base, &completion, STORAGE_PRIORITY_INTERNAL_DBENGINE, NULL, NULL);
  49. completion_wait_for(&completion);
  50. completion_destroy(&completion);
  51. }
// Open cache eviction callback: open-cache entries carry a pointer to their
// datafile as data, so release the datafile reference held for this entry.
static void open_cache_free_clean_page_callback(PGC *cache __maybe_unused, PGC_ENTRY entry __maybe_unused)
{
    struct rrdengine_datafile *datafile = entry.data;
    datafile_release(datafile, DATAFILE_ACQUIRE_OPEN_CACHE);
}
// Open cache flush callback: open-cache pages are never flushed to disk,
// so this is intentionally a no-op (a callback must still be provided).
static void open_cache_flush_dirty_page_callback(PGC *cache __maybe_unused, PGC_ENTRY *entries_array __maybe_unused, PGC_PAGE **pages_array __maybe_unused, size_t entries __maybe_unused)
{
    ;
}
// Extent cache eviction callback: free the memory holding the extent data.
static void extent_cache_free_clean_page_callback(PGC *cache __maybe_unused, PGC_ENTRY entry __maybe_unused)
{
    dbengine_extent_free(entry.data, entry.size);
}
// Extent cache flush callback: extent-cache pages are never flushed to disk,
// so this is intentionally a no-op (a callback must still be provided).
static void extent_cache_flush_dirty_page_callback(PGC *cache __maybe_unused, PGC_ENTRY *entries_array __maybe_unused, PGC_PAGE **pages_array __maybe_unused, size_t entries __maybe_unused)
{
    ;
}
  69. inline TIME_RANGE_COMPARE is_page_in_time_range(time_t page_first_time_s, time_t page_last_time_s, time_t wanted_start_time_s, time_t wanted_end_time_s) {
  70. // page_first_time_s <= wanted_end_time_s && page_last_time_s >= wanted_start_time_s
  71. if(page_last_time_s < wanted_start_time_s)
  72. return PAGE_IS_IN_THE_PAST;
  73. if(page_first_time_s > wanted_end_time_s)
  74. return PAGE_IS_IN_THE_FUTURE;
  75. return PAGE_IS_IN_RANGE;
  76. }
// Pick the best page to serve wanted_time_s out of PArray, a JudyL keyed by
// page start time whose values are struct page_details pointers.
//
// The array is probed from both directions around wanted_time_s:
//   - pdF: first candidate at/after wanted_time_s whose status carries
//     neither the global/extra skip flags nor 'mode' (not yet processed);
//   - pdL: last candidate at/before wanted_time_s without skip flags,
//     stopping early (pdL = NULL) once an already-processed ('mode') page
//     is met, to avoid rescanning the beginning of the array.
//
// When the two candidates differ, the one covering wanted_time_s wins;
// ties are broken by higher resolution (smaller update_every_s), then by
// earlier start time. (*gaps) is incremented whenever the returned page
// (or the lack of one) leaves wanted_time_s uncovered.
// Returns NULL when no usable page exists.
static inline struct page_details *pdc_find_page_for_time(
        Pcvoid_t PArray,
        time_t wanted_time_s,
        size_t *gaps,
        PDC_PAGE_STATUS mode,
        PDC_PAGE_STATUS skip_list
) {
    Word_t PIndexF = wanted_time_s, PIndexL = wanted_time_s;
    Pvoid_t *PValueF, *PValueL;
    struct page_details *pdF = NULL, *pdL = NULL;
    bool firstF = true, firstL = true;

    PDC_PAGE_STATUS ignore_list = PDC_PAGE_QUERY_GLOBAL_SKIP_LIST | skip_list;

    // forward scan: first not-skipped, not-yet-processed page at/after wanted_time_s
    while ((PValueF = PDCJudyLFirstThenNext(PArray, &PIndexF, &firstF))) {
        pdF = *PValueF;

        PDC_PAGE_STATUS status = __atomic_load_n(&pdF->status, __ATOMIC_ACQUIRE);
        if (!(status & (ignore_list | mode)))
            break;

        pdF = NULL;
    }

    // backward scan: last not-skipped page at/before wanted_time_s
    while ((PValueL = PDCJudyLLastThenPrev(PArray, &PIndexL, &firstL))) {
        pdL = *PValueL;

        PDC_PAGE_STATUS status = __atomic_load_n(&pdL->status, __ATOMIC_ACQUIRE);
        if(status & mode) {
            // don't go all the way back to the beginning
            // stop at the last processed
            pdL = NULL;
            break;
        }

        if (!(status & ignore_list))
            break;

        pdL = NULL;
    }

    // classify each candidate against the single point wanted_time_s;
    // a missing candidate is treated as being on its "natural" side
    TIME_RANGE_COMPARE rcF = (pdF) ? is_page_in_time_range(pdF->first_time_s, pdF->last_time_s, wanted_time_s, wanted_time_s) : PAGE_IS_IN_THE_FUTURE;
    TIME_RANGE_COMPARE rcL = (pdL) ? is_page_in_time_range(pdL->first_time_s, pdL->last_time_s, wanted_time_s, wanted_time_s) : PAGE_IS_IN_THE_PAST;

    if (!pdF || pdF == pdL) {
        // F is missing, or they are the same
        // return L
        (*gaps) += (rcL == PAGE_IS_IN_RANGE) ? 0 : 1;
        return pdL;
    }

    if (!pdL) {
        // L is missing
        // return F
        (*gaps) += (rcF == PAGE_IS_IN_RANGE) ? 0 : 1;
        return pdF;
    }

    if (rcF == rcL) {
        // both are on the same side,
        // but they are different pages
        switch (rcF) {
            case PAGE_IS_IN_RANGE:
                // pick the higher resolution
                if (pdF->update_every_s && pdF->update_every_s < pdL->update_every_s)
                    return pdF;

                if (pdL->update_every_s && pdL->update_every_s < pdF->update_every_s)
                    return pdL;

                // same resolution - pick the one that starts earlier
                if (pdL->first_time_s < pdF->first_time_s)
                    return pdL;

                return pdF;
                break;

            case PAGE_IS_IN_THE_FUTURE:
                (*gaps)++;

                // pick the one that starts earlier
                if (pdL->first_time_s < pdF->first_time_s)
                    return pdL;

                return pdF;
                break;

            default:
            case PAGE_IS_IN_THE_PAST:
                (*gaps)++;
                return NULL;
                break;
        }
    }

    if(rcF == PAGE_IS_IN_RANGE) {
        // (*gaps) += 0;
        return pdF;
    }

    if(rcL == PAGE_IS_IN_RANGE) {
        // (*gaps) += 0;
        return pdL;
    }

    if(rcF == PAGE_IS_IN_THE_FUTURE) {
        (*gaps)++;
        return pdF;
    }

    if(rcL == PAGE_IS_IN_THE_FUTURE) {
        (*gaps)++;
        return pdL;
    }

    // impossible case
    (*gaps)++;
    return NULL;
}
// Walk 'cache' (main or open cache) for pages of 'metric' overlapping the
// wanted time range and insert a struct page_details per page into
// *JudyL_page_array (keyed by page start time). Counts gaps in coverage
// into *cache_gaps and tags every created pd with 'tags'.
//
// In open_cache_mode the page data is a datafile pointer (not timeseries
// data), so pd->page stays NULL, the extent location is copied out of the
// page's custom data, and the page is released immediately after a
// datafile reference is taken for the pd.
// Returns the number of page_details created (duplicates are skipped).
static size_t get_page_list_from_pgc(PGC *cache, METRIC *metric, struct rrdengine_instance *ctx,
        time_t wanted_start_time_s, time_t wanted_end_time_s,
        Pvoid_t *JudyL_page_array, size_t *cache_gaps,
        bool open_cache_mode, PDC_PAGE_STATUS tags) {

    size_t pages_found_in_cache = 0;
    Word_t metric_id = mrg_metric_id(main_mrg, metric);

    time_t now_s = wanted_start_time_s;
    time_t dt_s = mrg_metric_get_update_every_s(main_mrg, metric);
    if(!dt_s)
        dt_s = default_rrd_update_every;

    // pretend the previous page ended exactly one update before the window,
    // so a page starting right at wanted_start_time_s is not seen as a gap
    time_t previous_page_end_time_s = now_s - dt_s;
    bool first = true;

    do {
        // first lookup: closest page to now_s; afterwards: the next page
        PGC_PAGE *page = pgc_page_get_and_acquire(
                cache, (Word_t)ctx, (Word_t)metric_id, now_s,
                (first) ? PGC_SEARCH_CLOSEST : PGC_SEARCH_NEXT);
        first = false;

        if(!page) {
            // cache exhausted before reaching the end of the window
            if(previous_page_end_time_s < wanted_end_time_s)
                (*cache_gaps)++;

            break;
        }

        time_t page_start_time_s = pgc_page_start_time_s(page);
        time_t page_end_time_s = pgc_page_end_time_s(page);
        time_t page_update_every_s = pgc_page_update_every_s(page);
        size_t page_length = pgc_page_data_size(cache, page);

        if(!page_update_every_s)
            page_update_every_s = dt_s;

        if(is_page_in_time_range(page_start_time_s, page_end_time_s, wanted_start_time_s, wanted_end_time_s) != PAGE_IS_IN_RANGE) {
            // not a useful page for this query
            pgc_page_release(cache, page);
            page = NULL;

            if(previous_page_end_time_s < wanted_end_time_s)
                (*cache_gaps)++;

            break;
        }

        // a hole bigger than one update interval between consecutive pages
        if (page_start_time_s - previous_page_end_time_s > dt_s)
            (*cache_gaps)++;

        Pvoid_t *PValue = PDCJudyLIns(JudyL_page_array, (Word_t) page_start_time_s, PJE0);
        if (!PValue || PValue == PJERR)
            fatal("DBENGINE: corrupted judy array in %s()", __FUNCTION__ );

        if (unlikely(*PValue)) {
            // a pd for this start time already exists (possibly with different
            // retention) - keep the first one and ignore this page
            struct page_details *pd = *PValue;
            UNUSED(pd);
            pgc_page_release(cache, page);
        }
        else {
            internal_fatal(pgc_page_metric(page) != metric_id, "Wrong metric id in page found in cache");
            internal_fatal(pgc_page_section(page) != (Word_t)ctx, "Wrong section in page found in cache");

            struct page_details *pd = page_details_get();
            pd->metric_id = metric_id;
            pd->first_time_s = page_start_time_s;
            pd->last_time_s = page_end_time_s;
            pd->page_length = page_length;
            pd->update_every_s = (uint32_t) page_update_every_s;
            pd->page = (open_cache_mode) ? NULL : page;
            pd->status |= tags;

            if((pd->page)) {
                // the page data is directly available - no disk I/O needed
                pd->status |= PDC_PAGE_READY | PDC_PAGE_PRELOADED;

                if(pgc_page_data(page) == DBENGINE_EMPTY_PAGE)
                    pd->status |= PDC_PAGE_EMPTY;
            }

            if(open_cache_mode) {
                // open-cache entries point to their datafile; copy the extent
                // location so the page can later be read from disk
                struct rrdengine_datafile *datafile = pgc_page_data(page);
                if(datafile_acquire(datafile, DATAFILE_ACQUIRE_PAGE_DETAILS)) { // for pd
                    struct extent_io_data *xio = (struct extent_io_data *) pgc_page_custom_data(cache, page);
                    pd->datafile.ptr = pgc_page_data(page);
                    pd->datafile.file = xio->file;
                    pd->datafile.extent.pos = xio->pos;
                    pd->datafile.extent.bytes = xio->bytes;
                    pd->datafile.fileno = pd->datafile.ptr->fileno;
                    pd->status |= PDC_PAGE_DATAFILE_ACQUIRED | PDC_PAGE_DISK_PENDING;
                }
                else {
                    pd->status |= PDC_PAGE_FAILED | PDC_PAGE_FAILED_TO_ACQUIRE_DATAFILE;
                }
                pgc_page_release(cache, page);
            }

            *PValue = pd;
            pages_found_in_cache++;
        }

        // prepare for the next iteration
        previous_page_end_time_s = page_end_time_s;

        if(page_update_every_s > 0)
            dt_s = page_update_every_s;

        // we are going to ask for the NEXT page
        // so, set this to our first time
        now_s = page_start_time_s;

    } while(now_s <= wanted_end_time_s);

    return pages_found_in_cache;
}
  274. static void pgc_inject_gap(struct rrdengine_instance *ctx, METRIC *metric, time_t start_time_s, time_t end_time_s) {
  275. time_t db_first_time_s, db_last_time_s, db_update_every_s;
  276. mrg_metric_get_retention(main_mrg, metric, &db_first_time_s, &db_last_time_s, &db_update_every_s);
  277. if(is_page_in_time_range(start_time_s, end_time_s, db_first_time_s, db_last_time_s) != PAGE_IS_IN_RANGE)
  278. return;
  279. PGC_ENTRY page_entry = {
  280. .hot = false,
  281. .section = (Word_t)ctx,
  282. .metric_id = (Word_t)metric,
  283. .start_time_s = MAX(start_time_s, db_first_time_s),
  284. .end_time_s = MIN(end_time_s, db_last_time_s),
  285. .update_every_s = 0,
  286. .size = 0,
  287. .data = DBENGINE_EMPTY_PAGE,
  288. };
  289. if(page_entry.start_time_s >= page_entry.end_time_s)
  290. return;
  291. PGC_PAGE *page = pgc_page_add_and_acquire(main_cache, page_entry, NULL);
  292. pgc_page_release(main_cache, page);
  293. }
// Analyze JudyL_page_array (start_time_s => struct page_details *) for the
// wanted time range:
//   PASS 1 clears preprocessing flags left by previous calls;
//   PASS 2 simulates the query, selecting the best page per time step
//          (counting gaps, optionally injecting DBENGINE_EMPTY_PAGE markers
//          into the main cache for uncovered ranges when populate_gaps);
//   PASS 3 marks unselected pages as skipped (counted in *pages_overlapping)
//          and, for selected pages without data, retries the main cache
//          (*pages_found_pass4) or flags them for disk I/O (*pages_pending).
//
// Outputs: *pages_total (selected pages), *optimal_end_time_s (end of the
// last page used, when the range was covered early). Returns the number of
// gaps found in the query plan.
static size_t list_has_time_gaps(
        struct rrdengine_instance *ctx,
        METRIC *metric,
        Pvoid_t JudyL_page_array,
        time_t wanted_start_time_s,
        time_t wanted_end_time_s,
        size_t *pages_total,
        size_t *pages_found_pass4,
        size_t *pages_pending,
        size_t *pages_overlapping,
        time_t *optimal_end_time_s,
        bool populate_gaps
) {
    // we will recalculate these, so zero them
    *pages_pending = 0;
    *pages_overlapping = 0;
    *optimal_end_time_s = 0;

    bool first;
    Pvoid_t *PValue;
    Word_t this_page_start_time;
    struct page_details *pd;

    size_t gaps = 0;
    Word_t metric_id = mrg_metric_id(main_mrg, metric);

    // ------------------------------------------------------------------------
    // PASS 1: remove the preprocessing flags from the pages in PDC

    first = true;
    this_page_start_time = 0;
    while((PValue = PDCJudyLFirstThenNext(JudyL_page_array, &this_page_start_time, &first))) {
        pd = *PValue;
        pd->status &= ~(PDC_PAGE_SKIP|PDC_PAGE_PREPROCESSED);
    }

    // ------------------------------------------------------------------------
    // PASS 2: emulate processing to find the useful pages

    time_t now_s = wanted_start_time_s;
    time_t dt_s = mrg_metric_get_update_every_s(main_mrg, metric);
    if(!dt_s)
        dt_s = default_rrd_update_every;

    size_t pages_pass2 = 0, pages_pass3 = 0;
    while((pd = pdc_find_page_for_time(
            JudyL_page_array, now_s, &gaps,
            PDC_PAGE_PREPROCESSED, 0))) {

        pd->status |= PDC_PAGE_PREPROCESSED;
        pages_pass2++;

        if(pd->update_every_s)
            dt_s = pd->update_every_s;

        // uncovered range before this page - remember it as empty
        if(populate_gaps && pd->first_time_s > now_s)
            pgc_inject_gap(ctx, metric, now_s, pd->first_time_s);

        // advance just past the end of this page
        now_s = pd->last_time_s + dt_s;
        if(now_s > wanted_end_time_s) {
            *optimal_end_time_s = pd->last_time_s;
            break;
        }
    }

    // uncovered range at the tail of the window
    if(populate_gaps && now_s < wanted_end_time_s)
        pgc_inject_gap(ctx, metric, now_s, wanted_end_time_s);

    // ------------------------------------------------------------------------
    // PASS 3: mark as skipped all the pages not useful

    first = true;
    this_page_start_time = 0;
    while((PValue = PDCJudyLFirstThenNext(JudyL_page_array, &this_page_start_time, &first))) {
        pd = *PValue;

        internal_fatal(pd->metric_id != metric_id, "pd has wrong metric_id");

        if(!(pd->status & PDC_PAGE_PREPROCESSED)) {
            // not selected by PASS 2 - overlapping/redundant page
            (*pages_overlapping)++;
            pd->status |= PDC_PAGE_SKIP;
            pd->status &= ~(PDC_PAGE_READY | PDC_PAGE_DISK_PENDING);
            continue;
        }

        pages_pass3++;

        if(!pd->page) {
            // no data attached yet - the main cache may have gained it meanwhile
            pd->page = pgc_page_get_and_acquire(main_cache, (Word_t) ctx, (Word_t) metric_id, pd->first_time_s, PGC_SEARCH_EXACT);

            if(pd->page) {
                (*pages_found_pass4)++;

                pd->status &= ~PDC_PAGE_DISK_PENDING;
                pd->status |= PDC_PAGE_READY | PDC_PAGE_PRELOADED | PDC_PAGE_PRELOADED_PASS4;

                if(pgc_page_data(pd->page) == DBENGINE_EMPTY_PAGE)
                    pd->status |= PDC_PAGE_EMPTY;
            }
            else if(!(pd->status & PDC_PAGE_FAILED) && (pd->status & PDC_PAGE_DATAFILE_ACQUIRED)) {
                // has a datafile location - schedule it for disk loading
                (*pages_pending)++;

                pd->status |= PDC_PAGE_DISK_PENDING;

                internal_fatal(pd->status & PDC_PAGE_SKIP, "page is disk pending and skipped");
                internal_fatal(!pd->datafile.ptr, "datafile is NULL");
                internal_fatal(!pd->datafile.extent.bytes, "datafile.extent.bytes zero");
                internal_fatal(!pd->datafile.extent.pos, "datafile.extent.pos is zero");
                internal_fatal(!pd->datafile.fileno, "datafile.fileno is zero");
            }
        }
        else {
            // data already attached (preloaded earlier)
            pd->status &= ~PDC_PAGE_DISK_PENDING;
            pd->status |= (PDC_PAGE_READY | PDC_PAGE_PRELOADED);
        }
    }

    internal_fatal(pages_pass2 != pages_pass3,
                   "DBENGINE: page count does not match");

    *pages_total = pages_pass2;

    return gaps;
}
// Callback invoked for every journal v2 page found; receives the page after it
// has been added to (and acquired from) the open cache.
typedef void (*page_found_callback_t)(PGC_PAGE *page, void *data);

// Scan the v2 journal files of all datafiles of ctx for pages of 'metric'
// overlapping [start_time_ut, end_time_ut]. Each matching page is registered
// in the open cache (with its extent location as custom data) and handed to
// 'callback'. Returns the number of pages found.
static size_t get_page_list_from_journal_v2(struct rrdengine_instance *ctx, METRIC *metric, usec_t start_time_ut, usec_t end_time_ut, page_found_callback_t callback, void *callback_data) {
    uuid_t *uuid = mrg_metric_uuid(main_mrg, metric);
    Word_t metric_id = mrg_metric_id(main_mrg, metric);

    time_t wanted_start_time_s = (time_t)(start_time_ut / USEC_PER_SEC);
    time_t wanted_end_time_s = (time_t)(end_time_ut / USEC_PER_SEC);

    size_t pages_found = 0;

    // hold the datafiles list stable while we iterate it
    uv_rwlock_rdlock(&ctx->datafiles.rwlock);
    struct rrdengine_datafile *datafile;
    for(datafile = ctx->datafiles.first; datafile ; datafile = datafile->next) {
        // acquire the journal's mmap'd data only if its time range overlaps
        struct journal_v2_header *j2_header = journalfile_v2_data_acquire(datafile->journalfile, NULL,
                                                                          wanted_start_time_s,
                                                                          wanted_end_time_s);
        if (unlikely(!j2_header))
            continue;

        time_t journal_start_time_s = (time_t)(j2_header->start_time_ut / USEC_PER_SEC);

        // the datafile possibly contains useful data for this query

        size_t journal_metric_count = (size_t)j2_header->metric_count;
        struct journal_metric_list *uuid_list = (struct journal_metric_list *)((uint8_t *) j2_header + j2_header->metric_offset);
        // the journal's metric list is sorted by UUID
        struct journal_metric_list *uuid_entry = bsearch(uuid,uuid_list,journal_metric_count,sizeof(*uuid_list), journal_metric_uuid_compare);

        if (unlikely(!uuid_entry)) {
            // our UUID is not in this datafile
            journalfile_v2_data_release(datafile->journalfile);
            continue;
        }

        struct journal_page_header *page_list_header = (struct journal_page_header *) ((uint8_t *) j2_header + uuid_entry->page_offset);
        struct journal_page_list *page_list = (struct journal_page_list *)((uint8_t *) page_list_header + sizeof(*page_list_header));
        struct journal_extent_list *extent_list = (void *)((uint8_t *)j2_header + j2_header->extent_offset);
        uint32_t uuid_page_entries = page_list_header->entries;

        for (uint32_t index = 0; index < uuid_page_entries; index++) {
            struct journal_page_list *page_entry_in_journal = &page_list[index];

            // page times are stored as deltas from the journal's start time
            time_t page_first_time_s = page_entry_in_journal->delta_start_s + journal_start_time_s;
            time_t page_last_time_s = page_entry_in_journal->delta_end_s + journal_start_time_s;

            TIME_RANGE_COMPARE prc = is_page_in_time_range(page_first_time_s, page_last_time_s, wanted_start_time_s, wanted_end_time_s);
            if(prc == PAGE_IS_IN_THE_PAST)
                continue;

            // the page list is time-ordered - nothing useful follows
            if(prc == PAGE_IS_IN_THE_FUTURE)
                break;

            time_t page_update_every_s = page_entry_in_journal->update_every_s;
            size_t page_length = page_entry_in_journal->page_length;

            if(datafile_acquire(datafile, DATAFILE_ACQUIRE_OPEN_CACHE)) { //for open cache item
                // add this page to open cache
                bool added = false;
                struct extent_io_data ei = {
                        .pos = extent_list[page_entry_in_journal->extent_index].datafile_offset,
                        .bytes = extent_list[page_entry_in_journal->extent_index].datafile_size,
                        .page_length = page_length,
                        .file = datafile->file,
                        .fileno = datafile->fileno,
                };

                PGC_PAGE *page = pgc_page_add_and_acquire(open_cache, (PGC_ENTRY) {
                        .hot = false,
                        .section = (Word_t) ctx,
                        .metric_id = metric_id,
                        .start_time_s = page_first_time_s,
                        .end_time_s = page_last_time_s,
                        .update_every_s = (uint32_t) page_update_every_s,
                        .data = datafile,
                        .size = 0,
                        .custom_data = (uint8_t *) &ei,
                }, &added);

                if(!added)
                    // the page was already in the open cache - drop the
                    // datafile reference we took for the new entry
                    datafile_release(datafile, DATAFILE_ACQUIRE_OPEN_CACHE);

                callback(page, callback_data);

                pgc_page_release(open_cache, page);

                pages_found++;
            }
        }

        journalfile_v2_data_release(datafile->journalfile);
    }
    uv_rwlock_rdunlock(&ctx->datafiles.rwlock);

    return pages_found;
}
  465. void add_page_details_from_journal_v2(PGC_PAGE *page, void *JudyL_pptr) {
  466. struct rrdengine_datafile *datafile = pgc_page_data(page);
  467. if(!datafile_acquire(datafile, DATAFILE_ACQUIRE_PAGE_DETAILS)) // for pd
  468. return;
  469. Pvoid_t *PValue = PDCJudyLIns(JudyL_pptr, pgc_page_start_time_s(page), PJE0);
  470. if (!PValue || PValue == PJERR)
  471. fatal("DBENGINE: corrupted judy array");
  472. if (unlikely(*PValue)) {
  473. datafile_release(datafile, DATAFILE_ACQUIRE_PAGE_DETAILS);
  474. return;
  475. }
  476. Word_t metric_id = pgc_page_metric(page);
  477. // let's add it to the judy
  478. struct extent_io_data *ei = pgc_page_custom_data(open_cache, page);
  479. struct page_details *pd = page_details_get();
  480. *PValue = pd;
  481. pd->datafile.extent.pos = ei->pos;
  482. pd->datafile.extent.bytes = ei->bytes;
  483. pd->datafile.file = ei->file;
  484. pd->datafile.fileno = ei->fileno;
  485. pd->first_time_s = pgc_page_start_time_s(page);
  486. pd->last_time_s = pgc_page_end_time_s(page);
  487. pd->datafile.ptr = datafile;
  488. pd->page_length = ei->page_length;
  489. pd->update_every_s = (uint32_t) pgc_page_update_every_s(page);
  490. pd->metric_id = metric_id;
  491. pd->status |= PDC_PAGE_DISK_PENDING | PDC_PAGE_SOURCE_JOURNAL_V2 | PDC_PAGE_DATAFILE_ACQUIRED;
  492. }
// Returns a JudyL with all pages overlapping [start_time_ut, end_time_ut].
// The value of each JudyL entry is a struct page_details pointer for that page.
// DBENGINE2:
// Timing helper for the passes below: after all passes ran, each non-zero
// passX_ut start timestamp is converted (last pass first) into the duration
// from its start to the start of the following pass, carried via 'finish'.
#define time_delta(finish, pass) do { if(pass) { usec_t t = pass; (pass) = (finish) - (pass); (finish) = t; } } while(0)

// Build the page list (JudyL: start_time_s => struct page_details *) for one
// metric over [start_time_ut, end_time_ut], trying progressively more
// expensive sources:
//   PASS 1 - main page cache (data in memory);
//   PASS 2 - open cache (metadata of the currently-open datafile);
//   PASS 3 - journal v2 files (metadata on disk);
//   PASS 4 - re-check the main cache and compute the final gaps.
// Stops early (goto we_are_done) as soon as the plan has no gaps.
// Sets *pages_to_load to the number of pages needing disk I/O and
// *optimal_end_time_s as computed by list_has_time_gaps().
// Updates rrdeng_cache_efficiency_stats counters before returning.
static Pvoid_t get_page_list(
        struct rrdengine_instance *ctx,
        METRIC *metric,
        usec_t start_time_ut,
        usec_t end_time_ut,
        size_t *pages_to_load,
        time_t *optimal_end_time_s
) {
    *optimal_end_time_s = 0;

    Pvoid_t JudyL_page_array = (Pvoid_t) NULL;

    time_t wanted_start_time_s = (time_t)(start_time_ut / USEC_PER_SEC);
    time_t wanted_end_time_s = (time_t)(end_time_ut / USEC_PER_SEC);

    size_t pages_found_in_main_cache = 0,
           pages_found_in_open_cache = 0,
           pages_found_in_journals_v2 = 0,
           pages_found_pass4 = 0,
           pages_pending = 0,
           pages_overlapping = 0,
           pages_total = 0;

    size_t cache_gaps = 0, query_gaps = 0;
    bool done_v2 = false, done_open = false;

    usec_t pass1_ut = 0, pass2_ut = 0, pass3_ut = 0, pass4_ut = 0;

    // --------------------------------------------------------------
    // PASS 1: Check what the main page cache has available

    pass1_ut = now_monotonic_usec();
    size_t pages_pass1 = get_page_list_from_pgc(main_cache, metric, ctx, wanted_start_time_s, wanted_end_time_s,
                                                &JudyL_page_array, &cache_gaps,
                                                false, PDC_PAGE_SOURCE_MAIN_CACHE);
    query_gaps += cache_gaps;
    pages_found_in_main_cache += pages_pass1;
    pages_total += pages_pass1;

    if(pages_found_in_main_cache && !cache_gaps) {
        query_gaps = list_has_time_gaps(ctx, metric, JudyL_page_array, wanted_start_time_s, wanted_end_time_s,
                                        &pages_total, &pages_found_pass4, &pages_pending, &pages_overlapping,
                                        optimal_end_time_s, false);

        if (pages_total && !query_gaps)
            goto we_are_done;
    }

    // --------------------------------------------------------------
    // PASS 2: Check what the open journal page cache has available
    //         these will be loaded from disk

    pass2_ut = now_monotonic_usec();
    size_t pages_pass2 = get_page_list_from_pgc(open_cache, metric, ctx, wanted_start_time_s, wanted_end_time_s,
                                                &JudyL_page_array, &cache_gaps,
                                                true, PDC_PAGE_SOURCE_OPEN_CACHE);
    query_gaps += cache_gaps;
    pages_found_in_open_cache += pages_pass2;
    pages_total += pages_pass2;
    done_open = true;

    if(pages_found_in_open_cache) {
        query_gaps = list_has_time_gaps(ctx, metric, JudyL_page_array, wanted_start_time_s, wanted_end_time_s,
                                        &pages_total, &pages_found_pass4, &pages_pending, &pages_overlapping,
                                        optimal_end_time_s, false);

        if (pages_total && !query_gaps)
            goto we_are_done;
    }

    // --------------------------------------------------------------
    // PASS 3: Check Journal v2 to fill the gaps

    pass3_ut = now_monotonic_usec();
    size_t pages_pass3 = get_page_list_from_journal_v2(ctx, metric, start_time_ut, end_time_ut,
                                                       add_page_details_from_journal_v2, &JudyL_page_array);
    pages_found_in_journals_v2 += pages_pass3;
    pages_total += pages_pass3;
    done_v2 = true;

    // --------------------------------------------------------------
    // PASS 4: Check the cache again
    //         and calculate the time gaps in the query
    //         THIS IS REQUIRED AFTER JOURNAL V2 LOOKUP

    pass4_ut = now_monotonic_usec();
    query_gaps = list_has_time_gaps(ctx, metric, JudyL_page_array, wanted_start_time_s, wanted_end_time_s,
                                    &pages_total, &pages_found_pass4, &pages_pending, &pages_overlapping,
                                    optimal_end_time_s, true);

we_are_done:

    if(pages_to_load)
        *pages_to_load = pages_pending;

    // convert pass start timestamps to pass durations (see time_delta above)
    usec_t finish_ut = now_monotonic_usec();
    time_delta(finish_ut, pass4_ut);
    time_delta(finish_ut, pass3_ut);
    time_delta(finish_ut, pass2_ut);
    time_delta(finish_ut, pass1_ut);
    __atomic_add_fetch(&rrdeng_cache_efficiency_stats.prep_time_in_main_cache_lookup, pass1_ut, __ATOMIC_RELAXED);
    __atomic_add_fetch(&rrdeng_cache_efficiency_stats.prep_time_in_open_cache_lookup, pass2_ut, __ATOMIC_RELAXED);
    __atomic_add_fetch(&rrdeng_cache_efficiency_stats.prep_time_in_journal_v2_lookup, pass3_ut, __ATOMIC_RELAXED);
    __atomic_add_fetch(&rrdeng_cache_efficiency_stats.prep_time_in_pass4_lookup, pass4_ut, __ATOMIC_RELAXED);

    __atomic_add_fetch(&rrdeng_cache_efficiency_stats.queries, 1, __ATOMIC_RELAXED);
    __atomic_add_fetch(&rrdeng_cache_efficiency_stats.queries_planned_with_gaps, (query_gaps) ? 1 : 0, __ATOMIC_RELAXED);
    __atomic_add_fetch(&rrdeng_cache_efficiency_stats.queries_open, done_open ? 1 : 0, __ATOMIC_RELAXED);
    __atomic_add_fetch(&rrdeng_cache_efficiency_stats.queries_journal_v2, done_v2 ? 1 : 0, __ATOMIC_RELAXED);
    __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_total, pages_total, __ATOMIC_RELAXED);
    __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_meta_source_main_cache, pages_found_in_main_cache, __ATOMIC_RELAXED);
    __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_meta_source_open_cache, pages_found_in_open_cache, __ATOMIC_RELAXED);
    __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_meta_source_journal_v2, pages_found_in_journals_v2, __ATOMIC_RELAXED);
    __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_data_source_main_cache, pages_found_in_main_cache, __ATOMIC_RELAXED);
    __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_data_source_main_cache_at_pass4, pages_found_pass4, __ATOMIC_RELAXED);
    __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_to_load_from_disk, pages_pending, __ATOMIC_RELAXED);
    __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_overlapping_skipped, pages_overlapping, __ATOMIC_RELAXED);

    return JudyL_page_array;
}
  595. inline void rrdeng_prep_wait(PDC *pdc) {
  596. if (unlikely(pdc && !pdc->prep_done)) {
  597. usec_t started_ut = now_monotonic_usec();
  598. completion_wait_for(&pdc->prep_completion);
  599. pdc->prep_done = true;
  600. __atomic_add_fetch(&rrdeng_cache_efficiency_stats.query_time_wait_for_prep, now_monotonic_usec() - started_ut, __ATOMIC_RELAXED);
  601. }
  602. }
// Prepare a query described by 'pdc': build its page list and, when pages must
// be loaded from disk, route the load work synchronously or asynchronously
// according to pdc->priority. Marks prep (and, when no I/O is needed, page)
// completion so waiters in rrdeng_prep_wait()/readers can proceed.
// 'worker' is true when running on an event-loop worker thread.
void rrdeng_prep_query(struct page_details_control *pdc, bool worker) {
    if(worker)
        worker_is_busy(UV_EVENT_DBENGINE_QUERY);

    size_t pages_to_load = 0;
    pdc->page_list_JudyL = get_page_list(pdc->ctx, pdc->metric,
                                         pdc->start_time_s * USEC_PER_SEC,
                                         pdc->end_time_s * USEC_PER_SEC,
                                         &pages_to_load,
                                         &pdc->optimal_end_time_s);

    if (pages_to_load && pdc->page_list_JudyL) {
        pdc_acquire(pdc); // we get 1 for the 1st worker in the chain: do_read_page_list_work()
        usec_t start_ut = now_monotonic_usec();

        if(likely(pdc->priority == STORAGE_PRIORITY_SYNCHRONOUS))
            pdc_route_synchronously(pdc->ctx, pdc);
        else
            pdc_route_asynchronously(pdc->ctx, pdc);

        __atomic_add_fetch(&rrdeng_cache_efficiency_stats.prep_time_to_route, now_monotonic_usec() - start_ut, __ATOMIC_RELAXED);
    }
    else
        // no disk I/O needed - the pages (if any) are ready now
        completion_mark_complete(&pdc->page_completion);

    completion_mark_complete(&pdc->prep_completion);

    pdc_release_and_destroy_if_unreferenced(pdc, true, true);

    if(worker)
        worker_is_idle();
}
/**
 * Searches for pages in the query handle's time range and triggers disk I/O
 * if necessary and possible. Results are communicated through handle->pdc.
 * @param handle query handle as initialized (provides ctx, metric, priority
 *               and the inclusive start/end time in seconds)
 */
  636. void pg_cache_preload(struct rrdeng_query_handle *handle) {
  637. if (unlikely(!handle || !handle->metric))
  638. return;
  639. __atomic_add_fetch(&handle->ctx->atomic.inflight_queries, 1, __ATOMIC_RELAXED);
  640. __atomic_add_fetch(&rrdeng_cache_efficiency_stats.currently_running_queries, 1, __ATOMIC_RELAXED);
  641. handle->pdc = pdc_get();
  642. handle->pdc->metric = mrg_metric_dup(main_mrg, handle->metric);
  643. handle->pdc->start_time_s = handle->start_time_s;
  644. handle->pdc->end_time_s = handle->end_time_s;
  645. handle->pdc->priority = handle->priority;
  646. handle->pdc->optimal_end_time_s = handle->end_time_s;
  647. handle->pdc->ctx = handle->ctx;
  648. handle->pdc->refcount = 1;
  649. netdata_spinlock_init(&handle->pdc->refcount_spinlock);
  650. completion_init(&handle->pdc->prep_completion);
  651. completion_init(&handle->pdc->page_completion);
  652. if(ctx_is_available_for_queries(handle->ctx)) {
  653. handle->pdc->refcount++; // we get 1 for the query thread and 1 for the prep thread
  654. if(unlikely(handle->pdc->priority == STORAGE_PRIORITY_SYNCHRONOUS))
  655. rrdeng_prep_query(handle->pdc, false);
  656. else
  657. rrdeng_enq_cmd(handle->ctx, RRDENG_OPCODE_QUERY, handle->pdc, NULL, handle->priority, NULL, NULL);
  658. }
  659. else {
  660. completion_mark_complete(&handle->pdc->prep_completion);
  661. completion_mark_complete(&handle->pdc->page_completion);
  662. }
  663. }
/*
 * Returns the next acquired page from the PDC's prepared page list that
 * covers now_s, waiting for in-flight disk loads when necessary.
 * now_s is inclusive. The returned page holds a reference that the
 * caller must release; NULL means the page list is exhausted.
 */
// Return the next acquired page covering now_s from the PDC's prepared page
// list, or NULL when the list is exhausted. Blocks while disk loaders are
// still working on the needed page, validates each candidate (timestamps,
// size, update-every), repairing or skipping invalid ones. On success,
// *entries receives the number of points in the page and the caller owns a
// reference on the returned page.
struct pgc_page *pg_cache_lookup_next(
struct rrdengine_instance *ctx,
PDC *pdc,
time_t now_s,
time_t last_update_every_s,
size_t *entries
) {
if (unlikely(!pdc))
return NULL;
// make sure the preparation phase has populated pdc->page_list_JudyL
rrdeng_prep_wait(pdc);
if (unlikely(!pdc->page_list_JudyL))
return NULL;
usec_t start_ut = now_monotonic_usec();
size_t gaps = 0;
bool waited = false, preloaded;
PGC_PAGE *page = NULL;
while(!page) {
bool page_from_pd = false;
preloaded = false;
// next page descriptor covering now_s that is neither processed nor empty
struct page_details *pd = pdc_find_page_for_time(
pdc->page_list_JudyL, now_s, &gaps,
PDC_PAGE_PROCESSED, PDC_PAGE_EMPTY);
if (!pd)
break;
page = pd->page;
page_from_pd = true;
preloaded = pdc_page_status_check(pd, PDC_PAGE_PRELOADED);
if(!page) {
// no page attached to the descriptor yet - loaders may still be running
if(!completion_is_done(&pdc->page_completion)) {
// opportunistic direct main-cache lookup while loads are in flight
page = pgc_page_get_and_acquire(main_cache, (Word_t)ctx,
pd->metric_id, pd->first_time_s, PGC_SEARCH_EXACT);
page_from_pd = false;
preloaded = pdc_page_status_check(pd, PDC_PAGE_PRELOADED);
}
if(!page) {
// block until one more loader job completes, then re-check the descriptor
pdc->completed_jobs =
completion_wait_for_a_job(&pdc->page_completion, pdc->completed_jobs);
page = pd->page;
page_from_pd = true;
preloaded = pdc_page_status_check(pd, PDC_PAGE_PRELOADED);
waited = true;
}
}
// data equal to the DBENGINE_EMPTY_PAGE sentinel means no data at this slot
if(page && pgc_page_data(page) == DBENGINE_EMPTY_PAGE)
pdc_page_status_set(pd, PDC_PAGE_EMPTY);
if(!page || pdc_page_status_check(pd, PDC_PAGE_QUERY_GLOBAL_SKIP_LIST | PDC_PAGE_EMPTY)) {
page = NULL;
continue;
}
// we now have page and is not empty
time_t page_start_time_s = pgc_page_start_time_s(page);
time_t page_end_time_s = pgc_page_end_time_s(page);
time_t page_update_every_s = pgc_page_update_every_s(page);
size_t page_length = pgc_page_data_size(main_cache, page);
// validation: evict pages with corrupt timestamps or oversized data
if(unlikely(page_start_time_s == INVALID_TIME || page_end_time_s == INVALID_TIME)) {
__atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_zero_time_skipped, 1, __ATOMIC_RELAXED);
pgc_page_to_clean_evict_or_release(main_cache, page);
pdc_page_status_set(pd, PDC_PAGE_INVALID | PDC_PAGE_RELEASED);
pd->page = page = NULL;
continue;
}
else if(page_length > RRDENG_BLOCK_SIZE) {
__atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_invalid_size_skipped, 1, __ATOMIC_RELAXED);
pgc_page_to_clean_evict_or_release(main_cache, page);
pdc_page_status_set(pd, PDC_PAGE_INVALID | PDC_PAGE_RELEASED);
pd->page = page = NULL;
continue;
}
else {
// repair a nonsensical update-every (non-positive, or more than a day)
if (unlikely(page_update_every_s <= 0 || page_update_every_s > 86400)) {
__atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_invalid_update_every_fixed, 1, __ATOMIC_RELAXED);
page_update_every_s = pgc_page_fix_update_every(page, last_update_every_s);
pd->update_every_s = (uint32_t) page_update_every_s;
}
// reconcile entries implied by data size with entries implied by time range
size_t entries_by_size = page_entries_by_size(page_length, CTX_POINT_SIZE_BYTES(ctx));
size_t entries_by_time = page_entries_by_time(page_start_time_s, page_end_time_s, page_update_every_s);
if(unlikely(entries_by_size < entries_by_time)) {
// shrink the page's end time so it does not claim more points than it stores
time_t fixed_page_end_time_s = (time_t)(page_start_time_s + (entries_by_size - 1) * page_update_every_s);
pd->last_time_s = page_end_time_s = pgc_page_fix_end_time_s(page, fixed_page_end_time_s);
entries_by_time = (page_end_time_s - (page_start_time_s - page_update_every_s)) / page_update_every_s;
internal_fatal(entries_by_size != entries_by_time, "DBENGINE: wrong entries by time again!");
__atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_invalid_entries_fixed, 1, __ATOMIC_RELAXED);
}
*entries = entries_by_time;
}
// page ends before the time we are looking for - release and skip it
if(unlikely(page_end_time_s < now_s)) {
__atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_past_time_skipped, 1, __ATOMIC_RELAXED);
pgc_page_release(main_cache, page);
pdc_page_status_set(pd, PDC_PAGE_SKIP | PDC_PAGE_RELEASED);
pd->page = page = NULL;
continue;
}
if(page_from_pd)
// PDC_PAGE_RELEASED is for pdc_destroy() to not release the page twice - the caller will release it
pdc_page_status_set(pd, PDC_PAGE_RELEASED | PDC_PAGE_PROCESSED);
else
pdc_page_status_set(pd, PDC_PAGE_PROCESSED);
}
if(gaps && !pdc->executed_with_gaps)
__atomic_add_fetch(&rrdeng_cache_efficiency_stats.queries_executed_with_gaps, 1, __ATOMIC_RELAXED);
// NOTE(review): unary plus, so this is just 'executed_with_gaps = gaps';
// if accumulation across calls was intended it should be '+= gaps' - verify
pdc->executed_with_gaps = +gaps;
// efficiency accounting: loaded vs failed, waited vs not, preloaded vs disk
if(page) {
if(waited)
__atomic_add_fetch(&rrdeng_cache_efficiency_stats.page_next_wait_loaded, 1, __ATOMIC_RELAXED);
else
__atomic_add_fetch(&rrdeng_cache_efficiency_stats.page_next_nowait_loaded, 1, __ATOMIC_RELAXED);
}
else {
if(waited)
__atomic_add_fetch(&rrdeng_cache_efficiency_stats.page_next_wait_failed, 1, __ATOMIC_RELAXED);
else
__atomic_add_fetch(&rrdeng_cache_efficiency_stats.page_next_nowait_failed, 1, __ATOMIC_RELAXED);
}
if(waited) {
if(preloaded)
__atomic_add_fetch(&rrdeng_cache_efficiency_stats.query_time_to_slow_preload_next_page, now_monotonic_usec() - start_ut, __ATOMIC_RELAXED);
else
__atomic_add_fetch(&rrdeng_cache_efficiency_stats.query_time_to_slow_disk_next_page, now_monotonic_usec() - start_ut, __ATOMIC_RELAXED);
}
else {
if(preloaded)
__atomic_add_fetch(&rrdeng_cache_efficiency_stats.query_time_to_fast_preload_next_page, now_monotonic_usec() - start_ut, __ATOMIC_RELAXED);
else
__atomic_add_fetch(&rrdeng_cache_efficiency_stats.query_time_to_fast_disk_next_page, now_monotonic_usec() - start_ut, __ATOMIC_RELAXED);
}
return page;
}
  796. void pgc_open_add_hot_page(Word_t section, Word_t metric_id, time_t start_time_s, time_t end_time_s, time_t update_every_s,
  797. struct rrdengine_datafile *datafile, uint64_t extent_offset, unsigned extent_size, uint32_t page_length) {
  798. if(!datafile_acquire(datafile, DATAFILE_ACQUIRE_OPEN_CACHE)) // for open cache item
  799. fatal("DBENGINE: cannot acquire datafile to put page in open cache");
  800. struct extent_io_data ext_io_data = {
  801. .file = datafile->file,
  802. .fileno = datafile->fileno,
  803. .pos = extent_offset,
  804. .bytes = extent_size,
  805. .page_length = page_length
  806. };
  807. PGC_ENTRY page_entry = {
  808. .hot = true,
  809. .section = section,
  810. .metric_id = metric_id,
  811. .start_time_s = start_time_s,
  812. .end_time_s = end_time_s,
  813. .update_every_s = (uint32_t) update_every_s,
  814. .size = 0,
  815. .data = datafile,
  816. .custom_data = (uint8_t *) &ext_io_data,
  817. };
  818. internal_fatal(!datafile->fileno, "DBENGINE: datafile supplied does not have a number");
  819. bool added = true;
  820. PGC_PAGE *page = pgc_page_add_and_acquire(open_cache, page_entry, &added);
  821. int tries = 100;
  822. while(!added && page_entry.end_time_s > pgc_page_end_time_s(page) && tries--) {
  823. pgc_page_to_clean_evict_or_release(open_cache, page);
  824. page = pgc_page_add_and_acquire(open_cache, page_entry, &added);
  825. }
  826. if(!added) {
  827. datafile_release(datafile, DATAFILE_ACQUIRE_OPEN_CACHE);
  828. internal_fatal(page_entry.end_time_s > pgc_page_end_time_s(page),
  829. "DBENGINE: cannot add longer page to open cache");
  830. }
  831. pgc_page_release(open_cache, (PGC_PAGE *)page);
  832. }
  833. size_t dynamic_open_cache_size(void) {
  834. size_t main_cache_size = pgc_get_wanted_cache_size(main_cache);
  835. size_t target_size = main_cache_size / 100 * 5;
  836. if(target_size < 2 * 1024 * 1024)
  837. target_size = 2 * 1024 * 1024;
  838. return target_size;
  839. }
  840. size_t dynamic_extent_cache_size(void) {
  841. size_t main_cache_size = pgc_get_wanted_cache_size(main_cache);
  842. size_t target_size = main_cache_size / 100 * 5;
  843. if(target_size < 3 * 1024 * 1024)
  844. target_size = 3 * 1024 * 1024;
  845. return target_size;
  846. }
// Create the metrics registry (main_mrg) and the three page caches used by
// dbengine: main (metric data pages), open (entries added via
// pgc_open_add_hot_page(), referencing pages in datafiles) and extent
// (presumably caches extent buffers - see the extent_cache_* callbacks).
// Sizing: ~95% of default_rrdeng_page_cache_mb for the main cache and ~5%
// (minimum 3 MiB) for the extent cache, plus any extra extent cache
// configured via default_rrdeng_extent_cache_mb.
void pgc_and_mrg_initialize(void)
{
main_mrg = mrg_create();
size_t target_cache_size = (size_t)default_rrdeng_page_cache_mb * 1024ULL * 1024ULL;
size_t main_cache_size = (target_cache_size / 100) * 95;
size_t open_cache_size = 0;
size_t extent_cache_size = (target_cache_size / 100) * 5;
// enforce the extent cache minimum; the main cache absorbs the difference
if(extent_cache_size < 3 * 1024 * 1024) {
extent_cache_size = 3 * 1024 * 1024;
main_cache_size = target_cache_size - extent_cache_size;
}
// user-configured extra extent cache is added on top of the computed split
extent_cache_size += (size_t)(default_rrdeng_extent_cache_mb * 1024ULL * 1024ULL);
// NOTE(review): pgc_create() arguments are positional; consult its
// prototype for the exact meaning of the numeric tuning parameters below
main_cache = pgc_create(
"main_cache",
main_cache_size,
main_cache_free_clean_page_callback,
(size_t) rrdeng_pages_per_extent,
main_cache_flush_dirty_page_init_callback,
main_cache_flush_dirty_page_callback,
10,
10240, // if there are that many threads, evict so many at once!
1000, //
5, // don't delay too much other threads
PGC_OPTIONS_AUTOSCALE, // AUTOSCALE = 2x max hot pages
0, // 0 = as many as the system cpus
0
);
// actual open-cache size is driven by the dynamic_open_cache_size()
// callback registered right below, not the initial value
open_cache = pgc_create(
"open_cache",
open_cache_size, // the default is 1MB
open_cache_free_clean_page_callback,
1,
NULL,
open_cache_flush_dirty_page_callback,
10,
10240, // if there are that many threads, evict that many at once!
1000, //
3, // don't delay too much other threads
PGC_OPTIONS_AUTOSCALE | PGC_OPTIONS_EVICT_PAGES_INLINE | PGC_OPTIONS_FLUSH_PAGES_INLINE,
0, // 0 = as many as the system cpus
sizeof(struct extent_io_data)
);
pgc_set_dynamic_target_cache_size_callback(open_cache, dynamic_open_cache_size);
extent_cache = pgc_create(
"extent_cache",
extent_cache_size,
extent_cache_free_clean_page_callback,
1,
NULL,
extent_cache_flush_dirty_page_callback,
5,
10, // it will lose up to that extents at once!
100, //
2, // don't delay too much other threads
PGC_OPTIONS_AUTOSCALE | PGC_OPTIONS_EVICT_PAGES_INLINE | PGC_OPTIONS_FLUSH_PAGES_INLINE,
0, // 0 = as many as the system cpus
0
);
// extent cache also auto-sizes from the main cache's wanted size
pgc_set_dynamic_target_cache_size_callback(extent_cache, dynamic_extent_cache_size);
}