pdc.c 46 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282
  1. // SPDX-License-Identifier: GPL-3.0-or-later
  2. #define NETDATA_RRD_INTERNALS
  3. #include "pdc.h"
// An Extent Page Details List (EPDL): all the page details (pd) that must be
// loaded from a single extent of a single datafile, grouped so that one disk
// read can serve many page requests.
struct extent_page_details_list {
    uv_file file;                               // datafile handle to read the extent from
    uint64_t extent_offset;                     // offset of the extent within the datafile
    uint32_t extent_size;                       // on-disk size of the extent, in bytes
    unsigned number_of_pages_in_JudyL;          // count of pages indexed in the JudyL below
    Pvoid_t page_details_by_metric_id_JudyL;    // metric_id -> (start_time_s -> struct page_details *)
    struct page_details_control *pdc;           // the query (PDC) this extent load belongs to
    struct rrdengine_datafile *datafile;        // the datafile that owns the extent
    struct rrdeng_cmd *cmd;                     // the queued command, while pending (see epdl_cmd_queued/dequeued)
    bool head_to_datafile_extent_queries_pending_for_extent; // true when this epdl is the head entry in the datafile's pending-per-extent index
    struct {
        struct extent_page_details_list *prev;
        struct extent_page_details_list *next;
    } query;                                    // list of epdls merged onto the same extent read
};
// A Datafile Extent Offset List (DEOL): per-datafile grouping of EPDLs,
// keyed by extent offset; used transiently while routing a query's pages.
typedef struct datafile_extent_offset_list {
    uv_file file;                                   // datafile handle
    unsigned fileno;                                // datafile number
    Pvoid_t extent_pd_list_by_extent_offset_JudyL;  // extent_offset -> EPDL *
} DEOL;
// ----------------------------------------------------------------------------
// PDC cache

// ARAL (array allocator) pools backing the small per-query structures.
// Each pool is created once by the matching *_init() function.
static struct {
    struct {
        ARAL *ar;   // pool for PDC structures
    } pdc;
    struct {
        ARAL *ar;   // pool for struct page_details
    } pd;
    struct {
        ARAL *ar;   // pool for EPDL structures
    } epdl;
    struct {
        ARAL *ar;   // pool for DEOL structures
    } deol;
} pdc_globals = {};
  40. void pdc_init(void) {
  41. pdc_globals.pdc.ar = aral_create(
  42. "dbengine-pdc",
  43. sizeof(PDC),
  44. 0,
  45. 65536,
  46. NULL,
  47. NULL, NULL, false, false
  48. );
  49. }
  50. PDC *pdc_get(void) {
  51. PDC *pdc = aral_mallocz(pdc_globals.pdc.ar);
  52. memset(pdc, 0, sizeof(PDC));
  53. return pdc;
  54. }
// Return a PDC to its pool.
static void pdc_release(PDC *pdc) {
    aral_freez(pdc_globals.pdc.ar, pdc);
}
// Bytes used by the PDC pool (allocator overhead + structures).
size_t pdc_cache_size(void) {
    return aral_overhead(pdc_globals.pdc.ar) + aral_structures(pdc_globals.pdc.ar);
}
  61. // ----------------------------------------------------------------------------
  62. // PD cache
  63. void page_details_init(void) {
  64. pdc_globals.pd.ar = aral_create(
  65. "dbengine-pd",
  66. sizeof(struct page_details),
  67. 0,
  68. 65536,
  69. NULL,
  70. NULL, NULL, false, false
  71. );
  72. }
  73. struct page_details *page_details_get(void) {
  74. struct page_details *pd = aral_mallocz(pdc_globals.pd.ar);
  75. memset(pd, 0, sizeof(struct page_details));
  76. return pd;
  77. }
// Return a page details structure to its pool.
static void page_details_release(struct page_details *pd) {
    aral_freez(pdc_globals.pd.ar, pd);
}
// Bytes used by the page details pool (allocator overhead + structures).
size_t pd_cache_size(void) {
    return aral_overhead(pdc_globals.pd.ar) + aral_structures(pdc_globals.pd.ar);
}
  84. // ----------------------------------------------------------------------------
  85. // epdl cache
  86. void epdl_init(void) {
  87. pdc_globals.epdl.ar = aral_create(
  88. "dbengine-epdl",
  89. sizeof(EPDL),
  90. 0,
  91. 65536,
  92. NULL,
  93. NULL, NULL, false, false
  94. );
  95. }
  96. static EPDL *epdl_get(void) {
  97. EPDL *epdl = aral_mallocz(pdc_globals.epdl.ar);
  98. memset(epdl, 0, sizeof(EPDL));
  99. return epdl;
  100. }
// Return an EPDL to its pool.
static void epdl_release(EPDL *epdl) {
    aral_freez(pdc_globals.epdl.ar, epdl);
}
// Bytes used by the EPDL pool (allocator overhead + structures).
size_t epdl_cache_size(void) {
    return aral_overhead(pdc_globals.epdl.ar) + aral_structures(pdc_globals.epdl.ar);
}
  107. // ----------------------------------------------------------------------------
  108. // deol cache
  109. void deol_init(void) {
  110. pdc_globals.deol.ar = aral_create(
  111. "dbengine-deol",
  112. sizeof(DEOL),
  113. 0,
  114. 65536,
  115. NULL,
  116. NULL, NULL, false, false
  117. );
  118. }
  119. static DEOL *deol_get(void) {
  120. DEOL *deol = aral_mallocz(pdc_globals.deol.ar);
  121. memset(deol, 0, sizeof(DEOL));
  122. return deol;
  123. }
// Return a DEOL to its pool.
static void deol_release(DEOL *deol) {
    aral_freez(pdc_globals.deol.ar, deol);
}
// Bytes used by the DEOL pool (allocator overhead + structures).
size_t deol_cache_size(void) {
    return aral_overhead(pdc_globals.deol.ar) + aral_structures(pdc_globals.deol.ar);
}
  130. // ----------------------------------------------------------------------------
  131. // extent with buffer cache
// Global free-list of reusable extent read/decompress buffers.
// 'protected' is guarded by its spinlock; 'atomics' are updated lock-free.
static struct {
    struct {
        SPINLOCK spinlock;                      // guards the free list below
        struct extent_buffer *available_items;  // doubly-linked list of idle buffers
        size_t available;                       // number of idle buffers in the list
    } protected;

    struct {
        size_t allocated;       // buffers currently allocated (cached or in use)
        size_t allocated_bytes; // total bytes of those buffers (incl. headers)
    } atomics;

    size_t max_size;            // size every buffer is allocated at (see extent_buffer_init)
} extent_buffer_globals = {
    .protected = {
        .spinlock = NETDATA_SPINLOCK_INITIALIZER,
        .available_items = NULL,
        .available = 0,
    },
    .atomics = {
        .allocated = 0,
        .allocated_bytes = 0,
    },
    .max_size = MAX_PAGES_PER_EXTENT * RRDENG_BLOCK_SIZE,
};
  155. void extent_buffer_init(void) {
  156. size_t max_extent_uncompressed = MAX_PAGES_PER_EXTENT * RRDENG_BLOCK_SIZE;
  157. size_t max_size = (size_t)LZ4_compressBound(MAX_PAGES_PER_EXTENT * RRDENG_BLOCK_SIZE);
  158. if(max_size < max_extent_uncompressed)
  159. max_size = max_extent_uncompressed;
  160. extent_buffer_globals.max_size = max_size;
  161. }
  162. void extent_buffer_cleanup1(void) {
  163. struct extent_buffer *item = NULL;
  164. if(!netdata_spinlock_trylock(&extent_buffer_globals.protected.spinlock))
  165. return;
  166. if(extent_buffer_globals.protected.available_items && extent_buffer_globals.protected.available > 1) {
  167. item = extent_buffer_globals.protected.available_items;
  168. DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(extent_buffer_globals.protected.available_items, item, cache.prev, cache.next);
  169. extent_buffer_globals.protected.available--;
  170. }
  171. netdata_spinlock_unlock(&extent_buffer_globals.protected.spinlock);
  172. if(item) {
  173. size_t bytes = sizeof(struct extent_buffer) + item->bytes;
  174. freez(item);
  175. __atomic_sub_fetch(&extent_buffer_globals.atomics.allocated, 1, __ATOMIC_RELAXED);
  176. __atomic_sub_fetch(&extent_buffer_globals.atomics.allocated_bytes, bytes, __ATOMIC_RELAXED);
  177. }
  178. }
// Get an extent buffer of at least 'size' bytes, reusing a cached one when
// possible. All buffers are allocated at extent_buffer_globals.max_size, so
// any cached buffer normally fits any request.
struct extent_buffer *extent_buffer_get(size_t size) {
    internal_fatal(size > extent_buffer_globals.max_size, "DBENGINE: extent size is too big");

    struct extent_buffer *eb = NULL;

    // round every request up to max_size, so cached buffers are interchangeable
    if(size < extent_buffer_globals.max_size)
        size = extent_buffer_globals.max_size;

    // try to pop a buffer from the free list
    netdata_spinlock_lock(&extent_buffer_globals.protected.spinlock);
    if(likely(extent_buffer_globals.protected.available_items)) {
        eb = extent_buffer_globals.protected.available_items;
        DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(extent_buffer_globals.protected.available_items, eb, cache.prev, cache.next);
        extent_buffer_globals.protected.available--;
    }
    netdata_spinlock_unlock(&extent_buffer_globals.protected.spinlock);

    // discard a cached buffer that is too small
    // (possible if max_size grew after the buffer was allocated)
    if(unlikely(eb && eb->bytes < size)) {
        size_t bytes = sizeof(struct extent_buffer) + eb->bytes;
        freez(eb);
        eb = NULL;
        __atomic_sub_fetch(&extent_buffer_globals.atomics.allocated, 1, __ATOMIC_RELAXED);
        __atomic_sub_fetch(&extent_buffer_globals.atomics.allocated_bytes, bytes, __ATOMIC_RELAXED);
    }

    // allocate a fresh buffer when nothing usable was cached
    if(unlikely(!eb)) {
        size_t bytes = sizeof(struct extent_buffer) + size;
        eb = mallocz(bytes);
        eb->bytes = size;
        __atomic_add_fetch(&extent_buffer_globals.atomics.allocated, 1, __ATOMIC_RELAXED);
        __atomic_add_fetch(&extent_buffer_globals.atomics.allocated_bytes, bytes, __ATOMIC_RELAXED);
    }

    return eb;
}
  207. void extent_buffer_release(struct extent_buffer *eb) {
  208. if(unlikely(!eb)) return;
  209. netdata_spinlock_lock(&extent_buffer_globals.protected.spinlock);
  210. DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(extent_buffer_globals.protected.available_items, eb, cache.prev, cache.next);
  211. extent_buffer_globals.protected.available++;
  212. netdata_spinlock_unlock(&extent_buffer_globals.protected.spinlock);
  213. }
// Total bytes currently held by extent buffers (cached and in-flight).
size_t extent_buffer_cache_size(void) {
    return __atomic_load_n(&extent_buffer_globals.atomics.allocated_bytes, __ATOMIC_RELAXED);
}
  217. // ----------------------------------------------------------------------------
  218. // epdl logic
// Free an EPDL: destroy each per-metric inner JudyL (start_time -> pd),
// then the outer JudyL, then return the EPDL itself to its pool.
// Note: the page_details entries themselves are owned by the PDC, not freed here.
static void epdl_destroy(EPDL *epdl)
{
    Pvoid_t *pd_by_start_time_s_JudyL;
    Word_t metric_id_index = 0;
    bool metric_id_first = true;

    while ((pd_by_start_time_s_JudyL = PDCJudyLFirstThenNext(
            epdl->page_details_by_metric_id_JudyL,
            &metric_id_index, &metric_id_first)))
        PDCJudyLFreeArray(pd_by_start_time_s_JudyL, PJE0);

    PDCJudyLFreeArray(&epdl->page_details_by_metric_id_JudyL, PJE0);
    epdl_release(epdl);
}
// Mark every page of this EPDL that has not been loaded (and is not already
// ready/failed) as FAILED, OR-ing in the caller's 'tags' for diagnostics.
// Optionally accumulates the number of pages marked into *statistics_counter.
static void epdl_mark_all_not_loaded_pages_as_failed(EPDL *epdl, PDC_PAGE_STATUS tags, size_t *statistics_counter)
{
    size_t pages_matched = 0;

    Word_t metric_id_index = 0;
    bool metric_id_first = true;
    Pvoid_t *pd_by_start_time_s_JudyL;
    while((pd_by_start_time_s_JudyL = PDCJudyLFirstThenNext(epdl->page_details_by_metric_id_JudyL, &metric_id_index, &metric_id_first))) {

        Word_t start_time_index = 0;
        bool start_time_first = true;
        Pvoid_t *PValue;
        while ((PValue = PDCJudyLFirstThenNext(*pd_by_start_time_s_JudyL, &start_time_index, &start_time_first))) {
            struct page_details *pd = *PValue;

            // skip pages that loaded, or already have a final status
            if(!pd->page && !pdc_page_status_check(pd, PDC_PAGE_FAILED|PDC_PAGE_READY)) {
                pdc_page_status_set(pd, PDC_PAGE_FAILED | tags);
                pages_matched++;
            }
        }
    }

    if(pages_matched && statistics_counter)
        __atomic_add_fetch(statistics_counter, pages_matched, __ATOMIC_RELAXED);
}
  252. /*
  253. static bool epdl_check_if_pages_are_already_in_cache(struct rrdengine_instance *ctx, EPDL *epdl, PDC_PAGE_STATUS tags)
  254. {
  255. size_t count_remaining = 0;
  256. size_t found = 0;
  257. Word_t metric_id_index = 0;
  258. bool metric_id_first = true;
  259. Pvoid_t *pd_by_start_time_s_JudyL;
  260. while((pd_by_start_time_s_JudyL = PDCJudyLFirstThenNext(epdl->page_details_by_metric_id_JudyL, &metric_id_index, &metric_id_first))) {
  261. Word_t start_time_index = 0;
  262. bool start_time_first = true;
  263. Pvoid_t *PValue;
  264. while ((PValue = PDCJudyLFirstThenNext(*pd_by_start_time_s_JudyL, &start_time_index, &start_time_first))) {
  265. struct page_details *pd = *PValue;
  266. if (pd->page)
  267. continue;
  268. pd->page = pgc_page_get_and_acquire(main_cache, (Word_t) ctx, pd->metric_id, pd->first_time_s, PGC_SEARCH_EXACT);
  269. if (pd->page) {
  270. found++;
  271. pdc_page_status_set(pd, PDC_PAGE_READY | tags);
  272. }
  273. else
  274. count_remaining++;
  275. }
  276. }
  277. if(found) {
  278. __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_load_ok_preloaded, found, __ATOMIC_RELAXED);
  279. __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_data_source_main_cache, found, __ATOMIC_RELAXED);
  280. }
  281. return count_remaining == 0;
  282. }
  283. */
  284. // ----------------------------------------------------------------------------
  285. // PDC logic
// Tear down a fully-released PDC: release the metric reference and the
// completions, release/free every page details entry, free the page list
// JudyL, return the PDC to its pool and update query statistics.
static void pdc_destroy(PDC *pdc) {
    mrg_metric_release(main_mrg, pdc->metric);
    completion_destroy(&pdc->prep_completion);
    completion_destroy(&pdc->page_completion);

    Pvoid_t *PValue;
    struct page_details *pd;
    Word_t time_index = 0;
    bool first_then_next = true;
    size_t unroutable = 0, cancelled = 0;
    while((PValue = PDCJudyLFirstThenNext(pdc->page_list_JudyL, &time_index, &first_then_next))) {
        pd = *PValue;

        // no need for atomics here - we are done...
        PDC_PAGE_STATUS status = pd->status;

        // drop the datafile reference acquired while routing this page
        if(status & PDC_PAGE_DATAFILE_ACQUIRED) {
            datafile_release(pd->datafile.ptr, DATAFILE_ACQUIRE_PAGE_DETAILS);
            pd->datafile.ptr = NULL;
        }

        internal_fatal(pd->datafile.ptr, "DBENGINE: page details has a datafile.ptr that is not released.");

        // a page with no data and no terminal status was never routed anywhere
        if(!pd->page && !(status & (PDC_PAGE_READY | PDC_PAGE_FAILED | PDC_PAGE_RELEASED | PDC_PAGE_SKIP | PDC_PAGE_INVALID | PDC_PAGE_CANCELLED))) {
            // pdc_page_status_set(pd, PDC_PAGE_FAILED);
            unroutable++;
        }
        else if(!pd->page && (status & PDC_PAGE_CANCELLED))
            cancelled++;

        // release the cache page if the query did not already do so
        if(pd->page && !(status & PDC_PAGE_RELEASED)) {
            pgc_page_release(main_cache, pd->page);
            // pdc_page_status_set(pd, PDC_PAGE_RELEASED);
        }

        page_details_release(pd);
    }

    PDCJudyLFreeArray(&pdc->page_list_JudyL, PJE0);

    __atomic_sub_fetch(&rrdeng_cache_efficiency_stats.currently_running_queries, 1, __ATOMIC_RELAXED);
    __atomic_sub_fetch(&pdc->ctx->atomic.inflight_queries, 1, __ATOMIC_RELAXED);

    pdc_release(pdc);

    if(unroutable)
        __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_load_fail_unroutable, unroutable, __ATOMIC_RELAXED);

    if(cancelled)
        __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_load_fail_cancelled, cancelled, __ATOMIC_RELAXED);
}
  325. void pdc_acquire(PDC *pdc) {
  326. netdata_spinlock_lock(&pdc->refcount_spinlock);
  327. if(pdc->refcount < 1)
  328. fatal("DBENGINE: pdc is not referenced and cannot be acquired");
  329. pdc->refcount++;
  330. netdata_spinlock_unlock(&pdc->refcount_spinlock);
  331. }
// Drop one reference on a PDC; destroy it when the count reaches zero.
// Returns true when the PDC was destroyed (or was NULL), false otherwise.
// 'worker' callers additionally signal the query thread when only one
// reference remains after the decrement.
bool pdc_release_and_destroy_if_unreferenced(PDC *pdc, bool worker, bool router __maybe_unused) {
    if(unlikely(!pdc))
        return true;

    netdata_spinlock_lock(&pdc->refcount_spinlock);

    if(pdc->refcount <= 0)
        fatal("DBENGINE: pdc is not referenced and cannot be released");

    pdc->refcount--;

    if (pdc->refcount <= 1 && worker) {
        // when 1 refcount is remaining, and we are a worker,
        // we can mark the job completed:
        // - if the remaining refcount is from the query caller, we will wake it up
        // - if the remaining refcount is from another worker, the query thread is already away
        completion_mark_complete(&pdc->page_completion);
    }

    if (pdc->refcount == 0) {
        netdata_spinlock_unlock(&pdc->refcount_spinlock);
        pdc_destroy(pdc);
        return true;
    }

    netdata_spinlock_unlock(&pdc->refcount_spinlock);
    return false;
}
  354. void epdl_cmd_queued(void *epdl_ptr, struct rrdeng_cmd *cmd) {
  355. EPDL *epdl = epdl_ptr;
  356. epdl->cmd = cmd;
  357. }
  358. void epdl_cmd_dequeued(void *epdl_ptr) {
  359. EPDL *epdl = epdl_ptr;
  360. epdl->cmd = NULL;
  361. }
  362. static struct rrdeng_cmd *epdl_get_cmd(void *epdl_ptr) {
  363. EPDL *epdl = epdl_ptr;
  364. return epdl->cmd;
  365. }
// Register an epdl as pending on its extent in the datafile's index.
// If another query is already pending on the same extent, this epdl is merged
// into the existing one (appended to its list) and false is returned — the
// caller must NOT dispatch a new read. Returns true when the caller must
// dispatch a read for this extent (this epdl became the head entry).
static bool epdl_pending_add(EPDL *epdl) {
    bool added_new;

    netdata_spinlock_lock(&epdl->datafile->extent_queries.spinlock);
    Pvoid_t *PValue = JudyLIns(&epdl->datafile->extent_queries.pending_epdl_by_extent_offset_judyL, epdl->extent_offset, PJE0);
    internal_fatal(!PValue || PValue == PJERR, "DBENGINE: corrupted pending extent judy");

    EPDL *base = *PValue;

    if(!base) {
        // first query for this extent - we will own the index entry
        added_new = true;
        epdl->head_to_datafile_extent_queries_pending_for_extent = true;
    }
    else {
        // merged into an already pending read of the same extent
        added_new = false;
        epdl->head_to_datafile_extent_queries_pending_for_extent = false;
        __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_load_extent_merged, 1, __ATOMIC_RELAXED);

        // re-prioritize the queued command when the new query is more urgent
        // (lower priority value appears to mean more urgent - TODO confirm)
        if(base->pdc->priority > epdl->pdc->priority)
            rrdeng_req_cmd(epdl_get_cmd, base, epdl->pdc->priority);
    }

    DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(base, epdl, query.prev, query.next);
    *PValue = base;

    netdata_spinlock_unlock(&epdl->datafile->extent_queries.spinlock);

    return added_new;
}
  388. static void epdl_pending_del(EPDL *epdl) {
  389. netdata_spinlock_lock(&epdl->datafile->extent_queries.spinlock);
  390. if(epdl->head_to_datafile_extent_queries_pending_for_extent) {
  391. epdl->head_to_datafile_extent_queries_pending_for_extent = false;
  392. int rc = JudyLDel(&epdl->datafile->extent_queries.pending_epdl_by_extent_offset_judyL, epdl->extent_offset, PJE0);
  393. (void) rc;
  394. internal_fatal(!rc, "DBENGINE: epdl not found in pending list");
  395. }
  396. netdata_spinlock_unlock(&epdl->datafile->extent_queries.spinlock);
  397. }
// Route the disk-pending pages of a PDC to per-extent work items (EPDLs).
// Pages are grouped first per datafile (DEOL) and then per extent (EPDL);
// each unique extent is dispatched once — the first through
// exec_first_extent_list(), the rest through exec_rest_extent_list() —
// unless it merges with a read already pending on the same extent.
void pdc_to_epdl_router(struct rrdengine_instance *ctx, PDC *pdc, execute_extent_page_details_list_t exec_first_extent_list, execute_extent_page_details_list_t exec_rest_extent_list)
{
    Pvoid_t *PValue;
    Pvoid_t *PValue1;
    Pvoid_t *PValue2;
    Word_t time_index = 0;
    struct page_details *pd = NULL;

    // this is the entire page list
    // Lets do some deduplication
    // 1. Per datafile
    // 2. Per extent
    // 3. Pages per extent will be added to the cache either as acquired or not

    Pvoid_t JudyL_datafile_list = NULL;

    DEOL *deol;
    EPDL *epdl;

    if (pdc->page_list_JudyL) {
        bool first_then_next = true;
        while((PValue = PDCJudyLFirstThenNext(pdc->page_list_JudyL, &time_index, &first_then_next))) {
            pd = *PValue;

            internal_fatal(!pd,
                           "DBENGINE: pdc page list has an empty page details entry");

            // only pages that still need a disk read are routed
            if (!(pd->status & PDC_PAGE_DISK_PENDING))
                continue;

            internal_fatal(!(pd->status & PDC_PAGE_DATAFILE_ACQUIRED),
                           "DBENGINE: page details has not acquired the datafile");

            internal_fatal((pd->status & (PDC_PAGE_READY | PDC_PAGE_FAILED)),
                           "DBENGINE: page details has disk pending flag but it is ready/failed");

            internal_fatal(pd->page,
                           "DBENGINE: page details has a page linked to it, but it is marked for loading");

            // find (or create) the DEOL of this page's datafile
            PValue1 = PDCJudyLIns(&JudyL_datafile_list, pd->datafile.fileno, PJE0);
            if (PValue1 && !*PValue1) {
                *PValue1 = deol = deol_get();
                deol->extent_pd_list_by_extent_offset_JudyL = NULL;
                deol->fileno = pd->datafile.fileno;
            }
            else
                deol = *PValue1;

            // find (or create) the EPDL of this page's extent
            PValue2 = PDCJudyLIns(&deol->extent_pd_list_by_extent_offset_JudyL, pd->datafile.extent.pos, PJE0);
            if (PValue2 && !*PValue2) {
                *PValue2 = epdl = epdl_get();
                epdl->page_details_by_metric_id_JudyL = NULL;
                epdl->number_of_pages_in_JudyL = 0;
                epdl->file = pd->datafile.file;
                epdl->extent_offset = pd->datafile.extent.pos;
                epdl->extent_size = pd->datafile.extent.bytes;
                epdl->datafile = pd->datafile.ptr;
            }
            else
                epdl = *PValue2;

            // index the page inside the EPDL: metric_id -> first_time_s -> pd
            epdl->number_of_pages_in_JudyL++;

            Pvoid_t *pd_by_first_time_s_judyL = PDCJudyLIns(&epdl->page_details_by_metric_id_JudyL, pd->metric_id, PJE0);
            Pvoid_t *pd_pptr = PDCJudyLIns(pd_by_first_time_s_judyL, pd->first_time_s, PJE0);
            *pd_pptr = pd;
        }

        size_t extent_list_no = 0;
        Word_t datafile_no = 0;
        first_then_next = true;
        while((PValue = PDCJudyLFirstThenNext(JudyL_datafile_list, &datafile_no, &first_then_next))) {
            deol = *PValue;

            bool first_then_next_extent = true;
            Word_t pos = 0;
            while ((PValue = PDCJudyLFirstThenNext(deol->extent_pd_list_by_extent_offset_JudyL, &pos, &first_then_next_extent))) {
                epdl = *PValue;
                internal_fatal(!epdl, "DBENGINE: extent_list is not populated properly");

                // The extent page list can be dispatched to a worker
                // It will need to populate the cache with "acquired" pages that are in the list (pd) only
                // the rest of the extent pages will be added to the cache but not acquired

                pdc_acquire(pdc); // we do this for the next worker: do_read_extent_work()

                epdl->pdc = pdc;

                // dispatch only when we are the first query pending on this
                // extent; otherwise we merged into the already-pending epdl
                if(epdl_pending_add(epdl)) {
                    if (extent_list_no++ == 0)
                        exec_first_extent_list(ctx, epdl, pdc->priority);
                    else
                        exec_rest_extent_list(ctx, epdl, pdc->priority);
                }
            }

            PDCJudyLFreeArray(&deol->extent_pd_list_by_extent_offset_JudyL, PJE0);
            deol_release(deol);
        }

        PDCJudyLFreeArray(&JudyL_datafile_list, PJE0);
    }

    // drop the router's reference (may destroy the pdc and wake the caller)
    pdc_release_and_destroy_if_unreferenced(pdc, true, true);
}
  481. void collect_page_flags_to_buffer(BUFFER *wb, RRDENG_COLLECT_PAGE_FLAGS flags) {
  482. if(flags & RRDENG_PAGE_PAST_COLLECTION)
  483. buffer_strcat(wb, "PAST_COLLECTION ");
  484. if(flags & RRDENG_PAGE_REPEATED_COLLECTION)
  485. buffer_strcat(wb, "REPEATED_COLLECTION ");
  486. if(flags & RRDENG_PAGE_BIG_GAP)
  487. buffer_strcat(wb, "BIG_GAP ");
  488. if(flags & RRDENG_PAGE_GAP)
  489. buffer_strcat(wb, "GAP ");
  490. if(flags & RRDENG_PAGE_FUTURE_POINT)
  491. buffer_strcat(wb, "FUTURE_POINT ");
  492. if(flags & RRDENG_PAGE_CREATED_IN_FUTURE)
  493. buffer_strcat(wb, "CREATED_IN_FUTURE ");
  494. if(flags & RRDENG_PAGE_COMPLETED_IN_FUTURE)
  495. buffer_strcat(wb, "COMPLETED_IN_FUTURE ");
  496. if(flags & RRDENG_PAGE_UNALIGNED)
  497. buffer_strcat(wb, "UNALIGNED ");
  498. if(flags & RRDENG_PAGE_CONFLICT)
  499. buffer_strcat(wb, "CONFLICT ");
  500. if(flags & RRDENG_PAGE_FULL)
  501. buffer_strcat(wb, "PAGE_FULL");
  502. if(flags & RRDENG_PAGE_COLLECT_FINALIZE)
  503. buffer_strcat(wb, "COLLECT_FINALIZE");
  504. if(flags & RRDENG_PAGE_UPDATE_EVERY_CHANGE)
  505. buffer_strcat(wb, "UPDATE_EVERY_CHANGE");
  506. if(flags & RRDENG_PAGE_STEP_TOO_SMALL)
  507. buffer_strcat(wb, "STEP_TOO_SMALL");
  508. if(flags & RRDENG_PAGE_STEP_UNALIGNED)
  509. buffer_strcat(wb, "STEP_UNALIGNED");
  510. }
// Validate a page descriptor read from an on-disk extent header by converting
// its microsecond timestamps to seconds and delegating to validate_page()
// with unknown update-every and entries (both 0, derived from page length).
inline VALIDATED_PAGE_DESCRIPTOR validate_extent_page_descr(const struct rrdeng_extent_page_descr *descr, time_t now_s, time_t overwrite_zero_update_every_s, bool have_read_error) {
    return validate_page(
        (uuid_t *)descr->uuid,
        (time_t) (descr->start_time_ut / USEC_PER_SEC),
        (time_t) (descr->end_time_ut / USEC_PER_SEC),
        0,                          // update_every_s unknown - derive it
        descr->page_length,
        descr->type,
        0,                          // entries unknown - derive from length
        now_s,
        overwrite_zero_update_every_s,
        have_read_error,
        "loaded", 0);
}
// Validate (and, where possible, repair) a page descriptor.
// Recomputes entries from the page length, derives update_every when unknown,
// and cross-checks timestamps against each other and against 'now_s'.
// Returns a VALIDATED_PAGE_DESCRIPTOR with .is_valid set, and possibly
// corrected end_time_s / update_every_s; inconsistencies are logged.
VALIDATED_PAGE_DESCRIPTOR validate_page(
        uuid_t *uuid,
        time_t start_time_s,
        time_t end_time_s,
        time_t update_every_s,                  // can be zero, if unknown
        size_t page_length,
        uint8_t page_type,
        size_t entries,                         // can be zero, if unknown
        time_t now_s,                           // can be zero, to disable future timestamp check
        time_t overwrite_zero_update_every_s,   // can be zero, if unknown
        bool have_read_error,
        const char *msg,
        RRDENG_COLLECT_PAGE_FLAGS flags) {

    VALIDATED_PAGE_DESCRIPTOR vd = {
            .start_time_s = start_time_s,
            .end_time_s = end_time_s,
            .update_every_s = update_every_s,
            .page_length = page_length,
            .type = page_type,
            .is_valid = true,
    };

    // always calculate entries by size
    vd.point_size = page_type_size[vd.type];
    vd.entries = page_entries_by_size(vd.page_length, vd.point_size);

    // allow to be called without entries (when loading pages from disk)
    if(!entries)
        entries = vd.entries;

    // allow to be called without update every (when loading pages from disk)
    if(!update_every_s) {
        // derive update_every from the time span, or fall back to the override
        vd.update_every_s = (vd.entries > 1) ? ((vd.end_time_s - vd.start_time_s) / (time_t) (vd.entries - 1))
                                             : overwrite_zero_update_every_s;
        update_every_s = vd.update_every_s;
    }

    // another such set of checks exists in
    // update_metric_retention_and_granularity_by_uuid()

    bool updated = false;

    // reject pages with impossible lengths, inverted/zero/future timestamps,
    // or multiple entries with no time step
    if( have_read_error ||
        vd.page_length == 0 ||
        vd.page_length > RRDENG_BLOCK_SIZE ||
        vd.start_time_s > vd.end_time_s ||
        (now_s && vd.end_time_s > now_s) ||
        vd.start_time_s == 0 ||
        vd.end_time_s == 0 ||
        (vd.start_time_s == vd.end_time_s && vd.entries > 1) ||
        (vd.update_every_s == 0 && vd.entries > 1)
        )
        vd.is_valid = false;

    else {
        if(unlikely(vd.entries != entries || vd.update_every_s != update_every_s))
            updated = true;

        if (likely(vd.update_every_s)) {
            // reconcile entry count with the time span
            size_t entries_by_time = page_entries_by_time(vd.start_time_s, vd.end_time_s, vd.update_every_s);

            if (vd.entries != entries_by_time) {
                if (overwrite_zero_update_every_s < vd.update_every_s)
                    vd.update_every_s = overwrite_zero_update_every_s;

                time_t new_end_time_s = (time_t)(vd.start_time_s + (vd.entries - 1) * vd.update_every_s);

                if(new_end_time_s <= vd.end_time_s) {
                    // end time is wrong
                    vd.end_time_s = new_end_time_s;
                }
                else {
                    // update every is wrong
                    vd.update_every_s = overwrite_zero_update_every_s;
                    vd.end_time_s = (time_t)(vd.start_time_s + (vd.entries - 1) * vd.update_every_s);
                }

                updated = true;
            }
        }
        else if(overwrite_zero_update_every_s) {
            vd.update_every_s = overwrite_zero_update_every_s;
            updated = true;
        }
    }

    // log invalid or repaired pages (rate-limited in production builds)
    if(unlikely(!vd.is_valid || updated)) {
#ifndef NETDATA_INTERNAL_CHECKS
        error_limit_static_global_var(erl, 1, 0);
#endif
        char uuid_str[UUID_STR_LEN + 1];
        uuid_unparse(*uuid, uuid_str);

        BUFFER *wb = NULL;

        if(flags) {
            wb = buffer_create(0, NULL);
            collect_page_flags_to_buffer(wb, flags);
        }

        if(!vd.is_valid) {
#ifdef NETDATA_INTERNAL_CHECKS
            internal_error(true,
#else
            error_limit(&erl,
#endif
                          "DBENGINE: metric '%s' %s invalid page of type %u "
                          "from %ld to %ld (now %ld), update every %ld, page length %zu, entries %zu (flags: %s)",
                          uuid_str, msg, vd.type,
                          vd.start_time_s, vd.end_time_s, now_s, vd.update_every_s, vd.page_length, vd.entries, wb?buffer_tostring(wb):""
            );
        }
        else {
            const char *err_valid = (vd.is_valid) ? "" : "found invalid, ";
            const char *err_start = (vd.start_time_s == start_time_s) ? "" : "start time updated, ";
            const char *err_end = (vd.end_time_s == end_time_s) ? "" : "end time updated, ";
            const char *err_update = (vd.update_every_s == update_every_s) ? "" : "update every updated, ";
            const char *err_length = (vd.page_length == page_length) ? "" : "page length updated, ";
            const char *err_entries = (vd.entries == entries) ? "" : "entries updated, ";
            const char *err_future = (now_s && vd.end_time_s <= now_s) ? "" : "future end time, ";

#ifdef NETDATA_INTERNAL_CHECKS
            internal_error(true,
#else
            error_limit(&erl,
#endif
                          "DBENGINE: metric '%s' %s page of type %u "
                          "from %ld to %ld (now %ld), update every %ld, page length %zu, entries %zu (flags: %s), "
                          "found inconsistent - the right is "
                          "from %ld to %ld, update every %ld, page length %zu, entries %zu: "
                          "%s%s%s%s%s%s%s",
                          uuid_str, msg, vd.type,
                          start_time_s, end_time_s, now_s, update_every_s, page_length, entries, wb?buffer_tostring(wb):"",
                          vd.start_time_s, vd.end_time_s, vd.update_every_s, vd.page_length, vd.entries,
                          err_valid, err_start, err_end, err_update, err_length, err_entries, err_future
            );
        }

        buffer_free(wb);
    }

    return vd;
}
// Walk this epdl and every epdl merged onto the same extent (query.next),
// collecting into a linked list (load.prev/load.next) the page details that
// match (metric_id, start_time_s) and have not been loaded yet.
// Pages of queries whose workers were asked to stop are marked
// FAILED|CANCELLED instead of being linked. First unregisters the epdl from
// the pending index, so no more queries can merge while we iterate.
static inline struct page_details *epdl_get_pd_load_link_list_from_metric_start_time(EPDL *epdl, Word_t metric_id, time_t start_time_s) {

    if(unlikely(epdl->head_to_datafile_extent_queries_pending_for_extent))
        // stop appending more pages to this epdl
        epdl_pending_del(epdl);

    struct page_details *pd_list = NULL;

    for(EPDL *ep = epdl; ep ;ep = ep->query.next) {
        Pvoid_t *pd_by_start_time_s_judyL = PDCJudyLGet(ep->page_details_by_metric_id_JudyL, metric_id, PJE0);
        internal_fatal(pd_by_start_time_s_judyL == PJERR, "DBENGINE: corrupted extent metrics JudyL");

        if (unlikely(pd_by_start_time_s_judyL && *pd_by_start_time_s_judyL)) {
            Pvoid_t *pd_pptr = PDCJudyLGet(*pd_by_start_time_s_judyL, start_time_s, PJE0);
            internal_fatal(pd_pptr == PJERR, "DBENGINE: corrupted metric page details JudyHS");

            if(likely(pd_pptr && *pd_pptr)) {
                struct page_details *pd = *pd_pptr;
                internal_fatal(metric_id != pd->metric_id, "DBENGINE: metric ids do not match");

                if(likely(!pd->page)) {
                    if (unlikely(__atomic_load_n(&ep->pdc->workers_should_stop, __ATOMIC_RELAXED)))
                        pdc_page_status_set(pd, PDC_PAGE_FAILED | PDC_PAGE_CANCELLED);

                    else
                        DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(pd_list, pd, load.prev, load.next);
                }
            }
        }
    }

    return pd_list;
}
// Log (rate-limited) an extent loading error, enriching the message with the
// best time range / UUID information available, tried in this order:
//   1. the extent page descriptor (descr), when given;
//   2. the first page_details of the EPDL's lookup structure;
//   3. the PDC's overall query time range (no UUID available then).
// NOTE(review): epdl is dereferenced unconditionally at the end
// (epdl->datafile->fileno), so callers must pass a non-NULL epdl - confirm
// against all call sites.
static void epdl_extent_loading_error_log(struct rrdengine_instance *ctx, EPDL *epdl, struct rrdeng_extent_page_descr *descr, const char *msg) {
    char uuid[UUID_STR_LEN] = "";
    time_t start_time_s = 0;
    time_t end_time_s = 0;
    bool used_epdl = false;
    bool used_descr = false;

    if (descr) {
        // the extent descriptor stores times in usec - convert to seconds
        start_time_s = (time_t)(descr->start_time_ut / USEC_PER_SEC);
        end_time_s = (time_t)(descr->end_time_ut / USEC_PER_SEC);
        uuid_unparse_lower(descr->uuid, uuid);
        used_descr = true;
    }
    else if (epdl) {
        // fall back to the first page_details entry in the EPDL
        struct page_details *pd = NULL;

        Word_t start = 0;
        Pvoid_t *pd_by_start_time_s_judyL = PDCJudyLFirst(epdl->page_details_by_metric_id_JudyL, &start, PJE0);
        if(pd_by_start_time_s_judyL) {
            start = 0;
            Pvoid_t *pd_pptr = PDCJudyLFirst(*pd_by_start_time_s_judyL, &start, PJE0);
            if(pd_pptr) {
                pd = *pd_pptr;
                start_time_s = pd->first_time_s;
                end_time_s = pd->last_time_s;
                // pd->metric_id is the METRIC pointer stored as a Word_t
                METRIC *metric = (METRIC *)pd->metric_id;
                uuid_t *u = mrg_metric_uuid(main_mrg, metric);
                uuid_unparse_lower(*u, uuid);
                used_epdl = true;
            }
        }
    }

    if(!used_epdl && !used_descr && epdl && epdl->pdc) {
        // last resort: the whole query's time window (no per-page info)
        start_time_s = epdl->pdc->start_time_s;
        end_time_s = epdl->pdc->end_time_s;
    }

    char start_time_str[LOG_DATE_LENGTH + 1] = "";
    if(start_time_s)
        log_date(start_time_str, LOG_DATE_LENGTH, start_time_s);

    char end_time_str[LOG_DATE_LENGTH + 1] = "";
    if(end_time_s)
        log_date(end_time_str, LOG_DATE_LENGTH, end_time_s);

    // rate limit: at most one such message per second
    error_limit_static_global_var(erl, 1, 0);
    error_limit(&erl,
                "DBENGINE: error while reading extent from datafile %u of tier %d, at offset %" PRIu64 " (%u bytes) "
                "%s from %ld (%s) to %ld (%s) %s%s: "
                "%s",
                epdl->datafile->fileno, ctx->config.tier,
                epdl->extent_offset, epdl->extent_size,
                used_epdl ? "to extract page (PD)" : used_descr ? "expected page (DESCR)" : "part of a query (PDC)",
                start_time_s, start_time_str, end_time_s, end_time_str,
                used_epdl || used_descr ? " of metric " : "",
                used_epdl || used_descr ? uuid : "",
                msg);
}
// Parse one on-disk extent (header + page payloads + trailer), validate it,
// decompress it if needed, and populate the main page cache with every page
// that some pending page_details (across all merged EPDLs) is waiting for.
//
// Returns false only when the extent header itself is unusable; returns true
// once the extent was processed, even if individual pages failed validation
// (those get DBENGINE_EMPTY_PAGE data and are counted as invalid).
//
// Parameters:
//   ctx           - the dbengine instance (for stats and error accounting)
//   data          - the raw extent bytes (compressed or not)
//   data_length   - size of data in bytes
//   epdl          - head of the merged EPDL list for this extent
//   worker        - when true, report worker busy states while progressing
//   tags          - PDC_PAGE_STATUS bits to OR into every page made ready
//   cached_extent - true when data came from the extent cache (stats only)
static bool epdl_populate_pages_from_extent_data(
    struct rrdengine_instance *ctx,
    void *data,
    size_t data_length,
    EPDL *epdl,
    bool worker,
    PDC_PAGE_STATUS tags,
    bool cached_extent)
{
    int ret;
    unsigned i, count;
    void *uncompressed_buf = NULL;
    uint32_t payload_length, payload_offset, trailer_offset, uncompressed_payload_length = 0;
    bool have_read_error = false;
    /* persistent structures */
    struct rrdeng_df_extent_header *header;
    struct rrdeng_df_extent_trailer *trailer;
    struct extent_buffer *eb = NULL;
    uLong crc;

    // the extent must at least hold a header, one page descriptor and a trailer
    bool can_use_data = true;
    if(data_length < sizeof(*header) + sizeof(header->descr[0]) + sizeof(*trailer)) {
        can_use_data = false;

        // added to satisfy the requirements of older compilers (prevent warnings)
        payload_length = 0;
        payload_offset = 0;
        trailer_offset = 0;
        count = 0;
        header = NULL;
        trailer = NULL;
    }
    else {
        header = data;
        payload_length = header->payload_length;
        count = header->number_of_pages;
        payload_offset = sizeof(*header) + sizeof(header->descr[0]) * count;
        trailer_offset = data_length - sizeof(*trailer);
        trailer = data + trailer_offset;
    }

    // sanity-check the header fields against the buffer we actually have;
    // any mismatch means the extent cannot be trusted at all
    if( !can_use_data ||
        count < 1 ||
        count > MAX_PAGES_PER_EXTENT ||
        (header->compression_algorithm != RRD_NO_COMPRESSION && header->compression_algorithm != RRD_LZ4) ||
        (payload_length != trailer_offset - payload_offset) ||
        (data_length != payload_offset + payload_length + sizeof(*trailer))
        ) {
        epdl_extent_loading_error_log(ctx, epdl, NULL, "header is INVALID");
        return false;
    }

    // verify the CRC32 stored in the trailer, computed over everything before it
    // NOTE(review): the CRC length uses epdl->extent_size while the header
    // validation above uses data_length - presumably these are always equal
    // here; confirm at the call sites.
    crc = crc32(0L, Z_NULL, 0);
    crc = crc32(crc, data, epdl->extent_size - sizeof(*trailer));
    ret = crc32cmp(trailer->checksum, crc);
    if (unlikely(ret)) {
        // a CRC failure does not abort: pages are still distributed, but as
        // invalid/empty (have_read_error suppresses decompression below)
        ctx_io_error(ctx);
        have_read_error = true;
        epdl_extent_loading_error_log(ctx, epdl, NULL, "CRC32 checksum FAILED");
    }

    if(worker)
        worker_is_busy(UV_EVENT_DBENGINE_EXTENT_DECOMPRESSION);

    if (likely(!have_read_error && RRD_NO_COMPRESSION != header->compression_algorithm)) {
        // find the uncompressed extent size
        uncompressed_payload_length = 0;
        for (i = 0; i < count; ++i) {
            size_t page_length = header->descr[i].page_length;
            if(page_length > RRDENG_BLOCK_SIZE) {
                // a single page larger than a block means a corrupted descriptor
                have_read_error = true;
                break;
            }

            uncompressed_payload_length += header->descr[i].page_length;
        }

        if(unlikely(uncompressed_payload_length > MAX_PAGES_PER_EXTENT * RRDENG_BLOCK_SIZE))
            have_read_error = true;

        if(likely(!have_read_error)) {
            eb = extent_buffer_get(uncompressed_payload_length);
            uncompressed_buf = eb->data;

            // NOTE(review): LZ4_decompress_safe() returns a negative value on
            // failure; ret is added to the stats unchecked and the per-page
            // bounds check below is what guards against using bad output -
            // confirm this is intentional.
            ret = LZ4_decompress_safe(data + payload_offset, uncompressed_buf,
                                      (int) payload_length, (int) uncompressed_payload_length);

            __atomic_add_fetch(&ctx->stats.before_decompress_bytes, payload_length, __ATOMIC_RELAXED);
            __atomic_add_fetch(&ctx->stats.after_decompress_bytes, ret, __ATOMIC_RELAXED);
        }
    }

    if(worker)
        worker_is_busy(UV_EVENT_DBENGINE_EXTENT_PAGE_LOOKUP);

    // local counters, flushed to the global atomics once at the end
    size_t stats_data_from_main_cache = 0;
    size_t stats_data_from_extent = 0;
    size_t stats_load_compressed = 0;
    size_t stats_load_uncompressed = 0;
    size_t stats_load_invalid_page = 0;
    size_t stats_cache_hit_while_inserting = 0;

    uint32_t page_offset = 0, page_length;
    time_t now_s = max_acceptable_collected_time();
    for (i = 0; i < count; i++, page_offset += page_length) {
        page_length = header->descr[i].page_length;
        time_t start_time_s = (time_t) (header->descr[i].start_time_ut / USEC_PER_SEC);

        if(!page_length || !start_time_s) {
            char log[200 + 1];
            snprintfz(log, 200, "page %u (out of %u) is EMPTY", i, count);
            epdl_extent_loading_error_log(ctx, epdl, &header->descr[i], log);
            continue;
        }

        METRIC *metric = mrg_metric_get_and_acquire(main_mrg, &header->descr[i].uuid, (Word_t)ctx);
        Word_t metric_id = (Word_t)metric;
        if(!metric) {
            char log[200 + 1];
            snprintfz(log, 200, "page %u (out of %u) has unknown UUID", i, count);
            epdl_extent_loading_error_log(ctx, epdl, &header->descr[i], log);
            continue;
        }
        // only the id (the pointer value) is needed from here on
        mrg_metric_release(main_mrg, metric);

        // nobody is waiting for this page - skip it entirely
        struct page_details *pd_list = epdl_get_pd_load_link_list_from_metric_start_time(epdl, metric_id, start_time_s);
        if(likely(!pd_list))
            continue;

        VALIDATED_PAGE_DESCRIPTOR vd = validate_extent_page_descr(
            &header->descr[i], now_s,
            (pd_list) ? pd_list->update_every_s : 0,
            have_read_error);

        if(worker)
            worker_is_busy(UV_EVENT_DBENGINE_EXTENT_PAGE_ALLOCATION);

        void *page_data;

        if (unlikely(!vd.is_valid)) {
            // invalid pages become the shared sentinel empty page (size 0 below)
            page_data = DBENGINE_EMPTY_PAGE;
            stats_load_invalid_page++;
        }
        else {
            if (RRD_NO_COMPRESSION == header->compression_algorithm) {
                // copy straight out of the raw extent payload
                page_data = dbengine_page_alloc(vd.page_length);
                memcpy(page_data, data + payload_offset + page_offset, (size_t) vd.page_length);
                stats_load_uncompressed++;
            }
            else {
                // guard against reading past the decompressed buffer
                if (unlikely(page_offset + vd.page_length > uncompressed_payload_length)) {
                    char log[200 + 1];
                    snprintfz(log, 200, "page %u (out of %u) offset %u + page length %zu, "
                                        "exceeds the uncompressed buffer size %u",
                              i, count, page_offset, vd.page_length, uncompressed_payload_length);
                    epdl_extent_loading_error_log(ctx, epdl, &header->descr[i], log);

                    page_data = DBENGINE_EMPTY_PAGE;
                    stats_load_invalid_page++;
                }
                else {
                    page_data = dbengine_page_alloc(vd.page_length);
                    memcpy(page_data, uncompressed_buf + page_offset, vd.page_length);
                    stats_load_compressed++;
                }
            }
        }

        if(worker)
            worker_is_busy(UV_EVENT_DBENGINE_EXTENT_PAGE_POPULATION);

        PGC_ENTRY page_entry = {
            .hot = false,
            .section = (Word_t)ctx,
            .metric_id = metric_id,
            .start_time_s = vd.start_time_s,
            .end_time_s = vd.end_time_s,
            .update_every_s = vd.update_every_s,
            .size = (size_t) ((page_data == DBENGINE_EMPTY_PAGE) ? 0 : vd.page_length),
            .data = page_data
        };

        bool added = true;
        PGC_PAGE *page = pgc_page_add_and_acquire(main_cache, page_entry, &added);
        if (false == added) {
            // another thread inserted this page first - ours is redundant
            dbengine_page_free(page_data, vd.page_length);
            stats_cache_hit_while_inserting++;
            stats_data_from_main_cache++;
        }
        else
            stats_data_from_extent++;

        // hand the (shared) cache page to every waiting page_details;
        // each extra consumer needs its own reference (pgc_page_dup)
        struct page_details *pd = pd_list;
        do {
            if(pd != pd_list)
                pgc_page_dup(main_cache, page);

            pd->page = page;
            pd->page_length = pgc_page_data_size(main_cache, page);
            pdc_page_status_set(pd, PDC_PAGE_READY | tags | ((page_data == DBENGINE_EMPTY_PAGE) ? PDC_PAGE_EMPTY : 0));

            pd = pd->load.next;
        } while(pd);

        if(worker)
            worker_is_busy(UV_EVENT_DBENGINE_EXTENT_PAGE_LOOKUP);
    }

    // flush local counters to the global cache efficiency stats
    if(stats_data_from_main_cache)
        __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_data_source_main_cache, stats_data_from_main_cache, __ATOMIC_RELAXED);

    if(cached_extent)
        __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_data_source_extent_cache, stats_data_from_extent, __ATOMIC_RELAXED);
    else {
        __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_data_source_disk, stats_data_from_extent, __ATOMIC_RELAXED);
        __atomic_add_fetch(&rrdeng_cache_efficiency_stats.extents_loaded_from_disk, 1, __ATOMIC_RELAXED);
    }

    if(stats_cache_hit_while_inserting)
        __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_load_ok_loaded_but_cache_hit_while_inserting, stats_cache_hit_while_inserting, __ATOMIC_RELAXED);

    if(stats_load_compressed)
        __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_load_ok_compressed, stats_load_compressed, __ATOMIC_RELAXED);

    if(stats_load_uncompressed)
        __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_load_ok_uncompressed, stats_load_uncompressed, __ATOMIC_RELAXED);

    if(stats_load_invalid_page)
        __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_load_fail_invalid_page_in_extent, stats_load_invalid_page, __ATOMIC_RELAXED);

    if(worker)
        worker_is_idle();

    // safe to call with NULL when no decompression buffer was taken
    extent_buffer_release(eb);

    return true;
}
  926. static inline void *datafile_extent_read(struct rrdengine_instance *ctx, uv_file file, unsigned pos, unsigned size_bytes)
  927. {
  928. void *buffer;
  929. uv_fs_t request;
  930. unsigned real_io_size = ALIGN_BYTES_CEILING(size_bytes);
  931. int ret = posix_memalign(&buffer, RRDFILE_ALIGNMENT, real_io_size);
  932. if (unlikely(ret))
  933. fatal("DBENGINE: posix_memalign(): %s", strerror(ret));
  934. uv_buf_t iov = uv_buf_init(buffer, real_io_size);
  935. ret = uv_fs_read(NULL, &request, file, &iov, 1, pos, NULL);
  936. if (unlikely(-1 == ret)) {
  937. ctx_io_error(ctx);
  938. posix_memfree(buffer);
  939. buffer = NULL;
  940. }
  941. else
  942. ctx_io_read_op_bytes(ctx, real_io_size);
  943. uv_fs_req_cleanup(&request);
  944. return buffer;
  945. }
  946. static inline void datafile_extent_read_free(void *buffer) {
  947. posix_memfree(buffer);
  948. }
// Resolve one extent for a list of merged EPDL queries: obtain the extent
// bytes (extent cache first, then disk), populate the requested pages into
// the main cache, mark every page that could not be loaded as failed with a
// reason tag, and finally complete and destroy every EPDL in the list.
//
// When 'worker' is true, the current worker's busy/idle state is reported as
// the phases progress.
void epdl_find_extent_and_populate_pages(struct rrdengine_instance *ctx, EPDL *epdl, bool worker) {
    size_t *statistics_counter = NULL;
    PDC_PAGE_STATUS not_loaded_pages_tag = 0, loaded_pages_tag = 0;

    // cancel only if EVERY PDC sharing this extent wants its workers to stop
    bool should_stop = __atomic_load_n(&epdl->pdc->workers_should_stop, __ATOMIC_RELAXED);
    for(EPDL *ep = epdl->query.next; ep ;ep = ep->query.next) {
        // all merged EPDLs must reference the exact same extent
        internal_fatal(ep->datafile != epdl->datafile, "DBENGINE: datafiles do not match");
        internal_fatal(ep->extent_offset != epdl->extent_offset, "DBENGINE: extent offsets do not match");
        internal_fatal(ep->extent_size != epdl->extent_size, "DBENGINE: extent sizes do not match");
        internal_fatal(ep->file != epdl->file, "DBENGINE: files do not match");

        if(!__atomic_load_n(&ep->pdc->workers_should_stop, __ATOMIC_RELAXED)) {
            should_stop = false;
            break;
        }
    }

    if(unlikely(should_stop)) {
        statistics_counter = &rrdeng_cache_efficiency_stats.pages_load_fail_cancelled;
        not_loaded_pages_tag = PDC_PAGE_CANCELLED;
        goto cleanup;
    }

    if(worker)
        worker_is_busy(UV_EVENT_DBENGINE_EXTENT_CACHE_LOOKUP);

    bool extent_found_in_cache = false;

    void *extent_compressed_data = NULL;
    // the extent cache is keyed by (ctx, fileno, extent offset)
    PGC_PAGE *extent_cache_page = pgc_page_get_and_acquire(
        extent_cache, (Word_t)ctx,
        (Word_t)epdl->datafile->fileno, (time_t)epdl->extent_offset,
        PGC_SEARCH_EXACT);

    if(extent_cache_page) {
        extent_compressed_data = pgc_page_data(extent_cache_page);
        internal_fatal(epdl->extent_size != pgc_page_data_size(extent_cache, extent_cache_page),
                       "DBENGINE: cache size does not match the expected size");

        loaded_pages_tag |= PDC_PAGE_EXTENT_FROM_CACHE;
        not_loaded_pages_tag |= PDC_PAGE_EXTENT_FROM_CACHE;
        extent_found_in_cache = true;
    }
    else {
        if(worker)
            worker_is_busy(UV_EVENT_DBENGINE_EXTENT_MMAP);

        // cache miss: read the extent from disk and insert it into the cache
        void *extent_data = datafile_extent_read(ctx, epdl->file, epdl->extent_offset, epdl->extent_size);
        if(extent_data != NULL) {
            // copy into a cache-owned allocation; the read buffer is aligned
            // for I/O and sized up, so it cannot be handed to the cache as-is
            void *copied_extent_compressed_data = dbengine_extent_alloc(epdl->extent_size);
            memcpy(copied_extent_compressed_data, extent_data, epdl->extent_size);
            datafile_extent_read_free(extent_data);

            if(worker)
                worker_is_busy(UV_EVENT_DBENGINE_EXTENT_CACHE_LOOKUP);

            bool added = false;
            extent_cache_page = pgc_page_add_and_acquire(extent_cache, (PGC_ENTRY) {
                .hot = false,
                .section = (Word_t) ctx,
                .metric_id = (Word_t) epdl->datafile->fileno,
                .start_time_s = (time_t) epdl->extent_offset,
                .size = epdl->extent_size,
                .end_time_s = 0,
                .update_every_s = 0,
                .data = copied_extent_compressed_data,
            }, &added);

            if (!added) {
                // raced with another thread - use the already-cached copy
                dbengine_extent_free(copied_extent_compressed_data, epdl->extent_size);
                internal_fatal(epdl->extent_size != pgc_page_data_size(extent_cache, extent_cache_page),
                               "DBENGINE: cache size does not match the expected size");
            }

            extent_compressed_data = pgc_page_data(extent_cache_page);

            loaded_pages_tag |= PDC_PAGE_EXTENT_FROM_DISK;
            not_loaded_pages_tag |= PDC_PAGE_EXTENT_FROM_DISK;
        }
    }

    if(extent_compressed_data) {
        // Need to decompress and then process the pagelist
        bool extent_used = epdl_populate_pages_from_extent_data(
            ctx, extent_compressed_data, epdl->extent_size,
            epdl, worker, loaded_pages_tag, extent_found_in_cache);

        if(extent_used) {
            // since the extent was used, all the pages that are not
            // loaded from this extent, were not found in the extent
            not_loaded_pages_tag |= PDC_PAGE_FAILED_NOT_IN_EXTENT;
            statistics_counter = &rrdeng_cache_efficiency_stats.pages_load_fail_not_found;
        }
        else {
            not_loaded_pages_tag |= PDC_PAGE_FAILED_INVALID_EXTENT;
            statistics_counter = &rrdeng_cache_efficiency_stats.pages_load_fail_invalid_extent;
        }
    }
    else {
        // disk read failed and nothing was in the cache
        not_loaded_pages_tag |= PDC_PAGE_FAILED_TO_MAP_EXTENT;
        statistics_counter = &rrdeng_cache_efficiency_stats.pages_load_fail_cant_mmap_extent;
    }

    if(extent_cache_page)
        pgc_page_release(extent_cache, extent_cache_page);

cleanup:
    // remove it from the datafile extent_queries
    // this can be called multiple times safely
    epdl_pending_del(epdl);

    // mark all pending pages as failed
    for(EPDL *ep = epdl; ep ;ep = ep->query.next) {
        epdl_mark_all_not_loaded_pages_as_failed(
            ep, not_loaded_pages_tag, statistics_counter);
    }

    // complete the jobs and tear down every merged EPDL
    // (grab 'next' before destroying the current one)
    for(EPDL *ep = epdl, *next = NULL; ep ; ep = next) {
        next = ep->query.next;

        completion_mark_complete_a_job(&ep->pdc->page_completion);
        pdc_release_and_destroy_if_unreferenced(ep->pdc, true, false);

        // Free the Judy that holds the requested pagelist and the extents
        epdl_destroy(ep);
    }

    if(worker)
        worker_is_idle();
}
  1055. }