pdc.c 48 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334
  1. // SPDX-License-Identifier: GPL-3.0-or-later
  2. #define NETDATA_RRD_INTERNALS
  3. #include "pdc.h"
// Details for all pages of a single extent that a query needs loaded:
// groups the page details of one extent so the extent is read and
// decompressed once, no matter how many metrics/pages it contains.
struct extent_page_details_list {
    uv_file file;                               // datafile file handle to read from
    uint64_t extent_offset;                     // offset of the extent within the datafile
    uint32_t extent_size;                       // on-disk size of the extent, in bytes
    unsigned number_of_pages_in_JudyL;          // total page details indexed below
    Pvoid_t page_details_by_metric_id_JudyL;    // metric_id -> (start_time_s -> struct page_details *)
    struct page_details_control *pdc;           // the query (PDC) this extent load belongs to
    struct rrdengine_datafile *datafile;        // the datafile the extent lives in
    struct rrdeng_cmd *cmd;                     // the queued command carrying this epdl (NULL when dequeued)
    bool head_to_datafile_extent_queries_pending_for_extent; // true when this epdl is registered as the head
                                                             // of the datafile's pending list for this extent

    struct {
        // linked list of epdls merged on the same extent (head + followers)
        struct extent_page_details_list *prev;
        struct extent_page_details_list *next;
    } query;
};

// Per-datafile grouping used while routing a query's pages:
// maps each extent offset of one datafile to its EPDL.
typedef struct datafile_extent_offset_list {
    uv_file file;                                  // the datafile's file handle
    unsigned fileno;                               // the datafile's number
    Pvoid_t extent_pd_list_by_extent_offset_JudyL; // extent_offset -> EPDL *
} DEOL;
// ----------------------------------------------------------------------------
// PDC cache

// ARAL (array allocator) pools for the small structures used by queries.
// One pool per structure type; each is created once by its *_init() function.
static struct {
    struct {
        ARAL *ar;   // pool for PDC (page details control, one per query)
    } pdc;

    struct {
        ARAL *ar;   // pool for struct page_details (one per page to load)
    } pd;

    struct {
        ARAL *ar;   // pool for EPDL (one per extent to load)
    } epdl;

    struct {
        ARAL *ar;   // pool for DEOL (one per datafile during routing)
    } deol;
} pdc_globals = {};
  40. void pdc_init(void) {
  41. pdc_globals.pdc.ar = aral_create(
  42. "dbengine-pdc",
  43. sizeof(PDC),
  44. 0,
  45. 65536,
  46. NULL,
  47. NULL, NULL, false, false
  48. );
  49. }
  50. PDC *pdc_get(void) {
  51. PDC *pdc = aral_mallocz(pdc_globals.pdc.ar);
  52. memset(pdc, 0, sizeof(PDC));
  53. return pdc;
  54. }
  55. static void pdc_release(PDC *pdc) {
  56. aral_freez(pdc_globals.pdc.ar, pdc);
  57. }
  58. size_t pdc_cache_size(void) {
  59. return aral_overhead(pdc_globals.pdc.ar) + aral_structures(pdc_globals.pdc.ar);
  60. }
  61. // ----------------------------------------------------------------------------
  62. // PD cache
  63. void page_details_init(void) {
  64. pdc_globals.pd.ar = aral_create(
  65. "dbengine-pd",
  66. sizeof(struct page_details),
  67. 0,
  68. 65536,
  69. NULL,
  70. NULL, NULL, false, false
  71. );
  72. }
  73. struct page_details *page_details_get(void) {
  74. struct page_details *pd = aral_mallocz(pdc_globals.pd.ar);
  75. memset(pd, 0, sizeof(struct page_details));
  76. return pd;
  77. }
  78. static void page_details_release(struct page_details *pd) {
  79. aral_freez(pdc_globals.pd.ar, pd);
  80. }
  81. size_t pd_cache_size(void) {
  82. return aral_overhead(pdc_globals.pd.ar) + aral_structures(pdc_globals.pd.ar);
  83. }
  84. // ----------------------------------------------------------------------------
  85. // epdl cache
  86. void epdl_init(void) {
  87. pdc_globals.epdl.ar = aral_create(
  88. "dbengine-epdl",
  89. sizeof(EPDL),
  90. 0,
  91. 65536,
  92. NULL,
  93. NULL, NULL, false, false
  94. );
  95. }
  96. static EPDL *epdl_get(void) {
  97. EPDL *epdl = aral_mallocz(pdc_globals.epdl.ar);
  98. memset(epdl, 0, sizeof(EPDL));
  99. return epdl;
  100. }
  101. static void epdl_release(EPDL *epdl) {
  102. aral_freez(pdc_globals.epdl.ar, epdl);
  103. }
  104. size_t epdl_cache_size(void) {
  105. return aral_overhead(pdc_globals.epdl.ar) + aral_structures(pdc_globals.epdl.ar);
  106. }
  107. // ----------------------------------------------------------------------------
  108. // deol cache
  109. void deol_init(void) {
  110. pdc_globals.deol.ar = aral_create(
  111. "dbengine-deol",
  112. sizeof(DEOL),
  113. 0,
  114. 65536,
  115. NULL,
  116. NULL, NULL, false, false
  117. );
  118. }
  119. static DEOL *deol_get(void) {
  120. DEOL *deol = aral_mallocz(pdc_globals.deol.ar);
  121. memset(deol, 0, sizeof(DEOL));
  122. return deol;
  123. }
  124. static void deol_release(DEOL *deol) {
  125. aral_freez(pdc_globals.deol.ar, deol);
  126. }
  127. size_t deol_cache_size(void) {
  128. return aral_overhead(pdc_globals.deol.ar) + aral_structures(pdc_globals.deol.ar);
  129. }
  130. // ----------------------------------------------------------------------------
  131. // extent with buffer cache
// Free-list cache of extent read/decompress buffers.
// 'protected' members are guarded by the spinlock; 'atomics' members are
// updated with relaxed atomic operations; max_size is written once by
// extent_buffer_init() at startup and read-only afterwards.
static struct {
    struct {
        SPINLOCK spinlock;                      // guards the free list below
        struct extent_buffer *available_items;  // doubly-linked free list of cached buffers
        size_t available;                       // number of buffers in the free list
    } protected;

    struct {
        size_t allocated;                       // buffers currently allocated (cached or handed out)
        size_t allocated_bytes;                 // total bytes of those buffers, headers included
    } atomics;

    size_t max_size;                            // allocation size used for every buffer

} extent_buffer_globals = {
    .protected = {
        .spinlock = NETDATA_SPINLOCK_INITIALIZER,
        .available_items = NULL,
        .available = 0,
    },
    .atomics = {
        .allocated = 0,
        .allocated_bytes = 0,
    },
    // default before extent_buffer_init() runs: one full uncompressed extent
    .max_size = MAX_PAGES_PER_EXTENT * RRDENG_BLOCK_SIZE,
};
  155. void extent_buffer_init(void) {
  156. size_t max_extent_uncompressed = MAX_PAGES_PER_EXTENT * RRDENG_BLOCK_SIZE;
  157. size_t max_size = (size_t)LZ4_compressBound(MAX_PAGES_PER_EXTENT * RRDENG_BLOCK_SIZE);
  158. if(max_size < max_extent_uncompressed)
  159. max_size = max_extent_uncompressed;
  160. extent_buffer_globals.max_size = max_size;
  161. }
// Release one cached extent buffer, keeping at least one in the cache.
// Uses trylock so a periodic cleanup caller never blocks the hot path;
// the buffer is freed outside the lock.
void extent_buffer_cleanup1(void) {
    struct extent_buffer *item = NULL;

    if(!spinlock_trylock(&extent_buffer_globals.protected.spinlock))
        return;

    // only shrink when more than one buffer is cached
    if(extent_buffer_globals.protected.available_items && extent_buffer_globals.protected.available > 1) {
        item = extent_buffer_globals.protected.available_items;
        DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(extent_buffer_globals.protected.available_items, item, cache.prev, cache.next);
        extent_buffer_globals.protected.available--;
    }

    spinlock_unlock(&extent_buffer_globals.protected.spinlock);

    // free outside the lock and update the statistics
    if(item) {
        size_t bytes = sizeof(struct extent_buffer) + item->bytes;
        freez(item);
        __atomic_sub_fetch(&extent_buffer_globals.atomics.allocated, 1, __ATOMIC_RELAXED);
        __atomic_sub_fetch(&extent_buffer_globals.atomics.allocated_bytes, bytes, __ATOMIC_RELAXED);
    }
}
// Get a reusable extent buffer of at least `size` bytes.
// Pops one from the free list when available; a cached buffer that turns
// out to be too small is freed and a fresh one is allocated. Every request
// is rounded up to max_size, so all buffers are interchangeable.
struct extent_buffer *extent_buffer_get(size_t size) {
    internal_fatal(size > extent_buffer_globals.max_size, "DBENGINE: extent size is too big");

    struct extent_buffer *eb = NULL;

    // round every request up to max_size
    if(size < extent_buffer_globals.max_size)
        size = extent_buffer_globals.max_size;

    spinlock_lock(&extent_buffer_globals.protected.spinlock);
    if(likely(extent_buffer_globals.protected.available_items)) {
        eb = extent_buffer_globals.protected.available_items;
        DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(extent_buffer_globals.protected.available_items, eb, cache.prev, cache.next);
        extent_buffer_globals.protected.available--;
    }
    spinlock_unlock(&extent_buffer_globals.protected.spinlock);

    // a cached buffer that is too small cannot be reused - drop it
    // (can happen for buffers cached before extent_buffer_init() raised max_size)
    if(unlikely(eb && eb->bytes < size)) {
        size_t bytes = sizeof(struct extent_buffer) + eb->bytes;
        freez(eb);
        eb = NULL;
        __atomic_sub_fetch(&extent_buffer_globals.atomics.allocated, 1, __ATOMIC_RELAXED);
        __atomic_sub_fetch(&extent_buffer_globals.atomics.allocated_bytes, bytes, __ATOMIC_RELAXED);
    }

    // nothing usable cached - allocate a new buffer
    if(unlikely(!eb)) {
        size_t bytes = sizeof(struct extent_buffer) + size;
        eb = mallocz(bytes);
        eb->bytes = size;
        __atomic_add_fetch(&extent_buffer_globals.atomics.allocated, 1, __ATOMIC_RELAXED);
        __atomic_add_fetch(&extent_buffer_globals.atomics.allocated_bytes, bytes, __ATOMIC_RELAXED);
    }

    return eb;
}
// Return an extent buffer to the free list (NULL is a no-op).
// Buffers are never freed here; extent_buffer_cleanup1() shrinks the cache.
void extent_buffer_release(struct extent_buffer *eb) {
    if(unlikely(!eb)) return;

    spinlock_lock(&extent_buffer_globals.protected.spinlock);
    DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(extent_buffer_globals.protected.available_items, eb, cache.prev, cache.next);
    extent_buffer_globals.protected.available++;
    spinlock_unlock(&extent_buffer_globals.protected.spinlock);
}

// Total bytes currently allocated for extent buffers (cached and in use).
size_t extent_buffer_cache_size(void) {
    return __atomic_load_n(&extent_buffer_globals.atomics.allocated_bytes, __ATOMIC_RELAXED);
}
  217. // ----------------------------------------------------------------------------
  218. // epdl logic
// Free an EPDL: releases each metric's inner (start_time -> pd) JudyL,
// the outer metric-id JudyL, and the EPDL structure itself.
// The struct page_details entries are NOT freed here - they belong to
// the pdc and are released by pdc_destroy().
static void epdl_destroy(EPDL *epdl)
{
    Pvoid_t *pd_by_start_time_s_JudyL;
    Word_t metric_id_index = 0;
    bool metric_id_first = true;

    while ((pd_by_start_time_s_JudyL = PDCJudyLFirstThenNext(
            epdl->page_details_by_metric_id_JudyL,
            &metric_id_index, &metric_id_first)))
        PDCJudyLFreeArray(pd_by_start_time_s_JudyL, PJE0);

    PDCJudyLFreeArray(&epdl->page_details_by_metric_id_JudyL, PJE0);
    epdl_release(epdl);
}
// Mark every page of this epdl that has no loaded page and is not already
// resolved (FAILED or READY) as failed, OR-ing in the given tags.
// When statistics_counter is given, the number of pages marked is added
// to it atomically.
static void epdl_mark_all_not_loaded_pages_as_failed(EPDL *epdl, PDC_PAGE_STATUS tags, size_t *statistics_counter)
{
    size_t pages_matched = 0;

    Word_t metric_id_index = 0;
    bool metric_id_first = true;
    Pvoid_t *pd_by_start_time_s_JudyL;
    while((pd_by_start_time_s_JudyL = PDCJudyLFirstThenNext(epdl->page_details_by_metric_id_JudyL, &metric_id_index, &metric_id_first))) {

        Word_t start_time_index = 0;
        bool start_time_first = true;
        Pvoid_t *PValue;
        while ((PValue = PDCJudyLFirstThenNext(*pd_by_start_time_s_JudyL, &start_time_index, &start_time_first))) {
            struct page_details *pd = *PValue;

            if(!pd->page && !pdc_page_status_check(pd, PDC_PAGE_FAILED|PDC_PAGE_READY)) {
                pdc_page_status_set(pd, PDC_PAGE_FAILED | tags);
                pages_matched++;
            }
        }
    }

    if(pages_matched && statistics_counter)
        __atomic_add_fetch(statistics_counter, pages_matched, __ATOMIC_RELAXED);
}
  252. /*
  253. static bool epdl_check_if_pages_are_already_in_cache(struct rrdengine_instance *ctx, EPDL *epdl, PDC_PAGE_STATUS tags)
  254. {
  255. size_t count_remaining = 0;
  256. size_t found = 0;
  257. Word_t metric_id_index = 0;
  258. bool metric_id_first = true;
  259. Pvoid_t *pd_by_start_time_s_JudyL;
  260. while((pd_by_start_time_s_JudyL = PDCJudyLFirstThenNext(epdl->page_details_by_metric_id_JudyL, &metric_id_index, &metric_id_first))) {
  261. Word_t start_time_index = 0;
  262. bool start_time_first = true;
  263. Pvoid_t *PValue;
  264. while ((PValue = PDCJudyLFirstThenNext(*pd_by_start_time_s_JudyL, &start_time_index, &start_time_first))) {
  265. struct page_details *pd = *PValue;
  266. if (pd->page)
  267. continue;
  268. pd->page = pgc_page_get_and_acquire(main_cache, (Word_t) ctx, pd->metric_id, pd->first_time_s, PGC_SEARCH_EXACT);
  269. if (pd->page) {
  270. found++;
  271. pdc_page_status_set(pd, PDC_PAGE_READY | tags);
  272. }
  273. else
  274. count_remaining++;
  275. }
  276. }
  277. if(found) {
  278. __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_load_ok_preloaded, found, __ATOMIC_RELAXED);
  279. __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_data_source_main_cache, found, __ATOMIC_RELAXED);
  280. }
  281. return count_remaining == 0;
  282. }
  283. */
  284. // ----------------------------------------------------------------------------
  285. // PDC logic
// Tear down a PDC when its last reference is gone:
// releases the metric, destroys the completions, releases every page
// details entry (dropping any datafile and cache-page references it still
// holds), frees the page list, updates the query counters, and returns
// the PDC to its pool. Counts unroutable and cancelled pages for stats.
static void pdc_destroy(PDC *pdc) {
    mrg_metric_release(main_mrg, pdc->metric);
    completion_destroy(&pdc->prep_completion);
    completion_destroy(&pdc->page_completion);

    Pvoid_t *PValue;
    struct page_details *pd;
    Word_t time_index = 0;
    bool first_then_next = true;
    size_t unroutable = 0, cancelled = 0;
    while((PValue = PDCJudyLFirstThenNext(pdc->page_list_JudyL, &time_index, &first_then_next))) {
        pd = *PValue;

        // no need for atomics here - we are done...
        PDC_PAGE_STATUS status = pd->status;

        // drop the datafile reference this page details still holds
        if(status & PDC_PAGE_DATAFILE_ACQUIRED) {
            datafile_release(pd->datafile.ptr, DATAFILE_ACQUIRE_PAGE_DETAILS);
            pd->datafile.ptr = NULL;
        }

        internal_fatal(pd->datafile.ptr, "DBENGINE: page details has a datafile.ptr that is not released.");

        // a page with no loaded data and no terminal status was never
        // routed to any loader - count it as unroutable
        if(!pd->page && !(status & (PDC_PAGE_READY | PDC_PAGE_FAILED | PDC_PAGE_RELEASED | PDC_PAGE_SKIP | PDC_PAGE_INVALID | PDC_PAGE_CANCELLED))) {
            // pdc_page_status_set(pd, PDC_PAGE_FAILED);
            unroutable++;
        }
        else if(!pd->page && (status & PDC_PAGE_CANCELLED))
            cancelled++;

        // release the cache page if the query did not already do it
        if(pd->page && !(status & PDC_PAGE_RELEASED)) {
            pgc_page_release(main_cache, pd->page);
            // pdc_page_status_set(pd, PDC_PAGE_RELEASED);
        }

        page_details_release(pd);
    }

    PDCJudyLFreeArray(&pdc->page_list_JudyL, PJE0);

    __atomic_sub_fetch(&rrdeng_cache_efficiency_stats.currently_running_queries, 1, __ATOMIC_RELAXED);
    __atomic_sub_fetch(&pdc->ctx->atomic.inflight_queries, 1, __ATOMIC_RELAXED);
    pdc_release(pdc);

    if(unroutable)
        __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_load_fail_unroutable, unroutable, __ATOMIC_RELAXED);

    if(cancelled)
        __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_load_fail_cancelled, cancelled, __ATOMIC_RELAXED);
}
// Take one more reference on the pdc.
// The caller must already hold a reference - acquiring an unreferenced
// pdc is a fatal condition.
void pdc_acquire(PDC *pdc) {
    spinlock_lock(&pdc->refcount_spinlock);

    if(pdc->refcount < 1)
        fatal("DBENGINE: pdc is not referenced and cannot be acquired");

    pdc->refcount++;
    spinlock_unlock(&pdc->refcount_spinlock);
}
// Drop one reference on the pdc, destroying it when it was the last.
// Workers additionally mark the page completion when at most one
// reference remains, waking the query thread if it is still waiting.
// Returns true when the pdc was destroyed (or pdc was NULL).
bool pdc_release_and_destroy_if_unreferenced(PDC *pdc, bool worker, bool router __maybe_unused) {
    if(unlikely(!pdc))
        return true;

    spinlock_lock(&pdc->refcount_spinlock);

    if(pdc->refcount <= 0)
        fatal("DBENGINE: pdc is not referenced and cannot be released");

    pdc->refcount--;

    if (pdc->refcount <= 1 && worker) {
        // when 1 refcount is remaining, and we are a worker,
        // we can mark the job completed:
        // - if the remaining refcount is from the query caller, we will wake it up
        // - if the remaining refcount is from another worker, the query thread is already away
        completion_mark_complete(&pdc->page_completion);
    }

    if (pdc->refcount == 0) {
        spinlock_unlock(&pdc->refcount_spinlock);
        pdc_destroy(pdc);
        return true;
    }

    spinlock_unlock(&pdc->refcount_spinlock);
    return false;
}
  354. void epdl_cmd_queued(void *epdl_ptr, struct rrdeng_cmd *cmd) {
  355. EPDL *epdl = epdl_ptr;
  356. epdl->cmd = cmd;
  357. }
  358. void epdl_cmd_dequeued(void *epdl_ptr) {
  359. EPDL *epdl = epdl_ptr;
  360. epdl->cmd = NULL;
  361. }
  362. static struct rrdeng_cmd *epdl_get_cmd(void *epdl_ptr) {
  363. EPDL *epdl = epdl_ptr;
  364. return epdl->cmd;
  365. }
// Register this epdl on its datafile's pending-extent index.
// Returns true when it is the first (head) for its extent offset and a new
// extent read must be dispatched; returns false when another query is
// already loading the same extent - the epdl is then appended to the
// head's list and served by that read (a "merged" extent load).
static bool epdl_pending_add(EPDL *epdl) {
    bool added_new;

    spinlock_lock(&epdl->datafile->extent_queries.spinlock);
    Pvoid_t *PValue = JudyLIns(&epdl->datafile->extent_queries.pending_epdl_by_extent_offset_judyL, epdl->extent_offset, PJE0);
    internal_fatal(!PValue || PValue == PJERR, "DBENGINE: corrupted pending extent judy");

    EPDL *base = *PValue;

    if(!base) {
        // we are the first query asking for this extent
        added_new = true;
        epdl->head_to_datafile_extent_queries_pending_for_extent = true;
    }
    else {
        // another query is already loading this extent - merge with it
        added_new = false;
        epdl->head_to_datafile_extent_queries_pending_for_extent = false;
        __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_load_extent_merged, 1, __ATOMIC_RELAXED);

        // when our priority value is smaller, re-queue the pending command
        // at our value (smaller value appears to mean more urgent - confirm
        // against rrdeng_req_cmd())
        if(base->pdc->priority > epdl->pdc->priority)
            rrdeng_req_cmd(epdl_get_cmd, base, epdl->pdc->priority);
    }

    DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(base, epdl, query.prev, query.next);
    *PValue = base;

    spinlock_unlock(&epdl->datafile->extent_queries.spinlock);

    return added_new;
}
// Remove this epdl from its datafile's pending-extent index, if it is the
// head entry for its extent offset. Non-head (merged) epdls have nothing
// registered there and are left alone.
static void epdl_pending_del(EPDL *epdl) {
    spinlock_lock(&epdl->datafile->extent_queries.spinlock);
    if(epdl->head_to_datafile_extent_queries_pending_for_extent) {
        epdl->head_to_datafile_extent_queries_pending_for_extent = false;
        int rc = JudyLDel(&epdl->datafile->extent_queries.pending_epdl_by_extent_offset_judyL, epdl->extent_offset, PJE0);
        (void) rc;
        internal_fatal(!rc, "DBENGINE: epdl not found in pending list");
    }
    spinlock_unlock(&epdl->datafile->extent_queries.spinlock);
}
// Route the disk-pending pages of a query (pdc) to extent loaders.
// Deduplicates pages first per datafile (DEOL), then per extent (EPDL),
// and dispatches one job per unique extent: the first via
// exec_first_extent_list(), the rest via exec_rest_extent_list().
// Extents already pending on the datafile (epdl_pending_add() false) are
// merged and not dispatched again. Finally drops the caller's reference
// on the pdc.
void pdc_to_epdl_router(struct rrdengine_instance *ctx, PDC *pdc, execute_extent_page_details_list_t exec_first_extent_list, execute_extent_page_details_list_t exec_rest_extent_list)
{
    Pvoid_t *PValue;
    Pvoid_t *PValue1;
    Pvoid_t *PValue2;
    Word_t time_index = 0;
    struct page_details *pd = NULL;

    // this is the entire page list
    // Lets do some deduplication
    // 1. Per datafile
    // 2. Per extent
    // 3. Pages per extent will be added to the cache either as acquired or not

    Pvoid_t JudyL_datafile_list = NULL;

    DEOL *deol;
    EPDL *epdl;

    if (pdc->page_list_JudyL) {
        bool first_then_next = true;
        while((PValue = PDCJudyLFirstThenNext(pdc->page_list_JudyL, &time_index, &first_then_next))) {
            pd = *PValue;

            internal_fatal(!pd,
                           "DBENGINE: pdc page list has an empty page details entry");

            // only pages that still need a disk read are routed
            if (!(pd->status & PDC_PAGE_DISK_PENDING))
                continue;

            internal_fatal(!(pd->status & PDC_PAGE_DATAFILE_ACQUIRED),
                           "DBENGINE: page details has not acquired the datafile");

            internal_fatal((pd->status & (PDC_PAGE_READY | PDC_PAGE_FAILED)),
                           "DBENGINE: page details has disk pending flag but it is ready/failed");

            internal_fatal(pd->page,
                           "DBENGINE: page details has a page linked to it, but it is marked for loading");

            // find (or create) the DEOL of this page's datafile
            PValue1 = PDCJudyLIns(&JudyL_datafile_list, pd->datafile.fileno, PJE0);
            if (PValue1 && !*PValue1) {
                *PValue1 = deol = deol_get();
                deol->extent_pd_list_by_extent_offset_JudyL = NULL;
                deol->fileno = pd->datafile.fileno;
            }
            else
                deol = *PValue1;

            // find (or create) the EPDL of this page's extent
            PValue2 = PDCJudyLIns(&deol->extent_pd_list_by_extent_offset_JudyL, pd->datafile.extent.pos, PJE0);
            if (PValue2 && !*PValue2) {
                *PValue2 = epdl = epdl_get();
                epdl->page_details_by_metric_id_JudyL = NULL;
                epdl->number_of_pages_in_JudyL = 0;
                epdl->file = pd->datafile.file;
                epdl->extent_offset = pd->datafile.extent.pos;
                epdl->extent_size = pd->datafile.extent.bytes;
                epdl->datafile = pd->datafile.ptr;
            }
            else
                epdl = *PValue2;

            epdl->number_of_pages_in_JudyL++;

            // index this page under the extent: metric_id -> first_time_s -> pd
            Pvoid_t *pd_by_first_time_s_judyL = PDCJudyLIns(&epdl->page_details_by_metric_id_JudyL, pd->metric_id, PJE0);
            Pvoid_t *pd_pptr = PDCJudyLIns(pd_by_first_time_s_judyL, pd->first_time_s, PJE0);
            *pd_pptr = pd;
        }

        size_t extent_list_no = 0;
        Word_t datafile_no = 0;
        first_then_next = true;
        while((PValue = PDCJudyLFirstThenNext(JudyL_datafile_list, &datafile_no, &first_then_next))) {
            deol = *PValue;

            bool first_then_next_extent = true;
            Word_t pos = 0;
            while ((PValue = PDCJudyLFirstThenNext(deol->extent_pd_list_by_extent_offset_JudyL, &pos, &first_then_next_extent))) {
                epdl = *PValue;
                internal_fatal(!epdl, "DBENGINE: extent_list is not populated properly");

                // The extent page list can be dispatched to a worker
                // It will need to populate the cache with "acquired" pages that are in the list (pd) only
                // the rest of the extent pages will be added to the cache but not acquired

                pdc_acquire(pdc); // we do this for the next worker: do_read_extent_work()

                epdl->pdc = pdc;

                // dispatch only when this epdl is the head for its extent;
                // otherwise it was merged into an already-pending extent read
                if(epdl_pending_add(epdl)) {
                    if (extent_list_no++ == 0)
                        exec_first_extent_list(ctx, epdl, pdc->priority);
                    else
                        exec_rest_extent_list(ctx, epdl, pdc->priority);
                }
            }

            PDCJudyLFreeArray(&deol->extent_pd_list_by_extent_offset_JudyL, PJE0);
            deol_release(deol);
        }

        PDCJudyLFreeArray(&JudyL_datafile_list, PJE0);
    }

    pdc_release_and_destroy_if_unreferenced(pdc, true, true);
}
  481. void collect_page_flags_to_buffer(BUFFER *wb, RRDENG_COLLECT_PAGE_FLAGS flags) {
  482. if(flags & RRDENG_PAGE_PAST_COLLECTION)
  483. buffer_strcat(wb, "PAST_COLLECTION ");
  484. if(flags & RRDENG_PAGE_REPEATED_COLLECTION)
  485. buffer_strcat(wb, "REPEATED_COLLECTION ");
  486. if(flags & RRDENG_PAGE_BIG_GAP)
  487. buffer_strcat(wb, "BIG_GAP ");
  488. if(flags & RRDENG_PAGE_GAP)
  489. buffer_strcat(wb, "GAP ");
  490. if(flags & RRDENG_PAGE_FUTURE_POINT)
  491. buffer_strcat(wb, "FUTURE_POINT ");
  492. if(flags & RRDENG_PAGE_CREATED_IN_FUTURE)
  493. buffer_strcat(wb, "CREATED_IN_FUTURE ");
  494. if(flags & RRDENG_PAGE_COMPLETED_IN_FUTURE)
  495. buffer_strcat(wb, "COMPLETED_IN_FUTURE ");
  496. if(flags & RRDENG_PAGE_UNALIGNED)
  497. buffer_strcat(wb, "UNALIGNED ");
  498. if(flags & RRDENG_PAGE_CONFLICT)
  499. buffer_strcat(wb, "CONFLICT ");
  500. if(flags & RRDENG_PAGE_FULL)
  501. buffer_strcat(wb, "PAGE_FULL");
  502. if(flags & RRDENG_PAGE_COLLECT_FINALIZE)
  503. buffer_strcat(wb, "COLLECT_FINALIZE");
  504. if(flags & RRDENG_PAGE_UPDATE_EVERY_CHANGE)
  505. buffer_strcat(wb, "UPDATE_EVERY_CHANGE");
  506. if(flags & RRDENG_PAGE_STEP_TOO_SMALL)
  507. buffer_strcat(wb, "STEP_TOO_SMALL");
  508. if(flags & RRDENG_PAGE_STEP_UNALIGNED)
  509. buffer_strcat(wb, "STEP_UNALIGNED");
  510. }
// Convert an on-disk extent page descriptor into a validated descriptor.
// Derives start/end times and entries according to the page type, then
// delegates the consistency checks (and possible repair) to validate_page().
inline VALIDATED_PAGE_DESCRIPTOR validate_extent_page_descr(const struct rrdeng_extent_page_descr *descr, time_t now_s, uint32_t overwrite_zero_update_every_s, bool have_read_error) {
    time_t start_time_s = (time_t) (descr->start_time_ut / USEC_PER_SEC);
    time_t end_time_s = 0;
    size_t entries = 0;

    switch (descr->type) {
        case PAGE_METRICS:
        case PAGE_TIER:
            // fixed-size point pages: entries are derived from page_length
            // inside validate_page(), so 0 is passed here
            end_time_s = descr->end_time_ut / USEC_PER_SEC;
            entries = 0;
            break;
        case PAGE_GORILLA_METRICS:
            // gorilla pages carry their duration and entries explicitly
            end_time_s = start_time_s + descr->gorilla.delta_time_s;
            entries = descr->gorilla.entries;
            break;
        default:
            // Nothing to do. Validate page will notify the user.
            break;
    }

    return validate_page(
            (uuid_t *)descr->uuid,
            start_time_s,
            end_time_s,
            0,
            descr->page_length,
            descr->type,
            entries,
            now_s,
            overwrite_zero_update_every_s,
            have_read_error,
            "loaded", 0);
}
// Validate (and, when possible, repair) a page's metadata.
// Builds a VALIDATED_PAGE_DESCRIPTOR from the supplied values, deriving
// entries and update_every_s when they are passed as zero, then checks the
// fields for consistency. Inconsistent-but-salvageable values are adjusted
// (and logged, rate limited in production builds); irreparable pages get
// is_valid = false. msg is a short word embedded in the log line (e.g.
// "loaded"); flags, when non-zero, are rendered into the log via
// collect_page_flags_to_buffer().
VALIDATED_PAGE_DESCRIPTOR validate_page(
        uuid_t *uuid,
        time_t start_time_s,
        time_t end_time_s,
        uint32_t update_every_s,                // can be zero, if unknown
        size_t page_length,
        uint8_t page_type,
        size_t entries,                         // can be zero, if unknown
        time_t now_s,                           // can be zero, to disable future timestamp check
        uint32_t overwrite_zero_update_every_s, // can be zero, if unknown
        bool have_read_error,
        const char *msg,
        RRDENG_COLLECT_PAGE_FLAGS flags)
{
    VALIDATED_PAGE_DESCRIPTOR vd = {
            .start_time_s = start_time_s,
            .end_time_s = end_time_s,
            .update_every_s = update_every_s,
            .page_length = page_length,
            .point_size = page_type_size[page_type],
            .type = page_type,
            .is_valid = true,
    };

    bool known_page_type = true;
    switch (page_type) {
        case PAGE_METRICS:
        case PAGE_TIER:
            // always calculate entries by size
            vd.entries = page_entries_by_size(vd.page_length, vd.point_size);

            // allow to be called without entries (when loading pages from disk)
            if(!entries)
                entries = vd.entries;
            break;

        case PAGE_GORILLA_METRICS:
            internal_fatal(entries == 0, "0 number of entries found on gorilla page");
            vd.entries = entries;
            break;

        default:
            known_page_type = false;
            break;
    }

    // allow to be called without update every (when loading pages from disk)
    if(!update_every_s) {
        // derive the step from the time range, or fall back to the override
        vd.update_every_s = (vd.entries > 1) ? ((vd.end_time_s - vd.start_time_s) / (time_t) (vd.entries - 1))
                                             : overwrite_zero_update_every_s;

        update_every_s = vd.update_every_s;
    }

    // another such set of checks exists in
    // update_metric_retention_and_granularity_by_uuid()

    bool updated = false;

    size_t max_page_length = RRDENG_BLOCK_SIZE;

    // If gorilla can not compress the data we might end up needing slightly more
    // than 4KiB. However, gorilla pages extend the page length by increments of
    // 512 bytes.
    max_page_length += ((page_type == PAGE_GORILLA_METRICS) * GORILLA_BUFFER_SIZE);

    // hard rejects: unknown type, read error, impossible sizes or time ranges
    if (!known_page_type ||
        have_read_error ||
        vd.page_length == 0 ||
        vd.page_length > max_page_length ||
        vd.start_time_s > vd.end_time_s ||
        (now_s && vd.end_time_s > now_s) ||
        vd.start_time_s <= 0 ||
        vd.end_time_s <= 0 ||
        (vd.start_time_s == vd.end_time_s && vd.entries > 1) ||
        (vd.update_every_s == 0 && vd.entries > 1))
    {
        vd.is_valid = false;
    }
    else {
        if(unlikely(vd.entries != entries || vd.update_every_s != update_every_s))
            updated = true;

        if (likely(vd.update_every_s)) {
            // reconcile entries with the time range and the step
            size_t entries_by_time = page_entries_by_time(vd.start_time_s, vd.end_time_s, vd.update_every_s);

            if (vd.entries != entries_by_time) {
                if (overwrite_zero_update_every_s < vd.update_every_s)
                    vd.update_every_s = overwrite_zero_update_every_s;

                time_t new_end_time_s = (time_t)(vd.start_time_s + (vd.entries - 1) * vd.update_every_s);

                if(new_end_time_s <= vd.end_time_s) {
                    // end time is wrong
                    vd.end_time_s = new_end_time_s;
                }
                else {
                    // update every is wrong
                    vd.update_every_s = overwrite_zero_update_every_s;
                    vd.end_time_s = (time_t)(vd.start_time_s + (vd.entries - 1) * vd.update_every_s);
                }

                updated = true;
            }
        }
        else if(overwrite_zero_update_every_s) {
            vd.update_every_s = overwrite_zero_update_every_s;
            updated = true;
        }
    }

    // log rejected or adjusted pages
    if(unlikely(!vd.is_valid || updated)) {
#ifndef NETDATA_INTERNAL_CHECKS
        nd_log_limit_static_global_var(erl, 1, 0);
#endif
        char uuid_str[UUID_STR_LEN + 1];
        uuid_unparse(*uuid, uuid_str);

        BUFFER *wb = NULL;

        if(flags) {
            wb = buffer_create(0, NULL);
            collect_page_flags_to_buffer(wb, flags);
        }

        if(!vd.is_valid) {
#ifdef NETDATA_INTERNAL_CHECKS
            internal_error(true,
#else
            nd_log_limit(&erl, NDLS_DAEMON, NDLP_ERR,
#endif
                          "DBENGINE: metric '%s' %s invalid page of type %u "
                          "from %ld to %ld (now %ld), update every %u, page length %zu, entries %zu (flags: %s)",
                          uuid_str, msg, vd.type,
                          vd.start_time_s, vd.end_time_s, now_s, vd.update_every_s, vd.page_length, vd.entries, wb?buffer_tostring(wb):""
            );
        }
        else {
            // the page is valid but one or more fields were adjusted;
            // err_valid is intentionally always empty in this branch
            const char *err_valid = "";
            const char *err_start = (vd.start_time_s == start_time_s) ? "" : "start time updated, ";
            const char *err_end = (vd.end_time_s == end_time_s) ? "" : "end time updated, ";
            const char *err_update = (vd.update_every_s == update_every_s) ? "" : "update every updated, ";
            const char *err_length = (vd.page_length == page_length) ? "" : "page length updated, ";
            const char *err_entries = (vd.entries == entries) ? "" : "entries updated, ";
            const char *err_future = (now_s && vd.end_time_s <= now_s) ? "" : "future end time, ";

#ifdef NETDATA_INTERNAL_CHECKS
            internal_error(true,
#else
            nd_log_limit(&erl, NDLS_DAEMON, NDLP_ERR,
#endif
                          "DBENGINE: metric '%s' %s page of type %u "
                          "from %ld to %ld (now %ld), update every %u, page length %zu, entries %zu (flags: %s), "
                          "found inconsistent - the right is "
                          "from %ld to %ld, update every %u, page length %zu, entries %zu: "
                          "%s%s%s%s%s%s%s",
                          uuid_str, msg, vd.type,
                          start_time_s, end_time_s, now_s, update_every_s, page_length, entries, wb?buffer_tostring(wb):"",
                          vd.start_time_s, vd.end_time_s, vd.update_every_s, vd.page_length, vd.entries,
                          err_valid, err_start, err_end, err_update, err_length, err_entries, err_future
            );
        }

        buffer_free(wb);
    }

    return vd;
}
  687. static inline struct page_details *epdl_get_pd_load_link_list_from_metric_start_time(EPDL *epdl, Word_t metric_id, time_t start_time_s) {
  688. if(unlikely(epdl->head_to_datafile_extent_queries_pending_for_extent))
  689. // stop appending more pages to this epdl
  690. epdl_pending_del(epdl);
  691. struct page_details *pd_list = NULL;
  692. for(EPDL *ep = epdl; ep ;ep = ep->query.next) {
  693. Pvoid_t *pd_by_start_time_s_judyL = PDCJudyLGet(ep->page_details_by_metric_id_JudyL, metric_id, PJE0);
  694. internal_fatal(pd_by_start_time_s_judyL == PJERR, "DBENGINE: corrupted extent metrics JudyL");
  695. if (unlikely(pd_by_start_time_s_judyL && *pd_by_start_time_s_judyL)) {
  696. Pvoid_t *pd_pptr = PDCJudyLGet(*pd_by_start_time_s_judyL, start_time_s, PJE0);
  697. internal_fatal(pd_pptr == PJERR, "DBENGINE: corrupted metric page details JudyHS");
  698. if(likely(pd_pptr && *pd_pptr)) {
  699. struct page_details *pd = *pd_pptr;
  700. internal_fatal(metric_id != pd->metric_id, "DBENGINE: metric ids do not match");
  701. if(likely(!pd->page)) {
  702. if (unlikely(__atomic_load_n(&ep->pdc->workers_should_stop, __ATOMIC_RELAXED)))
  703. pdc_page_status_set(pd, PDC_PAGE_FAILED | PDC_PAGE_CANCELLED);
  704. else
  705. DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(pd_list, pd, load.prev, load.next);
  706. }
  707. }
  708. }
  709. }
  710. return pd_list;
  711. }
// Emit a rate-limited error about an extent that failed to load or validate.
// The time range and metric UUID shown in the message are taken from the best
// source available, in this order of preference:
//   1. `descr`  - the on-disk page descriptor of the failing page ("DESCR"),
//   2. the first page_details entry attached to `epdl` ("PD"),
//   3. the time range of the whole query, epdl->pdc ("PDC").
static void epdl_extent_loading_error_log(struct rrdengine_instance *ctx, EPDL *epdl, struct rrdeng_extent_page_descr *descr, const char *msg) {
    char uuid[UUID_STR_LEN] = "";
    time_t start_time_s = 0;
    time_t end_time_s = 0;
    bool used_epdl = false;
    bool used_descr = false;

    if (descr) {
        // the caller gave us the failing page's descriptor - use its metadata
        start_time_s = (time_t)(descr->start_time_ut / USEC_PER_SEC);

        switch (descr->type) {
            case PAGE_METRICS:
            case PAGE_TIER:
                end_time_s = (time_t)(descr->end_time_ut / USEC_PER_SEC);
                break;
            case PAGE_GORILLA_METRICS:
                // gorilla pages store the end time as a delta from the start time
                end_time_s = (time_t) start_time_s + (descr->gorilla.delta_time_s);
                break;
        }

        uuid_unparse_lower(descr->uuid, uuid);
        used_descr = true;
    }
    else {
        // no descriptor - try the first page_details entry of the epdl
        struct page_details *pd = NULL;

        Word_t start = 0;
        Pvoid_t *pd_by_start_time_s_judyL = PDCJudyLFirst(epdl->page_details_by_metric_id_JudyL, &start, PJE0);
        if(pd_by_start_time_s_judyL) {
            start = 0;
            Pvoid_t *pd_pptr = PDCJudyLFirst(*pd_by_start_time_s_judyL, &start, PJE0);
            if(pd_pptr) {
                pd = *pd_pptr;
                start_time_s = pd->first_time_s;
                end_time_s = pd->last_time_s;
                // the metric id doubles as a METRIC pointer in this registry
                METRIC *metric = (METRIC *)pd->metric_id;
                uuid_t *u = mrg_metric_uuid(main_mrg, metric);
                uuid_unparse_lower(*u, uuid);
                used_epdl = true;
            }
        }
    }

    if(!used_epdl && !used_descr && epdl->pdc) {
        // last resort: report the time range of the whole query
        start_time_s = epdl->pdc->start_time_s;
        end_time_s = epdl->pdc->end_time_s;
    }

    // format the timestamps only when they are known (non-zero)
    char start_time_str[LOG_DATE_LENGTH + 1] = "";
    if(start_time_s)
        log_date(start_time_str, LOG_DATE_LENGTH, start_time_s);

    char end_time_str[LOG_DATE_LENGTH + 1] = "";
    if(end_time_s)
        log_date(end_time_str, LOG_DATE_LENGTH, end_time_s);

    // rate-limit, so a corrupted datafile cannot flood the log
    nd_log_limit_static_global_var(erl, 1, 0);
    nd_log_limit(&erl, NDLS_DAEMON, NDLP_ERR,
                 "DBENGINE: error while reading extent from datafile %u of tier %d, at offset %" PRIu64 " (%u bytes) "
                 "%s from %ld (%s) to %ld (%s) %s%s: "
                 "%s",
                 epdl->datafile->fileno, ctx->config.tier,
                 epdl->extent_offset, epdl->extent_size,
                 used_epdl ? "to extract page (PD)" : used_descr ? "expected page (DESCR)" : "part of a query (PDC)",
                 start_time_s, start_time_str, end_time_s, end_time_str,
                 used_epdl || used_descr ? " of metric " : "",
                 used_epdl || used_descr ? uuid : "",
                 msg);
}
// Parse a raw extent blob (header + page descriptors + [compressed] payload +
// trailer, as stored on disk) and populate the main page cache with every page
// that a pending query linked on `epdl` is waiting for.
//
//   ctx           - the dbengine instance this extent belongs to
//   data          - the raw extent bytes
//   data_length   - size of `data` in bytes
//   epdl          - head of the EPDL list of queries waiting on this extent
//   worker        - true when on a worker thread (enables worker_is_busy() tracing)
//   tags          - PDC_PAGE_STATUS bits OR-ed into every page delivered
//   cached_extent - true when `data` came from the extent cache (statistics only)
//
// Returns true when the extent header was valid and the extent was processed
// (even if individual pages turned out invalid); false when the header was
// unusable and nothing could be extracted.
static bool epdl_populate_pages_from_extent_data(
        struct rrdengine_instance *ctx,
        void *data,
        size_t data_length,
        EPDL *epdl,
        bool worker,
        PDC_PAGE_STATUS tags,
        bool cached_extent)
{
    int ret;
    unsigned i, count;
    void *uncompressed_buf = NULL;
    uint32_t payload_length, payload_offset, trailer_offset, uncompressed_payload_length = 0;
    bool have_read_error = false;
    /* persistent structures */
    struct rrdeng_df_extent_header *header;
    struct rrdeng_df_extent_trailer *trailer;
    struct extent_buffer *eb = NULL;
    uLong crc;

    // --- phase 1: header / layout validation ------------------------------
    // the extent must at least fit a header, one page descriptor and a trailer
    bool can_use_data = true;
    if(data_length < sizeof(*header) + sizeof(header->descr[0]) + sizeof(*trailer)) {
        can_use_data = false;
        // added to satisfy the requirements of older compilers (prevent warnings)
        payload_length = 0;
        payload_offset = 0;
        trailer_offset = 0;
        count = 0;
        header = NULL;
        trailer = NULL;
    }
    else {
        header = data;
        payload_length = header->payload_length;
        count = header->number_of_pages;
        payload_offset = sizeof(*header) + sizeof(header->descr[0]) * count;
        trailer_offset = data_length - sizeof(*trailer);
        trailer = data + trailer_offset;
    }

    // reject the extent outright if any of the self-describing sizes disagree
    if( !can_use_data ||
        count < 1 ||
        count > MAX_PAGES_PER_EXTENT ||
        (header->compression_algorithm != RRD_NO_COMPRESSION && header->compression_algorithm != RRD_LZ4) ||
        (payload_length != trailer_offset - payload_offset) ||
        (data_length != payload_offset + payload_length + sizeof(*trailer))
        ) {
        epdl_extent_loading_error_log(ctx, epdl, NULL, "header is INVALID");
        return false;
    }

    // --- phase 2: CRC check -----------------------------------------------
    // NOTE(review): the CRC covers epdl->extent_size - sizeof(*trailer), while
    // the layout checks above used data_length — presumably the two are always
    // equal here; confirm against the callers.
    crc = crc32(0L, Z_NULL, 0);
    crc = crc32(crc, data, epdl->extent_size - sizeof(*trailer));
    ret = crc32cmp(trailer->checksum, crc);
    if (unlikely(ret)) {
        // a CRC failure does not abort: pages are still walked, but each one
        // will be validated (and likely flagged invalid) downstream
        ctx_io_error(ctx);
        have_read_error = true;
        epdl_extent_loading_error_log(ctx, epdl, NULL, "CRC32 checksum FAILED");
    }

    // --- phase 3: decompression (LZ4 extents only) ------------------------
    if(worker)
        worker_is_busy(UV_EVENT_DBENGINE_EXTENT_DECOMPRESSION);

    if (likely(!have_read_error && RRD_NO_COMPRESSION != header->compression_algorithm)) {
        // find the uncompressed extent size
        uncompressed_payload_length = 0;
        for (i = 0; i < count; ++i) {
            size_t page_length = header->descr[i].page_length;
            // only gorilla pages may exceed one block, and then only by whole
            // GORILLA_BUFFER_SIZE increments
            if (page_length > RRDENG_BLOCK_SIZE && (header->descr[i].type != PAGE_GORILLA_METRICS ||
                                                    (header->descr[i].type == PAGE_GORILLA_METRICS &&
                                                     (page_length - RRDENG_BLOCK_SIZE) % GORILLA_BUFFER_SIZE))) {
                have_read_error = true;
                break;
            }

            uncompressed_payload_length += header->descr[i].page_length;
        }

        // sanity cap on the total uncompressed size
        if(unlikely(uncompressed_payload_length > MAX_PAGES_PER_EXTENT * RRDENG_BLOCK_SIZE))
            have_read_error = true;

        if(likely(!have_read_error)) {
            eb = extent_buffer_get(uncompressed_payload_length);
            uncompressed_buf = eb->data;

            ret = LZ4_decompress_safe(data + payload_offset, uncompressed_buf,
                                      (int) payload_length, (int) uncompressed_payload_length);

            // NOTE(review): LZ4_decompress_safe() returns a negative value on
            // failure; that value is accumulated into the stats unchecked and
            // does not set have_read_error — confirm corrupt payloads are
            // caught by the per-page validation below.
            __atomic_add_fetch(&ctx->stats.before_decompress_bytes, payload_length, __ATOMIC_RELAXED);
            __atomic_add_fetch(&ctx->stats.after_decompress_bytes, ret, __ATOMIC_RELAXED);
        }
    }

    // --- phase 4: walk the page descriptors and deliver pages -------------
    if(worker)
        worker_is_busy(UV_EVENT_DBENGINE_EXTENT_PAGE_LOOKUP);

    size_t stats_data_from_main_cache = 0;
    size_t stats_data_from_extent = 0;
    size_t stats_load_compressed = 0;
    size_t stats_load_uncompressed = 0;
    size_t stats_load_invalid_page = 0;
    size_t stats_cache_hit_while_inserting = 0;

    uint32_t page_offset = 0, page_length;
    time_t now_s = max_acceptable_collected_time();
    for (i = 0; i < count; i++, page_offset += page_length) {
        page_length = header->descr[i].page_length;
        time_t start_time_s = (time_t) (header->descr[i].start_time_ut / USEC_PER_SEC);

        if(!page_length || !start_time_s) {
            char log[200 + 1];
            snprintfz(log, sizeof(log) - 1, "page %u (out of %u) is EMPTY", i, count);
            epdl_extent_loading_error_log(ctx, epdl, &header->descr[i], log);
            continue;
        }

        METRIC *metric = mrg_metric_get_and_acquire(main_mrg, &header->descr[i].uuid, (Word_t)ctx);
        Word_t metric_id = (Word_t)metric;
        if(!metric) {
            char log[200 + 1];
            snprintfz(log, sizeof(log) - 1, "page %u (out of %u) has unknown UUID", i, count);
            epdl_extent_loading_error_log(ctx, epdl, &header->descr[i], log);
            continue;
        }
        // only the id (the pointer value) is needed past this point
        mrg_metric_release(main_mrg, metric);

        // skip pages nobody is waiting for
        struct page_details *pd_list = epdl_get_pd_load_link_list_from_metric_start_time(epdl, metric_id, start_time_s);
        if(likely(!pd_list))
            continue;

        VALIDATED_PAGE_DESCRIPTOR vd = validate_extent_page_descr(
                &header->descr[i], now_s,
                (pd_list) ? pd_list->update_every_s : 0, // pd_list is non-NULL here; the ternary is defensive
                have_read_error);

        if(worker)
            worker_is_busy(UV_EVENT_DBENGINE_EXTENT_PAGE_ALLOCATION);

        // build the page gorilla/raw data (PGD) for this page
        PGD *pgd;

        if (unlikely(!vd.is_valid)) {
            // deliver an empty page, so waiting queries do not stall
            pgd = PGD_EMPTY;
            stats_load_invalid_page++;
        }
        else {
            if (RRD_NO_COMPRESSION == header->compression_algorithm) {
                pgd = pgd_create_from_disk_data(header->descr[i].type,
                                                data + payload_offset + page_offset,
                                                vd.page_length);
                stats_load_uncompressed++;
            }
            else {
                // guard against descriptors pointing past the decompressed buffer
                if (unlikely(page_offset + vd.page_length > uncompressed_payload_length)) {
                    char log[200 + 1];
                    snprintfz(log, sizeof(log) - 1, "page %u (out of %u) offset %u + page length %zu, "
                                                    "exceeds the uncompressed buffer size %u",
                              i, count, page_offset, vd.page_length, uncompressed_payload_length);
                    epdl_extent_loading_error_log(ctx, epdl, &header->descr[i], log);

                    pgd = PGD_EMPTY;
                    stats_load_invalid_page++;
                }
                else {
                    pgd = pgd_create_from_disk_data(header->descr[i].type,
                                                    uncompressed_buf + page_offset,
                                                    vd.page_length);
                    stats_load_compressed++;
                }
            }
        }

        if(worker)
            worker_is_busy(UV_EVENT_DBENGINE_EXTENT_PAGE_POPULATION);

        // insert the page into the main cache (or find the one already there)
        PGC_ENTRY page_entry = {
                .hot = false,
                .section = (Word_t)ctx,
                .metric_id = metric_id,
                .start_time_s = vd.start_time_s,
                .end_time_s = vd.end_time_s,
                .update_every_s = (uint32_t) vd.update_every_s,
                .size = pgd_memory_footprint(pgd), // the footprint of the entire PGD, for accurate memory management
                .data = pgd,
        };

        bool added = true;
        PGC_PAGE *page = pgc_page_add_and_acquire(main_cache, page_entry, &added);
        if (false == added) {
            // someone else inserted this page first - ours is redundant
            pgd_free(pgd);
            stats_cache_hit_while_inserting++;
            stats_data_from_main_cache++;
        }
        else
            stats_data_from_extent++;

        // hand the (acquired) page to every waiting page_details entry,
        // duplicating the cache reference for each one beyond the first
        struct page_details *pd = pd_list;
        do {
            if(pd != pd_list)
                pgc_page_dup(main_cache, page);

            pd->page = page;
            pdc_page_status_set(pd, PDC_PAGE_READY | tags | (pgd_is_empty(pgd) ? PDC_PAGE_EMPTY : 0));
            pd = pd->load.next;
        } while(pd);

        if(worker)
            worker_is_busy(UV_EVENT_DBENGINE_EXTENT_PAGE_LOOKUP);
    }

    // --- phase 5: flush the accumulated statistics ------------------------
    if(stats_data_from_main_cache)
        __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_data_source_main_cache, stats_data_from_main_cache, __ATOMIC_RELAXED);

    if(cached_extent)
        __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_data_source_extent_cache, stats_data_from_extent, __ATOMIC_RELAXED);
    else {
        __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_data_source_disk, stats_data_from_extent, __ATOMIC_RELAXED);
        __atomic_add_fetch(&rrdeng_cache_efficiency_stats.extents_loaded_from_disk, 1, __ATOMIC_RELAXED);
    }

    if(stats_cache_hit_while_inserting)
        __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_load_ok_loaded_but_cache_hit_while_inserting, stats_cache_hit_while_inserting, __ATOMIC_RELAXED);

    if(stats_load_compressed)
        __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_load_ok_compressed, stats_load_compressed, __ATOMIC_RELAXED);

    if(stats_load_uncompressed)
        __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_load_ok_uncompressed, stats_load_uncompressed, __ATOMIC_RELAXED);

    if(stats_load_invalid_page)
        __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_load_fail_invalid_page_in_extent, stats_load_invalid_page, __ATOMIC_RELAXED);

    if(worker)
        worker_is_idle();

    // extent_buffer_release() handles a NULL eb (uncompressed extents)
    extent_buffer_release(eb);
    return true;
}
  975. static inline void *datafile_extent_read(struct rrdengine_instance *ctx, uv_file file, unsigned pos, unsigned size_bytes)
  976. {
  977. void *buffer;
  978. uv_fs_t request;
  979. unsigned real_io_size = ALIGN_BYTES_CEILING(size_bytes);
  980. int ret = posix_memalign(&buffer, RRDFILE_ALIGNMENT, real_io_size);
  981. if (unlikely(ret))
  982. fatal("DBENGINE: posix_memalign(): %s", strerror(ret));
  983. uv_buf_t iov = uv_buf_init(buffer, real_io_size);
  984. ret = uv_fs_read(NULL, &request, file, &iov, 1, pos, NULL);
  985. if (unlikely(-1 == ret)) {
  986. ctx_io_error(ctx);
  987. posix_memfree(buffer);
  988. buffer = NULL;
  989. }
  990. else
  991. ctx_io_read_op_bytes(ctx, real_io_size);
  992. uv_fs_req_cleanup(&request);
  993. return buffer;
  994. }
// Release a buffer returned by datafile_extent_read().
static inline void datafile_extent_read_free(void *buffer) {
    posix_memfree(buffer);
}
// Load the extent referenced by `epdl` - from the extent cache when possible,
// otherwise from disk (caching it for later) - extract the pages all queued
// queries are waiting for, then mark every page that could not be loaded as
// failed, and complete and destroy every EPDL of the group.
void epdl_find_extent_and_populate_pages(struct rrdengine_instance *ctx, EPDL *epdl, bool worker) {
    if(worker)
        worker_is_busy(UV_EVENT_DBENGINE_EXTENT_CACHE_LOOKUP);

    size_t *statistics_counter = NULL;
    PDC_PAGE_STATUS not_loaded_pages_tag = 0, loaded_pages_tag = 0;

    // the whole extent load can be skipped only when EVERY query attached to
    // it has asked its workers to stop - a single live query keeps it going
    bool should_stop = __atomic_load_n(&epdl->pdc->workers_should_stop, __ATOMIC_RELAXED);
    for(EPDL *ep = epdl->query.next; ep ;ep = ep->query.next) {
        internal_fatal(ep->datafile != epdl->datafile, "DBENGINE: datafiles do not match");
        internal_fatal(ep->extent_offset != epdl->extent_offset, "DBENGINE: extent offsets do not match");
        internal_fatal(ep->extent_size != epdl->extent_size, "DBENGINE: extent sizes do not match");
        internal_fatal(ep->file != epdl->file, "DBENGINE: files do not match");

        if(!__atomic_load_n(&ep->pdc->workers_should_stop, __ATOMIC_RELAXED)) {
            should_stop = false;
            break;
        }
    }

    if(unlikely(should_stop)) {
        statistics_counter = &rrdeng_cache_efficiency_stats.pages_load_fail_cancelled;
        not_loaded_pages_tag = PDC_PAGE_CANCELLED;
        goto cleanup;
    }

    // look the extent up in the extent cache, keyed by (ctx, fileno, offset)
    bool extent_found_in_cache = false;

    void *extent_compressed_data = NULL;
    PGC_PAGE *extent_cache_page = pgc_page_get_and_acquire(
            extent_cache, (Word_t)ctx,
            (Word_t)epdl->datafile->fileno, (time_t)epdl->extent_offset,
            PGC_SEARCH_EXACT);

    if(extent_cache_page) {
        extent_compressed_data = pgc_page_data(extent_cache_page);
        internal_fatal(epdl->extent_size != pgc_page_data_size(extent_cache, extent_cache_page),
                       "DBENGINE: cache size does not match the expected size");

        loaded_pages_tag |= PDC_PAGE_EXTENT_FROM_CACHE;
        not_loaded_pages_tag |= PDC_PAGE_EXTENT_FROM_CACHE;
        extent_found_in_cache = true;
    }
    else {
        // not cached - read it from the datafile and insert it into the cache
        if(worker)
            worker_is_busy(UV_EVENT_DBENGINE_EXTENT_MMAP);

        void *extent_data = datafile_extent_read(ctx, epdl->file, epdl->extent_offset, epdl->extent_size);
        if(extent_data != NULL) {

            // copy the extent to a dbengine-managed allocation sized exactly
            // extent_size (the read buffer is alignment-padded) and free the
            // read buffer
            void *copied_extent_compressed_data = dbengine_extent_alloc(epdl->extent_size);
            memcpy(copied_extent_compressed_data, extent_data, epdl->extent_size);
            datafile_extent_read_free(extent_data);

            if(worker)
                worker_is_busy(UV_EVENT_DBENGINE_EXTENT_CACHE_LOOKUP);

            bool added = false;
            extent_cache_page = pgc_page_add_and_acquire(extent_cache, (PGC_ENTRY) {
                    .hot = false,
                    .section = (Word_t) ctx,
                    .metric_id = (Word_t) epdl->datafile->fileno,
                    .start_time_s = (time_t) epdl->extent_offset,
                    .size = epdl->extent_size,
                    .end_time_s = 0,
                    .update_every_s = 0,
                    .data = copied_extent_compressed_data,
            }, &added);

            if (!added) {
                // another worker cached the same extent first - use its copy
                dbengine_extent_free(copied_extent_compressed_data, epdl->extent_size);
                internal_fatal(epdl->extent_size != pgc_page_data_size(extent_cache, extent_cache_page),
                               "DBENGINE: cache size does not match the expected size");
            }

            extent_compressed_data = pgc_page_data(extent_cache_page);

            loaded_pages_tag |= PDC_PAGE_EXTENT_FROM_DISK;
            not_loaded_pages_tag |= PDC_PAGE_EXTENT_FROM_DISK;
        }
    }

    if(extent_compressed_data) {
        // Need to decompress and then process the pagelist
        bool extent_used = epdl_populate_pages_from_extent_data(
                ctx, extent_compressed_data, epdl->extent_size,
                epdl, worker, loaded_pages_tag, extent_found_in_cache);

        if(extent_used) {
            // since the extent was used, all the pages that are not
            // loaded from this extent, were not found in the extent
            not_loaded_pages_tag |= PDC_PAGE_FAILED_NOT_IN_EXTENT;
            statistics_counter = &rrdeng_cache_efficiency_stats.pages_load_fail_not_found;
        }
        else {
            not_loaded_pages_tag |= PDC_PAGE_FAILED_INVALID_EXTENT;
            statistics_counter = &rrdeng_cache_efficiency_stats.pages_load_fail_invalid_extent;
        }
    }
    else {
        not_loaded_pages_tag |= PDC_PAGE_FAILED_TO_MAP_EXTENT;
        statistics_counter = &rrdeng_cache_efficiency_stats.pages_load_fail_cant_mmap_extent;
    }

    if(extent_cache_page)
        pgc_page_release(extent_cache, extent_cache_page);

cleanup:
    // remove it from the datafile extent_queries
    // this can be called multiple times safely
    epdl_pending_del(epdl);

    // mark all pending pages as failed
    for(EPDL *ep = epdl; ep ;ep = ep->query.next) {
        epdl_mark_all_not_loaded_pages_as_failed(
                ep, not_loaded_pages_tag, statistics_counter);
    }

    // complete each query's job and destroy the EPDLs; `next` is captured
    // before epdl_destroy() frees the current node
    for(EPDL *ep = epdl, *next = NULL; ep ; ep = next) {
        next = ep->query.next;

        completion_mark_complete_a_job(&ep->pdc->page_completion);
        pdc_release_and_destroy_if_unreferenced(ep->pdc, true, false);

        // Free the Judy that holds the requested pagelist and the extents
        epdl_destroy(ep);
    }

    if(worker)
        worker_is_idle();
}