pdc.c 46 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283
  1. // SPDX-License-Identifier: GPL-3.0-or-later
  2. #define NETDATA_RRD_INTERNALS
  3. #include "pdc.h"
// EPDL: the list of page details (pd) that must be loaded from a single
// extent of a single datafile, grouped per metric id.
struct extent_page_details_list {
    uv_file file;                                // open datafile handle the extent is read from
    uint64_t extent_offset;                      // byte offset of the extent inside the datafile
    uint32_t extent_size;                        // on-disk size of the extent, in bytes
    unsigned number_of_pages_in_JudyL;           // number of page_details indexed below
    Pvoid_t page_details_by_metric_id_JudyL;     // metric_id -> (start_time_s -> struct page_details *)
    struct page_details_control *pdc;            // the query (PDC) this extent load serves
    struct rrdengine_datafile *datafile;         // the datafile that owns the extent
    struct rrdeng_cmd *cmd;                      // the queued command loading this extent, if any
    bool head_to_datafile_extent_queries_pending_for_extent; // true when this EPDL is the head entry
                                                             // in the datafile's pending-extents JudyL
    struct {
        struct extent_page_details_list *prev;
        struct extent_page_details_list *next;
    } query;                                     // list of EPDLs merged on the same extent
};
// DEOL: per-datafile grouping of extent page-details lists,
// keyed by extent offset.
typedef struct datafile_extent_offset_list {
    uv_file file;                                  // open handle of the datafile
    unsigned fileno;                               // datafile number
    Pvoid_t extent_pd_list_by_extent_offset_JudyL; // extent_offset -> EPDL *
} DEOL;
  24. // ----------------------------------------------------------------------------
  25. // PDC cache
// ARAL (array allocator) pools backing the small query objects:
// PDC, page_details, EPDL and DEOL. Each pool is created by the
// matching *_init() function below.
static struct {
    struct {
        ARAL *ar;
    } pdc;
    struct {
        ARAL *ar;
    } pd;
    struct {
        ARAL *ar;
    } epdl;
    struct {
        ARAL *ar;
    } deol;
} pdc_globals = {};
  40. void pdc_init(void) {
  41. pdc_globals.pdc.ar = aral_create(
  42. "dbengine-pdc",
  43. sizeof(PDC),
  44. 0,
  45. 65536,
  46. NULL,
  47. NULL, NULL, false, false
  48. );
  49. }
  50. PDC *pdc_get(void) {
  51. PDC *pdc = aral_mallocz(pdc_globals.pdc.ar);
  52. memset(pdc, 0, sizeof(PDC));
  53. return pdc;
  54. }
// Return a PDC to its pool.
static void pdc_release(PDC *pdc) {
    aral_freez(pdc_globals.pdc.ar, pdc);
}
  58. size_t pdc_cache_size(void) {
  59. return aral_overhead(pdc_globals.pdc.ar) + aral_structures(pdc_globals.pdc.ar);
  60. }
  61. // ----------------------------------------------------------------------------
  62. // PD cache
  63. void page_details_init(void) {
  64. pdc_globals.pd.ar = aral_create(
  65. "dbengine-pd",
  66. sizeof(struct page_details),
  67. 0,
  68. 65536,
  69. NULL,
  70. NULL, NULL, false, false
  71. );
  72. }
  73. struct page_details *page_details_get(void) {
  74. struct page_details *pd = aral_mallocz(pdc_globals.pd.ar);
  75. memset(pd, 0, sizeof(struct page_details));
  76. return pd;
  77. }
// Return a page_details to its pool.
static void page_details_release(struct page_details *pd) {
    aral_freez(pdc_globals.pd.ar, pd);
}
  81. size_t pd_cache_size(void) {
  82. return aral_overhead(pdc_globals.pd.ar) + aral_structures(pdc_globals.pd.ar);
  83. }
  84. // ----------------------------------------------------------------------------
  85. // epdl cache
  86. void epdl_init(void) {
  87. pdc_globals.epdl.ar = aral_create(
  88. "dbengine-epdl",
  89. sizeof(EPDL),
  90. 0,
  91. 65536,
  92. NULL,
  93. NULL, NULL, false, false
  94. );
  95. }
  96. static EPDL *epdl_get(void) {
  97. EPDL *epdl = aral_mallocz(pdc_globals.epdl.ar);
  98. memset(epdl, 0, sizeof(EPDL));
  99. return epdl;
  100. }
// Return an EPDL to its pool.
static void epdl_release(EPDL *epdl) {
    aral_freez(pdc_globals.epdl.ar, epdl);
}
  104. size_t epdl_cache_size(void) {
  105. return aral_overhead(pdc_globals.epdl.ar) + aral_structures(pdc_globals.epdl.ar);
  106. }
  107. // ----------------------------------------------------------------------------
  108. // deol cache
  109. void deol_init(void) {
  110. pdc_globals.deol.ar = aral_create(
  111. "dbengine-deol",
  112. sizeof(DEOL),
  113. 0,
  114. 65536,
  115. NULL,
  116. NULL, NULL, false, false
  117. );
  118. }
  119. static DEOL *deol_get(void) {
  120. DEOL *deol = aral_mallocz(pdc_globals.deol.ar);
  121. memset(deol, 0, sizeof(DEOL));
  122. return deol;
  123. }
// Return a DEOL to its pool.
static void deol_release(DEOL *deol) {
    aral_freez(pdc_globals.deol.ar, deol);
}
  127. size_t deol_cache_size(void) {
  128. return aral_overhead(pdc_globals.deol.ar) + aral_structures(pdc_globals.deol.ar);
  129. }
  130. // ----------------------------------------------------------------------------
  131. // extent with buffer cache
// A small free-list cache of reusable extent I/O buffers.
// The free list is guarded by a spinlock; allocation totals are kept in
// plain counters accessed only with the __atomic builtins.
static struct {
    struct {
        SPINLOCK spinlock;
        struct extent_buffer *available_items; // free list of cached buffers
        size_t available;                      // number of items on the free list
    } protected;                               // guarded by .spinlock
    struct {
        size_t allocated;                      // buffers alive (cached or handed out)
        size_t allocated_bytes;                // total bytes of those buffers
    } atomics;                                 // accessed with __atomic_* only
    size_t max_size;                           // size every buffer is rounded up to
                                               // (recomputed by extent_buffer_init())
} extent_buffer_globals = {
    .protected = {
        .spinlock = NETDATA_SPINLOCK_INITIALIZER,
        .available_items = NULL,
        .available = 0,
    },
    .atomics = {
        .allocated = 0,
        .allocated_bytes = 0,
    },
    .max_size = MAX_PAGES_PER_EXTENT * RRDENG_BLOCK_SIZE,
};
  155. void extent_buffer_init(void) {
  156. size_t max_extent_uncompressed = MAX_PAGES_PER_EXTENT * RRDENG_BLOCK_SIZE;
  157. size_t max_size = (size_t)LZ4_compressBound(MAX_PAGES_PER_EXTENT * RRDENG_BLOCK_SIZE);
  158. if(max_size < max_extent_uncompressed)
  159. max_size = max_extent_uncompressed;
  160. extent_buffer_globals.max_size = max_size;
  161. }
// Free at most one cached extent buffer, and only while more than one is
// available. Uses trylock so it never blocks the caller; if the lock is
// contended it simply does nothing.
void extent_buffer_cleanup1(void) {
    struct extent_buffer *item = NULL;

    if(!spinlock_trylock(&extent_buffer_globals.protected.spinlock))
        return;

    // keep at least one buffer cached (available > 1)
    if(extent_buffer_globals.protected.available_items && extent_buffer_globals.protected.available > 1) {
        item = extent_buffer_globals.protected.available_items;
        DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(extent_buffer_globals.protected.available_items, item, cache.prev, cache.next);
        extent_buffer_globals.protected.available--;
    }

    spinlock_unlock(&extent_buffer_globals.protected.spinlock);

    // free and account outside the lock
    if(item) {
        size_t bytes = sizeof(struct extent_buffer) + item->bytes;
        freez(item);
        __atomic_sub_fetch(&extent_buffer_globals.atomics.allocated, 1, __ATOMIC_RELAXED);
        __atomic_sub_fetch(&extent_buffer_globals.atomics.allocated_bytes, bytes, __ATOMIC_RELAXED);
    }
}
// Get an extent buffer of at least `size` bytes: reuse a cached one when
// possible, otherwise allocate fresh. Requested sizes are rounded up to
// the global max so cached buffers are interchangeable.
struct extent_buffer *extent_buffer_get(size_t size) {
    internal_fatal(size > extent_buffer_globals.max_size, "DBENGINE: extent size is too big");

    struct extent_buffer *eb = NULL;

    // normalize the request to the (fixed) maximum buffer size
    if(size < extent_buffer_globals.max_size)
        size = extent_buffer_globals.max_size;

    // try to pop a cached buffer from the free list
    spinlock_lock(&extent_buffer_globals.protected.spinlock);
    if(likely(extent_buffer_globals.protected.available_items)) {
        eb = extent_buffer_globals.protected.available_items;
        DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(extent_buffer_globals.protected.available_items, eb, cache.prev, cache.next);
        extent_buffer_globals.protected.available--;
    }
    spinlock_unlock(&extent_buffer_globals.protected.spinlock);

    // a cached buffer may predate a max_size increase - discard it if too small
    if(unlikely(eb && eb->bytes < size)) {
        size_t bytes = sizeof(struct extent_buffer) + eb->bytes;
        freez(eb);
        eb = NULL;
        __atomic_sub_fetch(&extent_buffer_globals.atomics.allocated, 1, __ATOMIC_RELAXED);
        __atomic_sub_fetch(&extent_buffer_globals.atomics.allocated_bytes, bytes, __ATOMIC_RELAXED);
    }

    // nothing usable cached - allocate a new buffer and account for it
    if(unlikely(!eb)) {
        size_t bytes = sizeof(struct extent_buffer) + size;
        eb = mallocz(bytes);
        eb->bytes = size;
        __atomic_add_fetch(&extent_buffer_globals.atomics.allocated, 1, __ATOMIC_RELAXED);
        __atomic_add_fetch(&extent_buffer_globals.atomics.allocated_bytes, bytes, __ATOMIC_RELAXED);
    }

    return eb;
}
// Return an extent buffer to the free-list cache for reuse.
// NULL is tolerated and ignored.
void extent_buffer_release(struct extent_buffer *eb) {
    if(unlikely(!eb)) return;

    spinlock_lock(&extent_buffer_globals.protected.spinlock);
    DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(extent_buffer_globals.protected.available_items, eb, cache.prev, cache.next);
    extent_buffer_globals.protected.available++;
    spinlock_unlock(&extent_buffer_globals.protected.spinlock);
}
  214. size_t extent_buffer_cache_size(void) {
  215. return __atomic_load_n(&extent_buffer_globals.atomics.allocated_bytes, __ATOMIC_RELAXED);
  216. }
  217. // ----------------------------------------------------------------------------
  218. // epdl logic
// Free an EPDL: release every inner (per-metric) JudyL of page details,
// then the outer JudyL, then the EPDL itself.
// NOTE(review): only the Judy index structures are freed here - the
// page_details themselves are owned by the PDC and freed in pdc_destroy().
static void epdl_destroy(EPDL *epdl)
{
    Pvoid_t *pd_by_start_time_s_JudyL;
    Word_t metric_id_index = 0;
    bool metric_id_first = true;

    while ((pd_by_start_time_s_JudyL = PDCJudyLFirstThenNext(
                epdl->page_details_by_metric_id_JudyL,
                &metric_id_index, &metric_id_first)))
        PDCJudyLFreeArray(pd_by_start_time_s_JudyL, PJE0);

    PDCJudyLFreeArray(&epdl->page_details_by_metric_id_JudyL, PJE0);
    epdl_release(epdl);
}
// Walk every page details entry of an EPDL and mark as FAILED (plus the
// caller's `tags`) every page that has neither been loaded nor already
// resolved to FAILED/READY. Bumps *statistics_counter (if given) by the
// number of pages marked.
static void epdl_mark_all_not_loaded_pages_as_failed(EPDL *epdl, PDC_PAGE_STATUS tags, size_t *statistics_counter)
{
    size_t pages_matched = 0;

    Word_t metric_id_index = 0;
    bool metric_id_first = true;
    Pvoid_t *pd_by_start_time_s_JudyL;
    while((pd_by_start_time_s_JudyL = PDCJudyLFirstThenNext(epdl->page_details_by_metric_id_JudyL, &metric_id_index, &metric_id_first))) {

        Word_t start_time_index = 0;
        bool start_time_first = true;
        Pvoid_t *PValue;
        while ((PValue = PDCJudyLFirstThenNext(*pd_by_start_time_s_JudyL, &start_time_index, &start_time_first))) {
            struct page_details *pd = *PValue;

            if(!pd->page && !pdc_page_status_check(pd, PDC_PAGE_FAILED|PDC_PAGE_READY)) {
                pdc_page_status_set(pd, PDC_PAGE_FAILED | tags);
                pages_matched++;
            }
        }
    }

    if(pages_matched && statistics_counter)
        __atomic_add_fetch(statistics_counter, pages_matched, __ATOMIC_RELAXED);
}
  252. /*
  253. static bool epdl_check_if_pages_are_already_in_cache(struct rrdengine_instance *ctx, EPDL *epdl, PDC_PAGE_STATUS tags)
  254. {
  255. size_t count_remaining = 0;
  256. size_t found = 0;
  257. Word_t metric_id_index = 0;
  258. bool metric_id_first = true;
  259. Pvoid_t *pd_by_start_time_s_JudyL;
  260. while((pd_by_start_time_s_JudyL = PDCJudyLFirstThenNext(epdl->page_details_by_metric_id_JudyL, &metric_id_index, &metric_id_first))) {
  261. Word_t start_time_index = 0;
  262. bool start_time_first = true;
  263. Pvoid_t *PValue;
  264. while ((PValue = PDCJudyLFirstThenNext(*pd_by_start_time_s_JudyL, &start_time_index, &start_time_first))) {
  265. struct page_details *pd = *PValue;
  266. if (pd->page)
  267. continue;
  268. pd->page = pgc_page_get_and_acquire(main_cache, (Word_t) ctx, pd->metric_id, pd->first_time_s, PGC_SEARCH_EXACT);
  269. if (pd->page) {
  270. found++;
  271. pdc_page_status_set(pd, PDC_PAGE_READY | tags);
  272. }
  273. else
  274. count_remaining++;
  275. }
  276. }
  277. if(found) {
  278. __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_load_ok_preloaded, found, __ATOMIC_RELAXED);
  279. __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_data_source_main_cache, found, __ATOMIC_RELAXED);
  280. }
  281. return count_remaining == 0;
  282. }
  283. */
  284. // ----------------------------------------------------------------------------
  285. // PDC logic
// Tear down a PDC once its last reference is gone: release the metric and
// completions, release datafiles and cached pages held by each page
// details entry, free the entries and their Judy index, update the
// query statistics, and return the PDC to its pool.
static void pdc_destroy(PDC *pdc) {
    mrg_metric_release(main_mrg, pdc->metric);
    completion_destroy(&pdc->prep_completion);
    completion_destroy(&pdc->page_completion);

    Pvoid_t *PValue;
    struct page_details *pd;
    Word_t time_index = 0;
    bool first_then_next = true;
    size_t unroutable = 0, cancelled = 0;
    while((PValue = PDCJudyLFirstThenNext(pdc->page_list_JudyL, &time_index, &first_then_next))) {
        pd = *PValue;

        // no need for atomics here - we are done...
        PDC_PAGE_STATUS status = pd->status;

        if(status & PDC_PAGE_DATAFILE_ACQUIRED) {
            datafile_release(pd->datafile.ptr, DATAFILE_ACQUIRE_PAGE_DETAILS);
            pd->datafile.ptr = NULL;
        }

        internal_fatal(pd->datafile.ptr, "DBENGINE: page details has a datafile.ptr that is not released.");

        // pages that were never routed to a loader and never resolved
        if(!pd->page && !(status & (PDC_PAGE_READY | PDC_PAGE_FAILED | PDC_PAGE_RELEASED | PDC_PAGE_SKIP | PDC_PAGE_INVALID | PDC_PAGE_CANCELLED))) {
            // pdc_page_status_set(pd, PDC_PAGE_FAILED);
            unroutable++;
        }
        else if(!pd->page && (status & PDC_PAGE_CANCELLED))
            cancelled++;

        // drop our acquisition on cached pages that were not released yet
        if(pd->page && !(status & PDC_PAGE_RELEASED)) {
            pgc_page_release(main_cache, pd->page);
            // pdc_page_status_set(pd, PDC_PAGE_RELEASED);
        }

        page_details_release(pd);
    }

    PDCJudyLFreeArray(&pdc->page_list_JudyL, PJE0);

    __atomic_sub_fetch(&rrdeng_cache_efficiency_stats.currently_running_queries, 1, __ATOMIC_RELAXED);
    __atomic_sub_fetch(&pdc->ctx->atomic.inflight_queries, 1, __ATOMIC_RELAXED);
    pdc_release(pdc);

    if(unroutable)
        __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_load_fail_unroutable, unroutable, __ATOMIC_RELAXED);

    if(cancelled)
        __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_load_fail_cancelled, cancelled, __ATOMIC_RELAXED);
}
// Take an additional reference on a PDC.
// The PDC must already be referenced (refcount >= 1); acquiring a
// dead PDC is a fatal programming error.
void pdc_acquire(PDC *pdc) {
    spinlock_lock(&pdc->refcount_spinlock);

    if(pdc->refcount < 1)
        fatal("DBENGINE: pdc is not referenced and cannot be acquired");

    pdc->refcount++;
    spinlock_unlock(&pdc->refcount_spinlock);
}
// Drop one reference on a PDC and destroy it when the last reference is
// gone. Returns true when the PDC was destroyed (or was NULL), false when
// it is still referenced. Workers additionally mark the page completion
// when only one reference remains, waking the query thread if it is the
// one still holding that reference.
bool pdc_release_and_destroy_if_unreferenced(PDC *pdc, bool worker, bool router __maybe_unused) {
    if(unlikely(!pdc))
        return true;

    spinlock_lock(&pdc->refcount_spinlock);

    if(pdc->refcount <= 0)
        fatal("DBENGINE: pdc is not referenced and cannot be released");

    pdc->refcount--;

    if (pdc->refcount <= 1 && worker) {
        // when 1 refcount is remaining, and we are a worker,
        // we can mark the job completed:
        // - if the remaining refcount is from the query caller, we will wake it up
        // - if the remaining refcount is from another worker, the query thread is already away
        completion_mark_complete(&pdc->page_completion);
    }

    if (pdc->refcount == 0) {
        spinlock_unlock(&pdc->refcount_spinlock);
        pdc_destroy(pdc);
        return true;
    }

    spinlock_unlock(&pdc->refcount_spinlock);
    return false;
}
// Callback: remember the command that was queued to load this EPDL,
// so it can later be re-prioritized (see epdl_pending_add()).
void epdl_cmd_queued(void *epdl_ptr, struct rrdeng_cmd *cmd) {
    EPDL *epdl = epdl_ptr;
    epdl->cmd = cmd;
}
// Callback: the command loading this EPDL left the queue - forget it.
void epdl_cmd_dequeued(void *epdl_ptr) {
    EPDL *epdl = epdl_ptr;
    epdl->cmd = NULL;
}
// Return the queued command of an EPDL (NULL when not queued).
// Used by rrdeng_req_cmd() to locate the command to re-prioritize.
static struct rrdeng_cmd *epdl_get_cmd(void *epdl_ptr) {
    EPDL *epdl = epdl_ptr;
    return epdl->cmd;
}
// Register an EPDL in its datafile's pending-extent-queries index.
// Returns true when this is the first query for the extent (caller must
// dispatch the read). Returns false when it was merged behind an existing
// EPDL for the same extent; in that case the already-queued command may be
// re-prioritized if the new query is more urgent.
static bool epdl_pending_add(EPDL *epdl) {
    bool added_new;

    spinlock_lock(&epdl->datafile->extent_queries.spinlock);
    Pvoid_t *PValue = JudyLIns(&epdl->datafile->extent_queries.pending_epdl_by_extent_offset_judyL, epdl->extent_offset, PJE0);
    internal_fatal(!PValue || PValue == PJERR, "DBENGINE: corrupted pending extent judy");

    EPDL *base = *PValue;

    if(!base) {
        // first query for this extent - this EPDL becomes the index head
        added_new = true;
        epdl->head_to_datafile_extent_queries_pending_for_extent = true;
    }
    else {
        // merge behind the existing head
        added_new = false;
        epdl->head_to_datafile_extent_queries_pending_for_extent = false;
        __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_load_extent_merged, 1, __ATOMIC_RELAXED);

        // re-prioritize the queued command when the new query is more
        // urgent (lower priority value, presumably - TODO confirm ordering)
        if(base->pdc->priority > epdl->pdc->priority)
            rrdeng_req_cmd(epdl_get_cmd, base, epdl->pdc->priority);
    }

    DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(base, epdl, query.prev, query.next);
    *PValue = base;

    spinlock_unlock(&epdl->datafile->extent_queries.spinlock);

    return added_new;
}
// Remove an EPDL from its datafile's pending-extent-queries index.
// Only the head EPDL of an extent is indexed, so only it gets deleted;
// merged (non-head) EPDLs have nothing to remove.
static void epdl_pending_del(EPDL *epdl) {
    spinlock_lock(&epdl->datafile->extent_queries.spinlock);

    if(epdl->head_to_datafile_extent_queries_pending_for_extent) {
        epdl->head_to_datafile_extent_queries_pending_for_extent = false;
        int rc = JudyLDel(&epdl->datafile->extent_queries.pending_epdl_by_extent_offset_judyL, epdl->extent_offset, PJE0);
        (void) rc;
        internal_fatal(!rc, "DBENGINE: epdl not found in pending list");
    }

    spinlock_unlock(&epdl->datafile->extent_queries.spinlock);
}
// Route the disk-pending pages of a PDC to extent loaders.
// Pages are deduplicated per datafile (DEOL) and per extent (EPDL), then
// each unique extent is dispatched once: the first extent through
// exec_first_extent_list(), the rest through exec_rest_extent_list().
// Extents already pending on the same datafile are merged instead of
// dispatched (see epdl_pending_add()). Each dispatched EPDL carries one
// PDC reference for the worker; the router's own reference is dropped at
// the end.
void pdc_to_epdl_router(struct rrdengine_instance *ctx, PDC *pdc, execute_extent_page_details_list_t exec_first_extent_list, execute_extent_page_details_list_t exec_rest_extent_list)
{
    Pvoid_t *PValue;
    Pvoid_t *PValue1;
    Pvoid_t *PValue2;
    Word_t time_index = 0;
    struct page_details *pd = NULL;

    // this is the entire page list
    // Lets do some deduplication
    // 1. Per datafile
    // 2. Per extent
    // 3. Pages per extent will be added to the cache either as acquired or not

    Pvoid_t JudyL_datafile_list = NULL;

    DEOL *deol;
    EPDL *epdl;

    if (pdc->page_list_JudyL) {
        bool first_then_next = true;

        // pass 1: group disk-pending pages by datafile, then by extent
        while((PValue = PDCJudyLFirstThenNext(pdc->page_list_JudyL, &time_index, &first_then_next))) {
            pd = *PValue;

            internal_fatal(!pd,
                           "DBENGINE: pdc page list has an empty page details entry");

            if (!(pd->status & PDC_PAGE_DISK_PENDING))
                continue;

            internal_fatal(!(pd->status & PDC_PAGE_DATAFILE_ACQUIRED),
                           "DBENGINE: page details has not acquired the datafile");

            internal_fatal((pd->status & (PDC_PAGE_READY | PDC_PAGE_FAILED)),
                           "DBENGINE: page details has disk pending flag but it is ready/failed");

            internal_fatal(pd->page,
                           "DBENGINE: page details has a page linked to it, but it is marked for loading");

            // find or create the DEOL for this page's datafile
            PValue1 = PDCJudyLIns(&JudyL_datafile_list, pd->datafile.fileno, PJE0);
            if (PValue1 && !*PValue1) {
                *PValue1 = deol = deol_get();
                deol->extent_pd_list_by_extent_offset_JudyL = NULL;
                deol->fileno = pd->datafile.fileno;
            }
            else
                deol = *PValue1;

            // find or create the EPDL for this page's extent
            PValue2 = PDCJudyLIns(&deol->extent_pd_list_by_extent_offset_JudyL, pd->datafile.extent.pos, PJE0);
            if (PValue2 && !*PValue2) {
                *PValue2 = epdl = epdl_get();
                epdl->page_details_by_metric_id_JudyL = NULL;
                epdl->number_of_pages_in_JudyL = 0;
                epdl->file = pd->datafile.file;
                epdl->extent_offset = pd->datafile.extent.pos;
                epdl->extent_size = pd->datafile.extent.bytes;
                epdl->datafile = pd->datafile.ptr;
            }
            else
                epdl = *PValue2;

            epdl->number_of_pages_in_JudyL++;

            // index the page within the extent: metric_id -> start time -> pd
            Pvoid_t *pd_by_first_time_s_judyL = PDCJudyLIns(&epdl->page_details_by_metric_id_JudyL, pd->metric_id, PJE0);
            Pvoid_t *pd_pptr = PDCJudyLIns(pd_by_first_time_s_judyL, pd->first_time_s, PJE0);
            *pd_pptr = pd;
        }

        // pass 2: dispatch one load per unique extent
        size_t extent_list_no = 0;
        Word_t datafile_no = 0;
        first_then_next = true;
        while((PValue = PDCJudyLFirstThenNext(JudyL_datafile_list, &datafile_no, &first_then_next))) {
            deol = *PValue;

            bool first_then_next_extent = true;
            Word_t pos = 0;
            while ((PValue = PDCJudyLFirstThenNext(deol->extent_pd_list_by_extent_offset_JudyL, &pos, &first_then_next_extent))) {
                epdl = *PValue;
                internal_fatal(!epdl, "DBENGINE: extent_list is not populated properly");

                // The extent page list can be dispatched to a worker
                // It will need to populate the cache with "acquired" pages that are in the list (pd) only
                // the rest of the extent pages will be added to the cache butnot acquired

                pdc_acquire(pdc); // we do this for the next worker: do_read_extent_work()
                epdl->pdc = pdc;

                if(epdl_pending_add(epdl)) {
                    if (extent_list_no++ == 0)
                        exec_first_extent_list(ctx, epdl, pdc->priority);
                    else
                        exec_rest_extent_list(ctx, epdl, pdc->priority);
                }
            }

            PDCJudyLFreeArray(&deol->extent_pd_list_by_extent_offset_JudyL, PJE0);
            deol_release(deol);
        }

        PDCJudyLFreeArray(&JudyL_datafile_list, PJE0);
    }

    pdc_release_and_destroy_if_unreferenced(pdc, true, true);
}
  481. void collect_page_flags_to_buffer(BUFFER *wb, RRDENG_COLLECT_PAGE_FLAGS flags) {
  482. if(flags & RRDENG_PAGE_PAST_COLLECTION)
  483. buffer_strcat(wb, "PAST_COLLECTION ");
  484. if(flags & RRDENG_PAGE_REPEATED_COLLECTION)
  485. buffer_strcat(wb, "REPEATED_COLLECTION ");
  486. if(flags & RRDENG_PAGE_BIG_GAP)
  487. buffer_strcat(wb, "BIG_GAP ");
  488. if(flags & RRDENG_PAGE_GAP)
  489. buffer_strcat(wb, "GAP ");
  490. if(flags & RRDENG_PAGE_FUTURE_POINT)
  491. buffer_strcat(wb, "FUTURE_POINT ");
  492. if(flags & RRDENG_PAGE_CREATED_IN_FUTURE)
  493. buffer_strcat(wb, "CREATED_IN_FUTURE ");
  494. if(flags & RRDENG_PAGE_COMPLETED_IN_FUTURE)
  495. buffer_strcat(wb, "COMPLETED_IN_FUTURE ");
  496. if(flags & RRDENG_PAGE_UNALIGNED)
  497. buffer_strcat(wb, "UNALIGNED ");
  498. if(flags & RRDENG_PAGE_CONFLICT)
  499. buffer_strcat(wb, "CONFLICT ");
  500. if(flags & RRDENG_PAGE_FULL)
  501. buffer_strcat(wb, "PAGE_FULL");
  502. if(flags & RRDENG_PAGE_COLLECT_FINALIZE)
  503. buffer_strcat(wb, "COLLECT_FINALIZE");
  504. if(flags & RRDENG_PAGE_UPDATE_EVERY_CHANGE)
  505. buffer_strcat(wb, "UPDATE_EVERY_CHANGE");
  506. if(flags & RRDENG_PAGE_STEP_TOO_SMALL)
  507. buffer_strcat(wb, "STEP_TOO_SMALL");
  508. if(flags & RRDENG_PAGE_STEP_UNALIGNED)
  509. buffer_strcat(wb, "STEP_UNALIGNED");
  510. }
// Validate a page descriptor read from an extent header on disk.
// Update-every and entries are unknown at this point (passed as 0, to be
// derived by validate_page()), and collection flags do not apply to
// loaded pages.
inline VALIDATED_PAGE_DESCRIPTOR validate_extent_page_descr(const struct rrdeng_extent_page_descr *descr, time_t now_s, time_t overwrite_zero_update_every_s, bool have_read_error) {
    return validate_page(
        (uuid_t *)descr->uuid,
        (time_t) (descr->start_time_ut / USEC_PER_SEC),
        (time_t) (descr->end_time_ut / USEC_PER_SEC),
        0,
        descr->page_length,
        descr->type,
        0,
        now_s,
        overwrite_zero_update_every_s,
        have_read_error,
        "loaded", 0);
}
// Validate (and, where possible, repair) a page descriptor.
// Derives entries from page length and update-every from the time range
// when they are unknown, rejects impossible descriptors (zero/oversized
// length, inverted or non-positive time range, future end time, ...),
// and reconciles inconsistent end time vs update-every. Logs a rate-
// limited error whenever the page was invalid or had to be corrected.
// Returns the validated descriptor with .is_valid set accordingly.
VALIDATED_PAGE_DESCRIPTOR validate_page(
        uuid_t *uuid,
        time_t start_time_s,
        time_t end_time_s,
        time_t update_every_s,                  // can be zero, if unknown
        size_t page_length,
        uint8_t page_type,
        size_t entries,                         // can be zero, if unknown
        time_t now_s,                           // can be zero, to disable future timestamp check
        time_t overwrite_zero_update_every_s,   // can be zero, if unknown
        bool have_read_error,
        const char *msg,
        RRDENG_COLLECT_PAGE_FLAGS flags) {

    VALIDATED_PAGE_DESCRIPTOR vd = {
            .start_time_s = start_time_s,
            .end_time_s = end_time_s,
            .update_every_s = update_every_s,
            .page_length = page_length,
            .type = page_type,
            .is_valid = true,
    };

    // always calculate entries by size
    vd.point_size = page_type_size[vd.type];
    vd.entries = page_entries_by_size(vd.page_length, vd.point_size);

    // allow to be called without entries (when loading pages from disk)
    if(!entries)
        entries = vd.entries;

    // allow to be called without update every (when loading pages from disk)
    if(!update_every_s) {
        vd.update_every_s = (vd.entries > 1) ? ((vd.end_time_s - vd.start_time_s) / (time_t) (vd.entries - 1))
                                             : overwrite_zero_update_every_s;
        update_every_s = vd.update_every_s;
    }

    // another such set of checks exists in
    // update_metric_retention_and_granularity_by_uuid()

    bool updated = false;

    if( have_read_error ||
        vd.page_length == 0 ||
        vd.page_length > RRDENG_BLOCK_SIZE ||
        vd.start_time_s > vd.end_time_s ||
        (now_s && vd.end_time_s > now_s) ||
        vd.start_time_s <= 0 ||
        vd.end_time_s <= 0 ||
        vd.update_every_s < 0 ||
        (vd.start_time_s == vd.end_time_s && vd.entries > 1) ||
        (vd.update_every_s == 0 && vd.entries > 1)
        )
        vd.is_valid = false;

    else {
        // the descriptor is plausible - check internal consistency
        if(unlikely(vd.entries != entries || vd.update_every_s != update_every_s))
            updated = true;

        if (likely(vd.update_every_s)) {
            size_t entries_by_time = page_entries_by_time(vd.start_time_s, vd.end_time_s, vd.update_every_s);

            if (vd.entries != entries_by_time) {
                // entries by size disagree with entries by time:
                // either the end time or the update-every is wrong
                if (overwrite_zero_update_every_s < vd.update_every_s)
                    vd.update_every_s = overwrite_zero_update_every_s;

                time_t new_end_time_s = (time_t)(vd.start_time_s + (vd.entries - 1) * vd.update_every_s);

                if(new_end_time_s <= vd.end_time_s) {
                    // end time is wrong
                    vd.end_time_s = new_end_time_s;
                }
                else {
                    // update every is wrong
                    vd.update_every_s = overwrite_zero_update_every_s;
                    vd.end_time_s = (time_t)(vd.start_time_s + (vd.entries - 1) * vd.update_every_s);
                }

                updated = true;
            }
        }
        else if(overwrite_zero_update_every_s) {
            vd.update_every_s = overwrite_zero_update_every_s;
            updated = true;
        }
    }

    if(unlikely(!vd.is_valid || updated)) {
#ifndef NETDATA_INTERNAL_CHECKS
        error_limit_static_global_var(erl, 1, 0);
#endif
        char uuid_str[UUID_STR_LEN + 1];
        uuid_unparse(*uuid, uuid_str);

        BUFFER *wb = NULL;

        if(flags) {
            wb = buffer_create(0, NULL);
            collect_page_flags_to_buffer(wb, flags);
        }

        if(!vd.is_valid) {
#ifdef NETDATA_INTERNAL_CHECKS
            internal_error(true,
#else
            error_limit(&erl,
#endif
                    "DBENGINE: metric '%s' %s invalid page of type %u "
                    "from %ld to %ld (now %ld), update every %ld, page length %zu, entries %zu (flags: %s)",
                    uuid_str, msg, vd.type,
                    vd.start_time_s, vd.end_time_s, now_s, vd.update_every_s, vd.page_length, vd.entries, wb?buffer_tostring(wb):""
            );
        }
        else {
            const char *err_valid = (vd.is_valid) ? "" : "found invalid, ";
            const char *err_start = (vd.start_time_s == start_time_s) ? "" : "start time updated, ";
            const char *err_end = (vd.end_time_s == end_time_s) ? "" : "end time updated, ";
            const char *err_update = (vd.update_every_s == update_every_s) ? "" : "update every updated, ";
            const char *err_length = (vd.page_length == page_length) ? "" : "page length updated, ";
            const char *err_entries = (vd.entries == entries) ? "" : "entries updated, ";
            const char *err_future = (now_s && vd.end_time_s <= now_s) ? "" : "future end time, ";

#ifdef NETDATA_INTERNAL_CHECKS
            internal_error(true,
#else
            error_limit(&erl,
#endif
                    "DBENGINE: metric '%s' %s page of type %u "
                    "from %ld to %ld (now %ld), update every %ld, page length %zu, entries %zu (flags: %s), "
                    "found inconsistent - the right is "
                    "from %ld to %ld, update every %ld, page length %zu, entries %zu: "
                    "%s%s%s%s%s%s%s",
                    uuid_str, msg, vd.type,
                    start_time_s, end_time_s, now_s, update_every_s, page_length, entries, wb?buffer_tostring(wb):"",
                    vd.start_time_s, vd.end_time_s, vd.update_every_s, vd.page_length, vd.entries,
                    err_valid, err_start, err_end, err_update, err_length, err_entries, err_future
            );
        }

        buffer_free(wb);
    }

    return vd;
}
// Build the load list for one (metric_id, start_time_s) across an EPDL
// and all the EPDLs merged behind it on the same extent. Matching pages
// that are not yet loaded are linked into the returned list via
// load.prev/load.next - unless the owning query asked its workers to
// stop, in which case they are marked FAILED|CANCELLED instead.
// Also unregisters the EPDL from the pending index first, so no more
// queries can merge into it while it is being served.
static inline struct page_details *epdl_get_pd_load_link_list_from_metric_start_time(EPDL *epdl, Word_t metric_id, time_t start_time_s) {

    if(unlikely(epdl->head_to_datafile_extent_queries_pending_for_extent))
        // stop appending more pages to this epdl
        epdl_pending_del(epdl);

    struct page_details *pd_list = NULL;

    // walk this EPDL and every EPDL merged behind it
    for(EPDL *ep = epdl; ep ;ep = ep->query.next) {
        Pvoid_t *pd_by_start_time_s_judyL = PDCJudyLGet(ep->page_details_by_metric_id_JudyL, metric_id, PJE0);
        internal_fatal(pd_by_start_time_s_judyL == PJERR, "DBENGINE: corrupted extent metrics JudyL");

        if (unlikely(pd_by_start_time_s_judyL && *pd_by_start_time_s_judyL)) {
            Pvoid_t *pd_pptr = PDCJudyLGet(*pd_by_start_time_s_judyL, start_time_s, PJE0);
            internal_fatal(pd_pptr == PJERR, "DBENGINE: corrupted metric page details JudyHS");

            if(likely(pd_pptr && *pd_pptr)) {
                struct page_details *pd = *pd_pptr;
                internal_fatal(metric_id != pd->metric_id, "DBENGINE: metric ids do not match");

                if(likely(!pd->page)) {
                    if (unlikely(__atomic_load_n(&ep->pdc->workers_should_stop, __ATOMIC_RELAXED)))
                        pdc_page_status_set(pd, PDC_PAGE_FAILED | PDC_PAGE_CANCELLED);
                    else
                        DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(pd_list, pd, load.prev, load.next);
                }
            }
        }
    }

    return pd_list;
}
  675. static void epdl_extent_loading_error_log(struct rrdengine_instance *ctx, EPDL *epdl, struct rrdeng_extent_page_descr *descr, const char *msg) {
  676. char uuid[UUID_STR_LEN] = "";
  677. time_t start_time_s = 0;
  678. time_t end_time_s = 0;
  679. bool used_epdl = false;
  680. bool used_descr = false;
  681. if (descr) {
  682. start_time_s = (time_t)(descr->start_time_ut / USEC_PER_SEC);
  683. end_time_s = (time_t)(descr->end_time_ut / USEC_PER_SEC);
  684. uuid_unparse_lower(descr->uuid, uuid);
  685. used_descr = true;
  686. }
  687. else {
  688. struct page_details *pd = NULL;
  689. Word_t start = 0;
  690. Pvoid_t *pd_by_start_time_s_judyL = PDCJudyLFirst(epdl->page_details_by_metric_id_JudyL, &start, PJE0);
  691. if(pd_by_start_time_s_judyL) {
  692. start = 0;
  693. Pvoid_t *pd_pptr = PDCJudyLFirst(*pd_by_start_time_s_judyL, &start, PJE0);
  694. if(pd_pptr) {
  695. pd = *pd_pptr;
  696. start_time_s = pd->first_time_s;
  697. end_time_s = pd->last_time_s;
  698. METRIC *metric = (METRIC *)pd->metric_id;
  699. uuid_t *u = mrg_metric_uuid(main_mrg, metric);
  700. uuid_unparse_lower(*u, uuid);
  701. used_epdl = true;
  702. }
  703. }
  704. }
  705. if(!used_epdl && !used_descr && epdl->pdc) {
  706. start_time_s = epdl->pdc->start_time_s;
  707. end_time_s = epdl->pdc->end_time_s;
  708. }
  709. char start_time_str[LOG_DATE_LENGTH + 1] = "";
  710. if(start_time_s)
  711. log_date(start_time_str, LOG_DATE_LENGTH, start_time_s);
  712. char end_time_str[LOG_DATE_LENGTH + 1] = "";
  713. if(end_time_s)
  714. log_date(end_time_str, LOG_DATE_LENGTH, end_time_s);
  715. error_limit_static_global_var(erl, 1, 0);
  716. error_limit(&erl,
  717. "DBENGINE: error while reading extent from datafile %u of tier %d, at offset %" PRIu64 " (%u bytes) "
  718. "%s from %ld (%s) to %ld (%s) %s%s: "
  719. "%s",
  720. epdl->datafile->fileno, ctx->config.tier,
  721. epdl->extent_offset, epdl->extent_size,
  722. used_epdl ? "to extract page (PD)" : used_descr ? "expected page (DESCR)" : "part of a query (PDC)",
  723. start_time_s, start_time_str, end_time_s, end_time_str,
  724. used_epdl || used_descr ? " of metric " : "",
  725. used_epdl || used_descr ? uuid : "",
  726. msg);
  727. }
  728. static bool epdl_populate_pages_from_extent_data(
  729. struct rrdengine_instance *ctx,
  730. void *data,
  731. size_t data_length,
  732. EPDL *epdl,
  733. bool worker,
  734. PDC_PAGE_STATUS tags,
  735. bool cached_extent)
  736. {
  737. int ret;
  738. unsigned i, count;
  739. void *uncompressed_buf = NULL;
  740. uint32_t payload_length, payload_offset, trailer_offset, uncompressed_payload_length = 0;
  741. bool have_read_error = false;
  742. /* persistent structures */
  743. struct rrdeng_df_extent_header *header;
  744. struct rrdeng_df_extent_trailer *trailer;
  745. struct extent_buffer *eb = NULL;
  746. uLong crc;
  747. bool can_use_data = true;
  748. if(data_length < sizeof(*header) + sizeof(header->descr[0]) + sizeof(*trailer)) {
  749. can_use_data = false;
  750. // added to satisfy the requirements of older compilers (prevent warnings)
  751. payload_length = 0;
  752. payload_offset = 0;
  753. trailer_offset = 0;
  754. count = 0;
  755. header = NULL;
  756. trailer = NULL;
  757. }
  758. else {
  759. header = data;
  760. payload_length = header->payload_length;
  761. count = header->number_of_pages;
  762. payload_offset = sizeof(*header) + sizeof(header->descr[0]) * count;
  763. trailer_offset = data_length - sizeof(*trailer);
  764. trailer = data + trailer_offset;
  765. }
  766. if( !can_use_data ||
  767. count < 1 ||
  768. count > MAX_PAGES_PER_EXTENT ||
  769. (header->compression_algorithm != RRD_NO_COMPRESSION && header->compression_algorithm != RRD_LZ4) ||
  770. (payload_length != trailer_offset - payload_offset) ||
  771. (data_length != payload_offset + payload_length + sizeof(*trailer))
  772. ) {
  773. epdl_extent_loading_error_log(ctx, epdl, NULL, "header is INVALID");
  774. return false;
  775. }
  776. crc = crc32(0L, Z_NULL, 0);
  777. crc = crc32(crc, data, epdl->extent_size - sizeof(*trailer));
  778. ret = crc32cmp(trailer->checksum, crc);
  779. if (unlikely(ret)) {
  780. ctx_io_error(ctx);
  781. have_read_error = true;
  782. epdl_extent_loading_error_log(ctx, epdl, NULL, "CRC32 checksum FAILED");
  783. }
  784. if(worker)
  785. worker_is_busy(UV_EVENT_DBENGINE_EXTENT_DECOMPRESSION);
  786. if (likely(!have_read_error && RRD_NO_COMPRESSION != header->compression_algorithm)) {
  787. // find the uncompressed extent size
  788. uncompressed_payload_length = 0;
  789. for (i = 0; i < count; ++i) {
  790. size_t page_length = header->descr[i].page_length;
  791. if(page_length > RRDENG_BLOCK_SIZE) {
  792. have_read_error = true;
  793. break;
  794. }
  795. uncompressed_payload_length += header->descr[i].page_length;
  796. }
  797. if(unlikely(uncompressed_payload_length > MAX_PAGES_PER_EXTENT * RRDENG_BLOCK_SIZE))
  798. have_read_error = true;
  799. if(likely(!have_read_error)) {
  800. eb = extent_buffer_get(uncompressed_payload_length);
  801. uncompressed_buf = eb->data;
  802. ret = LZ4_decompress_safe(data + payload_offset, uncompressed_buf,
  803. (int) payload_length, (int) uncompressed_payload_length);
  804. __atomic_add_fetch(&ctx->stats.before_decompress_bytes, payload_length, __ATOMIC_RELAXED);
  805. __atomic_add_fetch(&ctx->stats.after_decompress_bytes, ret, __ATOMIC_RELAXED);
  806. }
  807. }
  808. if(worker)
  809. worker_is_busy(UV_EVENT_DBENGINE_EXTENT_PAGE_LOOKUP);
  810. size_t stats_data_from_main_cache = 0;
  811. size_t stats_data_from_extent = 0;
  812. size_t stats_load_compressed = 0;
  813. size_t stats_load_uncompressed = 0;
  814. size_t stats_load_invalid_page = 0;
  815. size_t stats_cache_hit_while_inserting = 0;
  816. uint32_t page_offset = 0, page_length;
  817. time_t now_s = max_acceptable_collected_time();
  818. for (i = 0; i < count; i++, page_offset += page_length) {
  819. page_length = header->descr[i].page_length;
  820. time_t start_time_s = (time_t) (header->descr[i].start_time_ut / USEC_PER_SEC);
  821. if(!page_length || !start_time_s) {
  822. char log[200 + 1];
  823. snprintfz(log, 200, "page %u (out of %u) is EMPTY", i, count);
  824. epdl_extent_loading_error_log(ctx, epdl, &header->descr[i], log);
  825. continue;
  826. }
  827. METRIC *metric = mrg_metric_get_and_acquire(main_mrg, &header->descr[i].uuid, (Word_t)ctx);
  828. Word_t metric_id = (Word_t)metric;
  829. if(!metric) {
  830. char log[200 + 1];
  831. snprintfz(log, 200, "page %u (out of %u) has unknown UUID", i, count);
  832. epdl_extent_loading_error_log(ctx, epdl, &header->descr[i], log);
  833. continue;
  834. }
  835. mrg_metric_release(main_mrg, metric);
  836. struct page_details *pd_list = epdl_get_pd_load_link_list_from_metric_start_time(epdl, metric_id, start_time_s);
  837. if(likely(!pd_list))
  838. continue;
  839. VALIDATED_PAGE_DESCRIPTOR vd = validate_extent_page_descr(
  840. &header->descr[i], now_s,
  841. (pd_list) ? pd_list->update_every_s : 0,
  842. have_read_error);
  843. if(worker)
  844. worker_is_busy(UV_EVENT_DBENGINE_EXTENT_PAGE_ALLOCATION);
  845. void *page_data;
  846. if (unlikely(!vd.is_valid)) {
  847. page_data = DBENGINE_EMPTY_PAGE;
  848. stats_load_invalid_page++;
  849. }
  850. else {
  851. if (RRD_NO_COMPRESSION == header->compression_algorithm) {
  852. page_data = dbengine_page_alloc(vd.page_length);
  853. memcpy(page_data, data + payload_offset + page_offset, (size_t) vd.page_length);
  854. stats_load_uncompressed++;
  855. }
  856. else {
  857. if (unlikely(page_offset + vd.page_length > uncompressed_payload_length)) {
  858. char log[200 + 1];
  859. snprintfz(log, 200, "page %u (out of %u) offset %u + page length %zu, "
  860. "exceeds the uncompressed buffer size %u",
  861. i, count, page_offset, vd.page_length, uncompressed_payload_length);
  862. epdl_extent_loading_error_log(ctx, epdl, &header->descr[i], log);
  863. page_data = DBENGINE_EMPTY_PAGE;
  864. stats_load_invalid_page++;
  865. }
  866. else {
  867. page_data = dbengine_page_alloc(vd.page_length);
  868. memcpy(page_data, uncompressed_buf + page_offset, vd.page_length);
  869. stats_load_compressed++;
  870. }
  871. }
  872. }
  873. if(worker)
  874. worker_is_busy(UV_EVENT_DBENGINE_EXTENT_PAGE_POPULATION);
  875. PGC_ENTRY page_entry = {
  876. .hot = false,
  877. .section = (Word_t)ctx,
  878. .metric_id = metric_id,
  879. .start_time_s = vd.start_time_s,
  880. .end_time_s = vd.end_time_s,
  881. .update_every_s = (uint32_t) vd.update_every_s,
  882. .size = (size_t) ((page_data == DBENGINE_EMPTY_PAGE) ? 0 : vd.page_length),
  883. .data = page_data
  884. };
  885. bool added = true;
  886. PGC_PAGE *page = pgc_page_add_and_acquire(main_cache, page_entry, &added);
  887. if (false == added) {
  888. dbengine_page_free(page_data, vd.page_length);
  889. stats_cache_hit_while_inserting++;
  890. stats_data_from_main_cache++;
  891. }
  892. else
  893. stats_data_from_extent++;
  894. struct page_details *pd = pd_list;
  895. do {
  896. if(pd != pd_list)
  897. pgc_page_dup(main_cache, page);
  898. pd->page = page;
  899. pd->page_length = pgc_page_data_size(main_cache, page);
  900. pdc_page_status_set(pd, PDC_PAGE_READY | tags | ((page_data == DBENGINE_EMPTY_PAGE) ? PDC_PAGE_EMPTY : 0));
  901. pd = pd->load.next;
  902. } while(pd);
  903. if(worker)
  904. worker_is_busy(UV_EVENT_DBENGINE_EXTENT_PAGE_LOOKUP);
  905. }
  906. if(stats_data_from_main_cache)
  907. __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_data_source_main_cache, stats_data_from_main_cache, __ATOMIC_RELAXED);
  908. if(cached_extent)
  909. __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_data_source_extent_cache, stats_data_from_extent, __ATOMIC_RELAXED);
  910. else {
  911. __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_data_source_disk, stats_data_from_extent, __ATOMIC_RELAXED);
  912. __atomic_add_fetch(&rrdeng_cache_efficiency_stats.extents_loaded_from_disk, 1, __ATOMIC_RELAXED);
  913. }
  914. if(stats_cache_hit_while_inserting)
  915. __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_load_ok_loaded_but_cache_hit_while_inserting, stats_cache_hit_while_inserting, __ATOMIC_RELAXED);
  916. if(stats_load_compressed)
  917. __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_load_ok_compressed, stats_load_compressed, __ATOMIC_RELAXED);
  918. if(stats_load_uncompressed)
  919. __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_load_ok_uncompressed, stats_load_uncompressed, __ATOMIC_RELAXED);
  920. if(stats_load_invalid_page)
  921. __atomic_add_fetch(&rrdeng_cache_efficiency_stats.pages_load_fail_invalid_page_in_extent, stats_load_invalid_page, __ATOMIC_RELAXED);
  922. if(worker)
  923. worker_is_idle();
  924. extent_buffer_release(eb);
  925. return true;
  926. }
  927. static inline void *datafile_extent_read(struct rrdengine_instance *ctx, uv_file file, unsigned pos, unsigned size_bytes)
  928. {
  929. void *buffer;
  930. uv_fs_t request;
  931. unsigned real_io_size = ALIGN_BYTES_CEILING(size_bytes);
  932. int ret = posix_memalign(&buffer, RRDFILE_ALIGNMENT, real_io_size);
  933. if (unlikely(ret))
  934. fatal("DBENGINE: posix_memalign(): %s", strerror(ret));
  935. uv_buf_t iov = uv_buf_init(buffer, real_io_size);
  936. ret = uv_fs_read(NULL, &request, file, &iov, 1, pos, NULL);
  937. if (unlikely(-1 == ret)) {
  938. ctx_io_error(ctx);
  939. posix_memfree(buffer);
  940. buffer = NULL;
  941. }
  942. else
  943. ctx_io_read_op_bytes(ctx, real_io_size);
  944. uv_fs_req_cleanup(&request);
  945. return buffer;
  946. }
  947. static inline void datafile_extent_read_free(void *buffer) {
  948. posix_memfree(buffer);
  949. }
// Resolve the extent referenced by this EPDL list (extent cache first, disk
// second), populate the main cache with the pages requested from it, then
// fail any remaining unloaded pages and destroy every merged EPDL.
// This is the worker-side entry point for a batched extent read; all EPDLs
// in epdl->query.next share the same datafile/offset/size (asserted below).
void epdl_find_extent_and_populate_pages(struct rrdengine_instance *ctx, EPDL *epdl, bool worker) {
    if(worker)
        worker_is_busy(UV_EVENT_DBENGINE_EXTENT_CACHE_LOOKUP);

    size_t *statistics_counter = NULL;
    PDC_PAGE_STATUS not_loaded_pages_tag = 0, loaded_pages_tag = 0;

    // skip the extent entirely only if EVERY merged query has been cancelled
    bool should_stop = __atomic_load_n(&epdl->pdc->workers_should_stop, __ATOMIC_RELAXED);
    for(EPDL *ep = epdl->query.next; ep ;ep = ep->query.next) {
        internal_fatal(ep->datafile != epdl->datafile, "DBENGINE: datafiles do not match");
        internal_fatal(ep->extent_offset != epdl->extent_offset, "DBENGINE: extent offsets do not match");
        internal_fatal(ep->extent_size != epdl->extent_size, "DBENGINE: extent sizes do not match");
        internal_fatal(ep->file != epdl->file, "DBENGINE: files do not match");

        if(!__atomic_load_n(&ep->pdc->workers_should_stop, __ATOMIC_RELAXED)) {
            should_stop = false;
            break;
        }
    }

    if(unlikely(should_stop)) {
        statistics_counter = &rrdeng_cache_efficiency_stats.pages_load_fail_cancelled;
        not_loaded_pages_tag = PDC_PAGE_CANCELLED;
        goto cleanup;
    }

    bool extent_found_in_cache = false;

    void *extent_compressed_data = NULL;
    // extent cache is keyed by (section=ctx, metric=fileno, start_time=offset)
    PGC_PAGE *extent_cache_page = pgc_page_get_and_acquire(
            extent_cache, (Word_t)ctx,
            (Word_t)epdl->datafile->fileno, (time_t)epdl->extent_offset,
            PGC_SEARCH_EXACT);

    if(extent_cache_page) {
        extent_compressed_data = pgc_page_data(extent_cache_page);
        internal_fatal(epdl->extent_size != pgc_page_data_size(extent_cache, extent_cache_page),
                       "DBENGINE: cache size does not match the expected size");

        loaded_pages_tag |= PDC_PAGE_EXTENT_FROM_CACHE;
        not_loaded_pages_tag |= PDC_PAGE_EXTENT_FROM_CACHE;
        extent_found_in_cache = true;
    }
    else {
        if(worker)
            worker_is_busy(UV_EVENT_DBENGINE_EXTENT_MMAP);

        // read the extent from disk into an aligned, transient buffer
        void *extent_data = datafile_extent_read(ctx, epdl->file, epdl->extent_offset, epdl->extent_size);
        if(extent_data != NULL) {
            // copy into a cache-owned allocation and insert it into the extent cache
            void *copied_extent_compressed_data = dbengine_extent_alloc(epdl->extent_size);
            memcpy(copied_extent_compressed_data, extent_data, epdl->extent_size);
            datafile_extent_read_free(extent_data);

            if(worker)
                worker_is_busy(UV_EVENT_DBENGINE_EXTENT_CACHE_LOOKUP);

            bool added = false;
            extent_cache_page = pgc_page_add_and_acquire(extent_cache, (PGC_ENTRY) {
                    .hot = false,
                    .section = (Word_t) ctx,
                    .metric_id = (Word_t) epdl->datafile->fileno,
                    .start_time_s = (time_t) epdl->extent_offset,
                    .size = epdl->extent_size,
                    .end_time_s = 0,
                    .update_every_s = 0,
                    .data = copied_extent_compressed_data,
            }, &added);

            if (!added) {
                // another thread inserted this extent concurrently - use its copy
                dbengine_extent_free(copied_extent_compressed_data, epdl->extent_size);
                internal_fatal(epdl->extent_size != pgc_page_data_size(extent_cache, extent_cache_page),
                               "DBENGINE: cache size does not match the expected size");
            }

            extent_compressed_data = pgc_page_data(extent_cache_page);

            loaded_pages_tag |= PDC_PAGE_EXTENT_FROM_DISK;
            not_loaded_pages_tag |= PDC_PAGE_EXTENT_FROM_DISK;
        }
    }

    if(extent_compressed_data) {
        // Need to decompress and then process the pagelist
        bool extent_used = epdl_populate_pages_from_extent_data(
                ctx, extent_compressed_data, epdl->extent_size,
                epdl, worker, loaded_pages_tag, extent_found_in_cache);

        if(extent_used) {
            // since the extent was used, all the pages that are not
            // loaded from this extent, were not found in the extent
            not_loaded_pages_tag |= PDC_PAGE_FAILED_NOT_IN_EXTENT;
            statistics_counter = &rrdeng_cache_efficiency_stats.pages_load_fail_not_found;
        }
        else {
            not_loaded_pages_tag |= PDC_PAGE_FAILED_INVALID_EXTENT;
            statistics_counter = &rrdeng_cache_efficiency_stats.pages_load_fail_invalid_extent;
        }
    }
    else {
        // datafile_extent_read() failed - nothing could be mapped
        not_loaded_pages_tag |= PDC_PAGE_FAILED_TO_MAP_EXTENT;
        statistics_counter = &rrdeng_cache_efficiency_stats.pages_load_fail_cant_mmap_extent;
    }

    if(extent_cache_page)
        pgc_page_release(extent_cache, extent_cache_page);

cleanup:
    // remove it from the datafile extent_queries
    // this can be called multiple times safely
    epdl_pending_del(epdl);

    // mark all pending pages as failed
    for(EPDL *ep = epdl; ep ;ep = ep->query.next) {
        epdl_mark_all_not_loaded_pages_as_failed(
                ep, not_loaded_pages_tag, statistics_counter);
    }

    // notify and destroy every merged EPDL; next is cached because
    // epdl_destroy() frees ep
    for(EPDL *ep = epdl, *next = NULL; ep ; ep = next) {
        next = ep->query.next;

        completion_mark_complete_a_job(&ep->pdc->page_completion);
        pdc_release_and_destroy_if_unreferenced(ep->pdc, true, false);

        // Free the Judy that holds the requested pagelist and the extents
        epdl_destroy(ep);
    }

    if(worker)
        worker_is_idle();
}