rrdengineapi.c

// SPDX-License-Identifier: GPL-3.0-or-later
#include "rrdengine.h"

/* Default global database instance */
struct rrdengine_instance multidb_ctx;

int default_rrdeng_page_cache_mb = 32;
int default_rrdeng_disk_quota_mb = 256;
int default_multidb_disk_quota_mb = 256;
/* Default behaviour is to unblock data collection if the page cache is full of dirty pages by dropping metrics */
uint8_t rrdeng_drop_metrics_under_page_cache_pressure = 1;

static inline struct rrdengine_instance *get_rrdeng_ctx_from_host(RRDHOST *host)
{
    return host->rrdeng_ctx;
}

/* This UUID is not unique across hosts */
void rrdeng_generate_legacy_uuid(const char *dim_id, char *chart_id, uuid_t *ret_uuid)
{
    EVP_MD_CTX *evpctx;
    unsigned char hash_value[EVP_MAX_MD_SIZE];
    unsigned int hash_len;

    evpctx = EVP_MD_CTX_create();
    EVP_DigestInit_ex(evpctx, EVP_sha256(), NULL);
    EVP_DigestUpdate(evpctx, dim_id, strlen(dim_id));
    EVP_DigestUpdate(evpctx, chart_id, strlen(chart_id));
    EVP_DigestFinal_ex(evpctx, hash_value, &hash_len);
    EVP_MD_CTX_destroy(evpctx);
    fatal_assert(hash_len > sizeof(uuid_t));
    memcpy(ret_uuid, hash_value, sizeof(uuid_t));
}

/* Transform legacy UUID to be unique across hosts deterministically */
void rrdeng_convert_legacy_uuid_to_multihost(char machine_guid[GUID_LEN + 1], uuid_t *legacy_uuid, uuid_t *ret_uuid)
{
    EVP_MD_CTX *evpctx;
    unsigned char hash_value[EVP_MAX_MD_SIZE];
    unsigned int hash_len;

    evpctx = EVP_MD_CTX_create();
    EVP_DigestInit_ex(evpctx, EVP_sha256(), NULL);
    EVP_DigestUpdate(evpctx, machine_guid, GUID_LEN);
    EVP_DigestUpdate(evpctx, *legacy_uuid, sizeof(uuid_t));
    EVP_DigestFinal_ex(evpctx, hash_value, &hash_len);
    EVP_MD_CTX_destroy(evpctx);
    fatal_assert(hash_len > sizeof(uuid_t));
    memcpy(ret_uuid, hash_value, sizeof(uuid_t));
}
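
/*
 * Illustrative sketch (not part of the engine): how the two helpers above are
 * meant to compose. A dimension's multi-host UUID is derived by hashing the
 * dimension/chart ids into a legacy UUID and then salting it with the host's
 * machine GUID, as rrdeng_metric_init() does below. "rd" and "host" stand for
 * an initialized RRDDIM and its RRDHOST.
 *
 *     uuid_t legacy, multihost;
 *     rrdeng_generate_legacy_uuid(rd->id, rd->rrdset->id, &legacy);
 *     rrdeng_convert_legacy_uuid_to_multihost(host->machine_guid, &legacy, &multihost);
 */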
void rrdeng_metric_init(RRDDIM *rd)
{
    struct page_cache *pg_cache;
    struct rrdengine_instance *ctx;
    uuid_t legacy_uuid;
    uuid_t multihost_legacy_uuid;
    Pvoid_t *PValue;
    struct pg_cache_page_index *page_index = NULL;
    int is_multihost_child = 0;
    RRDHOST *host = rd->rrdset->rrdhost;

    ctx = get_rrdeng_ctx_from_host(host);
    if (unlikely(!ctx)) {
        error("Failed to fetch multidb context");
        return;
    }
    pg_cache = &ctx->pg_cache;

    rrdeng_generate_legacy_uuid(rd->id, rd->rrdset->id, &legacy_uuid);
    if (host != localhost && host->rrdeng_ctx == &multidb_ctx)
        is_multihost_child = 1;

    uv_rwlock_rdlock(&pg_cache->metrics_index.lock);
    PValue = JudyHSGet(pg_cache->metrics_index.JudyHS_array, &legacy_uuid, sizeof(uuid_t));
    if (likely(NULL != PValue)) {
        page_index = *PValue;
    }
    uv_rwlock_rdunlock(&pg_cache->metrics_index.lock);

    if (is_multihost_child || NULL == PValue) {
        /* First time we see the legacy UUID or metric belongs to child host in multi-host DB.
         * Drop legacy support, normal path */
        uv_rwlock_rdlock(&pg_cache->metrics_index.lock);
        PValue = JudyHSGet(pg_cache->metrics_index.JudyHS_array, &rd->state->metric_uuid, sizeof(uuid_t));
        if (likely(NULL != PValue)) {
            page_index = *PValue;
        }
        uv_rwlock_rdunlock(&pg_cache->metrics_index.lock);

        if (NULL == PValue) {
            uv_rwlock_wrlock(&pg_cache->metrics_index.lock);
            PValue = JudyHSIns(&pg_cache->metrics_index.JudyHS_array, &rd->state->metric_uuid, sizeof(uuid_t), PJE0);
            fatal_assert(NULL == *PValue); /* TODO: figure out concurrency model */
            *PValue = page_index = create_page_index(&rd->state->metric_uuid);
            page_index->prev = pg_cache->metrics_index.last_page_index;
            pg_cache->metrics_index.last_page_index = page_index;
            uv_rwlock_wrunlock(&pg_cache->metrics_index.lock);
        }
    } else {
        /* There are legacy UUIDs in the database, implement backward compatibility */
        rrdeng_convert_legacy_uuid_to_multihost(rd->rrdset->rrdhost->machine_guid, &legacy_uuid,
                                                &multihost_legacy_uuid);
        int need_to_store = uuid_compare(rd->state->metric_uuid, multihost_legacy_uuid);
        uuid_copy(rd->state->metric_uuid, multihost_legacy_uuid);
        if (unlikely(need_to_store))
            (void)sql_store_dimension(&rd->state->metric_uuid, rd->rrdset->chart_uuid, rd->id, rd->name,
                                      rd->multiplier, rd->divisor, rd->algorithm);
    }
    rd->state->rrdeng_uuid = &page_index->id;
    rd->state->page_index = page_index;
}
/*
 * Gets a handle for storing metrics to the database.
 * The handle must be released with rrdeng_store_metric_finalize().
 */
void rrdeng_store_metric_init(RRDDIM *rd)
{
    struct rrdeng_collect_handle *handle;
    struct rrdengine_instance *ctx;
    struct pg_cache_page_index *page_index;

    ctx = get_rrdeng_ctx_from_host(rd->rrdset->rrdhost);
    handle = callocz(1, sizeof(struct rrdeng_collect_handle));
    handle->ctx = ctx;
    handle->descr = NULL;
    handle->prev_descr = NULL;
    handle->unaligned_page = 0;
    rd->state->handle = (STORAGE_COLLECT_HANDLE *)handle;
    page_index = rd->state->page_index;
    uv_rwlock_wrlock(&page_index->lock);
    ++page_index->writers;
    uv_rwlock_wrunlock(&page_index->lock);
}
/* The page must be populated and referenced */
static int page_has_only_empty_metrics(struct rrdeng_page_descr *descr)
{
    unsigned i;
    uint8_t has_only_empty_metrics = 1;
    storage_number *page;

    page = descr->pg_cache_descr->page;
    for (i = 0; i < descr->page_length / sizeof(storage_number); ++i) {
        if (SN_EMPTY_SLOT != page[i]) {
            has_only_empty_metrics = 0;
            break;
        }
    }
    return has_only_empty_metrics;
}
void rrdeng_store_metric_flush_current_page(RRDDIM *rd)
{
    struct rrdeng_collect_handle *handle;
    struct rrdengine_instance *ctx;
    struct rrdeng_page_descr *descr;

    handle = (struct rrdeng_collect_handle *)rd->state->handle;
    ctx = handle->ctx;
    if (unlikely(!ctx))
        return;
    descr = handle->descr;
    if (unlikely(NULL == descr)) {
        return;
    }
    if (likely(descr->page_length)) {
        int page_is_empty;

        rrd_stat_atomic_add(&ctx->stats.metric_API_producers, -1);

        if (handle->prev_descr) {
            /* unpin old second page */
            pg_cache_put(ctx, handle->prev_descr);
        }
        page_is_empty = page_has_only_empty_metrics(descr);
        if (page_is_empty) {
            debug(D_RRDENGINE, "Page has empty metrics only, deleting:");
            if (unlikely(debug_flags & D_RRDENGINE))
                print_page_cache_descr(descr);
            pg_cache_put(ctx, descr);
            pg_cache_punch_hole(ctx, descr, 1, 0, NULL);
            handle->prev_descr = NULL;
        } else {
            /*
             * Disable pinning for now as it leads to deadlocks. When a collector stops collecting the extra
             * pinned page eventually gets rotated but it cannot be destroyed due to the extra reference.
             */
            /* added 1 extra reference to keep 2 dirty pages pinned per metric, expected refcnt = 2 */
            /* rrdeng_page_descr_mutex_lock(ctx, descr);
            ret = pg_cache_try_get_unsafe(descr, 0);
            rrdeng_page_descr_mutex_unlock(ctx, descr);
            fatal_assert(1 == ret); */
            rrdeng_commit_page(ctx, descr, handle->page_correlation_id);
            /* handle->prev_descr = descr; */
        }
    } else {
        dbengine_page_free(descr->pg_cache_descr->page);
        rrdeng_destroy_pg_cache_descr(ctx, descr->pg_cache_descr);
        freez(descr);
    }
    handle->descr = NULL;
}
void rrdeng_store_metric_next(RRDDIM *rd, usec_t point_in_time, storage_number number)
{
    struct rrdeng_collect_handle *handle = (struct rrdeng_collect_handle *)rd->state->handle;
    struct rrdengine_instance *ctx;
    struct page_cache *pg_cache;
    struct rrdeng_page_descr *descr;
    storage_number *page;
    uint8_t must_flush_unaligned_page = 0, perfect_page_alignment = 0;

    ctx = handle->ctx;
    pg_cache = &ctx->pg_cache;
    descr = handle->descr;

    if (descr) {
        /* Make alignment decisions */
        if (descr->page_length == rd->rrdset->rrddim_page_alignment) {
            /* this is the leading dimension that defines chart alignment */
            perfect_page_alignment = 1;
        }
        /* is the metric far enough out of alignment with the others? */
        if (unlikely(descr->page_length + sizeof(number) < rd->rrdset->rrddim_page_alignment)) {
            handle->unaligned_page = 1;
            debug(D_RRDENGINE, "Metric page is not aligned with chart:");
            if (unlikely(debug_flags & D_RRDENGINE))
                print_page_cache_descr(descr);
        }
        if (unlikely(handle->unaligned_page &&
                     /* did the other metrics change page? */
                     rd->rrdset->rrddim_page_alignment <= sizeof(number))) {
            debug(D_RRDENGINE, "Flushing unaligned metric page.");
            must_flush_unaligned_page = 1;
            handle->unaligned_page = 0;
        }
    }
    if (unlikely(NULL == descr ||
                 descr->page_length + sizeof(number) > RRDENG_BLOCK_SIZE ||
                 must_flush_unaligned_page)) {
        rrdeng_store_metric_flush_current_page(rd);

        page = rrdeng_create_page(ctx, &rd->state->page_index->id, &descr);
        fatal_assert(page);

        handle->descr = descr;
        handle->page_correlation_id = rrd_atomic_fetch_add(&pg_cache->committed_page_index.latest_corr_id, 1);

        if (0 == rd->rrdset->rrddim_page_alignment) {
            /* this is the leading dimension that defines chart alignment */
            perfect_page_alignment = 1;
        }
    }
    page = descr->pg_cache_descr->page;
    page[descr->page_length / sizeof(number)] = number;
    pg_cache_atomic_set_pg_info(descr, point_in_time, descr->page_length + sizeof(number));

    if (perfect_page_alignment)
        rd->rrdset->rrddim_page_alignment = descr->page_length;
    if (unlikely(INVALID_TIME == descr->start_time)) {
        unsigned long new_metric_API_producers, old_metric_API_max_producers, ret_metric_API_max_producers;

        descr->start_time = point_in_time;
        new_metric_API_producers = rrd_atomic_add_fetch(&ctx->stats.metric_API_producers, 1);
        while (unlikely(new_metric_API_producers > (old_metric_API_max_producers = ctx->metric_API_max_producers))) {
            /* Increase ctx->metric_API_max_producers */
            ret_metric_API_max_producers = ulong_compare_and_swap(&ctx->metric_API_max_producers,
                                                                  old_metric_API_max_producers,
                                                                  new_metric_API_producers);
            if (old_metric_API_max_producers == ret_metric_API_max_producers) {
                /* success */
                break;
            }
        }
        pg_cache_insert(ctx, rd->state->page_index, descr);
    } else {
        pg_cache_add_new_metric_time(rd->state->page_index, descr);
    }
}
/*
 * Releases the database reference from the handle for storing metrics.
 * Returns 1 if it's safe to delete the dimension.
 */
int rrdeng_store_metric_finalize(RRDDIM *rd)
{
    struct rrdeng_collect_handle *handle;
    struct rrdengine_instance *ctx;
    struct pg_cache_page_index *page_index;
    uint8_t can_delete_metric = 0;

    handle = (struct rrdeng_collect_handle *)rd->state->handle;
    ctx = handle->ctx;
    page_index = rd->state->page_index;
    rrdeng_store_metric_flush_current_page(rd);
    if (handle->prev_descr) {
        /* unpin old second page */
        pg_cache_put(ctx, handle->prev_descr);
    }
    uv_rwlock_wrlock(&page_index->lock);
    if (!--page_index->writers && !page_index->page_count) {
        can_delete_metric = 1;
    }
    uv_rwlock_wrunlock(&page_index->lock);
    freez(handle);
    return can_delete_metric;
}
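
/*
 * Illustrative sketch (not part of the engine): the expected collector
 * lifecycle around the three calls above. "rd" stands for a RRDDIM that has
 * already gone through rrdeng_metric_init(); pack_storage_number(), SN_EXISTS
 * and now_realtime_usec() are assumed from the surrounding netdata codebase,
 * and the loop condition and sampling function are hypothetical.
 *
 *     rrdeng_store_metric_init(rd);
 *     while (still_collecting) {                        // hypothetical condition
 *         calculated_number value = collect_sample();   // hypothetical sampler
 *         rrdeng_store_metric_next(rd, now_realtime_usec(),
 *                                  pack_storage_number(value, SN_EXISTS));
 *     }
 *     if (rrdeng_store_metric_finalize(rd)) {
 *         // no writers and no pages left: safe to delete the dimension
 *     }
 */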
/* Returns 1 if the data collection interval is well defined, 0 otherwise */
static int metrics_with_known_interval(struct rrdeng_page_descr *descr)
{
    unsigned page_entries;

    if (unlikely(INVALID_TIME == descr->start_time || INVALID_TIME == descr->end_time))
        return 0;
    page_entries = descr->page_length / sizeof(storage_number);
    if (likely(page_entries > 1)) {
        return 1;
    }
    return 0;
}
/* First 4 bytes of page_info->scratch: the page's data collection interval (dt) in seconds */
static inline uint32_t *pginfo_to_dt(struct rrdeng_page_info *page_info)
{
    return (uint32_t *)&page_info->scratch[0];
}

/* Next 4 bytes of page_info->scratch: the number of valid points in the page */
static inline uint32_t *pginfo_to_points(struct rrdeng_page_info *page_info)
{
    return (uint32_t *)&page_info->scratch[sizeof(uint32_t)];
}
/**
 * Calculates the regions of different data collection intervals in a netdata chart in the time range
 * [start_time,end_time]. This call takes the netdata chart read lock.
 * @param st the netdata chart whose data collection interval boundaries are calculated.
 * @param start_time inclusive starting time in seconds
 * @param end_time inclusive ending time in seconds
 * @param region_info_arrayp It allocates (*region_info_arrayp) and populates it with information of regions of a
 *        reference dimension that have different data collection intervals and overlap with the time range
 *        [start_time,end_time]. The caller must free (*region_info_arrayp) with freez(). If *region_info_arrayp is
 *        set to NULL nothing was allocated.
 * @param max_intervalp is dereferenced and set to be the largest data collection interval of all regions.
 * @return number of regions with different data collection intervals.
 */
unsigned rrdeng_variable_step_boundaries(RRDSET *st, time_t start_time, time_t end_time,
                                         struct rrdeng_region_info **region_info_arrayp, unsigned *max_intervalp,
                                         struct context_param *context_param_list)
{
    struct pg_cache_page_index *page_index;
    struct rrdengine_instance *ctx;
    unsigned pages_nr;
    RRDDIM *rd_iter, *rd;
    struct rrdeng_page_info *page_info_array, *curr, *prev, *old_prev;
    unsigned i, j, page_entries, region_points, page_points, regions, max_interval;
    time_t now;
    usec_t dt, current_position_time, max_time = 0, min_time, curr_time, first_valid_time_in_page;
    struct rrdeng_region_info *region_info_array;
    uint8_t is_first_region_initialized;

    ctx = get_rrdeng_ctx_from_host(st->rrdhost);
    regions = 1;
    *max_intervalp = max_interval = 0;
    region_info_array = NULL;
    *region_info_arrayp = NULL;
    page_info_array = NULL;

    RRDDIM *temp_rd = context_param_list ? context_param_list->rd : NULL;

    rrdset_rdlock(st);
    for (rd_iter = temp_rd ? temp_rd : st->dimensions, rd = NULL, min_time = (usec_t)-1; rd_iter; rd_iter = rd_iter->next) {
        /*
         * Choose oldest dimension as reference. This is not equivalent to the union of all dimensions
         * but it is a best effort approximation with a bias towards older metrics in a chart. It
         * matches netdata behaviour in the sense that dimensions are generally aligned in a chart
         * and older dimensions contain more information about the time range. It does not work well
         * for metrics that have recently stopped being collected.
         */
        curr_time = pg_cache_oldest_time_in_range(ctx, rd_iter->state->rrdeng_uuid,
                                                  start_time * USEC_PER_SEC, end_time * USEC_PER_SEC);
        if (INVALID_TIME != curr_time && curr_time < min_time) {
            rd = rd_iter;
            min_time = curr_time;
        }
    }
    rrdset_unlock(st);
    if (NULL == rd) {
        return 1;
    }
    pages_nr = pg_cache_preload(ctx, rd->state->rrdeng_uuid, start_time * USEC_PER_SEC, end_time * USEC_PER_SEC,
                                &page_info_array, &page_index);
    if (pages_nr) {
        /* conservative allocation, will reduce the size later if necessary */
        region_info_array = mallocz(sizeof(*region_info_array) * pages_nr);
    }
    is_first_region_initialized = 0;
    region_points = 0;

    int is_out_of_order_reported = 0;
    /* pages loop */
    for (i = 0, curr = NULL, prev = NULL; i < pages_nr; ++i) {
        old_prev = prev;
        prev = curr;
        curr = &page_info_array[i];
        *pginfo_to_points(curr) = 0; /* initialize to invalid page */
        *pginfo_to_dt(curr) = 0; /* no known data collection interval yet */
        if (unlikely(INVALID_TIME == curr->start_time || INVALID_TIME == curr->end_time ||
                     curr->end_time < curr->start_time)) {
            info("Ignoring page with invalid timestamps.");
            prev = old_prev;
            continue;
        }
        page_entries = curr->page_length / sizeof(storage_number);
        fatal_assert(0 != page_entries);
        if (likely(1 != page_entries)) {
            dt = (curr->end_time - curr->start_time) / (page_entries - 1);
            *pginfo_to_dt(curr) = ROUND_USEC_TO_SEC(dt);
            if (unlikely(0 == *pginfo_to_dt(curr)))
                *pginfo_to_dt(curr) = 1;
        } else {
            dt = 0;
        }
        for (j = 0, page_points = 0; j < page_entries; ++j) {
            uint8_t is_metric_out_of_order, is_metric_earlier_than_range;

            is_metric_earlier_than_range = 0;
            is_metric_out_of_order = 0;
            current_position_time = curr->start_time + j * dt;
            now = current_position_time / USEC_PER_SEC;
            if (now > end_time) { /* there will be no more pages in the time range */
                break;
            }
            if (now < start_time)
                is_metric_earlier_than_range = 1;
            if (unlikely(current_position_time < max_time)) /* just went back in time */
                is_metric_out_of_order = 1;
            if (is_metric_earlier_than_range || unlikely(is_metric_out_of_order)) {
                if (unlikely(is_metric_out_of_order))
                    is_out_of_order_reported++;
                continue; /* next entry */
            }
            /* here is a valid metric */
            ++page_points;
            region_info_array[regions - 1].points = ++region_points;
            max_time = current_position_time;
            if (1 == page_points)
                first_valid_time_in_page = current_position_time;
            if (unlikely(!is_first_region_initialized)) {
                fatal_assert(1 == regions);
                /* this is the first region */
                region_info_array[0].start_time = current_position_time;
                is_first_region_initialized = 1;
            }
        }
        *pginfo_to_points(curr) = page_points;
        if (0 == page_points) {
            prev = old_prev;
            continue;
        }
        if (unlikely(0 == *pginfo_to_dt(curr))) { /* unknown data collection interval */
            fatal_assert(1 == page_points);

            if (likely(NULL != prev)) { /* get interval from previous page */
                *pginfo_to_dt(curr) = *pginfo_to_dt(prev);
            } else { /* there is no previous page in the query */
                struct rrdeng_page_info db_page_info;

                /* go to database */
                pg_cache_get_filtered_info_prev(ctx, page_index, curr->start_time,
                                                metrics_with_known_interval, &db_page_info);
                if (unlikely(db_page_info.start_time == INVALID_TIME || db_page_info.end_time == INVALID_TIME ||
                             0 == db_page_info.page_length)) { /* nothing in the database, default to update_every */
                    *pginfo_to_dt(curr) = rd->update_every;
                } else {
                    unsigned db_entries;
                    usec_t db_dt;

                    db_entries = db_page_info.page_length / sizeof(storage_number);
                    db_dt = (db_page_info.end_time - db_page_info.start_time) / (db_entries - 1);
                    *pginfo_to_dt(curr) = ROUND_USEC_TO_SEC(db_dt);
                    if (unlikely(0 == *pginfo_to_dt(curr)))
                        *pginfo_to_dt(curr) = 1;
                }
            }
        }
        if (likely(prev) && unlikely(*pginfo_to_dt(curr) != *pginfo_to_dt(prev))) {
            info("Data collection interval change detected in query: %"PRIu32" -> %"PRIu32,
                 *pginfo_to_dt(prev), *pginfo_to_dt(curr));
            region_info_array[regions++ - 1].points -= page_points;
            region_info_array[regions - 1].points = region_points = page_points;
            region_info_array[regions - 1].start_time = first_valid_time_in_page;
        }
        if (*pginfo_to_dt(curr) > max_interval)
            max_interval = *pginfo_to_dt(curr);
        region_info_array[regions - 1].update_every = *pginfo_to_dt(curr);
    }
    if (page_info_array)
        freez(page_info_array);
    if (region_info_array) {
        if (likely(is_first_region_initialized)) {
            /* free unnecessary memory */
            region_info_array = reallocz(region_info_array, sizeof(*region_info_array) * regions);
            *region_info_arrayp = region_info_array;
            *max_intervalp = max_interval;
        } else {
            /* empty result */
            freez(region_info_array);
        }
    }
    if (is_out_of_order_reported)
        info("Ignored %d metrics with out of order timestamp in %u regions.", is_out_of_order_reported, regions);
    return regions;
}
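
/*
 * Illustrative sketch (not part of the engine): consuming the region array
 * produced above. "st", "start_t" and "end_t" stand for a RRDSET and a time
 * range in seconds.
 *
 *     struct rrdeng_region_info *region_info = NULL;
 *     unsigned max_interval;
 *     unsigned regions = rrdeng_variable_step_boundaries(st, start_t, end_t,
 *                                                        &region_info, &max_interval, NULL);
 *     if (region_info) {
 *         for (unsigned r = 0; r < regions; ++r) {
 *             // region_info[r].start_time, .update_every and .points describe
 *             // one run of samples with a constant collection interval
 *         }
 *         freez(region_info);
 *     }
 */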
/*
 * Gets a handle for loading metrics from the database.
 * The handle must be released with rrdeng_load_metric_finalize().
 */
void rrdeng_load_metric_init(RRDDIM *rd, struct rrddim_query_handle *rrdimm_handle, time_t start_time, time_t end_time)
{
    struct rrdeng_query_handle *handle;
    struct rrdengine_instance *ctx;
    unsigned pages_nr;

    ctx = get_rrdeng_ctx_from_host(rd->rrdset->rrdhost);
    rrdimm_handle->start_time = start_time;
    rrdimm_handle->end_time = end_time;
    handle = callocz(1, sizeof(struct rrdeng_query_handle));
    handle->next_page_time = start_time;
    handle->now = start_time;
    handle->position = 0;
    handle->ctx = ctx;
    handle->descr = NULL;
    rrdimm_handle->handle = (STORAGE_QUERY_HANDLE *)handle;
    pages_nr = pg_cache_preload(ctx, rd->state->rrdeng_uuid, start_time * USEC_PER_SEC, end_time * USEC_PER_SEC,
                                NULL, &handle->page_index);
    if (unlikely(NULL == handle->page_index || 0 == pages_nr))
        /* there are no metrics to load */
        handle->next_page_time = INVALID_TIME;
}
static int rrdeng_load_page_next(struct rrddim_query_handle *rrdimm_handle)
{
    struct rrdeng_query_handle *handle = (struct rrdeng_query_handle *)rrdimm_handle->handle;
    struct rrdengine_instance *ctx = handle->ctx;
    struct rrdeng_page_descr *descr = handle->descr;

    uint32_t page_length;
    usec_t page_end_time;
    unsigned position;

    if (likely(descr)) {
        // Drop old page's reference
#ifdef NETDATA_INTERNAL_CHECKS
        rrd_stat_atomic_add(&ctx->stats.metric_API_consumers, -1);
#endif
        pg_cache_put(ctx, descr);
        handle->descr = NULL;
        handle->next_page_time = (handle->page_end_time / USEC_PER_SEC) + 1;
        if (unlikely(handle->next_page_time > rrdimm_handle->end_time))
            return 1;
    }

    usec_t next_page_time = handle->next_page_time * USEC_PER_SEC;
    descr = pg_cache_lookup_next(ctx, handle->page_index, &handle->page_index->id, next_page_time,
                                 rrdimm_handle->end_time * USEC_PER_SEC);
    if (NULL == descr)
        return 1;

#ifdef NETDATA_INTERNAL_CHECKS
    rrd_stat_atomic_add(&ctx->stats.metric_API_consumers, 1);
#endif

    handle->descr = descr;
    pg_cache_atomic_get_pg_info(descr, &page_end_time, &page_length);
    if (unlikely(INVALID_TIME == descr->start_time || INVALID_TIME == page_end_time))
        return 1;

    if (unlikely(descr->start_time != page_end_time && next_page_time > descr->start_time)) {
        // we're in the middle of the page somewhere
        unsigned entries = page_length / sizeof(storage_number);
        position = ((uint64_t)(next_page_time - descr->start_time)) * (entries - 1) /
                   (page_end_time - descr->start_time);
    }
    else
        position = 0;

    handle->page_end_time = page_end_time;
    handle->page_length = page_length;
    handle->page = descr->pg_cache_descr->page;
    usec_t entries = handle->entries = page_length / sizeof(storage_number);
    if (likely(entries > 1))
        handle->dt = (page_end_time - descr->start_time) / (entries - 1);
    else
        handle->dt = 0;

    handle->dt_sec = handle->dt / USEC_PER_SEC;
    handle->position = position;

    return 0;
}
/* Returns the metric and sets its timestamp into current_time */
storage_number rrdeng_load_metric_next(struct rrddim_query_handle *rrdimm_handle, time_t *current_time)
{
    struct rrdeng_query_handle *handle = (struct rrdeng_query_handle *)rrdimm_handle->handle;
    if (unlikely(INVALID_TIME == handle->next_page_time))
        return SN_EMPTY_SLOT;

    struct rrdeng_page_descr *descr = handle->descr;
    unsigned position = handle->position + 1;
    time_t now = handle->now + handle->dt_sec;

    if (unlikely(!descr || position >= handle->entries)) {
        // We need to get a new page
        if (rrdeng_load_page_next(rrdimm_handle)) {
            // next calls will not load any more metrics
            handle->next_page_time = INVALID_TIME;
            return SN_EMPTY_SLOT;
        }
        descr = handle->descr;
        position = handle->position;
        now = (descr->start_time + position * handle->dt) / USEC_PER_SEC;
    }

    storage_number ret = handle->page[position];
    handle->position = position;
    handle->now = now;

    if (unlikely(now >= rrdimm_handle->end_time)) {
        // next calls will not load any more metrics
        handle->next_page_time = INVALID_TIME;
    }

    *current_time = now;
    return ret;
}
int rrdeng_load_metric_is_finished(struct rrddim_query_handle *rrdimm_handle)
{
    struct rrdeng_query_handle *handle = (struct rrdeng_query_handle *)rrdimm_handle->handle;
    return (INVALID_TIME == handle->next_page_time);
}

/*
 * Releases the database reference from the handle for loading metrics.
 */
void rrdeng_load_metric_finalize(struct rrddim_query_handle *rrdimm_handle)
{
    struct rrdeng_query_handle *handle = (struct rrdeng_query_handle *)rrdimm_handle->handle;
    struct rrdengine_instance *ctx = handle->ctx;
    struct rrdeng_page_descr *descr = handle->descr;

    if (descr) {
#ifdef NETDATA_INTERNAL_CHECKS
        rrd_stat_atomic_add(&ctx->stats.metric_API_consumers, -1);
#endif
        pg_cache_put(ctx, descr);
    }
    // whatever is allocated at rrdeng_load_metric_init() should be freed here
    freez(handle);
    rrdimm_handle->handle = NULL;
}
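
/*
 * Illustrative sketch (not part of the engine): the expected reader lifecycle
 * around the query handle API above. "rd" stands for a RRDDIM; error handling
 * is elided.
 *
 *     struct rrddim_query_handle qh;
 *     time_t t;
 *     rrdeng_load_metric_init(rd, &qh, start_t, end_t);
 *     while (!rrdeng_load_metric_is_finished(&qh)) {
 *         storage_number n = rrdeng_load_metric_next(&qh, &t);
 *         if (n != SN_EMPTY_SLOT) {
 *             // consume the sample (t, n)
 *         }
 *     }
 *     rrdeng_load_metric_finalize(&qh);
 */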
time_t rrdeng_metric_latest_time(RRDDIM *rd)
{
    struct pg_cache_page_index *page_index;

    page_index = rd->state->page_index;
    return page_index->latest_time / USEC_PER_SEC;
}

time_t rrdeng_metric_oldest_time(RRDDIM *rd)
{
    struct pg_cache_page_index *page_index;

    page_index = rd->state->page_index;
    return page_index->oldest_time / USEC_PER_SEC;
}

int rrdeng_metric_latest_time_by_uuid(uuid_t *dim_uuid, time_t *first_entry_t, time_t *last_entry_t)
{
    struct page_cache *pg_cache;
    struct rrdengine_instance *ctx;
    Pvoid_t *PValue;
    struct pg_cache_page_index *page_index = NULL;

    ctx = get_rrdeng_ctx_from_host(localhost);
    if (unlikely(!ctx)) {
        error("Failed to fetch multidb context");
        return 1;
    }
    pg_cache = &ctx->pg_cache;

    uv_rwlock_rdlock(&pg_cache->metrics_index.lock);
    PValue = JudyHSGet(pg_cache->metrics_index.JudyHS_array, dim_uuid, sizeof(uuid_t));
    if (likely(NULL != PValue)) {
        page_index = *PValue;
    }
    uv_rwlock_rdunlock(&pg_cache->metrics_index.lock);

    if (likely(page_index)) {
        *first_entry_t = page_index->oldest_time / USEC_PER_SEC;
        *last_entry_t = page_index->latest_time / USEC_PER_SEC;
        return 0;
    }
    return 1;
}
/* Also gets a reference for the page */
void *rrdeng_create_page(struct rrdengine_instance *ctx, uuid_t *id, struct rrdeng_page_descr **ret_descr)
{
    struct rrdeng_page_descr *descr;
    struct page_cache_descr *pg_cache_descr;
    void *page;

    /* TODO: check maximum number of pages in page cache limit */
    descr = pg_cache_create_descr();
    descr->id = id; /* TODO: add page type: metric, log, something? */
    page = dbengine_page_alloc(); /* TODO: add page size */
    rrdeng_page_descr_mutex_lock(ctx, descr);
    pg_cache_descr = descr->pg_cache_descr;
    pg_cache_descr->page = page;
    pg_cache_descr->flags = RRD_PAGE_DIRTY /*| RRD_PAGE_LOCKED */ | RRD_PAGE_POPULATED /* | BEING_COLLECTED */;
    pg_cache_descr->refcnt = 1;

    debug(D_RRDENGINE, "Created new page:");
    if (unlikely(debug_flags & D_RRDENGINE))
        print_page_cache_descr(descr);
    rrdeng_page_descr_mutex_unlock(ctx, descr);
    *ret_descr = descr;
    return page;
}
/* The page must not be empty */
void rrdeng_commit_page(struct rrdengine_instance *ctx, struct rrdeng_page_descr *descr,
                        Word_t page_correlation_id)
{
    struct page_cache *pg_cache = &ctx->pg_cache;
    Pvoid_t *PValue;
    unsigned nr_committed_pages;

    if (unlikely(NULL == descr)) {
        debug(D_RRDENGINE, "%s: page descriptor is NULL, page has already been force-committed.", __func__);
        return;
    }
    fatal_assert(descr->page_length);

    uv_rwlock_wrlock(&pg_cache->committed_page_index.lock);
    PValue = JudyLIns(&pg_cache->committed_page_index.JudyL_array, page_correlation_id, PJE0);
    *PValue = descr;
    nr_committed_pages = ++pg_cache->committed_page_index.nr_committed_pages;
    uv_rwlock_wrunlock(&pg_cache->committed_page_index.lock);

    if (nr_committed_pages >= pg_cache_hard_limit(ctx) / 2) {
        /* over 50% of pages have not been committed yet */
        if (ctx->drop_metrics_under_page_cache_pressure &&
            nr_committed_pages >= pg_cache_committed_hard_limit(ctx)) {
            /* 100% of pages are dirty */
            struct rrdeng_cmd cmd;

            cmd.opcode = RRDENG_INVALIDATE_OLDEST_MEMORY_PAGE;
            rrdeng_enq_cmd(&ctx->worker_config, &cmd);
        } else {
            if (0 == (unsigned long)ctx->stats.pg_cache_over_half_dirty_events) {
                /* only print the first time */
                errno = 0;
                error("Failed to flush dirty buffers quickly enough in dbengine instance \"%s\". "
                      "Metric data at risk of not being stored in the database, "
                      "please reduce disk load or use a faster disk.", ctx->dbfiles_path);
            }
            rrd_stat_atomic_add(&ctx->stats.pg_cache_over_half_dirty_events, 1);
            rrd_stat_atomic_add(&global_pg_cache_over_half_dirty_events, 1);
        }
    }

    pg_cache_put(ctx, descr);
}
/* Gets a reference for the page */
void *rrdeng_get_latest_page(struct rrdengine_instance *ctx, uuid_t *id, void **handle)
{
    struct rrdeng_page_descr *descr;
    struct page_cache_descr *pg_cache_descr;

    debug(D_RRDENGINE, "Reading existing page:");
    descr = pg_cache_lookup(ctx, NULL, id, INVALID_TIME);
    if (NULL == descr) {
        *handle = NULL;
        return NULL;
    }
    *handle = descr;
    pg_cache_descr = descr->pg_cache_descr;
    return pg_cache_descr->page;
}

/* Gets a reference for the page */
void *rrdeng_get_page(struct rrdengine_instance *ctx, uuid_t *id, usec_t point_in_time, void **handle)
{
    struct rrdeng_page_descr *descr;
    struct page_cache_descr *pg_cache_descr;

    debug(D_RRDENGINE, "Reading existing page:");
    descr = pg_cache_lookup(ctx, NULL, id, point_in_time);
    if (NULL == descr) {
        *handle = NULL;
        return NULL;
    }
    *handle = descr;
    pg_cache_descr = descr->pg_cache_descr;
    return pg_cache_descr->page;
}
/*
 * Gathers Database Engine statistics.
 * Careful when modifying this function.
 * You must not change the indices of the statistics or user code will break.
 * You must not exceed RRDENG_NR_STATS or it will crash.
 */
void rrdeng_get_37_statistics(struct rrdengine_instance *ctx, unsigned long long *array)
{
    if (ctx == NULL)
        return;

    struct page_cache *pg_cache = &ctx->pg_cache;

    array[0] = (uint64_t)ctx->stats.metric_API_producers;
    array[1] = (uint64_t)ctx->stats.metric_API_consumers;
    array[2] = (uint64_t)pg_cache->page_descriptors;
    array[3] = (uint64_t)pg_cache->populated_pages;
    array[4] = (uint64_t)pg_cache->committed_page_index.nr_committed_pages;
    array[5] = (uint64_t)ctx->stats.pg_cache_insertions;
    array[6] = (uint64_t)ctx->stats.pg_cache_deletions;
    array[7] = (uint64_t)ctx->stats.pg_cache_hits;
    array[8] = (uint64_t)ctx->stats.pg_cache_misses;
    array[9] = (uint64_t)ctx->stats.pg_cache_backfills;
    array[10] = (uint64_t)ctx->stats.pg_cache_evictions;
    array[11] = (uint64_t)ctx->stats.before_compress_bytes;
    array[12] = (uint64_t)ctx->stats.after_compress_bytes;
    array[13] = (uint64_t)ctx->stats.before_decompress_bytes;
    array[14] = (uint64_t)ctx->stats.after_decompress_bytes;
    array[15] = (uint64_t)ctx->stats.io_write_bytes;
    array[16] = (uint64_t)ctx->stats.io_write_requests;
    array[17] = (uint64_t)ctx->stats.io_read_bytes;
    array[18] = (uint64_t)ctx->stats.io_read_requests;
    array[19] = (uint64_t)ctx->stats.io_write_extent_bytes;
    array[20] = (uint64_t)ctx->stats.io_write_extents;
    array[21] = (uint64_t)ctx->stats.io_read_extent_bytes;
    array[22] = (uint64_t)ctx->stats.io_read_extents;
    array[23] = (uint64_t)ctx->stats.datafile_creations;
    array[24] = (uint64_t)ctx->stats.datafile_deletions;
    array[25] = (uint64_t)ctx->stats.journalfile_creations;
    array[26] = (uint64_t)ctx->stats.journalfile_deletions;
    array[27] = (uint64_t)ctx->stats.page_cache_descriptors;
    array[28] = (uint64_t)ctx->stats.io_errors;
    array[29] = (uint64_t)ctx->stats.fs_errors;
    array[30] = (uint64_t)global_io_errors;
    array[31] = (uint64_t)global_fs_errors;
    array[32] = (uint64_t)rrdeng_reserved_file_descriptors;
    array[33] = (uint64_t)ctx->stats.pg_cache_over_half_dirty_events;
    array[34] = (uint64_t)global_pg_cache_over_half_dirty_events;
    array[35] = (uint64_t)ctx->stats.flushing_pressure_page_deletions;
    array[36] = (uint64_t)global_flushing_pressure_page_deletions;
    fatal_assert(RRDENG_NR_STATS == 37);
}
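
/*
 * Illustrative sketch (not part of the engine): reading a few of the fixed
 * indices filled in above; the index -> statistic mapping is the stable
 * contract of this function.
 *
 *     unsigned long long stats[RRDENG_NR_STATS];
 *     rrdeng_get_37_statistics(ctx, stats);
 *     unsigned long long cache_hits = stats[7];     // pg_cache_hits
 *     unsigned long long cache_misses = stats[8];   // pg_cache_misses
 */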
/* Releases reference to page */
void rrdeng_put_page(struct rrdengine_instance *ctx, void *handle)
{
    pg_cache_put(ctx, (struct rrdeng_page_descr *)handle);
}
/*
 * Returns 0 on success, negative on error
 */
int rrdeng_init(RRDHOST *host, struct rrdengine_instance **ctxp, char *dbfiles_path, unsigned page_cache_mb,
                unsigned disk_space_mb)
{
    struct rrdengine_instance *ctx;
    int error;
    uint32_t max_open_files;

    max_open_files = rlimit_nofile.rlim_cur / 4;

    /* reserve RRDENG_FD_BUDGET_PER_INSTANCE file descriptors for this instance */
    rrd_stat_atomic_add(&rrdeng_reserved_file_descriptors, RRDENG_FD_BUDGET_PER_INSTANCE);
    if (rrdeng_reserved_file_descriptors > max_open_files) {
        error("Exceeded the budget of available file descriptors (%u/%u), cannot create new dbengine instance.",
              (unsigned)rrdeng_reserved_file_descriptors, (unsigned)max_open_files);
        rrd_stat_atomic_add(&global_fs_errors, 1);
        rrd_stat_atomic_add(&rrdeng_reserved_file_descriptors, -RRDENG_FD_BUDGET_PER_INSTANCE);
        return UV_EMFILE;
    }

    if (NULL == ctxp) {
        ctx = &multidb_ctx;
        memset(ctx, 0, sizeof(*ctx));
    } else {
        *ctxp = ctx = callocz(1, sizeof(*ctx));
    }
    ctx->global_compress_alg = RRD_LZ4;
    if (page_cache_mb < RRDENG_MIN_PAGE_CACHE_SIZE_MB)
        page_cache_mb = RRDENG_MIN_PAGE_CACHE_SIZE_MB;
    ctx->max_cache_pages = page_cache_mb * (1048576LU / RRDENG_BLOCK_SIZE);
    /* try to keep 5% of the page cache free */
    ctx->cache_pages_low_watermark = (ctx->max_cache_pages * 95LLU) / 100;
    if (disk_space_mb < RRDENG_MIN_DISK_SPACE_MB)
        disk_space_mb = RRDENG_MIN_DISK_SPACE_MB;
    ctx->max_disk_space = disk_space_mb * 1048576LLU;
    strncpyz(ctx->dbfiles_path, dbfiles_path, sizeof(ctx->dbfiles_path) - 1);
    ctx->dbfiles_path[sizeof(ctx->dbfiles_path) - 1] = '\0';
    if (NULL == host)
        strncpyz(ctx->machine_guid, registry_get_this_machine_guid(), GUID_LEN);
    else
        strncpyz(ctx->machine_guid, host->machine_guid, GUID_LEN);
    ctx->drop_metrics_under_page_cache_pressure = rrdeng_drop_metrics_under_page_cache_pressure;
    ctx->metric_API_max_producers = 0;
    ctx->quiesce = NO_QUIESCE;
    ctx->metalog_ctx = NULL; /* only set this after the metadata log has finished initializing */
    ctx->host = host;

    memset(&ctx->worker_config, 0, sizeof(ctx->worker_config));
    ctx->worker_config.ctx = ctx;
    init_page_cache(ctx);
    init_commit_log(ctx);
    error = init_rrd_files(ctx);
    if (error) {
        goto error_after_init_rrd_files;
    }

    completion_init(&ctx->rrdengine_completion);
    fatal_assert(0 == uv_thread_create(&ctx->worker_config.thread, rrdeng_worker, &ctx->worker_config));
    /* wait for worker thread to initialize */
    completion_wait_for(&ctx->rrdengine_completion);
    completion_destroy(&ctx->rrdengine_completion);
    uv_thread_set_name_np(ctx->worker_config.thread, "LIBUV_WORKER");
    if (ctx->worker_config.error) {
        goto error_after_rrdeng_worker;
    }
    error = metalog_init(ctx);
    if (error) {
        error("Failed to initialize metadata log file event loop.");
        goto error_after_rrdeng_worker;
    }

    return 0;

error_after_rrdeng_worker:
    finalize_rrd_files(ctx);
error_after_init_rrd_files:
    free_page_cache(ctx);
    if (ctx != &multidb_ctx) {
        freez(ctx);
        *ctxp = NULL;
    }
    rrd_stat_atomic_add(&rrdeng_reserved_file_descriptors, -RRDENG_FD_BUDGET_PER_INSTANCE);
    return UV_EIO;
}
/*
 * Returns 0 on success, 1 on error
 */
int rrdeng_exit(struct rrdengine_instance *ctx)
{
    struct rrdeng_cmd cmd;

    if (NULL == ctx) {
        return 1;
    }

    /* TODO: add page to page cache */
    cmd.opcode = RRDENG_SHUTDOWN;
    rrdeng_enq_cmd(&ctx->worker_config, &cmd);

    fatal_assert(0 == uv_thread_join(&ctx->worker_config.thread));

    finalize_rrd_files(ctx);
    //metalog_exit(ctx->metalog_ctx);
    free_page_cache(ctx);

    if (ctx != &multidb_ctx) {
        freez(ctx);
    }
    rrd_stat_atomic_add(&rrdeng_reserved_file_descriptors, -RRDENG_FD_BUDGET_PER_INSTANCE);
    return 0;
}

void rrdeng_prepare_exit(struct rrdengine_instance *ctx)
{
    struct rrdeng_cmd cmd;

    if (NULL == ctx) {
        return;
    }

    completion_init(&ctx->rrdengine_completion);
    cmd.opcode = RRDENG_QUIESCE;
    rrdeng_enq_cmd(&ctx->worker_config, &cmd);

    /* wait for dbengine to quiesce */
    completion_wait_for(&ctx->rrdengine_completion);
    completion_destroy(&ctx->rrdengine_completion);

    //metalog_prepare_exit(ctx->metalog_ctx);
}