rrdengine.h

// SPDX-License-Identifier: GPL-3.0-or-later
#ifndef NETDATA_RRDENGINE_H
#define NETDATA_RRDENGINE_H

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

#include <fcntl.h>
#include <lz4.h>
#include <Judy.h>
#include <openssl/sha.h>
#include <openssl/evp.h>
#include "daemon/common.h"
#include "../rrd.h"
#include "rrddiskprotocol.h"
#include "rrdenginelib.h"
#include "datafile.h"
#include "journalfile.h"
#include "rrdengineapi.h"
#include "pagecache.h"
#include "metric.h"
#include "cache.h"
#include "pdc.h"
#include "page.h"

extern unsigned rrdeng_pages_per_extent;

/* Forward declarations */
struct rrdengine_instance;
struct rrdeng_cmd;

#define MAX_PAGES_PER_EXTENT (64) /* TODO: can go higher only when journal supports bigger than 4KiB transactions */

#define RRDENG_FILE_NUMBER_SCAN_TMPL "%1u-%10u"
#define RRDENG_FILE_NUMBER_PRINT_TMPL "%1.1u-%10.10u"
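/*
 * Illustrative sketch (not part of the original header): the two templates are
 * meant to be paired - format a file number with the PRINT template and parse
 * it back with the SCAN template. The "datafile-" prefix, the ".ndf" extension
 * and the variable names below are assumptions for the example only.
 *
 *   char path[FILENAME_MAX + 1];
 *   snprintfz(path, FILENAME_MAX, "%s/datafile-" RRDENG_FILE_NUMBER_PRINT_TMPL ".ndf",
 *             ctx->config.dbfiles_path, 1U, (unsigned)fileno);
 *
 *   unsigned tier, no;
 *   if(sscanf(name, "datafile-" RRDENG_FILE_NUMBER_SCAN_TMPL ".ndf", &tier, &no) == 2) {
 *       // "name" follows the datafile naming convention
 *   }
 */
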
typedef enum __attribute__ ((__packed__)) {
    // final status for all pages
    // if a page does not have one of these, it is considered unroutable
    PDC_PAGE_READY   = (1 << 0), // ready to be processed (pd->page is not null)
    PDC_PAGE_FAILED  = (1 << 1), // failed to be loaded (pd->page is null)
    PDC_PAGE_SKIP    = (1 << 2), // don't use this page, it is not good for us
    PDC_PAGE_INVALID = (1 << 3), // don't use this page, it is invalid
    PDC_PAGE_EMPTY   = (1 << 4), // the page is empty, does not have any data

    // other statuses for tracking issues
    PDC_PAGE_PREPROCESSED = (1 << 5), // used during preprocessing
    PDC_PAGE_PROCESSED    = (1 << 6), // processed by the query caller
    PDC_PAGE_RELEASED     = (1 << 7), // already released

    // data found in cache (preloaded) or on disk?
    PDC_PAGE_PRELOADED    = (1 << 8), // data found in memory
    PDC_PAGE_DISK_PENDING = (1 << 9), // data needs to be loaded from disk

    // worker related statuses
    PDC_PAGE_FAILED_INVALID_EXTENT      = (1 << 10),
    PDC_PAGE_FAILED_NOT_IN_EXTENT       = (1 << 11),
    PDC_PAGE_FAILED_TO_MAP_EXTENT       = (1 << 12),
    PDC_PAGE_FAILED_TO_ACQUIRE_DATAFILE = (1 << 13),

    PDC_PAGE_EXTENT_FROM_CACHE = (1 << 14),
    PDC_PAGE_EXTENT_FROM_DISK  = (1 << 15),

    PDC_PAGE_CANCELLED = (1 << 16), // the query thread had already left by the time we tried to load the page

    PDC_PAGE_SOURCE_MAIN_CACHE = (1 << 17),
    PDC_PAGE_SOURCE_OPEN_CACHE = (1 << 18),
    PDC_PAGE_SOURCE_JOURNAL_V2 = (1 << 19),
    PDC_PAGE_PRELOADED_PASS4   = (1 << 20),

    // datafile acquired
    PDC_PAGE_DATAFILE_ACQUIRED = (1 << 30),
} PDC_PAGE_STATUS;

#define PDC_PAGE_QUERY_GLOBAL_SKIP_LIST (PDC_PAGE_FAILED | PDC_PAGE_SKIP | PDC_PAGE_INVALID | PDC_PAGE_RELEASED)

typedef struct page_details_control {
    struct rrdengine_instance *ctx;
    struct metric *metric;

    struct completion prep_completion;
    struct completion page_completion;  // sync between the query thread and the workers

    Pvoid_t page_list_JudyL;            // the list of page details

    unsigned completed_jobs;            // the number of jobs completed last time the query thread checked
    bool workers_should_stop;           // true when the query thread left and the workers should stop
    bool prep_done;

    PDC_PAGE_STATUS common_status;
    size_t pages_to_load_from_disk;

    SPINLOCK refcount_spinlock;         // spinlock to protect refcount
    int32_t refcount;                   // the number of workers currently working on this request + 1 for the query thread

    size_t executed_with_gaps;

    time_t start_time_s;
    time_t end_time_s;

    STORAGE_PRIORITY priority;

    time_t optimal_end_time_s;
} PDC;

PDC *pdc_get(void);

struct page_details {
    struct {
        struct rrdengine_datafile *ptr;
        uv_file file;
        unsigned fileno;

        struct {
            uint64_t pos;
            uint32_t bytes;
        } extent;
    } datafile;

    struct pgc_page *page;
    Word_t metric_id;

    time_t first_time_s;
    time_t last_time_s;
    uint32_t update_every_s;

    PDC_PAGE_STATUS status;

    struct {
        struct page_details *prev;
        struct page_details *next;
    } load;
};

struct page_details *page_details_get(void);

#define pdc_page_status_check(pd, flag) (__atomic_load_n(&((pd)->status), __ATOMIC_ACQUIRE) & (flag))
#define pdc_page_status_set(pd, flag)   __atomic_or_fetch(&((pd)->status), flag, __ATOMIC_RELEASE)
#define pdc_page_status_clear(pd, flag) __atomic_and_fetch(&((pd)->status), ~(flag), __ATOMIC_RELEASE)
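/*
 * Illustrative usage sketch (not part of the original header): workers and the
 * query thread may touch the same struct page_details concurrently, so the
 * status flags are always manipulated through these atomic helpers. The
 * variable names below are hypothetical.
 *
 *   struct page_details *pd = ...;
 *
 *   if(!pdc_page_status_check(pd, PDC_PAGE_QUERY_GLOBAL_SKIP_LIST)) {
 *       // not failed/skipped/invalid/released - still worth loading
 *       pdc_page_status_set(pd, PDC_PAGE_DISK_PENDING);
 *   }
 *
 *   // later, once the page has been resolved one way or the other:
 *   pdc_page_status_clear(pd, PDC_PAGE_DISK_PENDING);
 *   pdc_page_status_set(pd, PDC_PAGE_READY);
 */
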
struct jv2_extents_info {
    size_t index;
    uint64_t pos;
    unsigned bytes;
    size_t number_of_pages;
};

struct jv2_metrics_info {
    uuid_t *uuid;
    uint32_t page_list_header;
    time_t first_time_s;
    time_t last_time_s;
    size_t number_of_pages;
    Pvoid_t JudyL_pages_by_start_time;
};

struct jv2_page_info {
    time_t start_time_s;
    time_t end_time_s;
    time_t update_every_s;
    size_t page_length;
    uint32_t extent_index;
    void *custom_data;

    // private
    struct pgc_page *page;
};

typedef enum __attribute__ ((__packed__)) {
    RRDENG_1ST_METRIC_WRITER = (1 << 0),
} RRDENG_COLLECT_HANDLE_OPTIONS;

typedef enum __attribute__ ((__packed__)) {
    RRDENG_PAGE_PAST_COLLECTION     = (1 << 0),
    RRDENG_PAGE_REPEATED_COLLECTION = (1 << 1),
    RRDENG_PAGE_BIG_GAP             = (1 << 2),
    RRDENG_PAGE_GAP                 = (1 << 3),
    RRDENG_PAGE_FUTURE_POINT        = (1 << 4),
    RRDENG_PAGE_CREATED_IN_FUTURE   = (1 << 5),
    RRDENG_PAGE_COMPLETED_IN_FUTURE = (1 << 6),
    RRDENG_PAGE_UNALIGNED           = (1 << 7),
    RRDENG_PAGE_CONFLICT            = (1 << 8),
    RRDENG_PAGE_FULL                = (1 << 9),
    RRDENG_PAGE_COLLECT_FINALIZE    = (1 << 10),
    RRDENG_PAGE_UPDATE_EVERY_CHANGE = (1 << 11),
    RRDENG_PAGE_STEP_TOO_SMALL      = (1 << 12),
    RRDENG_PAGE_STEP_UNALIGNED      = (1 << 13),
} RRDENG_COLLECT_PAGE_FLAGS;

struct rrdeng_collect_handle {
    struct storage_collect_handle common; // has to be first item

    RRDENG_COLLECT_PAGE_FLAGS page_flags;
    RRDENG_COLLECT_HANDLE_OPTIONS options;
    uint8_t type;

    struct rrdengine_instance *ctx;
    struct metric *metric;
    struct pgc_page *pgc_page;
    struct pgd *page_data;
    size_t page_data_size;
    struct pg_alignment *alignment;
    uint32_t page_entries_max;
    uint32_t page_position; // keep track of the current page size, to make sure we don't exceed it
    usec_t page_start_time_ut;
    usec_t page_end_time_ut;
    usec_t update_every_ut;
};

struct rrdeng_query_handle {
    struct metric *metric;
    struct pgc_page *page;
    struct rrdengine_instance *ctx;
    struct pgd_cursor pgdc;
    struct page_details_control *pdc;

    // the request
    time_t start_time_s;
    time_t end_time_s;
    STORAGE_PRIORITY priority;

    // internal data
    time_t now_s;
    time_t dt_s;

    unsigned position;
    unsigned entries;

#ifdef NETDATA_INTERNAL_CHECKS
    usec_t started_time_s;
    pid_t query_pid;
    struct rrdeng_query_handle *prev, *next;
#endif
};

struct rrdeng_query_handle *rrdeng_query_handle_get(void);
void rrdeng_query_handle_release(struct rrdeng_query_handle *handle);

enum rrdeng_opcode {
    /* can be used to return empty status or flush the command queue */
    RRDENG_OPCODE_NOOP = 0,

    RRDENG_OPCODE_QUERY,
    RRDENG_OPCODE_EXTENT_WRITE,
    RRDENG_OPCODE_EXTENT_READ,
    RRDENG_OPCODE_FLUSHED_TO_OPEN,
    RRDENG_OPCODE_DATABASE_ROTATE,
    RRDENG_OPCODE_JOURNAL_INDEX,
    RRDENG_OPCODE_FLUSH_INIT,
    RRDENG_OPCODE_EVICT_INIT,
    RRDENG_OPCODE_CTX_SHUTDOWN,
    RRDENG_OPCODE_CTX_QUIESCE,
    RRDENG_OPCODE_CTX_POPULATE_MRG,
    RRDENG_OPCODE_SHUTDOWN_EVLOOP,
    RRDENG_OPCODE_CLEANUP,

    RRDENG_OPCODE_MAX
};

// WORKERS IDS:
// RRDENG_OPCODE_MAX                     : reserved for the cleanup
// RRDENG_OPCODE_MAX + opcode            : reserved for the callbacks of each opcode
// RRDENG_OPCODE_MAX + RRDENG_OPCODE_MAX : reserved for the timer
#define RRDENG_TIMER_CB                    (RRDENG_OPCODE_MAX + RRDENG_OPCODE_MAX)
#define RRDENG_FLUSH_TRANSACTION_BUFFER_CB (RRDENG_TIMER_CB + 1)
#define RRDENG_OPCODES_WAITING             (RRDENG_TIMER_CB + 2)
#define RRDENG_WORKS_DISPATCHED            (RRDENG_TIMER_CB + 3)
#define RRDENG_WORKS_EXECUTING             (RRDENG_TIMER_CB + 4)
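/*
 * Illustrative sketch (not part of the original header): the dbengine event
 * loop reports its activity per worker job id, and the layout above keeps the
 * ids distinct. Assuming the libnetdata worker API (worker_is_busy()), a
 * request and its completion callback would be charged like this:
 *
 *   worker_is_busy(RRDENG_OPCODE_EXTENT_READ);                     // dispatching the request
 *   ...
 *   worker_is_busy(RRDENG_OPCODE_MAX + RRDENG_OPCODE_EXTENT_READ); // its completion callback
 *   ...
 *   worker_is_busy(RRDENG_TIMER_CB);                               // the periodic timer
 */
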
struct extent_io_data {
    unsigned fileno;
    uv_file file;
    uint64_t pos;
    unsigned bytes;
    uint16_t page_length;
};

struct extent_io_descriptor {
    struct rrdengine_instance *ctx;
    uv_fs_t uv_fs_request;
    uv_buf_t iov;
    uv_file file;
    void *buf;
    struct wal *wal;
    uint64_t pos;
    unsigned bytes;
    struct completion *completion;
    unsigned descr_count;
    struct page_descr_with_data *descr_array[MAX_PAGES_PER_EXTENT];
    struct rrdengine_datafile *datafile;
    struct extent_io_descriptor *next; /* multiple requests to be served by the same cached extent */
};

struct generic_io_descriptor {
    struct rrdengine_instance *ctx;
    uv_fs_t req;
    uv_buf_t iov;
    void *buf;
    void *data;
    uint64_t pos;
    unsigned bytes;
    struct completion *completion;
};

typedef struct wal {
    uint64_t transaction_id;
    void *buf;
    size_t size;
    size_t buf_size;
    struct generic_io_descriptor io_descr;

    struct {
        struct wal *prev;
        struct wal *next;
    } cache;
} WAL;

WAL *wal_get(struct rrdengine_instance *ctx, unsigned size);
void wal_release(WAL *wal);
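/*
 * Illustrative usage sketch (not part of the original header): wal_get() hands
 * out a transaction buffer (recycled through the embedded cache list) stamped
 * with a fresh transaction_id, and wal_release() returns it once the journal
 * entry has been written. The flow below is an assumption for the example only.
 *
 *   WAL *wal = wal_get(ctx, size_of_the_journal_transaction);
 *   // fill wal->buf with up to wal->size bytes of journal payload,
 *   // submit it for writing, then:
 *   wal_release(wal);
 */
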
/*
 * Debug statistics not used by code logic.
 * They only describe operations since DB engine instance load time.
 */
struct rrdengine_statistics {
    rrdeng_stats_t before_decompress_bytes;
    rrdeng_stats_t after_decompress_bytes;
    rrdeng_stats_t before_compress_bytes;
    rrdeng_stats_t after_compress_bytes;

    rrdeng_stats_t io_write_bytes;
    rrdeng_stats_t io_write_requests;
    rrdeng_stats_t io_read_bytes;
    rrdeng_stats_t io_read_requests;

    rrdeng_stats_t datafile_creations;
    rrdeng_stats_t datafile_deletions;
    rrdeng_stats_t journalfile_creations;
    rrdeng_stats_t journalfile_deletions;

    rrdeng_stats_t io_errors;
    rrdeng_stats_t fs_errors;
};

/* I/O errors global counter */
extern rrdeng_stats_t global_io_errors;
/* File-System errors global counter */
extern rrdeng_stats_t global_fs_errors;
/* number of File-Descriptors that have been reserved by dbengine */
extern rrdeng_stats_t rrdeng_reserved_file_descriptors;
/* inability to flush global counters */
extern rrdeng_stats_t global_pg_cache_over_half_dirty_events;
extern rrdeng_stats_t global_flushing_pressure_page_deletions; /* number of deleted pages */

struct rrdengine_instance {
    struct {
        bool legacy;                              // true when the db is autonomous for a single host

        int tier;                                 // the tier of this ctx
        uint8_t page_type;                        // default page type for this context

        uint64_t max_disk_space;                  // the max disk space this ctx is allowed to use
        uint8_t global_compress_alg;              // the wanted compression algorithm

        char dbfiles_path[FILENAME_MAX + 1];
    } config;

    struct {
        uv_rwlock_t rwlock;                       // the linked list of datafiles is protected by this lock
        struct rrdengine_datafile *first;         // oldest first - the newest is ->first->prev
    } datafiles;

    struct {
        RW_SPINLOCK spinlock;
        Pvoid_t JudyL;
    } njfv2idx;

    struct {
        unsigned last_fileno;                     // newest index of datafile and journalfile
        unsigned last_flush_fileno;               // newest index of datafile that has received data

        size_t collectors_running;
        size_t collectors_running_duplicate;
        size_t inflight_queries;                  // the number of queries currently running
        uint64_t current_disk_space;              // the current disk space size used

        uint64_t transaction_id;                  // the transaction id of the next extent flushing

        bool migration_to_v2_running;
        bool now_deleting_files;
        unsigned extents_currently_being_flushed; // non-zero until we commit data to disk (both datafile and journal file)

        time_t first_time_s;
    } atomic;

    struct {
        bool exit_mode;
        bool enabled;                             // when set (before shutdown), queries are prohibited
        struct completion completion;
    } quiesce;

    struct {
        struct {
            size_t size;
            struct completion *array;
        } populate_mrg;

        bool create_new_datafile_pair;
    } loading;

    struct rrdengine_statistics stats;
};

#define ctx_current_disk_space_get(ctx)            __atomic_load_n(&(ctx)->atomic.current_disk_space, __ATOMIC_RELAXED)
#define ctx_current_disk_space_increase(ctx, size) __atomic_add_fetch(&(ctx)->atomic.current_disk_space, size, __ATOMIC_RELAXED)
#define ctx_current_disk_space_decrease(ctx, size) __atomic_sub_fetch(&(ctx)->atomic.current_disk_space, size, __ATOMIC_RELAXED)
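/*
 * Illustrative usage sketch (not part of the original header): the disk space
 * counter is maintained with these relaxed atomics wherever datafiles and
 * journalfiles grow or get deleted, and rotation logic compares it against
 * config.max_disk_space. The byte counts below are hypothetical.
 *
 *   ctx_current_disk_space_increase(ctx, bytes_appended_to_datafile);
 *   ...
 *   if(rrdeng_ctx_exceeded_disk_quota(ctx)) {
 *       // time to rotate: delete the oldest datafile/journalfile pair
 *       ctx_current_disk_space_decrease(ctx, bytes_of_deleted_files);
 *   }
 */
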
static inline void ctx_io_read_op_bytes(struct rrdengine_instance *ctx, size_t bytes) {
    __atomic_add_fetch(&ctx->stats.io_read_bytes, bytes, __ATOMIC_RELAXED);
    __atomic_add_fetch(&ctx->stats.io_read_requests, 1, __ATOMIC_RELAXED);
}

static inline void ctx_io_write_op_bytes(struct rrdengine_instance *ctx, size_t bytes) {
    __atomic_add_fetch(&ctx->stats.io_write_bytes, bytes, __ATOMIC_RELAXED);
    __atomic_add_fetch(&ctx->stats.io_write_requests, 1, __ATOMIC_RELAXED);
}

static inline void ctx_io_error(struct rrdengine_instance *ctx) {
    __atomic_add_fetch(&ctx->stats.io_errors, 1, __ATOMIC_RELAXED);
    rrd_stat_atomic_add(&global_io_errors, 1);
}

static inline void ctx_fs_error(struct rrdengine_instance *ctx) {
    __atomic_add_fetch(&ctx->stats.fs_errors, 1, __ATOMIC_RELAXED);
    rrd_stat_atomic_add(&global_fs_errors, 1);
}

#define ctx_last_fileno_get(ctx) __atomic_load_n(&(ctx)->atomic.last_fileno, __ATOMIC_RELAXED)
#define ctx_last_fileno_increment(ctx) __atomic_add_fetch(&(ctx)->atomic.last_fileno, 1, __ATOMIC_RELAXED)

#define ctx_last_flush_fileno_get(ctx) __atomic_load_n(&(ctx)->atomic.last_flush_fileno, __ATOMIC_RELAXED)

// advance last_flush_fileno monotonically: the CAS loop only replaces the
// stored value when the new fileno is greater than the one currently there
static inline void ctx_last_flush_fileno_set(struct rrdengine_instance *ctx, unsigned fileno) {
    unsigned old_fileno = ctx_last_flush_fileno_get(ctx);

    do {
        if(old_fileno >= fileno)
            return;

    } while(!__atomic_compare_exchange_n(&ctx->atomic.last_flush_fileno, &old_fileno, fileno, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED));
}

#define ctx_is_available_for_queries(ctx) (__atomic_load_n(&(ctx)->quiesce.enabled, __ATOMIC_RELAXED) == false && __atomic_load_n(&(ctx)->quiesce.exit_mode, __ATOMIC_RELAXED) == false)

void *dbengine_extent_alloc(size_t size);
void dbengine_extent_free(void *extent, size_t size);

bool rrdeng_ctx_exceeded_disk_quota(struct rrdengine_instance *ctx);
int init_rrd_files(struct rrdengine_instance *ctx);
void finalize_rrd_files(struct rrdengine_instance *ctx);
bool rrdeng_dbengine_spawn(struct rrdengine_instance *ctx);
void dbengine_event_loop(void *arg);

typedef void (*enqueue_callback_t)(struct rrdeng_cmd *cmd);
typedef void (*dequeue_callback_t)(struct rrdeng_cmd *cmd);

void rrdeng_enqueue_epdl_cmd(struct rrdeng_cmd *cmd);
void rrdeng_dequeue_epdl_cmd(struct rrdeng_cmd *cmd);

typedef struct rrdeng_cmd *(*requeue_callback_t)(void *data);
void rrdeng_req_cmd(requeue_callback_t get_cmd_cb, void *data, STORAGE_PRIORITY priority);

void rrdeng_enq_cmd(struct rrdengine_instance *ctx, enum rrdeng_opcode opcode, void *data,
                    struct completion *completion, enum storage_priority priority,
                    enqueue_callback_t enqueue_cb, dequeue_callback_t dequeue_cb);
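/*
 * Illustrative usage sketch (not part of the original header): callers hand
 * work to the dbengine event loop by enqueueing an opcode; the optional
 * completion lets them block until the loop has serviced it. The priority
 * value and the synchronous wait below are assumptions for the example only.
 *
 *   struct completion completion;
 *   completion_init(&completion);
 *   rrdeng_enq_cmd(ctx, RRDENG_OPCODE_CTX_QUIESCE, NULL, &completion,
 *                  STORAGE_PRIORITY_INTERNAL_DBENGINE, NULL, NULL);
 *   completion_wait_for(&completion);
 *   completion_destroy(&completion);
 */
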
void pdc_route_asynchronously(struct rrdengine_instance *ctx, struct page_details_control *pdc);
void pdc_route_synchronously(struct rrdengine_instance *ctx, struct page_details_control *pdc);

void pdc_acquire(PDC *pdc);
bool pdc_release_and_destroy_if_unreferenced(PDC *pdc, bool worker, bool router);
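/*
 * Illustrative sketch (not part of the original header): a PDC is shared
 * between the query thread and the workers loading its pages, so its lifetime
 * is reference counted (see PDC.refcount above). The flow below is an
 * assumption about how the pieces fit together, for orientation only.
 *
 *   PDC *pdc = pdc_get();            // the query thread holds a reference
 *   pdc_acquire(pdc);                // a worker takes its own reference before touching it
 *   ...
 *   pdc_release_and_destroy_if_unreferenced(pdc, true, false);  // the worker is done
 *   pdc_release_and_destroy_if_unreferenced(pdc, false, false); // the query thread is done;
 *                                    // whoever drops the last reference destroys the PDC
 */
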
uint64_t rrdeng_target_data_file_size(struct rrdengine_instance *ctx);

struct page_descr_with_data *page_descriptor_get(void);

typedef struct validated_page_descriptor {
    time_t start_time_s;
    time_t end_time_s;
    time_t update_every_s;
    size_t page_length;
    size_t point_size;
    size_t entries;
    uint8_t type;
    bool is_valid;
} VALIDATED_PAGE_DESCRIPTOR;

#define page_entries_by_time(start_time_s, end_time_s, update_every_s) \
        ((update_every_s) ? (((end_time_s) - ((start_time_s) - (update_every_s))) / (update_every_s)) : 1)

#define page_entries_by_size(page_length_in_bytes, point_size_in_bytes) \
        ((page_length_in_bytes) / (point_size_in_bytes))
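/*
 * Worked example (not part of the original header): a page whose first point
 * is at start_time_s = 100, last point at end_time_s = 160, with
 * update_every_s = 10, holds
 *
 *   (160 - (100 - 10)) / 10 = 70 / 10 = 7 entries
 *
 * i.e. the points at 100, 110, 120, 130, 140, 150 and 160. For a page of
 * 28 bytes holding 4-byte points, page_entries_by_size() gives 28 / 4 = 7,
 * so the two calculations agree for a valid page.
 */
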
VALIDATED_PAGE_DESCRIPTOR validate_page(uuid_t *uuid,
                                        time_t start_time_s,
                                        time_t end_time_s,
                                        time_t update_every_s,
                                        size_t page_length,
                                        uint8_t page_type,
                                        size_t entries,
                                        time_t now_s,
                                        time_t overwrite_zero_update_every_s,
                                        bool have_read_error,
                                        const char *msg,
                                        RRDENG_COLLECT_PAGE_FLAGS flags);

VALIDATED_PAGE_DESCRIPTOR validate_extent_page_descr(const struct rrdeng_extent_page_descr *descr, time_t now_s, time_t overwrite_zero_update_every_s, bool have_read_error);
void collect_page_flags_to_buffer(BUFFER *wb, RRDENG_COLLECT_PAGE_FLAGS flags);

typedef enum {
    PAGE_IS_IN_THE_PAST   = -1,
    PAGE_IS_IN_RANGE      =  0,
    PAGE_IS_IN_THE_FUTURE =  1,
} TIME_RANGE_COMPARE;

TIME_RANGE_COMPARE is_page_in_time_range(time_t page_first_time_s, time_t page_last_time_s, time_t wanted_start_time_s, time_t wanted_end_time_s);

static inline time_t max_acceptable_collected_time(void) {
    return now_realtime_sec() + 1;
}

void datafile_delete(struct rrdengine_instance *ctx, struct rrdengine_datafile *datafile, bool update_retention, bool worker);

static inline int journal_metric_uuid_compare(const void *key, const void *metric) {
    return uuid_memcmp((uuid_t *)key, &(((struct journal_metric_list *) metric)->uuid));
}
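/*
 * Illustrative usage sketch (not part of the original header): the comparator
 * follows the bsearch() convention (key first, array element second), so it
 * can be used to look a UUID up in the sorted metric list of a v2 journal
 * file. The variable names below are hypothetical.
 *
 *   struct journal_metric_list *metric_list = ...;   // sorted by uuid
 *   size_t entries = ...;
 *
 *   struct journal_metric_list *found =
 *       bsearch(uuid, metric_list, entries,
 *               sizeof(struct journal_metric_list), journal_metric_uuid_compare);
 */
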
#endif /* NETDATA_RRDENGINE_H */