inline dbengine query critical path (#19537)

* inline dbengine query critical path

* more dbengine inlining

* optimize pgc_page_add to optimistically allocate the new entry

* backfill related inlining

* more inlining

* simple re-organization of members of sender_state

* descriptive SSL error logs

* nd-poll() wanted mismatch is now a debug statement

* improve SSL error handling

* restore ssl error handling code

* strict error checking on enable_streaming replication flag

* selected functions flattening

* size replication queue

* fix replication step inicfg parsing; fix local variable exposure

* replication sender code cleanup

* process opcodes every 100ms and reorganize so that the latency per processing can be computed

* more inlining at critical sections

* make replication step 20m

* configure replication prefetch

* use the inicfg range function for libuv threads

* move libuv initialize closer to the beginning

* replication fixes

* replication defaults tuning

* aral to use 2MiB page sizes and nd_mmap() to madvise HUGEPAGE when size matches

* use dedicated aral for replication sender

* tune replication sender to max 6 threads

* aral incoming lock now tries all partitions

* health should be running together with replication

* refcount added refcount_release_and_acquire_for_deletion_advanced()

* minimize aral_lock() use

* aral having page_lock

* aral_page refcount with simple atomics

* cleanup aral
Costa Tsaousis 1 month ago
Parent
Commit
62f1b458e7

+ 4 - 17
src/daemon/config/netdata-conf-global.c

@@ -75,21 +75,10 @@ void netdata_conf_glibc_malloc_initialize(size_t wanted_arenas, size_t trim_thre
 #endif
 }
 
-static void libuv_initialize(void) {
-    libuv_worker_threads = (int)netdata_conf_cpus() * 6;
-
-    if(libuv_worker_threads < MIN_LIBUV_WORKER_THREADS)
-        libuv_worker_threads = MIN_LIBUV_WORKER_THREADS;
-
-    if(libuv_worker_threads > MAX_LIBUV_WORKER_THREADS)
-        libuv_worker_threads = MAX_LIBUV_WORKER_THREADS;
-
-
-    libuv_worker_threads = inicfg_get_number(&netdata_config, CONFIG_SECTION_GLOBAL, "libuv worker threads", libuv_worker_threads);
-    if(libuv_worker_threads < MIN_LIBUV_WORKER_THREADS) {
-        libuv_worker_threads = MIN_LIBUV_WORKER_THREADS;
-        inicfg_set_number(&netdata_config, CONFIG_SECTION_GLOBAL, "libuv worker threads", libuv_worker_threads);
-    }
+void libuv_initialize(void) {
+    libuv_worker_threads = (int)inicfg_get_number_range(
+        &netdata_config, CONFIG_SECTION_GLOBAL, "libuv worker threads",
+        (int)netdata_conf_cpus() * 6, MIN_LIBUV_WORKER_THREADS, MAX_LIBUV_WORKER_THREADS);
 
     char buf[20 + 1];
     snprintfz(buf, sizeof(buf) - 1, "%d", libuv_worker_threads);
@@ -120,8 +109,6 @@ void netdata_conf_section_global(void) {
 
     os_get_system_cpus_uncached();
     os_get_system_pid_max();
-
-    libuv_initialize();
 }
 
 void netdata_conf_section_global_run_as_user(const char **user) {
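
The hunk above replaces the manual min/max clamping of "libuv worker threads" with a single ranged lookup. A minimal sketch of the clamping such a helper performs (illustrative only; the real inicfg_get_number_range() also reads and persists the value in netdata.conf):

    #include <stdint.h>

    // bound a configured value to [min, max]; assumed semantics, not the real inicfg API
    static inline int64_t clamp_config_number(int64_t value, int64_t min, int64_t max) {
        if (value < min) return min;
        if (value > max) return max;
        return value;
    }

    // usage mirroring the new call: default is CPUs * 6, bounded by the libuv limits
    // int threads = (int)clamp_config_number(cpus * 6, MIN_LIBUV_WORKER_THREADS, MAX_LIBUV_WORKER_THREADS);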

+ 1 - 0
src/daemon/config/netdata-conf-global.h

@@ -9,6 +9,7 @@ void netdata_conf_section_global(void);
 void netdata_conf_section_global_run_as_user(const char **user);
 
 size_t netdata_conf_cpus(void);
+void libuv_initialize(void);
 
 void netdata_conf_glibc_malloc_initialize(size_t wanted_arenas, size_t trim_threshold);
 

+ 1 - 0
src/daemon/config/netdata-conf.c

@@ -36,5 +36,6 @@ bool netdata_conf_load(char *filename, char overwrite_used, const char **user) {
 
     netdata_conf_backwards_compatibility();
     netdata_conf_section_global_run_as_user(user);
+    libuv_initialize();
     return ret;
 }

+ 1 - 1
src/daemon/pulse/pulse-daemon-memory.c

@@ -146,7 +146,7 @@ void pulse_daemon_memory_do(bool extended __maybe_unused) {
                               (collected_number)dictionary_stats_memory_total(dictionary_stats_category_functions));
 
         rrddim_set_by_pointer(st_memory, rd_replication,
-                              (collected_number)dictionary_stats_memory_total(dictionary_stats_category_replication) + (collected_number)replication_sender_allocated_memory());
+                              (collected_number)dictionary_stats_memory_total(dictionary_stats_category_replication) + replication_sender_allocated_memory());
 #else
         uint64_t metadata =
             aral_by_size_used_bytes() +

+ 11 - 11
src/daemon/pulse/pulse-db-dbengine.c

@@ -3,7 +3,7 @@
 #define PULSE_INTERNALS 1
 #include "pulse-db-dbengine.h"
 
-size_t pulse_dbengine_total_memory = 0;
+int64_t pulse_dbengine_total_memory = 0;
 
 #if defined(ENABLE_DBENGINE)
 
@@ -660,21 +660,21 @@ void pulse_dbengine_do(bool extended) {
 
     struct rrdeng_buffer_sizes dbmem = rrdeng_pulse_memory_sizes();
 
-    size_t buffers_total_size = dbmem.xt_buf + dbmem.wal;
+    int64_t buffers_total_size = (int64_t)dbmem.xt_buf + (int64_t)dbmem.wal;
 
-    size_t aral_structures_total_size = 0, aral_used_total_size = 0;
-    size_t aral_padding_total_size = 0;
+    int64_t aral_structures_total_size = 0, aral_used_total_size = 0;
+    int64_t aral_padding_total_size = 0;
     for(size_t i = 0; i < RRDENG_MEM_MAX ; i++) {
-        buffers_total_size += aral_free_bytes_from_stats(dbmem.as[i]);
-        aral_structures_total_size += aral_structures_bytes_from_stats(dbmem.as[i]);
-        aral_used_total_size += aral_used_bytes_from_stats(dbmem.as[i]);
-        aral_padding_total_size += aral_padding_bytes_from_stats(dbmem.as[i]);
+        buffers_total_size += (int64_t)aral_free_bytes_from_stats(dbmem.as[i]);
+        aral_structures_total_size += (int64_t)aral_structures_bytes_from_stats(dbmem.as[i]);
+        aral_used_total_size += (int64_t)aral_used_bytes_from_stats(dbmem.as[i]);
+        aral_padding_total_size += (int64_t)aral_padding_bytes_from_stats(dbmem.as[i]);
     }
 
     pulse_dbengine_total_memory =
-        pgc_main_stats.size + (ssize_t)pgc_open_stats.size + pgc_extent_stats.size +
+        pgc_main_stats.size + pgc_open_stats.size + pgc_extent_stats.size +
         mrg_stats.size +
-        buffers_total_size + aral_structures_total_size + aral_padding_total_size + pgd_padding_bytes();
+        buffers_total_size + aral_structures_total_size + aral_padding_total_size + (int64_t)pgd_padding_bytes();
 
     // we need all the above for the total dbengine memory as reported by the non-extended netdata memory chart
     if(!main_cache || !main_mrg || !extended)
@@ -685,7 +685,7 @@ void pulse_dbengine_do(bool extended) {
     dbengine2_cache_statistics_charts(&extent_cache_ptrs, &pgc_extent_stats, &pgc_extent_stats_old, "extent", 135300);
     mrg_get_statistics(main_mrg, &mrg_stats);
 
-    size_t priority = 135000;
+    int priority = 135000;
     {
         static RRDSET *st_pgc_memory = NULL;
         static RRDDIM *rd_pgc_memory_main = NULL;
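
The memory accounting in this file moves from size_t to int64_t. A likely motivation for signed gauges: a transient negative delta (a subtraction observed before the matching addition, or a cast into the signed collected_number used by charts) must not wrap around. A tiny self-contained demonstration of the difference:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        size_t  u = 100;  u -= 150;   // unsigned: wraps to a huge positive number
        int64_t s = 100;  s -= 150;   // signed: briefly negative, recovers on the next add
        printf("unsigned gauge: %zu\nsigned gauge:   %lld\n", u, (long long)s);
        return 0;
    }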

+ 1 - 1
src/daemon/pulse/pulse-db-dbengine.h

@@ -6,7 +6,7 @@
 #include "daemon/common.h"
 
 #if defined(PULSE_INTERNALS)
-extern size_t pulse_dbengine_total_memory;
+extern int64_t pulse_dbengine_total_memory;
 
 #if defined(ENABLE_DBENGINE)
 void pulse_dbengine_do(bool extended);

+ 4 - 4
src/daemon/pulse/pulse-queries.c

@@ -32,22 +32,22 @@ static struct query_statistics {
     PAD64(uint64_t) exporters_db_points_read;
 } query_statistics = { 0 };
 
-void pulse_queries_ml_query_completed(size_t points_read) {
+ALWAYS_INLINE void pulse_queries_ml_query_completed(size_t points_read) {
     __atomic_fetch_add(&query_statistics.ml_queries_made, 1, __ATOMIC_RELAXED);
     __atomic_fetch_add(&query_statistics.ml_db_points_read, points_read, __ATOMIC_RELAXED);
 }
 
-void pulse_queries_exporters_query_completed(size_t points_read) {
+ALWAYS_INLINE void pulse_queries_exporters_query_completed(size_t points_read) {
     __atomic_fetch_add(&query_statistics.exporters_queries_made, 1, __ATOMIC_RELAXED);
     __atomic_fetch_add(&query_statistics.exporters_db_points_read, points_read, __ATOMIC_RELAXED);
 }
 
-void pulse_queries_backfill_query_completed(size_t points_read) {
+ALWAYS_INLINE void pulse_queries_backfill_query_completed(size_t points_read) {
     __atomic_fetch_add(&query_statistics.backfill_queries_made, 1, __ATOMIC_RELAXED);
     __atomic_fetch_add(&query_statistics.backfill_db_points_read, points_read, __ATOMIC_RELAXED);
 }
 
-void pulse_queries_rrdr_query_completed(size_t queries, uint64_t db_points_read, uint64_t result_points_generated, QUERY_SOURCE query_source) {
+ALWAYS_INLINE void pulse_queries_rrdr_query_completed(size_t queries, uint64_t db_points_read, uint64_t result_points_generated, QUERY_SOURCE query_source) {
     switch(query_source) {
         case QUERY_SOURCE_API_DATA:
             __atomic_fetch_add(&query_statistics.api_data_queries_made, queries, __ATOMIC_RELAXED);
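
These counters are bumped on every query, so the functions are now force-inlined; relaxed atomics are enough here because the values are only read for reporting, never for synchronization. A minimal sketch of the same pattern (the ALWAYS_INLINE stand-in below is an assumption, not Netdata's actual macro):

    #include <stddef.h>
    #include <stdint.h>

    #define MY_ALWAYS_INLINE __attribute__((always_inline)) static inline  // sketch only

    static uint64_t queries_made, db_points_read;

    MY_ALWAYS_INLINE void sketch_query_completed(size_t points_read) {
        __atomic_fetch_add(&queries_made, 1, __ATOMIC_RELAXED);
        __atomic_fetch_add(&db_points_read, points_read, __ATOMIC_RELAXED);
    }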

+ 150 - 147
src/database/engine/cache.c

@@ -93,23 +93,23 @@ struct pgc {
         bool use_all_ram;
 
         size_t partitions;
-        size_t clean_size;
+        int64_t clean_size;
         size_t max_dirty_pages_per_call;
         size_t max_pages_per_inline_eviction;
         size_t max_skip_pages_per_inline_eviction;
         size_t max_flushes_inline;
         size_t max_workers_evict_inline;
         size_t additional_bytes_per_page;
-        size_t out_of_memory_protection_bytes;
+        int64_t out_of_memory_protection_bytes;
         free_clean_page_callback pgc_free_clean_cb;
         save_dirty_page_callback pgc_save_dirty_cb;
         save_dirty_init_callback pgc_save_init_cb;
         PGC_OPTIONS options;
 
-        size_t severe_pressure_per1000;
-        size_t aggressive_evict_per1000;
-        size_t healthy_size_per1000;
-        size_t evict_low_threshold_per1000;
+        ssize_t severe_pressure_per1000;
+        ssize_t aggressive_evict_per1000;
+        ssize_t healthy_size_per1000;
+        ssize_t evict_low_threshold_per1000;
 
         dynamic_target_cache_size_callback dynamic_target_size_cb;
         nominal_page_size_callback nominal_page_size_cb;
@@ -130,7 +130,7 @@ struct pgc {
 
     struct {
         SPINLOCK spinlock;
-        size_t per1000;
+        ssize_t per1000;
     } usage;
 
     struct pgc_queue clean;       // LRU is applied here to free memory from the cache
@@ -334,21 +334,21 @@ static inline void pgc_size_histogram_del(PGC *cache, struct pgc_size_histogram
 // ----------------------------------------------------------------------------
 // evictions control
 
-static inline uint64_t pgc_threshold(size_t threshold, uint64_t wanted, uint64_t current, uint64_t clean) {
+static ALWAYS_INLINE int64_t pgc_threshold(ssize_t threshold, int64_t wanted, int64_t current, int64_t clean) {
     if(current < clean)
         current = clean;
 
     if(wanted < current - clean)
         wanted = current - clean;
 
-    uint64_t ret = wanted * threshold / 1000ULL;
+    int64_t ret = wanted * threshold / 1000LL;
     if(ret < current - clean)
         ret = current - clean;
 
     return ret;
 }
 
-static inline size_t cache_usage_per1000(PGC *cache, size_t *size_to_evict) {
+static ssize_t cache_usage_per1000(PGC *cache, int64_t *size_to_evict) {
 
     if(size_to_evict)
         spinlock_lock(&cache->usage.spinlock);
@@ -356,33 +356,33 @@ static inline size_t cache_usage_per1000(PGC *cache, size_t *size_to_evict) {
     else if(!spinlock_trylock(&cache->usage.spinlock))
         return __atomic_load_n(&cache->usage.per1000, __ATOMIC_RELAXED);
 
-    uint64_t wanted_cache_size;
+    int64_t wanted_cache_size;
 
-    const uint64_t dirty = __atomic_load_n(&cache->dirty.stats->size, __ATOMIC_RELAXED);
-    const uint64_t hot = __atomic_load_n(&cache->hot.stats->size, __ATOMIC_RELAXED);
-    const uint64_t clean = __atomic_load_n(&cache->clean.stats->size, __ATOMIC_RELAXED);
-    const uint64_t evicting = __atomic_load_n(&cache->stats.evicting_size, __ATOMIC_RELAXED);
-    const uint64_t flushing = __atomic_load_n(&cache->stats.flushing_size, __ATOMIC_RELAXED);
-    const uint64_t current_cache_size = __atomic_load_n(&cache->stats.size, __ATOMIC_RELAXED);
-    const uint64_t all_pages_size = hot + dirty + clean + evicting + flushing;
-    const uint64_t index = current_cache_size > all_pages_size ? current_cache_size - all_pages_size : 0;
-    const uint64_t referenced_size = __atomic_load_n(&cache->stats.referenced_size, __ATOMIC_RELAXED);
+    const int64_t dirty = __atomic_load_n(&cache->dirty.stats->size, __ATOMIC_RELAXED);
+    const int64_t hot = __atomic_load_n(&cache->hot.stats->size, __ATOMIC_RELAXED);
+    const int64_t clean = __atomic_load_n(&cache->clean.stats->size, __ATOMIC_RELAXED);
+    const int64_t evicting = __atomic_load_n(&cache->stats.evicting_size, __ATOMIC_RELAXED);
+    const int64_t flushing = __atomic_load_n(&cache->stats.flushing_size, __ATOMIC_RELAXED);
+    const int64_t current_cache_size = __atomic_load_n(&cache->stats.size, __ATOMIC_RELAXED);
+    const int64_t all_pages_size = hot + dirty + clean + evicting + flushing;
+    const int64_t index = current_cache_size > all_pages_size ? current_cache_size - all_pages_size : 0;
+    const int64_t referenced_size = __atomic_load_n(&cache->stats.referenced_size, __ATOMIC_RELAXED);
 
     if(cache->config.options & PGC_OPTIONS_AUTOSCALE) {
-        const uint64_t dirty_max = __atomic_load_n(&cache->dirty.stats->max_size, __ATOMIC_RELAXED);
-        const uint64_t hot_max = __atomic_load_n(&cache->hot.stats->max_size, __ATOMIC_RELAXED);
+        const int64_t dirty_max = __atomic_load_n(&cache->dirty.stats->max_size, __ATOMIC_RELAXED);
+        const int64_t hot_max = __atomic_load_n(&cache->hot.stats->max_size, __ATOMIC_RELAXED);
 
         // our promise to users
-        const uint64_t max_size1 = MAX(hot_max, hot) * 2;
+        const int64_t max_size1 = MAX(hot_max, hot) * 2;
 
         // protection against slow flushing
-        const uint64_t max_size2 = hot_max + ((dirty_max * 2 < hot_max * 2 / 3) ? hot_max * 2 / 3 : dirty_max * 2) + index;
+        const int64_t max_size2 = hot_max + ((dirty_max * 2 < hot_max * 2 / 3) ? hot_max * 2 / 3 : dirty_max * 2) + index;
 
         // the final wanted cache size
         wanted_cache_size = MIN(max_size1, max_size2);
 
         if(cache->config.dynamic_target_size_cb) {
-            const uint64_t wanted_cache_size_cb = cache->config.dynamic_target_size_cb();
+            const int64_t wanted_cache_size_cb = cache->config.dynamic_target_size_cb();
             if(wanted_cache_size_cb > wanted_cache_size)
                 wanted_cache_size = wanted_cache_size_cb;
         }
@@ -394,9 +394,9 @@ static inline size_t cache_usage_per1000(PGC *cache, size_t *size_to_evict) {
         wanted_cache_size = hot + dirty + index + cache->config.clean_size;
 
     // calculate the absolute minimum we can go
-    const uint64_t min_cache_size1 = (referenced_size > hot ? referenced_size : hot) + dirty + index;
-    const uint64_t min_cache_size2 = (current_cache_size > clean) ? current_cache_size - clean : min_cache_size1;
-    const uint64_t min_cache_size = MAX(min_cache_size1, min_cache_size2);
+    const int64_t min_cache_size1 = (referenced_size > hot ? referenced_size : hot) + dirty + index;
+    const int64_t min_cache_size2 = (current_cache_size > clean) ? current_cache_size - clean : min_cache_size1;
+    const int64_t min_cache_size = MAX(min_cache_size1, min_cache_size2);
 
     if(cache->config.out_of_memory_protection_bytes) {
         // out of memory protection
@@ -404,10 +404,12 @@ static inline size_t cache_usage_per1000(PGC *cache, size_t *size_to_evict) {
         if(sm.ram_total_bytes) {
             // when the total exists, ram_available_bytes is also right
 
-            const uint64_t min_available = cache->config.out_of_memory_protection_bytes;
-            if (sm.ram_available_bytes < min_available) {
+            const int64_t ram_available_bytes = (int64_t)sm.ram_available_bytes;
+
+            const int64_t min_available = cache->config.out_of_memory_protection_bytes;
+            if (ram_available_bytes < min_available) {
                 // we must shrink
-                uint64_t must_lose = min_available - sm.ram_available_bytes;
+                int64_t must_lose = min_available - ram_available_bytes;
 
                 if(current_cache_size > must_lose)
                     wanted_cache_size = current_cache_size - must_lose;
@@ -416,7 +418,7 @@ static inline size_t cache_usage_per1000(PGC *cache, size_t *size_to_evict) {
             }
             else if(cache->config.use_all_ram) {
                 // we can grow
-                wanted_cache_size = current_cache_size + (sm.ram_available_bytes - min_available);
+                wanted_cache_size = current_cache_size + (ram_available_bytes - min_available);
             }
         }
     }
@@ -429,21 +431,21 @@ static inline size_t cache_usage_per1000(PGC *cache, size_t *size_to_evict) {
     if(unlikely(wanted_cache_size < 65536))
         wanted_cache_size = 65536;
 
-    const size_t per1000 = (size_t)(current_cache_size * 1000ULL / wanted_cache_size);
+    const ssize_t per1000 = (ssize_t)(current_cache_size * 1000LL / wanted_cache_size);
     __atomic_store_n(&cache->usage.per1000, per1000, __ATOMIC_RELAXED);
     __atomic_store_n(&cache->stats.wanted_cache_size, wanted_cache_size, __ATOMIC_RELAXED);
     __atomic_store_n(&cache->stats.current_cache_size, current_cache_size, __ATOMIC_RELAXED);
 
-    uint64_t healthy_target = pgc_threshold(cache->config.healthy_size_per1000, wanted_cache_size, current_cache_size, clean);
+    int64_t healthy_target = pgc_threshold(cache->config.healthy_size_per1000, wanted_cache_size, current_cache_size, clean);
     if(current_cache_size > healthy_target) {
-        uint64_t low_watermark_target = pgc_threshold(cache->config.evict_low_threshold_per1000, wanted_cache_size, current_cache_size, clean);
+        int64_t low_watermark_target = pgc_threshold(cache->config.evict_low_threshold_per1000, wanted_cache_size, current_cache_size, clean);
 
-        uint64_t size_to_evict_now = current_cache_size - low_watermark_target;
+        int64_t size_to_evict_now = current_cache_size - low_watermark_target;
         if(size_to_evict_now > clean)
             size_to_evict_now = clean;
 
         if(size_to_evict)
-            *size_to_evict = (size_t)size_to_evict_now;
+            *size_to_evict = size_to_evict_now;
 
         bool signal = false;
         if(per1000 >= cache->config.severe_pressure_per1000) {
@@ -466,7 +468,7 @@ static inline size_t cache_usage_per1000(PGC *cache, size_t *size_to_evict) {
     return per1000;
 }
 
-static inline bool cache_pressure(PGC *cache, size_t limit) {
+static inline bool cache_pressure(PGC *cache, ssize_t limit) {
     return (cache_usage_per1000(cache, NULL) >= limit);
 }
 
@@ -481,8 +483,8 @@ static bool evict_pages_with_filter(PGC *cache, size_t max_skip, size_t max_evic
 static inline bool flushing_critical(PGC *cache);
 static bool flush_pages(PGC *cache, size_t max_flushes, Word_t section, bool wait, bool all_of_them);
 
-static void evict_pages_inline(PGC *cache, bool on_release) {
-    const size_t per1000 = cache_usage_per1000(cache, NULL);
+static ALWAYS_INLINE void evict_pages_inline(PGC *cache, bool on_release) {
+    const ssize_t per1000 = cache_usage_per1000(cache, NULL);
 
     if(!(cache->config.options & PGC_OPTIONS_EVICT_PAGES_NO_INLINE)) {
         if (per1000 > cache->config.aggressive_evict_per1000 && !on_release) {
@@ -505,15 +507,15 @@ static void evict_pages_inline(PGC *cache, bool on_release) {
     }
 }
 
-static inline void evict_on_clean_page_added(PGC *cache) {
+static ALWAYS_INLINE void evict_on_clean_page_added(PGC *cache) {
     evict_pages_inline(cache, false);
 }
 
-static inline void evict_on_page_release_when_permitted(PGC *cache) {
+static ALWAYS_INLINE void evict_on_page_release_when_permitted(PGC *cache) {
     evict_pages_inline(cache, true);
 }
 
-static inline void flush_inline(PGC *cache, bool on_release) {
+static ALWAYS_INLINE void flush_inline(PGC *cache, bool on_release) {
     if(!(cache->config.options & PGC_OPTIONS_FLUSH_PAGES_NO_INLINE) && flushing_critical(cache)) {
         if (on_release)
             p2_add_fetch(&cache->stats.p2_waste_flush_on_release, 1);
@@ -524,11 +526,11 @@ static inline void flush_inline(PGC *cache, bool on_release) {
     }
 }
 
-static inline void flush_on_page_add(PGC *cache) {
+static ALWAYS_INLINE void flush_on_page_add(PGC *cache) {
     flush_inline(cache, false);
 }
 
-static inline void flush_on_page_hot_release(PGC *cache) {
+static ALWAYS_INLINE void flush_on_page_hot_release(PGC *cache) {
     flush_inline(cache, true);
 }
 
@@ -536,8 +538,6 @@ static inline void flush_on_page_hot_release(PGC *cache) {
 // ----------------------------------------------------------------------------
 // flushing control
 
-static bool flush_pages(PGC *cache, size_t max_flushes, Word_t section, bool wait, bool all_of_them);
-
 static inline bool flushing_critical(PGC *cache) {
     if(unlikely(__atomic_load_n(&cache->dirty.stats->size, __ATOMIC_RELAXED) > __atomic_load_n(&cache->hot.stats->max_size, __ATOMIC_RELAXED))) {
         __atomic_add_fetch(&cache->stats.events_flush_critical, 1, __ATOMIC_RELAXED);
@@ -550,11 +550,25 @@ static inline bool flushing_critical(PGC *cache) {
 // ----------------------------------------------------------------------------
 // Linked list management
 
-static inline void atomic_set_max(size_t *max, size_t desired) {
+static inline void atomic_set_max_size_t(size_t *max, size_t desired) {
     size_t expected;
 
     expected = __atomic_load_n(max, __ATOMIC_RELAXED);
 
+    do {
+
+        if(expected >= desired)
+            return;
+
+    } while(!__atomic_compare_exchange_n(max, &expected, desired,
+                                          false, __ATOMIC_RELAXED, __ATOMIC_RELAXED));
+}
+
+static inline void atomic_set_max_int64_t(int64_t *max, int64_t desired) {
+    int64_t expected;
+
+    expected = __atomic_load_n(max, __ATOMIC_RELAXED);
+
     do {
 
         if(expected >= desired)
@@ -591,25 +605,13 @@ static void pgc_section_pages_static_aral_init(void) {
     spinlock_unlock(&spinlock);
 }
 
-static ALWAYS_INLINE void
-pgc_stats_queue_judy_change(PGC *cache, struct pgc_queue *ll, size_t mem_before_judyl, size_t mem_after_judyl) {
-    if(mem_after_judyl > mem_before_judyl) {
-        __atomic_add_fetch(&ll->stats->size, mem_after_judyl - mem_before_judyl, __ATOMIC_RELAXED);
-        __atomic_add_fetch(&cache->stats.size, mem_after_judyl - mem_before_judyl, __ATOMIC_RELAXED);
-    }
-    else if(mem_after_judyl < mem_before_judyl) {
-        __atomic_sub_fetch(&ll->stats->size, mem_before_judyl - mem_after_judyl, __ATOMIC_RELAXED);
-        __atomic_sub_fetch(&cache->stats.size, mem_before_judyl - mem_after_judyl, __ATOMIC_RELAXED);
-    }
+static ALWAYS_INLINE void pgc_stats_queue_judy_change(PGC *cache, struct pgc_queue *ll, int64_t delta) {
+    __atomic_add_fetch(&ll->stats->size, delta, __ATOMIC_RELAXED);
+    __atomic_add_fetch(&cache->stats.size, delta, __ATOMIC_RELAXED);
 }
 
-static ALWAYS_INLINE void pgc_stats_index_judy_change(PGC *cache, size_t mem_before_judyl, size_t mem_after_judyl) {
-    if(mem_after_judyl > mem_before_judyl) {
-        __atomic_add_fetch(&cache->stats.size, mem_after_judyl - mem_before_judyl, __ATOMIC_RELAXED);
-    }
-    else if(mem_after_judyl < mem_before_judyl) {
-        __atomic_sub_fetch(&cache->stats.size, mem_before_judyl - mem_after_judyl, __ATOMIC_RELAXED);
-    }
+static ALWAYS_INLINE void pgc_stats_index_judy_change(PGC *cache, int64_t delta) {
+    __atomic_add_fetch(&cache->stats.size, delta, __ATOMIC_RELAXED);
 }
 
 static ALWAYS_INLINE void pgc_queue_add(PGC *cache __maybe_unused, struct pgc_queue *q, PGC_PAGE *page, bool having_lock, WAITQ_PRIORITY prio __maybe_unused) {
@@ -624,11 +626,10 @@ static ALWAYS_INLINE void pgc_queue_add(PGC *cache __maybe_unused, struct pgc_qu
     if(q->linked_list_in_sections_judy) {
         // HOT and DIRTY pages end up here.
 
-        size_t mem_before_judyl, mem_after_judyl;
+        JudyAllocThreadPulseReset();
+        int64_t mem_delta = 0;
 
-        mem_before_judyl = JudyLMemUsed(q->sections_judy);
         Pvoid_t *section_pages_pptr = JudyLIns(&q->sections_judy, page->section, PJE0);
-        mem_after_judyl = JudyLMemUsed(q->sections_judy);
 
         struct section_pages *sp = *section_pages_pptr;
         if(!sp) {
@@ -638,9 +639,11 @@ static ALWAYS_INLINE void pgc_queue_add(PGC *cache __maybe_unused, struct pgc_qu
 
             *section_pages_pptr = sp;
 
-            mem_after_judyl += sizeof(struct section_pages);
+            mem_delta += sizeof(struct section_pages);
         }
-        pgc_stats_queue_judy_change(cache, q, mem_before_judyl, mem_after_judyl);
+
+        mem_delta += JudyAllocThreadPulseGetAndReset();
+        pgc_stats_queue_judy_change(cache, q, mem_delta);
 
         sp->entries++;
         sp->size += page->assumed_size;
@@ -670,12 +673,12 @@ static ALWAYS_INLINE void pgc_queue_add(PGC *cache __maybe_unused, struct pgc_qu
         pgc_queue_unlock(cache, q);
 
     size_t entries = __atomic_add_fetch(&q->stats->entries, 1, __ATOMIC_RELAXED);
-    size_t size    = __atomic_add_fetch(&q->stats->size, page->assumed_size, __ATOMIC_RELAXED);
+    int64_t size   = __atomic_add_fetch(&q->stats->size, page->assumed_size, __ATOMIC_RELAXED);
     __atomic_add_fetch(&q->stats->added_entries, 1, __ATOMIC_RELAXED);
     __atomic_add_fetch(&q->stats->added_size, page->assumed_size, __ATOMIC_RELAXED);
 
-    atomic_set_max(&q->stats->max_entries, entries);
-    atomic_set_max(&q->stats->max_size, size);
+    atomic_set_max_size_t(&q->stats->max_entries, entries);
+    atomic_set_max_int64_t(&q->stats->max_size, size);
 
     if(cache->config.stats)
         pgc_size_histogram_add(cache, &q->stats->size_histogram, page);
@@ -711,19 +714,21 @@ static ALWAYS_INLINE void pgc_queue_del(PGC *cache __maybe_unused, struct pgc_qu
         DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(sp->base, page, link.prev, link.next);
 
         if(!sp->base) {
-            size_t mem_before_judyl, mem_after_judyl;
+            JudyAllocThreadPulseReset();
+            int64_t mem_delta = 0;
 
-            mem_before_judyl = JudyLMemUsed(q->sections_judy);
             int rc = JudyLDel(&q->sections_judy, page->section, PJE0);
-            mem_after_judyl = JudyLMemUsed(q->sections_judy);
 
             if(!rc)
                 fatal("DBENGINE CACHE: cannot delete section from Judy LL");
 
             // freez(sp);
             aral_freez(pgc_sections_aral, sp);
-            mem_after_judyl -= sizeof(struct section_pages);
-            pgc_stats_queue_judy_change(cache, q, mem_before_judyl, mem_after_judyl);
+
+            mem_delta -= sizeof(struct section_pages);
+            mem_delta += JudyAllocThreadPulseGetAndReset();
+
+            pgc_stats_queue_judy_change(cache, q, mem_delta);
         }
     }
     else {
@@ -860,7 +865,7 @@ static ALWAYS_INLINE void PGC_REFERENCED_PAGES_PLUS1(PGC *cache, PGC_PAGE *page)
     __atomic_add_fetch(&cache->stats.referenced_size, page->assumed_size, __ATOMIC_RELAXED);
 }
 
-static ALWAYS_INLINE void PGC_REFERENCED_PAGES_MINUS1(PGC *cache, size_t assumed_size) {
+static ALWAYS_INLINE void PGC_REFERENCED_PAGES_MINUS1(PGC *cache, int64_t assumed_size) {
     __atomic_sub_fetch(&cache->stats.referenced_entries, 1, __ATOMIC_RELAXED);
     __atomic_sub_fetch(&cache->stats.referenced_size, assumed_size, __ATOMIC_RELAXED);
 }
@@ -885,7 +890,7 @@ static ALWAYS_INLINE bool page_acquire(PGC *cache, PGC_PAGE *page) {
 static ALWAYS_INLINE void page_release(PGC *cache, PGC_PAGE *page, bool evict_if_necessary) {
     __atomic_add_fetch(&cache->stats.releases, 1, __ATOMIC_RELAXED);
 
-    size_t assumed_size = page->assumed_size; // take the size before we release it
+    int64_t assumed_size = page->assumed_size; // take the size before we release it
 
     if(refcount_release(&page->refcount) == 0) {
         PGC_REFERENCED_PAGES_MINUS1(cache, assumed_size);
@@ -917,7 +922,7 @@ static ALWAYS_INLINE bool non_acquired_page_get_for_deletion___while_having_clea
 static ALWAYS_INLINE bool acquired_page_get_for_deletion_or_release_it(PGC *cache __maybe_unused, PGC_PAGE *page) {
     __atomic_add_fetch(&cache->stats.acquires_for_deletion, 1, __ATOMIC_RELAXED);
 
-    size_t assumed_size = page->assumed_size; // take the size before we release it
+    int64_t assumed_size = page->assumed_size; // take the size before we release it
 
     if(refcount_release_and_acquire_for_deletion(&page->refcount)) {
         PGC_REFERENCED_PAGES_MINUS1(cache, assumed_size);
@@ -1008,26 +1013,20 @@ static void remove_this_page_from_index_unsafe(PGC *cache, PGC_PAGE *page, size_
         fatal("DBENGINE CACHE: page with start time '%ld' of metric '%lu' in section '%lu' should exist, but the index returned a different address.",
               page->start_time_s, page->metric_id, page->section);
 
-    size_t mem_before_judyl = 0, mem_after_judyl = 0;
+    JudyAllocThreadPulseReset();
 
-    mem_before_judyl += JudyLMemUsed(*pages_judy_pptr);
     if(unlikely(!JudyLDel(pages_judy_pptr, page->start_time_s, PJE0)))
         fatal("DBENGINE CACHE: page with start time '%ld' of metric '%lu' in section '%lu' exists, but cannot be deleted.",
               page->start_time_s, page->metric_id, page->section);
-    mem_after_judyl += JudyLMemUsed(*pages_judy_pptr);
 
-    mem_before_judyl += JudyLMemUsed(*metrics_judy_pptr);
     if(!*pages_judy_pptr && !JudyLDel(metrics_judy_pptr, page->metric_id, PJE0))
         fatal("DBENGINE CACHE: metric '%lu' in section '%lu' exists and is empty, but cannot be deleted.",
               page->metric_id, page->section);
-    mem_after_judyl += JudyLMemUsed(*metrics_judy_pptr);
 
-    mem_before_judyl += JudyLMemUsed(cache->index[partition].sections_judy);
     if(!*metrics_judy_pptr && !JudyLDel(&cache->index[partition].sections_judy, page->section, PJE0))
         fatal("DBENGINE CACHE: section '%lu' exists and is empty, but cannot be deleted.", page->section);
-    mem_after_judyl += JudyLMemUsed(cache->index[partition].sections_judy);
 
-    pgc_stats_index_judy_change(cache, mem_before_judyl, mem_after_judyl);
+    pgc_stats_index_judy_change(cache, JudyAllocThreadPulseGetAndReset());
 
     pointer_del(cache, page);
 }
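
The hunks above drop the JudyLMemUsed() before/after snapshots in favor of JudyAllocThreadPulseReset() / JudyAllocThreadPulseGetAndReset(), which report the net bytes the Judy allocator wrappers allocated on the calling thread since the last reset. A sketch of the assumed mechanism (not Netdata's actual implementation):

    #include <stdint.h>

    // per-thread net allocation counter maintained by the Judy malloc/free wrappers
    static __thread int64_t judy_alloc_pulse;

    static void alloc_pulse_account(int64_t bytes) { judy_alloc_pulse += bytes; } // +size on alloc, -size on free
    static void alloc_pulse_reset(void)            { judy_alloc_pulse = 0; }
    static int64_t alloc_pulse_get_and_reset(void) {
        int64_t delta = judy_alloc_pulse;
        judy_alloc_pulse = 0;
        return delta;
    }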
@@ -1067,7 +1066,7 @@ static inline bool make_acquired_page_clean_and_evict_or_page_release(PGC *cache
 
 // returns true, when there is potentially more work to do
 static bool evict_pages_with_filter(PGC *cache, size_t max_skip, size_t max_evict, bool wait, bool all_of_them, evict_filter filter, void *data) {
-    size_t per1000 = cache_usage_per1000(cache, NULL);
+    ssize_t per1000 = cache_usage_per1000(cache, NULL);
 
     if(!all_of_them && per1000 < cache->config.healthy_size_per1000)
         // don't bother - not enough to do anything
@@ -1101,7 +1100,7 @@ static bool evict_pages_with_filter(PGC *cache, size_t max_skip, size_t max_evic
     size_t max_pages_to_evict = 0;
 
     do {
-        size_t max_size_to_evict = 0;
+        int64_t max_size_to_evict = 0;
         if (unlikely(all_of_them)) {
             // evict them all
             max_size_to_evict = SIZE_MAX;
@@ -1165,7 +1164,7 @@ static bool evict_pages_with_filter(PGC *cache, size_t max_skip, size_t max_evic
 
         // find a page to evict
         PGC_PAGE *pages_to_evict = NULL;
-        size_t pages_to_evict_size = 0;
+        int64_t pages_to_evict_size = 0;
         size_t pages_to_evict_count = 0;
         for(PGC_PAGE *page = cache->clean.base, *next = NULL, *first_page_we_relocated = NULL; page ; page = next) {
             next = page->link.next;
@@ -1292,7 +1291,7 @@ static bool evict_pages_with_filter(PGC *cache, size_t max_skip, size_t max_evic
 
                         timing_dbengine_evict_step(TIMING_STEP_DBENGINE_EVICT_FREE_LOOP);
 
-                        size_t page_size = page->assumed_size;
+                        int64_t page_size = page->assumed_size;
                         free_this_page(cache, page, partition);
 
                         timing_dbengine_evict_step(TIMING_STEP_DBENGINE_EVICT_FREE_PAGE);
@@ -1313,7 +1312,7 @@ static bool evict_pages_with_filter(PGC *cache, size_t max_skip, size_t max_evic
                 // just one page to be evicted
                 PGC_PAGE *page = pages_to_evict;
 
-                size_t page_size = page->assumed_size;
+                int64_t page_size = page->assumed_size;
 
                 size_t partition = pgc_indexing_partition(cache, page->metric_id);
                 pgc_index_write_lock(cache, partition);
@@ -1354,7 +1353,7 @@ premature_exit:
     return stopped_before_finishing;
 }
 
-static PGC_PAGE *page_add(PGC *cache, PGC_ENTRY *entry, bool *added) {
+static PGC_PAGE *pgc_page_add(PGC *cache, PGC_ENTRY *entry, bool *added) {
     internal_fatal(entry->start_time_s < 0 || entry->end_time_s < 0,
                    "DBENGINE CACHE: timestamps are negative");
 
@@ -1364,7 +1363,31 @@ static PGC_PAGE *page_add(PGC *cache, PGC_ENTRY *entry, bool *added) {
 
 #ifdef PGC_WITH_ARAL
     PGC_PAGE *allocation = aral_mallocz(cache->index[partition].aral);
+#else
+    PGC_PAGE *allocation = mallocz(sizeof(PGC_PAGE) + cache->config.additional_bytes_per_page);
 #endif
+    
+    allocation->refcount = 1;
+    allocation->accesses = (entry->hot) ? 0 : 1;
+    allocation->flags = 0;
+    allocation->section = entry->section;
+    allocation->metric_id = entry->metric_id;
+    allocation->start_time_s = entry->start_time_s;
+    allocation->end_time_s = entry->end_time_s,
+    allocation->update_every_s = entry->update_every_s,
+    allocation->data = entry->data;
+    allocation->assumed_size = page_assumed_size(cache, entry->size);
+    spinlock_init(&allocation->transition_spinlock);
+    allocation->link.prev = NULL;
+    allocation->link.next = NULL;
+    
+    if(cache->config.additional_bytes_per_page) {
+        if(entry->custom_data)
+            memcpy(allocation->custom_data, entry->custom_data, cache->config.additional_bytes_per_page);
+        else
+            memset(allocation->custom_data, 0, cache->config.additional_bytes_per_page);
+    }
+    
     PGC_PAGE *page;
     size_t spins = 0;
 
@@ -1379,58 +1402,29 @@ static PGC_PAGE *page_add(PGC *cache, PGC_ENTRY *entry, bool *added) {
 
         pgc_index_write_lock(cache, partition);
 
-        size_t mem_before_judyl = 0, mem_after_judyl = 0;
+        JudyAllocThreadPulseReset();
 
-        mem_before_judyl += JudyLMemUsed(cache->index[partition].sections_judy);
         Pvoid_t *metrics_judy_pptr = JudyLIns(&cache->index[partition].sections_judy, entry->section, PJE0);
         if(unlikely(!metrics_judy_pptr || metrics_judy_pptr == PJERR))
             fatal("DBENGINE CACHE: corrupted sections judy array");
-        mem_after_judyl += JudyLMemUsed(cache->index[partition].sections_judy);
 
-        mem_before_judyl += JudyLMemUsed(*metrics_judy_pptr);
         Pvoid_t *pages_judy_pptr = JudyLIns(metrics_judy_pptr, entry->metric_id, PJE0);
         if(unlikely(!pages_judy_pptr || pages_judy_pptr == PJERR))
             fatal("DBENGINE CACHE: corrupted pages judy array");
-        mem_after_judyl += JudyLMemUsed(*metrics_judy_pptr);
 
-        mem_before_judyl += JudyLMemUsed(*pages_judy_pptr);
         Pvoid_t *page_ptr = JudyLIns(pages_judy_pptr, entry->start_time_s, PJE0);
         if(unlikely(!page_ptr || page_ptr == PJERR))
             fatal("DBENGINE CACHE: corrupted page in judy array");
-        mem_after_judyl += JudyLMemUsed(*pages_judy_pptr);
 
-        pgc_stats_index_judy_change(cache, mem_before_judyl, mem_after_judyl);
+        pgc_stats_index_judy_change(cache, JudyAllocThreadPulseGetAndReset());
 
         page = *page_ptr;
 
         if (likely(!page)) {
-#ifdef PGC_WITH_ARAL
+            // consume it
             page = allocation;
             allocation = NULL;
-#else
-            page = mallocz(sizeof(PGC_PAGE) + cache->config.additional_bytes_per_page);
-#endif
-            page->refcount = 1;
-            page->accesses = (entry->hot) ? 0 : 1;
-            page->flags = 0;
-            page->section = entry->section;
-            page->metric_id = entry->metric_id;
-            page->start_time_s = entry->start_time_s;
-            page->end_time_s = entry->end_time_s,
-            page->update_every_s = entry->update_every_s,
-            page->data = entry->data;
-            page->assumed_size = page_assumed_size(cache, entry->size);
-            spinlock_init(&page->transition_spinlock);
-            page->link.prev = NULL;
-            page->link.next = NULL;
-
-            if(cache->config.additional_bytes_per_page) {
-                if(entry->custom_data)
-                    memcpy(page->custom_data, entry->custom_data, cache->config.additional_bytes_per_page);
-                else
-                    memset(page->custom_data, 0, cache->config.additional_bytes_per_page);
-            }
-
+            
             // put it in the index
             *page_ptr = page;
             pointer_add(cache, page);
@@ -1471,10 +1465,13 @@ static PGC_PAGE *page_add(PGC *cache, PGC_ENTRY *entry, bool *added) {
 
     } while(!page);
 
+    if(allocation) {
 #ifdef PGC_WITH_ARAL
-    if(allocation)
         aral_freez(cache->index[partition].aral, allocation);
+#else
+        freez(allocation);
 #endif
+    }
 
     if(spins > 1)
         p2_add_fetch(&cache->stats.p2_waste_insert_spins, spins - 1);
@@ -1734,10 +1731,16 @@ static bool flush_pages(PGC *cache, size_t max_flushes, Word_t section, bool wai
 
         PGC_ENTRY array[optimal_flush_size];
         PGC_PAGE *pages[optimal_flush_size];
-        size_t pages_added = 0, pages_added_size = 0;
-        size_t pages_removed_dirty = 0, pages_removed_dirty_size = 0;
-        size_t pages_cancelled = 0, pages_cancelled_size = 0;
-        size_t pages_made_clean = 0, pages_made_clean_size = 0;
+
+        size_t pages_added = 0,
+               pages_removed_dirty = 0,
+               pages_cancelled = 0,
+               pages_made_clean = 0;
+
+        int64_t pages_added_size = 0,
+                pages_removed_dirty_size = 0,
+                pages_cancelled_size = 0,
+                pages_made_clean_size = 0;
 
         PGC_PAGE *page = sp->base;
         while (page && pages_added < optimal_flush_size) {
@@ -1924,7 +1927,7 @@ static void *pgc_evict_thread(void *ptr) {
         if (nd_thread_signaled_to_cancel())
             break;
 
-        size_t size_to_evict = 0;
+        int64_t size_to_evict = 0;
         bool system_cleanup = false;
         if(cache_usage_per1000(cache, &size_to_evict) > cache->config.aggressive_evict_per1000)
             system_cleanup = true;
@@ -1986,7 +1989,7 @@ PGC *pgc_create(const char *name,
     cache->config.pgc_save_dirty_cb             = pgc_save_dirty_cb;
 
     // eviction strategy
-    cache->config.clean_size                    = (clean_size_bytes < 1 * 1024 * 1024) ? 1 * 1024 * 1024 : clean_size_bytes;
+    cache->config.clean_size                    = (clean_size_bytes < 1 * 1024 * 1024) ? 1 * 1024 * 1024 : (int64_t)clean_size_bytes;
     cache->config.pgc_free_clean_cb             = pgc_free_cb;
     cache->config.max_workers_evict_inline      = max_inline_evictors;
     cache->config.max_pages_per_inline_eviction = max_pages_per_inline_eviction;
@@ -2000,7 +2003,7 @@ PGC *pgc_create(const char *name,
 
     // use all ram and protection from out of memory
     cache->config.use_all_ram                       = dbengine_use_all_ram_for_caches;
-    cache->config.out_of_memory_protection_bytes    = dbengine_out_of_memory_protection;
+    cache->config.out_of_memory_protection_bytes    = (int64_t)dbengine_out_of_memory_protection;
 
     // partitions
     if(partitions == 0) partitions  = netdata_conf_cpus();
@@ -2115,11 +2118,11 @@ void pgc_destroy(PGC *cache) {
     }
 }
 
-PGC_PAGE *pgc_page_add_and_acquire(PGC *cache, PGC_ENTRY entry, bool *added) {
-    return page_add(cache, &entry, added);
+ALWAYS_INLINE PGC_PAGE *pgc_page_add_and_acquire(PGC *cache, PGC_ENTRY entry, bool *added) {
+    return pgc_page_add(cache, &entry, added);
 }
 
-PGC_PAGE *pgc_page_dup(PGC *cache, PGC_PAGE *page) {
+ALWAYS_INLINE PGC_PAGE *pgc_page_dup(PGC *cache, PGC_PAGE *page) {
     if(!page_acquire(cache, page))
         fatal("DBENGINE CACHE: tried to dup a page that is not acquired!");
 
@@ -2130,7 +2133,7 @@ ALWAYS_INLINE void pgc_page_release(PGC *cache, PGC_PAGE *page) {
     page_release(cache, page, is_page_clean(page));
 }
 
-void pgc_page_hot_to_dirty_and_release(PGC *cache, PGC_PAGE *page, bool never_flush) {
+ALWAYS_INLINE void pgc_page_hot_to_dirty_and_release(PGC *cache, PGC_PAGE *page, bool never_flush) {
     p2_add_fetch(&cache->stats.p2_workers_hot2dirty, 1);
 
 //#ifdef NETDATA_INTERNAL_CHECKS
@@ -2240,12 +2243,12 @@ bool pgc_is_page_clean(PGC_PAGE *page) {
 
 void pgc_reset_hot_max(PGC *cache) {
     size_t entries = __atomic_load_n(&cache->hot.stats->entries, __ATOMIC_RELAXED);
-    size_t size = __atomic_load_n(&cache->hot.stats->size, __ATOMIC_RELAXED);
+    int64_t size = __atomic_load_n(&cache->hot.stats->size, __ATOMIC_RELAXED);
 
     __atomic_store_n(&cache->hot.stats->max_entries, entries, __ATOMIC_RELAXED);
     __atomic_store_n(&cache->hot.stats->max_size, size, __ATOMIC_RELAXED);
 
-    size_t size_to_evict = 0;
+    int64_t size_to_evict = 0;
     cache_usage_per1000(cache, &size_to_evict);
     evict_pages(cache, 0, 0, true, false);
 }
@@ -2255,7 +2258,7 @@ void pgc_set_dynamic_target_cache_size_callback(PGC *cache, dynamic_target_cache
     cache->config.out_of_memory_protection_bytes = 0;
     cache->config.use_all_ram = false;
 
-    size_t size_to_evict = 0;
+    int64_t size_to_evict = 0;
     cache_usage_per1000(cache, &size_to_evict);
     evict_pages(cache, 0, 0, true, false);
 }
@@ -2264,11 +2267,11 @@ void pgc_set_nominal_page_size_callback(PGC *cache, nominal_page_size_callback c
     cache->config.nominal_page_size_cb = callback;
 }
 
-size_t pgc_get_current_cache_size(PGC *cache) {
+int64_t pgc_get_current_cache_size(PGC *cache) {
     return __atomic_load_n(&cache->stats.current_cache_size, __ATOMIC_RELAXED);
 }
 
-size_t pgc_get_wanted_cache_size(PGC *cache) {
+int64_t pgc_get_wanted_cache_size(PGC *cache) {
     return __atomic_load_n(&cache->stats.wanted_cache_size, __ATOMIC_RELAXED);
 }
 
@@ -2307,13 +2310,13 @@ void pgc_page_hot_set_end_time_s(PGC *cache __maybe_unused, PGC_PAGE *page, time
         if(queue_stats && cache->config.stats)
             pgc_size_histogram_del(cache, &queue_stats->size_histogram, page);
 
-        size_t old_assumed_size = page->assumed_size;
+        int64_t old_assumed_size = page->assumed_size;
 
         size_t old_size = page_size_from_assumed_size(cache, old_assumed_size);
         size_t size = old_size + additional_bytes;
         page->assumed_size = page_assumed_size(cache, size);
 
-        size_t delta = page->assumed_size - old_assumed_size;
+        int64_t delta = page->assumed_size - old_assumed_size;
         __atomic_add_fetch(&cache->stats.size, delta, __ATOMIC_RELAXED);
         __atomic_add_fetch(&cache->stats.added_size, delta, __ATOMIC_RELAXED);
         __atomic_add_fetch(&cache->stats.referenced_size, delta, __ATOMIC_RELAXED);
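
The main behavioral change in this file is pgc_page_add(): the page is now allocated and fully initialized before the index write-lock is taken, consumed if the index slot turns out to be empty, and freed after the loop if another inserter won the race. A compact, self-contained sketch of that optimistic-allocation pattern (placeholder types and a plain mutex instead of the PGC index and its partitioned locks):

    #include <pthread.h>
    #include <stdlib.h>

    typedef struct sketch_page { int ready; } sketch_page;

    static pthread_mutex_t index_lock = PTHREAD_MUTEX_INITIALIZER;
    static sketch_page *index_slot;   // stand-in for the Judy index entry

    sketch_page *sketch_page_add(void) {
        // allocate and initialize outside the lock (optimistic)
        sketch_page *candidate = calloc(1, sizeof(*candidate));
        candidate->ready = 1;

        pthread_mutex_lock(&index_lock);
        sketch_page *winner = index_slot ? index_slot : (index_slot = candidate);
        pthread_mutex_unlock(&index_lock);

        if (winner != candidate)
            free(candidate);          // lost the race: discard the pre-built page
        return winner;
    }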

+ 18 - 18
src/database/engine/cache.h

@@ -51,36 +51,36 @@ struct pgc_queue_statistics {
     struct pgc_size_histogram size_histogram;
 
     PAD64(size_t) entries;
-    PAD64(size_t) size;
+    PAD64(int64_t) size;
 
     PAD64(size_t) max_entries;
-    PAD64(size_t) max_size;
+    PAD64(int64_t) max_size;
 
     PAD64(size_t) added_entries;
-    PAD64(size_t) added_size;
+    PAD64(int64_t) added_size;
 
     PAD64(size_t) removed_entries;
-    PAD64(size_t) removed_size;
+    PAD64(int64_t) removed_size;
 };
 
 struct pgc_statistics {
-    PAD64(size_t) wanted_cache_size;
-    PAD64(size_t) current_cache_size;
+    PAD64(int64_t) wanted_cache_size;
+    PAD64(int64_t) current_cache_size;
 
     // ----------------------------------------------------------------------------------------------------------------
     // volume
 
     PAD64(size_t) entries;                 // all the entries (includes clean, dirty, hot)
-    PAD64(size_t) size;                    // all the entries (includes clean, dirty, hot)
+    PAD64(int64_t) size;                   // all the entries (includes clean, dirty, hot)
 
     PAD64(size_t) referenced_entries;      // all the entries currently referenced
-    PAD64(size_t) referenced_size;         // all the entries currently referenced
+    PAD64(int64_t) referenced_size;        // all the entries currently referenced
 
     PAD64(size_t) added_entries;
-    PAD64(size_t) added_size;
+    PAD64(int64_t) added_size;
 
     PAD64(size_t) removed_entries;
-    PAD64(size_t) removed_size;
+    PAD64(int64_t) removed_size;
 
 #ifdef PGC_COUNT_POINTS_COLLECTED
     PAD64(size_t) points_collected;
@@ -90,13 +90,13 @@ struct pgc_statistics {
     // migrations
 
     PAD64(size_t) evicting_entries;
-    PAD64(size_t) evicting_size;
+    PAD64(int64_t) evicting_size;
 
     PAD64(size_t) flushing_entries;
-    PAD64(size_t) flushing_size;
+    PAD64(int64_t) flushing_size;
 
     PAD64(size_t) hot2dirty_entries;
-    PAD64(size_t) hot2dirty_size;
+    PAD64(int64_t) hot2dirty_size;
 
     PAD64(size_t) hot_empty_pages_evicted_immediately;
     PAD64(size_t) hot_empty_pages_evicted_later;
@@ -118,8 +118,8 @@ struct pgc_statistics {
     PAD64(size_t) searches_closest_misses;
 
     PAD64(size_t) flushes_completed;
-    PAD64(size_t) flushes_completed_size;
-    PAD64(size_t) flushes_cancelled_size;
+    PAD64(int64_t) flushes_completed_size;
+    PAD64(int64_t) flushes_cancelled_size;
 
     // ----------------------------------------------------------------------------------------------------------------
     // critical events
@@ -219,8 +219,8 @@ bool pgc_is_page_hot(PGC_PAGE *page);
 bool pgc_is_page_dirty(PGC_PAGE *page);
 bool pgc_is_page_clean(PGC_PAGE *page);
 void pgc_reset_hot_max(PGC *cache);
-size_t pgc_get_current_cache_size(PGC *cache);
-size_t pgc_get_wanted_cache_size(PGC *cache);
+int64_t pgc_get_current_cache_size(PGC *cache);
+int64_t pgc_get_wanted_cache_size(PGC *cache);
 
 // resetting the end time of a hot page
 void pgc_page_hot_set_end_time_s(PGC *cache, PGC_PAGE *page, time_t end_time_s, size_t additional_bytes);
@@ -232,7 +232,7 @@ void pgc_open_evict_clean_pages_of_datafile(PGC *cache, struct rrdengine_datafil
 size_t pgc_count_clean_pages_having_data_ptr(PGC *cache, Word_t section, void *ptr);
 size_t pgc_count_hot_pages_having_data_ptr(PGC *cache, Word_t section, void *ptr);
 
-typedef size_t (*dynamic_target_cache_size_callback)(void);
+typedef int64_t (*dynamic_target_cache_size_callback)(void);
 void pgc_set_dynamic_target_cache_size_callback(PGC *cache, dynamic_target_cache_size_callback callback);
 
 typedef size_t (*nominal_page_size_callback)(void *);
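
All of these statistics counters are wrapped in PAD64(), which (judging by the name) keeps each hot counter on its own 64-byte cache line so concurrent relaxed-atomic updates from different threads do not false-share. A minimal sketch of that layout idea (an assumed stand-in, not Netdata's actual macro):

    #include <stdalign.h>
    #include <stdint.h>

    // one counter per 64-byte cache line; updates from different threads stay isolated
    struct padded_i64 {
        alignas(64) int64_t value;
    };

    struct stats_sketch {
        struct padded_i64 size;        // bumped by the page add/remove paths
        struct padded_i64 max_size;    // bumped by the CAS max-tracking loop
    };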

+ 1 - 1
src/database/engine/datafile.c

@@ -38,7 +38,7 @@ static struct rrdengine_datafile *datafile_alloc_and_init(struct rrdengine_insta
     return datafile;
 }
 
-bool datafile_acquire(struct rrdengine_datafile *df, DATAFILE_ACQUIRE_REASONS reason) {
+ALWAYS_INLINE bool datafile_acquire(struct rrdengine_datafile *df, DATAFILE_ACQUIRE_REASONS reason) {
     bool ret;
 
     spinlock_lock(&df->users.spinlock);

Some files were not shown because too many files changed in this diff