@@ -28,16 +28,27 @@ struct metric {
 static struct aral_statistics mrg_aral_statistics;
 
 struct mrg {
-    size_t partitions;
+    ARAL *aral[MRG_PARTITIONS];
 
-    struct mrg_partition {
-        ARAL *aral;                 // not protected by our spinlock - it has its own
+    struct pgc_index {
+        MRG_CACHE_LINE_PADDING(0);
 
         RW_SPINLOCK rw_spinlock;
-        Pvoid_t uuid_judy;          // JudyHS: each UUID has a JudyL of sections (tiers)
+
+        MRG_CACHE_LINE_PADDING(1);
+
+        Pvoid_t uuid_judy;          // each UUID has a JudyL of sections (tiers)
+
+        MRG_CACHE_LINE_PADDING(2);
 
         struct mrg_statistics stats;
-    } index[];
+
+        MRG_CACHE_LINE_PADDING(3);
+    } index[MRG_PARTITIONS];
+
+#ifdef NETDATA_INTERNAL_CHECKS
+    size_t entries_per_partition[MRG_PARTITIONS];
+#endif
 };
 
 static inline void MRG_STATS_DUPLICATE_ADD(MRG *mrg, size_t partition) {
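Note on MRG_CACHE_LINE_PADDING(): the macro is defined outside this patch. Its job is to push each partition's spinlock, Judy root and statistics onto separate cache lines, so that writers hammering one partition do not false-share cache lines with readers of another. A minimal sketch of such a macro, assuming a 64-byte cache line (both the constant and the member name are assumptions, not the tree's actual definition):

// Hypothetical sketch only - the real macro lives elsewhere in the tree.
#define MRG_CACHE_LINE_SIZE 64                      // assumed cache line size
#define MRG_CACHE_LINE_PADDING(x) \
    uint8_t mrg_padding##x[MRG_CACHE_LINE_SIZE]     // unique member name per use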
@@ -48,12 +59,20 @@ static inline void MRG_STATS_ADDED_METRIC(MRG *mrg, size_t partition) {
     mrg->index[partition].stats.entries++;
     mrg->index[partition].stats.additions++;
     mrg->index[partition].stats.size += sizeof(METRIC);
+
+#ifdef NETDATA_INTERNAL_CHECKS
+    __atomic_add_fetch(&mrg->entries_per_partition[partition], 1, __ATOMIC_RELAXED);
+#endif
 }
 
 static inline void MRG_STATS_DELETED_METRIC(MRG *mrg, size_t partition) {
     mrg->index[partition].stats.entries--;
     mrg->index[partition].stats.size -= sizeof(METRIC);
     mrg->index[partition].stats.deletions++;
+
+#ifdef NETDATA_INTERNAL_CHECKS
+    __atomic_sub_fetch(&mrg->entries_per_partition[partition], 1, __ATOMIC_RELAXED);
+#endif
 }
 
 static inline void MRG_STATS_SEARCH_HIT(MRG *mrg, size_t partition) {
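The entries_per_partition[] tallies are debug-only (NETDATA_INTERNAL_CHECKS) and use relaxed atomics, since they are never read for control flow. A hedged sketch of the kind of invariant they make checkable; mrg_check_entries() is a hypothetical helper, not part of this patch:

#ifdef NETDATA_INTERNAL_CHECKS
// Hypothetical helper: the per-partition tallies must sum to the aggregated
// entries counter (only meaningful while the index is quiescent).
static void mrg_check_entries(MRG *mrg) {
    size_t total = 0;
    for(size_t i = 0; i < MRG_PARTITIONS ; i++)
        total += __atomic_load_n(&mrg->entries_per_partition[i], __ATOMIC_RELAXED);

    struct mrg_statistics s;
    mrg_get_statistics(mrg, &s);

    if(total != s.entries)
        fatal("MRG: per-partition tallies (%zu) disagree with aggregated entries (%zu)", total, s.entries);
}
#endif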
@@ -68,13 +87,18 @@ static inline void MRG_STATS_DELETE_MISS(MRG *mrg, size_t partition) {
     mrg->index[partition].stats.delete_misses++;
 }
 
-#define mrg_index_read_lock(mrg, partition) rw_spinlock_read_lock(&(mrg)->index[partition].rw_spinlock)
-#define mrg_index_read_unlock(mrg, partition) rw_spinlock_read_unlock(&(mrg)->index[partition].rw_spinlock)
-#define mrg_index_write_lock(mrg, partition) rw_spinlock_write_lock(&(mrg)->index[partition].rw_spinlock)
-#define mrg_index_write_unlock(mrg, partition) rw_spinlock_write_unlock(&(mrg)->index[partition].rw_spinlock)
-
-#define metric_lock(metric) spinlock_lock(&(metric)->spinlock)
-#define metric_unlock(metric) spinlock_unlock(&(metric)->spinlock)
+static inline void mrg_index_read_lock(MRG *mrg, size_t partition) {
+    rw_spinlock_read_lock(&mrg->index[partition].rw_spinlock);
+}
+static inline void mrg_index_read_unlock(MRG *mrg, size_t partition) {
+    rw_spinlock_read_unlock(&mrg->index[partition].rw_spinlock);
+}
+static inline void mrg_index_write_lock(MRG *mrg, size_t partition) {
+    rw_spinlock_write_lock(&mrg->index[partition].rw_spinlock);
+}
+static inline void mrg_index_write_unlock(MRG *mrg, size_t partition) {
+    rw_spinlock_write_unlock(&mrg->index[partition].rw_spinlock);
+}
 
 static inline void mrg_stats_size_judyl_change(MRG *mrg, size_t mem_before_judyl, size_t mem_after_judyl, size_t partition) {
     if(mem_after_judyl > mem_before_judyl)
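Converting the lock wrappers from macros to static inline functions does not change the generated code (they are trivially inlined) but gains typed parameters, single evaluation of arguments, and frames that debuggers and profilers can attribute. Call sites read exactly as before:

// Usage is unchanged; only the definition moved from a macro to a
// type-checked inline function:
mrg_index_write_lock(mrg, partition);
// ... mutate mrg->index[partition].uuid_judy ...
mrg_index_write_unlock(mrg, partition);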
@@ -93,8 +117,7 @@ static inline void mrg_stats_size_judyhs_removed_uuid(MRG *mrg, size_t partition
 
 static inline size_t uuid_partition(MRG *mrg __maybe_unused, uuid_t *uuid) {
     uint8_t *u = (uint8_t *)uuid;
-    size_t *n = (size_t *)&u[UUID_SZ - sizeof(size_t)];
-    return *n % mrg->partitions;
+    return u[UUID_SZ - 1] % MRG_PARTITIONS;
 }
 
 static inline bool metric_has_retention_unsafe(MRG *mrg __maybe_unused, METRIC *metric) {
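The old code reinterpreted the UUID's last sizeof(size_t) bytes through a size_t pointer - a potentially unaligned load that also breaks strict aliasing. Since UUID bytes are effectively random, the final byte alone spreads metrics evenly across partitions. A worked example, assuming MRG_PARTITIONS is 8 (an illustrative value, not taken from this patch):

// A UUID whose last byte is 0x2a (decimal 42), with an assumed
// MRG_PARTITIONS of 8:
//     42 % 8 == 2   ->  the metric is indexed by partition 2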
@@ -119,7 +142,7 @@ static inline REFCOUNT metric_acquire(MRG *mrg __maybe_unused, METRIC *metric, b
     REFCOUNT refcount;
 
     if(!having_spinlock)
-        metric_lock(metric);
+        spinlock_lock(&metric->spinlock);
 
     if(unlikely(metric->refcount < 0))
         fatal("METRIC: refcount is %d (negative) during acquire", metric->refcount);
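This metric_lock()/metric_unlock() replacement repeats through the rest of the patch. It is a mechanical inlining of the macros deleted above, which expanded to exactly the calls that now appear, so locking behaviour is unchanged:

// The deleted macros and their replacements are identical after preprocessing:
//     metric_lock(metric)     ->  spinlock_lock(&(metric)->spinlock)
//     metric_unlock(metric)   ->  spinlock_unlock(&(metric)->spinlock)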
@@ -130,7 +153,7 @@ static inline REFCOUNT metric_acquire(MRG *mrg __maybe_unused, METRIC *metric, b
     metric_has_retention_unsafe(mrg, metric);
 
     if(!having_spinlock)
-        metric_unlock(metric);
+        spinlock_unlock(&metric->spinlock);
 
     if(refcount == 1)
         __atomic_add_fetch(&mrg->index[partition].stats.entries_referenced, 1, __ATOMIC_RELAXED);
@@ -145,7 +168,7 @@ static inline bool metric_release_and_can_be_deleted(MRG *mrg __maybe_unused, ME
     size_t partition = metric->partition;
     REFCOUNT refcount;
 
-    metric_lock(metric);
+    spinlock_lock(&metric->spinlock);
 
     if(unlikely(metric->refcount <= 0))
         fatal("METRIC: refcount is %d (zero or negative) during release", metric->refcount);
@@ -155,7 +178,7 @@ static inline bool metric_release_and_can_be_deleted(MRG *mrg __maybe_unused, ME
     if(likely(metric_has_retention_unsafe(mrg, metric) || refcount != 0))
         ret = false;
 
-    metric_unlock(metric);
+    spinlock_unlock(&metric->spinlock);
 
     if(unlikely(!refcount))
         __atomic_sub_fetch(&mrg->index[partition].stats.entries_referenced, 1, __ATOMIC_RELAXED);
@@ -168,7 +191,7 @@ static inline bool metric_release_and_can_be_deleted(MRG *mrg __maybe_unused, ME
 static inline METRIC *metric_add_and_acquire(MRG *mrg, MRG_ENTRY *entry, bool *ret) {
     size_t partition = uuid_partition(mrg, &entry->uuid);
 
-    METRIC *allocation = aral_mallocz(mrg->index[partition].aral);
+    METRIC *allocation = aral_mallocz(mrg->aral[partition]);
 
     mrg_index_write_lock(mrg, partition);
 
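The per-partition allocators moved out of struct pgc_index into the top-level aral[] array; as the old comment noted, ARAL has its own locking, so it gains nothing from sitting inside the cache-line-padded index structure. Access changes accordingly:

// Allocation now goes through the top-level array:
METRIC *m = aral_mallocz(mrg->aral[partition]);    // was: mrg->index[partition].aral
// ... and memory is returned the same way:
aral_freez(mrg->aral[partition], m);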
@@ -201,7 +224,7 @@ static inline METRIC *metric_add_and_acquire(MRG *mrg, MRG_ENTRY *entry, bool *r
         if(ret)
             *ret = false;
 
-        aral_freez(mrg->index[partition].aral, allocation);
+        aral_freez(mrg->aral[partition], allocation);
 
         return metric;
     }
@@ -303,7 +326,7 @@ static inline bool acquired_metric_del(MRG *mrg, METRIC *metric) {
 
     mrg_index_write_unlock(mrg, partition);
 
-    aral_freez(mrg->index[partition].aral, metric);
+    aral_freez(mrg->aral[partition], metric);
 
     return true;
 }
@@ -311,20 +334,22 @@ static inline bool acquired_metric_del(MRG *mrg, METRIC *metric) {
 // ----------------------------------------------------------------------------
 // public API
 
-inline MRG *mrg_create(size_t partitions) {
-    if(partitions < 1)
-        partitions = get_netdata_cpus();
-
-    MRG *mrg = callocz(1, sizeof(MRG) + sizeof(struct mrg_partition) * partitions);
-    mrg->partitions = partitions;
+inline MRG *mrg_create(void) {
+    MRG *mrg = callocz(1, sizeof(MRG));
 
-    for(size_t i = 0; i < mrg->partitions ; i++) {
+    for(size_t i = 0; i < MRG_PARTITIONS ; i++) {
         rw_spinlock_init(&mrg->index[i].rw_spinlock);
 
         char buf[ARAL_MAX_NAME + 1];
         snprintfz(buf, ARAL_MAX_NAME, "mrg[%zu]", i);
 
-        mrg->index[i].aral = aral_create(buf, sizeof(METRIC), 0, 16384, &mrg_aral_statistics, NULL, NULL, false, false);
+        mrg->aral[i] = aral_create(buf,
+                                   sizeof(METRIC),
+                                   0,
+                                   16384,
+                                   &mrg_aral_statistics,
+                                   NULL, NULL, false,
+                                   false);
     }
 
     return mrg;
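With the partition count fixed at compile time, the MRG becomes a fixed-size allocation and the constructor loses its argument. A minimal usage sketch, modelled on the unit test later in this file; mrg_destroy() is assumed to be the existing counterpart and is not shown in this patch:

// Sketch only - field names follow the MRG_ENTRY usage elsewhere in this file.
MRG *mrg = mrg_create();

MRG_ENTRY entry = {
    .section = 1,                       // tier / section identifier
    .first_time_s = now_realtime_sec(),
};
uuid_generate(entry.uuid);

bool added = false;
METRIC *m = mrg_metric_add_and_acquire(mrg, entry, &added);
// ... use the metric ...
mrg_metric_release(mrg, m);

mrg_destroy(mrg);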
@@ -390,10 +415,10 @@ inline bool mrg_metric_set_first_time_s(MRG *mrg __maybe_unused, METRIC *metric,
     if(unlikely(first_time_s < 0))
         return false;
 
-    metric_lock(metric);
+    spinlock_lock(&metric->spinlock);
     metric->first_time_s = first_time_s;
     metric_has_retention_unsafe(mrg, metric);
-    metric_unlock(metric);
+    spinlock_unlock(&metric->spinlock);
 
     return true;
 }
@@ -418,7 +443,7 @@ inline void mrg_metric_expand_retention(MRG *mrg __maybe_unused, METRIC *metric,
     if(unlikely(!first_time_s && !last_time_s && !update_every_s))
         return;
 
-    metric_lock(metric);
+    spinlock_lock(&metric->spinlock);
 
     if(unlikely(first_time_s && (!metric->first_time_s || first_time_s < metric->first_time_s)))
         metric->first_time_s = first_time_s;
@@ -433,7 +458,7 @@ inline void mrg_metric_expand_retention(MRG *mrg __maybe_unused, METRIC *metric,
         metric->latest_update_every_s = (uint32_t) update_every_s;
 
     metric_has_retention_unsafe(mrg, metric);
-    metric_unlock(metric);
+    spinlock_unlock(&metric->spinlock);
 }
 
 inline bool mrg_metric_set_first_time_s_if_bigger(MRG *mrg __maybe_unused, METRIC *metric, time_t first_time_s) {
@@ -441,13 +466,13 @@ inline bool mrg_metric_set_first_time_s_if_bigger(MRG *mrg __maybe_unused, METRI
 
     bool ret = false;
 
-    metric_lock(metric);
+    spinlock_lock(&metric->spinlock);
     if(first_time_s > metric->first_time_s) {
         metric->first_time_s = first_time_s;
         ret = true;
     }
     metric_has_retention_unsafe(mrg, metric);
-    metric_unlock(metric);
+    spinlock_unlock(&metric->spinlock);
 
     return ret;
 }
@@ -455,7 +480,7 @@ inline bool mrg_metric_set_first_time_s_if_bigger(MRG *mrg __maybe_unused, METRI
 inline time_t mrg_metric_get_first_time_s(MRG *mrg __maybe_unused, METRIC *metric) {
     time_t first_time_s;
 
-    metric_lock(metric);
+    spinlock_lock(&metric->spinlock);
 
     if(unlikely(!metric->first_time_s)) {
         if(metric->latest_time_s_clean)
@@ -467,13 +492,13 @@ inline time_t mrg_metric_get_first_time_s(MRG *mrg __maybe_unused, METRIC *metri
 
     first_time_s = metric->first_time_s;
 
-    metric_unlock(metric);
+    spinlock_unlock(&metric->spinlock);
 
     return first_time_s;
 }
 
 inline void mrg_metric_get_retention(MRG *mrg __maybe_unused, METRIC *metric, time_t *first_time_s, time_t *last_time_s, time_t *update_every_s) {
-    metric_lock(metric);
+    spinlock_lock(&metric->spinlock);
 
     if(unlikely(!metric->first_time_s)) {
         if(metric->latest_time_s_clean)
@@ -487,7 +512,7 @@ inline void mrg_metric_get_retention(MRG *mrg __maybe_unused, METRIC *metric, ti
     *last_time_s = MAX(metric->latest_time_s_clean, metric->latest_time_s_hot);
     *update_every_s = metric->latest_update_every_s;
 
-    metric_unlock(metric);
+    spinlock_unlock(&metric->spinlock);
 }
 
 inline bool mrg_metric_set_clean_latest_time_s(MRG *mrg __maybe_unused, METRIC *metric, time_t latest_time_s) {
@@ -496,7 +521,7 @@ inline bool mrg_metric_set_clean_latest_time_s(MRG *mrg __maybe_unused, METRIC *
     if(unlikely(latest_time_s < 0))
         return false;
 
-    metric_lock(metric);
+    spinlock_lock(&metric->spinlock);
 
 //    internal_fatal(latest_time_s > max_acceptable_collected_time(),
 //                   "DBENGINE METRIC: metric latest time is in the future");
@@ -510,7 +535,7 @@ inline bool mrg_metric_set_clean_latest_time_s(MRG *mrg __maybe_unused, METRIC *
         metric->first_time_s = latest_time_s;
 
     metric_has_retention_unsafe(mrg, metric);
-    metric_unlock(metric);
+    spinlock_unlock(&metric->spinlock);
     return true;
 }
 
@@ -548,7 +573,7 @@ inline bool mrg_metric_zero_disk_retention(MRG *mrg __maybe_unused, METRIC *metr
         if (min_first_time_s == LONG_MAX)
             min_first_time_s = 0;
 
-        metric_lock(metric);
+        spinlock_lock(&metric->spinlock);
         if (--countdown && !min_first_time_s && metric->latest_time_s_hot)
            do_again = true;
        else {
@@ -560,7 +585,7 @@ inline bool mrg_metric_zero_disk_retention(MRG *mrg __maybe_unused, METRIC *metr
 
            ret = metric_has_retention_unsafe(mrg, metric);
        }
-        metric_unlock(metric);
+        spinlock_unlock(&metric->spinlock);
    } while(do_again);
 
    return ret;
@@ -575,22 +600,22 @@ inline bool mrg_metric_set_hot_latest_time_s(MRG *mrg __maybe_unused, METRIC *me
     if(unlikely(latest_time_s < 0))
         return false;
 
-    metric_lock(metric);
+    spinlock_lock(&metric->spinlock);
     metric->latest_time_s_hot = latest_time_s;
 
     if(unlikely(!metric->first_time_s))
         metric->first_time_s = latest_time_s;
 
     metric_has_retention_unsafe(mrg, metric);
-    metric_unlock(metric);
+    spinlock_unlock(&metric->spinlock);
     return true;
 }
 
 inline time_t mrg_metric_get_latest_time_s(MRG *mrg __maybe_unused, METRIC *metric) {
     time_t max;
-    metric_lock(metric);
+    spinlock_lock(&metric->spinlock);
     max = MAX(metric->latest_time_s_clean, metric->latest_time_s_hot);
-    metric_unlock(metric);
+    spinlock_unlock(&metric->spinlock);
     return max;
 }
 
|
@@ -600,9 +625,9 @@ inline bool mrg_metric_set_update_every(MRG *mrg __maybe_unused, METRIC *metric,
|
|
|
if(update_every_s <= 0)
|
|
|
return false;
|
|
|
|
|
|
- metric_lock(metric);
|
|
|
+ spinlock_lock(&metric->spinlock);
|
|
|
metric->latest_update_every_s = (uint32_t) update_every_s;
|
|
|
- metric_unlock(metric);
|
|
|
+ spinlock_unlock(&metric->spinlock);
|
|
|
|
|
|
return true;
|
|
|
}
|
|
@@ -613,10 +638,10 @@ inline bool mrg_metric_set_update_every_s_if_zero(MRG *mrg __maybe_unused, METRI
     if(update_every_s <= 0)
         return false;
 
-    metric_lock(metric);
+    spinlock_lock(&metric->spinlock);
     if(!metric->latest_update_every_s)
         metric->latest_update_every_s = (uint32_t) update_every_s;
-    metric_unlock(metric);
+    spinlock_unlock(&metric->spinlock);
 
     return true;
 }
@@ -624,16 +649,16 @@ inline bool mrg_metric_set_update_every_s_if_zero(MRG *mrg __maybe_unused, METRI
 inline time_t mrg_metric_get_update_every_s(MRG *mrg __maybe_unused, METRIC *metric) {
     time_t update_every_s;
 
-    metric_lock(metric);
+    spinlock_lock(&metric->spinlock);
     update_every_s = metric->latest_update_every_s;
-    metric_unlock(metric);
+    spinlock_unlock(&metric->spinlock);
 
     return update_every_s;
 }
 
 inline bool mrg_metric_set_writer(MRG *mrg, METRIC *metric) {
     bool done = false;
-    metric_lock(metric);
+    spinlock_lock(&metric->spinlock);
     if(!metric->writer) {
         metric->writer = gettid();
         __atomic_add_fetch(&mrg->index[metric->partition].stats.writers, 1, __ATOMIC_RELAXED);
@@ -641,19 +666,19 @@ inline bool mrg_metric_set_writer(MRG *mrg, METRIC *metric) {
     }
     else
         __atomic_add_fetch(&mrg->index[metric->partition].stats.writers_conflicts, 1, __ATOMIC_RELAXED);
-    metric_unlock(metric);
+    spinlock_unlock(&metric->spinlock);
     return done;
 }
 
 inline bool mrg_metric_clear_writer(MRG *mrg, METRIC *metric) {
     bool done = false;
-    metric_lock(metric);
+    spinlock_lock(&metric->spinlock);
     if(metric->writer) {
         metric->writer = 0;
         __atomic_sub_fetch(&mrg->index[metric->partition].stats.writers, 1, __ATOMIC_RELAXED);
         done = true;
     }
-    metric_unlock(metric);
+    spinlock_unlock(&metric->spinlock);
     return done;
 }
 
@@ -709,7 +734,7 @@ inline void mrg_update_metric_retention_and_granularity_by_uuid(
 inline void mrg_get_statistics(MRG *mrg, struct mrg_statistics *s) {
     memset(s, 0, sizeof(struct mrg_statistics));
 
-    for(size_t i = 0; i < mrg->partitions ;i++) {
+    for(int i = 0; i < MRG_PARTITIONS ;i++) {
         s->entries += __atomic_load_n(&mrg->index[i].stats.entries, __ATOMIC_RELAXED);
         s->entries_referenced += __atomic_load_n(&mrg->index[i].stats.entries_referenced, __ATOMIC_RELAXED);
         s->entries_with_retention += __atomic_load_n(&mrg->index[i].stats.entries_with_retention, __ATOMIC_RELAXED);
@@ -726,7 +751,7 @@ inline void mrg_get_statistics(MRG *mrg, struct mrg_statistics *s) {
         s->writers_conflicts += __atomic_load_n(&mrg->index[i].stats.writers_conflicts, __ATOMIC_RELAXED);
     }
 
-    s->size += sizeof(MRG) + sizeof(struct mrg_partition) * mrg->partitions;
+    s->size += sizeof(MRG);
 }
 
 // ----------------------------------------------------------------------------
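Because every per-partition counter is loaded with __ATOMIC_RELAXED while writers keep mutating them, the aggregated result is a monitoring snapshot, not a point-in-time one. The base-size accounting also simplifies: index[] is now embedded in MRG, so the fixed cost is just sizeof(MRG):

// Reading the aggregated statistics; individual loads are atomic, but the
// sums may lag concurrent writers slightly - fine for monitoring:
struct mrg_statistics stats;
mrg_get_statistics(mrg, &stats);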
@@ -783,7 +808,7 @@ static void *mrg_stress(void *ptr) {
 }
 
 int mrg_unittest(void) {
-    MRG *mrg = mrg_create(0);
+    MRG *mrg = mrg_create();
     METRIC *m1_t0, *m2_t0, *m3_t0, *m4_t0;
     METRIC *m1_t1, *m2_t1, *m3_t1, *m4_t1;
     bool ret;
@@ -864,7 +889,7 @@ int mrg_unittest(void) {
         fatal("DBENGINE METRIC: invalid entries counter");
 
     size_t entries = 1000000;
-    size_t threads = mrg->partitions / 3 + 1;
+    size_t threads = MRG_PARTITIONS / 3 + 1;
     size_t tiers = 3;
     size_t run_for_secs = 5;
     info("preparing stress test of %zu entries...", entries);
|