// datafile.c
  1. // SPDX-License-Identifier: GPL-3.0-or-later
  2. #include "rrdengine.h"
  3. void datafile_list_insert(struct rrdengine_instance *ctx, struct rrdengine_datafile *datafile)
  4. {
  5. uv_rwlock_wrlock(&ctx->datafiles.rwlock);
  6. DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(ctx->datafiles.first, datafile, prev, next);
  7. uv_rwlock_wrunlock(&ctx->datafiles.rwlock);
  8. }
  9. void datafile_list_delete_unsafe(struct rrdengine_instance *ctx, struct rrdengine_datafile *datafile)
  10. {
  11. DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(ctx->datafiles.first, datafile, prev, next);
  12. }
  13. static struct rrdengine_datafile *datafile_alloc_and_init(struct rrdengine_instance *ctx, unsigned tier, unsigned fileno)
  14. {
  15. fatal_assert(tier == 1);
  16. struct rrdengine_datafile *datafile = callocz(1, sizeof(struct rrdengine_datafile));
  17. datafile->tier = tier;
  18. datafile->fileno = fileno;
  19. fatal_assert(0 == uv_rwlock_init(&datafile->extent_rwlock));
  20. datafile->ctx = ctx;
  21. datafile->users.available = true;
  22. netdata_spinlock_init(&datafile->users.spinlock);
  23. netdata_spinlock_init(&datafile->writers.spinlock);
  24. netdata_spinlock_init(&datafile->extent_queries.spinlock);
  25. return datafile;
  26. }
  27. bool datafile_acquire(struct rrdengine_datafile *df, DATAFILE_ACQUIRE_REASONS reason) {
  28. bool ret;
  29. netdata_spinlock_lock(&df->users.spinlock);
  30. if(df->users.available) {
  31. ret = true;
  32. df->users.lockers++;
  33. df->users.lockers_by_reason[reason]++;
  34. }
  35. else
  36. ret = false;
  37. netdata_spinlock_unlock(&df->users.spinlock);
  38. return ret;
  39. }
  40. void datafile_release(struct rrdengine_datafile *df, DATAFILE_ACQUIRE_REASONS reason) {
  41. netdata_spinlock_lock(&df->users.spinlock);
  42. if(!df->users.lockers)
  43. fatal("DBENGINE DATAFILE: cannot release a datafile that is not acquired");
  44. df->users.lockers--;
  45. df->users.lockers_by_reason[reason]--;
  46. netdata_spinlock_unlock(&df->users.spinlock);
  47. }
// Try to acquire the datafile exclusively for deletion.
//
// Marks the datafile unavailable (so no new references can be taken) and
// then checks, in up to three escalating steps, whether it can be deleted:
//   1. no lockers at all -> deletable immediately;
//   2. otherwise, evict this datafile's clean pages from the open cache and
//      re-check;
//   3. otherwise, count open-cache pages still referencing it; if none
//      remain, start (or check) a 120s grace period after which it becomes
//      deletable even with lockers.
//
// NOTE(review): the spinlock is deliberately dropped around the open-cache
// calls (eviction and counting) because they can be slow; the locker count
// is re-read after re-acquiring the lock each time.
//
// Returns true when the caller may delete the datafile.
bool datafile_acquire_for_deletion(struct rrdengine_datafile *df) {
    bool can_be_deleted = false;

    netdata_spinlock_lock(&df->users.spinlock);

    // from this point on, datafile_acquire() will fail for this datafile
    df->users.available = false;

    if(!df->users.lockers)
        can_be_deleted = true;

    else {
        // there are lockers

        // evict any pages referencing this in the open cache
        // (unlock first: eviction can be slow and may need other locks)
        netdata_spinlock_unlock(&df->users.spinlock);
        pgc_open_evict_clean_pages_of_datafile(open_cache, df);
        netdata_spinlock_lock(&df->users.spinlock);

        if(!df->users.lockers)
            can_be_deleted = true;

        else {
            // there are lockers still

            // count the number of pages referencing this in the open cache
            // (again outside the spinlock - this is a full cache scan)
            netdata_spinlock_unlock(&df->users.spinlock);
            usec_t time_to_scan_ut = now_monotonic_usec();
            size_t clean_pages_in_open_cache = pgc_count_clean_pages_having_data_ptr(open_cache, (Word_t)df->ctx, df);
            size_t hot_pages_in_open_cache = pgc_count_hot_pages_having_data_ptr(open_cache, (Word_t)df->ctx, df);
            time_to_scan_ut = now_monotonic_usec() - time_to_scan_ut;
            netdata_spinlock_lock(&df->users.spinlock);

            if(!df->users.lockers)
                can_be_deleted = true;

            else if(!clean_pages_in_open_cache && !hot_pages_in_open_cache) {
                // no pages in the open cache related to this datafile
                time_t now_s = now_monotonic_sec();

                if(!df->users.time_to_evict) {
                    // first time we did the above - arm the 120s grace period
                    df->users.time_to_evict = now_s + 120;

                    internal_error(true, "DBENGINE: datafile %u of tier %d is not used by any open cache pages, "
                                         "but it has %u lockers (oc:%u, pd:%u), "
                                         "%zu clean and %zu hot open cache pages "
                                         "- will be deleted shortly "
                                         "(scanned open cache in %llu usecs)",
                                   df->fileno, df->ctx->config.tier,
                                   df->users.lockers,
                                   df->users.lockers_by_reason[DATAFILE_ACQUIRE_OPEN_CACHE],
                                   df->users.lockers_by_reason[DATAFILE_ACQUIRE_PAGE_DETAILS],
                                   clean_pages_in_open_cache,
                                   hot_pages_in_open_cache,
                                   time_to_scan_ut);
                }

                else if(now_s > df->users.time_to_evict) {
                    // time expired, lets remove it
                    can_be_deleted = true;

                    internal_error(true, "DBENGINE: datafile %u of tier %d is not used by any open cache pages, "
                                         "but it has %u lockers (oc:%u, pd:%u), "
                                         "%zu clean and %zu hot open cache pages "
                                         "- will be deleted now "
                                         "(scanned open cache in %llu usecs)",
                                   df->fileno, df->ctx->config.tier,
                                   df->users.lockers,
                                   df->users.lockers_by_reason[DATAFILE_ACQUIRE_OPEN_CACHE],
                                   df->users.lockers_by_reason[DATAFILE_ACQUIRE_PAGE_DETAILS],
                                   clean_pages_in_open_cache,
                                   hot_pages_in_open_cache,
                                   time_to_scan_ut);
                }
            }

            else
                // pages still reference this datafile - just report and retry later
                internal_error(true, "DBENGINE: datafile %u of tier %d "
                                     "has %u lockers (oc:%u, pd:%u), "
                                     "%zu clean and %zu hot open cache pages "
                                     "(scanned open cache in %llu usecs)",
                               df->fileno, df->ctx->config.tier,
                               df->users.lockers,
                               df->users.lockers_by_reason[DATAFILE_ACQUIRE_OPEN_CACHE],
                               df->users.lockers_by_reason[DATAFILE_ACQUIRE_PAGE_DETAILS],
                               clean_pages_in_open_cache,
                               hot_pages_in_open_cache,
                               time_to_scan_ut);
        }
    }
    netdata_spinlock_unlock(&df->users.spinlock);

    return can_be_deleted;
}
  126. void generate_datafilepath(struct rrdengine_datafile *datafile, char *str, size_t maxlen)
  127. {
  128. (void) snprintfz(str, maxlen, "%s/" DATAFILE_PREFIX RRDENG_FILE_NUMBER_PRINT_TMPL DATAFILE_EXTENSION,
  129. datafile->ctx->config.dbfiles_path, datafile->tier, datafile->fileno);
  130. }
  131. int close_data_file(struct rrdengine_datafile *datafile)
  132. {
  133. struct rrdengine_instance *ctx = datafile->ctx;
  134. uv_fs_t req;
  135. int ret;
  136. char path[RRDENG_PATH_MAX];
  137. generate_datafilepath(datafile, path, sizeof(path));
  138. ret = uv_fs_close(NULL, &req, datafile->file, NULL);
  139. if (ret < 0) {
  140. error("DBENGINE: uv_fs_close(%s): %s", path, uv_strerror(ret));
  141. ctx_fs_error(ctx);
  142. }
  143. uv_fs_req_cleanup(&req);
  144. return ret;
  145. }
  146. int unlink_data_file(struct rrdengine_datafile *datafile)
  147. {
  148. struct rrdengine_instance *ctx = datafile->ctx;
  149. uv_fs_t req;
  150. int ret;
  151. char path[RRDENG_PATH_MAX];
  152. generate_datafilepath(datafile, path, sizeof(path));
  153. ret = uv_fs_unlink(NULL, &req, path, NULL);
  154. if (ret < 0) {
  155. error("DBENGINE: uv_fs_fsunlink(%s): %s", path, uv_strerror(ret));
  156. ctx_fs_error(ctx);
  157. }
  158. uv_fs_req_cleanup(&req);
  159. __atomic_add_fetch(&ctx->stats.datafile_deletions, 1, __ATOMIC_RELAXED);
  160. return ret;
  161. }
  162. int destroy_data_file_unsafe(struct rrdengine_datafile *datafile)
  163. {
  164. struct rrdengine_instance *ctx = datafile->ctx;
  165. uv_fs_t req;
  166. int ret;
  167. char path[RRDENG_PATH_MAX];
  168. generate_datafilepath(datafile, path, sizeof(path));
  169. ret = uv_fs_ftruncate(NULL, &req, datafile->file, 0, NULL);
  170. if (ret < 0) {
  171. error("DBENGINE: uv_fs_ftruncate(%s): %s", path, uv_strerror(ret));
  172. ctx_fs_error(ctx);
  173. }
  174. uv_fs_req_cleanup(&req);
  175. ret = uv_fs_close(NULL, &req, datafile->file, NULL);
  176. if (ret < 0) {
  177. error("DBENGINE: uv_fs_close(%s): %s", path, uv_strerror(ret));
  178. ctx_fs_error(ctx);
  179. }
  180. uv_fs_req_cleanup(&req);
  181. ret = uv_fs_unlink(NULL, &req, path, NULL);
  182. if (ret < 0) {
  183. error("DBENGINE: uv_fs_fsunlink(%s): %s", path, uv_strerror(ret));
  184. ctx_fs_error(ctx);
  185. }
  186. uv_fs_req_cleanup(&req);
  187. __atomic_add_fetch(&ctx->stats.datafile_deletions, 1, __ATOMIC_RELAXED);
  188. return ret;
  189. }
  190. int create_data_file(struct rrdengine_datafile *datafile)
  191. {
  192. struct rrdengine_instance *ctx = datafile->ctx;
  193. uv_fs_t req;
  194. uv_file file;
  195. int ret, fd;
  196. struct rrdeng_df_sb *superblock;
  197. uv_buf_t iov;
  198. char path[RRDENG_PATH_MAX];
  199. generate_datafilepath(datafile, path, sizeof(path));
  200. fd = open_file_for_io(path, O_CREAT | O_RDWR | O_TRUNC, &file, use_direct_io);
  201. if (fd < 0) {
  202. ctx_fs_error(ctx);
  203. return fd;
  204. }
  205. datafile->file = file;
  206. __atomic_add_fetch(&ctx->stats.datafile_creations, 1, __ATOMIC_RELAXED);
  207. ret = posix_memalign((void *)&superblock, RRDFILE_ALIGNMENT, sizeof(*superblock));
  208. if (unlikely(ret)) {
  209. fatal("DBENGINE: posix_memalign:%s", strerror(ret));
  210. }
  211. memset(superblock, 0, sizeof(*superblock));
  212. (void) strncpy(superblock->magic_number, RRDENG_DF_MAGIC, RRDENG_MAGIC_SZ);
  213. (void) strncpy(superblock->version, RRDENG_DF_VER, RRDENG_VER_SZ);
  214. superblock->tier = 1;
  215. iov = uv_buf_init((void *)superblock, sizeof(*superblock));
  216. ret = uv_fs_write(NULL, &req, file, &iov, 1, 0, NULL);
  217. if (ret < 0) {
  218. fatal_assert(req.result < 0);
  219. error("DBENGINE: uv_fs_write: %s", uv_strerror(ret));
  220. ctx_io_error(ctx);
  221. }
  222. uv_fs_req_cleanup(&req);
  223. posix_memfree(superblock);
  224. if (ret < 0) {
  225. destroy_data_file_unsafe(datafile);
  226. return ret;
  227. }
  228. datafile->pos = sizeof(*superblock);
  229. ctx_io_write_op_bytes(ctx, sizeof(*superblock));
  230. return 0;
  231. }
  232. static int check_data_file_superblock(uv_file file)
  233. {
  234. int ret;
  235. struct rrdeng_df_sb *superblock;
  236. uv_buf_t iov;
  237. uv_fs_t req;
  238. ret = posix_memalign((void *)&superblock, RRDFILE_ALIGNMENT, sizeof(*superblock));
  239. if (unlikely(ret)) {
  240. fatal("DBENGINE: posix_memalign:%s", strerror(ret));
  241. }
  242. iov = uv_buf_init((void *)superblock, sizeof(*superblock));
  243. ret = uv_fs_read(NULL, &req, file, &iov, 1, 0, NULL);
  244. if (ret < 0) {
  245. error("DBENGINE: uv_fs_read: %s", uv_strerror(ret));
  246. uv_fs_req_cleanup(&req);
  247. goto error;
  248. }
  249. fatal_assert(req.result >= 0);
  250. uv_fs_req_cleanup(&req);
  251. if (strncmp(superblock->magic_number, RRDENG_DF_MAGIC, RRDENG_MAGIC_SZ) ||
  252. strncmp(superblock->version, RRDENG_DF_VER, RRDENG_VER_SZ) ||
  253. superblock->tier != 1) {
  254. error("DBENGINE: file has invalid superblock.");
  255. ret = UV_EINVAL;
  256. } else {
  257. ret = 0;
  258. }
  259. error:
  260. posix_memfree(superblock);
  261. return ret;
  262. }
  263. static int load_data_file(struct rrdengine_datafile *datafile)
  264. {
  265. struct rrdengine_instance *ctx = datafile->ctx;
  266. uv_fs_t req;
  267. uv_file file;
  268. int ret, fd, error;
  269. uint64_t file_size;
  270. char path[RRDENG_PATH_MAX];
  271. generate_datafilepath(datafile, path, sizeof(path));
  272. fd = open_file_for_io(path, O_RDWR, &file, use_direct_io);
  273. if (fd < 0) {
  274. ctx_fs_error(ctx);
  275. return fd;
  276. }
  277. info("DBENGINE: initializing data file \"%s\".", path);
  278. ret = check_file_properties(file, &file_size, sizeof(struct rrdeng_df_sb));
  279. if (ret)
  280. goto error;
  281. file_size = ALIGN_BYTES_CEILING(file_size);
  282. ret = check_data_file_superblock(file);
  283. if (ret)
  284. goto error;
  285. ctx_io_read_op_bytes(ctx, sizeof(struct rrdeng_df_sb));
  286. datafile->file = file;
  287. datafile->pos = file_size;
  288. info("DBENGINE: data file \"%s\" initialized (size:%"PRIu64").", path, file_size);
  289. return 0;
  290. error:
  291. error = ret;
  292. ret = uv_fs_close(NULL, &req, file, NULL);
  293. if (ret < 0) {
  294. error("DBENGINE: uv_fs_close(%s): %s", path, uv_strerror(ret));
  295. ctx_fs_error(ctx);
  296. }
  297. uv_fs_req_cleanup(&req);
  298. return error;
  299. }
  300. static int scan_data_files_cmp(const void *a, const void *b)
  301. {
  302. struct rrdengine_datafile *file1, *file2;
  303. char path1[RRDENG_PATH_MAX], path2[RRDENG_PATH_MAX];
  304. file1 = *(struct rrdengine_datafile **)a;
  305. file2 = *(struct rrdengine_datafile **)b;
  306. generate_datafilepath(file1, path1, sizeof(path1));
  307. generate_datafilepath(file2, path2, sizeof(path2));
  308. return strcmp(path1, path2);
  309. }
/* Returns number of datafiles that were loaded or < 0 on error */
static int scan_data_files(struct rrdengine_instance *ctx)
{
    // Scan the dbfiles directory for datafile names, sort them, and load
    // each data/journal file pair. Pairs that fail to load are unlinked
    // from disk and counted in failed_to_load.
    int ret, matched_files, failed_to_load, i;
    unsigned tier, no;
    uv_fs_t req;
    uv_dirent_t dent;
    struct rrdengine_datafile **datafiles, *datafile;
    struct rrdengine_journalfile *journalfile;

    ret = uv_fs_scandir(NULL, &req, ctx->config.dbfiles_path, 0, NULL);
    if (ret < 0) {
        fatal_assert(req.result < 0);
        uv_fs_req_cleanup(&req);
        error("DBENGINE: uv_fs_scandir(%s): %s", ctx->config.dbfiles_path, uv_strerror(ret));
        ctx_fs_error(ctx);
        return ret;
    }
    info("DBENGINE: found %d files in path %s", ret, ctx->config.dbfiles_path);

    // allocate space for at most MAX_DATAFILES descriptors;
    // ret is the total directory entry count, an upper bound on matches
    datafiles = callocz(MIN(ret, MAX_DATAFILES), sizeof(*datafiles));
    // collect directory entries whose names match the datafile pattern
    for (matched_files = 0 ; UV_EOF != uv_fs_scandir_next(&req, &dent) && matched_files < MAX_DATAFILES ; ) {
        ret = sscanf(dent.name, DATAFILE_PREFIX RRDENG_FILE_NUMBER_SCAN_TMPL DATAFILE_EXTENSION, &tier, &no);
        if (2 == ret) {
            datafile = datafile_alloc_and_init(ctx, tier, no);
            datafiles[matched_files++] = datafile;
        }
    }
    uv_fs_req_cleanup(&req);

    if (0 == matched_files) {
        freez(datafiles);
        return 0;
    }

    if (matched_files == MAX_DATAFILES) {
        error("DBENGINE: warning: hit maximum database engine file limit of %d files", MAX_DATAFILES);
    }

    // sort by path so files are processed in fileno order
    // (paths use zero-padded numbers, so strcmp order matches numeric order)
    qsort(datafiles, matched_files, sizeof(*datafiles), scan_data_files_cmp);

    /* TODO: change this when tiering is implemented */
    ctx->atomic.last_fileno = datafiles[matched_files - 1]->fileno;

    for (failed_to_load = 0, i = 0 ; i < matched_files ; ++i) {
        uint8_t must_delete_pair = 0;

        datafile = datafiles[i];
        ret = load_data_file(datafile);
        if (0 != ret) {
            must_delete_pair = 1;
        }

        journalfile = journalfile_alloc_and_init(datafile);
        ret = journalfile_load(ctx, journalfile, datafile);
        if (0 != ret) {
            if (!must_delete_pair) /* If datafile is still open close it */
                close_data_file(datafile);
            must_delete_pair = 1;
        }

        if (must_delete_pair) {
            // either file of the pair is bad - remove both from disk
            char path[RRDENG_PATH_MAX];

            error("DBENGINE: deleting invalid data and journal file pair.");
            ret = journalfile_unlink(journalfile);
            if (!ret) {
                journalfile_v1_generate_path(datafile, path, sizeof(path));
                info("DBENGINE: deleted journal file \"%s\".", path);
            }
            ret = unlink_data_file(datafile);
            if (!ret) {
                generate_datafilepath(datafile, path, sizeof(path));
                info("DBENGINE: deleted data file \"%s\".", path);
            }
            freez(journalfile);
            freez(datafile);
            ++failed_to_load;
            continue;
        }

        // the pair loaded fine - account its disk usage and publish it
        ctx_current_disk_space_increase(ctx, datafile->pos + journalfile->unsafe.pos);
        datafile_list_insert(ctx, datafile);
    }

    matched_files -= failed_to_load;
    freez(datafiles);

    return matched_files;
}
  386. /* Creates a datafile and a journalfile pair */
  387. int create_new_datafile_pair(struct rrdengine_instance *ctx)
  388. {
  389. __atomic_add_fetch(&rrdeng_cache_efficiency_stats.datafile_creation_started, 1, __ATOMIC_RELAXED);
  390. struct rrdengine_datafile *datafile;
  391. struct rrdengine_journalfile *journalfile;
  392. unsigned fileno = ctx_last_fileno_get(ctx) + 1;
  393. int ret;
  394. char path[RRDENG_PATH_MAX];
  395. info("DBENGINE: creating new data and journal files in path %s", ctx->config.dbfiles_path);
  396. datafile = datafile_alloc_and_init(ctx, 1, fileno);
  397. ret = create_data_file(datafile);
  398. if(ret)
  399. goto error_after_datafile;
  400. generate_datafilepath(datafile, path, sizeof(path));
  401. info("DBENGINE: created data file \"%s\".", path);
  402. journalfile = journalfile_alloc_and_init(datafile);
  403. ret = journalfile_create(journalfile, datafile);
  404. if (ret)
  405. goto error_after_journalfile;
  406. journalfile_v1_generate_path(datafile, path, sizeof(path));
  407. info("DBENGINE: created journal file \"%s\".", path);
  408. ctx_current_disk_space_increase(ctx, datafile->pos + journalfile->unsafe.pos);
  409. datafile_list_insert(ctx, datafile);
  410. ctx_last_fileno_increment(ctx);
  411. return 0;
  412. error_after_journalfile:
  413. destroy_data_file_unsafe(datafile);
  414. freez(journalfile);
  415. error_after_datafile:
  416. freez(datafile);
  417. return ret;
  418. }
  419. /* Page cache must already be initialized.
  420. * Return 0 on success.
  421. */
  422. int init_data_files(struct rrdengine_instance *ctx)
  423. {
  424. int ret;
  425. fatal_assert(0 == uv_rwlock_init(&ctx->datafiles.rwlock));
  426. ret = scan_data_files(ctx);
  427. if (ret < 0) {
  428. error("DBENGINE: failed to scan path \"%s\".", ctx->config.dbfiles_path);
  429. return ret;
  430. } else if (0 == ret) {
  431. info("DBENGINE: data files not found, creating in path \"%s\".", ctx->config.dbfiles_path);
  432. ctx->atomic.last_fileno = 0;
  433. ret = create_new_datafile_pair(ctx);
  434. if (ret) {
  435. error("DBENGINE: failed to create data and journal files in path \"%s\".", ctx->config.dbfiles_path);
  436. return ret;
  437. }
  438. }
  439. else {
  440. if (ctx->loading.create_new_datafile_pair)
  441. create_new_datafile_pair(ctx);
  442. while(rrdeng_ctx_exceeded_disk_quota(ctx))
  443. datafile_delete(ctx, ctx->datafiles.first, false, false);
  444. }
  445. pgc_reset_hot_max(open_cache);
  446. ctx->loading.create_new_datafile_pair = false;
  447. return 0;
  448. }
  449. void finalize_data_files(struct rrdengine_instance *ctx)
  450. {
  451. bool logged = false;
  452. logged = false;
  453. while(__atomic_load_n(&ctx->atomic.extents_currently_being_flushed, __ATOMIC_RELAXED)) {
  454. if(!logged) {
  455. info("Waiting for inflight flush to finish on tier %d...", ctx->config.tier);
  456. logged = true;
  457. }
  458. sleep_usec(100 * USEC_PER_MS);
  459. }
  460. do {
  461. struct rrdengine_datafile *datafile = ctx->datafiles.first;
  462. struct rrdengine_journalfile *journalfile = datafile->journalfile;
  463. logged = false;
  464. size_t iterations = 100;
  465. while(!datafile_acquire_for_deletion(datafile) && datafile != ctx->datafiles.first->prev && --iterations > 0) {
  466. if(!logged) {
  467. info("Waiting to acquire data file %u of tier %d to close it...", datafile->fileno, ctx->config.tier);
  468. logged = true;
  469. }
  470. sleep_usec(100 * USEC_PER_MS);
  471. }
  472. logged = false;
  473. bool available = false;
  474. do {
  475. uv_rwlock_wrlock(&ctx->datafiles.rwlock);
  476. netdata_spinlock_lock(&datafile->writers.spinlock);
  477. available = (datafile->writers.running || datafile->writers.flushed_to_open_running) ? false : true;
  478. if(!available) {
  479. netdata_spinlock_unlock(&datafile->writers.spinlock);
  480. uv_rwlock_wrunlock(&ctx->datafiles.rwlock);
  481. if(!logged) {
  482. info("Waiting for writers to data file %u of tier %d to finish...", datafile->fileno, ctx->config.tier);
  483. logged = true;
  484. }
  485. sleep_usec(100 * USEC_PER_MS);
  486. }
  487. } while(!available);
  488. journalfile_close(journalfile, datafile);
  489. close_data_file(datafile);
  490. datafile_list_delete_unsafe(ctx, datafile);
  491. netdata_spinlock_unlock(&datafile->writers.spinlock);
  492. uv_rwlock_wrunlock(&ctx->datafiles.rwlock);
  493. freez(journalfile);
  494. freez(datafile);
  495. } while(ctx->datafiles.first);
  496. }