// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/db_impl.h"

#include <algorithm>
#include <set>
#include <string>
#include <stdint.h>
#include <stdio.h>
#include <vector>
#include "db/builder.h"
#include "db/db_iter.h"
#include "db/dbformat.h"
#include "db/filename.h"
#include "db/log_reader.h"
#include "db/log_writer.h"
#include "db/memtable.h"
#include "db/table_cache.h"
#include "db/version_set.h"
#include "db/write_batch_internal.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "leveldb/status.h"
#include "leveldb/table.h"
#include "leveldb/table_builder.h"
#include "port/port.h"
#include "table/block.h"
#include "table/merger.h"
#include "table/two_level_iterator.h"
#include "util/coding.h"
#include "util/logging.h"
#include "util/mutexlock.h"

namespace leveldb {

const int kNumNonTableCacheFiles = 10;

// Information kept for every waiting writer
struct DBImpl::Writer {
  Status status;
  WriteBatch* batch;
  bool sync;
  bool done;
  port::CondVar cv;

  explicit Writer(port::Mutex* mu) : cv(mu) { }
};

struct DBImpl::CompactionState {
  Compaction* const compaction;

  // Sequence numbers < smallest_snapshot are not significant since we
  // will never have to service a snapshot below smallest_snapshot.
  // Therefore if we have seen a sequence number S <= smallest_snapshot,
  // we can drop all entries for the same key with sequence numbers < S.
  SequenceNumber smallest_snapshot;

  // Files produced by compaction
  struct Output {
    uint64_t number;
    uint64_t file_size;
    InternalKey smallest, largest;
  };
  std::vector<Output> outputs;

  // State kept for output being generated
  WritableFile* outfile;
  TableBuilder* builder;

  uint64_t total_bytes;

  Output* current_output() { return &outputs[outputs.size()-1]; }

  explicit CompactionState(Compaction* c)
      : compaction(c),
        outfile(NULL),
        builder(NULL),
        total_bytes(0) {
  }
};

// Fix user-supplied options to be reasonable
template <class T, class V>
static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
  if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
  if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
}

Options SanitizeOptions(const std::string& dbname,
                        const InternalKeyComparator* icmp,
                        const InternalFilterPolicy* ipolicy,
                        const Options& src) {
  Options result = src;
  result.comparator = icmp;
  result.filter_policy = (src.filter_policy != NULL) ? ipolicy : NULL;
  ClipToRange(&result.max_open_files,    64 + kNumNonTableCacheFiles, 50000);
  ClipToRange(&result.write_buffer_size, 64<<10,                      1<<30);
  ClipToRange(&result.block_size,        1<<10,                       4<<20);
  if (result.info_log == NULL) {
    // Open a log file in the same directory as the db
    src.env->CreateDir(dbname);  // In case it does not exist
    src.env->RenameFile(InfoLogFileName(dbname), OldInfoLogFileName(dbname));
    Status s = src.env->NewLogger(InfoLogFileName(dbname), &result.info_log);
    if (!s.ok()) {
      // No place suitable for logging
      result.info_log = NULL;
    }
  }
  if (result.block_cache == NULL) {
    result.block_cache = NewLRUCache(8 << 20);
  }
  return result;
}

DBImpl::DBImpl(const Options& raw_options, const std::string& dbname)
    : env_(raw_options.env),
      internal_comparator_(raw_options.comparator),
      internal_filter_policy_(raw_options.filter_policy),
      options_(SanitizeOptions(dbname, &internal_comparator_,
                               &internal_filter_policy_, raw_options)),
      owns_info_log_(options_.info_log != raw_options.info_log),
      owns_cache_(options_.block_cache != raw_options.block_cache),
      dbname_(dbname),
      db_lock_(NULL),
      shutting_down_(NULL),
      bg_cv_(&mutex_),
      mem_(NULL),
      imm_(NULL),
      logfile_(NULL),
      logfile_number_(0),
      log_(NULL),
      seed_(0),
      tmp_batch_(new WriteBatch),
      bg_compaction_scheduled_(false),
      manual_compaction_(NULL) {
  has_imm_.Release_Store(NULL);

  // Reserve ten files or so for other uses and give the rest to TableCache.
  const int table_cache_size = options_.max_open_files - kNumNonTableCacheFiles;
  table_cache_ = new TableCache(dbname_, &options_, table_cache_size);

  versions_ = new VersionSet(dbname_, &options_, table_cache_,
                             &internal_comparator_);
}

DBImpl::~DBImpl() {
  // Wait for background work to finish
  mutex_.Lock();
  shutting_down_.Release_Store(this);  // Any non-NULL value is ok
  while (bg_compaction_scheduled_) {
    bg_cv_.Wait();
  }
  mutex_.Unlock();

  if (db_lock_ != NULL) {
    env_->UnlockFile(db_lock_);
  }

  delete versions_;
  if (mem_ != NULL) mem_->Unref();
  if (imm_ != NULL) imm_->Unref();
  delete tmp_batch_;
  delete log_;
  delete logfile_;
  delete table_cache_;

  if (owns_info_log_) {
    delete options_.info_log;
  }
  if (owns_cache_) {
    delete options_.block_cache;
  }
}

Status DBImpl::NewDB() {
  VersionEdit new_db;
  new_db.SetComparatorName(user_comparator()->Name());
  new_db.SetLogNumber(0);
  new_db.SetNextFile(2);
  new_db.SetLastSequence(0);

  const std::string manifest = DescriptorFileName(dbname_, 1);
  WritableFile* file;
  Status s = env_->NewWritableFile(manifest, &file);
  if (!s.ok()) {
    return s;
  }
  {
    log::Writer log(file);
    std::string record;
    new_db.EncodeTo(&record);
    s = log.AddRecord(record);
    if (s.ok()) {
      s = file->Close();
    }
  }
  delete file;
  if (s.ok()) {
    // Make "CURRENT" file that points to the new manifest file.
    s = SetCurrentFile(env_, dbname_, 1);
  } else {
    env_->DeleteFile(manifest);
  }
  return s;
}

void DBImpl::MaybeIgnoreError(Status* s) const {
  if (s->ok() || options_.paranoid_checks) {
    // No change needed
  } else {
    Log(options_.info_log, "Ignoring error %s", s->ToString().c_str());
    *s = Status::OK();
  }
}

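// Delete files that are no longer needed: log files older than the current
// log, descriptors older than the current manifest, and table/temp files
// that are neither referenced by a live version nor listed in
// pending_outputs_.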
void DBImpl::DeleteObsoleteFiles() {
  if (!bg_error_.ok()) {
    // After a background error, we don't know whether a new version may
    // or may not have been committed, so we cannot safely garbage collect.
    return;
  }

  // Make a set of all of the live files
  std::set<uint64_t> live = pending_outputs_;
  versions_->AddLiveFiles(&live);

  std::vector<std::string> filenames;
  env_->GetChildren(dbname_, &filenames);  // Ignoring errors on purpose
  uint64_t number;
  FileType type;
  for (size_t i = 0; i < filenames.size(); i++) {
    if (ParseFileName(filenames[i], &number, &type)) {
      bool keep = true;
      switch (type) {
        case kLogFile:
          keep = ((number >= versions_->LogNumber()) ||
                  (number == versions_->PrevLogNumber()));
          break;
        case kDescriptorFile:
          // Keep my manifest file, and any newer incarnations'
          // (in case there is a race that allows other incarnations)
          keep = (number >= versions_->ManifestFileNumber());
          break;
        case kTableFile:
          keep = (live.find(number) != live.end());
          break;
        case kTempFile:
          // Any temp files that are currently being written to must
          // be recorded in pending_outputs_, which is inserted into "live"
          keep = (live.find(number) != live.end());
          break;
        case kCurrentFile:
        case kDBLockFile:
        case kInfoLogFile:
          keep = true;
          break;
      }

      if (!keep) {
        if (type == kTableFile) {
          table_cache_->Evict(number);
        }
        Log(options_.info_log, "Delete type=%d #%lld\n",
            int(type),
            static_cast<unsigned long long>(number));
        env_->DeleteFile(dbname_ + "/" + filenames[i]);
      }
    }
  }
}

Status DBImpl::Recover(VersionEdit* edit, bool* save_manifest) {
  mutex_.AssertHeld();

  // Ignore error from CreateDir since the creation of the DB is
  // committed only when the descriptor is created, and this directory
  // may already exist from a previous failed creation attempt.
  env_->CreateDir(dbname_);
  assert(db_lock_ == NULL);
  Status s = env_->LockFile(LockFileName(dbname_), &db_lock_);
  if (!s.ok()) {
    return s;
  }

  if (!env_->FileExists(CurrentFileName(dbname_))) {
    if (options_.create_if_missing) {
      s = NewDB();
      if (!s.ok()) {
        return s;
      }
    } else {
      return Status::InvalidArgument(
          dbname_, "does not exist (create_if_missing is false)");
    }
  } else {
    if (options_.error_if_exists) {
      return Status::InvalidArgument(
          dbname_, "exists (error_if_exists is true)");
    }
  }

  s = versions_->Recover(save_manifest);
  if (!s.ok()) {
    return s;
  }
  SequenceNumber max_sequence(0);

  // Recover from all newer log files than the ones named in the
  // descriptor (new log files may have been added by the previous
  // incarnation without registering them in the descriptor).
  //
  // Note that PrevLogNumber() is no longer used, but we pay
  // attention to it in case we are recovering a database
  // produced by an older version of leveldb.
  const uint64_t min_log = versions_->LogNumber();
  const uint64_t prev_log = versions_->PrevLogNumber();
  std::vector<std::string> filenames;
  s = env_->GetChildren(dbname_, &filenames);
  if (!s.ok()) {
    return s;
  }
  std::set<uint64_t> expected;
  versions_->AddLiveFiles(&expected);
  uint64_t number;
  FileType type;
  std::vector<uint64_t> logs;
  for (size_t i = 0; i < filenames.size(); i++) {
    if (ParseFileName(filenames[i], &number, &type)) {
      expected.erase(number);
      if (type == kLogFile && ((number >= min_log) || (number == prev_log)))
        logs.push_back(number);
    }
  }
  if (!expected.empty()) {
    char buf[50];
    snprintf(buf, sizeof(buf), "%d missing files; e.g.",
             static_cast<int>(expected.size()));
    return Status::Corruption(buf, TableFileName(dbname_, *(expected.begin())));
  }

  // Recover in the order in which the logs were generated
  std::sort(logs.begin(), logs.end());
  for (size_t i = 0; i < logs.size(); i++) {
    s = RecoverLogFile(logs[i], (i == logs.size() - 1), save_manifest, edit,
                       &max_sequence);
    if (!s.ok()) {
      return s;
    }

    // The previous incarnation may not have written any MANIFEST
    // records after allocating this log number.  So we manually
    // update the file number allocation counter in VersionSet.
    versions_->MarkFileNumberUsed(logs[i]);
  }

  if (versions_->LastSequence() < max_sequence) {
    versions_->SetLastSequence(max_sequence);
  }

  return Status::OK();
}

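// Replay one log file into a fresh memtable, flushing it to a level-0 table
// whenever the write buffer fills.  If reuse_logs is set and this is the last
// log, the log file and memtable may be kept for further appends instead of
// being compacted here.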
Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log,
                              bool* save_manifest, VersionEdit* edit,
                              SequenceNumber* max_sequence) {
  struct LogReporter : public log::Reader::Reporter {
    Env* env;
    Logger* info_log;
    const char* fname;
    Status* status;  // NULL if options_.paranoid_checks==false
    virtual void Corruption(size_t bytes, const Status& s) {
      Log(info_log, "%s%s: dropping %d bytes; %s",
          (this->status == NULL ? "(ignoring error) " : ""),
          fname, static_cast<int>(bytes), s.ToString().c_str());
      if (this->status != NULL && this->status->ok()) *this->status = s;
    }
  };

  mutex_.AssertHeld();

  // Open the log file
  std::string fname = LogFileName(dbname_, log_number);
  SequentialFile* file;
  Status status = env_->NewSequentialFile(fname, &file);
  if (!status.ok()) {
    MaybeIgnoreError(&status);
    return status;
  }

  // Create the log reader.
  LogReporter reporter;
  reporter.env = env_;
  reporter.info_log = options_.info_log;
  reporter.fname = fname.c_str();
  reporter.status = (options_.paranoid_checks ? &status : NULL);
  // We intentionally make log::Reader do checksumming even if
  // paranoid_checks==false so that corruptions cause entire commits
  // to be skipped instead of propagating bad information (like overly
  // large sequence numbers).
  log::Reader reader(file, &reporter, true/*checksum*/,
                     0/*initial_offset*/);
  Log(options_.info_log, "Recovering log #%llu",
      (unsigned long long) log_number);

  // Read all the records and add to a memtable
  std::string scratch;
  Slice record;
  WriteBatch batch;
  int compactions = 0;
  MemTable* mem = NULL;
  while (reader.ReadRecord(&record, &scratch) &&
         status.ok()) {
    if (record.size() < 12) {
      reporter.Corruption(
          record.size(), Status::Corruption("log record too small"));
      continue;
    }
    WriteBatchInternal::SetContents(&batch, record);

    if (mem == NULL) {
      mem = new MemTable(internal_comparator_);
      mem->Ref();
    }
    status = WriteBatchInternal::InsertInto(&batch, mem);
    MaybeIgnoreError(&status);
    if (!status.ok()) {
      break;
    }
    const SequenceNumber last_seq =
        WriteBatchInternal::Sequence(&batch) +
        WriteBatchInternal::Count(&batch) - 1;
    if (last_seq > *max_sequence) {
      *max_sequence = last_seq;
    }

    if (mem->ApproximateMemoryUsage() > options_.write_buffer_size) {
      compactions++;
      *save_manifest = true;
      status = WriteLevel0Table(mem, edit, NULL);
      mem->Unref();
      mem = NULL;
      if (!status.ok()) {
        // Reflect errors immediately so that conditions like full
        // file-systems cause the DB::Open() to fail.
        break;
      }
    }
  }

  delete file;

  // See if we should keep reusing the last log file.
  if (status.ok() && options_.reuse_logs && last_log && compactions == 0) {
    assert(logfile_ == NULL);
    assert(log_ == NULL);
    assert(mem_ == NULL);
    uint64_t lfile_size;
    if (env_->GetFileSize(fname, &lfile_size).ok() &&
        env_->NewAppendableFile(fname, &logfile_).ok()) {
      Log(options_.info_log, "Reusing old log %s \n", fname.c_str());
      log_ = new log::Writer(logfile_, lfile_size);
      logfile_number_ = log_number;
      if (mem != NULL) {
        mem_ = mem;
        mem = NULL;
      } else {
        // mem can be NULL if lognum exists but was empty.
        mem_ = new MemTable(internal_comparator_);
        mem_->Ref();
      }
    }
  }

  if (mem != NULL) {
    // mem did not get reused; compact it.
    if (status.ok()) {
      *save_manifest = true;
      status = WriteLevel0Table(mem, edit, NULL);
    }
    mem->Unref();
  }

  return status;
}

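// Build a table file from the contents of *mem and record it in *edit.
// If base is non-NULL it is used to pick a level (possibly higher than 0)
// into which the new table can be placed without excessive overlap.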
Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit,
                                Version* base) {
  mutex_.AssertHeld();
  const uint64_t start_micros = env_->NowMicros();
  FileMetaData meta;
  meta.number = versions_->NewFileNumber();
  pending_outputs_.insert(meta.number);
  Iterator* iter = mem->NewIterator();
  Log(options_.info_log, "Level-0 table #%llu: started",
      (unsigned long long) meta.number);

  Status s;
  {
    mutex_.Unlock();
    s = BuildTable(dbname_, env_, options_, table_cache_, iter, &meta);
    mutex_.Lock();
  }

  Log(options_.info_log, "Level-0 table #%llu: %lld bytes %s",
      (unsigned long long) meta.number,
      (unsigned long long) meta.file_size,
      s.ToString().c_str());
  delete iter;
  pending_outputs_.erase(meta.number);

  // Note that if file_size is zero, the file has been deleted and
  // should not be added to the manifest.
  int level = 0;
  if (s.ok() && meta.file_size > 0) {
    const Slice min_user_key = meta.smallest.user_key();
    const Slice max_user_key = meta.largest.user_key();
    if (base != NULL) {
      level = base->PickLevelForMemTableOutput(min_user_key, max_user_key);
    }
    edit->AddFile(level, meta.number, meta.file_size,
                  meta.smallest, meta.largest);
  }

  CompactionStats stats;
  stats.micros = env_->NowMicros() - start_micros;
  stats.bytes_written = meta.file_size;
  stats_[level].Add(stats);
  return s;
}

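// Convert the immutable memtable (imm_) to an on-disk table, install the
// result via a VersionEdit, and release the memtable and obsolete files.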
void DBImpl::CompactMemTable() {
  mutex_.AssertHeld();
  assert(imm_ != NULL);

  // Save the contents of the memtable as a new Table
  VersionEdit edit;
  Version* base = versions_->current();
  base->Ref();
  Status s = WriteLevel0Table(imm_, &edit, base);
  base->Unref();

  if (s.ok() && shutting_down_.Acquire_Load()) {
    s = Status::IOError("Deleting DB during memtable compaction");
  }

  // Replace immutable memtable with the generated Table
  if (s.ok()) {
    edit.SetPrevLogNumber(0);
    edit.SetLogNumber(logfile_number_);  // Earlier logs no longer needed
    s = versions_->LogAndApply(&edit, &mutex_);
  }

  if (s.ok()) {
    // Commit to the new state
    imm_->Unref();
    imm_ = NULL;
    has_imm_.Release_Store(NULL);
    DeleteObsoleteFiles();
  } else {
    RecordBackgroundError(s);
  }
}

void DBImpl::CompactRange(const Slice* begin, const Slice* end) {
  int max_level_with_files = 1;
  {
    MutexLock l(&mutex_);
    Version* base = versions_->current();
    for (int level = 1; level < config::kNumLevels; level++) {
      if (base->OverlapInLevel(level, begin, end)) {
        max_level_with_files = level;
      }
    }
  }
  TEST_CompactMemTable();  // TODO(sanjay): Skip if memtable does not overlap
  for (int level = 0; level < max_level_with_files; level++) {
    TEST_CompactRange(level, begin, end);
  }
}

void DBImpl::TEST_CompactRange(int level, const Slice* begin,
                               const Slice* end) {
  assert(level >= 0);
  assert(level + 1 < config::kNumLevels);

  InternalKey begin_storage, end_storage;

  ManualCompaction manual;
  manual.level = level;
  manual.done = false;
  if (begin == NULL) {
    manual.begin = NULL;
  } else {
    begin_storage = InternalKey(*begin, kMaxSequenceNumber, kValueTypeForSeek);
    manual.begin = &begin_storage;
  }
  if (end == NULL) {
    manual.end = NULL;
  } else {
    end_storage = InternalKey(*end, 0, static_cast<ValueType>(0));
    manual.end = &end_storage;
  }

  MutexLock l(&mutex_);
  while (!manual.done && !shutting_down_.Acquire_Load() && bg_error_.ok()) {
    if (manual_compaction_ == NULL) {  // Idle
      manual_compaction_ = &manual;
      MaybeScheduleCompaction();
    } else {  // Running either my compaction or another compaction.
      bg_cv_.Wait();
    }
  }
  if (manual_compaction_ == &manual) {
    // Cancel my manual compaction since we aborted early for some reason.
    manual_compaction_ = NULL;
  }
}

Status DBImpl::TEST_CompactMemTable() {
  // NULL batch means just wait for earlier writes to be done
  Status s = Write(WriteOptions(), NULL);
  if (s.ok()) {
    // Wait until the compaction completes
    MutexLock l(&mutex_);
    while (imm_ != NULL && bg_error_.ok()) {
      bg_cv_.Wait();
    }
    if (imm_ != NULL) {
      s = bg_error_;
    }
  }
  return s;
}

void DBImpl::RecordBackgroundError(const Status& s) {
  mutex_.AssertHeld();
  if (bg_error_.ok()) {
    bg_error_ = s;
    bg_cv_.SignalAll();
  }
}

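// Schedule a background compaction if one is needed (an immutable memtable
// to flush, a pending manual compaction, or a level that needs compacting)
// and none is already queued.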
void DBImpl::MaybeScheduleCompaction() {
  mutex_.AssertHeld();
  if (bg_compaction_scheduled_) {
    // Already scheduled
  } else if (shutting_down_.Acquire_Load()) {
    // DB is being deleted; no more background compactions
  } else if (!bg_error_.ok()) {
    // Already got an error; no more changes
  } else if (imm_ == NULL &&
             manual_compaction_ == NULL &&
             !versions_->NeedsCompaction()) {
    // No work to be done
  } else {
    bg_compaction_scheduled_ = true;
    env_->Schedule(&DBImpl::BGWork, this);
  }
}

void DBImpl::BGWork(void* db) {
  reinterpret_cast<DBImpl*>(db)->BackgroundCall();
}

void DBImpl::BackgroundCall() {
  MutexLock l(&mutex_);
  assert(bg_compaction_scheduled_);
  if (shutting_down_.Acquire_Load()) {
    // No more background work when shutting down.
  } else if (!bg_error_.ok()) {
    // No more background work after a background error.
  } else {
    BackgroundCompaction();
  }

  bg_compaction_scheduled_ = false;

  // Previous compaction may have produced too many files in a level,
  // so reschedule another compaction if needed.
  MaybeScheduleCompaction();
  bg_cv_.SignalAll();
}

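// Perform one unit of background work: flush the immutable memtable if
// present, otherwise pick a compaction (manual or automatic) and either
// move a single file to the next level or run a full merging compaction.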
void DBImpl::BackgroundCompaction() {
  mutex_.AssertHeld();

  if (imm_ != NULL) {
    CompactMemTable();
    return;
  }

  Compaction* c;
  bool is_manual = (manual_compaction_ != NULL);
  InternalKey manual_end;
  if (is_manual) {
    ManualCompaction* m = manual_compaction_;
    c = versions_->CompactRange(m->level, m->begin, m->end);
    m->done = (c == NULL);
    if (c != NULL) {
      manual_end = c->input(0, c->num_input_files(0) - 1)->largest;
    }
    Log(options_.info_log,
        "Manual compaction at level-%d from %s .. %s; will stop at %s\n",
        m->level,
        (m->begin ? m->begin->DebugString().c_str() : "(begin)"),
        (m->end ? m->end->DebugString().c_str() : "(end)"),
        (m->done ? "(end)" : manual_end.DebugString().c_str()));
  } else {
    c = versions_->PickCompaction();
  }

  Status status;
  if (c == NULL) {
    // Nothing to do
  } else if (!is_manual && c->IsTrivialMove()) {
    // Move file to next level
    assert(c->num_input_files(0) == 1);
    FileMetaData* f = c->input(0, 0);
    c->edit()->DeleteFile(c->level(), f->number);
    c->edit()->AddFile(c->level() + 1, f->number, f->file_size,
                       f->smallest, f->largest);
    status = versions_->LogAndApply(c->edit(), &mutex_);
    if (!status.ok()) {
      RecordBackgroundError(status);
    }
    VersionSet::LevelSummaryStorage tmp;
    Log(options_.info_log, "Moved #%lld to level-%d %lld bytes %s: %s\n",
        static_cast<unsigned long long>(f->number),
        c->level() + 1,
        static_cast<unsigned long long>(f->file_size),
        status.ToString().c_str(),
        versions_->LevelSummary(&tmp));
  } else {
    CompactionState* compact = new CompactionState(c);
    status = DoCompactionWork(compact);
    if (!status.ok()) {
      RecordBackgroundError(status);
    }
    CleanupCompaction(compact);
    c->ReleaseInputs();
    DeleteObsoleteFiles();
  }
  delete c;

  if (status.ok()) {
    // Done
  } else if (shutting_down_.Acquire_Load()) {
    // Ignore compaction errors found during shutting down
  } else {
    Log(options_.info_log,
        "Compaction error: %s", status.ToString().c_str());
  }

  if (is_manual) {
    ManualCompaction* m = manual_compaction_;
    if (!status.ok()) {
      m->done = true;
    }
    if (!m->done) {
      // We only compacted part of the requested range.  Update *m
      // to the range that is left to be compacted.
      m->tmp_storage = manual_end;
      m->begin = &m->tmp_storage;
    }
    manual_compaction_ = NULL;
  }
}

void DBImpl::CleanupCompaction(CompactionState* compact) {
  mutex_.AssertHeld();
  if (compact->builder != NULL) {
    // May happen if we get a shutdown call in the middle of compaction
    compact->builder->Abandon();
    delete compact->builder;
  } else {
    assert(compact->outfile == NULL);
  }
  delete compact->outfile;
  for (size_t i = 0; i < compact->outputs.size(); i++) {
    const CompactionState::Output& out = compact->outputs[i];
    pending_outputs_.erase(out.number);
  }
  delete compact;
}

Status DBImpl::OpenCompactionOutputFile(CompactionState* compact) {
  assert(compact != NULL);
  assert(compact->builder == NULL);
  uint64_t file_number;
  {
    mutex_.Lock();
    file_number = versions_->NewFileNumber();
    pending_outputs_.insert(file_number);
    CompactionState::Output out;
    out.number = file_number;
    out.smallest.Clear();
    out.largest.Clear();
    compact->outputs.push_back(out);
    mutex_.Unlock();
  }

  // Make the output file
  std::string fname = TableFileName(dbname_, file_number);
  Status s = env_->NewWritableFile(fname, &compact->outfile);
  if (s.ok()) {
    compact->builder = new TableBuilder(options_, compact->outfile);
  }
  return s;
}

Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
                                          Iterator* input) {
  assert(compact != NULL);
  assert(compact->outfile != NULL);
  assert(compact->builder != NULL);

  const uint64_t output_number = compact->current_output()->number;
  assert(output_number != 0);

  // Check for iterator errors
  Status s = input->status();
  const uint64_t current_entries = compact->builder->NumEntries();
  if (s.ok()) {
    s = compact->builder->Finish();
  } else {
    compact->builder->Abandon();
  }
  const uint64_t current_bytes = compact->builder->FileSize();
  compact->current_output()->file_size = current_bytes;
  compact->total_bytes += current_bytes;
  delete compact->builder;
  compact->builder = NULL;

  // Finish and check for file errors
  if (s.ok()) {
    s = compact->outfile->Sync();
  }
  if (s.ok()) {
    s = compact->outfile->Close();
  }
  delete compact->outfile;
  compact->outfile = NULL;

  if (s.ok() && current_entries > 0) {
    // Verify that the table is usable
    Iterator* iter = table_cache_->NewIterator(ReadOptions(),
                                               output_number,
                                               current_bytes);
    s = iter->status();
    delete iter;
    if (s.ok()) {
      Log(options_.info_log,
          "Generated table #%llu: %lld keys, %lld bytes",
          (unsigned long long) output_number,
          (unsigned long long) current_entries,
          (unsigned long long) current_bytes);
    }
  }
  return s;
}

Status DBImpl::InstallCompactionResults(CompactionState* compact) {
  mutex_.AssertHeld();
  Log(options_.info_log, "Compacted %d@%d + %d@%d files => %lld bytes",
      compact->compaction->num_input_files(0),
      compact->compaction->level(),
      compact->compaction->num_input_files(1),
      compact->compaction->level() + 1,
      static_cast<long long>(compact->total_bytes));

  // Add compaction outputs
  compact->compaction->AddInputDeletions(compact->compaction->edit());
  const int level = compact->compaction->level();
  for (size_t i = 0; i < compact->outputs.size(); i++) {
    const CompactionState::Output& out = compact->outputs[i];
    compact->compaction->edit()->AddFile(
        level + 1,
        out.number, out.file_size, out.smallest, out.largest);
  }
  return versions_->LogAndApply(compact->compaction->edit(), &mutex_);
}

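// Merge the input files of a compaction into a sequence of new table files,
// dropping overwritten and obsolete entries while preserving anything that
// may still be visible to a live snapshot, then install the results.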
Status DBImpl::DoCompactionWork(CompactionState* compact) {
  const uint64_t start_micros = env_->NowMicros();
  int64_t imm_micros = 0;  // Micros spent doing imm_ compactions

  Log(options_.info_log, "Compacting %d@%d + %d@%d files",
      compact->compaction->num_input_files(0),
      compact->compaction->level(),
      compact->compaction->num_input_files(1),
      compact->compaction->level() + 1);

  assert(versions_->NumLevelFiles(compact->compaction->level()) > 0);
  assert(compact->builder == NULL);
  assert(compact->outfile == NULL);
  if (snapshots_.empty()) {
    compact->smallest_snapshot = versions_->LastSequence();
  } else {
    compact->smallest_snapshot = snapshots_.oldest()->number_;
  }

  // Release mutex while we're actually doing the compaction work
  mutex_.Unlock();

  Iterator* input = versions_->MakeInputIterator(compact->compaction);
  input->SeekToFirst();
  Status status;
  ParsedInternalKey ikey;
  std::string current_user_key;
  bool has_current_user_key = false;
  SequenceNumber last_sequence_for_key = kMaxSequenceNumber;
  for (; input->Valid() && !shutting_down_.Acquire_Load(); ) {
    // Prioritize immutable compaction work
    if (has_imm_.NoBarrier_Load() != NULL) {
      const uint64_t imm_start = env_->NowMicros();
      mutex_.Lock();
      if (imm_ != NULL) {
        CompactMemTable();
        bg_cv_.SignalAll();  // Wakeup MakeRoomForWrite() if necessary
      }
      mutex_.Unlock();
      imm_micros += (env_->NowMicros() - imm_start);
    }

    Slice key = input->key();
    if (compact->compaction->ShouldStopBefore(key) &&
        compact->builder != NULL) {
      status = FinishCompactionOutputFile(compact, input);
      if (!status.ok()) {
        break;
      }
    }

    // Handle key/value, add to state, etc.
    bool drop = false;
    if (!ParseInternalKey(key, &ikey)) {
      // Do not hide error keys
      current_user_key.clear();
      has_current_user_key = false;
      last_sequence_for_key = kMaxSequenceNumber;
    } else {
      if (!has_current_user_key ||
          user_comparator()->Compare(ikey.user_key,
                                     Slice(current_user_key)) != 0) {
        // First occurrence of this user key
        current_user_key.assign(ikey.user_key.data(), ikey.user_key.size());
        has_current_user_key = true;
        last_sequence_for_key = kMaxSequenceNumber;
      }

      if (last_sequence_for_key <= compact->smallest_snapshot) {
        // Hidden by a newer entry for same user key
        drop = true;    // (A)
      } else if (ikey.type == kTypeDeletion &&
                 ikey.sequence <= compact->smallest_snapshot &&
                 compact->compaction->IsBaseLevelForKey(ikey.user_key)) {
        // For this user key:
        // (1) there is no data in higher levels
        // (2) data in lower levels will have larger sequence numbers
        // (3) data in layers that are being compacted here and have
        //     smaller sequence numbers will be dropped in the next
        //     few iterations of this loop (by rule (A) above).
        // Therefore this deletion marker is obsolete and can be dropped.
        drop = true;
      }

      last_sequence_for_key = ikey.sequence;
    }

#if 0
    Log(options_.info_log,
        "  Compact: %s, seq %d, type: %d %d, drop: %d, is_base: %d, "
        "%d smallest_snapshot: %d",
        ikey.user_key.ToString().c_str(),
        (int)ikey.sequence, ikey.type, kTypeValue, drop,
        compact->compaction->IsBaseLevelForKey(ikey.user_key),
        (int)last_sequence_for_key, (int)compact->smallest_snapshot);
#endif

    if (!drop) {
      // Open output file if necessary
      if (compact->builder == NULL) {
        status = OpenCompactionOutputFile(compact);
        if (!status.ok()) {
          break;
        }
      }
      if (compact->builder->NumEntries() == 0) {
        compact->current_output()->smallest.DecodeFrom(key);
      }
      compact->current_output()->largest.DecodeFrom(key);
      compact->builder->Add(key, input->value());

      // Close output file if it is big enough
      if (compact->builder->FileSize() >=
          compact->compaction->MaxOutputFileSize()) {
        status = FinishCompactionOutputFile(compact, input);
        if (!status.ok()) {
          break;
        }
      }
    }

    input->Next();
  }

  if (status.ok() && shutting_down_.Acquire_Load()) {
    status = Status::IOError("Deleting DB during compaction");
  }
  if (status.ok() && compact->builder != NULL) {
    status = FinishCompactionOutputFile(compact, input);
  }
  if (status.ok()) {
    status = input->status();
  }
  delete input;
  input = NULL;

  CompactionStats stats;
  stats.micros = env_->NowMicros() - start_micros - imm_micros;
  for (int which = 0; which < 2; which++) {
    for (int i = 0; i < compact->compaction->num_input_files(which); i++) {
      stats.bytes_read += compact->compaction->input(which, i)->file_size;
    }
  }
  for (size_t i = 0; i < compact->outputs.size(); i++) {
    stats.bytes_written += compact->outputs[i].file_size;
  }

  mutex_.Lock();
  stats_[compact->compaction->level() + 1].Add(stats);

  if (status.ok()) {
    status = InstallCompactionResults(compact);
  }
  if (!status.ok()) {
    RecordBackgroundError(status);
  }
  VersionSet::LevelSummaryStorage tmp;
  Log(options_.info_log,
      "compacted to: %s", versions_->LevelSummary(&tmp));
  return status;
}

namespace {
struct IterState {
  port::Mutex* mu;
  Version* version;
  MemTable* mem;
  MemTable* imm;
};

static void CleanupIteratorState(void* arg1, void* arg2) {
  IterState* state = reinterpret_cast<IterState*>(arg1);
  state->mu->Lock();
  state->mem->Unref();
  if (state->imm != NULL) state->imm->Unref();
  state->version->Unref();
  state->mu->Unlock();
  delete state;
}
}  // namespace

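// Build a merging iterator over the current memtable, the immutable memtable
// (if any), and the tables of the current version.  The referenced state is
// released by CleanupIteratorState when the iterator is destroyed.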
Iterator* DBImpl::NewInternalIterator(const ReadOptions& options,
                                      SequenceNumber* latest_snapshot,
                                      uint32_t* seed) {
  IterState* cleanup = new IterState;
  mutex_.Lock();
  *latest_snapshot = versions_->LastSequence();

  // Collect together all needed child iterators
  std::vector<Iterator*> list;
  list.push_back(mem_->NewIterator());
  mem_->Ref();
  if (imm_ != NULL) {
    list.push_back(imm_->NewIterator());
    imm_->Ref();
  }
  versions_->current()->AddIterators(options, &list);
  Iterator* internal_iter =
      NewMergingIterator(&internal_comparator_, &list[0], list.size());
  versions_->current()->Ref();

  cleanup->mu = &mutex_;
  cleanup->mem = mem_;
  cleanup->imm = imm_;
  cleanup->version = versions_->current();
  internal_iter->RegisterCleanup(CleanupIteratorState, cleanup, NULL);

  *seed = ++seed_;
  mutex_.Unlock();
  return internal_iter;
}

Iterator* DBImpl::TEST_NewInternalIterator() {
  SequenceNumber ignored;
  uint32_t ignored_seed;
  return NewInternalIterator(ReadOptions(), &ignored, &ignored_seed);
}

int64_t DBImpl::TEST_MaxNextLevelOverlappingBytes() {
  MutexLock l(&mutex_);
  return versions_->MaxNextLevelOverlappingBytes();
}

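// Look up a single key: consult the memtable first, then the immutable
// memtable, then the tables in the current version.  The mutex is released
// while reading from files and memtables so lookups do not block writers.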
Status DBImpl::Get(const ReadOptions& options,
                   const Slice& key,
                   std::string* value) {
  Status s;
  MutexLock l(&mutex_);
  SequenceNumber snapshot;
  if (options.snapshot != NULL) {
    snapshot = reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_;
  } else {
    snapshot = versions_->LastSequence();
  }

  MemTable* mem = mem_;
  MemTable* imm = imm_;
  Version* current = versions_->current();
  mem->Ref();
  if (imm != NULL) imm->Ref();
  current->Ref();

  bool have_stat_update = false;
  Version::GetStats stats;

  // Unlock while reading from files and memtables
  {
    mutex_.Unlock();
    // First look in the memtable, then in the immutable memtable (if any).
    LookupKey lkey(key, snapshot);
    if (mem->Get(lkey, value, &s)) {
      // Done
    } else if (imm != NULL && imm->Get(lkey, value, &s)) {
      // Done
    } else {
      s = current->Get(options, lkey, value, &stats);
      have_stat_update = true;
    }
    mutex_.Lock();
  }

  if (have_stat_update && current->UpdateStats(stats)) {
    MaybeScheduleCompaction();
  }
  mem->Unref();
  if (imm != NULL) imm->Unref();
  current->Unref();
  return s;
}

Iterator* DBImpl::NewIterator(const ReadOptions& options) {
  SequenceNumber latest_snapshot;
  uint32_t seed;
  Iterator* iter = NewInternalIterator(options, &latest_snapshot, &seed);
  return NewDBIterator(
      this, user_comparator(), iter,
      (options.snapshot != NULL
       ? reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_
       : latest_snapshot),
      seed);
}

void DBImpl::RecordReadSample(Slice key) {
  MutexLock l(&mutex_);
  if (versions_->current()->RecordReadSample(key)) {
    MaybeScheduleCompaction();
  }
}

const Snapshot* DBImpl::GetSnapshot() {
  MutexLock l(&mutex_);
  return snapshots_.New(versions_->LastSequence());
}

void DBImpl::ReleaseSnapshot(const Snapshot* s) {
  MutexLock l(&mutex_);
  snapshots_.Delete(reinterpret_cast<const SnapshotImpl*>(s));
}

// Convenience methods
Status DBImpl::Put(const WriteOptions& o, const Slice& key, const Slice& val) {
  return DB::Put(o, key, val);
}

Status DBImpl::Delete(const WriteOptions& options, const Slice& key) {
  return DB::Delete(options, key);
}

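// Apply a batch of updates (or, if my_batch is NULL, just wait for room and
// for earlier writes to finish).  Writers queue up on writers_; the writer
// at the front may fold several queued batches into one group, append the
// group to the log, and apply it to the memtable before signalling the
// writers whose batches were included.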
Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
  Writer w(&mutex_);
  w.batch = my_batch;
  w.sync = options.sync;
  w.done = false;

  MutexLock l(&mutex_);
  writers_.push_back(&w);
  while (!w.done && &w != writers_.front()) {
    w.cv.Wait();
  }
  if (w.done) {
    return w.status;
  }

  // May temporarily unlock and wait.
  Status status = MakeRoomForWrite(my_batch == NULL);
  uint64_t last_sequence = versions_->LastSequence();
  Writer* last_writer = &w;
  if (status.ok() && my_batch != NULL) {  // NULL batch is for compactions
    WriteBatch* updates = BuildBatchGroup(&last_writer);
    WriteBatchInternal::SetSequence(updates, last_sequence + 1);
    last_sequence += WriteBatchInternal::Count(updates);

    // Add to log and apply to memtable.  We can release the lock
    // during this phase since &w is currently responsible for logging
    // and protects against concurrent loggers and concurrent writes
    // into mem_.
    {
      mutex_.Unlock();
      status = log_->AddRecord(WriteBatchInternal::Contents(updates));
      bool sync_error = false;
      if (status.ok() && options.sync) {
        status = logfile_->Sync();
        if (!status.ok()) {
          sync_error = true;
        }
      }
      if (status.ok()) {
        status = WriteBatchInternal::InsertInto(updates, mem_);
      }
      mutex_.Lock();
      if (sync_error) {
        // The state of the log file is indeterminate: the log record we
        // just added may or may not show up when the DB is re-opened.
        // So we force the DB into a mode where all future writes fail.
        RecordBackgroundError(status);
      }
    }
    if (updates == tmp_batch_) tmp_batch_->Clear();

    versions_->SetLastSequence(last_sequence);
  }

  while (true) {
    Writer* ready = writers_.front();
    writers_.pop_front();
    if (ready != &w) {
      ready->status = status;
      ready->done = true;
      ready->cv.Signal();
    }
    if (ready == last_writer) break;
  }

  // Notify new head of write queue
  if (!writers_.empty()) {
    writers_.front()->cv.Signal();
  }

  return status;
}

// REQUIRES: Writer list must be non-empty
// REQUIRES: First writer must have a non-NULL batch
WriteBatch* DBImpl::BuildBatchGroup(Writer** last_writer) {
  assert(!writers_.empty());
  Writer* first = writers_.front();
  WriteBatch* result = first->batch;
  assert(result != NULL);

  size_t size = WriteBatchInternal::ByteSize(first->batch);

  // Allow the group to grow up to a maximum size, but if the
  // original write is small, limit the growth so we do not slow
  // down the small write too much.
  size_t max_size = 1 << 20;
  if (size <= (128<<10)) {
    max_size = size + (128<<10);
  }

  *last_writer = first;
  std::deque<Writer*>::iterator iter = writers_.begin();
  ++iter;  // Advance past "first"
  for (; iter != writers_.end(); ++iter) {
    Writer* w = *iter;
    if (w->sync && !first->sync) {
      // Do not include a sync write into a batch handled by a non-sync write.
      break;
    }

    if (w->batch != NULL) {
      size += WriteBatchInternal::ByteSize(w->batch);
      if (size > max_size) {
        // Do not make batch too big
        break;
      }

      // Append to *result
      if (result == first->batch) {
        // Switch to temporary batch instead of disturbing caller's batch
        result = tmp_batch_;
        assert(WriteBatchInternal::Count(result) == 0);
        WriteBatchInternal::Append(result, first->batch);
      }
      WriteBatchInternal::Append(result, w->batch);
    }
    *last_writer = w;
  }
  return result;
}

// REQUIRES: mutex_ is held
// REQUIRES: this thread is currently at the front of the writer queue
Status DBImpl::MakeRoomForWrite(bool force) {
  mutex_.AssertHeld();
  assert(!writers_.empty());
  bool allow_delay = !force;
  Status s;
  while (true) {
    if (!bg_error_.ok()) {
      // Yield previous error
      s = bg_error_;
      break;
    } else if (
        allow_delay &&
        versions_->NumLevelFiles(0) >= config::kL0_SlowdownWritesTrigger) {
      // We are getting close to hitting a hard limit on the number of
      // L0 files.  Rather than delaying a single write by several
      // seconds when we hit the hard limit, start delaying each
      // individual write by 1ms to reduce latency variance.  Also,
      // this delay hands over some CPU to the compaction thread in
      // case it is sharing the same core as the writer.
      mutex_.Unlock();
      env_->SleepForMicroseconds(1000);
      allow_delay = false;  // Do not delay a single write more than once
      mutex_.Lock();
    } else if (!force &&
               (mem_->ApproximateMemoryUsage() <= options_.write_buffer_size)) {
      // There is room in current memtable
      break;
    } else if (imm_ != NULL) {
      // We have filled up the current memtable, but the previous
      // one is still being compacted, so we wait.
      Log(options_.info_log, "Current memtable full; waiting...\n");
      bg_cv_.Wait();
    } else if (versions_->NumLevelFiles(0) >= config::kL0_StopWritesTrigger) {
      // There are too many level-0 files.
      Log(options_.info_log, "Too many L0 files; waiting...\n");
      bg_cv_.Wait();
    } else {
      // Attempt to switch to a new memtable and trigger compaction of old
      assert(versions_->PrevLogNumber() == 0);
      uint64_t new_log_number = versions_->NewFileNumber();
      WritableFile* lfile = NULL;
      s = env_->NewWritableFile(LogFileName(dbname_, new_log_number), &lfile);
      if (!s.ok()) {
        // Avoid chewing through file number space in a tight loop.
        versions_->ReuseFileNumber(new_log_number);
        break;
      }
      delete log_;
      delete logfile_;
      logfile_ = lfile;
      logfile_number_ = new_log_number;
      log_ = new log::Writer(lfile);
      imm_ = mem_;
      has_imm_.Release_Store(imm_);
      mem_ = new MemTable(internal_comparator_);
      mem_->Ref();
      force = false;  // Do not force another compaction if have room
      MaybeScheduleCompaction();
    }
  }
  return s;
}

bool DBImpl::GetProperty(const Slice& property, std::string* value) {
  value->clear();

  MutexLock l(&mutex_);
  Slice in = property;
  Slice prefix("leveldb.");
  if (!in.starts_with(prefix)) return false;
  in.remove_prefix(prefix.size());

  if (in.starts_with("num-files-at-level")) {
    in.remove_prefix(strlen("num-files-at-level"));
    uint64_t level;
    bool ok = ConsumeDecimalNumber(&in, &level) && in.empty();
    if (!ok || level >= config::kNumLevels) {
      return false;
    } else {
      char buf[100];
      snprintf(buf, sizeof(buf), "%d",
               versions_->NumLevelFiles(static_cast<int>(level)));
      *value = buf;
      return true;
    }
  } else if (in == "stats") {
    char buf[200];
    snprintf(buf, sizeof(buf),
             "                               Compactions\n"
             "Level  Files Size(MB) Time(sec) Read(MB) Write(MB)\n"
             "--------------------------------------------------\n"
             );
    value->append(buf);
    for (int level = 0; level < config::kNumLevels; level++) {
      int files = versions_->NumLevelFiles(level);
      if (stats_[level].micros > 0 || files > 0) {
        snprintf(
            buf, sizeof(buf),
            "%3d %8d %8.0f %9.0f %8.0f %9.0f\n",
            level,
            files,
            versions_->NumLevelBytes(level) / 1048576.0,
            stats_[level].micros / 1e6,
            stats_[level].bytes_read / 1048576.0,
            stats_[level].bytes_written / 1048576.0);
        value->append(buf);
      }
    }
    return true;
  } else if (in == "sstables") {
    *value = versions_->current()->DebugString();
    return true;
  } else if (in == "approximate-memory-usage") {
    size_t total_usage = options_.block_cache->TotalCharge();
    if (mem_) {
      total_usage += mem_->ApproximateMemoryUsage();
    }
    if (imm_) {
      total_usage += imm_->ApproximateMemoryUsage();
    }
    char buf[50];
    snprintf(buf, sizeof(buf), "%llu",
             static_cast<unsigned long long>(total_usage));
    value->append(buf);
    return true;
  }

  return false;
}

void DBImpl::GetApproximateSizes(
    const Range* range, int n,
    uint64_t* sizes) {
  // TODO(opt): better implementation
  Version* v;
  {
    MutexLock l(&mutex_);
    versions_->current()->Ref();
    v = versions_->current();
  }

  for (int i = 0; i < n; i++) {
    // Convert user_key into a corresponding internal key.
    InternalKey k1(range[i].start, kMaxSequenceNumber, kValueTypeForSeek);
    InternalKey k2(range[i].limit, kMaxSequenceNumber, kValueTypeForSeek);
    uint64_t start = versions_->ApproximateOffsetOf(v, k1);
    uint64_t limit = versions_->ApproximateOffsetOf(v, k2);
    sizes[i] = (limit >= start ? limit - start : 0);
  }

  {
    MutexLock l(&mutex_);
    v->Unref();
  }
}

// Default implementations of convenience methods that subclasses of DB
// can call if they wish
Status DB::Put(const WriteOptions& opt, const Slice& key, const Slice& value) {
  WriteBatch batch;
  batch.Put(key, value);
  return Write(opt, &batch);
}

Status DB::Delete(const WriteOptions& opt, const Slice& key) {
  WriteBatch batch;
  batch.Delete(key);
  return Write(opt, &batch);
}

DB::~DB() { }

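// Open (and if necessary create) the database: recover any existing state,
// create a fresh log file and memtable if recovery did not reuse one,
// persist the recovered VersionEdit, and kick off any needed compaction.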
Status DB::Open(const Options& options, const std::string& dbname,
                DB** dbptr) {
  *dbptr = NULL;

  DBImpl* impl = new DBImpl(options, dbname);
  impl->mutex_.Lock();
  VersionEdit edit;
  // Recover handles create_if_missing, error_if_exists
  bool save_manifest = false;
  Status s = impl->Recover(&edit, &save_manifest);
  if (s.ok() && impl->mem_ == NULL) {
    // Create new log and a corresponding memtable.
    uint64_t new_log_number = impl->versions_->NewFileNumber();
    WritableFile* lfile;
    s = options.env->NewWritableFile(LogFileName(dbname, new_log_number),
                                     &lfile);
    if (s.ok()) {
      edit.SetLogNumber(new_log_number);
      impl->logfile_ = lfile;
      impl->logfile_number_ = new_log_number;
      impl->log_ = new log::Writer(lfile);
      impl->mem_ = new MemTable(impl->internal_comparator_);
      impl->mem_->Ref();
    }
  }
  if (s.ok() && save_manifest) {
    edit.SetPrevLogNumber(0);  // No older logs needed after recovery.
    edit.SetLogNumber(impl->logfile_number_);
    s = impl->versions_->LogAndApply(&edit, &impl->mutex_);
  }
  if (s.ok()) {
    impl->DeleteObsoleteFiles();
    impl->MaybeScheduleCompaction();
  }
  impl->mutex_.Unlock();
  if (s.ok()) {
    assert(impl->mem_ != NULL);
    *dbptr = impl;
  } else {
    delete impl;
  }
  return s;
}

Snapshot::~Snapshot() {
}

Status DestroyDB(const std::string& dbname, const Options& options) {
  Env* env = options.env;
  std::vector<std::string> filenames;
  // Ignore error in case directory does not exist
  env->GetChildren(dbname, &filenames);
  if (filenames.empty()) {
    return Status::OK();
  }

  FileLock* lock;
  const std::string lockname = LockFileName(dbname);
  Status result = env->LockFile(lockname, &lock);
  if (result.ok()) {
    uint64_t number;
    FileType type;
    for (size_t i = 0; i < filenames.size(); i++) {
      if (ParseFileName(filenames[i], &number, &type) &&
          type != kDBLockFile) {  // Lock file will be deleted at end
        Status del = env->DeleteFile(dbname + "/" + filenames[i]);
        if (result.ok() && !del.ok()) {
          result = del;
        }
      }
    }
    env->UnlockFile(lock);  // Ignore error since state is already gone
    env->DeleteFile(lockname);
    env->DeleteDir(dbname);  // Ignore error in case dir contains other files
  }
  return result;
}

}  // namespace leveldb