diff --git a/db/c.cc b/db/c.cc
index 30d624a6a45..51da66b8ba1 100644
--- a/db/c.cc
+++ b/db/c.cc
@@ -12,11 +12,6 @@
 #include "rocksdb/c.h"
 
 #include
-
-#include
-#include
-#include
-
 #include "port/port.h"
 #include "rocksdb/cache.h"
 #include "rocksdb/compaction_filter.h"
@@ -49,60 +44,43 @@
 #include "utilities/merge_operators.h"
 #include "utilities/rate_limiters/write_amp_based_rate_limiter.h"
 
-using rocksdb::BackupableDBOptions;
-using rocksdb::BackupEngine;
-using rocksdb::BackupID;
-using rocksdb::BackupInfo;
-using rocksdb::BatchResult;
-using rocksdb::BlockBasedTableOptions;
-using rocksdb::BottommostLevelCompaction;
+#include
+#include
+#include
+
 using rocksdb::BytewiseComparator;
 using rocksdb::Cache;
-using rocksdb::CheckPerfFlag;
-using rocksdb::Checkpoint;
 using rocksdb::ColumnFamilyDescriptor;
 using rocksdb::ColumnFamilyHandle;
 using rocksdb::ColumnFamilyOptions;
 using rocksdb::CompactionFilter;
 using rocksdb::CompactionFilterFactory;
 using rocksdb::CompactionOptionsFIFO;
-using rocksdb::CompactRangeOptions;
 using rocksdb::Comparator;
 using rocksdb::CompressionType;
-using rocksdb::CuckooTableOptions;
+using rocksdb::WALRecoveryMode;
 using rocksdb::DB;
 using rocksdb::DBOptions;
 using rocksdb::DbPath;
-using rocksdb::DisablePerfFlag;
-using rocksdb::EnablePerfFlag;
 using rocksdb::Env;
 using rocksdb::EnvOptions;
+using rocksdb::InfoLogLevel;
 using rocksdb::FileLock;
 using rocksdb::FilterPolicy;
 using rocksdb::FlushOptions;
-using rocksdb::InfoLogLevel;
 using rocksdb::IngestExternalFileOptions;
 using rocksdb::Iterator;
-using rocksdb::LiveFileMetaData;
 using rocksdb::Logger;
-using rocksdb::MemoryUtil;
 using rocksdb::MergeOperator;
 using rocksdb::MergeOperators;
 using rocksdb::NewBloomFilterPolicy;
-using rocksdb::NewGenericRateLimiter;
 using rocksdb::NewLRUCache;
-using rocksdb::NewWriteAmpBasedRateLimiter;
-using rocksdb::OptimisticTransactionDB;
-using rocksdb::OptimisticTransactionOptions;
 using rocksdb::Options;
-using rocksdb::PerfContext;
-using rocksdb::PerfLevel;
-using rocksdb::PinnableSlice;
+using rocksdb::BlockBasedTableOptions;
+using rocksdb::CuckooTableOptions;
 using rocksdb::RandomAccessFile;
 using rocksdb::Range;
-using rocksdb::RateLimiter;
 using rocksdb::ReadOptions;
-using rocksdb::RestoreOptions;
 using rocksdb::SequentialFile;
 using rocksdb::Slice;
 using rocksdb::SliceParts;
@@ -110,16 +88,37 @@ using rocksdb::SliceTransform;
 using rocksdb::Snapshot;
 using rocksdb::SstFileWriter;
 using rocksdb::Status;
-using rocksdb::Transaction;
-using rocksdb::TransactionDB;
-using rocksdb::TransactionDBOptions;
-using rocksdb::TransactionLogIterator;
-using rocksdb::TransactionOptions;
-using rocksdb::WALRecoveryMode;
 using rocksdb::WritableFile;
 using rocksdb::WriteBatch;
 using rocksdb::WriteBatchWithIndex;
 using rocksdb::WriteOptions;
+using rocksdb::LiveFileMetaData;
+using rocksdb::BackupEngine;
+using rocksdb::BackupableDBOptions;
+using rocksdb::BackupInfo;
+using rocksdb::BackupID;
+using rocksdb::RestoreOptions;
+using rocksdb::CompactRangeOptions;
+using rocksdb::BottommostLevelCompaction;
+using rocksdb::RateLimiter;
+using rocksdb::NewGenericRateLimiter;
+using rocksdb::NewWriteAmpBasedRateLimiter;
+using rocksdb::PinnableSlice;
+using rocksdb::TransactionDBOptions;
+using rocksdb::TransactionDB;
+using rocksdb::TransactionOptions;
+using rocksdb::OptimisticTransactionDB;
+using rocksdb::OptimisticTransactionOptions;
+using rocksdb::Transaction;
+using rocksdb::Checkpoint;
+using rocksdb::TransactionLogIterator;
+using rocksdb::BatchResult;
+using rocksdb::PerfLevel;
+using rocksdb::EnablePerfFlag;
+using rocksdb::DisablePerfFlag;
+using rocksdb::CheckPerfFlag;
+using rocksdb::PerfContext;
+using rocksdb::MemoryUtil;
 
 using std::shared_ptr;
 using std::vector;
diff --git a/db/column_family.cc b/db/column_family.cc
index 0da7524a841..2a5493e43cc 100644
--- a/db/column_family.cc
+++ b/db/column_family.cc
@@ -738,8 +738,7 @@ WriteStallCondition ColumnFamilyData::RecalculateWriteStallConditions(
     bool needed_delay = write_controller->NeedsDelay();
 
     if (write_stall_condition == WriteStallCondition::kStopped &&
-        write_stall_cause == WriteStallCause::kMemtableLimit &&
-        !mutable_cf_options.disable_write_stall) {
+        write_stall_cause == WriteStallCause::kMemtableLimit && !mutable_cf_options.disable_write_stall) {
       write_controller_token_ = write_controller->GetStopToken();
       internal_stats_->AddCFStats(InternalStats::MEMTABLE_LIMIT_STOPS, 1);
       ROCKS_LOG_WARN(
@@ -749,8 +748,7 @@ WriteStallCondition ColumnFamilyData::RecalculateWriteStallConditions(
           name_.c_str(), imm()->NumNotFlushed(),
           mutable_cf_options.max_write_buffer_number);
     } else if (write_stall_condition == WriteStallCondition::kStopped &&
-               write_stall_cause == WriteStallCause::kL0FileCountLimit &&
-               !mutable_cf_options.disable_write_stall) {
+               write_stall_cause == WriteStallCause::kL0FileCountLimit && !mutable_cf_options.disable_write_stall) {
       write_controller_token_ = write_controller->GetStopToken();
       internal_stats_->AddCFStats(InternalStats::L0_FILE_COUNT_LIMIT_STOPS, 1);
       if (compaction_picker_->IsLevel0CompactionInProgress()) {
@@ -761,8 +759,7 @@ WriteStallCondition ColumnFamilyData::RecalculateWriteStallConditions(
           "[%s] Stopping writes because we have %d level-0 files",
           name_.c_str(), vstorage->l0_delay_trigger_count());
     } else if (write_stall_condition == WriteStallCondition::kStopped &&
-               write_stall_cause == WriteStallCause::kPendingCompactionBytes &&
-               !mutable_cf_options.disable_write_stall) {
+               write_stall_cause == WriteStallCause::kPendingCompactionBytes && !mutable_cf_options.disable_write_stall) {
       write_controller_token_ = write_controller->GetStopToken();
       internal_stats_->AddCFStats(
           InternalStats::PENDING_COMPACTION_BYTES_LIMIT_STOPS, 1);
@@ -772,8 +769,7 @@ WriteStallCondition ColumnFamilyData::RecalculateWriteStallConditions(
           "bytes %" PRIu64,
           name_.c_str(), compaction_needed_bytes);
     } else if (write_stall_condition == WriteStallCondition::kDelayed &&
-               write_stall_cause == WriteStallCause::kMemtableLimit &&
-               !mutable_cf_options.disable_write_stall) {
+               write_stall_cause == WriteStallCause::kMemtableLimit && !mutable_cf_options.disable_write_stall) {
       write_controller_token_ =
           SetupDelay(write_controller, compaction_needed_bytes,
                      prev_compaction_needed_bytes_, was_stopped,
@@ -788,8 +784,7 @@ WriteStallCondition ColumnFamilyData::RecalculateWriteStallConditions(
           mutable_cf_options.max_write_buffer_number,
           write_controller->delayed_write_rate());
     } else if (write_stall_condition == WriteStallCondition::kDelayed &&
-               write_stall_cause == WriteStallCause::kL0FileCountLimit &&
-               !mutable_cf_options.disable_write_stall) {
+               write_stall_cause == WriteStallCause::kL0FileCountLimit && !mutable_cf_options.disable_write_stall) {
       // L0 is the last two files from stopping.
       bool near_stop = vstorage->l0_delay_trigger_count() >=
                        mutable_cf_options.level0_stop_writes_trigger - 2;
@@ -809,8 +804,7 @@ WriteStallCondition ColumnFamilyData::RecalculateWriteStallConditions(
           name_.c_str(), vstorage->l0_delay_trigger_count(),
           write_controller->delayed_write_rate());
     } else if (write_stall_condition == WriteStallCondition::kDelayed &&
-               write_stall_cause == WriteStallCause::kPendingCompactionBytes &&
-               !mutable_cf_options.disable_write_stall) {
+               write_stall_cause == WriteStallCause::kPendingCompactionBytes && !mutable_cf_options.disable_write_stall) {
       // If the distance to hard limit is less than 1/4 of the gap between soft
       // and
       // hard bytes limit, we think it is near stop and speed up the slowdown.
@@ -835,8 +829,7 @@ WriteStallCondition ColumnFamilyData::RecalculateWriteStallConditions(
           name_.c_str(), vstorage->estimated_compaction_needed_bytes(),
           write_controller->delayed_write_rate());
     } else {
-      assert(write_stall_condition == WriteStallCondition::kNormal ||
-             mutable_cf_options.disable_write_stall);
+      assert(write_stall_condition == WriteStallCondition::kNormal || mutable_cf_options.disable_write_stall);
       if (vstorage->l0_delay_trigger_count() >=
           GetL0ThresholdSpeedupCompaction(
               mutable_cf_options.level0_file_num_compaction_trigger,
diff --git a/db/db_impl/db_impl.cc b/db/db_impl/db_impl.cc
index f5c569900df..9cbaba2966c 100644
--- a/db/db_impl/db_impl.cc
+++ b/db/db_impl/db_impl.cc
@@ -163,7 +163,6 @@ DBImpl::DBImpl(const DBOptions& options, const std::string& dbname,
       seq_per_batch_(seq_per_batch),
       batch_per_txn_(batch_per_txn),
       db_lock_(nullptr),
-      log_write_mutex_(stats_, env_, DB_MUTEX_WAIT_MICROS, false),
      shutting_down_(false),
      bg_cv_(&mutex_),
      logfile_number_(0),
@@ -1020,12 +1019,11 @@ Status DBImpl::SetDBOptions(
         mutable_db_options_.max_background_jobs,
         mutable_db_options_.base_background_compactions,
         /* parallelize_compactions */ true);
-    const BGJobLimits new_bg_job_limits =
-        GetBGJobLimits(new_options.max_background_flushes,
-                       new_options.max_background_compactions,
-                       new_options.max_background_jobs,
-                       new_options.base_background_compactions,
-                       /* parallelize_compactions */ true);
+    const BGJobLimits new_bg_job_limits = GetBGJobLimits(
+        new_options.max_background_flushes,
+        new_options.max_background_compactions,
+        new_options.max_background_jobs,
+        new_options.base_background_compactions, /* parallelize_compactions */ true);
 
     const bool max_flushes_increased =
         new_bg_job_limits.max_flushes > current_bg_job_limits.max_flushes;
diff --git a/db/db_impl/db_impl_compaction_flush.cc b/db/db_impl/db_impl_compaction_flush.cc
index b9dcb397de7..dfe681a97ab 100644
--- a/db/db_impl/db_impl_compaction_flush.cc
+++ b/db/db_impl/db_impl_compaction_flush.cc
@@ -1988,8 +1988,7 @@ DBImpl::BGJobLimits DBImpl::GetBGJobLimits(int max_background_flushes,
   }
   if (!parallelize_compactions) {
     // throttle background compactions until we deem necessary
-    res.max_compactions =
-        std::max(1, std::min(base_background_compactions, res.max_compactions));
+    res.max_compactions = std::max(1, std::min(base_background_compactions, res.max_compactions));
   }
   return res;
 }
diff --git a/db/db_impl/db_impl_open.cc b/db/db_impl/db_impl_open.cc
index 630368ff82f..b1623f7eee2 100644
--- a/db/db_impl/db_impl_open.cc
+++ b/db/db_impl/db_impl_open.cc
@@ -57,8 +57,7 @@ DBOptions SanitizeOptions(const std::string& dbname, const DBOptions& src) {
   }
   auto bg_job_limits = DBImpl::GetBGJobLimits(
       result.max_background_flushes, result.max_background_compactions,
-      result.max_background_jobs, result.base_background_compactions,
-      true /* parallelize_compactions */);
+      result.max_background_jobs, result.base_background_compactions, true /* parallelize_compactions */);
   result.env->IncBackgroundThreadsIfNeeded(bg_job_limits.max_compactions,
                                            Env::Priority::LOW);
   result.env->IncBackgroundThreadsIfNeeded(bg_job_limits.max_flushes,
diff --git a/db/db_impl/db_impl_write.cc b/db/db_impl/db_impl_write.cc
index d3c6c6ca347..a703820d80c 100644
--- a/db/db_impl/db_impl_write.cc
+++ b/db/db_impl/db_impl_write.cc
@@ -88,15 +88,15 @@ Status DBImpl::MultiBatchWriteImpl(const WriteOptions& write_options,
   WriteContext write_context;
   bool ignore_missing_faimly = write_options.ignore_missing_column_families;
   if (writer.state == WriteThread::STATE_GROUP_LEADER) {
-    PERF_TIMER_STOP(write_pre_and_post_process_time);
-    PERF_TIMER_GUARD(write_delay_time);
     if (writer.callback && !writer.callback->AllowWriteBatching()) {
       write_thread_.WaitForMemTableWriters();
     }
     WriteThread::WriteGroup wal_write_group;
     LogContext log_context;
+    PERF_TIMER_STOP(write_pre_and_post_process_time);
     writer.status =
         PreprocessWrite(write_options, &log_context, &write_context);
+    PERF_TIMER_START(write_pre_and_post_process_time);
 
     // This can set non-OK status if callback fail.
     last_batch_group_size_ =
@@ -132,6 +132,7 @@ Status DBImpl::MultiBatchWriteImpl(const WriteOptions& write_options,
     RecordTick(stats_, BYTES_WRITTEN, total_byte_size);
     RecordInHistogram(stats_, BYTES_PER_WRITE, total_byte_size);
 
+    PERF_TIMER_STOP(write_pre_and_post_process_time);
     if (!write_options.disableWAL) {
       PERF_TIMER_GUARD(write_wal_time);
       stats->AddDBStats(InternalStats::kIntStatsWriteDoneBySelf, 1);
@@ -162,6 +163,7 @@ Status DBImpl::MultiBatchWriteImpl(const WriteOptions& write_options,
   bool is_leader_thread = false;
   WriteThread::WriteGroup memtable_write_group;
   if (writer.state == WriteThread::STATE_MEMTABLE_WRITER_LEADER) {
+    PERF_TIMER_GUARD(write_memtable_time);
     assert(writer.ShouldWriteToMemtable());
     write_thread_.EnterAsMemTableWriter(&writer, &memtable_write_group);
     assert(immutable_db_options_.allow_concurrent_memtable_write);
@@ -169,7 +171,6 @@ Status DBImpl::MultiBatchWriteImpl(const WriteOptions& write_options,
       is_leader_thread = true;
       write_thread_.LaunchParallelMemTableWriters(&memtable_write_group);
     } else {
-      PERF_TIMER_GUARD(write_memtable_time);
       auto version_set = versions_->GetColumnFamilySet();
       memtable_write_group.running.store(0);
       for (auto it = memtable_write_group.begin();
@@ -193,7 +194,6 @@ Status DBImpl::MultiBatchWriteImpl(const WriteOptions& write_options,
   }
   if (writer.state == WriteThread::STATE_PARALLEL_MEMTABLE_WRITER) {
     assert(writer.ShouldWriteToMemtable());
-    PERF_TIMER_GUARD(write_memtable_time);
     auto version_set = versions_->GetColumnFamilySet();
     WriteBatchInternal::AsyncInsertInto(
         &writer, writer.sequence, version_set, &flush_scheduler_,
@@ -640,8 +640,8 @@ Status DBImpl::PipelinedWriteImpl(const WriteOptions& write_options,
     LogContext log_context(!write_options.disableWAL && write_options.sync);
     // PreprocessWrite does its own perf timing.
     PERF_TIMER_STOP(write_pre_and_post_process_time);
-    PERF_TIMER_GUARD(write_delay_time);
     w.status = PreprocessWrite(write_options, &log_context, &write_context);
+    PERF_TIMER_START(write_pre_and_post_process_time);
 
     // This can set non-OK status if callback fail.
     last_batch_group_size_ =
@@ -678,6 +678,8 @@ Status DBImpl::PipelinedWriteImpl(const WriteOptions& write_options,
       RecordTick(stats_, BYTES_WRITTEN, total_byte_size);
       RecordInHistogram(stats_, BYTES_PER_WRITE, total_byte_size);
 
+      PERF_TIMER_STOP(write_pre_and_post_process_time);
+
       if (w.status.ok() && !write_options.disableWAL) {
         PERF_TIMER_GUARD(write_wal_time);
         stats->AddDBStats(InternalStats::kIntStatsWriteDoneBySelf, 1);
@@ -750,7 +752,7 @@ Status DBImpl::UnorderedWriteMemtable(const WriteOptions& write_options,
                                       WriteCallback* callback, uint64_t log_ref,
                                       SequenceNumber seq,
                                       const size_t sub_batch_cnt) {
-  PERF_TIMER_GUARD(write_memtable_time);
+  PERF_TIMER_GUARD(write_pre_and_post_process_time);
   StopWatch write_sw(env_, immutable_db_options_.statistics.get(), DB_WRITE);
 
   WriteThread::Writer w(write_options, my_batch, callback, log_ref,
@@ -822,8 +824,6 @@ Status DBImpl::WriteImplWALOnly(
 
   // else we are the leader of the write batch group
   assert(w.state == WriteThread::STATE_GROUP_LEADER);
-  PERF_TIMER_STOP(write_pre_and_post_process_time);
-  PERF_TIMER_GUARD(write_delay_time);
   if (publish_last_seq == kDoPublishLastSeq) {
     // Currently we only use kDoPublishLastSeq in unordered_write
     assert(immutable_db_options_.unordered_write);
@@ -884,6 +884,8 @@ Status DBImpl::WriteImplWALOnly(
   }
   RecordInHistogram(stats_, BYTES_PER_WRITE, total_byte_size);
 
+  PERF_TIMER_STOP(write_pre_and_post_process_time);
+
   PERF_TIMER_GUARD(write_wal_time);
   // LastAllocatedSequence is increased inside WriteToWAL under
   // wal_write_mutex_ to ensure ordered events in WAL
@@ -932,6 +934,7 @@ Status DBImpl::WriteImplWALOnly(
       status = SyncWAL();
     }
   }
+  PERF_TIMER_START(write_pre_and_post_process_time);
 
   if (!w.CallbackFailed()) {
     WriteStatusCheck(status);
@@ -1033,15 +1036,19 @@ Status DBImpl::PreprocessWrite(const WriteOptions& write_options,
   }
 
   PERF_TIMER_STOP(write_scheduling_flushes_compactions_time);
+  PERF_TIMER_GUARD(write_pre_and_post_process_time);
 
   if (UNLIKELY(status.ok() && (write_controller_.IsStopped() ||
                                write_controller_.NeedsDelay()))) {
+    PERF_TIMER_STOP(write_pre_and_post_process_time);
+    PERF_TIMER_GUARD(write_delay_time);
     // We don't know size of curent batch so that we always use the size
     // for previous one. It might create a fairness issue that expiration
     // might happen for smaller writes but larger writes can go through.
     // Can optimize it if it is an issue.
     InstrumentedMutexLock l(&mutex_);
     status = DelayWrite(last_batch_group_size_, write_options);
+    PERF_TIMER_START(write_pre_and_post_process_time);
   }
 
   InstrumentedMutexLock l(&log_write_mutex_);
@@ -1627,6 +1634,7 @@ Status DBImpl::ThrottleLowPriWritesIfNeeded(const WriteOptions& write_options,
       // is that in case the write is heavy, low pri writes may never have
       // a chance to run. Now we guarantee we are still slowly making
       // progress.
+      PERF_TIMER_GUARD(write_delay_time);
       write_controller_.low_pri_rate_limiter()->Request(
           my_batch->GetDataSize(), Env::IO_HIGH, nullptr /* stats */,
           RateLimiter::OpType::kWrite);
diff --git a/db/db_options_test.cc b/db/db_options_test.cc
index 46cdf7ac73d..1a637f934b9 100644
--- a/db/db_options_test.cc
+++ b/db/db_options_test.cc
@@ -406,10 +406,10 @@ TEST_F(DBOptionsTest, EnableAutoCompactionButDisableStall) {
   dbfull()->TEST_WaitForFlushMemTable();
   ASSERT_EQ(2, NumTableFilesAtLevel(0));
   uint64_t l0_size = SizeAtLevel(0);
-
+  
   options.hard_pending_compaction_bytes_limit = l0_size;
   options.soft_pending_compaction_bytes_limit = l0_size;
-
+  
   Reopen(options);
   dbfull()->TEST_WaitForCompact();
   ASSERT_FALSE(dbfull()->TEST_write_controler().IsStopped());
@@ -418,14 +418,15 @@ TEST_F(DBOptionsTest, EnableAutoCompactionButDisableStall) {
   SyncPoint::GetInstance()->LoadDependency(
       {{"DBOptionsTest::EnableAutoCompactionButDisableStall:1",
         "BackgroundCallCompaction:0"},
-       {"DBImpl::BackgroundCompaction():BeforePickCompaction",
+       {"DBImpl::BackgroundCompaction():BeforePickCompaction", 
         "DBOptionsTest::EnableAutoCompactionButDisableStall:2"},
-       {"DBOptionsTest::EnableAutoCompactionButDisableStall:3",
+       {"DBOptionsTest::EnableAutoCompactionButDisableStall:3", 
         "DBImpl::BackgroundCompaction():AfterPickCompaction"}});
   // Block background compaction.
   SyncPoint::GetInstance()->EnableProcessing();
-  ASSERT_OK(dbfull()->SetOptions({{"disable_auto_compactions", "false"}}));
+  ASSERT_OK(
+      dbfull()->SetOptions({{"disable_auto_compactions", "false"}}));
   TEST_SYNC_POINT("DBOptionsTest::EnableAutoCompactionButDisableStall:1");
   // Wait for stall condition recalculate.
   TEST_SYNC_POINT("DBOptionsTest::EnableAutoCompactionButDisableStall:2");
@@ -433,7 +434,7 @@ TEST_F(DBOptionsTest, EnableAutoCompactionButDisableStall) {
   ASSERT_FALSE(dbfull()->TEST_write_controler().IsStopped());
   ASSERT_FALSE(dbfull()->TEST_write_controler().NeedsDelay());
   ASSERT_TRUE(dbfull()->TEST_write_controler().NeedSpeedupCompaction());
-
+  
   TEST_SYNC_POINT("DBOptionsTest::EnableAutoCompactionButDisableStall:3");
   // Background compaction executed.
diff --git a/db/version_builder_test.cc b/db/version_builder_test.cc
index d2df7a48ec1..2f049ef2a64 100644
--- a/db/version_builder_test.cc
+++ b/db/version_builder_test.cc
@@ -5,7 +5,6 @@
 #include
 #include
-
 #include "db/version_edit.h"
 #include "db/version_set.h"
 #include "logging/logging.h"
diff --git a/db/version_edit.h b/db/version_edit.h
index 9860a04233e..13f3e8b9e98 100644
--- a/db/version_edit.h
+++ b/db/version_edit.h
@@ -13,7 +13,6 @@
 #include
 #include
 #include
-
 #include "db/dbformat.h"
 #include "memory/arena.h"
 #include "rocksdb/cache.h"
diff --git a/db/version_set.h b/db/version_set.h
index f3c3dc510b2..583080291a7 100644
--- a/db/version_set.h
+++ b/db/version_set.h
@@ -927,10 +927,10 @@ class VersionSet {
                                const EnvOptions& env_options,
                                int new_levels);
 
-  // If sst_file_number is > 0, only prints manifest info for specified SST file
-  // number
-  Status DumpManifest(Options& options, std::string& dscname, bool verbose,
-                      bool hex, bool json, uint64_t sst_file_number);
+  // If sst_file_number is > 0, only prints manifest info for specified SST file number
+Status DumpManifest(Options& options, std::string& dscname,
+                    bool verbose, bool hex, bool json,
+                    uint64_t sst_file_number);
 
 #endif  // ROCKSDB_LITE
diff --git a/include/rocksdb/utilities/ldb_cmd.h b/include/rocksdb/utilities/ldb_cmd.h
index 76184d6c48c..c43ef6fb212 100644
--- a/include/rocksdb/utilities/ldb_cmd.h
+++ b/include/rocksdb/utilities/ldb_cmd.h
@@ -9,7 +9,6 @@
 #include
 #include
-
 #include
 #include
 #include
diff --git a/monitoring/perf_flag.cc b/monitoring/perf_flag.cc
index 534c36e3261..a3bdbda353e 100644
--- a/monitoring/perf_flag.cc
+++ b/monitoring/perf_flag.cc
@@ -22,8 +22,8 @@ void DisablePerfFlag(uint64_t flag) {
 }
 
 bool CheckPerfFlag(uint64_t flag) {
-  return ((uint64_t)GET_FLAG(flag) & (uint64_t)0b1
-          << (flag & (uint64_t)0b111)) != 0;
+  return ((uint64_t)GET_FLAG(flag) &
+          (uint64_t)0b1 << (flag & (uint64_t)0b111)) != 0;
 }
 
 }  // namespace rocksdb
diff --git a/monitoring/perf_flag_imp.h b/monitoring/perf_flag_imp.h
index ebc0b9430bf..453c5e03db8 100644
--- a/monitoring/perf_flag_imp.h
+++ b/monitoring/perf_flag_imp.h
@@ -1,5 +1,4 @@
 #include
-
 #include "rocksdb/perf_flag.h"
 
 namespace rocksdb {
@@ -8,4 +7,4 @@ extern __thread uint8_t perf_flags[FLAGS_LEN];
 #else
 extern uint8_t perf_flags[FLAGS_LEN];
 #endif
-}  // namespace rocksdb
+}
diff --git a/options/options_helper.cc b/options/options_helper.cc
index 0bebea9da95..a1a998c38ad 100644
--- a/options/options_helper.cc
+++ b/options/options_helper.cc
@@ -165,7 +165,8 @@ ColumnFamilyOptions BuildColumnFamilyOptions(
   // Compaction related options
   cf_opts.disable_auto_compactions =
       mutable_cf_options.disable_auto_compactions;
-  cf_opts.disable_write_stall = mutable_cf_options.disable_write_stall;
+  cf_opts.disable_write_stall =
+      mutable_cf_options.disable_write_stall;
   cf_opts.soft_pending_compaction_bytes_limit =
       mutable_cf_options.soft_pending_compaction_bytes_limit;
   cf_opts.hard_pending_compaction_bytes_limit =
diff --git a/options/options_parser.cc b/options/options_parser.cc
index 13d2eb36cf7..4b7f95d1287 100644
--- a/options/options_parser.cc
+++ b/options/options_parser.cc
@@ -202,8 +202,7 @@ Status RocksDBOptionsParser::ParseStatement(std::string* name,
 
 Status RocksDBOptionsParser::Parse(const std::string& file_name, Env* env,
                                    bool ignore_unknown_options) {
-  ConfigOptions
-      config_options;  // Use default for escaped(true) and check (exact)
+  ConfigOptions config_options;  // Use default for escaped(true) and check (exact)
   config_options.ignore_unknown_options = ignore_unknown_options;
   config_options.env = env;
   return Parse(config_options, file_name);
 }
@@ -216,8 +215,7 @@ Status RocksDBOptionsParser::Parse(const ConfigOptions& config_options_in,
   auto ignore_unknown_options = config_options.ignore_unknown_options;
 
   std::unique_ptr<SequentialFile> seq_file;
-  Status s =
-      config_options.env->NewSequentialFile(file_name, &seq_file, EnvOptions());
+  Status s = config_options.env->NewSequentialFile(file_name, &seq_file, EnvOptions());
   if (!s.ok()) {
     return s;
   }
diff --git a/tools/ldb_cmd.cc b/tools/ldb_cmd.cc
index 396c569a323..1c91b8382d5 100644
--- a/tools/ldb_cmd.cc
+++ b/tools/ldb_cmd.cc
@@ -1018,8 +1018,7 @@ void DumpManifestFile(Options options, std::string file, bool verbose, bool hex,
   ImmutableDBOptions immutable_db_options(options);
   VersionSet versions(dbname, &immutable_db_options, sopt, tc.get(), &wb, &wc,
                       /*block_cache_tracer=*/nullptr);
-  Status s =
-      versions.DumpManifest(options, file, verbose, hex, json, sst_file_number);
+  Status s = versions.DumpManifest(options, file, verbose, hex, json, sst_file_number);
   if (!s.ok()) {
     printf("Error in processing file %s %s\n", file.c_str(),
            s.ToString().c_str());
diff --git a/tools/ldb_cmd_impl.h b/tools/ldb_cmd_impl.h
index 35c43a23711..fe9fc47a277 100644
--- a/tools/ldb_cmd_impl.h
+++ b/tools/ldb_cmd_impl.h
@@ -631,4 +631,4 @@ class UnsafeRemoveSstFileCommand : public LDBCommand {
 
   uint64_t sst_file_number_;
 };
-}  // namespace rocksdb
+}  // namespace ROCKSDB_NAMESPACE
diff --git a/tools/ldb_cmd_test.cc b/tools/ldb_cmd_test.cc
index 5278b61b265..aea02105852 100644
--- a/tools/ldb_cmd_test.cc
+++ b/tools/ldb_cmd_test.cc
@@ -6,7 +6,6 @@
 #ifndef ROCKSDB_LITE
 #include "rocksdb/utilities/ldb_cmd.h"
-
 #include "test_util/sync_point.h"
 #include "test_util/testharness.h"