
Commit

Fix compatibility
Signed-off-by: Yang Zhang <[email protected]>
v01dstar committed Oct 2, 2024
1 parent 31fdadf commit 68960f0
Showing 25 changed files with 173 additions and 179 deletions.
8 changes: 4 additions & 4 deletions CMakeLists.txt
@@ -5,11 +5,11 @@ enable_language(C)
find_package(Git)

if (NOT ROCKSDB_GIT_REPO)
set(ROCKSDB_GIT_REPO "https://github.com/tikv/rocksdb.git")
set(ROCKSDB_GIT_REPO "https://github.com/v01dstar/rocksdb.git")
endif()

if (NOT ROCKSDB_GIT_BRANCH)
set(ROCKSDB_GIT_BRANCH "6.29.tikv") # should at least or newer than commit dcf2f8d56092285381be2acbf8f04b8aeeb7ad79
set(ROCKSDB_GIT_BRANCH "8.10-tikv") # should at least or newer than commit dcf2f8d56092285381be2acbf8f04b8aeeb7ad79
endif()

if (NOT DEFINED ROCKSDB_DIR)
@@ -92,7 +92,7 @@ if (WITH_TITAN_TESTS OR WITH_TITAN_TOOLS)
add_subdirectory(${ROCKSDB_DIR} rocksdb EXCLUDE_FROM_ALL)
# Check if -latomic is required or not
if (NOT MSVC)
set(CMAKE_REQUIRED_FLAGS "--std=c++11")
set(CMAKE_REQUIRED_FLAGS "--std=c++17")
CHECK_CXX_SOURCE_COMPILES("
#include <atomic>
std::atomic<uint64_t> x(0);
@@ -111,7 +111,7 @@ endif()

# Check if -latomic is required or not
if (NOT MSVC)
set(CMAKE_REQUIRED_FLAGS "--std=c++11")
set(CMAKE_REQUIRED_FLAGS "--std=c++17")
CHECK_CXX_SOURCE_COMPILES("
#include <atomic>
std::atomic<uint64_t> x(0);
2 changes: 1 addition & 1 deletion cmake/rocksdb_flags.cmake
@@ -63,7 +63,7 @@ else()
if(MINGW)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-format")
endif()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
if(NOT CMAKE_BUILD_TYPE STREQUAL "Debug")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-omit-frame-pointer")
include(CheckCXXCompilerFlag)
1 change: 1 addition & 0 deletions include/titan/options.h
@@ -3,6 +3,7 @@
#include <map>
#include <unordered_map>

#include "rocksdb/advanced_cache.h"
#include "rocksdb/options.h"

namespace rocksdb {
8 changes: 6 additions & 2 deletions src/blob_file_cache.cc
@@ -1,6 +1,7 @@
#include "blob_file_cache.h"

#include "file/filename.h"
#include "rocksdb/advanced_cache.h"

#include "util.h"

@@ -9,6 +10,9 @@ namespace titandb {

namespace {

const Cache::CacheItemHelper kBlobFileReaderCacheItemHelper(
CacheEntryRole::kBlockBasedTableReader, &DeleteCacheValue<BlobFileReader>);

Slice EncodeFileNumber(const uint64_t* number) {
return Slice(reinterpret_cast<const char*>(number), sizeof(*number));
}
@@ -87,8 +91,8 @@ Status BlobFileCache::GetBlobFileReaderHandle(uint64_t file_number,
stats_);
if (!s.ok()) return s;

cache_->Insert(cache_key, reader.release(), 1,
&DeleteCacheValue<BlobFileReader>, handle);
cache_->Insert(cache_key, reader.release(), &kBlobFileReaderCacheItemHelper,
1, handle);
return s;
}

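A note for readers following the cache changes in this file: newer RocksDB exposes the cache through rocksdb/advanced_cache.h, and Cache::Insert no longer takes a per-call deleter; callers instead pass a pointer to a long-lived Cache::CacheItemHelper that bundles the entry role with the deleter. The sketch below is a minimal illustration of that pattern, assuming the 8.x-era interface; ReaderStub, DeleteReaderStub, and CacheReader are made-up names, not Titan code.

#include <cstdint>
#include <memory>

#include "rocksdb/advanced_cache.h"

namespace {

// Hypothetical cached object, standing in for something like BlobFileReader.
struct ReaderStub {
  uint64_t file_number = 0;
};

// Deleter with the helper's expected shape: (ObjectPtr, MemoryAllocator*).
void DeleteReaderStub(rocksdb::Cache::ObjectPtr value,
                      rocksdb::MemoryAllocator* /*allocator*/) {
  delete static_cast<ReaderStub*>(value);
}

// One static helper replaces the deleter that used to be passed per Insert().
const rocksdb::Cache::CacheItemHelper kReaderStubHelper(
    rocksdb::CacheEntryRole::kMisc, &DeleteReaderStub);

}  // namespace

rocksdb::Status CacheReader(rocksdb::Cache* cache, const rocksdb::Slice& key,
                            std::unique_ptr<ReaderStub> reader,
                            rocksdb::Cache::Handle** handle) {
  // New argument order: value, helper, charge, then the optional handle.
  return cache->Insert(key, reader.release(), &kReaderStubHelper, 1 /*charge*/,
                       handle);
}

The same helper-based Insert appears again below in src/blob_storage.cc and src/db_iter.h, where kBlobValueCacheItemHelper plays that role for cached blob values.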
53 changes: 30 additions & 23 deletions src/blob_file_iterator.cc
@@ -22,11 +22,10 @@ BlobFileIterator::~BlobFileIterator() {}
bool BlobFileIterator::Init() {
Slice slice;
char header_buf[BlobFileHeader::kV3EncodedLength];
// With for_compaction=true, rate_limiter is enabled. Since BlobFileIterator
// is only used for GC, we always set for_compaction to true.
status_ =
file_->Read(IOOptions(), 0, BlobFileHeader::kV3EncodedLength, &slice,
header_buf, nullptr /*aligned_buf*/, true /*for_compaction*/);
IOOptions io_options;
io_options.rate_limiter_priority = Env::IOPriority::IO_LOW;
status_ = file_->Read(io_options, 0, BlobFileHeader::kV3EncodedLength, &slice,
header_buf, nullptr /*aligned_buf*/);
if (!status_.ok()) {
return false;
}
@@ -39,12 +38,9 @@ bool BlobFileIterator::Init() {
header_size_ = blob_file_header.size();

char footer_buf[BlobFileFooter::kEncodedLength];
// With for_compaction=true, rate_limiter is enabled. Since BlobFileIterator
// is only used for GC, we always set for_compaction to true.
status_ =
file_->Read(IOOptions(), file_size_ - BlobFileFooter::kEncodedLength,
BlobFileFooter::kEncodedLength, &slice, footer_buf,
nullptr /*aligned_buf*/, true /*for_compaction*/);
status_ = file_->Read(io_options, file_size_ - BlobFileFooter::kEncodedLength,
BlobFileFooter::kEncodedLength, &slice, footer_buf,
nullptr /*aligned_buf*/);
if (!status_.ok()) return false;
BlobFileFooter blob_file_footer;
status_ = blob_file_footer.DecodeFrom(&slice);
@@ -125,13 +121,22 @@ void BlobFileIterator::IterateForPrev(uint64_t offset) {
uint64_t total_length = 0;
FixedSlice<kRecordHeaderSize> header_buffer;
iterate_offset_ = header_size_;
while (iterate_offset_ < offset) {
// With for_compaction=true, rate_limiter is enabled. Since
// BlobFileIterator is only used for GC, we always set for_compaction to
// true.
status_ = file_->Read(IOOptions(), iterate_offset_, kRecordHeaderSize,
IOOptions io_options;
// Since BlobFileIterator is only used for GC, we always set IO priority to
// low.
io_options.rate_limiter_priority = Env::IOPriority::IO_LOW;
for (; iterate_offset_ < offset; iterate_offset_ += total_length) {
status_ = file_->Read(io_options, iterate_offset_, kRecordHeaderSize,
&header_buffer, header_buffer.get(),
nullptr /*aligned_buf*/, true /*for_compaction*/);
nullptr /*aligned_buf*/);
if (!status_.ok()) return;
status_ = decoder_.DecodeHeader(&header_buffer);
if (!status_.ok()) return;
@@ -148,23 +153,23 @@ void BlobFileIterator::IterateForPrev(uint64_t offset) {

void BlobFileIterator::GetBlobRecord() {
FixedSlice<kRecordHeaderSize> header_buffer;
// With for_compaction=true, rate_limiter is enabled. Since BlobFileIterator
// is only used for GC, we always set for_compaction to true.
status_ = file_->Read(IOOptions(), iterate_offset_, kRecordHeaderSize,
&header_buffer, header_buffer.get(),
nullptr /*aligned_buf*/, true /*for_compaction*/);
// Since BlobFileIterator is only used for GC, we always set IO priority to
// low.
IOOptions io_options;
io_options.rate_limiter_priority = Env::IOPriority::IO_LOW;
status_ =
file_->Read(io_options, iterate_offset_, kRecordHeaderSize,
&header_buffer, header_buffer.get(), nullptr /*aligned_buf*/);
if (!status_.ok()) return;
status_ = decoder_.DecodeHeader(&header_buffer);
if (!status_.ok()) return;

Slice record_slice;
auto record_size = decoder_.GetRecordSize();
buffer_.resize(record_size);
// With for_compaction=true, rate_limiter is enabled. Since BlobFileIterator
// is only used for GC, we always set for_compaction to true.
status_ = file_->Read(IOOptions(), iterate_offset_ + kRecordHeaderSize,
record_size, &record_slice, buffer_.data(),
nullptr /*aligned_buf*/, true /*for_compaction*/);
status_ =
file_->Read(io_options, iterate_offset_ + kRecordHeaderSize, record_size,
&record_slice, buffer_.data(), nullptr /*aligned_buf*/);
if (status_.ok()) {
status_ =
decoder_.DecodeRecord(&record_slice, &cur_blob_record_, &uncompressed_,
@@ -199,7 +204,9 @@ void BlobFileIterator::PrefetchAndGet() {
while (readahead_end_offset_ + readahead_size_ <= min_blob_size &&
readahead_size_ < kMaxReadaheadSize)
readahead_size_ <<= 1;
file_->Prefetch(readahead_end_offset_, readahead_size_);
IOOptions io_options;
io_options.rate_limiter_priority = Env::IOPriority::IO_LOW;
file_->Prefetch(io_options, readahead_end_offset_, readahead_size_);
readahead_end_offset_ += readahead_size_;
readahead_size_ = std::min(kMaxReadaheadSize, readahead_size_ << 1);
}
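To make the I/O changes in this file easier to follow: the RandomAccessFileReader::Read overload with a trailing for_compaction flag is gone in newer RocksDB, and the rate-limiter priority now travels inside IOOptions. A rough sketch of the replacement pattern, assuming access to RocksDB's internal file/random_access_file_reader.h header (which Titan already includes elsewhere); the ReadForGC wrapper is illustrative only.

#include "file/random_access_file_reader.h"
#include "rocksdb/env.h"

namespace rocksdb {

// Reads n bytes at offset with low rate-limiter priority, the way a GC-only
// reader such as Titan's BlobFileIterator is expected to behave.
IOStatus ReadForGC(RandomAccessFileReader* file, uint64_t offset, size_t n,
                   Slice* result, char* scratch) {
  IOOptions io_options;
  // Replaces the old `true /*for_compaction*/` trailing argument.
  io_options.rate_limiter_priority = Env::IOPriority::IO_LOW;
  return file->Read(io_options, offset, n, result, scratch,
                    nullptr /*aligned_buf*/);
}

}  // namespace rocksdb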
8 changes: 5 additions & 3 deletions src/blob_file_reader.cc
@@ -142,8 +142,8 @@ Status BlobFileReader::Get(const ReadOptions& _options,
}
if (handle.size != static_cast<uint64_t>(blob.size())) {
return Status::Corruption(
"ReadRecord actual size: " + ToString(blob.size()) +
" not equal to blob size " + ToString(handle.size));
"ReadRecord actual size: " + std::to_string(blob.size()) +
" not equal to blob size " + std::to_string(handle.size));
}

BlobDecoder decoder(uncompression_dict_ == nullptr
@@ -165,7 +165,9 @@ Status BlobFilePrefetcher::Get(const ReadOptions& options,
last_offset_ = handle.offset + handle.size;
if (handle.offset + handle.size > readahead_limit_) {
readahead_size_ = std::max(handle.size, readahead_size_);
reader_->file_->Prefetch(handle.offset, readahead_size_);
IOOptions io_options;
io_options.rate_limiter_priority = Env::IOPriority::IO_HIGH;
reader_->file_->Prefetch(io_options, handle.offset, readahead_size_);
readahead_limit_ = handle.offset + readahead_size_;
readahead_size_ = std::min(kMaxReadaheadSize, readahead_size_ * 2);
}
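Prefetch follows the same migration as Read: in the RocksDB targeted here, RandomAccessFileReader::Prefetch takes IOOptions as its first argument, so the caller picks the rate-limiter priority there (IO_HIGH on this foreground read path, versus IO_LOW in the GC iterator above). A hedged sketch of one readahead step; only the signature is the point, and the cap and doubling policy simply mirror the hunk above.

#include <algorithm>
#include <cstddef>

#include "file/random_access_file_reader.h"
#include "rocksdb/env.h"

namespace rocksdb {

constexpr size_t kMaxReadahead = 256 * 1024;  // illustrative cap

// Issues a foreground-priority prefetch and grows the readahead window.
IOStatus PrefetchAhead(RandomAccessFileReader* file, uint64_t offset,
                       size_t* readahead_size) {
  IOOptions io_options;
  io_options.rate_limiter_priority = Env::IOPriority::IO_HIGH;
  IOStatus s = file->Prefetch(io_options, offset, *readahead_size);
  *readahead_size = std::min(kMaxReadahead, *readahead_size * 2);
  return s;
}

}  // namespace rocksdb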
2 changes: 1 addition & 1 deletion src/blob_file_size_collector_test.cc
@@ -78,7 +78,7 @@ class BlobFileSizeCollectorTest : public testing::Test {
void NewTableReader(std::unique_ptr<RandomAccessFileReader>&& file,
std::unique_ptr<TableReader>* result) {
TableReaderOptions options(ioptions_, prefix_extractor_, env_options_,
cf_ioptions_.internal_comparator);
cf_ioptions_.internal_comparator, 0);
uint64_t file_size = 0;
ASSERT_OK(env_->GetFileSize(file->file_name(), &file_size));
ASSERT_TRUE(file_size > 0);
4 changes: 2 additions & 2 deletions src/blob_format.h
@@ -64,7 +64,7 @@ class BlobEncoder {
BlobEncoder(CompressionType compression, CompressionOptions compression_opt,
const CompressionDict* compression_dict)
: compression_opt_(compression_opt),
compression_ctx_(compression),
compression_ctx_(compression, compression_opt_),
compression_dict_(compression_dict),
compression_info_(new CompressionInfo(
compression_opt_, compression_ctx_, *compression_dict_, compression,
@@ -347,7 +347,7 @@ struct BlobFileHeader {
if (ver != BlobFileHeader::kVersion1 && ver != BlobFileHeader::kVersion2 &&
ver != BlobFileHeader::kVersion3) {
return Status::InvalidArgument("unrecognized blob file version " +
ToString(ver));
std::to_string(ver));
}
return Status::OK();
}
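Background for the one-line BlobEncoder change above: on the 8.x-era RocksDB this branch targets, CompressionContext is constructed from the compression type together with the CompressionOptions (which carry, for example, the zstd level), so member initializers that used to pass only the type now pass both, exactly as the diff shows. A stripped-down illustration of that initializer ordering; EncoderSketch is a made-up class, and util/compression.h is an internal RocksDB header.

#include "rocksdb/advanced_options.h"  // CompressionOptions, CompressionType
#include "util/compression.h"          // CompressionContext (internal header)

namespace rocksdb {

// Minimal holder showing only the member-initializer change from the hunk.
class EncoderSketch {
 public:
  EncoderSketch(CompressionType type, const CompressionOptions& opts)
      : compression_opt_(opts),
        // Newer RocksDB: the context is built from the options as well.
        compression_ctx_(type, compression_opt_) {}

 private:
  CompressionOptions compression_opt_;
  CompressionContext compression_ctx_;
};

}  // namespace rocksdb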
10 changes: 8 additions & 2 deletions src/blob_gc_job_test.cc
@@ -33,16 +33,22 @@ class BlobGCJobTest : public testing::Test {
TitanOptions options_;
port::Mutex* mutex_;

BlobGCJobTest() : dbname_(test::TmpDir()) {
void ResetOptions() {
options_.dirname = dbname_ + "/titandb";
options_.create_if_missing = true;
options_.disable_background_gc = true;
options_.min_blob_size = 0;
options_.disable_auto_compactions = true;
options_.level_compaction_dynamic_level_bytes = false;
options_.env->CreateDirIfMissing(dbname_);
options_.env->CreateDirIfMissing(options_.dirname);
}
~BlobGCJobTest() { Close(); }

BlobGCJobTest() : dbname_(test::TmpDir()) { ResetOptions(); }
~BlobGCJobTest() {
Close();
ResetOptions();
}

void DisableMergeSmall() { options_.merge_small_file_threshold = 0; }

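The test refactor above also pins level_compaction_dynamic_level_bytes to false. A plausible reading (an assumption, not stated in the commit) is that recent RocksDB releases enable that option by default, so the GC tests re-assert the classic leveled layout they were written against. A small sketch of the equivalent option setup as a free function; MakeGCTestOptions is illustrative and not part of the test.

#include <string>

#include "titan/options.h"

// Builds TitanOptions the way the refactored test fixture does.
rocksdb::titandb::TitanOptions MakeGCTestOptions(const std::string& dbname) {
  rocksdb::titandb::TitanOptions options;
  options.dirname = dbname + "/titandb";
  options.create_if_missing = true;
  options.disable_background_gc = true;
  options.min_blob_size = 0;
  options.disable_auto_compactions = true;
  // Assumed reason: restore the older default so the compaction shape stays stable.
  options.level_compaction_dynamic_level_bytes = false;
  return options;
}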
6 changes: 3 additions & 3 deletions src/blob_storage.cc
@@ -57,9 +57,9 @@ Status BlobStorage::Get(const ReadOptions& options, const BlobIndex& index,
if (blob_cache_ && options.fill_cache) {
Cache::Handle* cache_handle = nullptr;
auto cache_value = new OwnedSlice(std::move(blob));
blob_cache_->Insert(
cache_key, cache_value, cache_value->size() + sizeof(*cache_value),
&DeleteCacheValue<OwnedSlice>, &cache_handle, Cache::Priority::BOTTOM);
blob_cache_->Insert(cache_key, cache_value, &kBlobValueCacheItemHelper,
cache_value->size() + sizeof(*cache_value),
&cache_handle, Cache::Priority::BOTTOM);
value->PinSlice(record->value, UnrefCacheHandle, blob_cache_.get(),
cache_handle);
} else {
10 changes: 5 additions & 5 deletions src/db_impl.cc
@@ -785,9 +785,9 @@ Iterator* TitanDBImpl::NewIteratorImpl(
}

std::unique_ptr<ArenaWrappedDBIter> iter(db_impl_->NewIteratorImpl(
options, cfd, options.snapshot->GetSequenceNumber(),
nullptr /*read_callback*/, true /*expose_blob_index*/,
true /*allow_refresh*/));
options, cfd, cfd->GetReferencedSuperVersion(db_impl_),
options.snapshot->GetSequenceNumber(), nullptr /*read_callback*/,
true /*expose_blob_index*/, true /*allow_refresh*/));
return new TitanDBIterator(options, storage.get(), snapshot, std::move(iter),
env_->GetSystemClock().get(), stats_.get(),
db_options_.info_log.get());
@@ -936,8 +936,8 @@ Status TitanDBImpl::DeleteFilesInRanges(ColumnFamilyHandle* column_family,
file_meta->fd.GetPathId());
if (props.count(fname) == 0) {
std::shared_ptr<const TableProperties> table_properties;
Status s =
version->GetTableProperties(&table_properties, file_meta, &fname);
Status s = version->GetTableProperties(
ReadOptions(), &table_properties, file_meta, &fname);
if (s.ok() && table_properties) {
props.insert({fname, table_properties});
} else {
2 changes: 1 addition & 1 deletion src/db_impl_gc.cc
@@ -103,7 +103,7 @@ Status TitanDBImpl::AsyncInitializeGC(
cf_handle->GetName().c_str());
TablePropertiesCollection collection;
// this operation may be slow
s = cf.second->GetPropertiesOfAllTables(&collection);
s = cf.second->GetPropertiesOfAllTables(ReadOptions(), &collection);
unref(cf.second);
if (!s.ok()) {
MutexLock l(&mutex_);
5 changes: 2 additions & 3 deletions src/db_iter.h
@@ -179,10 +179,9 @@ class TitanDBIterator : public Iterator {
if (blob_cache && options_.fill_cache) {
Cache::Handle *cache_handle = nullptr;
auto cache_value = new OwnedSlice(std::move(blob));
blob_cache->Insert(cache_key, cache_value,
blob_cache->Insert(cache_key, cache_value, &kBlobValueCacheItemHelper,
cache_value->size() + sizeof(*cache_value),
&DeleteCacheValue<OwnedSlice>, &cache_handle,
Cache::Priority::BOTTOM);
&cache_handle, Cache::Priority::BOTTOM);
buffer_.PinSlice(*cache_value, UnrefCacheHandle, blob_cache,
cache_handle);
} else {
20 changes: 10 additions & 10 deletions src/edit_collector.h
@@ -46,10 +46,10 @@ class EditCollector {

if (edit.has_next_file_number_) {
if (edit.next_file_number_ < next_file_number_) {
status_ =
Status::Corruption("Edit has a smaller next file number " +
ToString(edit.next_file_number_) +
" than current " + ToString(next_file_number_));
status_ = Status::Corruption("Edit has a smaller next file number " +
std::to_string(edit.next_file_number_) +
" than current " +
std::to_string(next_file_number_));
return status_;
}
next_file_number_ = edit.next_file_number_;
@@ -138,7 +138,7 @@
"blob file %" PRIu64 " has been deleted twice\n",
number);
if (paranoid_check_) {
return Status::Corruption("Blob file " + ToString(number) +
return Status::Corruption("Blob file " + std::to_string(number) +
" has been added twice");
} else {
return Status::OK();
@@ -154,7 +154,7 @@
"blob file %" PRIu64 " has been deleted twice\n",
number);
if (paranoid_check_) {
return Status::Corruption("Blob file " + ToString(number) +
return Status::Corruption("Blob file " + std::to_string(number) +
" has been deleted twice");
} else {
return Status::OK();
@@ -173,14 +173,14 @@
TITAN_LOG_ERROR(storage->db_options().info_log,
"blob file %" PRIu64 " has been deleted before\n",
number);
return Status::Corruption("Blob file " + ToString(number) +
return Status::Corruption("Blob file " + std::to_string(number) +
" has been deleted before");
} else {
TITAN_LOG_ERROR(storage->db_options().info_log,
"blob file %" PRIu64 " has been added before\n",
number);

return Status::Corruption("Blob file " + ToString(number) +
return Status::Corruption("Blob file " + std::to_string(number) +
" has been added before");
}
}
@@ -196,14 +196,14 @@
TITAN_LOG_ERROR(storage->db_options().info_log,
"blob file %" PRIu64 " doesn't exist before\n",
number);
return Status::Corruption("Blob file " + ToString(number) +
return Status::Corruption("Blob file " + std::to_string(number) +
" doesn't exist before");
} else if (blob->is_obsolete()) {
TITAN_LOG_ERROR(storage->db_options().info_log,
"blob file %" PRIu64 " has been deleted already\n",
number);
if (paranoid_check_) {
return Status::Corruption("Blob file " + ToString(number) +
return Status::Corruption("Blob file " + std::to_string(number) +
" has been deleted already");
}
}
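Every hunk in this file is the same mechanical substitution: newer RocksDB no longer ships its ToString() numeric-formatting helper, so the collector's error messages are built with std::to_string from the standard library. A trivial, self-contained illustration (the function name and message text are made up):

#include <cstdint>
#include <string>

#include "rocksdb/status.h"

// Builds a corruption message the way EditCollector does after the change.
rocksdb::Status DuplicateAddError(uint64_t file_number) {
  return rocksdb::Status::Corruption(
      "Blob file " + std::to_string(file_number) + " has been added twice");
}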

