Skip to content

Commit

Permalink
Pass through use_fs_buffer to Read method (facebook#13200)
Browse files Browse the repository at this point in the history
Summary:
This is a follow up to facebook#13177, which was supposed to disable the file system buffer optimization for compaction reads. However, it did not work as expected because I did not pass through `use_fs_buffer` to the `Read` method, which also calls `UseFSBuffer`.

Pull Request resolved: facebook#13200

Test Plan:
I added simple tests to verify we do not hit the overflow issue when we are doing compaction prefetches.

```
 ./prefetch_test --gtest_filter="*FSBufferPrefetchForCompaction*"
```

Of course I will be looking through the warm storage crash test logs as well once the change is merged.

Reviewed By: anand1976

Differential Revision: D66996079

Pulled By: archang19

fbshipit-source-id: b4d9254f1354ccfc53a307174de5f2388b7e5474
  • Loading branch information
archang19 authored and facebook-github-bot committed Dec 10, 2024
1 parent d386385 commit 5aead7a
Show file tree
Hide file tree
Showing 3 changed files with 65 additions and 5 deletions.
11 changes: 7 additions & 4 deletions file/file_prefetch_buffer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -93,11 +93,10 @@ void FilePrefetchBuffer::PrepareBufferForRead(
Status FilePrefetchBuffer::Read(BufferInfo* buf, const IOOptions& opts,
RandomAccessFileReader* reader,
uint64_t read_len, uint64_t aligned_useful_len,
uint64_t start_offset) {
uint64_t start_offset, bool use_fs_buffer) {
Slice result;
Status s;
char* to_buf = nullptr;
bool use_fs_buffer = UseFSBuffer(reader);
if (use_fs_buffer) {
s = FSBufferDirectRead(reader, buf, opts, start_offset + aligned_useful_len,
read_len, result);
Expand Down Expand Up @@ -197,12 +196,14 @@ Status FilePrefetchBuffer::Prefetch(const IOOptions& opts,

Status s;
if (read_len > 0) {
s = Read(buf, opts, reader, read_len, aligned_useful_len, rounddown_offset);
s = Read(buf, opts, reader, read_len, aligned_useful_len, rounddown_offset,
use_fs_buffer);
}

if (usage_ == FilePrefetchBufferUsage::kTableOpenPrefetchTail && s.ok()) {
RecordInHistogram(stats_, TABLE_OPEN_PREFETCH_TAIL_READ_BYTES, read_len);
}
assert(buf->offset_ <= offset);
return s;
}

Expand Down Expand Up @@ -733,7 +734,8 @@ Status FilePrefetchBuffer::PrefetchInternal(const IOOptions& opts,
}

if (read_len1 > 0) {
s = Read(buf, opts, reader, read_len1, aligned_useful_len1, start_offset1);
s = Read(buf, opts, reader, read_len1, aligned_useful_len1, start_offset1,
use_fs_buffer);
if (!s.ok()) {
AbortAllIOs();
FreeAllBuffers();
Expand Down Expand Up @@ -861,6 +863,7 @@ bool FilePrefetchBuffer::TryReadFromCacheUntracked(
if (copy_to_overlap_buffer) {
buf = overlap_buf_;
}
assert(buf->offset_ <= offset);
uint64_t offset_in_buffer = offset - buf->offset_;
*result = Slice(buf->buffer_.BufferStart() + offset_in_buffer, n);
if (prefetched) {
Expand Down
3 changes: 2 additions & 1 deletion file/file_prefetch_buffer.h
Original file line number Diff line number Diff line change
Expand Up @@ -430,7 +430,8 @@ class FilePrefetchBuffer {

Status Read(BufferInfo* buf, const IOOptions& opts,
RandomAccessFileReader* reader, uint64_t read_len,
uint64_t aligned_useful_len, uint64_t start_offset);
uint64_t aligned_useful_len, uint64_t start_offset,
bool use_fs_buffer);

Status ReadAsync(BufferInfo* buf, const IOOptions& opts,
RandomAccessFileReader* reader, uint64_t read_len,
Expand Down
56 changes: 56 additions & 0 deletions file/prefetch_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -3710,6 +3710,62 @@ TEST_P(FSBufferPrefetchTest, FSBufferPrefetchUnalignedReads) {
}
}

TEST_P(FSBufferPrefetchTest, FSBufferPrefetchForCompaction) {
  // Quick sanity check that file system buffer reuse stays disabled for
  // compaction reads (to be revisited once the optimization is re-enabled).
  // The main concern is avoiding unsigned integer overflow during
  // compaction prefetches.
  std::string fname = "fs-buffer-prefetch-for-compaction";
  Random rng(0);
  std::string written = rng.RandomString(32768);
  Write(fname, written);

  FileOptions file_opts;
  std::unique_ptr<RandomAccessFileReader> reader;
  Read(fname, file_opts, &reader);

  std::shared_ptr<Statistics> statistics = CreateDBStatistics();
  ReadaheadParams params;
  params.initial_readahead_size = 8192;
  params.max_readahead_size = 8192;
  // Compaction prefetching does not go through the async IO path, so there
  // is nothing to exercise for the async variant of this parameterized test.
  if (GetParam() /* use_async_prefetch */) {
    return;
  }
  params.num_buffers = 1;

  FilePrefetchBuffer fpb(params, true, false, fs(), clock(),
                         statistics.get());

  Slice result;
  Status s;

  // First read: 0-4096 (for_compaction = true on every read below).
  ASSERT_TRUE(fpb.TryReadFromCache(IOOptions(), reader.get(), 0, 4096,
                                   &result, &s, true));
  ASSERT_EQ(s, Status::OK());
  ASSERT_EQ(strncmp(result.data(), written.substr(0, 4096).c_str(), 4096), 0);

  fpb.UpdateReadPattern(4096, 4096, false);

  // Sequential read: 8192-16384.
  ASSERT_TRUE(fpb.TryReadFromCache(IOOptions(), reader.get(), 8192, 8192,
                                   &result, &s, true));
  ASSERT_EQ(s, Status::OK());
  ASSERT_EQ(strncmp(result.data(), written.substr(8192, 8192).c_str(), 8192),
            0);

  // Sequential read: 12288-16384 (overlaps the previous range).
  ASSERT_TRUE(fpb.TryReadFromCache(IOOptions(), reader.get(), 12288, 4096,
                                   &result, &s, true));
  ASSERT_EQ(s, Status::OK());
  ASSERT_EQ(strncmp(result.data(), written.substr(12288, 4096).c_str(), 4096),
            0);

  // Read from 16000-26000 (start and end do not meet normal alignment)
  ASSERT_TRUE(fpb.TryReadFromCache(IOOptions(), reader.get(), 16000, 10000,
                                   &result, &s, true));
  ASSERT_EQ(s, Status::OK());
  ASSERT_EQ(
      strncmp(result.data(), written.substr(16000, 10000).c_str(), 10000), 0);
}

} // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
Expand Down

0 comments on commit 5aead7a

Please sign in to comment.