
Commit

Update cache directory to create content with MP owner access only (#637)

Signed-off-by: Daniel Carl Jones <[email protected]>
dannycjones authored Nov 28, 2023
1 parent 801e4c1 commit f2860e7
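
The substance of the change is a switch from the permissive default creation calls (fs::create_dir, fs::create_dir_all, fs::File::create) to the Unix-specific builders in the standard library: std::os::unix::fs::DirBuilderExt::mode for directories (0o700) and std::os::unix::fs::OpenOptionsExt::mode for cache files (0o600). A minimal standalone sketch of that pattern follows; the paths and file names are placeholders for illustration, not taken from the commit.

use std::fs;
use std::io::Write;
use std::os::unix::fs::{DirBuilderExt, OpenOptionsExt};

fn main() -> std::io::Result<()> {
    // Directory usable by its owner only (rwx------), as the cache directory now is.
    fs::DirBuilder::new()
        .recursive(true)
        .mode(0o700)
        .create("/tmp/example-cache")?;

    // File readable and writable by its owner only (rw-------), as cache blocks now are.
    let mut file = fs::OpenOptions::new()
        .write(true)
        .create(true)
        .truncate(true)
        .mode(0o600)
        .open("/tmp/example-cache/block.bin")?;
    file.write_all(b"cached bytes")
}

Both modes are still subject to the process umask, so a stricter umask can only tighten these permissions further.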
Showing 2 changed files with 84 additions and 39 deletions.
mountpoint-s3/src/data_cache/cache_directory.rs (108 changes: 72 additions & 36 deletions)
@@ -8,6 +8,7 @@
 use std::fs;
 use std::io;
+use std::os::unix::fs::DirBuilderExt;
 use std::path::{Path, PathBuf};
 
 use thiserror::Error;
@@ -27,36 +28,39 @@ pub enum ManagedCacheDirError {
 }
 
 impl ManagedCacheDir {
-    /// Create a new directory inside the provided parent path, cleaning it of any contents if it already exists.
+    /// Create a new directory inside the provided parent path.
+    /// If the directory already exists, it will be deleted before being recreated.
     pub fn new_from_parent<P: AsRef<Path>>(parent_path: P) -> Result<Self, ManagedCacheDirError> {
-        let managed_path = parent_path.as_ref().join("mountpoint-cache");
+        let managed_cache_dir = Self {
+            managed_path: parent_path.as_ref().join("mountpoint-cache"),
+        };
 
-        if let Err(mkdir_err) = fs::create_dir(&managed_path) {
+        managed_cache_dir.remove()?;
+
+        let mkdir_result = fs::DirBuilder::new().mode(0o700).create(managed_cache_dir.as_path());
+        if let Err(mkdir_err) = mkdir_result {
             match mkdir_err.kind() {
-                io::ErrorKind::AlreadyExists => (),
+                io::ErrorKind::AlreadyExists => tracing::warn!(
+                    cache_dir = ?managed_cache_dir.as_path(),
+                    "cache sub-directory already existed immediately after removal",
+                ),
                 _kind => return Err(ManagedCacheDirError::CreationFailure(mkdir_err)),
             }
         }
 
-        let managed_cache_dir = Self { managed_path };
-        managed_cache_dir.clean()?;
-
         Ok(managed_cache_dir)
     }
 
-    /// Clear the cache sub-directory
-    fn clean(&self) -> Result<(), ManagedCacheDirError> {
-        tracing::debug!(cache_subdirectory = ?self.managed_path, "cleaning up contents of cache sub-directory");
-        let read_dir = fs::read_dir(self.managed_path.as_path()).map_err(ManagedCacheDirError::CleanupFailure)?;
-        for entry in read_dir {
-            let path = entry.map_err(ManagedCacheDirError::CleanupFailure)?.path();
-            if path.is_dir() {
-                fs::remove_dir_all(path).map_err(ManagedCacheDirError::CleanupFailure)?;
-            } else {
-                fs::remove_file(path).map_err(ManagedCacheDirError::CleanupFailure)?;
-            }
-        }
-        tracing::trace!(cache_subdirectory = ?self.managed_path, "cleanup complete");
+    /// Remove the cache sub-directory, along with its contents if any
+    fn remove(&self) -> Result<(), ManagedCacheDirError> {
+        tracing::debug!(cache_subdirectory = ?self.managed_path, "removing the cache sub-directory and any contents");
+        if let Err(remove_dir_err) = fs::remove_dir_all(&self.managed_path) {
+            match remove_dir_err.kind() {
+                io::ErrorKind::NotFound => (),
+                _kind => return Err(ManagedCacheDirError::CleanupFailure(remove_dir_err)),
+            }
+        }
+        tracing::trace!(cache_subdirectory = ?self.managed_path, "cache sub-directory removal complete");
         Ok(())
     }
 
@@ -73,8 +77,8 @@ impl ManagedCacheDir {
 
 impl Drop for ManagedCacheDir {
     fn drop(&mut self) {
-        if let Err(err) = self.clean() {
-            tracing::error!(managed_cache_path = ?self.managed_path, "failed to clean cache directory: {err}");
+        if let Err(err) = self.remove() {
+            tracing::error!(cache_subdirectory = ?self.managed_path, "failed to remove cache sub-directory: {err}");
         }
     }
 }
@@ -84,6 +88,9 @@ mod tests {
     use super::ManagedCacheDir;
 
     use std::fs;
+    use std::os::unix::fs::{DirBuilderExt, PermissionsExt};
+
+    const EXPECTED_DIR_MODE: u32 = 0o700;
 
     #[test]
     fn test_unused() {
@@ -92,13 +99,21 @@
 
         let managed_dir =
             ManagedCacheDir::new_from_parent(temp_dir.path()).expect("creating managed dir should succeed");
-        assert!(expected_path.try_exists().unwrap(), "{expected_path:?} should exist");
+        let dir_mode = fs::metadata(&expected_path)
+            .expect("path should exist")
+            .permissions()
+            .mode();
+        assert_eq!(
+            dir_mode & 0o777,
+            EXPECTED_DIR_MODE,
+            "path should have {EXPECTED_DIR_MODE:#o} permission mode but had {dir_mode:#o}",
+        );
 
         drop(managed_dir);
-        let dir_entries = fs::read_dir(&expected_path)
-            .expect("cache dir should still exist")
-            .count();
-        assert!(dir_entries == 0, "directory should be empty");
+        assert!(
+            !expected_path.try_exists().unwrap(),
+            "{expected_path:?} should not exist"
+        );
 
         temp_dir.close().unwrap();
     }
@@ -110,7 +125,15 @@
 
         let managed_dir =
             ManagedCacheDir::new_from_parent(temp_dir.path()).expect("creating managed dir should succeed");
-        assert!(expected_path.try_exists().unwrap(), "{expected_path:?} should exist");
+        let dir_mode = fs::metadata(&expected_path)
+            .expect("path should exist")
+            .permissions()
+            .mode();
+        assert_eq!(
+            dir_mode & 0o777,
+            EXPECTED_DIR_MODE,
+            "path should have {EXPECTED_DIR_MODE:#o} permission mode but had {dir_mode:#o}",
+        );
 
         fs::File::create(expected_path.join("file.txt"))
             .expect("should be able to create file within managed directory");
@@ -119,10 +142,10 @@
             .expect("should be able to create file within subdirectory");
 
         drop(managed_dir);
-        let dir_entries = fs::read_dir(&expected_path)
-            .expect("cache dir should still exist")
-            .count();
-        assert!(dir_entries == 0, "directory should be empty");
+        assert!(
+            !expected_path.try_exists().unwrap(),
+            "{expected_path:?} should not exist"
+        );
 
         temp_dir.close().unwrap();
     }
@@ -132,21 +155,34 @@
         let temp_dir = tempfile::tempdir().unwrap();
         let expected_path = temp_dir.path().join("mountpoint-cache");
 
-        fs::create_dir_all(expected_path.join("dir")).unwrap();
+        fs::DirBuilder::new()
+            .recursive(true)
+            .mode(0o775) // something that isn't the expected `0o700`
+            .create(expected_path.join("dir"))
+            .unwrap();
         fs::File::create(expected_path.join("dir/file.txt")).unwrap();
 
         let managed_dir =
             ManagedCacheDir::new_from_parent(temp_dir.path()).expect("creating managed dir should succeed");
-        assert!(expected_path.try_exists().unwrap(), "{expected_path:?} should exist");
+
+        let dir_mode = fs::metadata(&expected_path)
+            .expect("path should exist")
+            .permissions()
+            .mode();
+        assert_eq!(
+            dir_mode & 0o777,
+            EXPECTED_DIR_MODE,
+            "path should have {EXPECTED_DIR_MODE:#o} permission mode but had {dir_mode:#o}",
+        );
 
         let dir_entries = fs::read_dir(&expected_path).unwrap().count();
         assert!(dir_entries == 0, "directory should be empty");
 
         drop(managed_dir);
-        let dir_entries = fs::read_dir(&expected_path)
-            .expect("cache dir should still exist")
-            .count();
-        assert!(dir_entries == 0, "directory should be empty");
+        assert!(
+            !expected_path.try_exists().unwrap(),
+            "{expected_path:?} should not exist"
+        );
 
         temp_dir.close().unwrap();
     }
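The tests above verify the new behaviour by reading the mode bits back through fs::metadata. A small helper in the same spirit, purely illustrative and not part of the commit, shows how such a check can be written; the 0o777 mask strips the file-type and setuid/setgid/sticky bits that mode() also reports.

use std::fs;
use std::io;
use std::os::unix::fs::PermissionsExt;
use std::path::Path;

/// Returns true if `path` is a directory accessible by its owner alone (0o700),
/// mirroring what the updated tests assert for the cache sub-directory.
fn is_owner_only_dir(path: &Path) -> io::Result<bool> {
    let metadata = fs::metadata(path)?;
    Ok(metadata.is_dir() && (metadata.permissions().mode() & 0o777) == 0o700)
}
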
mountpoint-s3/src/data_cache/disk_data_cache.rs (15 changes: 12 additions & 3 deletions)
@@ -2,6 +2,7 @@
 use std::fs;
 use std::io::{ErrorKind, Read, Seek, Write};
+use std::os::unix::fs::{DirBuilderExt, OpenOptionsExt};
 use std::path::{Path, PathBuf};
 use std::time::Instant;

@@ -269,15 +270,23 @@ impl DiskDataCache {
             .as_ref()
             .parent()
             .expect("path should include cache key in directory name");
-        fs::create_dir_all(cache_path_for_key)?;
+        fs::DirBuilder::new()
+            .mode(0o700)
+            .recursive(true)
+            .create(cache_path_for_key)?;
 
         trace!(
             key = block.header.s3_key,
             offset = block.header.block_offset,
             "writing block at {}",
             path.as_ref().display()
         );
-        let mut file = fs::File::create(path.as_ref())?;
+        let mut file = fs::OpenOptions::new()
+            .write(true)
+            .create(true)
+            .truncate(true)
+            .mode(0o600)
+            .open(path.as_ref())?;
         file.write_all(CACHE_VERSION.as_bytes())?;
         let serialize_result = bincode::serialize_into(&mut file, &block);
         if let Err(err) = serialize_result {
@@ -439,7 +448,7 @@ impl DataCache for DiskDataCache {
 /// Key to identify a block in the disk cache, composed of a hash of the S3 key and Etag, and the block index.
 /// An S3 key may be up to 1024 UTF-8 bytes long, which exceeds the maximum UNIX file name length.
 /// Instead, this key contains a hash of the S3 key and ETag to avoid the limit when used in paths.
-/// The risk of collisions is mitigated as we ignore blocks read that contain the wrong S3 key, etc..
+/// The risk of collisions is mitigated as we ignore blocks read that contain the wrong S3 key, etc..
 #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)]
 struct DiskBlockKey {
     hashed_key: [u8; 32],
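One general property of OpenOptionsExt::mode worth noting here (standard-library behaviour, not something this commit changes): the 0o600 mode is applied only when open actually creates the file, and it is masked by the process umask; a block file that already exists keeps its previous permissions. The sketch below restates the new write path under those assumptions, with a placeholder function name and path.

use std::fs::OpenOptions;
use std::io::{self, Write};
use std::os::unix::fs::OpenOptionsExt;
use std::path::Path;

/// Write a cache block that only the owner can read or modify.
fn write_block(path: &Path, bytes: &[u8]) -> io::Result<()> {
    let mut file = OpenOptions::new()
        .write(true)
        .create(true)
        .truncate(true)
        .mode(0o600) // applied only if the file is newly created, then masked by umask
        .open(path)?;
    file.write_all(bytes)
}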
