From b0f9fb12f0636fb7566af32b876e38d576d25442 Mon Sep 17 00:00:00 2001 From: zeapoz Date: Tue, 2 Apr 2024 13:42:30 +0200 Subject: [PATCH] ref(snapshot): modify file names to match upstream --- src/processor/snapshot/exporter.rs | 28 ++++++++++++++++------------ src/processor/snapshot/importer.rs | 14 ++++++++++---- src/processor/snapshot/mod.rs | 2 +- 3 files changed, 27 insertions(+), 17 deletions(-) diff --git a/src/processor/snapshot/exporter.rs b/src/processor/snapshot/exporter.rs index c9c1bd6..68cfe37 100644 --- a/src/processor/snapshot/exporter.rs +++ b/src/processor/snapshot/exporter.rs @@ -11,7 +11,7 @@ use prost::Message; use super::{ database::{self, SnapshotDB}, types::{self, SnapshotFactoryDependency, SnapshotHeader}, - DEFAULT_DB_PATH, SNAPSHOT_FACTORY_DEPS_FILE_NAME, SNAPSHOT_HEADER_FILE_NAME, + DEFAULT_DB_PATH, SNAPSHOT_FACTORY_DEPS_FILE_NAME_SUFFIX, SNAPSHOT_HEADER_FILE_NAME, }; pub mod protobuf { @@ -24,17 +24,17 @@ pub struct SnapshotExporter { } impl SnapshotExporter { - pub fn new(basedir: &Path, db_path: Option) -> Self { + pub fn new(basedir: &Path, db_path: Option) -> Result { let db_path = match db_path { Some(p) => PathBuf::from(p), None => PathBuf::from(DEFAULT_DB_PATH), }; - let database = SnapshotDB::new_read_only(db_path).unwrap(); - Self { + let database = SnapshotDB::new_read_only(db_path)?; + Ok(Self { basedir: basedir.to_path_buf(), database, - } + }) } pub fn export_snapshot(&self, chunk_size: u64) -> Result<()> { @@ -77,7 +77,10 @@ impl SnapshotExporter { buf.reserve(fd_len - buf.capacity()); } - let path = self.basedir.join(SNAPSHOT_FACTORY_DEPS_FILE_NAME); + let path = self.basedir.join(format!( + "snapshot_l1_batch_{}_{}", + header.l1_batch_number, SNAPSHOT_FACTORY_DEPS_FILE_NAME_SUFFIX + )); header.factory_deps_filepath = path .clone() .into_os_string() @@ -103,7 +106,7 @@ impl SnapshotExporter { fn export_storage_logs(&self, chunk_size: u64, header: &mut SnapshotHeader) -> Result<()> { let mut buf = BytesMut::new(); - let 
mut chunk_index = 0; + let mut chunk_id = 0; let index_to_key_map = self.database.cf_handle(database::INDEX_TO_KEY_MAP).unwrap(); let mut iterator = self @@ -144,15 +147,16 @@ impl SnapshotExporter { buf.reserve(chunk_len - buf.capacity()); } - chunk_index += 1; - let path = PathBuf::new() - .join(&self.basedir) - .join(format!("{chunk_index}.gz")); + let path = PathBuf::new().join(&self.basedir).join(format!( + "snapshot_l1_batch_{}_storage_logs_part_{:0>4}.proto.gzip", + header.l1_batch_number, chunk_id + )); + chunk_id += 1; header .storage_logs_chunks .push(types::SnapshotStorageLogsChunkMetadata { - chunk_id: chunk_index, + chunk_id, filepath: path .clone() .into_os_string() diff --git a/src/processor/snapshot/importer.rs b/src/processor/snapshot/importer.rs index 9593bc4..2d2995a 100644 --- a/src/processor/snapshot/importer.rs +++ b/src/processor/snapshot/importer.rs @@ -24,7 +24,7 @@ use super::{ SnapshotFactoryDependencies, SnapshotStorageLog, SnapshotStorageLogsChunk, }, types::SnapshotHeader, - SNAPSHOT_FACTORY_DEPS_FILE_NAME, SNAPSHOT_HEADER_FILE_NAME, + SNAPSHOT_FACTORY_DEPS_FILE_NAME_SUFFIX, SNAPSHOT_HEADER_FILE_NAME, }; use crate::processor::tree::tree_wrapper::TreeWrapper; @@ -47,7 +47,7 @@ impl SnapshotImporter { pub async fn run(mut self) -> Result<()> { let header = self.read_header()?; - let factory_deps = self.read_factory_deps()?; + let factory_deps = self.read_factory_deps(&header)?; let storage_logs_chunk = self.read_storage_logs_chunks(&header)?; self.tree @@ -63,8 +63,11 @@ impl SnapshotImporter { Ok(header) } - fn read_factory_deps(&self) -> Result { - let factory_deps_path = self.directory.join(SNAPSHOT_FACTORY_DEPS_FILE_NAME); + fn read_factory_deps(&self, header: &SnapshotHeader) -> Result { + let factory_deps_path = self.directory.join(format!( + "snapshot_l1_batch_{}_{}", + header.l1_batch_number, SNAPSHOT_FACTORY_DEPS_FILE_NAME_SUFFIX + )); let bytes = fs::read(factory_deps_path)?; let mut decoder = GzDecoder::new(&bytes[..]); @@ 
-97,6 +100,9 @@ impl SnapshotImporter { let mut decompressed_bytes = Vec::new(); decoder.read_to_end(&mut decompressed_bytes)?; + // TODO: It would be nice to avoid the intermediary step of decoding. Something like + // implementing a method on the types::* that does it automatically. Will improve + // readability for the export code too as a bonus. let storage_logs_chunk = SnapshotStorageLogsChunk::decode(&decompressed_bytes[..])?; chunks.push(storage_logs_chunk); } diff --git a/src/processor/snapshot/mod.rs b/src/processor/snapshot/mod.rs index 3cdd31a..9f21c75 100644 --- a/src/processor/snapshot/mod.rs +++ b/src/processor/snapshot/mod.rs @@ -26,7 +26,7 @@ use crate::processor::snapshot::types::MiniblockNumber; pub const DEFAULT_DB_PATH: &str = "snapshot_db"; pub const SNAPSHOT_HEADER_FILE_NAME: &str = "snapshot-header.json"; -pub const SNAPSHOT_FACTORY_DEPS_FILE_NAME: &str = "factory_deps.dat"; +pub const SNAPSHOT_FACTORY_DEPS_FILE_NAME_SUFFIX: &str = "factory_deps.proto.gzip"; pub struct SnapshotBuilder { database: SnapshotDB,