Skip to content

Commit

Permalink
fix: reduce number of opened files on rocksdb (#486)
Browse files Browse the repository at this point in the history
  • Loading branch information
renancloudwalk authored Mar 31, 2024
1 parent 7c1681c commit a4aa152
Show file tree
Hide file tree
Showing 2 changed files with 35 additions and 11 deletions.
19 changes: 10 additions & 9 deletions src/eth/storage/hybrid/hybrid_state.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ use sqlx::QueryBuilder;
use sqlx::Row;
use tokio::join;

use super::rocks_db::DbConfig;
use super::rocks_db::RocksDb;
use crate::eth::primitives::Account;
use crate::eth::primitives::Address;
Expand Down Expand Up @@ -71,15 +72,15 @@ pub struct HybridStorageState {
impl HybridStorageState {
/// Opens (or creates) every RocksDB database backing the hybrid storage.
///
/// History and append-heavy databases use the `LargeSSTFiles` tuning profile
/// to keep the number of open file descriptors down; small lookup databases
/// use the RocksDB defaults.
///
/// # Panics
/// Panics if any of the databases under `./data/` cannot be opened.
pub fn new() -> Self {
    Self {
        accounts: RocksDb::new("./data/accounts.rocksdb", DbConfig::Default).expect("failed to open accounts db"),
        accounts_history: RocksDb::new("./data/accounts_history.rocksdb", DbConfig::LargeSSTFiles).expect("failed to open accounts_history db"),
        account_slots: RocksDb::new("./data/account_slots.rocksdb", DbConfig::Default).expect("failed to open account_slots db"),
        account_slots_history: RocksDb::new("./data/account_slots_history.rocksdb", DbConfig::LargeSSTFiles).expect("failed to open account_slots_history db"),
        transactions: RocksDb::new("./data/transactions.rocksdb", DbConfig::LargeSSTFiles).expect("failed to open transactions db"),
        blocks_by_number: RocksDb::new("./data/blocks_by_number.rocksdb", DbConfig::LargeSSTFiles).expect("failed to open blocks_by_number db"),
        // XXX this is not needed: we can afford to have blocks_by_hash pointing into blocks_by_number
        blocks_by_hash: RocksDb::new("./data/blocks_by_hash.rocksdb", DbConfig::LargeSSTFiles).expect("failed to open blocks_by_hash db"),
        logs: RocksDb::new("./data/logs.rocksdb", DbConfig::LargeSSTFiles).expect("failed to open logs db"),
        metadata: RocksDb::new("./data/metadata.rocksdb", DbConfig::Default).expect("failed to open metadata db"),
    }
}

Expand Down
27 changes: 25 additions & 2 deletions src/eth/storage/hybrid/rocks_db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,20 +3,43 @@ use std::marker::PhantomData;
use anyhow::Result;
use rocksdb::DBIteratorWithThreadMode;
use rocksdb::IteratorMode;
use rocksdb::Options;
use rocksdb::WriteBatch;
use rocksdb::DB;
use serde::Deserialize;
use serde::Serialize;

/// Tuning profile applied when opening a RocksDB instance.
///
/// Chosen per-database so that large, append-heavy stores can trade write
/// amplification for fewer, larger SST files (and thus fewer open files).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DbConfig {
    /// Larger SST files and a capped `max_open_files`, reducing the number
    /// of file descriptors RocksDB keeps open.
    LargeSSTFiles,
    /// Stock RocksDB options (only `create_if_missing` is set by `RocksDb::new`).
    Default,
}

/// A generic struct that abstracts over key-value pairs stored in RocksDB.
///
/// `K` and `V` are the key and value types of the typed API built on top of
/// the raw byte-oriented database; they are never stored in the struct itself.
pub struct RocksDb<K, V> {
    /// The underlying RocksDB database handle.
    pub db: DB,
    // Zero-sized marker binding the key/value type parameters to this
    // instance without storing any `K` or `V` values.
    _marker: PhantomData<(K, V)>,
}

impl<K: Serialize + for<'de> Deserialize<'de> + std::hash::Hash + Eq, V: Serialize + for<'de> Deserialize<'de> + Clone> RocksDb<K, V> {
pub fn new(db_path: &str) -> anyhow::Result<Self> {
let db = DB::open_default(db_path)?;
pub fn new(db_path: &str, config: DbConfig) -> anyhow::Result<Self> {
let mut opts = Options::default();

opts.create_if_missing(true);

match config {
DbConfig::LargeSSTFiles => {
// Adjusting for large SST files
opts.set_target_file_size_base(256 * 1024 * 1024); // 128MB
opts.set_max_write_buffer_number(4);
opts.set_write_buffer_size(64 * 1024 * 1024); // 64MB
opts.set_max_bytes_for_level_base(512 * 1024 * 1024); // 512MB
opts.set_max_open_files(100);
}
DbConfig::Default => {} // Default options are already set
}

let db = DB::open(&opts, db_path)?;

Ok(RocksDb { db, _marker: PhantomData })
}

Expand Down

0 comments on commit a4aa152

Please sign in to comment.