From e042326655f4c882866e9cd99a2dbbe7dc117615 Mon Sep 17 00:00:00 2001 From: Tim Bruijnzeels Date: Thu, 24 Aug 2023 10:33:07 +0200 Subject: [PATCH 01/18] Use kvx with explicit namespace type. --- Cargo.lock | 15 ++--- Cargo.toml | 4 +- src/cli/ta_client.rs | 5 +- .../crypto/signing/dispatch/signerinfo.rs | 2 +- src/commons/eventsourcing/kv.rs | 56 +++++++++++-------- src/commons/eventsourcing/mod.rs | 8 ++- src/commons/eventsourcing/store.rs | 18 ++++-- src/commons/eventsourcing/wal.rs | 5 +- src/constants.rs | 24 ++++---- src/daemon/auth/common/crypt.rs | 4 +- src/daemon/ca/manager.rs | 6 +- src/daemon/ca/status.rs | 9 ++- src/daemon/config.rs | 4 +- src/daemon/properties/mod.rs | 2 +- src/daemon/ta/mod.rs | 6 +- src/pubd/repository.rs | 2 +- src/upgrades/pre_0_10_0/cas_migration.rs | 2 +- src/upgrades/pre_0_10_0/pubd_migration.rs | 2 +- src/upgrades/pre_0_14_0/mod.rs | 6 +- 19 files changed, 103 insertions(+), 77 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0257db892..979aefcc5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1130,9 +1130,8 @@ dependencies = [ [[package]] name = "kvx" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aea5d159eef7e2aa78c53130afef47424676a67195299aa84ba092ab86eedcdf" +version = "0.7.0" +source = "git+https://github.com/nlnetlabs/kvx?branch=support-namespace-migrations#73465df3b1fbf90527725350c24cee960117f4b0" dependencies = [ "kvx_macros", "kvx_types", @@ -1148,9 +1147,8 @@ dependencies = [ [[package]] name = "kvx_macros" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40efe43ad6ed19cf391e87927dd6c1cd18f231f00112111a6e9d3214fda6e46" +version = "0.7.0" +source = "git+https://github.com/nlnetlabs/kvx?branch=support-namespace-migrations#73465df3b1fbf90527725350c24cee960117f4b0" dependencies = [ "kvx_types", "proc-macro-error", @@ -1161,9 +1159,8 @@ dependencies = [ [[package]] name = "kvx_types" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "261784226d4f9e160da81b401ee77ce1a3c00598599f3d7da93a6fc6caa93d9d" +version = "0.7.0" +source = "git+https://github.com/nlnetlabs/kvx?branch=support-namespace-migrations#73465df3b1fbf90527725350c24cee960117f4b0" dependencies = [ "postgres", "postgres-types", diff --git a/Cargo.toml b/Cargo.toml index 3d138c46e..f4cfb2376 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,8 +36,8 @@ hyper = { version = "^0.14", features = ["server"] } intervaltree = "0.2.6" jmespatch = { version = "^0.3", features = ["sync"], optional = true } kmip = { version = "0.4.2", package = "kmip-protocol", features = ["tls-with-openssl"], optional = true } -kvx = { version = "0.6.0", features = ["macros"] } -# kvx = { version = "0.6.0", git = "https://github.com/nlnetlabs/kvx", features = ["macros"] } +# kvx = { version = "0.6.0", features = ["macros"] } +kvx = { version = "0.7.0", git = "https://github.com/nlnetlabs/kvx", branch = "support-namespace-migrations", features = ["macros"] } libflate = "^1" log = "^0.4" once_cell = { version = "^1.7.2", optional = true } diff --git a/src/cli/ta_client.rs b/src/cli/ta_client.rs index e60778c0e..98e7665dd 100644 --- a/src/cli/ta_client.rs +++ b/src/cli/ta_client.rs @@ -12,6 +12,7 @@ use std::{ use bytes::Bytes; use clap::{App, Arg, ArgMatches, SubCommand}; + use log::LevelFilter; use rpki::{ ca::idexchange::{self, ChildHandle, RepoInfo, ServiceUri}, @@ -28,7 +29,7 @@ use crate::{ api::{AddChildRequest, ApiRepositoryContact, CertAuthInfo, IdCertInfo, 
RepositoryContact, Token}, crypto::{KrillSigner, KrillSignerBuilder, OpenSslSignerConfig}, error::Error as KrillError, - eventsourcing::{segment, AggregateStore, AggregateStoreError, Segment}, + eventsourcing::{namespace, AggregateStore, AggregateStoreError, Namespace}, util::{file, httpclient}, }, constants::{ @@ -1002,7 +1003,7 @@ struct TrustAnchorSignerManager { impl TrustAnchorSignerManager { fn create(config: Config) -> Result { - let store = AggregateStore::create(&config.storage_uri, segment!("signer"), config.use_history_cache) + let store = AggregateStore::create_from_url(&config.storage_uri, namespace!("signer"), config.use_history_cache) .map_err(KrillError::AggregateStoreError)?; let ta_handle = TrustAnchorHandle::new("ta".into()); let signer = config.signer()?; diff --git a/src/commons/crypto/signing/dispatch/signerinfo.rs b/src/commons/crypto/signing/dispatch/signerinfo.rs index c7400e4b4..591bfda4e 100644 --- a/src/commons/crypto/signing/dispatch/signerinfo.rs +++ b/src/commons/crypto/signing/dispatch/signerinfo.rs @@ -373,7 +373,7 @@ impl std::fmt::Debug for SignerMapper { impl SignerMapper { /// Build a SignerMapper that will read/write its data in a subdirectory of the given work dir. pub fn build(storage_uri: &Url) -> KrillResult { - let store = AggregateStore::::create(storage_uri, SIGNERS_NS, true)?; + let store = AggregateStore::::create_from_url(storage_uri, SIGNERS_NS, true)?; Ok(SignerMapper { store }) } diff --git a/src/commons/eventsourcing/kv.rs b/src/commons/eventsourcing/kv.rs index 44146c77b..f23e5e208 100644 --- a/src/commons/eventsourcing/kv.rs +++ b/src/commons/eventsourcing/kv.rs @@ -1,7 +1,7 @@ -use std::fmt; +use std::{fmt, sync::Arc}; -pub use kvx::{segment, Key, Scope, Segment, SegmentBuf}; -use kvx::{KeyValueStoreBackend, ReadStore, WriteStore}; +pub use kvx::{segment, namespace, Key, Namespace, Scope, Segment, SegmentBuf}; +use kvx::{KeyValueStoreBackend, ReadStore, WriteStore, NamespaceBuf}; use serde::{de::DeserializeOwned, Serialize}; use url::Url; @@ -33,17 +33,17 @@ impl SegmentExt for Segment { } } -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct KeyValueStore { - inner: kvx::KeyValueStore, + inner: Arc, } impl KeyValueStore { /// Creates a new KeyValueStore and initializes the version if it had /// not been set. 
- pub fn create(storage_uri: &Url, name_space: impl Into) -> Result { + pub fn create(storage_uri: &Url, name_space: impl Into) -> Result { let store = KeyValueStore { - inner: kvx::KeyValueStore::new(storage_uri, name_space)?, + inner: Arc::new(kvx::KeyValueStore::new(storage_uri, name_space)?), }; Ok(store) } @@ -224,6 +224,16 @@ mod tests { .unwrap() } + fn random_namespace() -> NamespaceBuf { + rand::thread_rng() + .sample_iter(&Alphanumeric) + .take(8) + .map(char::from) + .collect::() + .parse() + .unwrap() + } + fn get_storage_uri() -> Url { env::var("KRILL_KV_STORAGE_URL") .ok() @@ -235,7 +245,7 @@ mod tests { fn test_store() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_segment()).unwrap(); + let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); let content = "content".to_owned(); let key = Key::new_global(random_segment()); @@ -248,7 +258,7 @@ mod tests { fn test_store_new() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_segment()).unwrap(); + let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); let content = "content".to_owned(); let key = Key::new_global(random_segment()); @@ -260,7 +270,7 @@ mod tests { fn test_store_scoped() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_segment()).unwrap(); + let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); let content = "content".to_owned(); let id = random_segment(); let scope = Scope::from_segment(segment!("scope")); @@ -281,7 +291,7 @@ mod tests { fn test_get() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_segment()).unwrap(); + let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); let content = "content".to_owned(); let key = Key::new_global(random_segment()); assert_eq!(store.get::(&key).unwrap(), None); @@ -294,7 +304,7 @@ mod tests { fn test_get_transactional() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_segment()).unwrap(); + let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); let content = "content".to_owned(); let key = Key::new_global(random_segment()); assert_eq!(store.get_transactional::(&key).unwrap(), None); @@ -307,7 +317,7 @@ mod tests { fn test_has() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_segment()).unwrap(); + let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); let content = "content".to_owned(); let key = Key::new_global(random_segment()); assert!(!store.has(&key).unwrap()); @@ -320,7 +330,7 @@ mod tests { fn test_drop_key() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_segment()).unwrap(); + let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); let content = "content".to_owned(); let key = Key::new_global(random_segment()); store.store(&key, &content).unwrap(); @@ -334,7 +344,7 @@ mod tests { fn test_drop_scope() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_segment()).unwrap(); + let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); let content = "content".to_owned(); let scope = Scope::from_segment(random_segment()); let key = Key::new_scoped(scope.clone(), random_segment()); @@ -355,7 +365,7 @@ mod tests { fn 
test_wipe() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_segment()).unwrap(); + let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); let content = "content".to_owned(); let scope = Scope::from_segment(segment!("scope")); let key = Key::new_scoped(scope.clone(), random_segment()); @@ -373,7 +383,7 @@ mod tests { fn test_move_key() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_segment()).unwrap(); + let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); let content = "content".to_string(); let key = Key::new_global(random_segment()); @@ -390,7 +400,7 @@ mod tests { fn test_archive() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_segment()).unwrap(); + let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); let content = "content".to_string(); let key = Key::new_global(random_segment()); @@ -406,7 +416,7 @@ mod tests { fn test_archive_corrupt() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_segment()).unwrap(); + let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); let content = "content".to_string(); let key = Key::new_global(random_segment()); @@ -422,7 +432,7 @@ mod tests { fn test_archive_surplus() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_segment()).unwrap(); + let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); let content = "content".to_string(); let key = Key::new_global(random_segment()); @@ -438,7 +448,7 @@ mod tests { fn test_scopes() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_segment()).unwrap(); + let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); let content = "content".to_owned(); let id = segment!("id"); let scope = Scope::from_segment(random_segment()); @@ -467,7 +477,7 @@ mod tests { fn test_has_scope() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_segment()).unwrap(); + let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); let content = "content".to_owned(); let scope = Scope::from_segment(random_segment()); let key = Key::new_scoped(scope.clone(), segment!("id")); @@ -481,7 +491,7 @@ mod tests { fn test_keys() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_segment()).unwrap(); + let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); let content = "content".to_owned(); let id = segment!("command--id"); let scope = Scope::from_segment(segment!("command")); diff --git a/src/commons/eventsourcing/mod.rs b/src/commons/eventsourcing/mod.rs index bf12907df..e564886db 100644 --- a/src/commons/eventsourcing/mod.rs +++ b/src/commons/eventsourcing/mod.rs @@ -21,7 +21,7 @@ pub use self::listener::*; pub mod locks; mod kv; -pub use self::kv::{segment, Key, KeyValueError, KeyValueStore, Scope, Segment, SegmentBuf, SegmentExt}; +pub use self::kv::{segment, namespace, Key, KeyValueError, KeyValueStore, Namespace, Scope, Segment, SegmentBuf, SegmentExt}; //------------ Tests --------------------------------------------------------- @@ -341,9 +341,11 @@ mod tests { // let storage_uri = crate::commons::util::storage::storage_uri_from_data_dir(&data_dir).unwrap(); let storage_uri = tmp_storage(); + let 
kv = KeyValueStore::create(&storage_uri, namespace!("person")).unwrap(); let counter = Arc::new(EventCounter::default()); - let mut manager = AggregateStore::::create(&storage_uri, segment!("person"), false).unwrap(); + + let mut manager = AggregateStore::::create_with_store(kv.clone(), false).unwrap(); manager.add_post_save_listener(counter.clone()); let alice_name = "alice smith".to_string(); @@ -376,7 +378,7 @@ mod tests { assert_eq!(21, alice.age()); // Should read state from disk - let manager = AggregateStore::::create(&storage_uri, segment!("person"), false).unwrap(); + let manager = AggregateStore::::create_with_store(kv, false).unwrap(); let alice = manager.get_latest(&alice_handle).unwrap(); assert_eq!("alice smith-doe", alice.name()); diff --git a/src/commons/eventsourcing/store.rs b/src/commons/eventsourcing/store.rs index 85474f5e1..2c1e2e172 100644 --- a/src/commons/eventsourcing/store.rs +++ b/src/commons/eventsourcing/store.rs @@ -5,6 +5,7 @@ use std::{ sync::{Arc, Mutex, RwLock}, }; +use kvx::NamespaceBuf; use rpki::{ca::idexchange::MyHandle, repository::x509::Time}; use serde::{de::DeserializeOwned, Serialize}; use url::Url; @@ -14,7 +15,7 @@ use crate::commons::{ error::KrillIoError, eventsourcing::{ cmd::Command, locks::HandleLocks, segment, Aggregate, Key, KeyValueError, KeyValueStore, PostSaveEventListener, - PreSaveEventListener, Scope, Segment, SegmentBuf, SegmentExt, StoredCommand, StoredCommandBuilder, + PreSaveEventListener, Scope, Segment, SegmentExt, StoredCommand, StoredCommandBuilder, }, }; @@ -42,13 +43,22 @@ pub struct AggregateStore { /// # Starting up /// impl AggregateStore { - /// Creates an AggregateStore using a disk based KeyValueStore - pub fn create( + /// Creates an AggregateStore using the given storage url + pub fn create_from_url( storage_uri: &Url, - name_space: impl Into, + name_space: impl Into, use_history_cache: bool, ) -> StoreResult { let kv = KeyValueStore::create(storage_uri, name_space)?; + + Self::create_with_store(kv, use_history_cache) + } + + /// Creates an AggregateStore using the given KV store + pub fn create_with_store( + kv: KeyValueStore, + use_history_cache: bool, + ) -> StoreResult { let cache = RwLock::new(HashMap::new()); let history_cache = if !use_history_cache { None diff --git a/src/commons/eventsourcing/wal.rs b/src/commons/eventsourcing/wal.rs index 0fd7b5a59..29e0bdadf 100644 --- a/src/commons/eventsourcing/wal.rs +++ b/src/commons/eventsourcing/wal.rs @@ -5,12 +5,13 @@ use std::{ sync::{Arc, RwLock}, }; +use kvx::NamespaceBuf; use rpki::ca::idexchange::MyHandle; use serde::Serialize; use url::Url; use crate::commons::eventsourcing::{ - locks::HandleLocks, segment, Key, KeyValueError, KeyValueStore, Scope, Segment, SegmentBuf, SegmentExt, Storable, + locks::HandleLocks, segment, Key, KeyValueError, KeyValueStore, Scope, Segment, SegmentExt, Storable, }; //------------ WalSupport ---------------------------------------------------- @@ -129,7 +130,7 @@ pub struct WalStore { impl WalStore { /// Creates a new store using a disk based keystore for the given data /// directory and namespace (directory). 
- pub fn create(storage_uri: &Url, name_space: impl Into) -> WalStoreResult { + pub fn create(storage_uri: &Url, name_space: impl Into) -> WalStoreResult { let kv = KeyValueStore::create(storage_uri, name_space)?; let cache = RwLock::new(HashMap::new()); let locks = HandleLocks::default(); diff --git a/src/constants.rs b/src/constants.rs index ccd91c60a..bf93710e5 100644 --- a/src/constants.rs +++ b/src/constants.rs @@ -1,7 +1,9 @@ +use kvx::Namespace; + use crate::{ commons::{ actor::ActorDef, - eventsourcing::{segment, Segment}, + eventsourcing::namespace, }, daemon::auth::common::NoResourceType, }; @@ -47,16 +49,16 @@ pub fn test_announcements_enabled() -> bool { // until const fn's are more versatile for str's, we need to use lazy_static to be able to expand the segment macro at // compile time, while running the expanded code, which actually makes it a Segment, at runtime -pub const CASERVER_NS: &Segment = segment!("cas"); -pub const CA_OBJECTS_NS: &Segment = segment!("ca_objects"); -pub const KEYS_NS: &Segment = segment!("keys"); -pub const PUBSERVER_CONTENT_NS: &Segment = segment!("pubd_objects"); -pub const PUBSERVER_NS: &Segment = segment!("pubd"); -pub const PROPERTIES_NS: &Segment = segment!("properties"); -pub const SIGNERS_NS: &Segment = segment!("signers"); -pub const STATUS_NS: &Segment = segment!("status"); -pub const TA_PROXY_SERVER_NS: &Segment = segment!("ta_proxy"); -pub const TA_SIGNER_SERVER_NS: &Segment = segment!("ta_signer"); +pub const CASERVER_NS: &Namespace = namespace!("cas"); +pub const CA_OBJECTS_NS: &Namespace = namespace!("ca_objects"); +pub const KEYS_NS: &Namespace = namespace!("keys"); +pub const PUBSERVER_CONTENT_NS: &Namespace = namespace!("pubd_objects"); +pub const PUBSERVER_NS: &Namespace = namespace!("pubd"); +pub const PROPERTIES_NS: &Namespace = namespace!("properties"); +pub const SIGNERS_NS: &Namespace = namespace!("signers"); +pub const STATUS_NS: &Namespace = namespace!("status"); +pub const TA_PROXY_SERVER_NS: &Namespace = namespace!("ta_proxy"); +pub const TA_SIGNER_SERVER_NS: &Namespace = namespace!("ta_signer"); pub const UPGRADE_DIR: &str = "upgrade-data"; diff --git a/src/daemon/auth/common/crypt.rs b/src/daemon/auth/common/crypt.rs index a4ae61a9b..a74f2cc05 100644 --- a/src/daemon/auth/common/crypt.rs +++ b/src/daemon/auth/common/crypt.rs @@ -18,7 +18,7 @@ use std::sync::atomic::{AtomicU64, Ordering}; -use kvx::{segment, Key, Segment}; +use kvx::{namespace, segment, Key, Namespace, Segment}; use crate::{ commons::{error::Error, util::ext_serde, KrillResult}, @@ -34,7 +34,7 @@ const POLY1305_TAG_BYTE_LEN: usize = POLY1305_TAG_BIT_LEN / 8; const CLEARTEXT_PREFIX_LEN: usize = CHACHA20_NONCE_BYTE_LEN + POLY1305_TAG_BYTE_LEN; const UNUSED_AAD: [u8; 0] = [0; 0]; -const CRYPT_STATE_NS: &Segment = segment!("login_sessions"); +const CRYPT_STATE_NS: &Namespace = namespace!("login_sessions"); const CRYPT_STATE_KEY: &Segment = segment!("main_key"); #[derive(Debug, Deserialize, Serialize)] diff --git a/src/daemon/ca/manager.rs b/src/daemon/ca/manager.rs index 2988fcd28..4b99f0fb2 100644 --- a/src/daemon/ca/manager.rs +++ b/src/daemon/ca/manager.rs @@ -109,7 +109,7 @@ impl CaManager { // Create the AggregateStore for the event-sourced `CertAuth` structures that handle // most CA functions. 
let mut ca_store = - AggregateStore::::create(&config.storage_uri, CASERVER_NS, config.use_history_cache)?; + AggregateStore::::create_from_url(&config.storage_uri, CASERVER_NS, config.use_history_cache)?; if config.always_recover_data { // If the user chose to 'always recover data' then do so. @@ -151,7 +151,7 @@ impl CaManager { // Create TA proxy store if we need it. let ta_proxy_store = if config.ta_proxy_enabled() { - let mut store = AggregateStore::::create( + let mut store = AggregateStore::::create_from_url( &config.storage_uri, TA_PROXY_SERVER_NS, config.use_history_cache, @@ -168,7 +168,7 @@ impl CaManager { }; let ta_signer_store = if config.ta_signer_enabled() { - Some(AggregateStore::create( + Some(AggregateStore::create_from_url( &config.storage_uri, TA_SIGNER_SERVER_NS, config.use_history_cache, diff --git a/src/daemon/ca/status.rs b/src/daemon/ca/status.rs index 75b304c0d..29eae4313 100644 --- a/src/daemon/ca/status.rs +++ b/src/daemon/ca/status.rs @@ -1,5 +1,6 @@ use std::{collections::HashMap, str::FromStr, sync::RwLock}; +use kvx::NamespaceBuf; use rpki::ca::{ idexchange::{CaHandle, ChildHandle, ParentHandle, ServiceUri}, provisioning::ResourceClassListResponse as Entitlements, @@ -13,7 +14,7 @@ use crate::commons::{ RepoStatus, }, error::Error, - eventsourcing::{segment, Key, KeyValueStore, Scope, Segment, SegmentBuf, SegmentExt}, + eventsourcing::{segment, Key, KeyValueStore, Scope, Segment, SegmentExt}, util::httpclient, KrillResult, }; @@ -67,7 +68,7 @@ pub struct StatusStore { } impl StatusStore { - pub fn create(storage_uri: &Url, namespace: impl Into) -> KrillResult { + pub fn create(storage_uri: &Url, namespace: impl Into) -> KrillResult { let store = KeyValueStore::create(storage_uri, namespace)?; let cache = RwLock::new(HashMap::new()); @@ -403,6 +404,8 @@ mod tests { use std::path::PathBuf; + use kvx::{namespace, Namespace}; + use crate::{ commons::util::{file, storage::storage_uri_from_data_dir}, test, @@ -422,7 +425,7 @@ mod tests { serde_json::from_str(status_testbed_before_migration).unwrap(); let storage_uri = storage_uri_from_data_dir(&data_dir).unwrap(); - let store = StatusStore::create(&storage_uri, segment!("status")).unwrap(); + let store = StatusStore::create(&storage_uri, namespace!("status")).unwrap(); let testbed = CaHandle::from_str("testbed").unwrap(); let status_testbed_migrated = store.get_ca_status(&testbed); diff --git a/src/daemon/config.rs b/src/daemon/config.rs index b20e03a75..c482838ed 100644 --- a/src/daemon/config.rs +++ b/src/daemon/config.rs @@ -8,7 +8,7 @@ use std::{ }; use chrono::Duration; -use kvx::SegmentBuf; +use kvx::NamespaceBuf; use log::{error, LevelFilter}; use rpki::{ ca::idexchange::PublisherHandle, @@ -825,7 +825,7 @@ impl Config { KeyValueStore::create(&self.storage_uri, PROPERTIES_NS).map_err(Error::KeyValueError) } - pub fn key_value_store(&self, name_space: impl Into) -> KrillResult { + pub fn key_value_store(&self, name_space: impl Into) -> KrillResult { KeyValueStore::create(&self.storage_uri, name_space).map_err(Error::KeyValueError) } diff --git a/src/daemon/properties/mod.rs b/src/daemon/properties/mod.rs index b8e580921..a9681a0de 100644 --- a/src/daemon/properties/mod.rs +++ b/src/daemon/properties/mod.rs @@ -257,7 +257,7 @@ pub struct PropertiesManager { impl PropertiesManager { pub fn create(storage_uri: &Url, use_history_cache: bool) -> KrillResult { let main_key = MyHandle::from_str(PROPERTIES_DFLT_NAME).unwrap(); - AggregateStore::create(storage_uri, PROPERTIES_NS, use_history_cache) + 
AggregateStore::create_from_url(storage_uri, PROPERTIES_NS, use_history_cache) .map(|store| PropertiesManager { store, main_key, diff --git a/src/daemon/ta/mod.rs b/src/daemon/ta/mod.rs index 585edfdb0..bd04d5cff 100644 --- a/src/daemon/ta/mod.rs +++ b/src/daemon/ta/mod.rs @@ -39,7 +39,7 @@ mod tests { commons::{ api::{PublicationServerInfo, RepositoryContact}, crypto::KrillSignerBuilder, - eventsourcing::{segment, AggregateStore, Segment}, + eventsourcing::{namespace, AggregateStore, Namespace}, }, daemon::config::ConfigDefaults, test, @@ -51,9 +51,9 @@ mod tests { let cleanup = test::init_logging(); let ta_signer_store: AggregateStore = - AggregateStore::create(storage_uri, segment!("ta_signer"), false).unwrap(); + AggregateStore::create_from_url(storage_uri, namespace!("ta_signer"), false).unwrap(); let ta_proxy_store: AggregateStore = - AggregateStore::create(storage_uri, segment!("ta_proxy"), false).unwrap(); + AggregateStore::create_from_url(storage_uri, namespace!("ta_proxy"), false).unwrap(); // We will import a TA key - this is only (supposed to be) supported for the openssl signer let signers = ConfigDefaults::openssl_signer_only(); diff --git a/src/pubd/repository.rs b/src/pubd/repository.rs index 522f22b47..b6545a6c3 100644 --- a/src/pubd/repository.rs +++ b/src/pubd/repository.rs @@ -1516,7 +1516,7 @@ pub struct RepositoryAccessProxy { impl RepositoryAccessProxy { pub fn create(config: &Config) -> KrillResult { - let store = AggregateStore::::create( + let store = AggregateStore::::create_from_url( &config.storage_uri, PUBSERVER_NS, config.use_history_cache, diff --git a/src/upgrades/pre_0_10_0/cas_migration.rs b/src/upgrades/pre_0_10_0/cas_migration.rs index 6239a3a6c..5b6e33ac0 100644 --- a/src/upgrades/pre_0_10_0/cas_migration.rs +++ b/src/upgrades/pre_0_10_0/cas_migration.rs @@ -68,7 +68,7 @@ impl CasMigration { pub fn upgrade(mode: UpgradeMode, config: &Config) -> UpgradeResult<()> { let current_kv_store = KeyValueStore::create(&config.storage_uri, CASERVER_NS)?; let new_kv_store = KeyValueStore::create(config.upgrade_storage_uri(), CASERVER_NS)?; - let new_agg_store = AggregateStore::::create( + let new_agg_store = AggregateStore::::create_from_url( config.upgrade_storage_uri(), CASERVER_NS, config.use_history_cache, diff --git a/src/upgrades/pre_0_10_0/pubd_migration.rs b/src/upgrades/pre_0_10_0/pubd_migration.rs index 6cee9870a..27dfe116b 100644 --- a/src/upgrades/pre_0_10_0/pubd_migration.rs +++ b/src/upgrades/pre_0_10_0/pubd_migration.rs @@ -28,7 +28,7 @@ impl PublicationServerRepositoryAccessMigration { let current_kv_store = KeyValueStore::create(&config.storage_uri, PUBSERVER_NS)?; let new_kv_store = KeyValueStore::create(config.upgrade_storage_uri(), PUBSERVER_NS)?; let new_agg_store = - AggregateStore::create(config.upgrade_storage_uri(), PUBSERVER_NS, config.use_history_cache)?; + AggregateStore::create_from_url(config.upgrade_storage_uri(), PUBSERVER_NS, config.use_history_cache)?; let store_migration = PublicationServerRepositoryAccessMigration { current_kv_store, diff --git a/src/upgrades/pre_0_14_0/mod.rs b/src/upgrades/pre_0_14_0/mod.rs index 4fddebdd2..720fa7f11 100644 --- a/src/upgrades/pre_0_14_0/mod.rs +++ b/src/upgrades/pre_0_14_0/mod.rs @@ -1,6 +1,6 @@ use std::{fmt, str::FromStr}; -use kvx::Segment; +use kvx::Namespace; use rpki::{ca::idexchange::MyHandle, repository::x509::Time}; use crate::{ @@ -239,7 +239,7 @@ pub struct GenericUpgradeAggregateStore { } impl GenericUpgradeAggregateStore { - pub fn upgrade(name_space: &Segment, mode: UpgradeMode, 
config: &Config) -> UpgradeResult<()> { + pub fn upgrade(name_space: &Namespace, mode: UpgradeMode, config: &Config) -> UpgradeResult<()> { let current_kv_store = KeyValueStore::create(&config.storage_uri, name_space)?; if current_kv_store.scopes()?.is_empty() { @@ -248,7 +248,7 @@ impl GenericUpgradeAggregateStore { } else { let new_kv_store = KeyValueStore::create(config.upgrade_storage_uri(), name_space)?; let new_agg_store = - AggregateStore::::create(config.upgrade_storage_uri(), name_space, config.use_history_cache)?; + AggregateStore::::create_from_url(config.upgrade_storage_uri(), name_space, config.use_history_cache)?; let store_migration = GenericUpgradeAggregateStore { store_name: name_space.to_string(), From 265ac61db46974d3c2a81b77d857b53dd504a6e6 Mon Sep 17 00:00:00 2001 From: Tim Bruijnzeels Date: Thu, 24 Aug 2023 15:16:23 +0200 Subject: [PATCH 02/18] No longer expect that the memory store is wiped when created. --- src/commons/eventsourcing/kv.rs | 17 ++++++++--------- src/commons/eventsourcing/mod.rs | 11 ++++++----- src/commons/eventsourcing/store.rs | 16 +++++----------- 3 files changed, 19 insertions(+), 25 deletions(-) diff --git a/src/commons/eventsourcing/kv.rs b/src/commons/eventsourcing/kv.rs index f23e5e208..1667d59f7 100644 --- a/src/commons/eventsourcing/kv.rs +++ b/src/commons/eventsourcing/kv.rs @@ -1,7 +1,7 @@ -use std::{fmt, sync::Arc}; +use std::fmt; -pub use kvx::{segment, namespace, Key, Namespace, Scope, Segment, SegmentBuf}; -use kvx::{KeyValueStoreBackend, ReadStore, WriteStore, NamespaceBuf}; +pub use kvx::{namespace, segment, Key, Namespace, Scope, Segment, SegmentBuf}; +use kvx::{KeyValueStoreBackend, NamespaceBuf, ReadStore, WriteStore}; use serde::{de::DeserializeOwned, Serialize}; use url::Url; @@ -33,19 +33,18 @@ impl SegmentExt for Segment { } } -#[derive(Clone, Debug)] +#[derive(Debug)] pub struct KeyValueStore { - inner: Arc, + inner: kvx::KeyValueStore, } impl KeyValueStore { /// Creates a new KeyValueStore and initializes the version if it had /// not been set. 
pub fn create(storage_uri: &Url, name_space: impl Into) -> Result { - let store = KeyValueStore { - inner: Arc::new(kvx::KeyValueStore::new(storage_uri, name_space)?), - }; - Ok(store) + kvx::KeyValueStore::new(storage_uri, name_space) + .map(|inner| KeyValueStore { inner }) + .map_err(KeyValueError::KVError) } /// Stores a key value pair, serialized as json, overwrite existing diff --git a/src/commons/eventsourcing/mod.rs b/src/commons/eventsourcing/mod.rs index e564886db..a38e503ab 100644 --- a/src/commons/eventsourcing/mod.rs +++ b/src/commons/eventsourcing/mod.rs @@ -21,7 +21,9 @@ pub use self::listener::*; pub mod locks; mod kv; -pub use self::kv::{segment, namespace, Key, KeyValueError, KeyValueStore, Namespace, Scope, Segment, SegmentBuf, SegmentExt}; +pub use self::kv::{ + namespace, segment, Key, KeyValueError, KeyValueStore, Namespace, Scope, Segment, SegmentBuf, SegmentExt, +}; //------------ Tests --------------------------------------------------------- @@ -341,11 +343,10 @@ mod tests { // let storage_uri = crate::commons::util::storage::storage_uri_from_data_dir(&data_dir).unwrap(); let storage_uri = tmp_storage(); - let kv = KeyValueStore::create(&storage_uri, namespace!("person")).unwrap(); let counter = Arc::new(EventCounter::default()); - let mut manager = AggregateStore::::create_with_store(kv.clone(), false).unwrap(); + let mut manager = AggregateStore::::create_from_url(&storage_uri, namespace!("person"), false).unwrap(); manager.add_post_save_listener(counter.clone()); let alice_name = "alice smith".to_string(); @@ -377,8 +378,8 @@ mod tests { assert_eq!("alice smith-doe", alice.name()); assert_eq!(21, alice.age()); - // Should read state from disk - let manager = AggregateStore::::create_with_store(kv, false).unwrap(); + // Should read state again when restarted with same data store mapping. + let mut manager = AggregateStore::::create_from_url(&storage_uri, namespace!("person"), false).unwrap(); let alice = manager.get_latest(&alice_handle).unwrap(); assert_eq!("alice smith-doe", alice.name()); diff --git a/src/commons/eventsourcing/store.rs b/src/commons/eventsourcing/store.rs index 2c1e2e172..fc11911f3 100644 --- a/src/commons/eventsourcing/store.rs +++ b/src/commons/eventsourcing/store.rs @@ -50,15 +50,6 @@ impl AggregateStore { use_history_cache: bool, ) -> StoreResult { let kv = KeyValueStore::create(storage_uri, name_space)?; - - Self::create_with_store(kv, use_history_cache) - } - - /// Creates an AggregateStore using the given KV store - pub fn create_with_store( - kv: KeyValueStore, - use_history_cache: bool, - ) -> StoreResult { let cache = RwLock::new(HashMap::new()); let history_cache = if !use_history_cache { None @@ -404,7 +395,10 @@ where // Little local helper so we can use borrowed records without keeping // the lock longer than it wants to live. 
- fn command_history_for_records(crit: CommandHistoryCriteria, records: &[CommandHistoryRecord]) -> CommandHistory { + fn command_history_for_records( + crit: CommandHistoryCriteria, + records: &[CommandHistoryRecord], + ) -> CommandHistory { let offset = crit.offset(); let rows = match crit.rows_limit() { @@ -429,7 +423,7 @@ where CommandHistory::new(offset, total, matching) } - + match &self.history_cache { Some(mutex) => { let mut cache_lock = mutex.lock().unwrap(); From 02c822dcbae3ad1df3a693db2c1601d7694bf62e Mon Sep 17 00:00:00 2001 From: Tim Bruijnzeels Date: Thu, 24 Aug 2023 17:09:25 +0200 Subject: [PATCH 03/18] Remove upgrade_storage_uri --- Cargo.lock | 6 +- src/bin/krillup.rs | 10 +--- src/cli/ta_client.rs | 2 +- .../crypto/signing/dispatch/signerinfo.rs | 2 +- src/commons/eventsourcing/kv.rs | 60 ++++++++++++------- src/commons/eventsourcing/mod.rs | 4 +- src/commons/eventsourcing/store.rs | 18 ++++-- src/commons/eventsourcing/wal.rs | 4 +- src/commons/util/mod.rs | 5 ++ src/constants.rs | 7 +-- src/daemon/ca/manager.rs | 6 +- src/daemon/ca/status.rs | 4 +- src/daemon/config.rs | 26 ++------ src/daemon/properties/mod.rs | 2 +- src/daemon/ta/mod.rs | 4 +- src/pubd/repository.rs | 7 +-- src/upgrades/mod.rs | 24 ++++---- src/upgrades/pre_0_10_0/cas_migration.rs | 9 +-- src/upgrades/pre_0_10_0/pubd_migration.rs | 4 +- src/upgrades/pre_0_14_0/mod.rs | 4 +- 20 files changed, 107 insertions(+), 101 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 979aefcc5..fe132f877 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1131,7 +1131,7 @@ dependencies = [ [[package]] name = "kvx" version = "0.7.0" -source = "git+https://github.com/nlnetlabs/kvx?branch=support-namespace-migrations#73465df3b1fbf90527725350c24cee960117f4b0" +source = "git+https://github.com/nlnetlabs/kvx?branch=support-namespace-migrations#22379ed92b3befb436cd709956515a5887e72fff" dependencies = [ "kvx_macros", "kvx_types", @@ -1148,7 +1148,7 @@ dependencies = [ [[package]] name = "kvx_macros" version = "0.7.0" -source = "git+https://github.com/nlnetlabs/kvx?branch=support-namespace-migrations#73465df3b1fbf90527725350c24cee960117f4b0" +source = "git+https://github.com/nlnetlabs/kvx?branch=support-namespace-migrations#22379ed92b3befb436cd709956515a5887e72fff" dependencies = [ "kvx_types", "proc-macro-error", @@ -1160,7 +1160,7 @@ dependencies = [ [[package]] name = "kvx_types" version = "0.7.0" -source = "git+https://github.com/nlnetlabs/kvx?branch=support-namespace-migrations#73465df3b1fbf90527725350c24cee960117f4b0" +source = "git+https://github.com/nlnetlabs/kvx?branch=support-namespace-migrations#22379ed92b3befb436cd709956515a5887e72fff" dependencies = [ "postgres", "postgres-types", diff --git a/src/bin/krillup.rs b/src/bin/krillup.rs index f4aad1283..5e9006d31 100644 --- a/src/bin/krillup.rs +++ b/src/bin/krillup.rs @@ -31,8 +31,7 @@ async fn main() { match Config::create(config_file, true) { Ok(config) => { - let properties_manager = match PropertiesManager::create(&config.storage_uri, config.use_history_cache) - { + let properties_manager = match PropertiesManager::create(&config.storage_uri, config.use_history_cache) { Ok(mgr) => mgr, Err(e) => { eprintln!("*** Error Preparing Data Migration ***"); @@ -61,12 +60,7 @@ async fn main() { let from = report.versions().from(); let to = report.versions().to(); if report.data_migration() { - info!( - "Prepared and verified upgrade from {} to {}. 
Prepared data was saved to: {}", - from, - to, - config.upgrade_storage_uri() - ); + info!("Prepared and verified upgrade from {} to {}.", from, to,); } else { info!("No preparation is needed for the upgrade from {} to {}.", from, to) } diff --git a/src/cli/ta_client.rs b/src/cli/ta_client.rs index 98e7665dd..3e9f0de49 100644 --- a/src/cli/ta_client.rs +++ b/src/cli/ta_client.rs @@ -1003,7 +1003,7 @@ struct TrustAnchorSignerManager { impl TrustAnchorSignerManager { fn create(config: Config) -> Result { - let store = AggregateStore::create_from_url(&config.storage_uri, namespace!("signer"), config.use_history_cache) + let store = AggregateStore::create(&config.storage_uri, namespace!("signer"), config.use_history_cache) .map_err(KrillError::AggregateStoreError)?; let ta_handle = TrustAnchorHandle::new("ta".into()); let signer = config.signer()?; diff --git a/src/commons/crypto/signing/dispatch/signerinfo.rs b/src/commons/crypto/signing/dispatch/signerinfo.rs index 591bfda4e..c7400e4b4 100644 --- a/src/commons/crypto/signing/dispatch/signerinfo.rs +++ b/src/commons/crypto/signing/dispatch/signerinfo.rs @@ -373,7 +373,7 @@ impl std::fmt::Debug for SignerMapper { impl SignerMapper { /// Build a SignerMapper that will read/write its data in a subdirectory of the given work dir. pub fn build(storage_uri: &Url) -> KrillResult { - let store = AggregateStore::::create_from_url(storage_uri, SIGNERS_NS, true)?; + let store = AggregateStore::::create(storage_uri, SIGNERS_NS, true)?; Ok(SignerMapper { store }) } diff --git a/src/commons/eventsourcing/kv.rs b/src/commons/eventsourcing/kv.rs index 1667d59f7..88956efda 100644 --- a/src/commons/eventsourcing/kv.rs +++ b/src/commons/eventsourcing/kv.rs @@ -1,11 +1,11 @@ -use std::fmt; +use std::{fmt, str::FromStr}; pub use kvx::{namespace, segment, Key, Namespace, Scope, Segment, SegmentBuf}; use kvx::{KeyValueStoreBackend, NamespaceBuf, ReadStore, WriteStore}; use serde::{de::DeserializeOwned, Serialize}; use url::Url; -use crate::commons::error::KrillIoError; +use crate::commons::{error::KrillIoError, util::KrillVersion}; pub trait SegmentExt { fn parse_lossy(value: &str) -> SegmentBuf; @@ -39,14 +39,30 @@ pub struct KeyValueStore { } impl KeyValueStore { - /// Creates a new KeyValueStore and initializes the version if it had - /// not been set. - pub fn create(storage_uri: &Url, name_space: impl Into) -> Result { + /// Creates a new KeyValueStore. + pub fn create(storage_uri: &Url, name_space: &Namespace) -> Result { kvx::KeyValueStore::new(storage_uri, name_space) .map(|inner| KeyValueStore { inner }) .map_err(KeyValueError::KVError) } + /// Creates a new KeyValueStore for upgrades. + /// + /// Adds the implicit prefix "upgrade-{version}-" to the given namespace. + pub fn create_upgrade_store(storage_uri: &Url, namespace: &Namespace) -> Result { + let namespace_string = format!( + "upgrade_{}_{}", + KrillVersion::code_version().hyphen_notated(), + namespace + ); + let namespace = NamespaceBuf::from_str(&namespace_string) + .map_err(|e| KeyValueError::Other(format!("Cannot parse namespace: {}. Error: {}", namespace_string, e)))?; + + kvx::KeyValueStore::new(storage_uri, namespace) + .map(|inner| KeyValueStore { inner }) + .map_err(KeyValueError::KVError) + } + /// Stores a key value pair, serialized as json, overwrite existing pub fn store(&self, key: &Key, value: &V) -> Result<(), KeyValueError> { Ok(self.inner.store(key, serde_json::to_value(value)?)?) 
@@ -171,6 +187,7 @@ pub enum KeyValueError { UnknownKey(Key), DuplicateKey(Key), KVError(kvx::Error), + Other(String), } impl From for KeyValueError { @@ -200,6 +217,7 @@ impl fmt::Display for KeyValueError { KeyValueError::UnknownKey(key) => write!(f, "Unknown key: {}", key), KeyValueError::DuplicateKey(key) => write!(f, "Duplicate key: {}", key), KeyValueError::KVError(e) => write!(f, "Store error: {}", e), + KeyValueError::Other(msg) => write!(f, "{}", msg), } } } @@ -244,7 +262,7 @@ mod tests { fn test_store() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); + let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); let content = "content".to_owned(); let key = Key::new_global(random_segment()); @@ -257,7 +275,7 @@ mod tests { fn test_store_new() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); + let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); let content = "content".to_owned(); let key = Key::new_global(random_segment()); @@ -269,7 +287,7 @@ mod tests { fn test_store_scoped() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); + let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); let content = "content".to_owned(); let id = random_segment(); let scope = Scope::from_segment(segment!("scope")); @@ -290,7 +308,7 @@ mod tests { fn test_get() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); + let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); let content = "content".to_owned(); let key = Key::new_global(random_segment()); assert_eq!(store.get::(&key).unwrap(), None); @@ -303,7 +321,7 @@ mod tests { fn test_get_transactional() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); + let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); let content = "content".to_owned(); let key = Key::new_global(random_segment()); assert_eq!(store.get_transactional::(&key).unwrap(), None); @@ -316,7 +334,7 @@ mod tests { fn test_has() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); + let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); let content = "content".to_owned(); let key = Key::new_global(random_segment()); assert!(!store.has(&key).unwrap()); @@ -329,7 +347,7 @@ mod tests { fn test_drop_key() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); + let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); let content = "content".to_owned(); let key = Key::new_global(random_segment()); store.store(&key, &content).unwrap(); @@ -343,7 +361,7 @@ mod tests { fn test_drop_scope() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); + let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); let content = "content".to_owned(); let scope = Scope::from_segment(random_segment()); let key = Key::new_scoped(scope.clone(), random_segment()); @@ -364,7 +382,7 @@ mod tests { fn test_wipe() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, 
random_namespace()).unwrap(); + let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); let content = "content".to_owned(); let scope = Scope::from_segment(segment!("scope")); let key = Key::new_scoped(scope.clone(), random_segment()); @@ -382,7 +400,7 @@ mod tests { fn test_move_key() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); + let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); let content = "content".to_string(); let key = Key::new_global(random_segment()); @@ -399,7 +417,7 @@ mod tests { fn test_archive() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); + let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); let content = "content".to_string(); let key = Key::new_global(random_segment()); @@ -415,7 +433,7 @@ mod tests { fn test_archive_corrupt() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); + let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); let content = "content".to_string(); let key = Key::new_global(random_segment()); @@ -431,7 +449,7 @@ mod tests { fn test_archive_surplus() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); + let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); let content = "content".to_string(); let key = Key::new_global(random_segment()); @@ -447,7 +465,7 @@ mod tests { fn test_scopes() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); + let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); let content = "content".to_owned(); let id = segment!("id"); let scope = Scope::from_segment(random_segment()); @@ -476,7 +494,7 @@ mod tests { fn test_has_scope() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); + let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); let content = "content".to_owned(); let scope = Scope::from_segment(random_segment()); let key = Key::new_scoped(scope.clone(), segment!("id")); @@ -490,7 +508,7 @@ mod tests { fn test_keys() { let storage_uri = get_storage_uri(); - let store = KeyValueStore::create(&storage_uri, random_namespace()).unwrap(); + let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); let content = "content".to_owned(); let id = segment!("command--id"); let scope = Scope::from_segment(segment!("command")); diff --git a/src/commons/eventsourcing/mod.rs b/src/commons/eventsourcing/mod.rs index a38e503ab..bda8c6ffc 100644 --- a/src/commons/eventsourcing/mod.rs +++ b/src/commons/eventsourcing/mod.rs @@ -346,7 +346,7 @@ mod tests { let counter = Arc::new(EventCounter::default()); - let mut manager = AggregateStore::::create_from_url(&storage_uri, namespace!("person"), false).unwrap(); + let mut manager = AggregateStore::::create(&storage_uri, namespace!("person"), false).unwrap(); manager.add_post_save_listener(counter.clone()); let alice_name = "alice smith".to_string(); @@ -379,7 +379,7 @@ mod tests { assert_eq!(21, alice.age()); // Should read state again when restarted with same data store mapping. 
- let mut manager = AggregateStore::::create_from_url(&storage_uri, namespace!("person"), false).unwrap(); + let manager = AggregateStore::::create(&storage_uri, namespace!("person"), false).unwrap(); let alice = manager.get_latest(&alice_handle).unwrap(); assert_eq!("alice smith-doe", alice.name()); diff --git a/src/commons/eventsourcing/store.rs b/src/commons/eventsourcing/store.rs index fc11911f3..88a3a5693 100644 --- a/src/commons/eventsourcing/store.rs +++ b/src/commons/eventsourcing/store.rs @@ -5,7 +5,7 @@ use std::{ sync::{Arc, Mutex, RwLock}, }; -use kvx::NamespaceBuf; +use kvx::Namespace; use rpki::{ca::idexchange::MyHandle, repository::x509::Time}; use serde::{de::DeserializeOwned, Serialize}; use url::Url; @@ -44,12 +44,22 @@ pub struct AggregateStore { /// impl AggregateStore { /// Creates an AggregateStore using the given storage url - pub fn create_from_url( + pub fn create(storage_uri: &Url, name_space: &Namespace, use_history_cache: bool) -> StoreResult { + let kv = KeyValueStore::create(storage_uri, name_space)?; + Self::create_from_kv(kv, use_history_cache) + } + + /// Creates an AggregateStore for upgrades using the given storage url + pub fn create_upgrade_store( storage_uri: &Url, - name_space: impl Into, + name_space: &Namespace, use_history_cache: bool, ) -> StoreResult { - let kv = KeyValueStore::create(storage_uri, name_space)?; + let kv = KeyValueStore::create_upgrade_store(storage_uri, name_space)?; + Self::create_from_kv(kv, use_history_cache) + } + + fn create_from_kv(kv: KeyValueStore, use_history_cache: bool) -> StoreResult { let cache = RwLock::new(HashMap::new()); let history_cache = if !use_history_cache { None diff --git a/src/commons/eventsourcing/wal.rs b/src/commons/eventsourcing/wal.rs index 29e0bdadf..44859c8bc 100644 --- a/src/commons/eventsourcing/wal.rs +++ b/src/commons/eventsourcing/wal.rs @@ -5,7 +5,7 @@ use std::{ sync::{Arc, RwLock}, }; -use kvx::NamespaceBuf; +use kvx::Namespace; use rpki::ca::idexchange::MyHandle; use serde::Serialize; use url::Url; @@ -130,7 +130,7 @@ pub struct WalStore { impl WalStore { /// Creates a new store using a disk based keystore for the given data /// directory and namespace (directory). - pub fn create(storage_uri: &Url, name_space: impl Into) -> WalStoreResult { + pub fn create(storage_uri: &Url, name_space: &Namespace) -> WalStoreResult { let kv = KeyValueStore::create(storage_uri, name_space)?; let cache = RwLock::new(HashMap::new()); let locks = HandleLocks::default(); diff --git a/src/commons/util/mod.rs b/src/commons/util/mod.rs index 297909689..9962cd8df 100644 --- a/src/commons/util/mod.rs +++ b/src/commons/util/mod.rs @@ -36,6 +36,11 @@ impl KrillVersion { Self::from_str(KRILL_VERSION).unwrap() } + /// Make a notation friendly to namespaces for upgrades. 
+ pub fn hyphen_notated(&self) -> String { + format!("{}-{}-{}{}", self.major, self.minor, self.patch, self.release_type) + } + pub fn v0_5_0_or_before() -> Self { Self::dev(0, 5, 0, "or-before".to_string()) } diff --git a/src/constants.rs b/src/constants.rs index bf93710e5..f26b1911b 100644 --- a/src/constants.rs +++ b/src/constants.rs @@ -1,10 +1,7 @@ use kvx::Namespace; use crate::{ - commons::{ - actor::ActorDef, - eventsourcing::namespace, - }, + commons::{actor::ActorDef, eventsourcing::namespace}, daemon::auth::common::NoResourceType, }; @@ -60,8 +57,6 @@ pub const STATUS_NS: &Namespace = namespace!("status"); pub const TA_PROXY_SERVER_NS: &Namespace = namespace!("ta_proxy"); pub const TA_SIGNER_SERVER_NS: &Namespace = namespace!("ta_signer"); -pub const UPGRADE_DIR: &str = "upgrade-data"; - pub const PROPERTIES_DFLT_NAME: &str = "main"; pub const PUBSERVER_DFLT: &str = "0"; diff --git a/src/daemon/ca/manager.rs b/src/daemon/ca/manager.rs index 4b99f0fb2..2988fcd28 100644 --- a/src/daemon/ca/manager.rs +++ b/src/daemon/ca/manager.rs @@ -109,7 +109,7 @@ impl CaManager { // Create the AggregateStore for the event-sourced `CertAuth` structures that handle // most CA functions. let mut ca_store = - AggregateStore::::create_from_url(&config.storage_uri, CASERVER_NS, config.use_history_cache)?; + AggregateStore::::create(&config.storage_uri, CASERVER_NS, config.use_history_cache)?; if config.always_recover_data { // If the user chose to 'always recover data' then do so. @@ -151,7 +151,7 @@ impl CaManager { // Create TA proxy store if we need it. let ta_proxy_store = if config.ta_proxy_enabled() { - let mut store = AggregateStore::::create_from_url( + let mut store = AggregateStore::::create( &config.storage_uri, TA_PROXY_SERVER_NS, config.use_history_cache, @@ -168,7 +168,7 @@ impl CaManager { }; let ta_signer_store = if config.ta_signer_enabled() { - Some(AggregateStore::create_from_url( + Some(AggregateStore::create( &config.storage_uri, TA_SIGNER_SERVER_NS, config.use_history_cache, diff --git a/src/daemon/ca/status.rs b/src/daemon/ca/status.rs index 29eae4313..e7d4f4a30 100644 --- a/src/daemon/ca/status.rs +++ b/src/daemon/ca/status.rs @@ -1,6 +1,6 @@ use std::{collections::HashMap, str::FromStr, sync::RwLock}; -use kvx::NamespaceBuf; +use kvx::Namespace; use rpki::ca::{ idexchange::{CaHandle, ChildHandle, ParentHandle, ServiceUri}, provisioning::ResourceClassListResponse as Entitlements, @@ -68,7 +68,7 @@ pub struct StatusStore { } impl StatusStore { - pub fn create(storage_uri: &Url, namespace: impl Into) -> KrillResult { + pub fn create(storage_uri: &Url, namespace: &Namespace) -> KrillResult { let store = KeyValueStore::create(storage_uri, namespace)?; let cache = RwLock::new(HashMap::new()); diff --git a/src/daemon/config.rs b/src/daemon/config.rs index c482838ed..d33e107ec 100644 --- a/src/daemon/config.rs +++ b/src/daemon/config.rs @@ -8,7 +8,7 @@ use std::{ }; use chrono::Duration; -use kvx::NamespaceBuf; +use kvx::Namespace; use log::{error, LevelFilter}; use rpki::{ ca::idexchange::PublisherHandle, @@ -27,10 +27,7 @@ use crate::{ crypto::{OpenSslSignerConfig, SignSupport}, error::{Error, KrillIoError}, eventsourcing::KeyValueStore, - util::{ - ext_serde, - storage::{data_dir_from_storage_uri, storage_uri_from_data_dir}, - }, + util::{ext_serde, storage::data_dir_from_storage_uri}, KrillResult, }, constants::*, @@ -452,11 +449,9 @@ pub struct Config { )] pub storage_uri: Url, - #[serde(default="ConfigDefaults::dflt_true")] + #[serde(default = "ConfigDefaults::dflt_true")] pub 
use_history_cache: bool, - upgrade_storage_uri: Option, - tls_keys_dir: Option, repo_dir: Option, @@ -815,17 +810,13 @@ pub struct Benchmark { /// # Accessors impl Config { - pub fn upgrade_storage_uri(&self) -> &Url { - self.upgrade_storage_uri.as_ref().unwrap() // should not panic, as it is always set by Config::verify - } - /// General purpose KV store, can be used to track server settings /// etc not specific to any Aggregate or WalSupport type pub fn general_key_value_store(&self) -> KrillResult { KeyValueStore::create(&self.storage_uri, PROPERTIES_NS).map_err(Error::KeyValueError) } - pub fn key_value_store(&self, name_space: impl Into) -> KrillResult { + pub fn key_value_store(&self, name_space: &Namespace) -> KrillResult { KeyValueStore::create(&self.storage_uri, name_space).map_err(Error::KeyValueError) } @@ -1076,7 +1067,6 @@ impl Config { https_mode, storage_uri: storage_uri.clone(), use_history_cache: false, - upgrade_storage_uri: data_dir.map(|d| storage_uri_from_data_dir(&d.join(UPGRADE_DIR)).unwrap()), tls_keys_dir: data_dir.map(|d| d.join(HTTPS_SUB_DIR)), repo_dir: data_dir.map(|d| d.join(REPOSITORY_DIR)), ta_support_enabled: false, // but, enabled by testbed where applicable @@ -1166,7 +1156,6 @@ impl Config { if upgrade_only { info!("Prepare upgrade using configuration file: {}", config_file); info!("Processing data from: {}", config.storage_uri); - info!("Saving prepared data to: {}", config.upgrade_storage_uri(),); } else { info!("{} uses configuration file: {}", KRILL_SERVER_APP, config_file); } @@ -1202,13 +1191,6 @@ impl Config { self.ca_refresh_seconds = CA_REFRESH_SECONDS_MAX; } - if self.upgrade_storage_uri.is_none() { - if self.storage_uri.scheme() != "local" { - return Err(ConfigError::other("'upgrade_storage_uri' is not configured, but 'storage_uri' is not a local directory, please configure an 'upgrade_storage_uri'")); - } - self.upgrade_storage_uri = Some(self.storage_uri.join(UPGRADE_DIR).unwrap()); - } - if self.tls_keys_dir.is_none() { if let Some(mut data_dir) = data_dir_from_storage_uri(&self.storage_uri) { data_dir.push(HTTPS_SUB_DIR); diff --git a/src/daemon/properties/mod.rs b/src/daemon/properties/mod.rs index a9681a0de..b8e580921 100644 --- a/src/daemon/properties/mod.rs +++ b/src/daemon/properties/mod.rs @@ -257,7 +257,7 @@ pub struct PropertiesManager { impl PropertiesManager { pub fn create(storage_uri: &Url, use_history_cache: bool) -> KrillResult { let main_key = MyHandle::from_str(PROPERTIES_DFLT_NAME).unwrap(); - AggregateStore::create_from_url(storage_uri, PROPERTIES_NS, use_history_cache) + AggregateStore::create(storage_uri, PROPERTIES_NS, use_history_cache) .map(|store| PropertiesManager { store, main_key, diff --git a/src/daemon/ta/mod.rs b/src/daemon/ta/mod.rs index bd04d5cff..a2552e44b 100644 --- a/src/daemon/ta/mod.rs +++ b/src/daemon/ta/mod.rs @@ -51,9 +51,9 @@ mod tests { let cleanup = test::init_logging(); let ta_signer_store: AggregateStore = - AggregateStore::create_from_url(storage_uri, namespace!("ta_signer"), false).unwrap(); + AggregateStore::create(storage_uri, namespace!("ta_signer"), false).unwrap(); let ta_proxy_store: AggregateStore = - AggregateStore::create_from_url(storage_uri, namespace!("ta_proxy"), false).unwrap(); + AggregateStore::create(storage_uri, namespace!("ta_proxy"), false).unwrap(); // We will import a TA key - this is only (supposed to be) supported for the openssl signer let signers = ConfigDefaults::openssl_signer_only(); diff --git a/src/pubd/repository.rs b/src/pubd/repository.rs index 
b6545a6c3..dadb7da8e 100644 --- a/src/pubd/repository.rs +++ b/src/pubd/repository.rs @@ -1516,11 +1516,8 @@ pub struct RepositoryAccessProxy { impl RepositoryAccessProxy { pub fn create(config: &Config) -> KrillResult { - let store = AggregateStore::::create_from_url( - &config.storage_uri, - PUBSERVER_NS, - config.use_history_cache, - )?; + let store = + AggregateStore::::create(&config.storage_uri, PUBSERVER_NS, config.use_history_cache)?; let key = MyHandle::from_str(PUBSERVER_DFLT).unwrap(); if store.has(&key)? { diff --git a/src/upgrades/mod.rs b/src/upgrades/mod.rs index ff7015e2b..77586572d 100644 --- a/src/upgrades/mod.rs +++ b/src/upgrades/mod.rs @@ -634,19 +634,20 @@ pub fn prepare_upgrade_data_migrations( error!("{}", msg); Err(UpgradeError::custom(msg)) } else if versions.from < KrillVersion::candidate(0, 10, 0, 1) { - let upgrade_data_dir = data_dir_from_storage_uri(config.upgrade_storage_uri()).unwrap(); - if !upgrade_data_dir.exists() { - file::create_dir_all(&upgrade_data_dir)?; - } - // Get a lock to ensure that only one process can run this migration // at any one time (for a given config). let _lock = { + // Note that all version before 0.14.0 were using disk based storage + // and we only support migration to database storage *after* upgrading. + // So.. it is safe to unwrap the storage_uri into a data dir here. We + // would not be here otherwise. + let data_dir = data_dir_from_storage_uri(&config.storage_uri).unwrap(); + // Create upgrade dir if it did not yet exist. - let lock_file_path = upgrade_data_dir.join("upgrade.lock"); + let lock_file_path = data_dir.join("upgrade.lock"); fslock::LockFile::open(&lock_file_path).map_err(|_| { UpgradeError::custom( - format!("Cannot get upgrade lock. Another process may be running a Krill upgrade. Or, perhaps you ran 'krillup' as root - in that case check the ownership of directory: {}", upgrade_data_dir.to_string_lossy()), + format!("Cannot get upgrade lock. Another process may be running a Krill upgrade. Or, perhaps you ran 'krillup' as root - in that case check the ownership of directory: {}", data_dir.to_string_lossy()), ) })? 
}; @@ -724,7 +725,7 @@ fn migrate_0_12_pubd_objects(config: &Config) -> KrillResult { let old_repo_content = old_store.get_latest(&repo_content_handle)?.as_ref().clone(); let repo_content: pubd::RepositoryContent = old_repo_content.try_into()?; let new_key = Key::new_scoped(Scope::from_segment(segment!("0")), segment!("snapshot.json")); - let upgrade_store = KeyValueStore::create(config.upgrade_storage_uri(), PUBSERVER_CONTENT_NS)?; + let upgrade_store = KeyValueStore::create_upgrade_store(&config.storage_uri, PUBSERVER_CONTENT_NS)?; upgrade_store.store(&new_key, &repo_content)?; Ok(true) } else { @@ -748,7 +749,7 @@ fn migrate_pre_0_12_pubd_objects(config: &Config) -> KrillResult<()> { let repo_content: pubd::RepositoryContent = old_repo_content.try_into()?; let new_key = Key::new_scoped(Scope::from_segment(segment!("0")), segment!("snapshot.json")); - let upgrade_store = KeyValueStore::create(config.upgrade_storage_uri(), PUBSERVER_CONTENT_NS)?; + let upgrade_store = KeyValueStore::create_upgrade_store(&config.storage_uri, PUBSERVER_CONTENT_NS)?; upgrade_store.store(&new_key, &repo_content)?; } } @@ -840,7 +841,10 @@ pub fn finalise_data_migration( let version_file = current_dir.join("version"); if version_file.exists() { - debug!("Removing (no longer used) version file: {}", version_file.to_string_lossy()); + debug!( + "Removing (no longer used) version file: {}", + version_file.to_string_lossy() + ); std::fs::remove_file(&version_file).map_err(|e| { let context = format!( "Could not remove (no longer used) version file at: {}", diff --git a/src/upgrades/pre_0_10_0/cas_migration.rs b/src/upgrades/pre_0_10_0/cas_migration.rs index 5b6e33ac0..31cf04952 100644 --- a/src/upgrades/pre_0_10_0/cas_migration.rs +++ b/src/upgrades/pre_0_10_0/cas_migration.rs @@ -35,7 +35,7 @@ struct CaObjectsMigration { impl CaObjectsMigration { fn create(config: &Config) -> Result { let current_store = KeyValueStore::create(&config.storage_uri, CA_OBJECTS_NS)?; - let new_store = KeyValueStore::create(config.upgrade_storage_uri(), CA_OBJECTS_NS)?; + let new_store = KeyValueStore::create_upgrade_store(&config.storage_uri, CA_OBJECTS_NS)?; Ok(CaObjectsMigration { current_store, new_store, @@ -67,9 +67,10 @@ pub struct CasMigration { impl CasMigration { pub fn upgrade(mode: UpgradeMode, config: &Config) -> UpgradeResult<()> { let current_kv_store = KeyValueStore::create(&config.storage_uri, CASERVER_NS)?; - let new_kv_store = KeyValueStore::create(config.upgrade_storage_uri(), CASERVER_NS)?; - let new_agg_store = AggregateStore::::create_from_url( - config.upgrade_storage_uri(), + let new_kv_store = KeyValueStore::create_upgrade_store(&config.storage_uri, CASERVER_NS)?; + + let new_agg_store = AggregateStore::::create_upgrade_store( + &config.storage_uri, CASERVER_NS, config.use_history_cache, )?; diff --git a/src/upgrades/pre_0_10_0/pubd_migration.rs b/src/upgrades/pre_0_10_0/pubd_migration.rs index 27dfe116b..1015d96eb 100644 --- a/src/upgrades/pre_0_10_0/pubd_migration.rs +++ b/src/upgrades/pre_0_10_0/pubd_migration.rs @@ -26,9 +26,9 @@ pub struct PublicationServerRepositoryAccessMigration { impl PublicationServerRepositoryAccessMigration { pub fn upgrade(mode: UpgradeMode, config: &Config, versions: &UpgradeVersions) -> UpgradeResult<()> { let current_kv_store = KeyValueStore::create(&config.storage_uri, PUBSERVER_NS)?; - let new_kv_store = KeyValueStore::create(config.upgrade_storage_uri(), PUBSERVER_NS)?; + let new_kv_store = KeyValueStore::create_upgrade_store(&config.storage_uri, PUBSERVER_NS)?; let 
new_agg_store = - AggregateStore::create_from_url(config.upgrade_storage_uri(), PUBSERVER_NS, config.use_history_cache)?; + AggregateStore::create_upgrade_store(&config.storage_uri, PUBSERVER_NS, config.use_history_cache)?; let store_migration = PublicationServerRepositoryAccessMigration { current_kv_store, diff --git a/src/upgrades/pre_0_14_0/mod.rs b/src/upgrades/pre_0_14_0/mod.rs index 720fa7f11..be5a3182d 100644 --- a/src/upgrades/pre_0_14_0/mod.rs +++ b/src/upgrades/pre_0_14_0/mod.rs @@ -246,9 +246,9 @@ impl GenericUpgradeAggregateStore { // nothing to do here Ok(()) } else { - let new_kv_store = KeyValueStore::create(config.upgrade_storage_uri(), name_space)?; + let new_kv_store = KeyValueStore::create_upgrade_store(&config.storage_uri, name_space)?; let new_agg_store = - AggregateStore::::create_from_url(config.upgrade_storage_uri(), name_space, config.use_history_cache)?; + AggregateStore::::create_upgrade_store(&config.storage_uri, name_space, config.use_history_cache)?; let store_migration = GenericUpgradeAggregateStore { store_name: name_space.to_string(), From 321722917b541b71affc3104dafc2a86a8d60d12 Mon Sep 17 00:00:00 2001 From: Tim Bruijnzeels Date: Fri, 25 Aug 2023 11:57:13 +0200 Subject: [PATCH 04/18] Move data_dir_from_storage_uri into Config --- src/commons/util/storage.rs | 53 +------------------------------------ src/daemon/config.rs | 51 ++++++++++++++++++++++++++++++++--- src/upgrades/mod.rs | 22 +++++++++------ 3 files changed, 62 insertions(+), 64 deletions(-) diff --git a/src/commons/util/storage.rs b/src/commons/util/storage.rs index 49c1c5452..670ba87ca 100644 --- a/src/commons/util/storage.rs +++ b/src/commons/util/storage.rs @@ -1,61 +1,10 @@ -use std::path::{Path, PathBuf}; +use std::path::Path; use url::Url; use crate::commons::{error::Error, KrillResult}; -pub fn data_dir_from_storage_uri(storage_uri: &Url) -> Option { - if storage_uri.scheme() != "local" { - None - } else { - Some( - Path::new(&format!( - "{}{}", - storage_uri.host_str().unwrap_or(""), - storage_uri.path() - )) - .to_path_buf(), - ) - } -} - // TODO mark as test only // #[cfg(test)] pub fn storage_uri_from_data_dir(data_dir: &Path) -> KrillResult { Url::parse(&format!("local://{}/", data_dir.to_string_lossy())).map_err(|e| Error::custom(e.to_string())) } - -#[cfg(test)] -mod tests { - use std::path::{Path, PathBuf}; - use url::Url; - - use crate::commons::util::storage::{data_dir_from_storage_uri, storage_uri_from_data_dir}; - - #[test] - fn conversion() { - assert_eq!( - data_dir_from_storage_uri(&Url::parse("local:///tmp/test").unwrap()).unwrap(), - PathBuf::from("/tmp/test") - ); - assert_eq!( - data_dir_from_storage_uri(&Url::parse("local://./data").unwrap()).unwrap(), - PathBuf::from("./data") - ); - assert_eq!( - data_dir_from_storage_uri(&Url::parse("local://data").unwrap()).unwrap(), - PathBuf::from("data") - ); - assert_eq!( - data_dir_from_storage_uri(&Url::parse("local://data/test").unwrap()).unwrap(), - PathBuf::from("data/test") - ); - assert_eq!( - storage_uri_from_data_dir(Path::new("./data")).unwrap(), - Url::parse("local://./data/").unwrap() - ); - assert_eq!( - storage_uri_from_data_dir(Path::new("/tmp/data")).unwrap(), - Url::parse("local:///tmp/data/").unwrap() - ); - } -} diff --git a/src/daemon/config.rs b/src/daemon/config.rs index d33e107ec..94b233b14 100644 --- a/src/daemon/config.rs +++ b/src/daemon/config.rs @@ -27,7 +27,7 @@ use crate::{ crypto::{OpenSslSignerConfig, SignSupport}, error::{Error, KrillIoError}, eventsourcing::KeyValueStore, - util::{ext_serde, 
storage::data_dir_from_storage_uri}, + util::ext_serde, KrillResult, }, constants::*, @@ -820,6 +820,23 @@ impl Config { KeyValueStore::create(&self.storage_uri, name_space).map_err(Error::KeyValueError) } + /// Returns the data directory if disk was used for storage. + /// This will always be true for upgrades of pre 0.14.0 versions + pub fn data_dir(&self) -> Option { + if self.storage_uri.scheme() != "local" { + None + } else { + Some( + Path::new(&format!( + "{}{}", + self.storage_uri.host_str().unwrap_or(""), + self.storage_uri.path() + )) + .to_path_buf(), + ) + } + } + pub fn tls_keys_dir(&self) -> &PathBuf { self.tls_keys_dir.as_ref().unwrap() // should not panic, as it is always set } @@ -1192,7 +1209,7 @@ impl Config { } if self.tls_keys_dir.is_none() { - if let Some(mut data_dir) = data_dir_from_storage_uri(&self.storage_uri) { + if let Some(mut data_dir) = self.data_dir() { data_dir.push(HTTPS_SUB_DIR); self.tls_keys_dir = Some(data_dir); } else { @@ -1201,7 +1218,7 @@ impl Config { } if self.repo_dir.is_none() { - if let Some(mut data_dir) = data_dir_from_storage_uri(&self.storage_uri) { + if let Some(mut data_dir) = self.data_dir() { data_dir.push(REPOSITORY_DIR); self.repo_dir = Some(data_dir); } else { @@ -1210,7 +1227,7 @@ impl Config { } if self.pid_file.is_none() { - if let Some(mut data_dir) = data_dir_from_storage_uri(&self.storage_uri) { + if let Some(mut data_dir) = self.data_dir() { data_dir.push("krill.pid"); self.pid_file = Some(data_dir); } else { @@ -2171,4 +2188,30 @@ mod tests { let res = parse_and_process_config_str(config_str); assert_err_msg(res, "Signer name 'Blah' is not unique"); } + + #[test] + fn data_dir_for_storage() { + fn test_uri(uri: &str, expected_path: &str) { + let storage_uri = Url::parse(uri).unwrap(); + let config = Config::test_config(&storage_uri, None, false, false, false, false); + + let expected_path = PathBuf::from(expected_path); + assert_eq!(config.data_dir().unwrap(), expected_path); + } + + test_uri("local:///tmp/test", "/tmp/test"); + test_uri("local://./data", "./data"); + test_uri("local://data", "data"); + test_uri("local://data/test", "data/test"); + test_uri("local:///tmp/test", "/tmp/test"); + + // assert_eq!( + // storage_uri_from_data_dir(Path::new("./data")).unwrap(), + // Url::parse("local://./data/").unwrap() + // ); + // assert_eq!( + // storage_uri_from_data_dir(Path::new("/tmp/data")).unwrap(), + // Url::parse("local:///tmp/data/").unwrap() + // ); + } } diff --git a/src/upgrades/mod.rs b/src/upgrades/mod.rs index 77586572d..4784c73af 100644 --- a/src/upgrades/mod.rs +++ b/src/upgrades/mod.rs @@ -17,7 +17,7 @@ use crate::{ segment, Aggregate, AggregateStore, AggregateStoreError, Key, KeyValueError, KeyValueStore, Scope, Segment, SegmentExt, Storable, StoredCommand, WalStore, WithStorableDetails, }, - util::{file, storage::data_dir_from_storage_uri, KrillVersion}, + util::{file, KrillVersion}, KrillResult, }, constants::{ @@ -641,7 +641,9 @@ pub fn prepare_upgrade_data_migrations( // and we only support migration to database storage *after* upgrading. // So.. it is safe to unwrap the storage_uri into a data dir here. We // would not be here otherwise. - let data_dir = data_dir_from_storage_uri(&config.storage_uri).unwrap(); + let data_dir = config + .data_dir() + .ok_or(UpgradeError::custom("no data dir found in config"))?; // Create upgrade dir if it did not yet exist. 
let lock_file_path = data_dir.join("upgrade.lock"); @@ -715,7 +717,9 @@ pub fn prepare_upgrade_data_migrations( /// Migrate v0.12.x RepositoryContent to the new 0.13.0+ format. /// Apply any open WAL changes to the source first. fn migrate_0_12_pubd_objects(config: &Config) -> KrillResult { - let data_dir = data_dir_from_storage_uri(&config.storage_uri).unwrap(); + let data_dir = config + .data_dir() + .ok_or(Error::custom("cannot find data dir in config"))?; let old_repo_content_dir = data_dir.join(PUBSERVER_CONTENT_NS.as_str()); if old_repo_content_dir.exists() { let old_store: WalStore = WalStore::create(&config.storage_uri, PUBSERVER_CONTENT_NS)?; @@ -739,7 +743,9 @@ fn migrate_0_12_pubd_objects(config: &Config) -> KrillResult { /// The format of the RepositoryContent did not change in 0.12, but /// the location and way of storing it did. So, migrate if present. fn migrate_pre_0_12_pubd_objects(config: &Config) -> KrillResult<()> { - let data_dir = data_dir_from_storage_uri(&config.storage_uri).unwrap(); + let data_dir = config + .data_dir() + .ok_or(Error::custom("cannot find data dir in config"))?; let old_repo_content_dir = data_dir.join(PUBSERVER_CONTENT_NS.as_str()); if old_repo_content_dir.exists() { let old_store = KeyValueStore::create(&config.storage_uri, PUBSERVER_CONTENT_NS)?; @@ -788,7 +794,7 @@ pub fn finalise_data_migration( // Furthermore, now that we are storing the version in one single // place, we can remove the "version" file from any directory that // remains after migration. - if let Some(data_dir) = data_dir_from_storage_uri(&config.storage_uri) { + if let Some(data_dir) = config.data_dir() { let archive_base_dir = data_dir.join(&format!("archive-{}", upgrade.from())); let upgrade_base_dir = data_dir.join("upgrade-data"); @@ -878,7 +884,7 @@ pub fn finalise_data_migration( } // Remove version files that are no longer required - if let Some(data_dir) = data_dir_from_storage_uri(&config.storage_uri) { + if let Some(data_dir) = config.data_dir() { for ns in &[ CASERVER_NS, CA_OBJECTS_NS, @@ -922,7 +928,7 @@ pub fn finalise_data_migration( /// one to the mapping in the signer store, if any. #[cfg(feature = "hsm")] fn record_preexisting_openssl_keys_in_signer_mapper(config: &Config) -> Result<(), UpgradeError> { - match data_dir_from_storage_uri(&config.storage_uri) { + match config.data_dir() { None => Ok(()), Some(data_dir) => { if !data_dir.join(SIGNERS_NS.as_str()).exists() { @@ -1034,7 +1040,7 @@ fn upgrade_versions( // We found the KrillVersion stored in the properties manager // introduced in Krill 0.14.0. UpgradeVersions::for_current(current) - } else if let Some(data_dir) = data_dir_from_storage_uri(&config.storage_uri) { + } else if let Some(data_dir) = config.data_dir() { // If the disk is used for storage, then we need to check // if there are any pre Krill 0.14.0 version files in the // usual places. If so, then this is an upgrade. From d3902a1ac1e3bbef84912f03c0743062955b340c Mon Sep 17 00:00:00 2001 From: Tim Bruijnzeels Date: Fri, 25 Aug 2023 15:49:48 +0200 Subject: [PATCH 05/18] Test StatusStore migration using in-memory storage. 
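Instead of copying the 0.9.5 status fixture into a temporary data
directory, the test now imports the on-disk test resources into the
key value store configured for the test (in-memory storage) and lets
StatusStore::create run the migration against that store. The copy
uses the new KeyValueStore::import helper added below. A rough sketch
of the pattern, using the names from the diff (the real test unwraps
instead of using `?` and then compares the migrated status):

    // Read the fixture from disk and copy it into the test store.
    let source_store = KeyValueStore::create(&source_dir_url, STATUS_NS)?;
    let status_kv_store = KeyValueStore::create(&test_storage_uri, STATUS_NS)?;
    status_kv_store.import(&source_store)?; // copies every scope and key as raw JSON

    // Creating the StatusStore on the populated store migrates the
    // 0.9.5 format status objects.
    let store = StatusStore::create(&test_storage_uri, STATUS_NS)?;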
--- src/commons/eventsourcing/kv.rs | 25 +++++++++- src/commons/util/storage.rs | 2 +- src/daemon/ca/status.rs | 48 ++++++++++--------- src/pubd/manager.rs | 8 +--- .../{ => status}/ta/status.json | 0 .../{ => status}/testbed/status.json | 0 6 files changed, 52 insertions(+), 31 deletions(-) rename test-resources/status_store/migration-0.9.5/{ => status}/ta/status.json (100%) rename test-resources/status_store/migration-0.9.5/{ => status}/testbed/status.json (100%) diff --git a/src/commons/eventsourcing/kv.rs b/src/commons/eventsourcing/kv.rs index 88956efda..3663ec606 100644 --- a/src/commons/eventsourcing/kv.rs +++ b/src/commons/eventsourcing/kv.rs @@ -63,6 +63,19 @@ impl KeyValueStore { .map_err(KeyValueError::KVError) } + /// Import all data from the given KV store into this + pub fn import(&self, other: &Self) -> Result<(), KeyValueError> { + for scope in other.scopes()? { + for key in other.keys(&scope, "")? { + if let Some(value) = other.get_raw_value(&key)? { + self.store_raw_value(&key, value)?; + } + } + } + + Ok(()) + } + /// Stores a key value pair, serialized as json, overwrite existing pub fn store(&self, key: &Key, value: &V) -> Result<(), KeyValueError> { Ok(self.inner.store(key, serde_json::to_value(value)?)?) @@ -82,13 +95,21 @@ impl KeyValueStore { /// Gets a value for a key, returns an error if the value cannot be deserialized, /// returns None if it cannot be found. pub fn get(&self, key: &Key) -> Result, KeyValueError> { - if let Some(value) = self.inner.get(key)? { + if let Some(value) = self.get_raw_value(key)? { Ok(serde_json::from_value(value)?) } else { Ok(None) } } + fn get_raw_value(&self, key: &Key) -> Result, KeyValueError> { + self.inner.get(key).map_err(KeyValueError::KVError) + } + + fn store_raw_value(&self, key: &Key, value: serde_json::Value) -> Result<(), KeyValueError> { + self.inner.store(key, value).map_err(KeyValueError::KVError) + } + /// Transactional `get`. pub fn get_transactional(&self, key: &Key) -> Result, KeyValueError> { let mut result: Option = None; @@ -145,7 +166,7 @@ impl KeyValueStore { self.move_key(key, &key.clone().with_sub_scope(segment!("surplus"))) } - /// Returns all 1st level scopes + /// Returns all scopes, including sub_scopes pub fn scopes(&self) -> Result, KeyValueError> { Ok(self.inner.list_scopes()?) 
} diff --git a/src/commons/util/storage.rs b/src/commons/util/storage.rs index 670ba87ca..875805ee7 100644 --- a/src/commons/util/storage.rs +++ b/src/commons/util/storage.rs @@ -4,7 +4,7 @@ use url::Url; use crate::commons::{error::Error, KrillResult}; // TODO mark as test only -// #[cfg(test)] +#[cfg(test)] pub fn storage_uri_from_data_dir(data_dir: &Path) -> KrillResult { Url::parse(&format!("local://{}/", data_dir.to_string_lossy())).map_err(|e| Error::custom(e.to_string())) } diff --git a/src/daemon/ca/status.rs b/src/daemon/ca/status.rs index e7d4f4a30..49614ea07 100644 --- a/src/daemon/ca/status.rs +++ b/src/daemon/ca/status.rs @@ -402,35 +402,39 @@ impl StatusStore { mod tests { use super::*; - use std::path::PathBuf; - - use kvx::{namespace, Namespace}; - - use crate::{ - commons::util::{file, storage::storage_uri_from_data_dir}, - test, - }; + use crate::{constants::STATUS_NS, test}; #[test] fn read_save_status() { - test::test_under_tmp(|data_dir| { - let source = PathBuf::from("test-resources/status_store/migration-0.9.5/"); - let target = data_dir.join("status"); - file::backup_dir(&source, &target).unwrap(); + let source_dir_path_str = "test-resources/status_store/migration-0.9.5/"; + let source_dir_url = Url::parse(&format!("local://{}", source_dir_path_str)).unwrap(); + + let source_store = KeyValueStore::create(&source_dir_url, STATUS_NS).unwrap(); + + let test_storage_uri = test::tmp_storage(); + let status_kv_store = KeyValueStore::create(&test_storage_uri, STATUS_NS).unwrap(); - let status_testbed_before_migration = - include_str!("../../../test-resources/status_store/migration-0.9.5/testbed/status.json"); + // copy the source KV store (files) into the test KV store (in memory) + status_kv_store.import(&source_store).unwrap(); - let status_testbed_before_migration: CaStatus = - serde_json::from_str(status_testbed_before_migration).unwrap(); + // get the status for testbed before initialising a StatusStore + // using the copied the data - that will be done next and start + // a migration. + let testbed_status_key = Key::new_scoped( + Scope::from_segment(segment!("testbed")), + Segment::parse("status.json").unwrap(), + ); + let status_testbed_before_migration: CaStatus = status_kv_store.get(&testbed_status_key).unwrap().unwrap(); - let storage_uri = storage_uri_from_data_dir(&data_dir).unwrap(); - let store = StatusStore::create(&storage_uri, namespace!("status")).unwrap(); - let testbed = CaHandle::from_str("testbed").unwrap(); + // Initialise the StatusStore using the new (in memory) storage, + // and migrate the data. + let store = StatusStore::create(&test_storage_uri, STATUS_NS).unwrap(); + let testbed = CaHandle::from_str("testbed").unwrap(); - let status_testbed_migrated = store.get_ca_status(&testbed); + // Get the migrated status for testbed and verify that it's equivalent + // to the status before migration. 
+ let status_testbed_migrated = store.get_ca_status(&testbed); - assert_eq!(status_testbed_before_migration, status_testbed_migrated); - }); + assert_eq!(status_testbed_before_migration, status_testbed_migrated); } } diff --git a/src/pubd/manager.rs b/src/pubd/manager.rs index 2f4ead09d..9815eb861 100644 --- a/src/pubd/manager.rs +++ b/src/pubd/manager.rs @@ -311,10 +311,7 @@ mod tests { IdCertInfo, }, crypto::{KrillSignerBuilder, OpenSslSignerConfig}, - util::{ - file::{self, CurrentFile}, - storage::storage_uri_from_data_dir, - }, + util::file::{self, CurrentFile}, }, constants::*, daemon::config::{SignerConfig, SignerType}, @@ -402,8 +399,7 @@ mod tests { fn should_not_add_publisher_twice() { // we need a disk, as repo_dir, etc. use data_dir by default let (data_dir, cleanup) = test::tmp_dir(); - // let storage_uri = test::tmp_storage(); - let storage_uri = storage_uri_from_data_dir(&data_dir).unwrap(); + let storage_uri = test::tmp_storage(); let server = make_server(&storage_uri, &data_dir); diff --git a/test-resources/status_store/migration-0.9.5/ta/status.json b/test-resources/status_store/migration-0.9.5/status/ta/status.json similarity index 100% rename from test-resources/status_store/migration-0.9.5/ta/status.json rename to test-resources/status_store/migration-0.9.5/status/ta/status.json diff --git a/test-resources/status_store/migration-0.9.5/testbed/status.json b/test-resources/status_store/migration-0.9.5/status/testbed/status.json similarity index 100% rename from test-resources/status_store/migration-0.9.5/testbed/status.json rename to test-resources/status_store/migration-0.9.5/status/testbed/status.json From 0988e2fd9cad97587748aebaeeabc70511e81fae Mon Sep 17 00:00:00 2001 From: Tim Bruijnzeels Date: Wed, 30 Aug 2023 14:38:18 +0200 Subject: [PATCH 06/18] Do not depend on a data dir for storage. 
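Upgrade data no longer lives in a separate directory on disk.
Prepared data is written to a prefixed "upgrade_<ns>" namespace in
the same storage backend, and finalise_data_migration swaps
namespaces instead of moving directories: the current store is
renamed to "archive_<ns>" (dropping any previous archive for that
namespace first, per the comment in migrate_to_archive) and the
upgrade store becomes the new current store. Per namespace this is
roughly the following (a sketch using the helpers introduced below;
migrate_namespace comes from the kvx support-namespace-migrations
branch):

    let mut upgrade_store =
        KeyValueStore::create_upgrade_store(&config.storage_uri, ns)?;
    if !upgrade_store.is_empty()? {
        let mut current_store = KeyValueStore::create(&config.storage_uri, ns)?;
        if !current_store.is_empty()? {
            // current data moves aside to the archive namespace
            current_store.migrate_to_archive(&config.storage_uri, ns)?;
        }
        // prepared data becomes the live namespace
        upgrade_store.migrate_to_current(&config.storage_uri, ns)?;
    }

Tests that previously needed a data dir for storage now run against
in-memory storage (test::mem_storage), and the OpenSSL key mapping
scan during upgrades reads the keys namespace through the key value
store instead of scanning the keys directory.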
--- Cargo.lock | 6 +- src/commons/eventsourcing/kv.rs | 63 +++- src/commons/eventsourcing/mod.rs | 7 +- src/commons/eventsourcing/wal.rs | 2 +- src/commons/util/mod.rs | 1 - src/commons/util/storage.rs | 10 - src/daemon/ca/status.rs | 2 +- src/daemon/config.rs | 11 +- src/daemon/properties/mod.rs | 2 +- src/pubd/manager.rs | 10 +- src/test.rs | 17 +- src/upgrades/mod.rs | 541 ++++++++++++------------------- tests/auth_check.rs | 4 +- tests/benchmark.rs | 2 +- tests/functional_ca_import.rs | 2 +- tests/functional_keyroll.rs | 3 +- 16 files changed, 287 insertions(+), 396 deletions(-) delete mode 100644 src/commons/util/storage.rs diff --git a/Cargo.lock b/Cargo.lock index fe132f877..4f82d90ce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1131,7 +1131,7 @@ dependencies = [ [[package]] name = "kvx" version = "0.7.0" -source = "git+https://github.com/nlnetlabs/kvx?branch=support-namespace-migrations#22379ed92b3befb436cd709956515a5887e72fff" +source = "git+https://github.com/nlnetlabs/kvx?branch=support-namespace-migrations#3619a8541028e65b750c678413c9b7089d9f6c4e" dependencies = [ "kvx_macros", "kvx_types", @@ -1148,7 +1148,7 @@ dependencies = [ [[package]] name = "kvx_macros" version = "0.7.0" -source = "git+https://github.com/nlnetlabs/kvx?branch=support-namespace-migrations#22379ed92b3befb436cd709956515a5887e72fff" +source = "git+https://github.com/nlnetlabs/kvx?branch=support-namespace-migrations#3619a8541028e65b750c678413c9b7089d9f6c4e" dependencies = [ "kvx_types", "proc-macro-error", @@ -1160,7 +1160,7 @@ dependencies = [ [[package]] name = "kvx_types" version = "0.7.0" -source = "git+https://github.com/nlnetlabs/kvx?branch=support-namespace-migrations#22379ed92b3befb436cd709956515a5887e72fff" +source = "git+https://github.com/nlnetlabs/kvx?branch=support-namespace-migrations#3619a8541028e65b750c678413c9b7089d9f6c4e" dependencies = [ "postgres", "postgres-types", diff --git a/src/commons/eventsourcing/kv.rs b/src/commons/eventsourcing/kv.rs index 3663ec606..03d90444a 100644 --- a/src/commons/eventsourcing/kv.rs +++ b/src/commons/eventsourcing/kv.rs @@ -5,7 +5,7 @@ use kvx::{KeyValueStoreBackend, NamespaceBuf, ReadStore, WriteStore}; use serde::{de::DeserializeOwned, Serialize}; use url::Url; -use crate::commons::{error::KrillIoError, util::KrillVersion}; +use crate::commons::error::KrillIoError; pub trait SegmentExt { fn parse_lossy(value: &str) -> SegmentBuf; @@ -40,8 +40,8 @@ pub struct KeyValueStore { impl KeyValueStore { /// Creates a new KeyValueStore. - pub fn create(storage_uri: &Url, name_space: &Namespace) -> Result { - kvx::KeyValueStore::new(storage_uri, name_space) + pub fn create(storage_uri: &Url, namespace: &Namespace) -> Result { + kvx::KeyValueStore::new(storage_uri, namespace) .map(|inner| KeyValueStore { inner }) .map_err(KeyValueError::KVError) } @@ -50,22 +50,59 @@ impl KeyValueStore { /// /// Adds the implicit prefix "upgrade-{version}-" to the given namespace. pub fn create_upgrade_store(storage_uri: &Url, namespace: &Namespace) -> Result { - let namespace_string = format!( - "upgrade_{}_{}", - KrillVersion::code_version().hyphen_notated(), - namespace - ); - let namespace = NamespaceBuf::from_str(&namespace_string) - .map_err(|e| KeyValueError::Other(format!("Cannot parse namespace: {}. 
Error: {}", namespace_string, e)))?; + let namespace = Self::prefixed_namespace(namespace, "upgrade")?; kvx::KeyValueStore::new(storage_uri, namespace) .map(|inner| KeyValueStore { inner }) .map_err(KeyValueError::KVError) } + fn prefixed_namespace(namespace: &Namespace, prefix: &str) -> Result { + let namespace_string = format!("{}_{}", prefix, namespace); + NamespaceBuf::from_str(&namespace_string) + .map_err(|e| KeyValueError::Other(format!("Cannot parse namespace: {}. Error: {}", namespace_string, e))) + } + + /// Archive this store (i.e. for this namespace). Deletes + /// any existing archive for this namespace if present. + pub fn migrate_to_archive(&mut self, storage_uri: &Url, namespace: &Namespace) -> Result<(), KeyValueError> { + let archive_ns = Self::prefixed_namespace(namespace, "archive")?; + // Wipe any existing archive, before archiving this store. + // We don't want to keep too much old data. See issue: #1088. + let archive_store = KeyValueStore::create(storage_uri, namespace)?; + archive_store.wipe()?; + + self.inner.migrate_namespace(archive_ns).map_err(KeyValueError::KVError) + } + + /// Make this (upgrade) store the current store. + /// + /// Fails if there is a non-empty current store. + pub fn migrate_to_current(&mut self, storage_uri: &Url, namespace: &Namespace) -> Result<(), KeyValueError> { + let current_store = KeyValueStore::create(storage_uri, namespace)?; + if !current_store.is_empty()? { + Err(KeyValueError::Other(format!( + "Abort migrate upgraded store for {} to current. The current store was not archived.", + namespace + ))) + } else { + self.inner + .migrate_namespace(namespace.into()) + .map_err(KeyValueError::KVError) + } + } + + /// Returns true if this KeyValueStore (with this namespace) has any entries + pub fn is_empty(&self) -> Result { + self.inner.is_empty().map_err(KeyValueError::KVError) + } + /// Import all data from the given KV store into this pub fn import(&self, other: &Self) -> Result<(), KeyValueError> { - for scope in other.scopes()? { + let mut scopes = other.scopes()?; + scopes.push(Scope::global()); // not explicitly listed but should be migrated as well. + + for scope in scopes { for key in other.keys(&scope, "")? { if let Some(value) = other.get_raw_value(&key)? 
{ self.store_raw_value(&key, value)?; @@ -152,7 +189,7 @@ impl KeyValueStore { } /// Archive a key - pub fn archive(&self, key: &Key) -> Result<(), KeyValueError> { + pub fn archive_key(&self, key: &Key) -> Result<(), KeyValueError> { self.move_key(key, &key.clone().with_sub_scope(segment!("archived"))) } @@ -445,7 +482,7 @@ mod tests { store.store(&key, &content).unwrap(); assert!(store.has(&key).unwrap()); - store.archive(&key).unwrap(); + store.archive_key(&key).unwrap(); assert!(!store.has(&key).unwrap()); assert!(store.has(&key.with_sub_scope(segment!("archived"))).unwrap()); } diff --git a/src/commons/eventsourcing/mod.rs b/src/commons/eventsourcing/mod.rs index bda8c6ffc..68d46fdf2 100644 --- a/src/commons/eventsourcing/mod.rs +++ b/src/commons/eventsourcing/mod.rs @@ -47,7 +47,7 @@ mod tests { api::{CommandHistoryCriteria, CommandSummary}, }, constants::ACTOR_DEF_TEST, - test::tmp_storage, + test::mem_storage, }; use super::*; @@ -339,10 +339,7 @@ mod tests { #[test] fn event_sourcing_framework() { - // crate::test::test_under_tmp(|data_dir| { - // let storage_uri = crate::commons::util::storage::storage_uri_from_data_dir(&data_dir).unwrap(); - - let storage_uri = tmp_storage(); + let storage_uri = mem_storage(); let counter = Arc::new(EventCounter::default()); diff --git a/src/commons/eventsourcing/wal.rs b/src/commons/eventsourcing/wal.rs index 44859c8bc..84560c130 100644 --- a/src/commons/eventsourcing/wal.rs +++ b/src/commons/eventsourcing/wal.rs @@ -356,7 +356,7 @@ impl WalStore { if let Ok(revision) = u64::from_str(number) { if revision < latest.revision() { if archive { - self.kv.archive(&key)?; + self.kv.archive_key(&key)?; } else { self.kv.drop_key(&key)?; } diff --git a/src/commons/util/mod.rs b/src/commons/util/mod.rs index 9962cd8df..916a38768 100644 --- a/src/commons/util/mod.rs +++ b/src/commons/util/mod.rs @@ -15,7 +15,6 @@ pub mod cmslogger; pub mod ext_serde; pub mod file; pub mod httpclient; -pub mod storage; //------------ KrillVersion -------------------------------------------------- diff --git a/src/commons/util/storage.rs b/src/commons/util/storage.rs deleted file mode 100644 index 875805ee7..000000000 --- a/src/commons/util/storage.rs +++ /dev/null @@ -1,10 +0,0 @@ -use std::path::Path; -use url::Url; - -use crate::commons::{error::Error, KrillResult}; - -// TODO mark as test only -#[cfg(test)] -pub fn storage_uri_from_data_dir(data_dir: &Path) -> KrillResult { - Url::parse(&format!("local://{}/", data_dir.to_string_lossy())).map_err(|e| Error::custom(e.to_string())) -} diff --git a/src/daemon/ca/status.rs b/src/daemon/ca/status.rs index 49614ea07..ae752aa6a 100644 --- a/src/daemon/ca/status.rs +++ b/src/daemon/ca/status.rs @@ -411,7 +411,7 @@ mod tests { let source_store = KeyValueStore::create(&source_dir_url, STATUS_NS).unwrap(); - let test_storage_uri = test::tmp_storage(); + let test_storage_uri = test::mem_storage(); let status_kv_store = KeyValueStore::create(&test_storage_uri, STATUS_NS).unwrap(); // copy the source KV store (files) into the test KV store (in memory) diff --git a/src/daemon/config.rs b/src/daemon/config.rs index 94b233b14..7bc368198 100644 --- a/src/daemon/config.rs +++ b/src/daemon/config.rs @@ -822,7 +822,7 @@ impl Config { /// Returns the data directory if disk was used for storage. 
/// This will always be true for upgrades of pre 0.14.0 versions - pub fn data_dir(&self) -> Option { + fn data_dir(&self) -> Option { if self.storage_uri.scheme() != "local" { None } else { @@ -2204,14 +2204,5 @@ mod tests { test_uri("local://data", "data"); test_uri("local://data/test", "data/test"); test_uri("local:///tmp/test", "/tmp/test"); - - // assert_eq!( - // storage_uri_from_data_dir(Path::new("./data")).unwrap(), - // Url::parse("local://./data/").unwrap() - // ); - // assert_eq!( - // storage_uri_from_data_dir(Path::new("/tmp/data")).unwrap(), - // Url::parse("local:///tmp/data/").unwrap() - // ); } } diff --git a/src/daemon/properties/mod.rs b/src/daemon/properties/mod.rs index b8e580921..6c70ff95b 100644 --- a/src/daemon/properties/mod.rs +++ b/src/daemon/properties/mod.rs @@ -267,7 +267,7 @@ impl PropertiesManager { } pub fn is_initialized(&self) -> bool { - self.properties().is_ok() + self.store.has(&self.main_key).unwrap_or_default() } pub fn init(&self, krill_version: KrillVersion) -> KrillResult> { diff --git a/src/pubd/manager.rs b/src/pubd/manager.rs index 9815eb861..82ab27f06 100644 --- a/src/pubd/manager.rs +++ b/src/pubd/manager.rs @@ -375,7 +375,7 @@ mod tests { fn should_add_publisher() { // we need a disk, as repo_dir, etc. use data_dir by default let (data_dir, cleanup) = test::tmp_dir(); - let storage_uri = test::tmp_storage(); + let storage_uri = test::mem_storage(); let server = make_server(&storage_uri, &data_dir); let alice = publisher_alice(&storage_uri); @@ -399,7 +399,7 @@ mod tests { fn should_not_add_publisher_twice() { // we need a disk, as repo_dir, etc. use data_dir by default let (data_dir, cleanup) = test::tmp_dir(); - let storage_uri = test::tmp_storage(); + let storage_uri = test::mem_storage(); let server = make_server(&storage_uri, &data_dir); @@ -423,7 +423,7 @@ mod tests { fn should_list_files() { // we need a disk, as repo_dir, etc. use data_dir by default let (data_dir, cleanup) = test::tmp_dir(); - let storage_uri = test::tmp_storage(); + let storage_uri = test::mem_storage(); let server = make_server(&storage_uri, &data_dir); let alice = publisher_alice(&storage_uri); @@ -444,7 +444,7 @@ mod tests { async fn should_publish_files() { // we need a disk, as repo_dir, etc. use data_dir by default let (data_dir, cleanup) = test::tmp_dir(); - let storage_uri = test::tmp_storage(); + let storage_uri = test::mem_storage(); let server = make_server(&storage_uri, &data_dir); let session = session_dir(&data_dir); @@ -633,7 +633,7 @@ mod tests { #[test] pub fn repository_session_reset() { let (data_dir, cleanup) = test::tmp_dir(); - let storage_uri = test::tmp_storage(); + let storage_uri = test::mem_storage(); let server = make_server(&storage_uri, &data_dir); // set up server with default repository, and publisher alice diff --git a/src/test.rs b/src/test.rs index 0aa6caac7..55d89dcc3 100644 --- a/src/test.rs +++ b/src/test.rs @@ -61,7 +61,7 @@ pub const KRILL_SECOND_SERVER_URI: &str = "https://localhost:3002/"; pub fn init_logging() -> impl FnOnce() { // Just creates a test config so we can initialize logging, then forgets about it - let storage = tmp_storage(); + let storage = mem_storage(); let (dir, cleanup) = tmp_dir(); let _ = Config::test(&storage, Some(&dir), false, false, false, false).init_logging(); @@ -148,7 +148,7 @@ pub fn init_config(config: &mut Config) { /// Starts krill server for testing using the given configuration. Creates a random base directory in the 'work' folder, /// adjusts the config to use it and returns it. 
Be sure to clean it up when the test is done. pub async fn start_krill_with_custom_config(mut config: Config) -> Url { - let storage_uri = tmp_storage(); + let storage_uri = mem_storage(); config.storage_uri = storage_uri.clone(); start_krill(config).await; storage_uri @@ -163,8 +163,7 @@ pub async fn start_krill_with_default_test_config( second_signer: bool, ) -> impl FnOnce() { let (data_dir, cleanup) = tmp_dir(); - let storage_uri = tmp_storage(); - // let storage_uri = storage_uri_from_data_dir(&data_dir).unwrap(); + let storage_uri = mem_storage(); let config = test_config( &storage_uri, Some(&data_dir), @@ -182,7 +181,7 @@ pub async fn start_krill_with_default_test_config( /// RRDP delta delays work properly. pub async fn start_krill_testbed_with_rrdp_interval(interval: u32) -> impl FnOnce() { let (data_dir, cleanup) = tmp_dir(); - let storage_uri = tmp_storage(); + let storage_uri = mem_storage(); let mut config = test_config(&storage_uri, Some(&data_dir), true, false, false, false); config.rrdp_updates_config.rrdp_delta_interval_min_seconds = interval; start_krill(config).await; @@ -206,7 +205,7 @@ async fn start_krill_with_error_trap(config: Arc) { /// own temp dir for storage. pub async fn start_krill_pubd(rrdp_delta_rrdp_delta_min_interval_seconds: u32) -> impl FnOnce() { let (data_dir, cleanup) = tmp_dir(); - let storage_uri = tmp_storage(); + let storage_uri = mem_storage(); let mut config = test_config(&storage_uri, Some(&data_dir), false, false, false, true); config.rrdp_updates_config.rrdp_delta_interval_min_seconds = rrdp_delta_rrdp_delta_min_interval_seconds; init_config(&mut config); @@ -231,7 +230,7 @@ pub async fn start_krill_pubd(rrdp_delta_rrdp_delta_min_interval_seconds: u32) - /// own temp dir for storage. pub async fn start_second_krill() -> impl FnOnce() { let (data_dir, cleanup) = tmp_dir(); - let storage_uri = tmp_storage(); + let storage_uri = mem_storage(); let mut config = test_config(&storage_uri, Some(&data_dir), false, false, false, true); init_config(&mut config); config.port = 3002; @@ -823,7 +822,7 @@ pub fn test_in_memory(op: F) where F: FnOnce(&Url), { - let storage_uri = tmp_storage(); + let storage_uri = mem_storage(); op(&storage_uri); } @@ -861,7 +860,7 @@ fn random_hex_string() -> String { hex::encode(bytes) } -pub fn tmp_storage() -> Url { +pub fn mem_storage() -> Url { let mut bytes = [0; 8]; openssl::rand::rand_bytes(&mut bytes).unwrap(); diff --git a/src/upgrades/mod.rs b/src/upgrades/mod.rs index 4784c73af..aafe24f97 100644 --- a/src/upgrades/mod.rs +++ b/src/upgrades/mod.rs @@ -2,7 +2,7 @@ //! - Updating the format of commands or events //! 
- Export / Import data -use std::{convert::TryInto, fmt, fs, path::Path, str::FromStr, time::Duration}; +use std::{convert::TryInto, fmt, str::FromStr, time::Duration}; use serde::{de::DeserializeOwned, Deserialize}; @@ -12,12 +12,12 @@ use crate::{ commons::{ actor::Actor, crypto::KrillSignerBuilder, - error::{Error, KrillIoError}, + error::KrillIoError, eventsourcing::{ segment, Aggregate, AggregateStore, AggregateStoreError, Key, KeyValueError, KeyValueStore, Scope, Segment, SegmentExt, Storable, StoredCommand, WalStore, WithStorableDetails, }, - util::{file, KrillVersion}, + util::KrillVersion, KrillResult, }, constants::{ @@ -634,26 +634,6 @@ pub fn prepare_upgrade_data_migrations( error!("{}", msg); Err(UpgradeError::custom(msg)) } else if versions.from < KrillVersion::candidate(0, 10, 0, 1) { - // Get a lock to ensure that only one process can run this migration - // at any one time (for a given config). - let _lock = { - // Note that all version before 0.14.0 were using disk based storage - // and we only support migration to database storage *after* upgrading. - // So.. it is safe to unwrap the storage_uri into a data dir here. We - // would not be here otherwise. - let data_dir = config - .data_dir() - .ok_or(UpgradeError::custom("no data dir found in config"))?; - - // Create upgrade dir if it did not yet exist. - let lock_file_path = data_dir.join("upgrade.lock"); - fslock::LockFile::open(&lock_file_path).map_err(|_| { - UpgradeError::custom( - format!("Cannot get upgrade lock. Another process may be running a Krill upgrade. Or, perhaps you ran 'krillup' as root - in that case check the ownership of directory: {}", data_dir.to_string_lossy()), - ) - })? - }; - // Complex migrations involving command / event conversions pre_0_10_0::PublicationServerRepositoryAccessMigration::upgrade(mode, config, &versions)?; pre_0_10_0::CasMigration::upgrade(mode, config)?; @@ -717,24 +697,16 @@ pub fn prepare_upgrade_data_migrations( /// Migrate v0.12.x RepositoryContent to the new 0.13.0+ format. /// Apply any open WAL changes to the source first. fn migrate_0_12_pubd_objects(config: &Config) -> KrillResult { - let data_dir = config - .data_dir() - .ok_or(Error::custom("cannot find data dir in config"))?; - let old_repo_content_dir = data_dir.join(PUBSERVER_CONTENT_NS.as_str()); - if old_repo_content_dir.exists() { - let old_store: WalStore = WalStore::create(&config.storage_uri, PUBSERVER_CONTENT_NS)?; - let repo_content_handle = MyHandle::new("0".into()); - - if old_store.has(&repo_content_handle)? { - let old_repo_content = old_store.get_latest(&repo_content_handle)?.as_ref().clone(); - let repo_content: pubd::RepositoryContent = old_repo_content.try_into()?; - let new_key = Key::new_scoped(Scope::from_segment(segment!("0")), segment!("snapshot.json")); - let upgrade_store = KeyValueStore::create_upgrade_store(&config.storage_uri, PUBSERVER_CONTENT_NS)?; - upgrade_store.store(&new_key, &repo_content)?; - Ok(true) - } else { - Ok(false) - } + let old_store: WalStore = WalStore::create(&config.storage_uri, PUBSERVER_CONTENT_NS)?; + let repo_content_handle = MyHandle::new("0".into()); + + if old_store.has(&repo_content_handle)? 
{ + let old_repo_content = old_store.get_latest(&repo_content_handle)?.as_ref().clone(); + let repo_content: pubd::RepositoryContent = old_repo_content.try_into()?; + let new_key = Key::new_scoped(Scope::from_segment(segment!("0")), segment!("snapshot.json")); + let upgrade_store = KeyValueStore::create_upgrade_store(&config.storage_uri, PUBSERVER_CONTENT_NS)?; + upgrade_store.store(&new_key, &repo_content)?; + Ok(true) } else { Ok(false) } @@ -743,22 +715,17 @@ fn migrate_0_12_pubd_objects(config: &Config) -> KrillResult { /// The format of the RepositoryContent did not change in 0.12, but /// the location and way of storing it did. So, migrate if present. fn migrate_pre_0_12_pubd_objects(config: &Config) -> KrillResult<()> { - let data_dir = config - .data_dir() - .ok_or(Error::custom("cannot find data dir in config"))?; - let old_repo_content_dir = data_dir.join(PUBSERVER_CONTENT_NS.as_str()); - if old_repo_content_dir.exists() { - let old_store = KeyValueStore::create(&config.storage_uri, PUBSERVER_CONTENT_NS)?; - let old_key = Key::new_global(segment!("0.json")); - if let Ok(Some(old_repo_content)) = old_store.get::(&old_key) { - info!("Found pre 0.12.0 RC2 publication server data. Migrating.."); - let repo_content: pubd::RepositoryContent = old_repo_content.try_into()?; - - let new_key = Key::new_scoped(Scope::from_segment(segment!("0")), segment!("snapshot.json")); - let upgrade_store = KeyValueStore::create_upgrade_store(&config.storage_uri, PUBSERVER_CONTENT_NS)?; - upgrade_store.store(&new_key, &repo_content)?; - } + let old_store = KeyValueStore::create(&config.storage_uri, PUBSERVER_CONTENT_NS)?; + let old_key = Key::new_global(segment!("0.json")); + if let Ok(Some(old_repo_content)) = old_store.get::(&old_key) { + info!("Found pre 0.12.0 RC2 publication server data. Migrating.."); + let repo_content: pubd::RepositoryContent = old_repo_content.try_into()?; + + let new_key = Key::new_scoped(Scope::from_segment(segment!("0")), segment!("snapshot.json")); + let upgrade_store = KeyValueStore::create_upgrade_store(&config.storage_uri, PUBSERVER_CONTENT_NS)?; + upgrade_store.store(&new_key, &repo_content)?; } + Ok(()) } @@ -772,137 +739,50 @@ pub fn finalise_data_migration( config: &Config, properties_manager: &PropertiesManager, ) -> KrillResult<()> { - if upgrade.from >= KrillVersion::candidate(0, 14, 0, 0) { - // Not supported yet, we will need to implement changing the - // namespace in kvx::KeyValueStore. - // - // When this is done then we can use the same logic for any - // storage implementation used (disk/db). - todo!("Support migrations from 0.14.x and higher migrations"); - } else { - info!( - "Finish data migrations for upgrade from {} to {}", - upgrade.from(), - upgrade.to() - ); - - // Krill versions before 0.14.x *always* used disk based storage. - // - // So, we should always get some data dir from the current config - // when upgrading from a a version before 0.14.x. - // - // Furthermore, now that we are storing the version in one single - // place, we can remove the "version" file from any directory that - // remains after migration. 
- if let Some(data_dir) = config.data_dir() { - let archive_base_dir = data_dir.join(&format!("archive-{}", upgrade.from())); - let upgrade_base_dir = data_dir.join("upgrade-data"); - - for ns in &[ - CASERVER_NS, - CA_OBJECTS_NS, - KEYS_NS, - PUBSERVER_CONTENT_NS, - PUBSERVER_NS, - SIGNERS_NS, - STATUS_NS, - TA_PROXY_SERVER_NS, - TA_SIGNER_SERVER_NS, - ] { - // Data structure is as follows: - // - // data_dir/ - // upgrade-data/ --> upgraded (may be missing) - // ns1, - // ns2, - // etc - // ns1, --> current - // ns2, - // etc - // - // archive-prev-v/ --> archived current dirs which were upgraded - // - let upgraded_dir = upgrade_base_dir.join(ns.as_str()); - let archive_dir = archive_base_dir.join(ns.as_str()); - let current_dir = data_dir.join(ns.as_str()); - - if upgraded_dir.exists() { - // Data was prepared. So we archive the current data and - // then move the prepped data. - move_dir(¤t_dir, &archive_dir)?; - move_dir(&upgraded_dir, ¤t_dir)?; - } else if current_dir.exists() { - // There was no new data for this directory. But, we make a backup - // so that we can have a consistent data set to fall back to in case - // of a downgrade. - file::backup_dir(¤t_dir, &archive_dir).map_err(|e| { - Error::Custom(format!( - "Could not backup directory {} to {} after migration: {}", - current_dir.to_string_lossy(), - archive_dir.to_string_lossy(), - e - )) - })?; - } - - let version_file = current_dir.join("version"); - if version_file.exists() { - debug!( - "Removing (no longer used) version file: {}", - version_file.to_string_lossy() - ); - std::fs::remove_file(&version_file).map_err(|e| { - let context = format!( - "Could not remove (no longer used) version file at: {}", - version_file.to_string_lossy(), - ); - Error::IoError(KrillIoError::new(context, e)) - })?; - } - } - - // remove the upgrade base dir - if it's empty - so ignore error. - let _ = fs::remove_dir(&upgrade_base_dir); - } - - // move the dirs - fn move_dir(from: &Path, to: &Path) -> KrillResult<()> { - if let Some(parent) = to.parent() { - if !parent.exists() { - file::create_dir_all(parent).map_err(Error::IoError)?; - } + // For each NS + // + // Check if upgrade store to this version exists. + // If so: + // -- drop archive store if it exists + // -- archive current store (rename ns) + // -- move upgrade to current + info!( + "Finish data migrations for upgrade from {} to {}", + upgrade.from(), + upgrade.to() + ); + + for ns in [ + CASERVER_NS, + CA_OBJECTS_NS, + KEYS_NS, + PUBSERVER_CONTENT_NS, + PUBSERVER_NS, + SIGNERS_NS, + STATUS_NS, + TA_PROXY_SERVER_NS, + TA_SIGNER_SERVER_NS, + ] { + // Check if there is a non-empty upgrade store for this namespace + // that would need to be migrated. + let mut upgrade_store = KeyValueStore::create_upgrade_store(&config.storage_uri, ns)?; + if !upgrade_store.is_empty()? { + debug!("Migrate new data for {} and archive old", ns); + let mut current_store = KeyValueStore::create(&config.storage_uri, ns)?; + if !current_store.is_empty()? 
{ + current_store.migrate_to_archive(&config.storage_uri, ns)?; } - std::fs::rename(from, to).map_err(|e| { - let context = format!( - "Could not rename directory from: {} to: {}.", - from.to_string_lossy(), - to.to_string_lossy() - ); - Error::IoError(KrillIoError::new(context, e)) - }) - } - } - // Remove version files that are no longer required - if let Some(data_dir) = config.data_dir() { - for ns in &[ - CASERVER_NS, - CA_OBJECTS_NS, - KEYS_NS, - PUBSERVER_CONTENT_NS, - PUBSERVER_NS, - SIGNERS_NS, - STATUS_NS, - TA_PROXY_SERVER_NS, - TA_SIGNER_SERVER_NS, - ] { - let path = data_dir.join(ns.as_str()).join("version"); - if path.exists() { - debug!("Removing version excess file: {}", path.to_string_lossy()); - std::fs::remove_file(&path).map_err(|e| { - let context = format!("Could not remove old version file at: {}", path.to_string_lossy(),); - Error::IoError(KrillIoError::new(context, e)) - })?; + upgrade_store.migrate_to_current(&config.storage_uri, ns)?; + } else { + // No migration needed, but check if we have a current store + // for this namespace that still includes a version file. If + // so, remove it. + let current_store = KeyValueStore::create(&config.storage_uri, ns)?; + let version_key = Key::new_global(segment!("version")); + if current_store.has(&version_key)? { + debug!("Removing excess version key in ns: {}", ns); + current_store.drop_key(&version_key)?; } } } @@ -928,82 +808,70 @@ pub fn finalise_data_migration( /// one to the mapping in the signer store, if any. #[cfg(feature = "hsm")] fn record_preexisting_openssl_keys_in_signer_mapper(config: &Config) -> Result<(), UpgradeError> { - match config.data_dir() { - None => Ok(()), - Some(data_dir) => { - if !data_dir.join(SIGNERS_NS.as_str()).exists() { - let mut num_recorded_keys = 0; - let keys_dir = data_dir.join(KEYS_NS.as_str()); + let signers_key_store = KeyValueStore::create(&config.storage_uri, SIGNERS_NS)?; + if signers_key_store.is_empty()? { + let mut num_recorded_keys = 0; + // If the key value store for the "signers" namespace is empty, then it was not yet initialised + // and we may need to import keys from a previous krill installation (earlier version, or a custom + // build that has the hsm feature disabled.) 
- info!( - "Scanning for not yet mapped OpenSSL signer keys in {} to record in the signer store", - keys_dir.to_string_lossy() - ); + let keys_key_store = KeyValueStore::create(&config.storage_uri, KEYS_NS)?; + info!("Mapping OpenSSL signer keys, using uri: {}", config.storage_uri); + + let probe_interval = Duration::from_secs(config.signer_probe_retry_seconds); + let krill_signer = KrillSignerBuilder::new(&config.storage_uri, probe_interval, &config.signers) + .with_default_signer(config.default_signer()) + .with_one_off_signer(config.one_off_signer()) + .build() + .unwrap(); - let probe_interval = Duration::from_secs(config.signer_probe_retry_seconds); - let krill_signer = KrillSignerBuilder::new(&config.storage_uri, probe_interval, &config.signers) - .with_default_signer(config.default_signer()) - .with_one_off_signer(config.one_off_signer()) - .build() - .unwrap(); - - // For every file (key) in the legacy OpenSSL signer keys directory - if let Ok(dir_iter) = keys_dir.read_dir() { - let mut openssl_signer_handle: Option = None; - - for entry in dir_iter { - let entry = entry.map_err(|err| { - UpgradeError::IoError(KrillIoError::new( - format!( - "I/O error while looking for signer keys to register in: {}", - keys_dir.to_string_lossy() - ), - err, - )) - })?; - - if entry.path().is_file() { - // Is it a key identifier? - if let Ok(key_id) = KeyIdentifier::from_str(&entry.file_name().to_string_lossy()) { - // Is the key already recorded in the mapper? It shouldn't be, but asking will cause the initial - // registration of the OpenSSL signer to occur and for it to be assigned a handle. We need the - // handle so that we can register keys with the mapper. - if krill_signer.get_key_info(&key_id).is_err() { - // No, record it - - // Find out the handle of the OpenSSL signer used to create this key, if not yet known. - if openssl_signer_handle.is_none() { - // No, find it by asking each of the active signers if they have the key because one of - // them must have it and it should be the one and only OpenSSL signer that Krill was - // using previously. We can't just find and use the only OpenSSL signers as Krill may - // have been configured with more than one each with separate keys directories. - for (a_signer_handle, a_signer) in krill_signer.get_active_signers().iter() { - if a_signer.get_key_info(&key_id).is_ok() { - openssl_signer_handle = Some(a_signer_handle.clone()); - break; - } - } - } - - // Record the key in the signer mapper as being owned by the found signer handle. - if let Some(signer_handle) = &openssl_signer_handle { - let internal_key_id = key_id.to_string(); - if let Some(mapper) = krill_signer.get_mapper() { - mapper.add_key(signer_handle, &key_id, &internal_key_id)?; - num_recorded_keys += 1; - } - } - } + // For every file (key) in the legacy OpenSSL signer keys directory + + let mut openssl_signer_handle: Option = None; + + for key in keys_key_store.keys(&Scope::global(), "")? { + debug!("Found key: {}", key); + // Is it a key identifier? + if let Ok(key_id) = KeyIdentifier::from_str(key.name().as_str()) { + // Is the key already recorded in the mapper? It shouldn't be, but asking will cause the initial + // registration of the OpenSSL signer to occur and for it to be assigned a handle. We need the + // handle so that we can register keys with the mapper. + if krill_signer.get_key_info(&key_id).is_err() { + // No, record it + + // Find out the handle of the OpenSSL signer used to create this key, if not yet known. 
+ if openssl_signer_handle.is_none() { + // No, find it by asking each of the active signers if they have the key because one of + // them must have it and it should be the one and only OpenSSL signer that Krill was + // using previously. We can't just find and use the only OpenSSL signers as Krill may + // have been configured with more than one each with separate keys directories. + for (a_signer_handle, a_signer) in krill_signer.get_active_signers().iter() { + if a_signer.get_key_info(&key_id).is_ok() { + openssl_signer_handle = Some(a_signer_handle.clone()); + break; } } } - } - info!("Recorded {} key identifiers in the signer store", num_recorded_keys); + // Record the key in the signer mapper as being owned by the found signer handle. + if let Some(signer_handle) = &openssl_signer_handle { + let internal_key_id = key_id.to_string(); + if let Some(mapper) = krill_signer.get_mapper() { + mapper.add_key(signer_handle, &key_id, &internal_key_id)?; + num_recorded_keys += 1; + } + } + } + } else { + debug!("Could not parse key as key identifier: {}", key); } - - Ok(()) } + + info!("Recorded {} key identifiers in the signer store", num_recorded_keys); + Ok(()) + } else { + debug!("Signers were set up before. No need to migrate keys."); + Ok(()) } } @@ -1036,58 +904,47 @@ fn upgrade_versions( config: &Config, properties_manager: &PropertiesManager, ) -> Result, UpgradeError> { - if let Ok(current) = properties_manager.current_krill_version() { - // We found the KrillVersion stored in the properties manager - // introduced in Krill 0.14.0. + if properties_manager.is_initialized() { + // The properties manager was introduced in Krill 0.14.0. + // If it's initialised then it MUST have a Krill Version. + let current = properties_manager.current_krill_version()?; UpgradeVersions::for_current(current) - } else if let Some(data_dir) = config.data_dir() { - // If the disk is used for storage, then we need to check - // if there are any pre Krill 0.14.0 version files in the - // usual places. If so, then this is an upgrade. + } else { + // No KrillVersion yet. So, either this is an older Krill version, + // or this a fresh installation. // - // If there are no such files, then we know that this is a - // new clean installation. Otherwise, we would have found - // the properties_manager.current_krill_version(). - let mut current = None; - - // So.. try to find the most recent version among those files - // in as far as they exist. - for ns in &[CASERVER_NS, PUBSERVER_NS, PUBSERVER_CONTENT_NS] { - let path = data_dir.join(ns.as_str()).join("version"); - if let Ok(bytes) = file::read(&path) { - if let Ok(new_version_seen_on_disk) = serde_json::from_slice::(&bytes) { - if let Some(previous_seen_on_disk) = current.clone() { - if new_version_seen_on_disk > previous_seen_on_disk { - current = Some(new_version_seen_on_disk); - } - } else { - current = Some(new_version_seen_on_disk); + // If this is an existing older Krill installation then we will + // find version files (keys) in one or more existing key value + // stores used for the various entities in Krill. + // + // If can't find any versions then this is a clean install. + + let mut current: Option = None; + + // Scan the following data stores. The *latest* version seen will determine + // the actual installed Krill version - this is because these version files + // did not always get updated in each store - but only in stores that needed + // an upgrade (at least this is true for some past migrations). 
So, it's the + // latest version (if any) that counts here. + for ns in &[CASERVER_NS, CA_OBJECTS_NS, PUBSERVER_NS, PUBSERVER_CONTENT_NS] { + let kv_store = KeyValueStore::create(&config.storage_uri, ns)?; + let key = Key::new_global(segment!("version")); + + if let Some(key_store_version) = kv_store.get::(&key)? { + if let Some(last_seen) = ¤t { + if &key_store_version > last_seen { + current = Some(key_store_version) } + } else { + current = Some(key_store_version); } } } match current { - None => { - info!("Clean installation for Krill version {}", KrillVersion::code_version()); - Ok(None) - } + None => Ok(None), Some(current) => UpgradeVersions::for_current(current), } - } else { - // No disk was used. We do not support upgrading from <0.14.0 to 0.14.0 or - // above AND migrating to a database at the same time. If users want this - // then they should first upgrade using disk based storage and then migrate - // the data content to a new storage option. See issue #1079 - info!( - "Clean installation using database storage for Krill version {}", - KrillVersion::code_version() - ); - info!("NOTE: if you meant to upgrade an existing Krill <0.14.0 installation"); - info!(" then you should stop this instance, clear the new database, then"); - info!(" upgrade your old installation using the disk as a storage option,"); - info!(" and then migrate your data to a database."); - Ok(None) } } @@ -1097,21 +954,29 @@ fn upgrade_versions( mod tests { use std::path::PathBuf; - use crate::{ - commons::util::{file, storage::storage_uri_from_data_dir}, - test::tmp_dir, - }; + use kvx::Namespace; + use url::Url; - use super::*; + use crate::test; - async fn test_upgrade(source: PathBuf) { - let (data_dir, cleanup) = tmp_dir(); - let storage_uri = storage_uri_from_data_dir(&data_dir).unwrap(); - file::backup_dir(&source, &data_dir).unwrap(); + use super::*; - let config = Config::test(&storage_uri, Some(&data_dir), false, false, false, false); + fn test_upgrade(base_dir: &str, namespaces: &[&str]) { + // Copy data for the given names spaces into memory for testing. 
+ let mem_storage_base_uri = test::mem_storage(); + let bogus_path = PathBuf::from("/dev/null"); // needed for tls_dir etc, but will be ignored here + let config = Config::test(&mem_storage_base_uri, Some(&bogus_path), false, false, false, false); let _ = config.init_logging(); + let source_url = Url::parse(&format!("local://{}", base_dir)).unwrap(); + for ns in namespaces { + let namespace = Namespace::parse(ns).unwrap(); + let source_store = KeyValueStore::create(&source_url, namespace).unwrap(); + let target_store = KeyValueStore::create(&mem_storage_base_uri, namespace).unwrap(); + + target_store.import(&source_store).unwrap(); + } + let properties_manager = PropertiesManager::create(&config.storage_uri, config.use_history_cache).unwrap(); prepare_upgrade_data_migrations(UpgradeMode::PrepareOnly, &config, &properties_manager) @@ -1124,26 +989,37 @@ mod tests { .unwrap(); finalise_data_migration(report.versions(), &config, &properties_manager).unwrap(); - - cleanup(); } - #[tokio::test] - async fn prepare_then_upgrade_0_9_5() { - let source = PathBuf::from("test-resources/migrations/v0_9_5/"); - test_upgrade(source).await; + #[test] + fn prepare_then_upgrade_0_9_5() { + test_upgrade( + "test-resources/migrations/v0_9_5/", + &["ca_objects", "cas", "pubd", "pubd_objects"], + ); } - #[tokio::test] - async fn prepare_then_upgrade_0_12_1() { - let source = PathBuf::from("test-resources/migrations/v0_12_1/"); - test_upgrade(source).await; + #[test] + fn prepare_then_upgrade_0_12_1() { + test_upgrade("test-resources/migrations/v0_12_1/", &["pubd", "pubd_objects"]); } - #[tokio::test] - async fn prepare_then_upgrade_0_13_0() { - let source = PathBuf::from("test-resources/migrations/v0_13_1/"); - test_upgrade(source).await; + #[test] + fn prepare_then_upgrade_0_13_0() { + test_upgrade( + "test-resources/migrations/v0_13_1/", + &[ + "ca_objects", + "cas", + "keys", + "pubd", + "pubd_objects", + "signers", + "status", + "ta_proxy", + "ta_signer", + ], + ); } #[test] @@ -1152,18 +1028,23 @@ mod tests { let _repo: pre_0_13_0::OldRepositoryContent = serde_json::from_str(json).unwrap(); } - #[cfg(all(feature = "hsm", not(any(feature = "hsm-tests-kmip", feature = "hsm-tests-pkcs11"))))] + // Used by complex hsm feature dependent functions. + #[allow(dead_code)] fn unmapped_keys_test_core(do_upgrade: bool) { let expected_key_id = KeyIdentifier::from_str("5CBCAB14B810C864F3EEA8FD102B79F4E53FCC70").unwrap(); - // Place a key previously created by an OpenSSL signer in the KEYS_NS under the Krill data dir. - // Then run the upgrade. It should find the key and add it to the mapper. 
- let (data_dir, cleanup) = tmp_dir(); - let storage_uri = storage_uri_from_data_dir(&data_dir).unwrap(); - let source = PathBuf::from("test-resources/migrations/unmapped_keys/"); - file::backup_dir(&source, &data_dir).unwrap(); + // Copy test data into test storage + let mem_storage_base_uri = test::mem_storage(); - let mut config = Config::test(&storage_uri, Some(&data_dir), false, false, false, false); + let source_url = Url::parse("local://test-resources/migrations/unmapped_keys/").unwrap(); + let source_store = KeyValueStore::create(&source_url, KEYS_NS).unwrap(); + + let target_store = KeyValueStore::create(&mem_storage_base_uri, KEYS_NS).unwrap(); + target_store.import(&source_store).unwrap(); + + let bogus_path = PathBuf::from("/dev/null"); // needed for tls_dir etc, but will be ignored here + + let mut config = Config::test(&mem_storage_base_uri, Some(&bogus_path), false, false, false, false); let _ = config.init_logging(); config.process().unwrap(); @@ -1175,7 +1056,7 @@ mod tests { // is associated with the newly created mapper store and is thus able to use the // key that we placed on disk. let probe_interval = Duration::from_secs(config.signer_probe_retry_seconds); - let krill_signer = KrillSignerBuilder::new(&storage_uri, probe_interval, &config.signers) + let krill_signer = KrillSignerBuilder::new(&mem_storage_base_uri, probe_interval, &config.signers) .with_default_signer(config.default_signer()) .with_one_off_signer(config.one_off_signer()) .build() @@ -1191,13 +1072,11 @@ mod tests { if do_upgrade { // Verify that the mapper has a record of the test key belonging to the signer - assert!(mapper.get_signer_for_key(&expected_key_id).is_ok()); + mapper.get_signer_for_key(&expected_key_id).unwrap(); } else { // Verify that the mapper does NOT have a record of the test key belonging to the signer assert!(mapper.get_signer_for_key(&expected_key_id).is_err()); } - - cleanup(); } #[cfg(all(feature = "hsm", not(any(feature = "hsm-tests-kmip", feature = "hsm-tests-pkcs11"))))] diff --git a/tests/auth_check.rs b/tests/auth_check.rs index 20aaf8600..c60b531ce 100644 --- a/tests/auth_check.rs +++ b/tests/auth_check.rs @@ -5,7 +5,7 @@ use rpki::ca::idexchange::Handle; use krill::{ commons::api::Token, - test::{init_ca, start_krill_with_custom_config, test_config, tmp_storage}, + test::{init_ca, mem_storage, start_krill_with_custom_config, test_config}, }; extern crate krill; @@ -16,7 +16,7 @@ async fn auth_check() { // Use a copy of the default test Krill config but change the server admin token thereby hopefully causing the // bearer token sent by the test suite support functions not to match and thus be rejected which in turn should // cause a Rust panic. - let storage_uri = tmp_storage(); + let storage_uri = mem_storage(); let mut config = test_config(&storage_uri, None, false, false, false, false); config.admin_token = Token::from("wrong secret"); diff --git a/tests/benchmark.rs b/tests/benchmark.rs index c5c5cc4d8..edd96790f 100644 --- a/tests/benchmark.rs +++ b/tests/benchmark.rs @@ -6,7 +6,7 @@ use log::LevelFilter; #[tokio::test(flavor = "multi_thread")] async fn benchmark() { let (data_dir, cleanup) = tmp_dir(); - let storage_uri = tmp_storage(); + let storage_uri = mem_storage(); let cas = 10; let ca_roas = 10; diff --git a/tests/functional_ca_import.rs b/tests/functional_ca_import.rs index 31174da0e..48e7a19fe 100644 --- a/tests/functional_ca_import.rs +++ b/tests/functional_ca_import.rs @@ -11,7 +11,7 @@ async fn functional_ca_import() { // Start an empty Krill instance. 
let (data_dir, cleanup) = tmp_dir(); - let krill_storage = tmp_storage(); + let krill_storage = mem_storage(); let mut config = test_config(&krill_storage, Some(&data_dir), false, false, false, false); config.ta_support_enabled = true; config.ta_signer_enabled = true; diff --git a/tests/functional_keyroll.rs b/tests/functional_keyroll.rs index d642017f2..0aa7f8d57 100644 --- a/tests/functional_keyroll.rs +++ b/tests/functional_keyroll.rs @@ -23,8 +23,7 @@ use krill::{ #[tokio::test] async fn functional_keyroll() { let (data_dir, cleanup) = tmp_dir(); - let storage_uri = tmp_storage(); - // let storage_uri = util::storage::storage_uri_from_data_dir(&data_dir).unwrap(); + let storage_uri = mem_storage(); let config = test_config(&storage_uri, Some(&data_dir), true, false, false, false); start_krill(config).await; From 42992a7c865492ddc9b2ac5cb49919ce4784a933 Mon Sep 17 00:00:00 2001 From: Tim Bruijnzeels Date: Wed, 30 Aug 2023 15:48:13 +0200 Subject: [PATCH 07/18] Fix Feature Frenzy --- src/upgrades/mod.rs | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/src/upgrades/mod.rs b/src/upgrades/mod.rs index aafe24f97..547176b0f 100644 --- a/src/upgrades/mod.rs +++ b/src/upgrades/mod.rs @@ -2,7 +2,7 @@ //! - Updating the format of commands or events //! - Export / Import data -use std::{convert::TryInto, fmt, str::FromStr, time::Duration}; +use std::{convert::TryInto, fmt, str::FromStr}; use serde::{de::DeserializeOwned, Deserialize}; @@ -11,7 +11,6 @@ use rpki::{ca::idexchange::MyHandle, repository::x509::Time}; use crate::{ commons::{ actor::Actor, - crypto::KrillSignerBuilder, error::KrillIoError, eventsourcing::{ segment, Aggregate, AggregateStore, AggregateStoreError, Key, KeyValueError, KeyValueStore, Scope, Segment, @@ -818,12 +817,13 @@ fn record_preexisting_openssl_keys_in_signer_mapper(config: &Config) -> Result<( let keys_key_store = KeyValueStore::create(&config.storage_uri, KEYS_NS)?; info!("Mapping OpenSSL signer keys, using uri: {}", config.storage_uri); - let probe_interval = Duration::from_secs(config.signer_probe_retry_seconds); - let krill_signer = KrillSignerBuilder::new(&config.storage_uri, probe_interval, &config.signers) - .with_default_signer(config.default_signer()) - .with_one_off_signer(config.one_off_signer()) - .build() - .unwrap(); + let probe_interval = std::time::Duration::from_secs(config.signer_probe_retry_seconds); + let krill_signer = + crate::commons::crypto::KrillSignerBuilder::new(&config.storage_uri, probe_interval, &config.signers) + .with_default_signer(config.default_signer()) + .with_one_off_signer(config.one_off_signer()) + .build() + .unwrap(); // For every file (key) in the legacy OpenSSL signer keys directory @@ -1028,8 +1028,7 @@ mod tests { let _repo: pre_0_13_0::OldRepositoryContent = serde_json::from_str(json).unwrap(); } - // Used by complex hsm feature dependent functions. - #[allow(dead_code)] + #[cfg(all(feature = "hsm", not(any(feature = "hsm-tests-kmip", feature = "hsm-tests-pkcs11"))))] fn unmapped_keys_test_core(do_upgrade: bool) { let expected_key_id = KeyIdentifier::from_str("5CBCAB14B810C864F3EEA8FD102B79F4E53FCC70").unwrap(); From 4798e073a1fa629d4213eae1e30c500cc81808a5 Mon Sep 17 00:00:00 2001 From: Tim Bruijnzeels Date: Fri, 1 Sep 2023 11:15:30 +0200 Subject: [PATCH 08/18] Improve error logging. 
--- src/commons/eventsourcing/kv.rs | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/src/commons/eventsourcing/kv.rs b/src/commons/eventsourcing/kv.rs index 03d90444a..11c34f9f7 100644 --- a/src/commons/eventsourcing/kv.rs +++ b/src/commons/eventsourcing/kv.rs @@ -133,7 +133,21 @@ impl KeyValueStore { /// returns None if it cannot be found. pub fn get(&self, key: &Key) -> Result, KeyValueError> { if let Some(value) = self.get_raw_value(key)? { - Ok(serde_json::from_value(value)?) + match serde_json::from_value(value) { + Ok(result) => Ok(result), + Err(e) => { + // Get the value again so that we can do a full error report + let value = self.get_raw_value(key)?; + let value_str = value.map(|v| v.to_string()).unwrap_or("".to_string()); + + let expected_type = std::any::type_name::(); + + Err(KeyValueError::Other(format!( + "Could not deserialize value for key '{}'. Expected type: {}. Error: {}. Value was: {}", + key, expected_type, e, value_str + ))) + } + } } else { Ok(None) } From 37958102d630cd6b1a36018538224f8021db0dba Mon Sep 17 00:00:00 2001 From: Tim Bruijnzeels Date: Fri, 1 Sep 2023 11:17:36 +0200 Subject: [PATCH 09/18] Fix upgrade code. --- src/upgrades/mod.rs | 31 +++++++++++++++-------- src/upgrades/pre_0_10_0/cas_migration.rs | 6 +++-- src/upgrades/pre_0_10_0/old_events.rs | 6 +---- src/upgrades/pre_0_10_0/pubd_migration.rs | 18 ++++++------- 4 files changed, 33 insertions(+), 28 deletions(-) diff --git a/src/upgrades/mod.rs b/src/upgrades/mod.rs index 547176b0f..eba7473f2 100644 --- a/src/upgrades/mod.rs +++ b/src/upgrades/mod.rs @@ -14,7 +14,7 @@ use crate::{ error::KrillIoError, eventsourcing::{ segment, Aggregate, AggregateStore, AggregateStoreError, Key, KeyValueError, KeyValueStore, Scope, Segment, - SegmentExt, Storable, StoredCommand, WalStore, WithStorableDetails, + SegmentExt, Storable, StoredCommand, WalStore, WalStoreError, WithStorableDetails, }, util::KrillVersion, KrillResult, @@ -25,6 +25,7 @@ use crate::{ }, daemon::{config::Config, krillserver::KrillServer, properties::PropertiesManager}, pubd, + upgrades::pre_0_14_0::{OldStoredCommand, OldStoredEffect, OldStoredEvent}, }; #[cfg(feature = "hsm")] @@ -33,15 +34,14 @@ use rpki::crypto::KeyIdentifier; #[cfg(feature = "hsm")] use crate::commons::crypto::SignerHandle; -use self::pre_0_13_0::OldRepositoryContent; +use self::{pre_0_13_0::OldRepositoryContent, pre_0_14_0::OldCommandKey}; pub mod pre_0_10_0; #[allow(clippy::mutable_key_type)] pub mod pre_0_13_0; -mod pre_0_14_0; -pub use self::pre_0_14_0::*; +pub mod pre_0_14_0; pub type UpgradeResult = Result; @@ -107,6 +107,7 @@ impl UpgradeVersions { #[allow(clippy::large_enum_variant)] pub enum UpgradeError { AggregateStoreError(AggregateStoreError), + WalStoreError(WalStoreError), KeyStoreError(KeyValueError), IoError(KrillIoError), Unrecognised(String), @@ -121,6 +122,7 @@ impl fmt::Display for UpgradeError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let cause = match &self { UpgradeError::AggregateStoreError(e) => format!("Aggregate Error: {}", e), + UpgradeError::WalStoreError(e) => format!("Write-Ahead-Log Store Error: {}", e), UpgradeError::KeyStoreError(e) => format!("Keystore Error: {}", e), UpgradeError::IoError(e) => format!("I/O Error: {}", e), UpgradeError::Unrecognised(s) => format!("Unrecognised: {}", s), @@ -150,6 +152,12 @@ impl From for UpgradeError { } } +impl From for UpgradeError { + fn from(e: WalStoreError) -> Self { + UpgradeError::WalStoreError(e) + } +} + impl From for UpgradeError { fn from(e: 
KeyValueError) -> Self { UpgradeError::KeyStoreError(e) @@ -766,7 +774,7 @@ pub fn finalise_data_migration( // that would need to be migrated. let mut upgrade_store = KeyValueStore::create_upgrade_store(&config.storage_uri, ns)?; if !upgrade_store.is_empty()? { - debug!("Migrate new data for {} and archive old", ns); + info!("Migrate new data for {} and archive old", ns); let mut current_store = KeyValueStore::create(&config.storage_uri, ns)?; if !current_store.is_empty()? { current_store.migrate_to_archive(&config.storage_uri, ns)?; @@ -1054,12 +1062,13 @@ mod tests { // Now test that a newly initialized `KrillSigner` with a default OpenSSL signer // is associated with the newly created mapper store and is thus able to use the // key that we placed on disk. - let probe_interval = Duration::from_secs(config.signer_probe_retry_seconds); - let krill_signer = KrillSignerBuilder::new(&mem_storage_base_uri, probe_interval, &config.signers) - .with_default_signer(config.default_signer()) - .with_one_off_signer(config.one_off_signer()) - .build() - .unwrap(); + let probe_interval = std::time::Duration::from_secs(config.signer_probe_retry_seconds); + let krill_signer = + crate::commons::crypto::KrillSignerBuilder::new(&mem_storage_base_uri, probe_interval, &config.signers) + .with_default_signer(config.default_signer()) + .with_one_off_signer(config.one_off_signer()) + .build() + .unwrap(); // Trigger the signer to be bound to the one the migration just registered in the mapper krill_signer.random_serial().unwrap(); diff --git a/src/upgrades/pre_0_10_0/cas_migration.rs b/src/upgrades/pre_0_10_0/cas_migration.rs index 31cf04952..fde01bb10 100644 --- a/src/upgrades/pre_0_10_0/cas_migration.rs +++ b/src/upgrades/pre_0_10_0/cas_migration.rs @@ -5,7 +5,7 @@ use rpki::{ca::idexchange::CaHandle, repository::x509::Time}; use crate::commons::eventsourcing::{StoredCommand, StoredCommandBuilder}; use crate::daemon::ca::CaObjects; -use crate::upgrades::{OldStoredCommand, UnconvertedEffect}; +use crate::upgrades::UnconvertedEffect; use crate::{ commons::{ api::CertAuthStorableCommand, @@ -18,6 +18,7 @@ use crate::{ }, upgrades::{ pre_0_10_0::{Pre0_10CertAuthEvent, Pre0_10CertAuthInitEvent}, + pre_0_14_0::OldStoredCommand, UpgradeAggregateStorePre0_14, UpgradeError, UpgradeMode, UpgradeResult, }, }; @@ -43,11 +44,12 @@ impl CaObjectsMigration { } fn prepare_new_data_for(&self, ca: &CaHandle) -> Result<(), UpgradeError> { - let key = Key::new_global(Segment::parse_lossy(ca.as_str())); // ca should always be a valid Segment + let key = Key::new_global(Segment::parse_lossy(&format!("{}.json", ca))); // ca should always be a valid Segment if let Some(old_objects) = self.current_store.get::(&key)? 
{ let converted: CaObjects = old_objects.try_into()?; self.new_store.store(&key, &converted)?; + debug!("Stored updated objects for CA {} in {}", ca, self.new_store); } Ok(()) diff --git a/src/upgrades/pre_0_10_0/old_events.rs b/src/upgrades/pre_0_10_0/old_events.rs index 6850d8dc3..f832f27da 100644 --- a/src/upgrades/pre_0_10_0/old_events.rs +++ b/src/upgrades/pre_0_10_0/old_events.rs @@ -34,7 +34,7 @@ use crate::{ }, daemon::ta::{TaCertDetails, TrustAnchorLocator}, pubd::{Publisher, RepositoryAccessEvent, RepositoryAccessInitEvent}, - upgrades::{OldStoredEvent, UpgradeError}, + upgrades::UpgradeError, }; #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] @@ -1525,8 +1525,6 @@ impl Eq for OldPublishedCrl {} //------------ OldRepositoryAccessIni ------------------------------------------- -pub type Pre0_10RepositoryAccessIni = OldStoredEvent; - #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] pub struct Pre0_10RepositoryAccessInitDetails { id_cert: IdCert, @@ -1552,8 +1550,6 @@ impl fmt::Display for Pre0_10RepositoryAccessInitDetails { //------------ OldRepositoryAccessEvent ----------------------------------------- -pub type Pre0_10RepositoryAccessEvent = OldStoredEvent; - #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] #[allow(clippy::large_enum_variant)] #[serde(rename_all = "snake_case", tag = "type")] diff --git a/src/upgrades/pre_0_10_0/pubd_migration.rs b/src/upgrades/pre_0_10_0/pubd_migration.rs index 1015d96eb..58df57b6d 100644 --- a/src/upgrades/pre_0_10_0/pubd_migration.rs +++ b/src/upgrades/pre_0_10_0/pubd_migration.rs @@ -9,10 +9,9 @@ use crate::{ constants::PUBSERVER_NS, daemon::config::Config, pubd::{RepositoryAccess, RepositoryAccessEvent, RepositoryAccessInitEvent}, - upgrades::{ - OldRepositoryAccessEvent, OldRepositoryAccessInitEvent, OldStoredCommand, UnconvertedEffect, - UpgradeAggregateStorePre0_14, UpgradeMode, UpgradeResult, UpgradeVersions, - }, + upgrades::pre_0_10_0::{Pre0_10RepositoryAccessEventDetails, Pre0_10RepositoryAccessInitDetails}, + upgrades::pre_0_14_0::OldStoredCommand, + upgrades::{UnconvertedEffect, UpgradeAggregateStorePre0_14, UpgradeMode, UpgradeResult, UpgradeVersions}, }; /// Migrates the events, snapshots and info for the event-sourced RepositoryAccess. @@ -39,7 +38,7 @@ impl PublicationServerRepositoryAccessMigration { if store_migration .current_kv_store .has_scope(&Scope::from_segment(segment!("0")))? 
- && versions.from > KrillVersion::release(0, 9, 0) + && versions.from >= KrillVersion::release(0, 9, 0) && versions.from < KrillVersion::candidate(0, 10, 0, 1) { store_migration.upgrade(mode) @@ -52,8 +51,8 @@ impl PublicationServerRepositoryAccessMigration { impl UpgradeAggregateStorePre0_14 for PublicationServerRepositoryAccessMigration { type Aggregate = RepositoryAccess; - type OldInitEvent = OldRepositoryAccessInitEvent; - type OldEvent = OldRepositoryAccessEvent; + type OldInitEvent = Pre0_10RepositoryAccessInitDetails; + type OldEvent = Pre0_10RepositoryAccessEventDetails; type OldStorableDetails = StorableRepositoryCommand; fn store_name(&self) -> &str { @@ -81,7 +80,7 @@ impl UpgradeAggregateStorePre0_14 for PublicationServerRepositoryAccessMigration ) -> UpgradeResult> { let details = StorableRepositoryCommand::Init; let builder = StoredCommandBuilder::::new(actor, time, handle, 0, details); - let init_event: RepositoryAccessInitEvent = old_init.into_details(); + let init_event: RepositoryAccessInitEvent = old_init.into(); Ok(builder.finish_with_init_event(init_event)) } @@ -103,8 +102,7 @@ impl UpgradeAggregateStorePre0_14 for PublicationServerRepositoryAccessMigration let new_command = match old_effect { UnconvertedEffect::Error { msg } => new_command_builder.finish_with_error(msg), UnconvertedEffect::Success { events } => { - let full_events: Vec = - events.into_iter().map(|old| old.into_details()).collect(); + let full_events: Vec = events.into_iter().map(|old| old.into()).collect(); new_command_builder.finish_with_events(full_events) } }; From 0ce9e786d39d0c142045eaba528a7e71cbcbf4a3 Mon Sep 17 00:00:00 2001 From: Tim Bruijnzeels Date: Fri, 1 Sep 2023 11:48:58 +0200 Subject: [PATCH 10/18] Add support for data migration. --- .gitignore | 2 +- src/daemon/ca/publishing.rs | 2 +- src/upgrades/data_migration.rs | 236 +++++++++++++++++++++++++++++++++ src/upgrades/mod.rs | 2 + 4 files changed, 240 insertions(+), 2 deletions(-) create mode 100644 src/upgrades/data_migration.rs diff --git a/.gitignore b/.gitignore index 5dfdbbd5b..b239de2c7 100644 --- a/.gitignore +++ b/.gitignore @@ -2,7 +2,7 @@ .idea/ .vscode/ .DS_Store -data* +data/ krill.log target/ tmp diff --git a/src/daemon/ca/publishing.rs b/src/daemon/ca/publishing.rs index 04a54a782..a5148fdc3 100644 --- a/src/daemon/ca/publishing.rs +++ b/src/daemon/ca/publishing.rs @@ -177,7 +177,7 @@ impl CaObjectsStore { Key::new_global(Segment::parse_lossy(&format!("{}.json", ca))) // ca should always be a valid Segment } - fn cas(&self) -> KrillResult> { + pub fn cas(&self) -> KrillResult> { let cas = self .store .read() diff --git a/src/upgrades/data_migration.rs b/src/upgrades/data_migration.rs new file mode 100644 index 000000000..377c47a5b --- /dev/null +++ b/src/upgrades/data_migration.rs @@ -0,0 +1,236 @@ +//! Support data migrations from one KV storage type to another. 
+
+use std::{str::FromStr, sync::Arc};
+
+use kvx::{Namespace, Scope};
+use rpki::crypto::KeyIdentifier;
+use url::Url;
+
+use crate::{
+    commons::{
+        crypto::{dispatch::signerinfo::SignerInfo, KrillSignerBuilder, OpenSslSigner},
+        eventsourcing::{Aggregate, AggregateStore, KeyValueStore, WalStore, WalSupport},
+    },
+    constants::{
+        CASERVER_NS, KEYS_NS, PROPERTIES_NS, PUBSERVER_CONTENT_NS, PUBSERVER_NS, SIGNERS_NS, TA_PROXY_SERVER_NS,
+        TA_SIGNER_SERVER_NS,
+    },
+    daemon::{
+        ca::{CaObjectsStore, CertAuth},
+        config::Config,
+        properties::{Properties, PropertiesManager},
+        ta::{TrustAnchorProxy, TrustAnchorSigner},
+    },
+    pubd::{RepositoryAccess, RepositoryContent},
+    upgrades::{finalise_data_migration, prepare_upgrade_data_migrations, UpgradeError},
+};
+
+use super::UpgradeResult;
+
+pub fn migrate(mut config: Config, target_storage: Url) -> UpgradeResult<()> {
+    // Copy the source data from config unmodified into the target_storage
+    info!("-----------------------------------------------------------");
+    info!(" Krill Data Migration");
+    info!("-----------------------------------------------------------");
+    info!("");
+    info!("-----------------------------------------------------------");
+    info!("STEP 1: Copy data");
+    info!("");
+    info!("From: {}", &config.storage_uri);
+    info!(" To: {}", &target_storage);
+    info!("-----------------------------------------------------------");
+    info!("");
+
+    copy_data_for_migration(&config, &target_storage)?;
+
+    // Update the config with the new target_storage
+    // and perform a normal data migration - the source data
+    // could be for an older version of Krill.
+    config.storage_uri = target_storage;
+    let properties_manager = PropertiesManager::create(&config.storage_uri, false)?;
+
+    info!("-----------------------------------------------------------");
+    info!("STEP 2: Upgrade data to current Krill version (if needed)");
+    info!("-----------------------------------------------------------");
+    info!("");
+    if let Some(upgrade) = prepare_upgrade_data_migrations(
+        crate::upgrades::UpgradeMode::PrepareToFinalise,
+        &config,
+        &properties_manager,
+    )? {
+        finalise_data_migration(upgrade.versions(), &config, &properties_manager)?;
+    }
+
+    info!("-----------------------------------------------------------");
+    info!("STEP 3: Verify data in target store");
+    info!("");
+    info!("We verify the data by warming the cache for different types");
+    info!("of data managed by Krill, without actually starting Krill.");
+    info!("-----------------------------------------------------------");
+    info!("");
+    //
+    // This step should not be needed, because:
+    // - upgrades (if there was one) already verify the data
+    // - if there was no upgrade, the data was not changed and there
+    //   is nothing we can do here.
+    //
+    // That said, it's a pretty easy check to perform and it kind of makes
+    // sense to do it now, even if only to point users at deeper issues in
+    // the source data.
+
+ verify_target_data(&config) +} + +fn verify_target_data(config: &Config) -> UpgradeResult<()> { + check_agg_store::(config, PROPERTIES_NS, "Properties")?; + check_agg_store::(config, SIGNERS_NS, "Signer")?; + + let ca_store = check_agg_store::(config, CASERVER_NS, "CAs")?; + check_ca_objects(config, ca_store)?; + + check_agg_store::(config, PUBSERVER_NS, "Publication Server Access")?; + check_wal_store::(config, PUBSERVER_CONTENT_NS, "Publication Server Objects")?; + check_agg_store::(config, TA_PROXY_SERVER_NS, "TA Proxy")?; + check_agg_store::(config, TA_SIGNER_SERVER_NS, "TA Signer")?; + + check_openssl_keys(config)?; + + Ok(()) +} + +fn check_openssl_keys(config: &Config) -> UpgradeResult<()> { + info!(""); + info!("Verify: OpenSSL keys"); + let open_ssl_signer = OpenSslSigner::build(&config.storage_uri, "test", None) + .map_err(|e| UpgradeError::Custom(format!("Cannot create openssl signer: {}", e)))?; + let keys_key_store = KeyValueStore::create(&config.storage_uri, KEYS_NS)?; + + for key in keys_key_store.keys(&Scope::global(), "")? { + let key_id = KeyIdentifier::from_str(key.name().as_str()).map_err(|e| { + UpgradeError::Custom(format!( + "Cannot parse as key identifier: {}. Error: {}", + key.name().as_str(), + e + )) + })?; + open_ssl_signer.get_key_info(&key_id).map_err(|e| { + UpgradeError::Custom(format!( + "Cannot get key with key_id {} from openssl keystore. Error: {}", + key_id, e + )) + })?; + } + info!("Ok"); + + Ok(()) +} + +fn check_agg_store(config: &Config, ns: &Namespace, name: &str) -> UpgradeResult> { + info!(""); + info!("Verify: {name}"); + let store: AggregateStore = AggregateStore::create(&config.storage_uri, ns, false)?; + if !store.list()?.is_empty() { + store.warm()?; + info!("Ok"); + } else { + info!("not applicable"); + } + Ok(store) +} + +fn check_wal_store(config: &Config, ns: &Namespace, name: &str) -> UpgradeResult<()> { + info!(""); + info!("Verify: {name}"); + let store: WalStore = WalStore::create(&config.storage_uri, ns)?; + if !store.list()?.is_empty() { + store.warm()?; + info!("Ok"); + } else { + info!("not applicable"); + } + Ok(()) +} + +fn check_ca_objects(config: &Config, ca_store: AggregateStore) -> UpgradeResult<()> { + // make a dummy Signer to use for the CaObjectsStore - it won't be used, + // but it's needed for construction. + let probe_interval = std::time::Duration::from_secs(config.signer_probe_retry_seconds); + let signer = Arc::new( + KrillSignerBuilder::new(&config.storage_uri, probe_interval, &config.signers) + .with_default_signer(config.default_signer()) + .with_one_off_signer(config.one_off_signer()) + .build()?, + ); + + let ca_objects_store = CaObjectsStore::create(&config.storage_uri, config.issuance_timing.clone(), signer)?; + + let cas_with_objects = ca_objects_store.cas()?; + + for ca in &cas_with_objects { + ca_objects_store.ca_objects(ca)?; + if !ca_store.has(ca)? { + warn!(" Objects found for CA '{}' which no longer exists.", ca); + } + } + + for ca in ca_store.list()? { + if !cas_with_objects.contains(&ca) { + debug!(" CA '{}' did not have any CA objects yet.", ca); + } + } + + Ok(()) +} + +fn copy_data_for_migration(config: &Config, target_storage: &Url) -> UpgradeResult<()> { + for ns in &[ + "ca_objects", + "cas", + "keys", + "pubd", + "pubd_objects", + "signers", + "status", + "ta_proxy", + "ta_signer", + ] { + let namespace = Namespace::parse(ns) + .map_err(|_| UpgradeError::Custom(format!("Cannot parse namespace '{}'. 
This is a bug.", ns)))?; + let source_kv_store = KeyValueStore::create(&config.storage_uri, namespace)?; + if !source_kv_store.is_empty()? { + let target_kv_store = KeyValueStore::create(target_storage, namespace)?; + target_kv_store.import(&source_kv_store)?; + } + } + + Ok(()) +} + +#[cfg(test)] +pub mod tests { + + use std::path::PathBuf; + + use log::LevelFilter; + + use crate::test; + + use super::*; + + #[test] + fn test_data_migration() { + // Create a config file that uses test data for its storage_uri + let test_sources_base = "test-resources/migrations/v0_9_5/"; + let test_sources_url = Url::parse(&format!("local://{}", test_sources_base)).unwrap(); + + let bogus_path = PathBuf::from("/dev/null"); // needed for tls_dir etc, but will be ignored here + let mut config = Config::test(&test_sources_url, Some(&bogus_path), false, false, false, false); + config.log_level = LevelFilter::Info; + + let _ = config.init_logging(); + + // Create an in-memory target store to migrate to + let target_store = test::mem_storage(); + + migrate(config, target_store).unwrap(); + } +} diff --git a/src/upgrades/mod.rs b/src/upgrades/mod.rs index eba7473f2..358dc23ab 100644 --- a/src/upgrades/mod.rs +++ b/src/upgrades/mod.rs @@ -36,6 +36,8 @@ use crate::commons::crypto::SignerHandle; use self::{pre_0_13_0::OldRepositoryContent, pre_0_14_0::OldCommandKey}; +pub mod data_migration; + pub mod pre_0_10_0; #[allow(clippy::mutable_key_type)] From 0f508be1cbc53254e039c0a8d5f635d07186e2e0 Mon Sep 17 00:00:00 2001 From: Tim Bruijnzeels Date: Fri, 1 Sep 2023 16:31:00 +0200 Subject: [PATCH 11/18] Add command line option to migrate data to krillup. --- Cargo.lock | 6 +- src/bin/krillup.rs | 191 +++++++++++++++++++++++++++++-------------- src/daemon/config.rs | 2 +- 3 files changed, 134 insertions(+), 65 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4f82d90ce..00839c6c9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1131,7 +1131,7 @@ dependencies = [ [[package]] name = "kvx" version = "0.7.0" -source = "git+https://github.com/nlnetlabs/kvx?branch=support-namespace-migrations#3619a8541028e65b750c678413c9b7089d9f6c4e" +source = "git+https://github.com/nlnetlabs/kvx?branch=support-namespace-migrations#5a476cbef68b70771ae78a4b94c19bc5773b1c99" dependencies = [ "kvx_macros", "kvx_types", @@ -1148,7 +1148,7 @@ dependencies = [ [[package]] name = "kvx_macros" version = "0.7.0" -source = "git+https://github.com/nlnetlabs/kvx?branch=support-namespace-migrations#3619a8541028e65b750c678413c9b7089d9f6c4e" +source = "git+https://github.com/nlnetlabs/kvx?branch=support-namespace-migrations#5a476cbef68b70771ae78a4b94c19bc5773b1c99" dependencies = [ "kvx_types", "proc-macro-error", @@ -1160,7 +1160,7 @@ dependencies = [ [[package]] name = "kvx_types" version = "0.7.0" -source = "git+https://github.com/nlnetlabs/kvx?branch=support-namespace-migrations#3619a8541028e65b750c678413c9b7089d9f6c4e" +source = "git+https://github.com/nlnetlabs/kvx?branch=support-namespace-migrations#5a476cbef68b70771ae78a4b94c19bc5773b1c99" dependencies = [ "postgres", "postgres-types", diff --git a/src/bin/krillup.rs b/src/bin/krillup.rs index 5e9006d31..06ac76dfa 100644 --- a/src/bin/krillup.rs +++ b/src/bin/krillup.rs @@ -1,76 +1,145 @@ extern crate krill; -use clap::{App, Arg}; -use log::info; +use clap::{App, Arg, ArgMatches, SubCommand}; +use log::{info, LevelFilter}; use krill::{ constants::{KRILL_DEFAULT_CONFIG_FILE, KRILL_UP_APP, KRILL_VERSION}, - daemon::{config::Config, properties::PropertiesManager}, - 
upgrades::{prepare_upgrade_data_migrations, UpgradeMode}, + daemon::{ + config::{Config, LogType}, + properties::PropertiesManager, + }, + upgrades::{data_migration::migrate, prepare_upgrade_data_migrations, UpgradeMode}, }; +use url::Url; -#[tokio::main] -async fn main() { - let matches = App::new(KRILL_UP_APP) - .version(KRILL_VERSION) - .about("\nThis tool can be used to reduce the risk and time needed for Krill upgrades, by preparing and verifying any data migrations that would be needed. The data_dir setting from the provided configuration file is used to find the data to migrate, and prepared data will be saved under 'data_dir/upgrade-data'.") - .arg( - Arg::with_name("config") - .short("c") - .long("config") - .value_name("FILE") - .help(&format!( - "Override the path to the config file (default: '{}')", - KRILL_DEFAULT_CONFIG_FILE - )) - .required(false), - ) - .get_matches(); - - let config_file = matches.value_of("config").unwrap_or(KRILL_DEFAULT_CONFIG_FILE); - - match Config::create(config_file, true) { - Ok(config) => { - let properties_manager = match PropertiesManager::create(&config.storage_uri, config.use_history_cache) { - Ok(mgr) => mgr, - Err(e) => { - eprintln!("*** Error Preparing Data Migration ***"); - eprintln!("{}", e); - eprintln!(); - eprintln!("Note that your server data has NOT been modified. Do not upgrade krill itself yet!"); - eprintln!("If you did upgrade krill, then downgrade it to your previous installed version."); - ::std::process::exit(1); - } - }; +fn main() { + let matches = make_matches(); + + match parse_matches(matches) { + Err(e) => { + eprintln!("{}", e); + ::std::process::exit(1); + } + Ok(mode) => match mode { + KrillUpMode::Prepare { config } => { + let properties_manager = match PropertiesManager::create(&config.storage_uri, config.use_history_cache) + { + Ok(mgr) => mgr, + Err(e) => { + eprintln!("*** Error Preparing Data Migration ***"); + eprintln!("{}", e); + eprintln!(); + eprintln!("Note that your server data has NOT been modified. Do not upgrade krill itself yet!"); + eprintln!("If you did upgrade krill, then downgrade it to your previous installed version."); + ::std::process::exit(1); + } + }; - match prepare_upgrade_data_migrations(UpgradeMode::PrepareOnly, &config, &properties_manager) { - Err(e) => { - eprintln!("*** Error Preparing Data Migration ***"); + match prepare_upgrade_data_migrations(UpgradeMode::PrepareOnly, &config, &properties_manager) { + Err(e) => { + eprintln!("*** Error Preparing Data Migration ***"); + eprintln!("{}", e); + eprintln!(); + eprintln!("Note that your server data has NOT been modified. Do not upgrade krill itself yet!"); + eprintln!("If you did upgrade krill, then downgrade it to your previous installed version."); + ::std::process::exit(1); + } + Ok(opt) => match opt { + None => { + info!("No update needed"); + } + Some(report) => { + let from = report.versions().from(); + let to = report.versions().to(); + if report.data_migration() { + info!("Prepared and verified upgrade from {} to {}.", from, to,); + } else { + info!("No preparation is needed for the upgrade from {} to {}.", from, to) + } + } + }, + } + } + KrillUpMode::Migrate { config, target } => { + if let Err(e) = migrate(config, target) { + eprintln!("*** Error Migrating DATA ***"); eprintln!("{}", e); eprintln!(); - eprintln!("Note that your server data has NOT been modified. 
Do not upgrade krill itself yet!"); - eprintln!("If you did upgrade krill, then downgrade it to your previous installed version."); + eprintln!("Note that your server data has NOT been modified."); ::std::process::exit(1); } - Ok(opt) => match opt { - None => { - info!("No update needed"); - } - Some(report) => { - let from = report.versions().from(); - let to = report.versions().to(); - if report.data_migration() { - info!("Prepared and verified upgrade from {} to {}.", from, to,); - } else { - info!("No preparation is needed for the upgrade from {} to {}.", from, to) - } - } - }, } - } - Err(e) => { - eprintln!("Could not parse config: {}", e); - ::std::process::exit(1); - } + }, + } +} + +fn make_matches<'a>() -> ArgMatches<'a> { + let mut app = App::new(KRILL_UP_APP).version(KRILL_VERSION).about("\nThis tool can be used to reduce the risk and time needed for Krill upgrades, by preparing and verifying any data migrations that would be needed. The data_dir setting from the provided configuration file is used to find the data to migrate, and prepared data will be saved under 'data_dir/upgrade-data'."); + + let mut prep_sub = SubCommand::with_name("prepare") + .about("Prepares a Krill upgrade data migration if needed by the new Krill version. This operation leaves the current data unmodified. You can run this operation while Krill is running. This tool will exit and report an error in case of any issues. To finish the migration, restart Krill. The migration process will continue to ensure it includes any changes after the preparation."); + prep_sub = add_config_arg(prep_sub); + app = app.subcommand(prep_sub); + + let mut migrate_sub = SubCommand::with_name("migrate") + .about("Migrate Krill data to a different storage. Stop Krill before running this tool to ensure data does not change during migration. The original data in the storage defined in the config file is not modified. If your current data is for an older version of Krill, this tool will attempt to upgrade it. After successful migration, you can reconfigure Krill to use the new data storage and restart it."); + migrate_sub = add_config_arg(migrate_sub); + migrate_sub = add_new_storage_arg(migrate_sub); + app = app.subcommand(migrate_sub); + + app.get_matches() +} + +fn add_config_arg<'a, 'b>(app: App<'a, 'b>) -> App<'a, 'b> { + app.arg( + Arg::with_name("config") + .short("c") + .long("config") + .value_name("FILE") + .help("Override the path to the config file (default: '/etc/krill.conf').") + .required(false), + ) +} + +fn add_new_storage_arg<'a, 'b>(app: App<'a, 'b>) -> App<'a, 'b> { + app.arg( + Arg::with_name("target") + .short("t") + .long("target") + .value_name("URL") + .help("Provide the target storage URI string. E.g. local:///var/lib/krill or postgres://postgres@localhost/postgres.") + .required(true), + ) +} + +fn parse_matches(matches: ArgMatches) -> Result { + if let Some(m) = matches.subcommand_matches("prepare") { + let config = parse_config(m)?; + Ok(KrillUpMode::Prepare { config }) + } else if let Some(m) = matches.subcommand_matches("migrate") { + let target_str = m.value_of("target").ok_or("--target missing".to_string())?; + let target = Url::parse(target_str).map_err(|e| format!("cannot parse url: {}. Error: {}", target_str, e))?; + + let config = parse_config(m)?; + Ok(KrillUpMode::Migrate { config, target }) + } else { + Err("Cannot parse arguments. 
Use --help.".to_string()) } } + +fn parse_config(m: &ArgMatches) -> Result { + let config_file = m.value_of("config").unwrap_or(KRILL_DEFAULT_CONFIG_FILE); + let mut config = Config::create(config_file, true) + .map_err(|e| format!("Cannot parse config file '{}'. Error: {}", config_file, e))?; + + config.log_level = LevelFilter::Info; + config.log_type = LogType::Stderr; + + Ok(config) +} + +enum KrillUpMode { + Prepare { config: Config }, + Migrate { config: Config, target: Url }, +} diff --git a/src/daemon/config.rs b/src/daemon/config.rs index 7bc368198..bf4089b73 100644 --- a/src/daemon/config.rs +++ b/src/daemon/config.rs @@ -486,7 +486,7 @@ pub struct Config { pub log_level: LevelFilter, #[serde(default = "ConfigDefaults::log_type")] - log_type: LogType, + pub log_type: LogType, #[serde(default = "ConfigDefaults::log_file")] log_file: Option, From 0843f178c2139e2fcd164a85ffef40d9e3a00e47 Mon Sep 17 00:00:00 2001 From: Tim Bruijnzeels Date: Wed, 6 Sep 2023 16:19:20 +0200 Subject: [PATCH 12/18] Use transactions/locks (kvx::execute) for AggregateStore. --- Cargo.lock | 6 +- src/cli/ta_client.rs | 2 +- src/commons/eventsourcing/cmd.rs | 9 +- src/commons/eventsourcing/kv.rs | 4 + src/commons/eventsourcing/mod.rs | 1 + src/commons/eventsourcing/store.rs | 723 +++++++++++------------------ src/daemon/ca/manager.rs | 6 +- src/daemon/config.rs | 13 +- src/daemon/properties/mod.rs | 6 +- src/pubd/repository.rs | 4 +- src/upgrades/mod.rs | 12 +- 11 files changed, 291 insertions(+), 495 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 00839c6c9..16c3f4010 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1131,7 +1131,7 @@ dependencies = [ [[package]] name = "kvx" version = "0.7.0" -source = "git+https://github.com/nlnetlabs/kvx?branch=support-namespace-migrations#5a476cbef68b70771ae78a4b94c19bc5773b1c99" +source = "git+https://github.com/nlnetlabs/kvx?branch=support-namespace-migrations#7bb8ee911d13655b86df4433c8000005f33678ee" dependencies = [ "kvx_macros", "kvx_types", @@ -1148,7 +1148,7 @@ dependencies = [ [[package]] name = "kvx_macros" version = "0.7.0" -source = "git+https://github.com/nlnetlabs/kvx?branch=support-namespace-migrations#5a476cbef68b70771ae78a4b94c19bc5773b1c99" +source = "git+https://github.com/nlnetlabs/kvx?branch=support-namespace-migrations#7bb8ee911d13655b86df4433c8000005f33678ee" dependencies = [ "kvx_types", "proc-macro-error", @@ -1160,7 +1160,7 @@ dependencies = [ [[package]] name = "kvx_types" version = "0.7.0" -source = "git+https://github.com/nlnetlabs/kvx?branch=support-namespace-migrations#5a476cbef68b70771ae78a4b94c19bc5773b1c99" +source = "git+https://github.com/nlnetlabs/kvx?branch=support-namespace-migrations#7bb8ee911d13655b86df4433c8000005f33678ee" dependencies = [ "postgres", "postgres-types", diff --git a/src/cli/ta_client.rs b/src/cli/ta_client.rs index 3e9f0de49..3c1a3e9cd 100644 --- a/src/cli/ta_client.rs +++ b/src/cli/ta_client.rs @@ -1081,7 +1081,7 @@ impl TrustAnchorSignerManager { fn get_signer(&self) -> Result, Error> { if self.store.has(&self.ta_handle)? 
{ - self.store.get_latest(&self.ta_handle).map_err(Error::StorageError) + self.store.get_latest(&self.ta_handle).map_err(Error::KrillError) } else { Err(Error::other("Trust Anchor Signer is not initialised.")) } diff --git a/src/commons/eventsourcing/cmd.rs b/src/commons/eventsourcing/cmd.rs index f125a8ea7..e9ba291df 100644 --- a/src/commons/eventsourcing/cmd.rs +++ b/src/commons/eventsourcing/cmd.rs @@ -35,7 +35,7 @@ pub trait WithStorableDetails: Storable + Send + Sync { /// It should be storable in the same way as normal commands, sent to this /// aggregate type so that they can use the same kind of ProcessedCommand /// and CommandHistoryRecord -pub trait InitCommand: fmt::Display + Send + Sync { +pub trait InitCommand: Clone + fmt::Display + Send + Sync { /// Identify the type of storable component for this command. Commands /// may contain short-lived things (e.g. an Arc) or even secrets /// which should not be persisted. @@ -57,6 +57,7 @@ pub trait InitCommand: fmt::Display + Send + Sync { /// Convenience wrapper so that implementations can just implement /// ['InitCommandDetails'] and leave the id and version boilerplate. +#[derive(Clone, Debug)] pub struct SentInitCommand { handle: MyHandle, details: I, @@ -103,7 +104,7 @@ impl fmt::Display for SentInitCommand { /// Implement this for an enum with CommandDetails, so you you can reuse the /// id and version boilerplate from ['SentCommand']. -pub trait InitCommandDetails: fmt::Display + Send + Sync + 'static { +pub trait InitCommandDetails: Clone + fmt::Display + Send + Sync + 'static { type StorableDetails: WithStorableDetails; fn store(&self) -> Self::StorableDetails; @@ -116,7 +117,7 @@ pub trait InitCommandDetails: fmt::Display + Send + Sync + 'static { /// Think of this as the data container for your update API, plus some /// meta-data to ensure that the command is sent to the right instance of an /// Aggregate, and that concurrency issues are handled. -pub trait Command: fmt::Display + Send + Sync { +pub trait Command: Clone + fmt::Display + Send + Sync { /// Identify the type of storable component for this command. Commands /// may contain short-lived things (e.g. an Arc) or even secrets /// which should not be persisted. @@ -211,7 +212,7 @@ impl fmt::Display for SentCommand { /// Implement this for an enum with CommandDetails, so you you can reuse the /// id and version boilerplate from ['SentCommand']. -pub trait CommandDetails: fmt::Display + Send + Sync + 'static { +pub trait CommandDetails: Clone + fmt::Display + Send + Sync + 'static { type Event: Event; type StorableDetails: WithStorableDetails; diff --git a/src/commons/eventsourcing/kv.rs b/src/commons/eventsourcing/kv.rs index 11c34f9f7..3e37a2092 100644 --- a/src/commons/eventsourcing/kv.rs +++ b/src/commons/eventsourcing/kv.rs @@ -129,6 +129,10 @@ impl KeyValueStore { )?) } + pub fn inner(&self) -> &kvx::KeyValueStore { + &self.inner + } + /// Gets a value for a key, returns an error if the value cannot be deserialized, /// returns None if it cannot be found. 
pub fn get(&self, key: &Key) -> Result, KeyValueError> { diff --git a/src/commons/eventsourcing/mod.rs b/src/commons/eventsourcing/mod.rs index 68d46fdf2..d94d65333 100644 --- a/src/commons/eventsourcing/mod.rs +++ b/src/commons/eventsourcing/mod.rs @@ -79,6 +79,7 @@ mod tests { } } + #[derive(Clone, Debug)] struct PersonInitCommandDetails { name: String, } diff --git a/src/commons/eventsourcing/store.rs b/src/commons/eventsourcing/store.rs index 88a3a5693..9a70edc6b 100644 --- a/src/commons/eventsourcing/store.rs +++ b/src/commons/eventsourcing/store.rs @@ -14,7 +14,7 @@ use crate::commons::{ api::{CommandHistory, CommandHistoryCriteria, CommandHistoryRecord}, error::KrillIoError, eventsourcing::{ - cmd::Command, locks::HandleLocks, segment, Aggregate, Key, KeyValueError, KeyValueStore, PostSaveEventListener, + cmd::Command, segment, Aggregate, Key, KeyValueError, KeyValueStore, PostSaveEventListener, PreSaveEventListener, Scope, Segment, SegmentExt, StoredCommand, StoredCommandBuilder, }, }; @@ -37,7 +37,6 @@ pub struct AggregateStore { history_cache: Option>>>, pre_save_listeners: Vec>>, post_save_listeners: Vec>>, - locks: HandleLocks, } /// # Starting up @@ -68,7 +67,6 @@ impl AggregateStore { }; let pre_save_listeners = vec![]; let post_save_listeners = vec![]; - let locks = HandleLocks::default(); let store = AggregateStore { kv, @@ -76,7 +74,6 @@ impl AggregateStore { history_cache, pre_save_listeners, post_save_listeners, - locks, }; Ok(store) @@ -102,93 +99,6 @@ impl AggregateStore { Ok(()) } - /// Recovers aggregates to the latest possible consistent state based on the - /// stored commands, and the enclosed associated events found in the keystore. - /// - /// Use this in case the state on disk is found to be inconsistent. I.e. the - /// `warm` function failed and Krill exited. - /// - /// Will save new snapshot for latest consistent state and archive any surplus - /// or corrupt commands. Will archive any non-snapshot / non-command keys. - pub fn recover(&self) -> StoreResult<()> { - // TODO: See issue #1086 - for handle in self.list()? { - info!("Will recover state for '{}'", &handle); - - let scope = Scope::from_segment(Segment::parse_lossy(handle.as_str())); - - // If there is not even a valid init command for the - // aggregate, then we can really only drop it altogether. - - let aggregate_opt = match self.get_command(&handle, 0) { - Err(_) => None, - Ok(init_command) => { - if let Some(init_event) = init_command.into_init() { - // Initialise - let mut aggregate = A::init(handle.clone(), init_event); - - // Find the next command and apply it until there - // is no (valid) next command. - while let Ok(command) = self.get_command(&handle, aggregate.version()) { - aggregate.apply_command(command); - } - - // Ret - Some(aggregate) - } else { - None - } - } - }; - - match aggregate_opt { - None => { - warn!( - "No valid initialisation command found for '{}', will remove it.", - handle - ); - - self.kv.drop_scope(&scope)?; - } - Some(aggregate) => { - // Archive any and all keys that are not command keys - // for versions we just applied. - for key in self.kv.keys(&scope, "")? { - // command keys use: command-#.json - let keep = if let Some(pfx_removed) = key.name().as_str().strip_prefix("command-") { - if let Some(suf_removed) = pfx_removed.strip_suffix(".json") { - if let Ok(nr) = suf_removed.parse::() { - // Keep command if it's for the version - // before this aggregate version. 
- nr < aggregate.version() - } else { - false - } - } else { - false - } - } else { - false - }; - - if !keep { - warn!("Archiving surplus key '{}' for '{}'", key, handle); - self.kv.archive_surplus(&key)?; - } - } - - // Now store a new aggregate - self.store_snapshot(&handle, &aggregate)?; - - // Store in mem cache - self.cache_update(&handle, Arc::new(aggregate)); - } - } - } - - Ok(()) - } - /// Adds a listener that will receive all events before they are stored. pub fn add_pre_save_listener>(&mut self, sync_listener: Arc) { self.pre_save_listeners.push(sync_listener); @@ -209,41 +119,61 @@ where /// Gets the latest version for the given aggregate. Returns /// an AggregateStoreError::UnknownAggregate in case the aggregate /// does not exist. - pub fn get_latest(&self, handle: &MyHandle) -> StoreResult> { - let agg_lock = self.locks.for_handle(handle.clone()); - let _read_lock = agg_lock.read(); - self.get_latest_no_lock(handle) + pub fn get_latest(&self, handle: &MyHandle) -> Result, A::Error> { + self.execute_opt_command(handle, None, false) } - /// Adds a new aggregate instance based on the init event. - pub fn add(&self, cmd: A::InitCommand) -> StoreResult> { - let handle = cmd.handle().clone(); + pub fn save_snapshot(&self, handle: &MyHandle) -> Result, A::Error> { + self.execute_opt_command(handle, None, true) + } - let agg_lock = self.locks.for_handle(handle.clone()); - let _write_lock = agg_lock.write(); + /// Adds a new aggregate instance based on the init event. + pub fn add(&self, cmd: A::InitCommand) -> Result, A::Error> { + let scope = Self::scope_for_agg(cmd.handle()); - let processed_command_builder = - StoredCommandBuilder::::new(cmd.actor().to_string(), Time::now(), handle.clone(), 0, cmd.store()); + self.kv + .inner() + .execute(&scope, move |kv| { + // The closure needs to return a Result. + // In our case T will be a Result, A::Error>. + // So.. any kvx error will be in the outer result, while + // any aggregate related issues can still be returned + // as an err in the inner result. + let handle = cmd.handle().clone(); + + let init_command_key = Self::key_for_command(&handle, 0); + + if kv.has(&init_command_key)? { + // This is no good.. this aggregate already exists. + Ok(Err(A::Error::from(AggregateStoreError::DuplicateAggregate(handle)))) + } else { + let processed_command_builder = StoredCommandBuilder::::new( + cmd.actor().to_string(), + Time::now(), + handle.clone(), + 0, + cmd.store(), + ); - let init_event = A::process_init_command(cmd).map_err(|_| AggregateStoreError::InitError(handle.clone()))?; - let aggregate = A::init(handle.clone(), init_event.clone()); + match A::process_init_command(cmd.clone()) { + Ok(init_event) => { + let aggregate = A::init(handle.clone(), init_event.clone()); + let processed_command = processed_command_builder.finish_with_init_event(init_event); - // Store the init command. It is unlikely that this should fail, but - // if it does then there is an issue with the storage layer that we cannot - // recover from. So, exit. - let processed_command = processed_command_builder.finish_with_init_event(init_event); - if let Err(e) = self.store_command(&processed_command) { - self.exit_with_fatal_storage_error(&handle, e); - } + let json = serde_json::to_value(&processed_command)?; + kv.store(&init_command_key, json)?; - // This should not fail, but if it does then it's not as critical - // because we can always reconstitute the state without snapshots. 
- self.store_snapshot(&handle, &aggregate)?; + let arc = Arc::new(aggregate); - let arc = Arc::new(aggregate); - self.cache_update(&handle, arc.clone()); + self.cache_update(&handle, arc.clone()); - Ok(arc) + Ok(Ok(arc)) + } + Err(e) => Ok(Err(e)), + } + } + }) + .map_err(|kv_err| A::Error::from(AggregateStoreError::KeyStoreError(KeyValueError::KVError(kv_err))))? } /// Send a command to the latest aggregate referenced by the handle in the command. @@ -261,130 +191,227 @@ where /// on error: /// - save command and error, return error pub fn command(&self, cmd: A::Command) -> Result, A::Error> { - debug!("Processing command {}", cmd); - let handle = cmd.handle().clone(); - - let agg_lock = self.locks.for_handle(handle.clone()); - let _write_lock = agg_lock.write(); - - // Get the latest arc. - let mut latest = self.get_latest_no_lock(&handle)?; + self.execute_opt_command(cmd.handle(), Some(&cmd), false) + } - if let Some(version) = cmd.version() { - if version != latest.version() { - error!( - "Version conflict updating '{}', expected version: {}, found: {}", - handle, - version, - latest.version() - ); + /// Returns true if an instance exists for the id + pub fn has(&self, id: &MyHandle) -> Result { + self.kv + .has_scope(&Scope::from_segment(Segment::parse_lossy(id.as_str()))) // id should always be a valid Segment + .map_err(AggregateStoreError::KeyStoreError) + } - return Err(A::Error::from(AggregateStoreError::ConcurrentModification(handle))); - } - } + /// Lists all known ids. + pub fn list(&self) -> Result, AggregateStoreError> { + self.aggregates() + } - let processed_command_builder = StoredCommandBuilder::new( - cmd.actor().to_string(), - Time::now(), - cmd.handle().clone(), - latest.version(), - cmd.store(), - ); - - match latest.process_command(cmd) { - Err(e) => { - // Store the processed command with the error. - // - // If persistence fails, then complain loudly, and exit. Krill should not keep running, because this would - // result in discrepancies between state in memory and state on disk. Let Krill crash and an operator investigate. - // See issue: https://github.com/NLnetLabs/krill/issues/322 - let processed_command = processed_command_builder.finish_with_error(&e); - if let Err(e) = self.store_command(&processed_command) { - self.exit_with_fatal_storage_error(&handle, e); + /// Get the latest aggregate and optionally apply a command to it. + /// + /// Uses `kvx::execute` to ensure that the whole operation is done inside + /// a transaction (postgres) or lock (disk). + fn execute_opt_command( + &self, + handle: &MyHandle, + cmd_opt: Option<&A::Command>, + save_snapshot: bool, + ) -> Result, A::Error> { + self.kv + .inner() + .execute(&Self::scope_for_agg(handle), |kv| { + // The closure needs to return a Result. + // In our case T will be a Result, A::Error>. + // So.. any kvx error will be in the outer result, while + // any aggregate related issues can still be returned + // as an err in the inner result. + + // Get the aggregate from the cache, or get it from the store. + let mut changed_from_cached = false; + + let res = match self.cache_get(handle) { + Some(arc) => Ok(arc), + None => { + // There was no cached aggregate, so try to get it + // or construct it from the store, and remember that + // it was changed compared to the (non-existent) cached + // version so that we know that should update the cache + // later. + changed_from_cached = true; + + let snapshot_key = Self::key_for_snapshot(handle); + match kv.get(&snapshot_key)? 
{ + Some(value) => { + let agg: A = serde_json::from_value(value)?; + Ok(Arc::new(agg)) + } + None => { + let init_key = Self::key_for_command(handle, 0); + match kv.get(&init_key)? { + Some(value) => { + let init_command: StoredCommand = serde_json::from_value(value)?; + + match init_command.into_init() { + Some(init_event) => { + let agg = A::init(handle.clone(), init_event); + Ok(Arc::new(agg)) + } + None => Err(A::Error::from(AggregateStoreError::UnknownAggregate( + handle.clone(), + ))), + } + } + None => Err(A::Error::from(AggregateStoreError::UnknownAggregate(handle.clone()))), + } + } + } + } + }; + + let mut agg = match res { + Err(e) => return Ok(Err(e)), + Ok(agg) => agg, + }; + + // We have some version, cached or not. Now see if there are any further + // changes that ought to be applied. If any changes are found, be sure + // to mark the aggregate as changed so that the we can update the cache + // later. + let next_command = Self::key_for_command(handle, agg.version()); + if kv.has(&next_command)? { + let aggregate = Arc::make_mut(&mut agg); + + // check and apply any applicable processed commands until: + // - there are no more processed commands + // - the command cannot be applied (return an error) + loop { + let version = aggregate.version(); + + let key = Self::key_for_command(handle, version); + + match kv.get(&key)? { + None => break, + Some(value) => { + let command: StoredCommand = serde_json::from_value(value)?; + aggregate.apply_command(command); + changed_from_cached = true; + } + } + } } - // Update the cached aggregate so that its version is incremented - let agg = Arc::make_mut(&mut latest); - agg.increment_version(); + // If a command was passed in, try to apply it, and make sure that it is + // preserved (i.e. with events or an error). + let res = if let Some(cmd) = cmd_opt { + let aggregate = Arc::make_mut(&mut agg); - let mut cache = self.cache.write().unwrap(); - cache.insert(handle, Arc::new(agg.clone())); + let version = aggregate.version(); - Err(e) - } - Ok(events) => { - if events.is_empty() { - Ok(latest) // note: no-op no version info will be updated - } else { - // The command contains some effect. - let processed_command = processed_command_builder.finish_with_events(events); + let processed_command_builder = StoredCommandBuilder::::new( + cmd.actor().to_string(), + Time::now(), + cmd.handle().clone(), + version, + cmd.store(), + ); - // We will need to apply the command first because: - // a) then we are really, really, sure that it can be applied (no panics) - // b) more importantly, we will need to pass an updated aggregate to pre-save listeners - // - // Unfortunately, this means that we will need to clone the command. - let agg = Arc::make_mut(&mut latest); - agg.apply_command(processed_command.clone()); - - // If the command contained any events then we should inform the - // pre-save listeners. They may still generate errors, and if - // they do, then we will exit here with an error, without saving. - if let Some(events) = processed_command.events() { - for pre_save_listener in &self.pre_save_listeners { - pre_save_listener.as_ref().listen(agg, events)?; - } - } + let command_key = Self::key_for_command(handle, version); - // Store the processed command - the effect could be an error that - // we want to keep, or some events that update the state. + // The new command key MUST NOT be in use. If it is in use, then this points + // at a bug in Krill transaction / locking handling that we cannot recover + // from. 
So, exit here, as there is nothing sensible we can do with this error. // - // If persistence fails, then complain loudly, and exit. Krill should not keep running, because this would - // result in discrepancies between state in memory and state on disk. Let Krill crash and an operator investigate. // See issue: https://github.com/NLnetLabs/krill/issues/322 - if let Err(e) = self.store_command(&processed_command) { - self.exit_with_fatal_storage_error(&handle, e); + if kv.has(&command_key)? { + error!("Command key for '{handle}' version '{version}' already exists."); + error!("This is a bug. Please report this issue to rpki-team@nlnetlabs.nl."); + error!("Krill will exit. If this issue repeats, consider removing {}.", handle); + std::process::exit(1); } - // For now, we also update the snapshot on disk on every change. - // See issue #1084 - self.store_snapshot(&handle, agg)?; + match aggregate.process_command(cmd.clone()) { + Err(e) => { + // Store the processed command with the error. + let processed_command = processed_command_builder.finish_with_error(&e); + + let json = serde_json::to_value(&processed_command)?; + aggregate.apply_command(processed_command); - // Update the memory cache. - let mut cache = self.cache.write().unwrap(); - cache.insert(handle, Arc::new(agg.clone())); + changed_from_cached = true; + kv.store(&command_key, json)?; + + Err(e) + } + Ok(events) => { + // note: An empty events vec may result from a no-op command. We don't save those. + if !events.is_empty() { + // The command contains some effect. + let processed_command = processed_command_builder.finish_with_events(events); + + // We will need to apply the command first because: + // a) then we are really, really, sure that it can be applied (no panics) + // b) more importantly, we will need to pass an updated aggregate to pre-save listeners + // + // Unfortunately, this means that we will need to clone the command. + aggregate.apply_command(processed_command.clone()); + + // If the command contained any events then we should inform the + // pre-save listeners. They may still generate errors, and if + // they do, then we return with an error, without saving. + let mut opt_err: Option = None; + if let Some(events) = processed_command.events() { + for pre_save_listener in &self.pre_save_listeners { + if let Err(e) = pre_save_listener.as_ref().listen(aggregate, events) { + opt_err = Some(e); + break; + } + } + } - // Now send the events to the 'post-save' listeners. - if let Some(events) = processed_command.events() { - for listener in &self.post_save_listeners { - listener.as_ref().listen(agg, events); + if let Some(e) = opt_err { + // A pre-save listener reported and error. Return with the error + // and do not save the updated aggregate. + changed_from_cached = false; + Err(e) + } else { + // Save the latest command. + let json = serde_json::to_value(&processed_command)?; + kv.store(&command_key, json)?; + + // Now send the events to the 'post-save' listeners. 
+ if let Some(events) = processed_command.events() { + for listener in &self.post_save_listeners { + listener.as_ref().listen(aggregate, events); + } + } + + Ok(()) + } + } else { + Ok(()) + } } } + } else { + Ok(()) + }; - Ok(latest) + if changed_from_cached { + self.cache_update(handle, agg.clone()); } - } - } - } - /// Returns true if an instance exists for the id - pub fn has(&self, id: &MyHandle) -> Result { - self.kv - .has_scope(&Scope::from_segment(Segment::parse_lossy(id.as_str()))) // id should always be a valid Segment - .map_err(AggregateStoreError::KeyStoreError) - } - - /// Lists all known ids. - pub fn list(&self) -> Result, AggregateStoreError> { - self.aggregates() - } + if save_snapshot { + let key = Self::key_for_snapshot(handle); + let value = serde_json::to_value(agg.as_ref())?; + kv.store(&key, value)?; + } - /// Exit with a fatal storage error - fn exit_with_fatal_storage_error(&self, handle: &MyHandle, e: impl fmt::Display) { - error!("Cannot save state for '{}'. Got error: {}", handle, e); - error!("Please check permissions and storage space for: {}", self.kv); - error!("Krill will now exit to prevent discrepancies between in-memory and stored state."); - std::process::exit(1); + if let Err(e) = res { + Ok(Err(e)) + } else { + Ok(Ok(agg)) + } + }) + .map_err(|kv_err| A::Error::from(AggregateStoreError::KeyStoreError(KeyValueError::KVError(kv_err))))? } } @@ -471,17 +498,10 @@ where /// Get the command for this key, if it exists pub fn get_command(&self, id: &MyHandle, version: u64) -> Result, AggregateStoreError> { let key = Self::key_for_command(id, version); - match self.kv.get(&key) { - Ok(Some(cmd)) => Ok(cmd), - Ok(None) => Err(AggregateStoreError::CommandNotFound(id.clone(), version)), - Err(e) => { - error!( - "Found corrupt command at: {}, will try to archive. Error was: {}", - key, e - ); - self.kv.archive_corrupt(&key)?; - Err(AggregateStoreError::CommandCorrupt(id.clone(), version)) - } + + match self.kv.get_transactional(&key)? { + Some(cmd) => Ok(cmd), + None => Err(AggregateStoreError::CommandNotFound(id.clone(), version)), } } } @@ -490,10 +510,6 @@ impl AggregateStore where A::Error: From, { - fn has_updates(&self, id: &MyHandle, aggregate: &A) -> bool { - self.get_command(id, aggregate.version()).is_ok() - } - fn cache_get(&self, id: &MyHandle) -> Option> { self.cache.read().unwrap().get(id).cloned() } @@ -505,37 +521,6 @@ where fn cache_update(&self, id: &MyHandle, arc: Arc) { self.cache.write().unwrap().insert(id.clone(), arc); } - - // This fn uses no lock of its own, so that we can use it in the context - // of the correct lock obtained by the caller. I.e. if we were to get a - // read lock here, then we could not use it inside of `fn process` which - // wants a write lock for the aggregate. - fn get_latest_no_lock(&self, handle: &MyHandle) -> StoreResult> { - trace!("Trying to load aggregate id: {}", handle); - - match self.cache_get(handle) { - None => match self.get_aggregate(handle, None)? 
{ - None => { - error!("Could not load aggregate with id: {} from disk", handle); - Err(AggregateStoreError::UnknownAggregate(handle.clone())) - } - Some(agg) => { - let arc: Arc = Arc::new(agg); - self.cache_update(handle, arc.clone()); - trace!("Loaded aggregate id: {} from disk", handle); - Ok(arc) - } - }, - Some(mut arc) => { - if self.has_updates(handle, &arc) { - let agg = Arc::make_mut(&mut arc); - self.update_aggregate(handle, agg, None)?; - } - trace!("Loaded aggregate id: {} from memory", handle); - Ok(arc) - } - } - } } /// # Manage values in the KeyValue store @@ -544,30 +529,17 @@ impl AggregateStore where A::Error: From, { - fn key_for_snapshot(agg: &MyHandle) -> Key { - Key::new_scoped( - Scope::from_segment(Segment::parse_lossy(agg.as_str())), // agg should always be a valid Segment - segment!("snapshot.json"), - ) + fn scope_for_agg(agg: &MyHandle) -> Scope { + Scope::from_segment(Segment::parse_lossy(agg.as_str())) // agg should always be a valid Segment } - fn key_for_backup_snapshot(agg: &MyHandle) -> Key { - Key::new_scoped( - Scope::from_segment(Segment::parse_lossy(agg.as_str())), // agg should always be a valid Segment - segment!("snapshot-bk.json"), - ) - } - - fn key_for_new_snapshot(agg: &MyHandle) -> Key { - Key::new_scoped( - Scope::from_segment(Segment::parse_lossy(agg.as_str())), // agg should always be a valid Segment - segment!("snapshot-new.json"), - ) + fn key_for_snapshot(agg: &MyHandle) -> Key { + Key::new_scoped(Self::scope_for_agg(agg), segment!("snapshot.json")) } fn key_for_command(agg: &MyHandle, version: u64) -> Key { Key::new_scoped( - Scope::from_segment(Segment::parse_lossy(agg.as_str())), // agg should always be a valid Segment + Self::scope_for_agg(agg), Segment::parse(&format!("command-{}.json", version)).unwrap(), // cannot panic as a u64 cannot contain a Scope::SEPARATOR ) } @@ -585,183 +557,16 @@ where Ok(res) } - fn store_command(&self, command: &StoredCommand) -> Result<(), AggregateStoreError> { - let key = Self::key_for_command(command.handle(), command.version()); - - self.kv.store_new(&key, command)?; - Ok(()) - } - - /// Get the latest aggregate - /// limit to the event nr, i.e. the resulting aggregate version will be limit + 1 - fn get_aggregate(&self, id: &MyHandle, limit: Option) -> Result, AggregateStoreError> { - // 1) Try to get a snapshot. - // 2) If that fails, or if it exceeds the limit, try the backup - // 3) If that fails, try to get the init event. - // - // Then replay all newer events that can be found up to the version (or latest if version is None) - trace!("Getting aggregate for '{}'", id); - - let mut aggregate_opt: Option = None; - - let snapshot_key = Self::key_for_snapshot(id); - - match self.kv.get::(&snapshot_key) { - Err(e) => { - // snapshot file was present and corrupt - error!( - "Could not parse snapshot for '{}', archiving as corrupt. 
Error was: {}", - id, e - ); - self.kv.archive_corrupt(&snapshot_key)?; - } - Ok(Some(agg)) => { - // snapshot present and okay - trace!("Found snapshot for '{}'", id); - if let Some(limit) = limit { - if limit >= agg.version() - 1 { - aggregate_opt = Some(agg) - } else { - warn!("Snapshot for '{}' is after version '{}', archiving it", id, limit); - self.kv.archive_surplus(&snapshot_key)?; - } - } else { - debug!("Found valid snapshot for '{}'", id); - aggregate_opt = Some(agg) - } - } - Ok(None) => {} - } - - if aggregate_opt.is_none() { - warn!("No suitable snapshot found for '{}' will try backup snapshot", id); - let backup_snapshot_key = Self::key_for_backup_snapshot(id); - match self.kv.get::(&backup_snapshot_key) { - Err(e) => { - // backup snapshot present and corrupt - error!( - "Could not parse backup snapshot for '{}', archiving as corrupt. Error: {}", - id, e - ); - self.kv.archive_corrupt(&backup_snapshot_key)?; - } - Ok(Some(agg)) => { - trace!("Found backup snapshot for '{}'", id); - if let Some(limit) = limit { - if limit >= agg.version() - 1 { - aggregate_opt = Some(agg) - } else { - warn!( - "Backup snapshot for '{}' is after version '{}', archiving it", - id, limit - ); - self.kv.archive_surplus(&backup_snapshot_key)?; - } - } else { - debug!("Found valid backup snapshot for '{}'", id); - aggregate_opt = Some(agg) - } - } - Ok(None) => {} - } - } - - if aggregate_opt.is_none() { - warn!( - "No suitable snapshot for '{}' will rebuild state from events. This can take some time.", - id - ); - - if let Ok(init_command) = self.get_command(id, 0) { - aggregate_opt = init_command.into_init().map(|init| A::init(id.clone(), init)); - } - } - - match aggregate_opt { - None => Ok(None), - Some(mut aggregate) => { - self.update_aggregate(id, &mut aggregate, limit)?; - Ok(Some(aggregate)) - } - } - } - - fn update_aggregate( - &self, - id: &MyHandle, - aggregate: &mut A, - limit: Option, - ) -> Result<(), AggregateStoreError> { - let start = aggregate.version(); - - if let Some(limit) = limit { - debug!("Will update '{}' from version: {} to: {}", id, start, limit + 1); - } else { - debug!("Will update '{}' to latest version", id); - } - - // check and apply any applicable processed commands until: - // - the limit is reached (if supplied) - // - there are no more processed commands - // - the command cannot be applied (return an error) - loop { - let version = aggregate.version(); - if let Some(limit) = limit { - if limit == version - 1 { - debug!("Updated '{}' to: {}", id, version); - break; - } - } - - if let Ok(command) = self.get_command(id, version) { - aggregate.apply_command(command); - debug!("Applied command {} to aggregate {}", version, id); - } else { - debug!("No more processed commands found. updated '{}' to: {}", id, version); - break; - } - } - - Ok(()) - } - - /// Saves the latest snapshot - backs up previous snapshot, and drops previous backup. - /// Uses moves to ensure that files are written entirely before they are made available - /// for reading. - pub fn store_snapshot(&self, id: &MyHandle, aggregate: &V) -> Result<(), AggregateStoreError> { - let snapshot_new = Self::key_for_new_snapshot(id); - let snapshot_current = Self::key_for_snapshot(id); - let snapshot_backup = Self::key_for_backup_snapshot(id); - - self.kv.store(&snapshot_new, aggregate)?; - - if self.kv.has(&snapshot_backup)? { - self.kv.drop_key(&snapshot_backup)?; - } - if self.kv.has(&snapshot_current)? 
{ - self.kv.move_key(&snapshot_current, &snapshot_backup)?; - } - self.kv.move_key(&snapshot_new, &snapshot_current)?; - - Ok(()) - } - /// Drop an aggregate, completely. Handle with care! pub fn drop_aggregate(&self, id: &MyHandle) -> Result<(), AggregateStoreError> { - { - // First get write access - ensure that no one is using this - let agg_lock = self.locks.for_handle(id.clone()); - let _write_lock = agg_lock.write(); - - self.cache_remove(id); - self.kv - .drop_scope(&Scope::from_segment(Segment::parse_lossy(id.as_str())))?; - // id should always be a valid Segment - } + let scope = Self::scope_for_agg(id); + + self.kv + .inner() + .execute(&scope, |kv| kv.delete_scope(&scope)) + .map_err(|kv_err| AggregateStoreError::KeyStoreError(KeyValueError::KVError(kv_err)))?; - // Then drop the lock for this aggregate immediately. The write lock is - // out of scope now, to ensure we do not get into a deadlock. - self.locks.drop_handle(id); + self.cache_remove(id); Ok(()) } } @@ -775,6 +580,7 @@ pub enum AggregateStoreError { KeyStoreError(KeyValueError), NotInitialized, UnknownAggregate(MyHandle), + DuplicateAggregate(MyHandle), InitError(MyHandle), ReplayError(MyHandle, u64, u64), ConcurrentModification(MyHandle), @@ -793,6 +599,7 @@ impl fmt::Display for AggregateStoreError { AggregateStoreError::KeyStoreError(e) => write!(f, "KeyStore Error: {}", e), AggregateStoreError::NotInitialized => write!(f, "This aggregate store is not initialized"), AggregateStoreError::UnknownAggregate(handle) => write!(f, "unknown entity: {}", handle), + AggregateStoreError::DuplicateAggregate(handle) => write!(f, "duplicate entity: {}", handle), AggregateStoreError::InitError(handle) => write!(f, "Command 0 for '{}' has no init", handle), AggregateStoreError::ReplayError(handle, version, fail_version) => write!( f, diff --git a/src/daemon/ca/manager.rs b/src/daemon/ca/manager.rs index 2988fcd28..50df70dff 100644 --- a/src/daemon/ca/manager.rs +++ b/src/daemon/ca/manager.rs @@ -116,7 +116,7 @@ impl CaManager { // This is slow, but it will ensure that all commands and events are accounted for, // and there are no incomplete changes where some but not all files for a change were // written to disk. - ca_store.recover()?; + todo!("issue #1086"); } else if let Err(e) = ca_store.warm() { // Otherwise we just tried to 'warm' the cache. This serves two purposes: // 1. this ensures that all `CertAuth` structs are available in memory @@ -127,7 +127,7 @@ impl CaManager { "Could not warm up cache, data seems corrupt. Will try to recover!! Error was: {}", e ); - ca_store.recover()?; + todo!("issue #1086"); } // Create the `CaObjectStore` that is responsible for maintaining CA objects: the `CaObjects` @@ -236,7 +236,6 @@ impl CaManager { .as_ref() .ok_or_else(|| Error::custom("TA proxy not enabled"))? .get_latest(&ta_handle) - .map_err(Error::AggregateStoreError) } /// Gets the Trust Anchor Signer, if present. Returns an error if the TA is uninitialized. @@ -246,7 +245,6 @@ impl CaManager { .as_ref() .ok_or_else(|| Error::custom("TA signer not enabled"))? .get_latest(&ta_handle) - .map_err(Error::AggregateStoreError) } /// Initialises the (one) Trust Anchor proxy. 
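A note on the closure contract used in the rewritten store code above: the closure handed to kvx's execute returns a nested result, so storage-level failures surface in the outer result while domain errors are returned in the inner one, which keeps kvx error types out of the calling code. A minimal sketch of that shape, using only operations that appear in these changes (get, store, and the scope/key constructors) and assuming, as the closures above do, that kvx::Error converts from serde_json::Error; the counter logic and the MyError type are invented for illustration:

    #[derive(Debug)]
    enum MyError {
        NotFound,
    }

    fn bump_counter(
        kv: &kvx::KeyValueStore,
        scope: &kvx::Scope,
        key: &kvx::Key,
    ) -> Result<Result<u64, MyError>, kvx::Error> {
        kv.execute(scope, |kv| {
            match kv.get(key)? {
                // A domain-level problem: report it in the inner result so the
                // caller never needs to know about kvx errors.
                None => Ok(Err(MyError::NotFound)),
                Some(value) => {
                    // kvx and serde errors propagate via `?` into the outer result.
                    let current: u64 = serde_json::from_value(value)?;
                    kv.store(key, serde_json::to_value(current + 1)?)?;
                    Ok(Ok(current + 1))
                }
            }
        })
    }
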
diff --git a/src/daemon/config.rs b/src/daemon/config.rs index bf4089b73..1570e00a9 100644 --- a/src/daemon/config.rs +++ b/src/daemon/config.rs @@ -1419,14 +1419,11 @@ impl Config { )); } - if self.default_signer.is_named() { - if self.find_signer_reference(&self.default_signer).is_none() { - return Err(ConfigError::other(&format!( - "'{}' cannot be used as the 'default_signer' as no signer with that name is defined", - self.default_signer.name() - ))); - } - } else { + if self.default_signer.is_named() && self.find_signer_reference(&self.default_signer).is_none() { + return Err(ConfigError::other(&format!( + "'{}' cannot be used as the 'default_signer' as no signer with that name is defined", + self.default_signer.name() + ))); } if self.one_off_signer.is_named() && self.find_signer_reference(&self.one_off_signer).is_none() { diff --git a/src/daemon/properties/mod.rs b/src/daemon/properties/mod.rs index 6c70ff95b..232545825 100644 --- a/src/daemon/properties/mod.rs +++ b/src/daemon/properties/mod.rs @@ -276,7 +276,7 @@ impl PropertiesManager { PropertiesInitCommandDetails { krill_version }, &self.system_actor, ); - self.store.add(cmd).map_err(Error::AggregateStoreError) + self.store.add(cmd) } /// Returns the current KrillVersion used for the data store @@ -297,9 +297,7 @@ impl PropertiesManager { } fn properties(&self) -> KrillResult> { - self.store - .get_latest(&self.main_key) - .map_err(Error::AggregateStoreError) + self.store.get_latest(&self.main_key) } } diff --git a/src/pubd/repository.rs b/src/pubd/repository.rs index dadb7da8e..eafeb3edf 100644 --- a/src/pubd/repository.rs +++ b/src/pubd/repository.rs @@ -1522,13 +1522,13 @@ impl RepositoryAccessProxy { if store.has(&key)? { if config.always_recover_data { - store.recover()?; + todo!("issue #1086"); } else if let Err(e) = store.warm() { error!( "Could not warm up cache, storage seems corrupt, will try to recover!! Error was: {}", e ); - store.recover()?; + todo!("issue #1086"); } } diff --git a/src/upgrades/mod.rs b/src/upgrades/mod.rs index 358dc23ab..898a1785a 100644 --- a/src/upgrades/mod.rs +++ b/src/upgrades/mod.rs @@ -417,23 +417,13 @@ pub trait UpgradeAggregateStorePre0_14 { "Will verify the migration by rebuilding '{}' from migrated commands", &scope ); - let latest = self.preparation_aggregate_store().get_latest(&handle).map_err(|e| { + let _latest = self.preparation_aggregate_store().save_snapshot(&handle).map_err(|e| { UpgradeError::Custom(format!( "Could not rebuild state after migrating CA '{}'! Error was: {}.", handle, e )) })?; - // Store snapshot to avoid having to re-process the deltas again in future - self.preparation_aggregate_store() - .store_snapshot(&handle, latest.as_ref()) - .map_err(|e| { - UpgradeError::Custom(format!( - "Could not save snapshot for CA '{}' after migration! Disk full?!? Error was: {}.", - handle, e - )) - })?; - // Call the post command migration hook, this will do nothing // unless the implementer of this trait overrode it. self.post_command_migration(&handle)?; From b9655ddc8a899fa5511bb9033465de6fc4323b48 Mon Sep 17 00:00:00 2001 From: Tim Bruijnzeels Date: Thu, 7 Sep 2023 12:19:40 +0200 Subject: [PATCH 13/18] Update snapshots in the background. 
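Snapshots are no longer written out on every state change, so refresh them periodically in the background instead: the scheduler plans a snapshot task shortly after startup and then re-plans it every 24 hours, walking all aggregate stores and write-ahead-log stores and asking each to rewrite its snapshots. In outline, the aggregate store half of the helper added to src/daemon/scheduler.rs in this commit looks roughly like the sketch below; the name and log messages here are illustrative (the actual helper is update_aggregate_store_snapshots, with a WalStore counterpart), and failures are only logged so the 24 hour cycle keeps running:

    fn refresh_aggregate_snapshots<A: Aggregate>(storage_uri: &Url, namespace: &Namespace)
    where
        A::Error: From<AggregateStoreError>,
    {
        match AggregateStore::<A>::create(storage_uri, namespace, false) {
            Ok(store) => {
                // Rebuild and store the latest snapshot for every instance in this namespace.
                if let Err(e) = store.update_snapshots() {
                    error!("Could not update snapshots for {}, will retry in 24 hours: {}", namespace, e);
                }
            }
            Err(e) => error!("Could not open store for {}: {}", namespace, e),
        }
    }

The scheduler calls this for the CA server, signer, properties and publication server access namespaces, and uses the WalStore variant for the publication server content.
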
--- src/commons/eventsourcing/store.rs | 14 ++++- src/commons/eventsourcing/wal.rs | 11 +++- src/daemon/scheduler.rs | 82 +++++++++++++++++++++++++++--- src/pubd/manager.rs | 9 ---- src/pubd/repository.rs | 7 --- 5 files changed, 96 insertions(+), 27 deletions(-) diff --git a/src/commons/eventsourcing/store.rs b/src/commons/eventsourcing/store.rs index 9a70edc6b..01c94c918 100644 --- a/src/commons/eventsourcing/store.rs +++ b/src/commons/eventsourcing/store.rs @@ -43,8 +43,8 @@ pub struct AggregateStore { /// impl AggregateStore { /// Creates an AggregateStore using the given storage url - pub fn create(storage_uri: &Url, name_space: &Namespace, use_history_cache: bool) -> StoreResult { - let kv = KeyValueStore::create(storage_uri, name_space)?; + pub fn create(storage_uri: &Url, namespace: &Namespace, use_history_cache: bool) -> StoreResult { + let kv = KeyValueStore::create(storage_uri, namespace)?; Self::create_from_kv(kv, use_history_cache) } @@ -123,6 +123,16 @@ where self.execute_opt_command(handle, None, false) } + /// Updates the snapshots for all entities in this store. + pub fn update_snapshots(&self) -> Result<(), A::Error> { + for handle in self.list()? { + self.save_snapshot(&handle)?; + } + + Ok(()) + } + + /// Gets the latest version for the given aggregate and updates the snapshot. pub fn save_snapshot(&self, handle: &MyHandle) -> Result, A::Error> { self.execute_opt_command(handle, None, true) } diff --git a/src/commons/eventsourcing/wal.rs b/src/commons/eventsourcing/wal.rs index 84560c130..e34638468 100644 --- a/src/commons/eventsourcing/wal.rs +++ b/src/commons/eventsourcing/wal.rs @@ -322,10 +322,19 @@ impl WalStore { } } + pub fn update_snapshots(&self) -> WalStoreResult<()> { + for handle in self.list()? { + self.update_snapshot(&handle, false)?; + } + Ok(()) + } + /// Update snapshot and archive or delete old wal sets /// /// This is a separate function because serializing a large instance can - /// be expensive. + /// be expensive. Note that the archive bool argument is (currently) always + /// set to false, but it has been added to support archiving - rather than + /// deleting old change sets - in future. pub fn update_snapshot(&self, handle: &MyHandle, archive: bool) -> WalStoreResult<()> { // Note that we do not need to keep a lock for the instance when we update the snapshot. 
// This function just updates the latest snapshot in the key value store, and it removes diff --git a/src/daemon/scheduler.rs b/src/daemon/scheduler.rs index 688f7d348..b69509864 100644 --- a/src/daemon/scheduler.rs +++ b/src/daemon/scheduler.rs @@ -3,25 +3,36 @@ use std::{collections::HashMap, sync::Arc, time::Duration}; +use kvx::Namespace; use tokio::time::sleep; use rpki::ca::{ idexchange::{CaHandle, ParentHandle}, provisioning::{ResourceClassName, RevocationRequest}, }; +use url::Url; use crate::{ - commons::{actor::Actor, api::Timestamp, bgp::BgpAnalyser, KrillResult}, + commons::{ + actor::Actor, + api::Timestamp, + bgp::BgpAnalyser, + crypto::dispatch::signerinfo::SignerInfo, + eventsourcing::{Aggregate, AggregateStore, WalStore, WalSupport}, + KrillResult, + }, constants::{ - SCHEDULER_INTERVAL_RENEW_MINS, SCHEDULER_INTERVAL_REPUBLISH_MINS, SCHEDULER_RESYNC_REPO_CAS_THRESHOLD, - SCHEDULER_USE_JITTER_CAS_THRESHOLD, + CASERVER_NS, PROPERTIES_NS, PUBSERVER_CONTENT_NS, PUBSERVER_NS, SCHEDULER_INTERVAL_RENEW_MINS, + SCHEDULER_INTERVAL_REPUBLISH_MINS, SCHEDULER_RESYNC_REPO_CAS_THRESHOLD, SCHEDULER_USE_JITTER_CAS_THRESHOLD, + SIGNERS_NS, }, daemon::{ - ca::CaManager, + ca::{CaManager, CertAuth}, config::Config, mq::{in_hours, in_minutes, now, Task, TaskQueue}, + properties::Properties, }, - pubd::RepositoryManager, + pubd::{RepositoryAccess, RepositoryContent, RepositoryManager}, }; #[cfg(feature = "multi-user")] @@ -194,7 +205,10 @@ impl Scheduler { #[cfg(feature = "multi-user")] self.tasks.sweep_login_cache(in_minutes(1)); - self.tasks.update_snapshots(in_hours(24)); + // Plan updating snapshots soon after a restart. + // This also ensures that this task gets triggered in long + // running tests, such as functional_parent_child.rs. + self.tasks.update_snapshots(now()); Ok(()) } @@ -322,11 +336,63 @@ impl Scheduler { Ok(()) } + // Call update_snapshots on all AggregateStores and WalStores fn update_snapshots(&self) -> KrillResult<()> { - if let Err(e) = self.repo_manager.update_snapshots() { - error!("Could not update snapshots on disk! Error: {}", e); + fn update_aggregate_store_snapshots(storage_uri: &Url, namespace: &Namespace) { + match AggregateStore::::create(storage_uri, namespace, false) { + Err(e) => { + // Note: this is highly unlikely.. probably something else is broken and Krill + // would have panicked as a result already. + error!( + "Could not update snapshots for {} will try again in 24 hours. Error: {}", + namespace, e + ); + } + Ok(store) => { + if let Err(e) = store.update_snapshots() { + // Note: this is highly unlikely.. probably something else is broken and Krill + // would have panicked as a result already. + error!( + "Could not update snapshots for {} will try again in 24 hours. Error: {}", + namespace, e + ); + } else { + info!("Updated snapshots for {}", namespace); + } + } + } + } + + fn update_wal_store_snapshots(storage_uri: &Url, namespace: &Namespace) { + match WalStore::::create(storage_uri, namespace) { + Err(e) => { + // Note: this is highly unlikely.. probably something else is broken and Krill + // would have panicked as a result already. + error!( + "Could not update snapshots for {} will try again in 24 hours. Error: {}", + namespace, e + ); + } + Ok(store) => { + if let Err(e) = store.update_snapshots() { + // Note: this is highly unlikely.. probably something else is broken and Krill + // would have panicked as a result already. + error!( + "Could not update snapshots for {} will try again in 24 hours. 
Error: {}", + namespace, e + ); + } + } + } } + update_aggregate_store_snapshots::(&self.config.storage_uri, CASERVER_NS); + update_aggregate_store_snapshots::(&self.config.storage_uri, SIGNERS_NS); + update_aggregate_store_snapshots::(&self.config.storage_uri, PROPERTIES_NS); + update_aggregate_store_snapshots::(&self.config.storage_uri, PUBSERVER_NS); + + update_wal_store_snapshots::(&self.config.storage_uri, PUBSERVER_CONTENT_NS); + self.tasks.update_snapshots(in_hours(24)); Ok(()) diff --git a/src/pubd/manager.rs b/src/pubd/manager.rs index 82ab27f06..6cdd99109 100644 --- a/src/pubd/manager.rs +++ b/src/pubd/manager.rs @@ -85,15 +85,6 @@ impl RepositoryManager { self.content.clear() } - /// Update snapshots on disk for faster re-starts - pub fn update_snapshots(&self) -> KrillResult<()> { - if self.initialized()? { - self.content.update_snapshots() - } else { - Ok(()) - } - } - /// List all current publishers pub fn publishers(&self) -> KrillResult> { self.access.publishers() diff --git a/src/pubd/repository.rs b/src/pubd/repository.rs index eafeb3edf..7d395e005 100644 --- a/src/pubd/repository.rs +++ b/src/pubd/repository.rs @@ -117,13 +117,6 @@ impl RepositoryContentProxy { Ok(()) } - // Update snapshot on disk for faster load times after restart. - pub fn update_snapshots(&self) -> KrillResult<()> { - self.store - .update_snapshot(&self.default_handle, false) - .map_err(Error::WalStoreError) - } - /// Return the repository content stats pub fn stats(&self) -> KrillResult { self.get_default_content().map(|content| content.stats()) From 2e56ff74c6d0dea136a813af88421bcecda41c6d Mon Sep 17 00:00:00 2001 From: Tim Bruijnzeels Date: Thu, 7 Sep 2023 16:15:35 +0200 Subject: [PATCH 14/18] Use transactions/locks for WalStore. --- src/commons/eventsourcing/locks.rs | 91 ------- src/commons/eventsourcing/mod.rs | 2 - src/commons/eventsourcing/store.rs | 4 +- src/commons/eventsourcing/wal.rs | 386 ++++++++++++++++------------- src/pubd/repository.rs | 4 +- 5 files changed, 214 insertions(+), 273 deletions(-) delete mode 100644 src/commons/eventsourcing/locks.rs diff --git a/src/commons/eventsourcing/locks.rs b/src/commons/eventsourcing/locks.rs deleted file mode 100644 index 24bd98230..000000000 --- a/src/commons/eventsourcing/locks.rs +++ /dev/null @@ -1,91 +0,0 @@ -//! Support locking on Handles so that updates can be -//! performed sequentially. Useful for both event sourced -//! types (Aggregates) as well as write-ahead logging -//! types. - -//------------ HandleLocks --------------------------------------------------- - -use std::{ - collections::HashMap, - sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}, -}; - -use rpki::ca::idexchange::MyHandle; - -#[derive(Debug, Default)] -struct HandleLockMap(HashMap>); - -impl HandleLockMap { - fn create_handle_lock(&mut self, handle: MyHandle) { - self.0.insert(handle, RwLock::new(())); - } - - fn has_handle(&self, handle: &MyHandle) -> bool { - self.0.contains_key(handle) - } - - fn drop_handle_lock(&mut self, handle: &MyHandle) { - self.0.remove(handle); - } -} - -pub struct HandleLock<'a> { - // Needs a read reference to the map that holds the RwLock - // for the handle. - map: RwLockReadGuard<'a, HandleLockMap>, - handle: MyHandle, -} - -impl HandleLock<'_> { - // panics if there is no entry for the handle. - pub fn read(&self) -> RwLockReadGuard<'_, ()> { - self.map.0.get(&self.handle).unwrap().read().unwrap() - } - - // panics if there is no entry for the handle. 
- pub fn write(&self) -> RwLockWriteGuard<'_, ()> { - self.map.0.get(&self.handle).unwrap().write().unwrap() - } -} - -/// This structure is used to ensure that we have unique access to an instance for a [`Handle`] -/// managed in an [`AggregateStore`] or [`WalStore`]. Currently uses a `std::sync::RwLock`, but -/// this should be improved to use an async lock instead (e.g. `tokio::sync::RwLock`). -/// This has not been done yet, because that change is quite pervasive. -#[derive(Debug, Default)] -pub struct HandleLocks { - locks: RwLock, -} - -impl HandleLocks { - pub fn for_handle(&self, handle: MyHandle) -> HandleLock<'_> { - { - // Return the lock *if* there is an entry for the handle - let map = self.locks.read().unwrap(); - if map.has_handle(&handle) { - return HandleLock { map, handle }; - } - } - - { - // There was no entry.. try to create an entry for the - // handle. - let mut map = self.locks.write().unwrap(); - - // But.. first check again, because we could have had a - // race condition if two threads call this function. - if !map.has_handle(&handle) { - map.create_handle_lock(handle.clone()); - } - } - - // Entry probably exists now, but recurse in case the entry - // was dropped immediately after creation. - self.for_handle(handle) - } - - pub fn drop_handle(&self, handle: &MyHandle) { - let mut map = self.locks.write().unwrap(); - map.drop_handle_lock(handle); - } -} diff --git a/src/commons/eventsourcing/mod.rs b/src/commons/eventsourcing/mod.rs index d94d65333..1d02e44cb 100644 --- a/src/commons/eventsourcing/mod.rs +++ b/src/commons/eventsourcing/mod.rs @@ -18,8 +18,6 @@ pub use self::store::*; mod listener; pub use self::listener::*; -pub mod locks; - mod kv; pub use self::kv::{ namespace, segment, Key, KeyValueError, KeyValueStore, Namespace, Scope, Segment, SegmentBuf, SegmentExt, diff --git a/src/commons/eventsourcing/store.rs b/src/commons/eventsourcing/store.rs index 01c94c918..9dca57ed6 100644 --- a/src/commons/eventsourcing/store.rs +++ b/src/commons/eventsourcing/store.rs @@ -238,7 +238,7 @@ where // Get the aggregate from the cache, or get it from the store. 
let mut changed_from_cached = false; - let res = match self.cache_get(handle) { + let latest_result = match self.cache_get(handle) { Some(arc) => Ok(arc), None => { // There was no cached aggregate, so try to get it @@ -277,7 +277,7 @@ where } }; - let mut agg = match res { + let mut agg = match latest_result { Err(e) => return Ok(Err(e)), Ok(agg) => agg, }; diff --git a/src/commons/eventsourcing/wal.rs b/src/commons/eventsourcing/wal.rs index e34638468..46adb9cef 100644 --- a/src/commons/eventsourcing/wal.rs +++ b/src/commons/eventsourcing/wal.rs @@ -5,14 +5,12 @@ use std::{ sync::{Arc, RwLock}, }; -use kvx::Namespace; +use kvx::{Namespace, ReadStore}; use rpki::ca::idexchange::MyHandle; use serde::Serialize; use url::Url; -use crate::commons::eventsourcing::{ - locks::HandleLocks, segment, Key, KeyValueError, KeyValueStore, Scope, Segment, SegmentExt, Storable, -}; +use crate::commons::eventsourcing::{segment, Key, KeyValueError, KeyValueStore, Scope, Segment, SegmentExt, Storable}; //------------ WalSupport ---------------------------------------------------- @@ -81,7 +79,7 @@ pub trait WalSupport: Storable { //------------ WalCommand ---------------------------------------------------- -pub trait WalCommand: fmt::Display { +pub trait WalCommand: Clone + fmt::Display { fn handle(&self) -> &MyHandle; } @@ -124,7 +122,6 @@ impl WalSet { pub struct WalStore { kv: KeyValueStore, cache: RwLock>>, - locks: HandleLocks, } impl WalStore { @@ -133,9 +130,8 @@ impl WalStore { pub fn create(storage_uri: &Url, name_space: &Namespace) -> WalStoreResult { let kv = KeyValueStore::create(storage_uri, name_space)?; let cache = RwLock::new(HashMap::new()); - let locks = HandleLocks::default(); - Ok(WalStore { kv, cache, locks }) + Ok(WalStore { kv, cache }) } /// Warms up the store: caches all instances. @@ -152,21 +148,30 @@ impl WalStore { /// Add a new entity for the given handle. Fails if the handle is in use. pub fn add(&self, handle: &MyHandle, instance: T) -> WalStoreResult<()> { - let handle_lock = self.locks.for_handle(handle.clone()); - let _write = handle_lock.write(); - + let scope = Self::scope_for_handle(handle); let instance = Arc::new(instance); - let key = Self::key_for_snapshot(handle); - // TODO rewrite as transaction, this was `.store_new` - self.kv.store(&key, &instance)?; // Should fail if this key exists - self.cache.write().unwrap().insert(handle.clone(), instance); - Ok(()) + + self.kv + .inner() + .execute(&scope, |kv| { + let key = Self::key_for_snapshot(handle); + let json = serde_json::to_value(instance.as_ref())?; + kv.store(&key, json)?; + + self.cache_update(handle, instance.clone()); + + Ok(()) + }) + .map_err(|e| WalStoreError::KeyStoreError(KeyValueError::KVError(e))) } /// Checks whether there is an instance for the given handle. pub fn has(&self, handle: &MyHandle) -> WalStoreResult { - let key = Self::key_for_snapshot(handle); - self.kv.has(&key).map_err(WalStoreError::KeyStoreError) + let scope = Self::scope_for_handle(handle); + self.kv + .inner() + .has_scope(&scope) + .map_err(|e| WalStoreError::KeyStoreError(KeyValueError::KVError(e))) } /// Get the latest revision for the given handle. @@ -174,53 +179,8 @@ impl WalStore { /// This will use the cache if it's available and otherwise get a snapshot /// from the keystore. Then it will check whether there are any further /// changes. 
- pub fn get_latest(&self, handle: &MyHandle) -> WalStoreResult> { - let handle_lock = self.locks.for_handle(handle.clone()); - let _read = handle_lock.read(); - - self.get_latest_no_lock(handle) - } - - /// Get the latest revision without using a lock. - /// - /// Intended to be used by public functions which manage the locked read/write access - /// to this instance for this handle. - fn get_latest_no_lock(&self, handle: &MyHandle) -> WalStoreResult> { - let mut instance = match self.cache.read().unwrap().get(handle).cloned() { - None => Arc::new(self.get_snapshot(handle)?), - Some(instance) => instance, - }; - - if !self.kv.has(&Self::key_for_wal_set(handle, instance.revision()))? { - // No further changes for this revision exist. - // - // Note: this is expected to be the case if our cached instances - // are kept up-to-date, and we run on a single node. Double - // checking this should not be too expensive though, and it - // allows us to use same code path for warming the cache and - // for getting the latest instance in other cases. - Ok(instance) - } else { - // Changes exist: - // - apply all of them - // - update the cache instance - // - return updated - let instance = Arc::make_mut(&mut instance); - - loop { - trace!("Get changeset for {} revision {}", handle, instance.revision()); - let wal_set_key = Self::key_for_wal_set(handle, instance.revision()); - if let Some(set) = self.kv.get(&wal_set_key)? { - instance.apply(set) - } else { - break; - } - } - - let instance = Arc::new(instance.clone()); - self.cache.write().unwrap().insert(handle.clone(), instance.clone()); - Ok(instance) - } + pub fn get_latest(&self, handle: &MyHandle) -> Result, T::Error> { + self.execute_opt_command(handle, None, false) } /// Remove an instance from this store. Irrevocable. @@ -228,36 +188,19 @@ impl WalStore { if !self.has(handle)? { Err(WalStoreError::Unknown(handle.clone())) } else { - { - // First get a lock and remove the object - let handle_lock = self.locks.for_handle(handle.clone()); - let _write = handle_lock.write(); - self.cache.write().unwrap().remove(handle); - self.kv - .drop_scope(&Scope::from_segment(Segment::parse_lossy(handle.as_str())))?; - // handle should always be a valid Segment - } + let scope = Self::scope_for_handle(handle); - // Then drop the lock for it as well. We could not do this - // while holding the write lock. - // - // Note that the corresponding entity was removed from the key - // value store while we had a write lock for its handle. - // So, even if another concurrent thread would now try to update - // this same entity, that update would fail because the entity - // no longer exists. - self.locks.drop_handle(handle); - Ok(()) + self.kv + .inner() + .execute(&scope, |kv| { + kv.delete_scope(&scope)?; + self.cache_remove(handle); + Ok(()) + }) + .map_err(|e| WalStoreError::KeyStoreError(KeyValueError::KVError(e))) } } - fn get_snapshot(&self, handle: &MyHandle) -> WalStoreResult { - trace!("Load WAL snapshot for {}", handle); - self.kv - .get(&Self::key_for_snapshot(handle))? - .ok_or_else(|| WalStoreError::Unknown(handle.clone())) - } - /// Returns a list of all instances managed in this store. 
pub fn list(&self) -> WalStoreResult> { let mut res = vec![]; @@ -278,116 +221,209 @@ impl WalStore { /// - apply the wal set locally /// - save the wal set /// - if saved properly update the cache - /// - /// - /// pub fn send_command(&self, command: T::Command) -> Result, T::Error> { let handle = command.handle().clone(); + self.execute_opt_command(&handle, Some(command), false) + } - let handle_lock = self.locks.for_handle(handle.clone()); - let _write = handle_lock.write(); - - let mut latest = self.get_latest_no_lock(&handle)?; + fn execute_opt_command( + &self, + handle: &MyHandle, + cmd_opt: Option, + save_snapshot: bool, + ) -> Result, T::Error> { + self.kv + .inner() + .execute(&Self::scope_for_handle(handle), |kv| { + // The closure needs to return a Result. + // In our case X will be a Result, T::Error>. + // + // or in full: Result, T::Error>, kvx::Error> + // + // So.. any kvx error will be in the outer result, while + // any T related issues can still be returned as an err + // in the inner result. + + // Track whether T has changed compared to the cached + // version (if any) so that we will know whether the + // cache should be updated. + let mut changed_from_cached = false; + + // Get the instance for T from the cache, or get it + // from the store. + let latest_option = match self.cache_get(handle) { + Some(t) => { + debug!("Found cached instance for '{handle}', at revision: {}", t.revision()); + Some(t) + } + None => { + trace!("No cached instance found for '{handle}'"); + changed_from_cached = true; - let summary = command.to_string(); - let revision = latest.revision(); - let changes = latest.process_command(command)?; + let key = Self::key_for_snapshot(handle); - if changes.is_empty() { - debug!("No changes need for '{}' when processing command: {}", handle, summary); - Ok(latest) - } else { - // lock the cache first, before writing any updates - let mut cache = self.cache.write().unwrap(); + match kv.get(&key)? { + Some(value) => { + debug!("Deserializing stored instance for '{handle}'"); + let latest: T = serde_json::from_value(value)?; + Some(Arc::new(latest)) + } + None => { + debug!("No instance found instance for '{handle}'"); + None + } + } + } + }; + + // Get a mutable latest T to work with, or return with an + // inner Err informing the caller that there is no instance. + let mut latest = match latest_option { + Some(latest) => latest, + None => return Ok(Err(T::Error::from(WalStoreError::Unknown(handle.clone())))), + }; + + // Check for updates and apply changes + + { + // Check if there any new changes that ought to be applied. + // If so, apply them and remember that the instance was changed + // compared to the (possible) cached version. + + let latest_inner = Arc::make_mut(&mut latest); + + // Check for changes and apply them until: + // - there are no more changes + // - or we encountered an error + loop { + let revision = latest_inner.revision(); + let key = Self::key_for_wal_set(handle, revision); + + if let Some(value) = kv.get(&key)? 
{ + let set: WalSet = serde_json::from_value(value)?; + debug!("applying revision '{revision}' to '{handle}'"); + latest_inner.apply(set); + changed_from_cached = true; + } else { + break; + } + } - let set: WalSet = WalSet { - revision, - summary, - changes, - }; + // Process the command + if let Some(command) = cmd_opt.clone() { + let summary = command.to_string(); + let revision = latest_inner.revision(); - let key_for_wal_set = Self::key_for_wal_set(&handle, revision); - // TODO rewrite as transaction, this was `.store_new` - self.kv - .store(&key_for_wal_set, &set) - .map_err(WalStoreError::KeyStoreError)?; + debug!("Applying command {command} to {handle}"); + match latest_inner.process_command(command) { + Err(e) => { + debug!("Error applying command to '{handle}'. Error: {e}"); + return Ok(Err(e)); + } + Ok(changes) => { + if changes.is_empty() { + debug!( + "No changes needed for '{}' when processing command: {}", + handle, summary + ); + } else { + debug!( + "{} changes resulted for '{}' when processing command: {}", + changes.len(), + handle, + summary + ); + changed_from_cached = true; + + let set: WalSet = WalSet { + revision, + summary, + changes, + }; + + let key_for_wal_set = Self::key_for_wal_set(handle, revision); + + if kv.has(&key_for_wal_set)? { + error!("Change set for '{handle}' version '{revision}' already exists."); + error!("This is a bug. Please report this issue to rpki-team@nlnetlabs.nl."); + error!("Krill will exit. If this issue repeats, consider removing {}.", handle); + std::process::exit(1); + } + + let json = serde_json::to_value(&set)?; + + latest_inner.apply(set); + + kv.store(&key_for_wal_set, json)?; + } + } + } + } + } - let latest = Arc::make_mut(&mut latest); - latest.apply(set); + if changed_from_cached { + self.cache_update(handle, latest.clone()); + } - let latest = Arc::new(latest.clone()); - cache.insert(handle, latest.clone()); + if save_snapshot { + // Save the latest version as snapshot + let key = Self::key_for_snapshot(handle); + let value = serde_json::to_value(latest.as_ref())?; + kv.store(&key, value)?; + + // Delete all wal sets (changes), since we are doing + // this inside a transaction or locked scope we can + // assume that all changes were applied, and there + // are no other threads creating additional changes + // that we were not aware of. + for key in kv.list_keys(&Self::scope_for_handle(handle))? { + if key.name().as_str().starts_with("wal-") { + kv.delete(&key)?; + } + } + } - Ok(latest) - } + Ok(Ok(latest)) + }) + .map_err(|kv_err| T::Error::from(WalStoreError::KeyStoreError(KeyValueError::KVError(kv_err))))? } - pub fn update_snapshots(&self) -> WalStoreResult<()> { + pub fn update_snapshots(&self) -> Result<(), T::Error> { for handle in self.list()? { - self.update_snapshot(&handle, false)?; + self.update_snapshot(&handle)?; } Ok(()) } /// Update snapshot and archive or delete old wal sets - /// - /// This is a separate function because serializing a large instance can - /// be expensive. Note that the archive bool argument is (currently) always - /// set to false, but it has been added to support archiving - rather than - /// deleting old change sets - in future. - pub fn update_snapshot(&self, handle: &MyHandle, archive: bool) -> WalStoreResult<()> { - // Note that we do not need to keep a lock for the instance when we update the snapshot. - // This function just updates the latest snapshot in the key value store, and it removes - // or archives all write-ahead log ("wal-") changes predating the new snapshot. 
- // - // It is fine if another thread gets the entity for this handle and updates it while we - // do this. As it turns out, writing snapshots can be expensive for large objects, so - // we do not want block updates while we do this. - // - // This function is intended to be called in the back-ground at regular (slow) intervals - // so any updates that were just missed will simply be folded in to the new snapshot when - // this function is called again. - let latest = self.get_latest(handle)?; - let key = Self::key_for_snapshot(handle); - self.kv.store(&key, &latest)?; - - // Archive or delete old wal sets - for key in self - .kv - .keys(&Scope::from_segment(Segment::parse_lossy(handle.as_str())), "wal-")? - // handle should always be a valid Segment - { - // Carefully inspect the key, just ignore keys - // following a format that is not expected. - // Who knows what people write in this dir? - if let Some(remaining) = key.name().as_str().strip_prefix("wal-") { - if let Some(number) = remaining.strip_suffix(".json") { - if let Ok(revision) = u64::from_str(number) { - if revision < latest.revision() { - if archive { - self.kv.archive_key(&key)?; - } else { - self.kv.drop_key(&key)?; - } - } - } - } - } - } + pub fn update_snapshot(&self, handle: &MyHandle) -> Result, T::Error> { + self.execute_opt_command(handle, None, true) + } - Ok(()) + fn cache_get(&self, id: &MyHandle) -> Option> { + self.cache.read().unwrap().get(id).cloned() + } + + fn cache_remove(&self, id: &MyHandle) { + self.cache.write().unwrap().remove(id); + } + + fn cache_update(&self, id: &MyHandle, arc: Arc) { + self.cache.write().unwrap().insert(id.clone(), arc); + } + + fn scope_for_handle(handle: &MyHandle) -> Scope { + // handle should always be a valid Segment + Scope::from_segment(Segment::parse_lossy(handle.as_str())) } fn key_for_snapshot(handle: &MyHandle) -> Key { - Key::new_scoped( - Scope::from_segment(Segment::parse_lossy(handle.as_str())), // handle should always be a valid Segment - segment!("snapshot.json"), - ) + Key::new_scoped(Self::scope_for_handle(handle), segment!("snapshot.json")) } fn key_for_wal_set(handle: &MyHandle, revision: u64) -> Key { Key::new_scoped( - Scope::from_segment(Segment::parse_lossy(handle.as_str())), // handle should always be a valid Segment + Self::scope_for_handle(handle), Segment::parse(&format!("wal-{}.json", revision)).unwrap(), // cannot panic as a u64 cannot contain a Scope::SEPARATOR ) } diff --git a/src/pubd/repository.rs b/src/pubd/repository.rs index 7d395e005..6fc0fa8b4 100644 --- a/src/pubd/repository.rs +++ b/src/pubd/repository.rs @@ -102,9 +102,7 @@ impl RepositoryContentProxy { } fn get_default_content(&self) -> KrillResult> { - self.store - .get_latest(&self.default_handle) - .map_err(Error::WalStoreError) + self.store.get_latest(&self.default_handle) } // Clear all content, so it can be re-initialized. From c396e92bf89ef4295b93d767558a02428f6cb657 Mon Sep 17 00:00:00 2001 From: Tim Bruijnzeels Date: Fri, 8 Sep 2023 11:05:47 +0200 Subject: [PATCH 15/18] Do not expose inner kvx. --- src/commons/eventsourcing/kv.rs | 21 +++++++++++++++-- src/commons/eventsourcing/store.rs | 22 +++--------------- src/commons/eventsourcing/wal.rs | 37 ++++++++---------------------- 3 files changed, 32 insertions(+), 48 deletions(-) diff --git a/src/commons/eventsourcing/kv.rs b/src/commons/eventsourcing/kv.rs index 3e37a2092..6439cc8de 100644 --- a/src/commons/eventsourcing/kv.rs +++ b/src/commons/eventsourcing/kv.rs @@ -129,8 +129,25 @@ impl KeyValueStore { )?) 
} - pub fn inner(&self) -> &kvx::KeyValueStore { - &self.inner + /// Execute one or more `kvx::KeyValueStoreBackend` operations + /// within a transaction or scope lock context inside the given + /// closure. + /// + /// The closure needs to return a Result. This + /// allows the caller to simply use the ? operator on any kvx + /// calls that could result in an error within the closure. The + /// kvx::Error is mapped to a KeyValueError to avoid that the + /// caller needs to have any specific knowledge about the kvx::Error + /// type. + /// + /// T can be () if no return value is needed. If anything can + /// fail in the closure, other than kvx calls, then T can be + /// a Result. + pub fn execute(&self, scope: &Scope, op: F) -> Result + where + F: FnMut(&dyn KeyValueStoreBackend) -> Result, + { + self.inner.execute(scope, op).map_err(KeyValueError::KVError) } /// Gets a value for a key, returns an error if the value cannot be deserialized, diff --git a/src/commons/eventsourcing/store.rs b/src/commons/eventsourcing/store.rs index 9dca57ed6..c4813c145 100644 --- a/src/commons/eventsourcing/store.rs +++ b/src/commons/eventsourcing/store.rs @@ -142,13 +142,7 @@ where let scope = Self::scope_for_agg(cmd.handle()); self.kv - .inner() .execute(&scope, move |kv| { - // The closure needs to return a Result. - // In our case T will be a Result, A::Error>. - // So.. any kvx error will be in the outer result, while - // any aggregate related issues can still be returned - // as an err in the inner result. let handle = cmd.handle().clone(); let init_command_key = Self::key_for_command(&handle, 0); @@ -183,7 +177,7 @@ where } } }) - .map_err(|kv_err| A::Error::from(AggregateStoreError::KeyStoreError(KeyValueError::KVError(kv_err))))? + .map_err(|e| A::Error::from(AggregateStoreError::KeyStoreError(e)))? } /// Send a command to the latest aggregate referenced by the handle in the command. @@ -227,14 +221,7 @@ where save_snapshot: bool, ) -> Result, A::Error> { self.kv - .inner() .execute(&Self::scope_for_agg(handle), |kv| { - // The closure needs to return a Result. - // In our case T will be a Result, A::Error>. - // So.. any kvx error will be in the outer result, while - // any aggregate related issues can still be returned - // as an err in the inner result. - // Get the aggregate from the cache, or get it from the store. let mut changed_from_cached = false; @@ -421,7 +408,7 @@ where Ok(Ok(agg)) } }) - .map_err(|kv_err| A::Error::from(AggregateStoreError::KeyStoreError(KeyValueError::KVError(kv_err))))? + .map_err(|e| A::Error::from(AggregateStoreError::KeyStoreError(e)))? 
} } @@ -571,10 +558,7 @@ where pub fn drop_aggregate(&self, id: &MyHandle) -> Result<(), AggregateStoreError> { let scope = Self::scope_for_agg(id); - self.kv - .inner() - .execute(&scope, |kv| kv.delete_scope(&scope)) - .map_err(|kv_err| AggregateStoreError::KeyStoreError(KeyValueError::KVError(kv_err)))?; + self.kv.execute(&scope, |kv| kv.delete_scope(&scope))?; self.cache_remove(id); Ok(()) diff --git a/src/commons/eventsourcing/wal.rs b/src/commons/eventsourcing/wal.rs index 46adb9cef..6894b334d 100644 --- a/src/commons/eventsourcing/wal.rs +++ b/src/commons/eventsourcing/wal.rs @@ -5,7 +5,7 @@ use std::{ sync::{Arc, RwLock}, }; -use kvx::{Namespace, ReadStore}; +use kvx::Namespace; use rpki::ca::idexchange::MyHandle; use serde::Serialize; use url::Url; @@ -152,7 +152,6 @@ impl WalStore { let instance = Arc::new(instance); self.kv - .inner() .execute(&scope, |kv| { let key = Self::key_for_snapshot(handle); let json = serde_json::to_value(instance.as_ref())?; @@ -162,16 +161,13 @@ impl WalStore { Ok(()) }) - .map_err(|e| WalStoreError::KeyStoreError(KeyValueError::KVError(e))) + .map_err(WalStoreError::KeyStoreError) } /// Checks whether there is an instance for the given handle. pub fn has(&self, handle: &MyHandle) -> WalStoreResult { let scope = Self::scope_for_handle(handle); - self.kv - .inner() - .has_scope(&scope) - .map_err(|e| WalStoreError::KeyStoreError(KeyValueError::KVError(e))) + self.kv.has_scope(&scope).map_err(WalStoreError::KeyStoreError) } /// Get the latest revision for the given handle. @@ -191,13 +187,12 @@ impl WalStore { let scope = Self::scope_for_handle(handle); self.kv - .inner() .execute(&scope, |kv| { kv.delete_scope(&scope)?; self.cache_remove(handle); Ok(()) }) - .map_err(|e| WalStoreError::KeyStoreError(KeyValueError::KVError(e))) + .map_err(WalStoreError::KeyStoreError) } } @@ -233,24 +228,13 @@ impl WalStore { save_snapshot: bool, ) -> Result, T::Error> { self.kv - .inner() .execute(&Self::scope_for_handle(handle), |kv| { - // The closure needs to return a Result. - // In our case X will be a Result, T::Error>. - // - // or in full: Result, T::Error>, kvx::Error> - // - // So.. any kvx error will be in the outer result, while - // any T related issues can still be returned as an err - // in the inner result. - - // Track whether T has changed compared to the cached - // version (if any) so that we will know whether the - // cache should be updated. + // Track whether anything has changed compared to the cached + // instance (if any) so that we will know whether the cache + // should be updated. let mut changed_from_cached = false; - // Get the instance for T from the cache, or get it - // from the store. + // Get the instance from the cache, or get it from the store. let latest_option = match self.cache_get(handle) { Some(t) => { debug!("Found cached instance for '{handle}', at revision: {}", t.revision()); @@ -276,7 +260,7 @@ impl WalStore { } }; - // Get a mutable latest T to work with, or return with an + // Get a mutable instance to work with, or return with an // inner Err informing the caller that there is no instance. let mut latest = match latest_option { Some(latest) => latest, @@ -284,7 +268,6 @@ impl WalStore { }; // Check for updates and apply changes - { // Check if there any new changes that ought to be applied. // If so, apply them and remember that the instance was changed @@ -385,7 +368,7 @@ impl WalStore { Ok(Ok(latest)) }) - .map_err(|kv_err| T::Error::from(WalStoreError::KeyStoreError(KeyValueError::KVError(kv_err))))? 
+ .map_err(|e| T::Error::from(WalStoreError::KeyStoreError(e)))? } pub fn update_snapshots(&self) -> Result<(), T::Error> { From c17ba2cc175607ab47d2f1a824bfd01d22a82cb5 Mon Sep 17 00:00:00 2001 From: Tim Bruijnzeels Date: Fri, 8 Sep 2023 12:10:08 +0200 Subject: [PATCH 16/18] Use transactions for CaObjects. --- src/daemon/ca/publishing.rs | 58 +++++++++++++++---------------------- 1 file changed, 24 insertions(+), 34 deletions(-) diff --git a/src/daemon/ca/publishing.rs b/src/daemon/ca/publishing.rs index a5148fdc3..cb00844bc 100644 --- a/src/daemon/ca/publishing.rs +++ b/src/daemon/ca/publishing.rs @@ -1,11 +1,6 @@ //! Support for signing mft, crl, certificates, roas.. //! Common objects for TAs and CAs -use std::{ - borrow::BorrowMut, - collections::HashMap, - str::FromStr, - sync::{Arc, RwLock}, -}; +use std::{borrow::BorrowMut, collections::HashMap, str::FromStr, sync::Arc}; use chrono::Duration; use rpki::{ @@ -57,7 +52,7 @@ use super::{AspaInfo, AspaObjectsUpdates, BgpSecCertInfo, BgpSecCertificateUpdat /// inspect, and causes issues with regards to replaying CA state from scratch. #[derive(Clone, Debug)] pub struct CaObjectsStore { - store: Arc>, + store: Arc, signer: Arc, issuance_timing: IssuanceTimingConfig, } @@ -70,7 +65,7 @@ impl CaObjectsStore { signer: Arc, ) -> KrillResult { let store = KeyValueStore::create(storage_uri, CA_OBJECTS_NS)?; - let store = Arc::new(RwLock::new(store)); + let store = Arc::new(store); Ok(CaObjectsStore { store, signer, @@ -180,8 +175,6 @@ impl CaObjectsStore { pub fn cas(&self) -> KrillResult> { let cas = self .store - .read() - .unwrap() .keys(&Scope::global(), ".json")? .iter() .flat_map(|k| { @@ -202,7 +195,7 @@ impl CaObjectsStore { pub fn ca_objects(&self, ca: &CaHandle) -> KrillResult { let key = Self::key(ca); - match self.store.read().unwrap().get(&key).map_err(Error::KeyValueError)? { + match self.store.get_transactional(&key).map_err(Error::KeyValueError)? { None => { let objects = CaObjects::new(ca.clone(), None, HashMap::new(), vec![]); Ok(objects) @@ -214,33 +207,30 @@ impl CaObjectsStore { /// Perform an action (closure) on a mutable instance of the CaObjects for a /// CA. If the CA did not have any CaObjects yet, one will be created. The /// closure is executed within a write lock. - pub fn with_ca_objects(&self, ca: &CaHandle, op: F) -> KrillResult<()> + pub fn with_ca_objects(&self, ca: &CaHandle, mut op: F) -> KrillResult<()> where - F: FnOnce(&mut CaObjects) -> KrillResult<()>, + F: FnMut(&mut CaObjects) -> KrillResult<()>, { - // TODO: use kvx execute/transaction - let lock = self.store.write().unwrap(); - - let key = Self::key(ca); + self.store + .execute(&Scope::global(), |kv| { + let key = Self::key(ca); - let mut objects = lock - .get(&key) + let mut objects: CaObjects = if let Some(value) = kv.get(&key)? { + serde_json::from_value(value)? + } else { + CaObjects::new(ca.clone(), None, HashMap::new(), vec![]) + }; + + match op(&mut objects) { + Err(e) => Ok(Err(e)), + Ok(()) => { + let value = serde_json::to_value(&objects)?; + kv.store(&key, value)?; + Ok(Ok(())) + } + } + }) .map_err(Error::KeyValueError)? 
- .unwrap_or_else(|| CaObjects::new(ca.clone(), None, HashMap::new(), vec![])); - - op(&mut objects)?; - - lock.store(&key, &objects).map_err(Error::KeyValueError)?; - - Ok(()) - } - - pub fn put_ca_objects(&self, ca: &CaHandle, objects: &CaObjects) -> KrillResult<()> { - self.store - .write() - .unwrap() - .store(&Self::key(ca), objects) - .map_err(Error::KeyValueError) } // Re-issue MFT and CRL for all CAs *if needed*, returns all CAs which were updated. From edddb9c669af8afdc956b0d021e8435293c5681f Mon Sep 17 00:00:00 2001 From: Tim Bruijnzeels Date: Fri, 8 Sep 2023 16:58:33 +0200 Subject: [PATCH 17/18] Use transactions for all (non-migration) functions in Krill KeyValueStore --- src/commons/eventsourcing/kv.rs | 184 ++++++----------------------- src/commons/eventsourcing/store.rs | 2 +- src/daemon/ca/publishing.rs | 2 +- 3 files changed, 38 insertions(+), 150 deletions(-) diff --git a/src/commons/eventsourcing/kv.rs b/src/commons/eventsourcing/kv.rs index 6439cc8de..d52a22620 100644 --- a/src/commons/eventsourcing/kv.rs +++ b/src/commons/eventsourcing/kv.rs @@ -69,7 +69,7 @@ impl KeyValueStore { let archive_ns = Self::prefixed_namespace(namespace, "archive")?; // Wipe any existing archive, before archiving this store. // We don't want to keep too much old data. See issue: #1088. - let archive_store = KeyValueStore::create(storage_uri, namespace)?; + let archive_store = KeyValueStore::create(storage_uri, &archive_ns)?; archive_store.wipe()?; self.inner.migrate_namespace(archive_ns).map_err(KeyValueError::KVError) @@ -94,18 +94,24 @@ impl KeyValueStore { /// Returns true if this KeyValueStore (with this namespace) has any entries pub fn is_empty(&self) -> Result { - self.inner.is_empty().map_err(KeyValueError::KVError) + self.execute(&Scope::global(), |kv| kv.is_empty()) } /// Import all data from the given KV store into this + /// + /// NOTE: This function is not transactional because both this, and the other + /// keystore could be in the same database and nested transactions are + /// currently not supported. This should be okay, because this function + /// is intended to be used for migrations and testing (copy test data + /// into a store) while Krill is not running. pub fn import(&self, other: &Self) -> Result<(), KeyValueError> { let mut scopes = other.scopes()?; scopes.push(Scope::global()); // not explicitly listed but should be migrated as well. for scope in scopes { for key in other.keys(&scope, "")? { - if let Some(value) = other.get_raw_value(&key)? { - self.store_raw_value(&key, value)?; + if let Some(value) = other.inner.get(&key).map_err(KeyValueError::KVError)? { + self.inner.store(&key, value).map_err(KeyValueError::KVError)?; } } } @@ -115,18 +121,20 @@ impl KeyValueStore { /// Stores a key value pair, serialized as json, overwrite existing pub fn store(&self, key: &Key, value: &V) -> Result<(), KeyValueError> { - Ok(self.inner.store(key, serde_json::to_value(value)?)?) + self.execute(key.scope(), &mut move |kv: &dyn KeyValueStoreBackend| { + kv.store(key, serde_json::to_value(value)?) + }) } /// Stores a key value pair, serialized as json, fails if existing pub fn store_new(&self, key: &Key, value: &V) -> Result<(), KeyValueError> { - Ok(self.inner.transaction( + self.execute( key.scope(), &mut move |kv: &dyn KeyValueStoreBackend| match kv.get(key)? { None => kv.store(key, serde_json::to_value(value)?), _ => Err(kvx::Error::Unknown), }, - )?) 
+ ) } /// Execute one or more `kvx::KeyValueStoreBackend` operations @@ -153,99 +161,43 @@ impl KeyValueStore { /// Gets a value for a key, returns an error if the value cannot be deserialized, /// returns None if it cannot be found. pub fn get(&self, key: &Key) -> Result, KeyValueError> { - if let Some(value) = self.get_raw_value(key)? { - match serde_json::from_value(value) { - Ok(result) => Ok(result), - Err(e) => { - // Get the value again so that we can do a full error report - let value = self.get_raw_value(key)?; - let value_str = value.map(|v| v.to_string()).unwrap_or("".to_string()); - - let expected_type = std::any::type_name::(); - - Err(KeyValueError::Other(format!( - "Could not deserialize value for key '{}'. Expected type: {}. Error: {}. Value was: {}", - key, expected_type, e, value_str - ))) - } + self.execute(key.scope(), |kv| { + if let Some(value) = kv.get(key)? { + Ok(Some(serde_json::from_value(value)?)) + } else { + Ok(None) } - } else { - Ok(None) - } - } - - fn get_raw_value(&self, key: &Key) -> Result, KeyValueError> { - self.inner.get(key).map_err(KeyValueError::KVError) - } - - fn store_raw_value(&self, key: &Key, value: serde_json::Value) -> Result<(), KeyValueError> { - self.inner.store(key, value).map_err(KeyValueError::KVError) - } - - /// Transactional `get`. - pub fn get_transactional(&self, key: &Key) -> Result, KeyValueError> { - let mut result: Option = None; - let result_ref = &mut result; - self.inner - .transaction(key.scope(), &mut move |kv: &dyn KeyValueStoreBackend| { - if let Some(value) = kv.get(key)? { - *result_ref = Some(serde_json::from_value(value)?) - } - - Ok(()) - })?; - - Ok(result) + }) } /// Returns whether a key exists pub fn has(&self, key: &Key) -> Result { - Ok(self.inner.has(key)?) + self.execute(key.scope(), |kv| kv.has(key)) } /// Delete a key-value pair pub fn drop_key(&self, key: &Key) -> Result<(), KeyValueError> { - Ok(self.inner.delete(key)?) + self.execute(key.scope(), |kv| kv.delete(key)) } /// Delete a scope pub fn drop_scope(&self, scope: &Scope) -> Result<(), KeyValueError> { - Ok(self.inner.delete_scope(scope)?) + self.execute(scope, |kv| kv.delete_scope(scope)) } /// Wipe the complete store. Needless to say perhaps.. use with care.. pub fn wipe(&self) -> Result<(), KeyValueError> { - Ok(self.inner.clear()?) - } - - /// Move a value from one key to another - pub fn move_key(&self, from: &Key, to: &Key) -> Result<(), KeyValueError> { - Ok(self.inner.move_value(from, to)?) - } - - /// Archive a key - pub fn archive_key(&self, key: &Key) -> Result<(), KeyValueError> { - self.move_key(key, &key.clone().with_sub_scope(segment!("archived"))) - } - - /// Archive a key as corrupt - pub fn archive_corrupt(&self, key: &Key) -> Result<(), KeyValueError> { - self.move_key(key, &key.clone().with_sub_scope(segment!("corrupt"))) - } - - /// Archive a key as surplus - pub fn archive_surplus(&self, key: &Key) -> Result<(), KeyValueError> { - self.move_key(key, &key.clone().with_sub_scope(segment!("surplus"))) + self.execute(&Scope::global(), |kv| kv.clear()) } /// Returns all scopes, including sub_scopes pub fn scopes(&self) -> Result, KeyValueError> { - Ok(self.inner.list_scopes()?) + self.execute(&Scope::global(), |kv| kv.list_scopes()) } /// Returns whether a scope exists pub fn has_scope(&self, scope: &Scope) -> Result { - Ok(self.inner.has_scope(scope)?) 
+ self.execute(&Scope::global(), |kv| kv.has_scope(scope)) } /// Returns all keys under a scope (scopes are exact strings, 'sub'-scopes @@ -254,12 +206,13 @@ impl KeyValueStore { /// /// If matching is not empty then the key must contain the given `&str`. pub fn keys(&self, scope: &Scope, matching: &str) -> Result, KeyValueError> { - Ok(self - .inner - .list_keys(scope)? - .into_iter() - .filter(|key| matching.is_empty() || key.name().as_str().contains(matching)) - .collect()) + self.execute(scope, |kv| { + kv.list_keys(scope).map(|keys| { + keys.into_iter() + .filter(|key| matching.is_empty() || key.name().as_str().contains(matching)) + .collect() + }) + }) } } @@ -417,10 +370,10 @@ mod tests { let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); let content = "content".to_owned(); let key = Key::new_global(random_segment()); - assert_eq!(store.get_transactional::(&key).unwrap(), None); + assert_eq!(store.get::(&key).unwrap(), None); store.store(&key, &content).unwrap(); - assert_eq!(store.get_transactional(&key).unwrap(), Some(content)); + assert_eq!(store.get(&key).unwrap(), Some(content)); } #[test] @@ -489,71 +442,6 @@ mod tests { assert!(store.keys(&Scope::global(), "").unwrap().is_empty()); } - #[test] - fn test_move_key() { - let storage_uri = get_storage_uri(); - - let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); - let content = "content".to_string(); - let key = Key::new_global(random_segment()); - - store.store(&key, &content).unwrap(); - assert!(store.has(&key).unwrap()); - - let target = Key::new_global(random_segment()); - store.move_key(&key, &target).unwrap(); - assert!(!store.has(&key).unwrap()); - assert!(store.has(&target).unwrap()); - } - - #[test] - fn test_archive() { - let storage_uri = get_storage_uri(); - - let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); - let content = "content".to_string(); - let key = Key::new_global(random_segment()); - - store.store(&key, &content).unwrap(); - assert!(store.has(&key).unwrap()); - - store.archive_key(&key).unwrap(); - assert!(!store.has(&key).unwrap()); - assert!(store.has(&key.with_sub_scope(segment!("archived"))).unwrap()); - } - - #[test] - fn test_archive_corrupt() { - let storage_uri = get_storage_uri(); - - let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); - let content = "content".to_string(); - let key = Key::new_global(random_segment()); - - store.store(&key, &content).unwrap(); - assert!(store.has(&key).unwrap()); - - store.archive_corrupt(&key).unwrap(); - assert!(!store.has(&key).unwrap()); - assert!(store.has(&key.with_sub_scope(segment!("corrupt"))).unwrap()); - } - - #[test] - fn test_archive_surplus() { - let storage_uri = get_storage_uri(); - - let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); - let content = "content".to_string(); - let key = Key::new_global(random_segment()); - - store.store(&key, &content).unwrap(); - assert!(store.has(&key).unwrap()); - - store.archive_surplus(&key).unwrap(); - assert!(!store.has(&key).unwrap()); - assert!(store.has(&key.with_sub_scope(segment!("surplus"))).unwrap()); - } - #[test] fn test_scopes() { let storage_uri = get_storage_uri(); diff --git a/src/commons/eventsourcing/store.rs b/src/commons/eventsourcing/store.rs index c4813c145..98fa2ea24 100644 --- a/src/commons/eventsourcing/store.rs +++ b/src/commons/eventsourcing/store.rs @@ -496,7 +496,7 @@ where pub fn get_command(&self, id: &MyHandle, version: u64) -> Result, 
AggregateStoreError> { let key = Self::key_for_command(id, version); - match self.kv.get_transactional(&key)? { + match self.kv.get(&key)? { Some(cmd) => Ok(cmd), None => Err(AggregateStoreError::CommandNotFound(id.clone(), version)), } diff --git a/src/daemon/ca/publishing.rs b/src/daemon/ca/publishing.rs index cb00844bc..f6a75ed2f 100644 --- a/src/daemon/ca/publishing.rs +++ b/src/daemon/ca/publishing.rs @@ -195,7 +195,7 @@ impl CaObjectsStore { pub fn ca_objects(&self, ca: &CaHandle) -> KrillResult { let key = Self::key(ca); - match self.store.get_transactional(&key).map_err(Error::KeyValueError)? { + match self.store.get(&key).map_err(Error::KeyValueError)? { None => { let objects = CaObjects::new(ca.clone(), None, HashMap::new(), vec![]); Ok(objects) From 247422c8f9920948e27b983c3c77702ace28c35c Mon Sep 17 00:00:00 2001 From: Tim Bruijnzeels Date: Fri, 8 Sep 2023 17:09:10 +0200 Subject: [PATCH 18/18] Reorder functions. --- src/commons/eventsourcing/kv.rs | 212 +++++++++++++++++--------------- 1 file changed, 111 insertions(+), 101 deletions(-) diff --git a/src/commons/eventsourcing/kv.rs b/src/commons/eventsourcing/kv.rs index d52a22620..d4a3f2164 100644 --- a/src/commons/eventsourcing/kv.rs +++ b/src/commons/eventsourcing/kv.rs @@ -38,6 +38,7 @@ pub struct KeyValueStore { inner: kvx::KeyValueStore, } +// # Construct and high level functions. impl KeyValueStore { /// Creates a new KeyValueStore. pub fn create(storage_uri: &Url, namespace: &Namespace) -> Result { @@ -46,6 +47,116 @@ impl KeyValueStore { .map_err(KeyValueError::KVError) } + /// Returns true if this KeyValueStore (with this namespace) has any entries + pub fn is_empty(&self) -> Result { + self.execute(&Scope::global(), |kv| kv.is_empty()) + } + + /// Wipe the complete store. Needless to say perhaps.. use with care.. + pub fn wipe(&self) -> Result<(), KeyValueError> { + self.execute(&Scope::global(), |kv| kv.clear()) + } + + /// Execute one or more `kvx::KeyValueStoreBackend` operations + /// within a transaction or scope lock context inside the given + /// closure. + /// + /// The closure needs to return a Result. This + /// allows the caller to simply use the ? operator on any kvx + /// calls that could result in an error within the closure. The + /// kvx::Error is mapped to a KeyValueError to avoid that the + /// caller needs to have any specific knowledge about the kvx::Error + /// type. + /// + /// T can be () if no return value is needed. If anything can + /// fail in the closure, other than kvx calls, then T can be + /// a Result. + pub fn execute(&self, scope: &Scope, op: F) -> Result + where + F: FnMut(&dyn KeyValueStoreBackend) -> Result, + { + self.inner.execute(scope, op).map_err(KeyValueError::KVError) + } +} + +// # Keys and Values +impl KeyValueStore { + /// Stores a key value pair, serialized as json, overwrite existing + pub fn store(&self, key: &Key, value: &V) -> Result<(), KeyValueError> { + self.execute(key.scope(), &mut move |kv: &dyn KeyValueStoreBackend| { + kv.store(key, serde_json::to_value(value)?) + }) + } + + /// Stores a key value pair, serialized as json, fails if existing + pub fn store_new(&self, key: &Key, value: &V) -> Result<(), KeyValueError> { + self.execute( + key.scope(), + &mut move |kv: &dyn KeyValueStoreBackend| match kv.get(key)? { + None => kv.store(key, serde_json::to_value(value)?), + _ => Err(kvx::Error::Unknown), + }, + ) + } + + /// Gets a value for a key, returns an error if the value cannot be deserialized, + /// returns None if it cannot be found. 
+ pub fn get(&self, key: &Key) -> Result, KeyValueError> { + self.execute(key.scope(), |kv| { + if let Some(value) = kv.get(key)? { + Ok(Some(serde_json::from_value(value)?)) + } else { + Ok(None) + } + }) + } + + /// Returns whether a key exists + pub fn has(&self, key: &Key) -> Result { + self.execute(key.scope(), |kv| kv.has(key)) + } + + /// Delete a key-value pair + pub fn drop_key(&self, key: &Key) -> Result<(), KeyValueError> { + self.execute(key.scope(), |kv| kv.delete(key)) + } + + /// Returns all keys under a scope (scopes are exact strings, 'sub'-scopes + /// would need to be specified explicitly.. e.g. 'ca' and 'ca/archived' are + /// two distinct scopes. + /// + /// If matching is not empty then the key must contain the given `&str`. + pub fn keys(&self, scope: &Scope, matching: &str) -> Result, KeyValueError> { + self.execute(scope, |kv| { + kv.list_keys(scope).map(|keys| { + keys.into_iter() + .filter(|key| matching.is_empty() || key.name().as_str().contains(matching)) + .collect() + }) + }) + } +} + +// # Scopes +impl KeyValueStore { + /// Returns whether a scope exists + pub fn has_scope(&self, scope: &Scope) -> Result { + self.execute(&Scope::global(), |kv| kv.has_scope(scope)) + } + + /// Delete a scope + pub fn drop_scope(&self, scope: &Scope) -> Result<(), KeyValueError> { + self.execute(scope, |kv| kv.delete_scope(scope)) + } + + /// Returns all scopes, including sub_scopes + pub fn scopes(&self) -> Result, KeyValueError> { + self.execute(&Scope::global(), |kv| kv.list_scopes()) + } +} + +// # Migration Support +impl KeyValueStore { /// Creates a new KeyValueStore for upgrades. /// /// Adds the implicit prefix "upgrade-{version}-" to the given namespace. @@ -92,11 +203,6 @@ impl KeyValueStore { } } - /// Returns true if this KeyValueStore (with this namespace) has any entries - pub fn is_empty(&self) -> Result { - self.execute(&Scope::global(), |kv| kv.is_empty()) - } - /// Import all data from the given KV store into this /// /// NOTE: This function is not transactional because both this, and the other @@ -118,102 +224,6 @@ impl KeyValueStore { Ok(()) } - - /// Stores a key value pair, serialized as json, overwrite existing - pub fn store(&self, key: &Key, value: &V) -> Result<(), KeyValueError> { - self.execute(key.scope(), &mut move |kv: &dyn KeyValueStoreBackend| { - kv.store(key, serde_json::to_value(value)?) - }) - } - - /// Stores a key value pair, serialized as json, fails if existing - pub fn store_new(&self, key: &Key, value: &V) -> Result<(), KeyValueError> { - self.execute( - key.scope(), - &mut move |kv: &dyn KeyValueStoreBackend| match kv.get(key)? { - None => kv.store(key, serde_json::to_value(value)?), - _ => Err(kvx::Error::Unknown), - }, - ) - } - - /// Execute one or more `kvx::KeyValueStoreBackend` operations - /// within a transaction or scope lock context inside the given - /// closure. - /// - /// The closure needs to return a Result. This - /// allows the caller to simply use the ? operator on any kvx - /// calls that could result in an error within the closure. The - /// kvx::Error is mapped to a KeyValueError to avoid that the - /// caller needs to have any specific knowledge about the kvx::Error - /// type. - /// - /// T can be () if no return value is needed. If anything can - /// fail in the closure, other than kvx calls, then T can be - /// a Result. 
- pub fn execute(&self, scope: &Scope, op: F) -> Result - where - F: FnMut(&dyn KeyValueStoreBackend) -> Result, - { - self.inner.execute(scope, op).map_err(KeyValueError::KVError) - } - - /// Gets a value for a key, returns an error if the value cannot be deserialized, - /// returns None if it cannot be found. - pub fn get(&self, key: &Key) -> Result, KeyValueError> { - self.execute(key.scope(), |kv| { - if let Some(value) = kv.get(key)? { - Ok(Some(serde_json::from_value(value)?)) - } else { - Ok(None) - } - }) - } - - /// Returns whether a key exists - pub fn has(&self, key: &Key) -> Result { - self.execute(key.scope(), |kv| kv.has(key)) - } - - /// Delete a key-value pair - pub fn drop_key(&self, key: &Key) -> Result<(), KeyValueError> { - self.execute(key.scope(), |kv| kv.delete(key)) - } - - /// Delete a scope - pub fn drop_scope(&self, scope: &Scope) -> Result<(), KeyValueError> { - self.execute(scope, |kv| kv.delete_scope(scope)) - } - - /// Wipe the complete store. Needless to say perhaps.. use with care.. - pub fn wipe(&self) -> Result<(), KeyValueError> { - self.execute(&Scope::global(), |kv| kv.clear()) - } - - /// Returns all scopes, including sub_scopes - pub fn scopes(&self) -> Result, KeyValueError> { - self.execute(&Scope::global(), |kv| kv.list_scopes()) - } - - /// Returns whether a scope exists - pub fn has_scope(&self, scope: &Scope) -> Result { - self.execute(&Scope::global(), |kv| kv.has_scope(scope)) - } - - /// Returns all keys under a scope (scopes are exact strings, 'sub'-scopes - /// would need to be specified explicitly.. e.g. 'ca' and 'ca/archived' are - /// two distinct scopes. - /// - /// If matching is not empty then the key must contain the given `&str`. - pub fn keys(&self, scope: &Scope, matching: &str) -> Result, KeyValueError> { - self.execute(scope, |kv| { - kv.list_keys(scope).map(|keys| { - keys.into_iter() - .filter(|key| matching.is_empty() || key.name().as_str().contains(matching)) - .collect() - }) - }) - } } impl fmt::Display for KeyValueStore {
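Closing with a small sketch of the listing helpers reordered above (illustrative only; the substring filter is made up): `scopes` returns every scope including sub-scopes, while `keys` lists a single exact scope and optionally filters on a substring of the key name.

    fn list_example(store: &KeyValueStore) -> Result<(), KeyValueError> {
        // Every scope in the store, sub-scopes included.
        let scopes = store.scopes()?;
        println!("the store holds {} scope(s)", scopes.len());

        // Keys directly under the global scope whose name contains "command".
        // Keys under a sub-scope such as "ca/archived" are not included,
        // because scopes are matched exactly.
        for key in store.keys(&Scope::global(), "command")? {
            println!("key: {}", key);
        }

        Ok(())
    }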