From f156934cf5ce552fdf1009c71eb7d63e6e319c28 Mon Sep 17 00:00:00 2001 From: Tim Bruijnzeels Date: Mon, 6 Nov 2023 16:06:18 +0100 Subject: [PATCH 1/4] Move KV into storage mod. --- src/cli/ta_client.rs | 3 ++- src/commons/crypto/signing/signers/softsigner.rs | 2 +- src/commons/error.rs | 3 ++- src/commons/eventsourcing/mod.rs | 6 +----- src/commons/eventsourcing/store.rs | 4 ++-- src/commons/eventsourcing/wal.rs | 3 ++- src/commons/mod.rs | 1 + src/commons/{eventsourcing => storage}/kv.rs | 0 src/commons/storage/mod.rs | 4 ++++ src/constants.rs | 2 +- src/daemon/ca/publishing.rs | 3 ++- src/daemon/ca/status.rs | 2 +- src/daemon/config.rs | 2 +- src/ta/mod.rs | 3 ++- src/upgrades/data_migration.rs | 3 ++- src/upgrades/mod.rs | 5 +++-- src/upgrades/pre_0_10_0/cas_migration.rs | 3 ++- src/upgrades/pre_0_10_0/pubd_migration.rs | 3 ++- src/upgrades/pre_0_14_0/cas_migration.rs | 5 +---- src/upgrades/pre_0_14_0/mod.rs | 4 ++-- 20 files changed, 34 insertions(+), 27 deletions(-) rename src/commons/{eventsourcing => storage}/kv.rs (100%) create mode 100644 src/commons/storage/mod.rs diff --git a/src/cli/ta_client.rs b/src/cli/ta_client.rs index 418652b0f..73a2807db 100644 --- a/src/cli/ta_client.rs +++ b/src/cli/ta_client.rs @@ -19,7 +19,8 @@ use crate::{ api::{AddChildRequest, ApiRepositoryContact, CertAuthInfo, IdCertInfo, RepositoryContact, Token}, crypto::KrillSigner, error::Error as KrillError, - eventsourcing::{namespace, AggregateStore, AggregateStoreError, Namespace}, + eventsourcing::{AggregateStore, AggregateStoreError}, + storage::{namespace, Namespace}, util::{file, httpclient}, }, constants::{KRILL_CLI_API_ENV, KRILL_CLI_FORMAT_ENV, KRILL_TA_CLIENT_APP, KRILL_VERSION}, diff --git a/src/commons/crypto/signing/signers/softsigner.rs b/src/commons/crypto/signing/signers/softsigner.rs index 2d78f84c7..be80d3183 100644 --- a/src/commons/crypto/signing/signers/softsigner.rs +++ b/src/commons/crypto/signing/signers/softsigner.rs @@ -22,7 +22,7 @@ use url::Url; use crate::{ commons::{ crypto::{dispatch::signerinfo::SignerMapper, signers::error::SignerError, SignerHandle}, - eventsourcing::{Key, KeyValueStore, Segment, SegmentExt}, + storage::{Key, KeyValueStore, Segment, SegmentExt}, }, constants::KEYS_NS, }; diff --git a/src/commons/error.rs b/src/commons/error.rs index 1a2d83602..e52d662cb 100644 --- a/src/commons/error.rs +++ b/src/commons/error.rs @@ -20,7 +20,8 @@ use crate::{ commons::{ api::{rrdp::PublicationDeltaError, CustomerAsn, ErrorResponse, RoaPayload}, crypto::SignerError, - eventsourcing::{AggregateStoreError, KeyValueError}, + eventsourcing::AggregateStoreError, + storage::KeyValueError, util::httpclient, }, daemon::{ca::RoaPayloadJsonMapKey, http::tls_keys}, diff --git a/src/commons/eventsourcing/mod.rs b/src/commons/eventsourcing/mod.rs index 1d02e44cb..bca297630 100644 --- a/src/commons/eventsourcing/mod.rs +++ b/src/commons/eventsourcing/mod.rs @@ -18,11 +18,6 @@ pub use self::store::*; mod listener; pub use self::listener::*; -mod kv; -pub use self::kv::{ - namespace, segment, Key, KeyValueError, KeyValueStore, Namespace, Scope, Segment, SegmentBuf, SegmentExt, -}; - //------------ Tests --------------------------------------------------------- #[cfg(test)] @@ -43,6 +38,7 @@ mod tests { commons::{ actor::Actor, api::{CommandHistoryCriteria, CommandSummary}, + storage::{namespace, Namespace}, }, constants::ACTOR_DEF_TEST, test::mem_storage, diff --git a/src/commons/eventsourcing/store.rs b/src/commons/eventsourcing/store.rs index 732ac5c62..b3693dcdf 100644 --- 
a/src/commons/eventsourcing/store.rs +++ b/src/commons/eventsourcing/store.rs @@ -14,9 +14,9 @@ use crate::commons::{ api::{CommandHistory, CommandHistoryCriteria, CommandHistoryRecord}, error::KrillIoError, eventsourcing::{ - cmd::Command, segment, Aggregate, Key, KeyValueError, KeyValueStore, PostSaveEventListener, - PreSaveEventListener, Scope, Segment, SegmentExt, StoredCommand, StoredCommandBuilder, + cmd::Command, Aggregate, PostSaveEventListener, PreSaveEventListener, StoredCommand, StoredCommandBuilder, }, + storage::{segment, Key, KeyValueError, KeyValueStore, Scope, Segment, SegmentExt}, }; use super::InitCommand; diff --git a/src/commons/eventsourcing/wal.rs b/src/commons/eventsourcing/wal.rs index d82494d7f..ef7716c3a 100644 --- a/src/commons/eventsourcing/wal.rs +++ b/src/commons/eventsourcing/wal.rs @@ -10,7 +10,8 @@ use rpki::ca::idexchange::MyHandle; use serde::Serialize; use url::Url; -use crate::commons::eventsourcing::{segment, Key, KeyValueError, KeyValueStore, Scope, Segment, SegmentExt, Storable}; +use crate::commons::eventsourcing::Storable; +use crate::commons::storage::{segment, Key, KeyValueError, KeyValueStore, Scope, Segment, SegmentExt}; //------------ WalSupport ---------------------------------------------------- diff --git a/src/commons/mod.rs b/src/commons/mod.rs index c6911965c..53e171351 100644 --- a/src/commons/mod.rs +++ b/src/commons/mod.rs @@ -5,6 +5,7 @@ pub mod bgp; pub mod crypto; pub mod error; pub mod eventsourcing; +pub mod storage; pub mod util; //------------ Response Aliases ---------------------------------------------- diff --git a/src/commons/eventsourcing/kv.rs b/src/commons/storage/kv.rs similarity index 100% rename from src/commons/eventsourcing/kv.rs rename to src/commons/storage/kv.rs diff --git a/src/commons/storage/mod.rs b/src/commons/storage/mod.rs new file mode 100644 index 000000000..3943eead9 --- /dev/null +++ b/src/commons/storage/mod.rs @@ -0,0 +1,4 @@ +mod kv; +pub use self::kv::{ + namespace, segment, Key, KeyValueError, KeyValueStore, Namespace, Scope, Segment, SegmentBuf, SegmentExt, +}; diff --git a/src/constants.rs b/src/constants.rs index d3e7128b2..189d50b86 100644 --- a/src/constants.rs +++ b/src/constants.rs @@ -1,7 +1,7 @@ use kvx::Namespace; use crate::{ - commons::{actor::ActorDef, eventsourcing::namespace}, + commons::{actor::ActorDef, storage::namespace}, daemon::auth::common::NoResourceType, }; diff --git a/src/daemon/ca/publishing.rs b/src/daemon/ca/publishing.rs index 13648dd86..fe53fee8e 100644 --- a/src/daemon/ca/publishing.rs +++ b/src/daemon/ca/publishing.rs @@ -25,7 +25,8 @@ use crate::{ }, crypto::KrillSigner, error::Error, - eventsourcing::{Key, KeyValueStore, PreSaveEventListener, Scope, Segment, SegmentExt}, + eventsourcing::PreSaveEventListener, + storage::{Key, KeyValueStore, Scope, Segment, SegmentExt}, KrillResult, }, constants::CA_OBJECTS_NS, diff --git a/src/daemon/ca/status.rs b/src/daemon/ca/status.rs index ae752aa6a..afa2af352 100644 --- a/src/daemon/ca/status.rs +++ b/src/daemon/ca/status.rs @@ -14,7 +14,7 @@ use crate::commons::{ RepoStatus, }, error::Error, - eventsourcing::{segment, Key, KeyValueStore, Scope, Segment, SegmentExt}, + storage::{segment, Key, KeyValueStore, Scope, Segment, SegmentExt}, util::httpclient, KrillResult, }; diff --git a/src/daemon/config.rs b/src/daemon/config.rs index 5049f07ae..9e414730a 100644 --- a/src/daemon/config.rs +++ b/src/daemon/config.rs @@ -26,7 +26,7 @@ use crate::{ api::{PublicationServerUris, Token}, crypto::{OpenSslSignerConfig, SignSupport}, 
error::{Error, KrillIoError}, - eventsourcing::KeyValueStore, + storage::KeyValueStore, util::ext_serde, KrillResult, }, diff --git a/src/ta/mod.rs b/src/ta/mod.rs index 7dbb67f24..2563e96ee 100644 --- a/src/ta/mod.rs +++ b/src/ta/mod.rs @@ -42,7 +42,8 @@ mod tests { commons::{ api::{PublicationServerInfo, RepositoryContact}, crypto::KrillSignerBuilder, - eventsourcing::{namespace, AggregateStore, Namespace}, + eventsourcing::AggregateStore, + storage::{namespace, Namespace}, }, daemon::config::ConfigDefaults, test, diff --git a/src/upgrades/data_migration.rs b/src/upgrades/data_migration.rs index ea4413fff..5df111bd0 100644 --- a/src/upgrades/data_migration.rs +++ b/src/upgrades/data_migration.rs @@ -9,7 +9,8 @@ use url::Url; use crate::{ commons::{ crypto::{dispatch::signerinfo::SignerInfo, KrillSignerBuilder, OpenSslSigner}, - eventsourcing::{Aggregate, AggregateStore, KeyValueStore, WalStore, WalSupport}, + eventsourcing::{Aggregate, AggregateStore, WalStore, WalSupport}, + storage::KeyValueStore, }, constants::{ CASERVER_NS, KEYS_NS, PROPERTIES_NS, PUBSERVER_CONTENT_NS, PUBSERVER_NS, SIGNERS_NS, TA_PROXY_SERVER_NS, diff --git a/src/upgrades/mod.rs b/src/upgrades/mod.rs index be9da0f8b..1e77603dc 100644 --- a/src/upgrades/mod.rs +++ b/src/upgrades/mod.rs @@ -17,9 +17,10 @@ use crate::{ api::{AspaDefinition, AspaDefinitionUpdates, CustomerAsn, ProviderAsn}, error::KrillIoError, eventsourcing::{ - segment, Aggregate, AggregateStore, AggregateStoreError, Key, KeyValueError, KeyValueStore, Scope, Segment, - SegmentExt, Storable, StoredCommand, WalStore, WalStoreError, WithStorableDetails, + Aggregate, AggregateStore, AggregateStoreError, Storable, StoredCommand, WalStore, WalStoreError, + WithStorableDetails, }, + storage::{segment, Key, KeyValueError, KeyValueStore, Scope, Segment, SegmentExt}, util::KrillVersion, KrillResult, }, diff --git a/src/upgrades/pre_0_10_0/cas_migration.rs b/src/upgrades/pre_0_10_0/cas_migration.rs index 9e2c8b4de..b9ed15ebf 100644 --- a/src/upgrades/pre_0_10_0/cas_migration.rs +++ b/src/upgrades/pre_0_10_0/cas_migration.rs @@ -10,7 +10,8 @@ use crate::upgrades::{AspaMigrationConfigUpdates, AspaMigrationConfigs, CommandM use crate::{ commons::{ api::CertAuthStorableCommand, - eventsourcing::{AggregateStore, Key, KeyValueStore, Segment, SegmentExt}, + eventsourcing::AggregateStore, + storage::{Key, KeyValueStore, Segment, SegmentExt}, }, constants::{CASERVER_NS, CA_OBJECTS_NS}, daemon::{ diff --git a/src/upgrades/pre_0_10_0/pubd_migration.rs b/src/upgrades/pre_0_10_0/pubd_migration.rs index 8f31a7785..1c742d91f 100644 --- a/src/upgrades/pre_0_10_0/pubd_migration.rs +++ b/src/upgrades/pre_0_10_0/pubd_migration.rs @@ -3,7 +3,8 @@ use rpki::{ca::idexchange::MyHandle, repository::x509::Time}; use crate::{ commons::{ api::StorableRepositoryCommand, - eventsourcing::{segment, AggregateStore, KeyValueStore, Scope, Segment, StoredCommandBuilder}, + eventsourcing::{AggregateStore, StoredCommandBuilder}, + storage::{segment, KeyValueStore, Scope, Segment}, util::KrillVersion, }, constants::PUBSERVER_NS, diff --git a/src/upgrades/pre_0_14_0/cas_migration.rs b/src/upgrades/pre_0_14_0/cas_migration.rs index 2a15bb62b..df365389f 100644 --- a/src/upgrades/pre_0_14_0/cas_migration.rs +++ b/src/upgrades/pre_0_14_0/cas_migration.rs @@ -9,10 +9,7 @@ use crate::upgrades::{ UpgradeAggregateStorePre0_14, UpgradeMode, }; use crate::{ - commons::{ - api::CertAuthStorableCommand, - eventsourcing::{AggregateStore, KeyValueStore}, - }, + commons::{api::CertAuthStorableCommand, 
eventsourcing::AggregateStore, storage::KeyValueStore},
     constants::CASERVER_NS,
     daemon::{
         ca::{CertAuth, CertAuthEvent, CertAuthInitEvent},
diff --git a/src/upgrades/pre_0_14_0/mod.rs b/src/upgrades/pre_0_14_0/mod.rs
index 779e32754..92ec5da1d 100644
--- a/src/upgrades/pre_0_14_0/mod.rs
+++ b/src/upgrades/pre_0_14_0/mod.rs
@@ -17,9 +17,9 @@ use crate::{
         api::{AspaDefinition, CustomerAsn},
         crypto::dispatch::signerinfo::{SignerInfo, SignerInfoEvent, SignerInfoInitEvent},
         eventsourcing::{
-            Aggregate, AggregateStore, KeyValueStore, Storable, StoredCommand, StoredCommandBuilder,
-            WithStorableDetails,
+            Aggregate, AggregateStore, Storable, StoredCommand, StoredCommandBuilder, WithStorableDetails,
         },
+        storage::KeyValueStore,
     },
     daemon::{
         ca::{CertAuthEvent, CertAuthInitEvent},

From 3a2be662b91441a3c20a045cae280054dd756c4b Mon Sep 17 00:00:00 2001
From: Tim Bruijnzeels
Date: Wed, 8 Nov 2023 15:17:20 +0100
Subject: [PATCH 2/4] Move Storable to storage::types

---
 src/commons/eventsourcing/agg.rs   | 6 ++++--
 src/commons/eventsourcing/cmd.rs   | 3 ++-
 src/commons/eventsourcing/evt.rs   | 2 +-
 src/commons/eventsourcing/store.rs | 6 ------
 src/commons/eventsourcing/wal.rs   | 3 +--
 src/commons/storage/mod.rs         | 3 +++
 src/commons/storage/types.rs       | 6 ++++++
 src/upgrades/mod.rs                | 4 ++--
 src/upgrades/pre_0_14_0/mod.rs     | 5 ++---
 9 files changed, 21 insertions(+), 17 deletions(-)
 create mode 100644 src/commons/storage/types.rs

diff --git a/src/commons/eventsourcing/agg.rs b/src/commons/eventsourcing/agg.rs
index 640c68e09..cbe79f819 100644
--- a/src/commons/eventsourcing/agg.rs
+++ b/src/commons/eventsourcing/agg.rs
@@ -1,7 +1,9 @@
 use rpki::ca::idexchange::MyHandle;
 
-use super::{AggregateStoreError, Command, Event, InitCommand, InitEvent, Storable, StoredCommand};
-use crate::commons::eventsourcing::WithStorableDetails;
+use crate::commons::{
+    eventsourcing::{AggregateStoreError, Command, Event, InitCommand, InitEvent, StoredCommand, WithStorableDetails},
+    storage::Storable,
+};
 
 //------------ Aggregate -----------------------------------------------------
 
diff --git a/src/commons/eventsourcing/cmd.rs b/src/commons/eventsourcing/cmd.rs
index e9ba291df..c1ea65c54 100644
--- a/src/commons/eventsourcing/cmd.rs
+++ b/src/commons/eventsourcing/cmd.rs
@@ -5,7 +5,8 @@ use rpki::{ca::idexchange::MyHandle, repository::x509::Time};
 use crate::commons::{
     actor::Actor,
     api::{CommandHistoryRecord, CommandSummary},
-    eventsourcing::{Event, InitEvent, Storable},
+    eventsourcing::{Event, InitEvent},
+    storage::Storable,
 };
 
 use super::Aggregate;
diff --git a/src/commons/eventsourcing/evt.rs b/src/commons/eventsourcing/evt.rs
index 2a3641c15..b833faa09 100644
--- a/src/commons/eventsourcing/evt.rs
+++ b/src/commons/eventsourcing/evt.rs
@@ -1,6 +1,6 @@
 use std::fmt;
 
-use super::Storable;
+use crate::commons::storage::Storable;
 
 pub trait InitEvent: fmt::Display + Eq + PartialEq + Send + Sync + Storable + 'static {}
 pub trait Event: fmt::Display + Eq + PartialEq + Send + Sync + Storable + 'static {}
diff --git a/src/commons/eventsourcing/store.rs b/src/commons/eventsourcing/store.rs
index b3693dcdf..beb02acaf 100644
--- a/src/commons/eventsourcing/store.rs
+++ b/src/commons/eventsourcing/store.rs
@@ -7,7 +7,6 @@ use std::{
 
 use kvx::Namespace;
 use rpki::{ca::idexchange::MyHandle, repository::x509::Time};
-use serde::{de::DeserializeOwned, Serialize};
 use url::Url;
 
 use crate::commons::{
@@ -23,11 +22,6 @@ use super::InitCommand;
 
 pub type StoreResult<T> = Result<T, AggregateStoreError>;
 
-//------------ Storable
------------------------------------------------------
-
-pub trait Storable: Clone + Serialize + DeserializeOwned + Sized + 'static {}
-impl<T: Clone + Serialize + DeserializeOwned + Sized + 'static> Storable for T {}
-
 //------------ AggregateStore ------------------------------------------------
 
 /// This type is responsible for managing aggregates.
diff --git a/src/commons/eventsourcing/wal.rs b/src/commons/eventsourcing/wal.rs
index ef7716c3a..5b5637dcb 100644
--- a/src/commons/eventsourcing/wal.rs
+++ b/src/commons/eventsourcing/wal.rs
@@ -10,8 +10,7 @@ use rpki::ca::idexchange::MyHandle;
 use serde::Serialize;
 use url::Url;
 
-use crate::commons::eventsourcing::Storable;
-use crate::commons::storage::{segment, Key, KeyValueError, KeyValueStore, Scope, Segment, SegmentExt};
+use crate::commons::storage::{segment, Key, KeyValueError, KeyValueStore, Scope, Segment, SegmentExt, Storable};
 
 //------------ WalSupport ----------------------------------------------------
 
diff --git a/src/commons/storage/mod.rs b/src/commons/storage/mod.rs
index 3943eead9..b9855d496 100644
--- a/src/commons/storage/mod.rs
+++ b/src/commons/storage/mod.rs
@@ -2,3 +2,6 @@ mod kv;
 pub use self::kv::{
     namespace, segment, Key, KeyValueError, KeyValueStore, Namespace, Scope, Segment, SegmentBuf, SegmentExt,
 };
+
+mod types;
+pub use types::*;
diff --git a/src/commons/storage/types.rs b/src/commons/storage/types.rs
new file mode 100644
index 000000000..ceada29e3
--- /dev/null
+++ b/src/commons/storage/types.rs
@@ -0,0 +1,6 @@
+use serde::{de::DeserializeOwned, Serialize};
+
+//------------ Storable ------------------------------------------------------
+
+pub trait Storable: Clone + Serialize + DeserializeOwned + Sized + 'static {}
+impl<T: Clone + Serialize + DeserializeOwned + Sized + 'static> Storable for T {}
diff --git a/src/upgrades/mod.rs b/src/upgrades/mod.rs
index 1e77603dc..c7438957b 100644
--- a/src/upgrades/mod.rs
+++ b/src/upgrades/mod.rs
@@ -17,9 +17,9 @@ use crate::{
         api::{AspaDefinition, AspaDefinitionUpdates, CustomerAsn, ProviderAsn},
         error::KrillIoError,
         eventsourcing::{
-            Aggregate, AggregateStore, AggregateStoreError, Storable, StoredCommand, WalStore, WalStoreError,
-            WithStorableDetails,
+            Aggregate, AggregateStore, AggregateStoreError, StoredCommand, WalStore, WalStoreError, WithStorableDetails,
         },
+        storage::Storable,
         storage::{segment, Key, KeyValueError, KeyValueStore, Scope, Segment, SegmentExt},
         util::KrillVersion,
         KrillResult,
     },
diff --git a/src/upgrades/pre_0_14_0/mod.rs b/src/upgrades/pre_0_14_0/mod.rs
index 92ec5da1d..f51480c2b 100644
--- a/src/upgrades/pre_0_14_0/mod.rs
+++ b/src/upgrades/pre_0_14_0/mod.rs
@@ -16,10 +16,9 @@ use crate::{
     commons::{
         api::{AspaDefinition, CustomerAsn},
         crypto::dispatch::signerinfo::{SignerInfo, SignerInfoEvent, SignerInfoInitEvent},
-        eventsourcing::{
-            Aggregate, AggregateStore, Storable, StoredCommand, StoredCommandBuilder, WithStorableDetails,
-        },
+        eventsourcing::{Aggregate, AggregateStore, StoredCommand, StoredCommandBuilder, WithStorableDetails},
         storage::KeyValueStore,
+        storage::Storable,
     },
     daemon::{
         ca::{CertAuthEvent, CertAuthInitEvent},

From 43edac3ae23060c5140844c883f08ad3539154cf Mon Sep 17 00:00:00 2001
From: Tim Bruijnzeels
Date: Tue, 14 Nov 2023 15:40:46 +0100
Subject: [PATCH 3/4] Embed used functions from kvx and convert from trait to enum.
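
This replaces the external kvx crate with a storage module of our own.
The key structural change is that the former `dyn KeyValueStoreBackend`
trait object becomes an enum with one variant per backend, so every
operation dispatches through a single match. A minimal sketch of that
dispatch pattern, using hypothetical stand-in types (`MemStore` and
`DiskStore` below are illustrative names only, not the actual `Memory`
and `Disk` types introduced by this patch):

    // Stand-in backends; the real ones wrap a HashMap and a directory tree.
    struct MemStore;
    struct DiskStore;

    impl MemStore {
        fn get(&self, _key: &str) -> Option<String> { None }
    }

    impl DiskStore {
        fn get(&self, _key: &str) -> Option<String> { None }
    }

    // One variant per backend; each operation matches once and delegates.
    enum Store {
        Memory(MemStore),
        Disk(DiskStore),
    }

    impl Store {
        fn get(&self, key: &str) -> Option<String> {
            match self {
                Store::Memory(m) => m.get(key),
                Store::Disk(d) => d.get(key),
            }
        }
    }

Unlike a trait object, the enum keeps the concrete store types visible,
which allows generic methods such as `execute<F, T>` that would not be
object safe on a `dyn` trait.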
--- Cargo.lock | 249 +----- Cargo.toml | 6 +- src/cli/ta_client.rs | 10 +- .../crypto/signing/signers/softsigner.rs | 8 +- src/commons/error.rs | 6 - src/commons/eventsourcing/mod.rs | 9 +- src/commons/eventsourcing/store.rs | 15 +- src/commons/eventsourcing/wal.rs | 7 +- src/commons/storage/disk.rs | 542 +++++++++++++ src/commons/storage/key.rs | 88 +++ src/commons/storage/kv.rs | 505 ++++++++---- src/commons/storage/memory.rs | 290 +++++++ src/commons/storage/mod.rs | 23 +- src/commons/storage/namespace.rs | 184 +++++ src/commons/storage/queue.rs | 732 ++++++++++++++++++ src/commons/storage/scope.rs | 210 +++++ src/commons/storage/segment.rs | 246 ++++++ src/constants.rs | 26 +- src/daemon/auth/common/crypt.rs | 14 +- src/daemon/ca/publishing.rs | 4 +- src/daemon/ca/status.rs | 35 +- src/daemon/config.rs | 3 +- src/daemon/mq.rs | 59 +- src/daemon/scheduler.rs | 4 +- src/ta/mod.rs | 6 +- src/test.rs | 5 +- src/upgrades/data_migration.rs | 3 +- src/upgrades/mod.rs | 40 +- src/upgrades/pre_0_10_0/cas_migration.rs | 24 +- src/upgrades/pre_0_10_0/pubd_migration.rs | 4 +- src/upgrades/pre_0_14_0/mod.rs | 4 +- 31 files changed, 2807 insertions(+), 554 deletions(-) create mode 100644 src/commons/storage/disk.rs create mode 100644 src/commons/storage/key.rs create mode 100644 src/commons/storage/memory.rs create mode 100644 src/commons/storage/namespace.rs create mode 100644 src/commons/storage/queue.rs create mode 100644 src/commons/storage/scope.rs create mode 100644 src/commons/storage/segment.rs diff --git a/Cargo.lock b/Cargo.lock index 13b080153..a95a3b1f9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -59,17 +59,6 @@ dependencies = [ "term", ] -[[package]] -name = "async-trait" -version = "0.1.68" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.15", -] - [[package]] name = "atty" version = "0.2.14" @@ -191,12 +180,6 @@ version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" -[[package]] -name = "byteorder" -version = "1.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" - [[package]] name = "bytes" version = "1.4.0" @@ -466,7 +449,6 @@ checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ "block-buffer 0.10.4", "crypto-common", - "subtle", ] [[package]] @@ -566,12 +548,6 @@ dependencies = [ "backtrace", ] -[[package]] -name = "fallible-iterator" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" - [[package]] name = "fastrand" version = "1.9.0" @@ -821,15 +797,6 @@ dependencies = [ "digest 0.9.0", ] -[[package]] -name = "hmac" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" -dependencies = [ - "digest 0.10.6", -] - [[package]] name = "http" version = "0.2.9" @@ -881,7 +848,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.9", + "socket2", "tokio", "tower-service", "tracing", @@ -1100,7 +1067,7 @@ dependencies = [ "intervaltree", "jmespatch", "kmip-protocol", - "kvx", + "lazy_static", "libc", "libflate", "log", @@ -1118,6 +1085,7 @@ dependencies = [ 
"serde", "serde_json", "syslog", + "tempfile", "tokio", "tokio-rustls", "toml", @@ -1127,49 +1095,6 @@ dependencies = [ "uuid", ] -[[package]] -name = "kvx" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81ac98d13b1cc81be8b77286558858bb31f1d9bc643ec77142c1da4f47626a63" -dependencies = [ - "kvx_macros", - "kvx_types", - "lazy_static", - "postgres", - "postgres-types", - "r2d2_postgres", - "rand", - "serde_json", - "tempfile", - "thiserror", - "url", -] - -[[package]] -name = "kvx_macros" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c4c60bb89a1b1420cddf8e5d81f35f7f38a64d23a96cac2c395488ef219c9ff" -dependencies = [ - "kvx_types", - "proc-macro-error", - "proc-macro2", - "quote", - "syn 2.0.15", -] - -[[package]] -name = "kvx_types" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35cc4a6725d21de7d030b6cfccdd2e3b75d1ac351cbbe5b55938ae4bf71f0434" -dependencies = [ - "postgres", - "postgres-types", - "thiserror", -] - [[package]] name = "lalrpop" version = "0.19.9" @@ -1304,15 +1229,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "md-5" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" -dependencies = [ - "digest 0.10.6", -] - [[package]] name = "memchr" version = "2.5.0" @@ -1603,15 +1519,6 @@ dependencies = [ "indexmap", ] -[[package]] -name = "phf" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "928c6535de93548188ef63bb7c4036bd415cd8f36ad25af44b9789b2ee72a48c" -dependencies = [ - "phf_shared 0.11.1", -] - [[package]] name = "phf_shared" version = "0.10.0" @@ -1621,15 +1528,6 @@ dependencies = [ "siphasher", ] -[[package]] -name = "phf_shared" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1fb5f6f826b772a8d4c0394209441e7d37cbbb967ae9c7e0e8134365c9ee676" -dependencies = [ - "siphasher", -] - [[package]] name = "pico-args" version = "0.4.2" @@ -1670,63 +1568,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "postgres" -version = "0.19.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bed5017bc2ff49649c0075d0d7a9d676933c1292480c1d137776fb205b5cd18" -dependencies = [ - "bytes", - "fallible-iterator", - "futures-util", - "log", - "tokio", - "tokio-postgres", -] - -[[package]] -name = "postgres-derive" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "070ffaa78859c779b19f9358ce035480479cf2619e968593ffbe72abcb6e0fcf" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.15", -] - -[[package]] -name = "postgres-protocol" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b7fa9f396f51dffd61546fd8573ee20592287996568e6175ceb0f8699ad75d" -dependencies = [ - "base64 0.21.0", - "byteorder", - "bytes", - "fallible-iterator", - "hmac 0.12.1", - "md-5", - "memchr", - "rand", - "sha2 0.10.6", - "stringprep", -] - -[[package]] -name = "postgres-types" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f028f05971fe20f512bcc679e2c10227e57809a3af86a7606304435bc8896cd6" -dependencies = [ - "bytes", - "fallible-iterator", - "postgres-derive", - "postgres-protocol", - "serde", - "serde_json", -] - [[package]] name = "ppv-lite86" version = "0.2.17" @@ 
-1739,30 +1580,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn 1.0.109", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2", - "quote", - "version_check", -] - [[package]] name = "proc-macro2" version = "1.0.56" @@ -1801,16 +1618,6 @@ dependencies = [ "scheduled-thread-pool", ] -[[package]] -name = "r2d2_postgres" -version = "0.18.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7029c56be658cb54f321e0bee597810ee16796b735fa2559d7056bf06b12230b" -dependencies = [ - "postgres", - "r2d2", -] - [[package]] name = "rand" version = "0.8.5" @@ -2087,7 +1894,7 @@ version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19230d10daad7f163d8c1fc8edf84fbe52ac71c2ebe5adf3f763aa1557b843e3" dependencies = [ - "hmac 0.10.1", + "hmac", "pbkdf2", "salsa20", "sha2 0.9.9", @@ -2316,16 +2123,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "socket2" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d283f86695ae989d1e18440a943880967156325ba025f05049946bff47bcc2b" -dependencies = [ - "libc", - "windows-sys 0.48.0", -] - [[package]] name = "spin" version = "0.5.2" @@ -2341,20 +2138,10 @@ dependencies = [ "new_debug_unreachable", "once_cell", "parking_lot", - "phf_shared 0.10.0", + "phf_shared", "precomputed-hash", ] -[[package]] -name = "stringprep" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ee348cb74b87454fff4b551cbf727025810a004f88aeacae7f85b87f4e9a1c1" -dependencies = [ - "unicode-bidi", - "unicode-normalization", -] - [[package]] name = "strsim" version = "0.8.0" @@ -2533,7 +2320,7 @@ dependencies = [ "num_cpus", "pin-project-lite", "signal-hook-registry", - "socket2 0.4.9", + "socket2", "tokio-macros", "windows-sys 0.45.0", ] @@ -2559,30 +2346,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "tokio-postgres" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e89f6234aa8fd43779746012fcf53603cdb91fdd8399aa0de868c2d56b6dde1" -dependencies = [ - "async-trait", - "byteorder", - "bytes", - "fallible-iterator", - "futures-channel", - "futures-util", - "log", - "parking_lot", - "percent-encoding", - "phf", - "pin-project-lite", - "postgres-protocol", - "postgres-types", - "socket2 0.5.2", - "tokio", - "tokio-util", -] - [[package]] name = "tokio-rustls" version = "0.22.0" diff --git a/Cargo.toml b/Cargo.toml index 256120fe6..de4471cda 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -38,10 +38,7 @@ jmespatch = { version = "^0.3", features = ["sync"], optional = true } kmip = { version = "0.4.2", package = "kmip-protocol", features = [ "tls-with-openssl", ], optional = true } -kvx = { version = "0.9.1", features = ["macros"] } -# kvx = { version = "0.9.0", git = "https://github.com/nlnetlabs/kvx", branch = "lockfiles-outside-scope", features = [ -# "macros", -# ] } +lazy_static = 
"1.4.0" libflate = "^1" log = "^0.4" once_cell = { version = "^1.7.2", optional = true } @@ -62,6 +59,7 @@ rpki = { version = "0.17.2", features = ["ca", "compat", "rrdp"] } scrypt = { version = "^0.6", optional = true, default-features = false } serde = { version = "^1.0", features = ["derive", "rc"] } serde_json = "^1.0" +tempfile = "3.1.0" tokio = { version = "1", features = [ "macros", "rt", diff --git a/src/cli/ta_client.rs b/src/cli/ta_client.rs index 73a2807db..3f1f2da23 100644 --- a/src/cli/ta_client.rs +++ b/src/cli/ta_client.rs @@ -20,7 +20,7 @@ use crate::{ crypto::KrillSigner, error::Error as KrillError, eventsourcing::{AggregateStore, AggregateStoreError}, - storage::{namespace, Namespace}, + storage::NamespaceBuf, util::{file, httpclient}, }, constants::{KRILL_CLI_API_ENV, KRILL_CLI_FORMAT_ENV, KRILL_TA_CLIENT_APP, KRILL_VERSION}, @@ -1005,8 +1005,12 @@ struct TrustAnchorSignerManager { impl TrustAnchorSignerManager { fn create(config: Config) -> Result { - let store = AggregateStore::create(&config.storage_uri, namespace!("signer"), config.use_history_cache) - .map_err(KrillError::AggregateStoreError)?; + let store = AggregateStore::create( + &config.storage_uri, + NamespaceBuf::parse_lossy("signer").as_ref(), + config.use_history_cache, + ) + .map_err(KrillError::AggregateStoreError)?; let ta_handle = TrustAnchorHandle::new("ta".into()); let config = Arc::new(config); let signer = config.signer()?; diff --git a/src/commons/crypto/signing/signers/softsigner.rs b/src/commons/crypto/signing/signers/softsigner.rs index be80d3183..c40b5327c 100644 --- a/src/commons/crypto/signing/signers/softsigner.rs +++ b/src/commons/crypto/signing/signers/softsigner.rs @@ -22,7 +22,7 @@ use url::Url; use crate::{ commons::{ crypto::{dispatch::signerinfo::SignerMapper, signers::error::SignerError, SignerHandle}, - storage::{Key, KeyValueStore, Segment, SegmentExt}, + storage::{Key, KeyValueStore, SegmentBuf}, }, constants::KEYS_NS, }; @@ -136,7 +136,7 @@ impl OpenSslSigner { let json = serde_json::to_value(&kp)?; match self .keys_store - .store(&Key::new_global(Segment::parse_lossy(&key_id.to_string())), &json) // key_id should always be a valid Segment + .store(&Key::new_global(SegmentBuf::parse_lossy(&key_id.to_string())), &json) // key_id should always be a valid Segment { Ok(_) => Ok(key_id), Err(err) => Err(SignerError::Other(format!("Failed to store key: {}:", err))), @@ -165,7 +165,7 @@ impl OpenSslSigner { // TODO decrypt key after read match self .keys_store - .get(&Key::new_global(Segment::parse_lossy(&key_id.to_string()))) // key_id should always be a valid Segment + .get(&Key::new_global(SegmentBuf::parse_lossy(&key_id.to_string()))) // key_id should always be a valid Segment { Ok(Some(kp)) => Ok(kp), Ok(None) => Err(SignerError::KeyNotFound), @@ -218,7 +218,7 @@ impl OpenSslSigner { pub fn destroy_key(&self, key_id: &KeyIdentifier) -> Result<(), KeyError> { self.keys_store - .drop_key(&Key::new_global(Segment::parse_lossy(&key_id.to_string()))) // key_id should always be a valid Segment + .drop_key(&Key::new_global(SegmentBuf::parse_lossy(&key_id.to_string()))) // key_id should always be a valid Segment .map_err(|_| KeyError::Signer(SignerError::KeyNotFound)) } diff --git a/src/commons/error.rs b/src/commons/error.rs index e52d662cb..2028be5ac 100644 --- a/src/commons/error.rs +++ b/src/commons/error.rs @@ -559,12 +559,6 @@ impl From for Error { } } -impl From for Error { - fn from(e: kvx::Error) -> Self { - Error::KeyValueError(KeyValueError::Inner(e)) - } -} - impl From for 
     fn from(e: AggregateStoreError) -> Self {
         Error::AggregateStoreError(e)
diff --git a/src/commons/eventsourcing/mod.rs b/src/commons/eventsourcing/mod.rs
index bca297630..922af95ac 100644
--- a/src/commons/eventsourcing/mod.rs
+++ b/src/commons/eventsourcing/mod.rs
@@ -38,7 +38,7 @@ mod tests {
         commons::{
             actor::Actor,
             api::{CommandHistoryCriteria, CommandSummary},
-            storage::{namespace, Namespace},
+            storage::NamespaceBuf,
         },
         constants::ACTOR_DEF_TEST,
         test::mem_storage,
@@ -338,7 +338,9 @@ mod tests {
 
         let counter = Arc::new(EventCounter::default());
 
-        let mut manager = AggregateStore::<Person>::create(&storage_uri, namespace!("person"), false).unwrap();
+        let mut manager =
+            AggregateStore::<Person>::create(&storage_uri, NamespaceBuf::parse_lossy("person").as_ref(), false)
+                .unwrap();
         manager.add_post_save_listener(counter.clone());
 
         let alice_name = "alice smith".to_string();
@@ -371,7 +373,8 @@ mod tests {
         assert_eq!(21, alice.age());
 
         // Should read state again when restarted with same data store mapping.
-        let manager = AggregateStore::<Person>::create(&storage_uri, namespace!("person"), false).unwrap();
+        let manager =
+            AggregateStore::<Person>::create(&storage_uri, &NamespaceBuf::parse_lossy("person"), false).unwrap();
 
         let alice = manager.get_latest(&alice_handle).unwrap();
         assert_eq!("alice smith-doe", alice.name());
diff --git a/src/commons/eventsourcing/store.rs b/src/commons/eventsourcing/store.rs
index beb02acaf..95c3fb489 100644
--- a/src/commons/eventsourcing/store.rs
+++ b/src/commons/eventsourcing/store.rs
@@ -5,7 +5,6 @@ use std::{
     sync::{Arc, Mutex, RwLock},
 };
 
-use kvx::Namespace;
 use rpki::{ca::idexchange::MyHandle, repository::x509::Time};
 use url::Url;
 
 use crate::commons::{
@@ -15,7 +14,7 @@ use crate::commons::{
     eventsourcing::{
         cmd::Command, Aggregate, PostSaveEventListener, PreSaveEventListener, StoredCommand, StoredCommandBuilder,
     },
-    storage::{segment, Key, KeyValueError, KeyValueStore, Scope, Segment, SegmentExt},
+    storage::{Key, KeyValueError, KeyValueStore, Namespace, Scope, SegmentBuf},
 };
 
 use super::InitCommand;
@@ -204,10 +203,8 @@
         self.aggregates()
     }
 
-    /// Get the latest aggregate and optionally apply a command to it.
-    ///
-    /// Uses `kvx::execute` to ensure that the whole operation is done inside
-    /// a transaction (postgres) or lock (disk).
+    /// Get the latest aggregate and optionally apply a command to it, all
+    /// inside a single transaction (postgres) or lock (disk).
fn execute_opt_command(
         &self,
         handle: &MyHandle,
@@ -521,17 +518,17 @@ where A::Error: From<AggregateStoreError>,
 {
     fn scope_for_agg(agg: &MyHandle) -> Scope {
-        Scope::from_segment(Segment::parse_lossy(agg.as_str())) // agg should always be a valid Segment
+        Scope::from_segment(SegmentBuf::parse_lossy(agg.as_str())) // agg should always be a valid Segment
     }
 
     fn key_for_snapshot(agg: &MyHandle) -> Key {
-        Key::new_scoped(Self::scope_for_agg(agg), segment!("snapshot.json"))
+        Key::new_scoped(Self::scope_for_agg(agg), SegmentBuf::parse_lossy("snapshot.json"))
     }
 
     fn key_for_command(agg: &MyHandle, version: u64) -> Key {
         Key::new_scoped(
             Self::scope_for_agg(agg),
-            Segment::parse(&format!("command-{}.json", version)).unwrap(), // cannot panic as a u64 cannot contain a Scope::SEPARATOR
+            SegmentBuf::parse_lossy(&format!("command-{}.json", version)),
         )
     }
 
diff --git a/src/commons/eventsourcing/wal.rs b/src/commons/eventsourcing/wal.rs
index 5b5637dcb..38a509b43 100644
--- a/src/commons/eventsourcing/wal.rs
+++ b/src/commons/eventsourcing/wal.rs
@@ -5,12 +5,11 @@ use std::{
     sync::{Arc, RwLock},
 };
 
-use kvx::Namespace;
 use rpki::ca::idexchange::MyHandle;
 use serde::Serialize;
 use url::Url;
 
-use crate::commons::storage::{segment, Key, KeyValueError, KeyValueStore, Scope, Segment, SegmentExt, Storable};
+use crate::commons::storage::{Key, KeyValueError, KeyValueStore, Namespace, Scope, Segment, SegmentBuf, Storable};
 
 //------------ WalSupport ----------------------------------------------------
 
@@ -398,11 +397,11 @@ impl<W: WalSupport> WalStore<W> {
     fn scope_for_handle(handle: &MyHandle) -> Scope {
         // handle should always be a valid Segment
-        Scope::from_segment(Segment::parse_lossy(handle.as_str()))
+        Scope::from_segment(SegmentBuf::parse_lossy(handle.as_str()))
     }
 
     fn key_for_snapshot(handle: &MyHandle) -> Key {
-        Key::new_scoped(Self::scope_for_handle(handle), segment!("snapshot.json"))
+        Key::new_scoped(Self::scope_for_handle(handle), SegmentBuf::parse_lossy("snapshot.json"))
     }
 
     fn key_for_wal_set(handle: &MyHandle, revision: u64) -> Key {
diff --git a/src/commons/storage/disk.rs b/src/commons/storage/disk.rs
new file mode 100644
index 000000000..0cbc401d7
--- /dev/null
+++ b/src/commons/storage/disk.rs
@@ -0,0 +1,542 @@
+use std::{
+    fmt::Display,
+    fs,
+    fs::{File, OpenOptions},
+    ops::{Deref, DerefMut},
+    path::{Component, Path, PathBuf},
+    thread,
+    time::Duration,
+};
+
+use serde_json::Value;
+
+use crate::commons::{
+    error::KrillIoError,
+    storage::{Key, KeyValueError, KeyValueStoreDispatcher, NamespaceBuf, Scope, SegmentBuf, StorageResult},
+};
+
+pub const LOCK_FILE_NAME: &str = "lockfile.lock";
+pub const LOCK_FILE_DIR: &str = ".locks";
+
+#[derive(Clone, Debug, Default, Eq, PartialEq, Ord, PartialOrd, Hash)]
+pub struct Disk {
+    root: PathBuf,
+    tmp: PathBuf,
+}
+
+impl Disk {
+    /// This will create a disk based store for the given (base) path and namespace.
+    ///
+    /// Under the hood this uses two directories: path/namespace and path/tmp.
+    /// The latter is used for temporary files for new values for existing keys. Such
+    /// values are written first and then renamed (moved) to avoid issues with partially
+    /// written files because of I/O issues (disk full) or concurrent reads of the key
+    /// as its value is being updated.
+    ///
+    /// Different instances of this disk based storage that use different namespaces,
+    /// but share the same (base) path will all use the same tmp directory. This is
+    /// not an issue as the temporary files will have unique names.
+    pub fn new(path: &str, namespace: &str) -> StorageResult<Self> {
+        let root = PathBuf::from(path).join(namespace);
+        let tmp = PathBuf::from(path).join("tmp");
+
+        if !tmp.exists() {
+            fs::create_dir_all(&tmp).map_err(|e| {
+                KeyValueError::IoError(KrillIoError::new(
+                    format!("Cannot create directory for tmp files: {}", tmp.display()),
+                    e,
+                ))
+            })?;
+        }
+
+        Ok(Disk { root, tmp })
+    }
+}
+
+impl Display for Disk {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "local://{}", self.root.display())
+    }
+}
+
+impl Disk {
+    pub fn is_empty(&self) -> StorageResult<bool> {
+        if let Ok(entries) = self.root.read_dir() {
+            for e in entries.into_iter().flatten() {
+                if !e.path().ends_with(LOCK_FILE_DIR) {
+                    return Ok(false);
+                }
+            }
+        }
+        // non existent dir counts as empty
+        Ok(true)
+    }
+
+    pub fn has(&self, key: &Key) -> StorageResult<bool> {
+        let exists = key.as_path(&self.root).exists();
+        Ok(exists)
+    }
+
+    pub fn has_scope(&self, scope: &Scope) -> StorageResult<bool> {
+        let exists = scope.as_path(&self.root).try_exists().map_err(|e| {
+            KeyValueError::IoError(KrillIoError::new(format!("cannot get path for scope: {}", scope), e))
+        })?;
+        Ok(exists)
+    }
+
+    pub fn get(&self, key: &Key) -> StorageResult<Option<Value>> {
+        let path = key.as_path(&self.root);
+        if path.exists() {
+            let value =
+                fs::read_to_string(key.as_path(&self.root)).map_err(|_| KeyValueError::UnknownKey(key.clone()))?;
+
+            let value: Value = serde_json::from_str(&value)?;
+            Ok(Some(value))
+        } else {
+            Ok(None)
+        }
+    }
+
+    pub fn list_keys(&self, scope: &Scope) -> StorageResult<Vec<Key>> {
+        let path = scope.as_path(&self.root);
+        if !path.exists() {
+            return Ok(vec![]);
+        }
+
+        let lock_file_segment = SegmentBuf::parse_lossy(LOCK_FILE_NAME);
+
+        list_files_recursive(scope.as_path(&self.root))?
+            .into_iter()
+            .map(|path| path.as_key(&self.root))
+            .filter(|key_res| match key_res {
+                Ok(key) => key.name() != lock_file_segment.as_ref(),
+                _ => true,
+            })
+            .collect()
+    }
+
+    pub fn list_scopes(&self) -> StorageResult<Vec<Scope>> {
+        list_dirs_recursive(Scope::global().as_path(&self.root))?
+            .into_iter()
+            .map(|path| path.as_scope(&self.root))
+            .collect()
+    }
+}
+
+impl Disk {
+    pub fn store(&self, key: &Key, value: Value) -> StorageResult<()> {
+        let path = key.as_path(&self.root);
+        let dir = key.scope().as_path(&self.root);
+
+        if key.scope().to_string().starts_with(LOCK_FILE_DIR) {
+            return Err(KeyValueError::InvalidKey(key.clone()));
+        }
+
+        if !dir.try_exists().unwrap_or_default() {
+            fs::create_dir_all(&dir).map_err(|e| {
+                KeyValueError::IoError(KrillIoError::new(
+                    format!("cannot create dir for path: {}", dir.display()),
+                    e,
+                ))
+            })?;
+        }
+
+        if path.exists() {
+            // tempfile ensures that the temporary file is cleaned up in case it
+            // would be left behind because of some issue.
+            let tmp_file = tempfile::NamedTempFile::new_in(&self.tmp).map_err(|e| {
+                KeyValueError::IoError(KrillIoError::new(
+                    format!(
+                        "Issue writing tmp file for key: {}. Check permissions and space on disk.",
+                        key
+                    ),
+                    e,
+                ))
+            })?;
+
+            fs::write(&tmp_file, format!("{:#}", value).as_bytes()).map_err(|e| {
+                KeyValueError::IoError(KrillIoError::new(
+                    format!(
+                        "Issue writing tmp file: {} for key: {}. Check permissions and space on disk.",
+                        tmp_file.as_ref().display(),
+                        key
+                    ),
+                    e,
+                ))
+            })?;
+
+            // persist ensures that the temporary file is not deleted
+            // when the instance is dropped.
+ tmp_file.persist(&path).map_err(|e| { + KeyValueError::IoError(KrillIoError::new( + format!( + "Cannot rename temp file {} to {}.", + e.file.path().display(), + path.display() + ), + e.error, + )) + })?; + } else { + fs::write(&path, format!("{:#}", value).as_bytes()).map_err(|e| { + KeyValueError::IoError(KrillIoError::new( + format!("cannot write file at: {}", path.display()), + e, + )) + })?; + } + + Ok(()) + } + + pub fn move_value(&self, from: &Key, to: &Key) -> StorageResult<()> { + let from_path = from.as_path(&self.root); + let to_path = to.as_path(&self.root); + + let dir = to.scope().as_path(&self.root); + if !dir.try_exists().unwrap_or_default() { + fs::create_dir_all(&dir).map_err(|e| { + KeyValueError::IoError(KrillIoError::new(format!("cannot create dir for {}", dir.display()), e)) + })?; + } + + fs::rename(&from_path, &to_path).map_err(|e| { + KeyValueError::IoError(KrillIoError::new( + format!( + "cannot rename file from {} to {}", + from_path.display(), + to_path.display() + ), + e, + )) + })?; + remove_empty_parent_dirs(from_path.parent().ok_or(KeyValueError::Other(format!( + "cannot get parent for path: {}", + from_path.display() + )))?); + + Ok(()) + } + + pub fn delete(&self, key: &Key) -> StorageResult<()> { + let path = key.as_path(&self.root); + + fs::remove_file(&path).map_err(|e| { + KeyValueError::IoError(KrillIoError::new(format!("cannot remove file: {}", path.display()), e)) + })?; + remove_empty_parent_dirs(path.parent().ok_or(KeyValueError::Other(format!( + "cannot get parent dir for: {}", + path.display() + )))?); + + Ok(()) + } + + pub fn delete_scope(&self, scope: &Scope) -> StorageResult<()> { + let path = scope.as_path(&self.root); + + fs::remove_dir_all(&path).map_err(|e| { + KeyValueError::IoError(KrillIoError::new( + format!("cannot remove dir for {}", path.display()), + e, + )) + })?; + remove_empty_parent_dirs(path); + + Ok(()) + } + + pub fn clear(&self) -> StorageResult<()> { + if self.root.exists() { + let _ = fs::remove_dir_all(&self.root); + } + + Ok(()) + } + + pub fn migrate_namespace(&mut self, namespace: NamespaceBuf) -> StorageResult<()> { + let root_parent = self.root.parent().ok_or(KeyValueError::Other(format!( + "cannot get parent dir for: {}", + self.root.display() + )))?; + + let new_root = root_parent.join(namespace.as_str()); + + if new_root.exists() { + // If the target directory already exists, then it must be empty. + if new_root + .read_dir() + .map_err(|e| { + KeyValueError::Other(format!("cannot read directory '{}'. Error: {}", new_root.display(), e,)) + })? + .next() + .is_some() + { + return Err(KeyValueError::Other(format!( + "target dir {} already exists and is not empty", + new_root.display(), + ))); + } + } + + fs::rename(&self.root, &new_root).map_err(|e| { + KeyValueError::Other(format!( + "cannot rename dir from {} to {}. 
Error: {}", + self.root.display(), + new_root.display(), + e + )) + })?; + self.root = new_root; + Ok(()) + } +} + +impl Disk { + pub fn execute(&self, scope: &Scope, op: F) -> Result + where + F: FnOnce(&KeyValueStoreDispatcher) -> Result, + { + let lock_file_dir = self.root.join(LOCK_FILE_DIR); + + let _lock = FileLock::lock(scope.as_path(lock_file_dir))?; + + let dispatcher = KeyValueStoreDispatcher::Disk(self); + op(&dispatcher) + } +} + +trait AsPath { + fn as_path(&self, root: impl AsRef) -> PathBuf; +} + +impl AsPath for Key { + fn as_path(&self, root: impl AsRef) -> PathBuf { + let mut path = root.as_ref().to_path_buf(); + for segment in self.scope() { + path.push(segment.as_str()); + } + path.push(self.name().as_str()); + path + } +} + +impl AsPath for Scope { + fn as_path(&self, root: impl AsRef) -> PathBuf { + let mut path = root.as_ref().to_path_buf(); + for segment in self { + path.push(segment.as_str()); + } + path + } +} + +trait PathBufExt { + fn as_key(&self, root: impl AsRef) -> StorageResult; + + fn as_scope(&self, root: impl AsRef) -> StorageResult; +} + +impl PathBufExt for PathBuf { + fn as_key(&self, root: impl AsRef) -> StorageResult { + let file_name = self + .file_name() + .ok_or(KeyValueError::Other(format!( + "cannot get file name from path: {}", + self.display() + )))? + .to_string_lossy() + .to_string(); + + let name: SegmentBuf = file_name.parse().map_err(|e| { + KeyValueError::Other(format!( + "Cannot get key segments from path '{}'. Error: {}", + file_name, e + )) + })?; + + let scope = self + .parent() + .ok_or(KeyValueError::Other(format!( + "Cannot get parent path for {}", + self.display() + )))? + .to_path_buf() + .as_scope(root)?; + + Ok(Key::new_scoped(scope, name)) + } + + fn as_scope(&self, root: impl AsRef) -> StorageResult { + let segments = self + .strip_prefix(root) + .map_err(|e| KeyValueError::Other(format!("cannot strip prefix: {}", e)))? + .components() + .map(|component| match component { + Component::Prefix(_) | Component::RootDir | Component::CurDir | Component::ParentDir => { + Err(KeyValueError::Other(format!( + "unexpected path component: {}", + component.as_os_str().to_string_lossy() + ))) + } + Component::Normal(segment) => { + let segment: SegmentBuf = segment.to_string_lossy().parse().map_err(|e| { + KeyValueError::Other(format!( + "cannot convert path component '{}' to segment. 
Error: {}", + segment.to_string_lossy(), + e + )) + })?; + Ok(segment) + } + }) + .collect::>()?; + + Ok(Scope::new(segments)) + } +} + +#[derive(Debug)] +struct FileLock { + file: File, + lock_path: PathBuf, +} + +impl FileLock { + const POLL_LOCK_INTERVAL: Duration = Duration::from_millis(10); + + pub fn lock(path: impl AsRef) -> StorageResult { + let path = path.as_ref(); + + let lock_path = path.join(LOCK_FILE_NAME); + if !path.try_exists().unwrap_or_default() { + fs::create_dir_all(path).map_err(|e| { + KeyValueError::IoError(KrillIoError::new( + format!("cannot create dir for lockfile {}", lock_path.display()), + e, + )) + })?; + } + + let file = loop { + let file = OpenOptions::new() + .create_new(true) + .read(true) + .write(true) + .open(&lock_path); + + match file { + Ok(file) => break file, + _ => thread::sleep(Self::POLL_LOCK_INTERVAL), + }; + }; + + let lock = FileLock { file, lock_path }; + + Ok(lock) + } + + pub fn unlock(&self) -> StorageResult<()> { + fs::remove_file(&self.lock_path).map_err(|e| { + KeyValueError::IoError(KrillIoError::new( + format!("cannot remove lock file {}", self.lock_path.display()), + e, + )) + })?; + Ok(()) + } +} + +impl Deref for FileLock { + type Target = File; + + fn deref(&self) -> &Self::Target { + &self.file + } +} + +impl DerefMut for FileLock { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.file + } +} + +impl Drop for FileLock { + fn drop(&mut self) { + self.unlock().ok(); + } +} + +fn list_files_recursive(dir: impl AsRef) -> StorageResult> { + let mut files = Vec::new(); + + for result in fs::read_dir(&dir).map_err(|e| { + KeyValueError::IoError(KrillIoError::new( + format!("cannot read dir {}", dir.as_ref().display()), + e, + )) + })? { + let path = result + .map_err(|e| { + KeyValueError::IoError(KrillIoError::new( + format!("cannot read entry in dir {}", dir.as_ref().display()), + e, + )) + })? + .path(); + + if path.is_dir() { + files.extend(list_files_recursive(path)?); + } else { + files.push(path); + } + } + + Ok(files) +} + +fn list_dirs_recursive(dir: impl AsRef) -> StorageResult> { + let mut dirs = Vec::new(); + + for result in fs::read_dir(&dir).map_err(|e| { + KeyValueError::IoError(KrillIoError::new( + format!("cannot read dir {}", dir.as_ref().display()), + e, + )) + })? { + let path = result + .map_err(|e| { + KeyValueError::IoError(KrillIoError::new( + format!("cannot get entry in dir {}", dir.as_ref().display()), + e, + )) + })? + .path(); + if path.is_dir() + && !path.ends_with(LOCK_FILE_DIR) + && path + .read_dir() + .map_err(|e| { + KeyValueError::IoError(KrillIoError::new( + format!("cannot read dir {}", dir.as_ref().display()), + e, + )) + })? + .next() + .is_some() + { + // a non-empty directory exists for the scope, recurse and add + dirs.extend(list_dirs_recursive(&path)?); + dirs.push(path); + } + } + + Ok(dirs) +} + +/// Removes the given directory and all empty parent directories. This function +/// only works on empty directories and will do nothing for files. +fn remove_empty_parent_dirs(path: impl AsRef) { + let mut ancestors = path.as_ref().ancestors(); + while ancestors.next().and_then(|path| fs::remove_dir(path).ok()).is_some() {} +} diff --git a/src/commons/storage/key.rs b/src/commons/storage/key.rs new file mode 100644 index 000000000..d4028731a --- /dev/null +++ b/src/commons/storage/key.rs @@ -0,0 +1,88 @@ +use std::{ + fmt::{Display, Formatter}, + str::FromStr, +}; + +use crate::commons::storage::{ParseSegmentError, Scope, Segment, SegmentBuf}; + +/// Represents the key used in KVx. 
+/// a `name` of type [`SegmentBuf`].
+#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
+pub struct Key {
+    scope: Scope,
+    name: SegmentBuf,
+}
+
+impl Key {
+    /// Create a `Key` from a [`Scope`] and a [`Segment`].
+    pub fn new_scoped(scope: Scope, name: impl Into<SegmentBuf>) -> Key {
+        Key {
+            name: name.into(),
+            scope,
+        }
+    }
+
+    /// Create a `Key` from a [`Segment`].
+    pub fn new_global(name: impl Into<SegmentBuf>) -> Key {
+        Key::new_scoped(Scope::default(), name)
+    }
+
+    /// Returns the name of a `Key` (without its scope).
+    pub fn name(&self) -> &Segment {
+        &self.name
+    }
+
+    /// Returns the scope of a `Key` (without its name).
+    pub fn scope(&self) -> &Scope {
+        &self.scope
+    }
+
+    /// Create a new [`Key`] and add a [`Segment`] to the end of its scope.
+    pub fn with_sub_scope(&self, sub_scope: impl Into<SegmentBuf>) -> Self {
+        let mut clone = self.clone();
+        clone.add_sub_scope(sub_scope);
+        clone
+    }
+
+    /// Add a [`Segment`] to the end of the scope of the key.
+    pub fn add_sub_scope(&mut self, sub_scope: impl Into<SegmentBuf>) {
+        self.scope.add_sub_scope(sub_scope);
+    }
+
+    /// Create a new [`Key`] and add a [`Segment`] to the front of its scope.
+    pub fn with_super_scope(&self, super_scope: impl Into<SegmentBuf>) -> Self {
+        let mut clone = self.clone();
+        clone.add_super_scope(super_scope);
+        clone
+    }
+
+    /// Add a [`Segment`] to the front of the scope of the key.
+    pub fn add_super_scope(&mut self, super_scope: impl Into<SegmentBuf>) {
+        self.scope.add_super_scope(super_scope);
+    }
+}
+
+impl Display for Key {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        if self.scope.is_global() {
+            write!(f, "{}", self.name)
+        } else {
+            write!(f, "{}{}{}", self.scope, Scope::SEPARATOR, self.name)
+        }
+    }
+}
+
+impl FromStr for Key {
+    type Err = ParseSegmentError;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        let mut segments: Vec<SegmentBuf> = s
+            .split(Scope::SEPARATOR)
+            .map(SegmentBuf::from_str)
+            .collect::<Result<_, _>>()?;
+        let name = segments.pop().unwrap();
+        let scope = Scope::new(segments);
+
+        Ok(Key { name, scope })
+    }
+}
diff --git a/src/commons/storage/kv.rs b/src/commons/storage/kv.rs
index 1db905ef9..536275058 100644
--- a/src/commons/storage/kv.rs
+++ b/src/commons/storage/kv.rs
@@ -1,58 +1,41 @@
-use std::{fmt, str::FromStr};
+use std::{collections::HashMap, fmt};
 
-pub use kvx::{namespace, segment, Key, Namespace, Scope, Segment, SegmentBuf};
-use kvx::{KeyValueStoreBackend, NamespaceBuf, ReadStore, WriteStore};
 use serde::{de::DeserializeOwned, Serialize};
+use serde_json::Value;
 use url::Url;
 
-use crate::commons::error::KrillIoError;
-
-pub trait SegmentExt {
-    fn parse_lossy(value: &str) -> SegmentBuf;
-    fn concat(lhs: impl Into<SegmentBuf>, rhs: impl Into<SegmentBuf>) -> SegmentBuf;
-}
-
-impl SegmentExt for Segment {
-    fn parse_lossy(value: &str) -> SegmentBuf {
-        match Segment::parse(value) {
-            Ok(segment) => segment.to_owned(),
-            Err(error) => {
-                let sanitized = value.trim().replace(Scope::SEPARATOR, "+");
-                let nonempty = sanitized.is_empty().then(|| "EMPTY".to_owned()).unwrap_or(sanitized);
-                let segment = Segment::parse(&nonempty).unwrap(); // cannot panic as all checks are performed above
-                warn!("{value} is not a valid Segment: {error}\nusing {segment} instead");
-                segment.to_owned()
-            }
-        }
-    }
-
-    fn concat(lhs: impl Into<SegmentBuf>, rhs: impl Into<SegmentBuf>) -> SegmentBuf {
-        Segment::parse(&format!("{}{}", lhs.into(), rhs.into()))
-            .unwrap()
-            .to_owned()
-    }
-}
+use crate::commons::{
+    error::KrillIoError,
+    storage::{Disk, Key, Memory, Namespace, NamespaceBuf, Scope},
+};
 
 #[derive(Debug)]
-pub struct KeyValueStore {
-    inner: kvx::KeyValueStore,
+pub enum KeyValueStore {
+    Memory(Memory),
+    Disk(Disk),
 }
 
 // # Construct and high level functions.
 impl KeyValueStore {
     /// Creates a new KeyValueStore.
     pub fn create(storage_uri: &Url, namespace: &Namespace) -> Result<Self, KeyValueError> {
-        kvx::KeyValueStore::new(storage_uri, namespace)
-            .map(|inner| KeyValueStore { inner })
-            .map_err(KeyValueError::Inner)
+        match storage_uri.scheme() {
+            "local" => {
+                let path = format!("{}{}", storage_uri.host_str().unwrap_or_default(), storage_uri.path());
+
+                Ok(KeyValueStore::Disk(Disk::new(&path, namespace.as_str())?))
+            }
+            "memory" => Ok(KeyValueStore::Memory(Memory::new(
+                storage_uri.host_str(),
+                namespace.to_owned(),
+            )?)),
+            scheme => Err(KeyValueError::UnknownScheme(scheme.to_owned()))?,
+        }
     }
 
     /// Returns true if this KeyValueStore (with this namespace) has any entries.
     pub fn is_empty(&self) -> Result<bool, KeyValueError> {
-        // NOTE: this is not done using `self.execute` as this would result in a lockfile
-        //       to be created for disk based inner stores, and that would make them
-        //       appear as not empty.
-        self.inner.is_empty().map_err(KeyValueError::Inner)
+        self.execute(&Scope::global(), |kv| kv.is_empty())
     }
 
     /// Wipe the complete store. Needless to say perhaps.. use with care..
@@ -60,25 +43,26 @@ impl KeyValueStore {
         self.execute(&Scope::global(), |kv| kv.clear())
     }
 
-    /// Execute one or more `kvx::KeyValueStoreBackend` operations
+    /// Execute one or more `KeyValueStoreDispatcher` operations
     /// within a transaction or scope lock context inside the given
     /// closure.
     ///
-    /// The closure needs to return a Result<T, kvx::Error>. This
-    /// allows the caller to simply use the ? operator on any kvx
-    /// calls that could result in an error within the closure. The
-    /// kvx::Error is mapped to a KeyValueError to avoid that the
-    /// caller needs to have any specific knowledge about the kvx::Error
-    /// type.
+    /// The closure needs to return a Result<T, KeyValueError>. This
+    /// allows the caller to simply use the ? operator on any kv
+    /// calls that could result in an error within the closure.
     ///
     /// T can be () if no return value is needed. If anything can
-    /// fail in the closure, other than kvx calls, then T can be
+    /// fail in the closure, other than kv calls, then T can be
     /// a Result.
     pub fn execute<F, T>(&self, scope: &Scope, op: F) -> Result<T, KeyValueError>
     where
-        F: FnMut(&dyn KeyValueStoreBackend) -> Result<T, kvx::Error>,
+        F: FnOnce(&KeyValueStoreDispatcher) -> Result<T, KeyValueError>,
     {
-        self.inner.execute(scope, op).map_err(KeyValueError::Inner)
+        let dispatcher = match self {
+            KeyValueStore::Memory(memory) => KeyValueStoreDispatcher::Memory(memory),
+            KeyValueStore::Disk(disk) => KeyValueStoreDispatcher::Disk(disk),
+        };
+        dispatcher.execute(scope, op)
     }
 }
 
@@ -86,20 +70,17 @@ impl KeyValueStore {
     /// Stores a key value pair, serialized as json, overwrite existing
     pub fn store<V: Serialize>(&self, key: &Key, value: &V) -> Result<(), KeyValueError> {
-        self.execute(key.scope(), &mut move |kv: &dyn KeyValueStoreBackend| {
+        self.execute(key.scope(), |kv: &KeyValueStoreDispatcher| {
             kv.store(key, serde_json::to_value(value)?)
         })
     }
 
     /// Stores a key value pair, serialized as json, fails if existing
     pub fn store_new<V: Serialize>(&self, key: &Key, value: &V) -> Result<(), KeyValueError> {
-        self.execute(
-            key.scope(),
-            &mut move |kv: &dyn KeyValueStoreBackend| match kv.get(key)? {
-                None => kv.store(key, serde_json::to_value(value)?),
-                _ => Err(kvx::Error::Unknown),
-            },
-        )
+        self.execute(key.scope(), |kv: &KeyValueStoreDispatcher| match kv.get(key)? {
+            None => kv.store(key, serde_json::to_value(value)?),
+            _ => Err(KeyValueError::DuplicateKey(key.to_owned())),
+        })
     }
 
     /// Gets a value for a key, returns an error if the value cannot be deserialized,
@@ -107,8 +88,10 @@
     /// returns None if it cannot be found.
     pub fn get<V: DeserializeOwned>(&self, key: &Key) -> Result<Option<V>, KeyValueError> {
         self.execute(key.scope(), |kv| {
             if let Some(value) = kv.get(key)? {
+                trace!("got value for key: {}", key);
                 Ok(Some(serde_json::from_value(value)?))
             } else {
+                trace!("got nothing for key: {}", key);
                 Ok(None)
             }
         })
@@ -119,6 +102,11 @@
         self.execute(key.scope(), |kv| kv.has(key))
     }
 
+    /// Returns all keys for the given scope
+    pub fn list_keys(&self, scope: &Scope) -> StorageResult<Vec<Key>> {
+        self.execute(scope, |kv| kv.list_keys(scope))
+    }
+
     /// Delete a key-value pair
     pub fn drop_key(&self, key: &Key) -> Result<(), KeyValueError> {
         self.execute(key.scope(), |kv| kv.delete(key))
@@ -131,7 +119,6 @@
     /// If matching is not empty then the key must contain the given `&str`.
     pub fn keys(&self, scope: &Scope, matching: &str) -> Result<Vec<Key>, KeyValueError> {
         self.execute(scope, |kv| {
-            // kvx list_keys returns keys in sub-scopes
             kv.list_keys(scope).map(|keys| {
                 keys.into_iter()
                     .filter(|key| {
@@ -141,6 +128,28 @@
             })
         })
     }
+
+    /// Returns all key value pairs under a scope.
+    pub fn key_value_pairs(&self, scope: &Scope, matching: &str) -> Result<HashMap<Key, Value>, KeyValueError> {
+        self.execute(scope, |kv| {
+            let keys: Vec<Key> = kv.list_keys(scope).map(|keys| {
+                keys.into_iter()
+                    .filter(|key| {
+                        key.scope() == scope && (matching.is_empty() || key.name().as_str().contains(matching))
+                    })
+                    .collect()
+            })?;
+
+            let mut pairs = HashMap::new();
+            for key in keys {
+                if let Some(value) = kv.get(&key)? {
+                    pairs.insert(key, value);
+                }
+            }
+
+            Ok(pairs)
+        })
+    }
 }
 
 // # Scopes
@@ -168,16 +177,14 @@
     /// Adds the implicit prefix "upgrade-{version}-" to the given namespace.
     pub fn create_upgrade_store(storage_uri: &Url, namespace: &Namespace) -> Result<Self, KeyValueError> {
         let namespace = Self::prefixed_namespace(namespace, "upgrade")?;
-
-        kvx::KeyValueStore::new(storage_uri, namespace)
-            .map(|inner| KeyValueStore { inner })
-            .map_err(KeyValueError::Inner)
+        KeyValueStore::create(storage_uri, namespace.as_ref())
     }
 
     fn prefixed_namespace(namespace: &Namespace, prefix: &str) -> Result<NamespaceBuf, KeyValueError> {
         let namespace_string = format!("{}_{}", prefix, namespace);
-        NamespaceBuf::from_str(&namespace_string)
+        Namespace::parse(&namespace_string)
             .map_err(|e| KeyValueError::Other(format!("Cannot parse namespace: {}. Error: {}", namespace_string, e)))
+            .map(|ns| ns.to_owned())
     }
 
     /// Archive this store (i.e. for this namespace). Deletes
@@ -189,7 +196,10 @@
         let archive_store = KeyValueStore::create(storage_uri, &archive_ns)?;
         archive_store.wipe()?;
 
-        self.inner.migrate_namespace(archive_ns).map_err(KeyValueError::Inner)
+        match self {
+            KeyValueStore::Memory(memory) => memory.migrate_namespace(archive_ns),
+            KeyValueStore::Disk(disk) => disk.migrate_namespace(archive_ns),
+        }
     }
 
     /// Make this (upgrade) store the current store.
@@ -203,9 +213,10 @@ impl KeyValueStore { namespace ))) } else { - self.inner - .migrate_namespace(namespace.into()) - .map_err(KeyValueError::Inner) + match self { + KeyValueStore::Memory(memory) => memory.migrate_namespace(namespace.into()), + KeyValueStore::Disk(disk) => disk.migrate_namespace(namespace.into()), + } } } @@ -217,15 +228,25 @@ impl KeyValueStore { /// is intended to be used for migrations and testing (copy test data /// into a store) while Krill is not running. pub fn import(&self, other: &Self) -> Result<(), KeyValueError> { + debug!("Import keys from {} into {}", other, self); let mut scopes = other.scopes()?; scopes.push(Scope::global()); // not explicitly listed but should be migrated as well. for scope in scopes { - for key in other.keys(&scope, "")? { - if let Some(value) = other.inner.get(&key).map_err(KeyValueError::Inner)? { - self.inner.store(&key, value).map_err(KeyValueError::Inner)?; + let key_value_pairs = other.key_value_pairs(&scope, "")?; + trace!( + "Migrating {} key value pairs in scope {}.", + key_value_pairs.len(), + scope + ); + + self.execute(&scope, |kv| { + for (key, value) in key_value_pairs.into_iter() { + trace!(" ---storing key {}", key); + kv.store(&key, value)?; } - } + Ok(()) + })?; } Ok(()) @@ -234,10 +255,125 @@ impl KeyValueStore { impl fmt::Display for KeyValueStore { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.inner.fmt(f) + match self { + KeyValueStore::Memory(memory) => memory.fmt(f), + KeyValueStore::Disk(disk) => disk.fmt(f), + } + } +} + +//------------ KeyValueStoreDispatcher --------------------------------------- + +#[derive(Debug)] +pub enum KeyValueStoreDispatcher<'a> { + Memory(&'a Memory), + Disk(&'a Disk), +} + +impl<'a> KeyValueStoreDispatcher<'a> { + pub fn execute(&self, scope: &Scope, op: F) -> Result + where + F: FnOnce(&KeyValueStoreDispatcher) -> Result, + { + match self { + KeyValueStoreDispatcher::Memory(memory) => memory.execute(scope, op), + KeyValueStoreDispatcher::Disk(disk) => disk.execute(scope, op), + } + } + + fn is_empty(&self) -> StorageResult { + match self { + KeyValueStoreDispatcher::Memory(m) => m.is_empty(), + KeyValueStoreDispatcher::Disk(d) => d.is_empty(), + } + } + pub fn has(&self, key: &Key) -> StorageResult { + match self { + KeyValueStoreDispatcher::Memory(m) => m.has(key), + KeyValueStoreDispatcher::Disk(d) => d.has(key), + } + } + fn has_scope(&self, scope: &Scope) -> StorageResult { + match self { + KeyValueStoreDispatcher::Memory(m) => m.has_scope(scope), + KeyValueStoreDispatcher::Disk(d) => d.has_scope(scope), + } + } + + pub fn get(&self, key: &Key) -> StorageResult> { + match self { + KeyValueStoreDispatcher::Memory(m) => m.get(key), + KeyValueStoreDispatcher::Disk(d) => d.get(key), + } + } + + pub fn list_keys(&self, scope: &Scope) -> StorageResult> { + match self { + KeyValueStoreDispatcher::Memory(m) => m.list_keys(scope), + KeyValueStoreDispatcher::Disk(d) => d.list_keys(scope), + } + } + fn list_scopes(&self) -> StorageResult> { + match self { + KeyValueStoreDispatcher::Memory(m) => m.list_scopes(), + KeyValueStoreDispatcher::Disk(d) => d.list_scopes(), + } + } + + /// Store a value. + pub fn store(&self, key: &Key, value: Value) -> StorageResult<()> { + match self { + KeyValueStoreDispatcher::Memory(m) => m.store(key, value), + KeyValueStoreDispatcher::Disk(d) => d.store(key, value), + } + } + + /// Move a value to a new key. Fails if the original value does not exist. 
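+    // This is the rename primitive that the task queue further below relies
+    // on: within one `execute` closure it moves a task from the "pending"
+    // scope to the "running" scope, e.g. (sketch):
+    //
+    //   kv.move_value(&pending_key, &running_key)?;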
+ pub fn move_value(&self, from: &Key, to: &Key) -> StorageResult<()> { + match self { + KeyValueStoreDispatcher::Memory(m) => m.move_value(from, to), + KeyValueStoreDispatcher::Disk(d) => d.move_value(from, to), + } + } + + /// Delete a value for a key. + pub fn delete(&self, key: &Key) -> StorageResult<()> { + match self { + KeyValueStoreDispatcher::Memory(m) => m.delete(key), + KeyValueStoreDispatcher::Disk(d) => d.delete(key), + } + } + + /// Delete all values for a scope. + pub fn delete_scope(&self, scope: &Scope) -> StorageResult<()> { + match self { + KeyValueStoreDispatcher::Memory(m) => m.delete_scope(scope), + KeyValueStoreDispatcher::Disk(d) => d.delete_scope(scope), + } + } + + /// Delete all values within the namespace of this store. + fn clear(&self) -> StorageResult<()> { + match self { + KeyValueStoreDispatcher::Memory(m) => m.clear(), + KeyValueStoreDispatcher::Disk(d) => d.clear(), + } + } +} + +impl<'a> fmt::Display for KeyValueStoreDispatcher<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + KeyValueStoreDispatcher::Memory(memory) => memory.fmt(f), + KeyValueStoreDispatcher::Disk(disk) => disk.fmt(f), + } } } +//------------ StorageResult ------------------------------------------------- + +pub type StorageResult = Result; + //------------ KeyValueError ------------------------------------------------- /// This type defines possible Errors for KeyStore @@ -247,8 +383,9 @@ pub enum KeyValueError { IoError(KrillIoError), JsonError(serde_json::Error), UnknownKey(Key), + InvalidKey(Key), DuplicateKey(Key), - Inner(kvx::Error), + InvalidTaskKey(Key), Other(String), } @@ -264,12 +401,6 @@ impl From for KeyValueError { } } -impl From for KeyValueError { - fn from(e: kvx::Error) -> Self { - KeyValueError::Inner(e) - } -} - impl fmt::Display for KeyValueError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { @@ -277,8 +408,9 @@ impl fmt::Display for KeyValueError { KeyValueError::IoError(e) => write!(f, "I/O error: {}", e), KeyValueError::JsonError(e) => write!(f, "JSON error: {}", e), KeyValueError::UnknownKey(key) => write!(f, "Unknown key: {}", key), + KeyValueError::InvalidKey(key) => write!(f, "Invalid key: {}", key), KeyValueError::DuplicateKey(key) => write!(f, "Duplicate key: {}", key), - KeyValueError::Inner(e) => write!(f, "Store error: {}", e), + KeyValueError::InvalidTaskKey(key) => write!(f, "Invalid task key: {}", key), KeyValueError::Other(msg) => write!(f, "{}", msg), } } @@ -288,11 +420,24 @@ impl fmt::Display for KeyValueError { #[cfg(test)] mod tests { - use super::*; - use std::env; + use futures_util::join; use rand::{distributions::Alphanumeric, Rng}; + use crate::{commons::storage::SegmentBuf, test}; + + use super::*; + + fn random_value(length: usize) -> Value { + Value::from( + rand::thread_rng() + .sample_iter(&Alphanumeric) + .take(length) + .map(char::from) + .collect::(), + ) + } + fn random_segment() -> SegmentBuf { rand::thread_rng() .sample_iter(&Alphanumeric) @@ -313,18 +458,15 @@ mod tests { .unwrap() } - fn get_storage_uri() -> Url { - env::var("KRILL_KV_STORAGE_URL") - .ok() - .and_then(|s| Url::parse(&s).ok()) - .unwrap_or_else(|| Url::parse("memory:///tmp").unwrap()) + fn random_scope(depth: usize) -> Scope { + Scope::new(std::iter::repeat_with(random_segment).take(depth).collect()) } - #[test] - fn test_store() { - let storage_uri = get_storage_uri(); + fn random_key(depth: usize) -> Key { + Key::new_scoped(random_scope(depth), random_segment()) + } - let store = 
KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); + fn impl_store(store: KeyValueStore) { let content = "content".to_owned(); let key = Key::new_global(random_segment()); @@ -333,11 +475,7 @@ mod tests { assert_eq!(store.get(&key).unwrap(), Some(content)); } - #[test] - fn test_store_new() { - let storage_uri = get_storage_uri(); - - let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); + fn impl_store_new(store: KeyValueStore) { let content = "content".to_owned(); let key = Key::new_global(random_segment()); @@ -345,14 +483,10 @@ mod tests { assert!(store.store_new(&key, &content).is_err()); } - #[test] - fn test_store_scoped() { - let storage_uri = get_storage_uri(); - - let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); + fn impl_store_scoped(store: KeyValueStore) { let content = "content".to_owned(); let id = random_segment(); - let scope = Scope::from_segment(segment!("scope")); + let scope = Scope::from_segment(SegmentBuf::parse_lossy("scope")); let key = Key::new_scoped(scope.clone(), id.clone()); store.store(&key, &content).unwrap(); @@ -366,11 +500,7 @@ mod tests { assert_eq!(store.get(&simple).unwrap(), Some(content)); } - #[test] - fn test_get() { - let storage_uri = get_storage_uri(); - - let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); + fn impl_get(store: KeyValueStore) { let content = "content".to_owned(); let key = Key::new_global(random_segment()); assert_eq!(store.get::(&key).unwrap(), None); @@ -379,11 +509,7 @@ mod tests { assert_eq!(store.get(&key).unwrap(), Some(content)); } - #[test] - fn test_get_transactional() { - let storage_uri = get_storage_uri(); - - let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); + fn impl_get_transactional(store: KeyValueStore) { let content = "content".to_owned(); let key = Key::new_global(random_segment()); assert_eq!(store.get::(&key).unwrap(), None); @@ -392,11 +518,7 @@ mod tests { assert_eq!(store.get(&key).unwrap(), Some(content)); } - #[test] - fn test_has() { - let storage_uri = get_storage_uri(); - - let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); + fn impl_has(store: KeyValueStore) { let content = "content".to_owned(); let key = Key::new_global(random_segment()); assert!(!store.has(&key).unwrap()); @@ -405,11 +527,7 @@ mod tests { assert!(store.has(&key).unwrap()); } - #[test] - fn test_drop_key() { - let storage_uri = get_storage_uri(); - - let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); + fn impl_drop_key(store: KeyValueStore) { let content = "content".to_owned(); let key = Key::new_global(random_segment()); store.store(&key, &content).unwrap(); @@ -419,11 +537,7 @@ mod tests { assert!(!store.has(&key).unwrap()); } - #[test] - fn test_drop_scope() { - let storage_uri = get_storage_uri(); - - let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); + fn impl_drop_scope(store: KeyValueStore) { let content = "content".to_owned(); let scope = Scope::from_segment(random_segment()); let key = Key::new_scoped(scope.clone(), random_segment()); @@ -440,13 +554,9 @@ mod tests { assert!(store.has(&key2).unwrap()); } - #[test] - fn test_wipe() { - let storage_uri = get_storage_uri(); - - let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); + fn impl_wipe(store: KeyValueStore) { let content = "content".to_owned(); - let scope = Scope::from_segment(segment!("scope")); + let scope = 
Scope::from_segment(SegmentBuf::parse_lossy("scope")); let key = Key::new_scoped(scope.clone(), random_segment()); store.store(&key, &content).unwrap(); assert!(store.has_scope(&scope).unwrap()); @@ -458,15 +568,11 @@ mod tests { assert!(store.keys(&Scope::global(), "").unwrap().is_empty()); } - #[test] - fn test_scopes() { - let storage_uri = get_storage_uri(); - - let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); + fn impl_list_scopes(store: KeyValueStore) { let content = "content".to_owned(); - let id = segment!("id"); + let id = SegmentBuf::parse_lossy("id"); let scope = Scope::from_segment(random_segment()); - let key = Key::new_scoped(scope.clone(), id); + let key = Key::new_scoped(scope.clone(), id.clone()); assert!(store.scopes().unwrap().is_empty()); @@ -487,33 +593,25 @@ mod tests { assert_eq!(store.scopes().unwrap(), vec![scope]); } - #[test] - fn test_has_scope() { - let storage_uri = get_storage_uri(); - - let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); + fn impl_has_scope(store: KeyValueStore) { let content = "content".to_owned(); let scope = Scope::from_segment(random_segment()); - let key = Key::new_scoped(scope.clone(), segment!("id")); + let key = Key::new_scoped(scope.clone(), SegmentBuf::parse_lossy("id")); assert!(!store.has_scope(&scope).unwrap()); store.store(&key, &content).unwrap(); assert!(store.has_scope(&scope).unwrap()); } - #[test] - fn test_keys() { - let storage_uri = get_storage_uri(); - - let store = KeyValueStore::create(&storage_uri, &random_namespace()).unwrap(); + fn impl_list_keys(store: KeyValueStore) { let content = "content".to_owned(); - let id = segment!("command--id"); - let scope = Scope::from_segment(segment!("command")); + let id = SegmentBuf::parse_lossy("command--id"); + let scope = Scope::from_segment(SegmentBuf::parse_lossy("command")); let key = Key::new_scoped(scope.clone(), id); - let id2 = segment!("command--ls"); + let id2 = SegmentBuf::parse_lossy("command--ls"); let id3 = random_segment(); - let key2 = Key::new_scoped(scope.clone(), id2); + let key2 = Key::new_scoped(scope.clone(), id2.clone()); let key3 = Key::new_global(id3.clone()); store.store(&key, &content).unwrap(); @@ -537,4 +635,101 @@ mod tests { assert_eq!(keys, expected); } + + fn impl_is_empty(store: KeyValueStore) { + assert!(store.is_empty().unwrap()); + store.store(&random_key(1), &random_value(8)).unwrap(); + + assert!(!store.is_empty().unwrap()); + } + + async fn impl_execute(store: KeyValueStore) { + // Test that one transaction does not interfere with another + // We start with an empty store, then start multiple threads + // that each use the same store to add / remove and eventually + // remove all keys that they have added in a single transaction. + // + // We expect that threads use separate transactions / locks, so + // they may have to wait on one another, but they won't see any + // of the key value pairs that are put there by others (i.e. 
we + // clean up at the end) + + async fn one_thread_execute(store: &KeyValueStore) { + let scope = Scope::global(); + + store + .execute(&scope, |kv| { + // start with an empty kv + assert!(kv.is_empty().unwrap()); + + // add a bunch of keys, see that they are there + // and nothing else + let mut keys: Vec = (0..8).map(|_| random_key(1)).collect(); + keys.sort(); + + for key in &keys { + kv.store(key, random_value(8)).unwrap(); + } + assert!(!kv.is_empty().unwrap()); + + // TODO: use non-blocking sleep when we have an async closure + std::thread::sleep(std::time::Duration::from_millis(200)); + + let mut stored_keys = kv.list_keys(&scope).unwrap(); + stored_keys.sort(); + + assert_eq!(keys.len(), stored_keys.len()); + assert_eq!(keys, stored_keys); + + for key in &keys { + kv.delete(key).unwrap(); + } + assert!(kv.is_empty().unwrap()); + + Ok(()) + }) + .unwrap(); + } + + let thread_1 = one_thread_execute(&store); + let thread_2 = one_thread_execute(&store); + + join!(thread_1, thread_2); + } + + fn test_store(storage_uri: &Url) -> KeyValueStore { + KeyValueStore::create(storage_uri, &random_namespace()).unwrap() + } + + async fn test_impl(storage_uri: Url) { + impl_store(test_store(&storage_uri)); + impl_store_new(test_store(&storage_uri)); + impl_store_scoped(test_store(&storage_uri)); + impl_get(test_store(&storage_uri)); + impl_get_transactional(test_store(&storage_uri)); + impl_has(test_store(&storage_uri)); + impl_drop_key(test_store(&storage_uri)); + impl_drop_scope(test_store(&storage_uri)); + impl_wipe(test_store(&storage_uri)); + impl_list_scopes(test_store(&storage_uri)); + impl_has_scope(test_store(&storage_uri)); + impl_list_keys(test_store(&storage_uri)); + impl_is_empty(test_store(&storage_uri)); + impl_execute(test_store(&storage_uri)).await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn mem_store_tests() { + let storage_uri = test::mem_storage(); + test_impl(storage_uri).await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn disk_store_tests() { + let (dir, cleanup) = test::tmp_dir(); + let storage_uri = Url::parse(&format!("local://{}/{}", dir.display(), test::random_hex_string())).unwrap(); + test_impl(storage_uri).await; + + cleanup(); + } } diff --git a/src/commons/storage/memory.rs b/src/commons/storage/memory.rs new file mode 100644 index 000000000..8949609b7 --- /dev/null +++ b/src/commons/storage/memory.rs @@ -0,0 +1,290 @@ +use std::{ + collections::{BTreeSet, HashMap, HashSet}, + fmt::Display, + str::FromStr, + sync::{Mutex, MutexGuard}, +}; + +use lazy_static::lazy_static; + +use crate::commons::storage::{Key, KeyValueError, NamespaceBuf, Scope, StorageResult}; + +use super::KeyValueStoreDispatcher; + +#[derive(Debug)] +pub struct MemoryStore(HashMap>); + +impl MemoryStore { + fn new() -> Self { + MemoryStore(HashMap::new()) + } + + fn has(&self, namespace: &NamespaceBuf, key: &Key) -> bool { + self.0.get(namespace).map(|m| m.contains_key(key)).unwrap_or(false) + } + + fn namespace_is_empty(&self, namespace: &NamespaceBuf) -> bool { + self.0.get(namespace).map(|m| m.is_empty()).unwrap_or(true) + } + + fn has_scope(&self, namespace: &NamespaceBuf, scope: &Scope) -> bool { + self.0 + .get(namespace) + .map(|m| m.keys().any(|k| k.scope().starts_with(scope))) + .unwrap_or_default() + } + + fn get(&self, namespace: &NamespaceBuf, key: &Key) -> Option { + self.0.get(namespace).and_then(|m| m.get(key).cloned()) + } + + fn insert(&mut self, namespace: &NamespaceBuf, key: &Key, value: serde_json::Value) { + let map = 
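+        // `entry(..).or_default()` creates the per-namespace map on first
+        // write, so namespaces spring into existence lazily; the layout is
+        // one map of namespace -> (key -> JSON value).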
self.0.entry(namespace.clone()).or_default(); + map.insert(key.clone(), value); + } + + fn delete(&mut self, namespace: &NamespaceBuf, key: &Key) -> StorageResult<()> { + self.0 + .get_mut(namespace) + .ok_or(KeyValueError::UnknownKey(key.clone()))? + .remove(key) + .ok_or(KeyValueError::UnknownKey(key.clone()))?; + Ok(()) + } + + fn move_value(&mut self, namespace: &NamespaceBuf, from: &Key, to: &Key) -> StorageResult<()> { + match self.0.get_mut(namespace) { + None => Err(KeyValueError::Other(format!("unknown namespace: {}", namespace))), + Some(map) => match map.remove(from) { + Some(value) => { + map.insert(to.clone(), value); + Ok(()) + } + None => Err(KeyValueError::UnknownKey(from.clone())), + }, + } + } + + fn list_keys(&self, namespace: &NamespaceBuf, scope: &Scope) -> Vec { + self.0 + .get(namespace) + .map(|m| { + m.keys() + .filter(|k| k.scope().starts_with(scope)) + .cloned() + .collect::>() + }) + .unwrap_or_default() + } + + fn list_scopes(&self, namespace: &NamespaceBuf) -> Vec { + let scopes: BTreeSet = self + .0 + .get(namespace) + .map(|m| m.keys().flat_map(|k| k.scope().sub_scopes()).collect()) + .unwrap_or_default(); + + scopes.into_iter().collect() + } + + fn delete_scope(&mut self, namespace: &NamespaceBuf, scope: &Scope) -> StorageResult<()> { + if let Some(map) = self.0.get_mut(namespace) { + map.retain(|k, _| !k.scope().starts_with(scope)); + } + + Ok(()) + } + + fn migrate_namespace(&mut self, from: &NamespaceBuf, to: &NamespaceBuf) -> StorageResult<()> { + if !self.namespace_is_empty(to) { + Err(KeyValueError::Other(format!( + "target in-memory namespace {} is not empty", + to.as_str() + ))) + } else { + match self.0.remove(from) { + None => Err(KeyValueError::Other(format!( + "original in-memory namespace {} does not exist", + from.as_str() + ))), + Some(map) => { + self.0.insert(to.clone(), map); + Ok(()) + } + } + } + } + + pub fn clear(&mut self, namespace: &NamespaceBuf) -> StorageResult<()> { + self.0.insert(namespace.clone(), HashMap::new()); + Ok(()) + } +} + +lazy_static! { + static ref STORE: Mutex = Mutex::new(MemoryStore::new()); + static ref LOCKS: Mutex> = Mutex::new(HashSet::new()); +} + +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +struct ScopeLock(String); + +impl ScopeLock { + fn new(namespace: &NamespaceBuf, scope: &Scope) -> Self { + ScopeLock(format!("{}/{}", namespace, scope)) + } +} + +#[derive(Clone, Debug)] +pub struct Memory { + // Used to prevent namespace collisions in the shared (lazy static) in memory structure. 
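+    // E.g. (illustrative): with the prefix "test-3f2a", taken from the host
+    // part of a "memory://test-3f2a/..." storage URL, the namespace "cas" is
+    // stored under the effective namespace "test-3f2a_cas", so parallel test
+    // runs do not share state.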
+    namespace_prefix: Option<String>,
+    effective_namespace: NamespaceBuf,
+    inner: &'static Mutex<MemoryStore>,
+    locks: &'static Mutex<HashSet<ScopeLock>>,
+}
+
+impl Memory {
+    pub(crate) fn new(namespace_prefix: Option<&str>, namespace: NamespaceBuf) -> StorageResult<Self> {
+        let namespace_prefix = namespace_prefix.map(|s| s.to_string());
+        let effective_namespace = Self::effective_namespace(&namespace_prefix, namespace)?;
+
+        Ok(Memory {
+            namespace_prefix,
+            effective_namespace,
+            inner: &STORE,
+            locks: &LOCKS,
+        })
+    }
+
+    fn effective_namespace(namespace_prefix: &Option<String>, namespace: NamespaceBuf) -> StorageResult<NamespaceBuf> {
+        if let Some(pfx) = namespace_prefix {
+            NamespaceBuf::from_str(&format!("{}_{}", pfx, namespace)).map_err(|e| {
+                KeyValueError::UnknownScheme(format!("cannot parse prefix '{}' for memory store: {}", pfx, e))
+            })
+        } else {
+            Ok(namespace)
+        }
+    }
+
+    pub(super) fn lock(&self) -> StorageResult<MutexGuard<'static, MemoryStore>> {
+        self.inner
+            .lock()
+            .map_err(|e| KeyValueError::Other(format!("cannot lock mutex: {e}")))
+    }
+}
+
+impl Display for Memory {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "memory://{}", self.effective_namespace)
+    }
+}
+
+impl Memory {
+    pub fn execute<F, T>(&self, scope: &Scope, op: F) -> Result<T, KeyValueError>
+    where
+        F: FnOnce(&KeyValueStoreDispatcher) -> Result<T, KeyValueError>,
+    {
+        // Try to get a lock for 10 seconds. We may need to make this configurable.
+        // Dependent on use cases it may actually not be that exceptional for locks
+        // to be kept for even longer.
+        let wait_ms = 10;
+        let tries = 1000;
+
+        let scope_lock = ScopeLock::new(&self.effective_namespace, scope);
+
+        let mut acquired = false;
+        for _ in 0..tries {
+            let mut locks = self
+                .locks
+                .lock()
+                .map_err(|e| KeyValueError::Other(format!("Can't get lock: {e}")))?;
+
+            if locks.contains(&scope_lock) {
+                // Scope is locked by another caller: release the mutex and retry.
+                drop(locks);
+                std::thread::sleep(std::time::Duration::from_millis(wait_ms));
+            } else {
+                locks.insert(scope_lock.clone());
+                acquired = true;
+                break;
+            }
+        }
+
+        if !acquired {
+            // All tries were used up and the scope is still locked: bail out
+            // rather than running the closure without holding the scope lock.
+            return Err(KeyValueError::Other(format!("Scope {} already locked", scope)));
+        }
+
+        let dispatcher = KeyValueStoreDispatcher::Memory(self);
+        let res = op(&dispatcher);
+
+        let mut locks = self
+            .locks
+            .lock()
+            .map_err(|e| KeyValueError::Other(format!("cannot get lock: {e}")))?;
+
+        locks.remove(&scope_lock);
+
+        res
+    }
+}
+
+impl Memory {
+    pub fn is_empty(&self) -> StorageResult<bool> {
+        self.lock().map(|l| l.namespace_is_empty(&self.effective_namespace))
+    }
+
+    pub fn has(&self, key: &Key) -> StorageResult<bool> {
+        Ok(self.lock()?.has(&self.effective_namespace, key))
+    }
+
+    pub fn has_scope(&self, scope: &Scope) -> StorageResult<bool> {
+        Ok(self.lock()?.has_scope(&self.effective_namespace, scope))
+    }
+
+    pub fn get(&self, key: &Key) -> StorageResult<Option<serde_json::Value>> {
+        Ok(self.lock()?.get(&self.effective_namespace, key))
+    }
+
+    pub fn list_keys(&self, scope: &Scope) -> StorageResult<Vec<Key>> {
+        Ok(self.lock()?.list_keys(&self.effective_namespace, scope))
+    }
+
+    pub fn list_scopes(&self) -> StorageResult<Vec<Scope>> {
+        Ok(self.lock()?.list_scopes(&self.effective_namespace))
+    }
+}
+
+impl Memory {
+    pub fn store(&self, key: &Key, value: serde_json::Value) -> StorageResult<()> {
+        self.lock()?.insert(&self.effective_namespace, key, value);
+        Ok(())
+    }
+
+    pub fn move_value(&self, from: &Key, to: &Key) -> StorageResult<()> {
+        self.lock()?.move_value(&self.effective_namespace, from, to)
+    }
+
+    pub fn delete(&self, key: &Key) -> StorageResult<()> {
+        self.lock()?.delete(&self.effective_namespace, key)
+    }
+
+    pub fn delete_scope(&self,
scope: &Scope) -> StorageResult<()> { + self.lock()?.delete_scope(&self.effective_namespace, scope) + } + + pub fn clear(&self) -> StorageResult<()> { + self.lock()?.clear(&self.effective_namespace) + } + + pub fn migrate_namespace(&mut self, to: NamespaceBuf) -> StorageResult<()> { + // We need to preserve the namespace prefix if it was set. + // This prefix is used to prevent namespace collisions in the + // shared (lazy static) in memory structure. + let effective_to = Self::effective_namespace(&self.namespace_prefix, to)?; + + self.lock()? + .migrate_namespace(&self.effective_namespace, &effective_to)?; + self.effective_namespace = effective_to; + + Ok(()) + } +} diff --git a/src/commons/storage/mod.rs b/src/commons/storage/mod.rs index b9855d496..90d25e2c8 100644 --- a/src/commons/storage/mod.rs +++ b/src/commons/storage/mod.rs @@ -1,7 +1,20 @@ -mod kv; -pub use self::kv::{ - namespace, segment, Key, KeyValueError, KeyValueStore, Namespace, Scope, Segment, SegmentBuf, SegmentExt, -}; +pub use disk::Disk; +pub use key::Key; +pub use memory::{Memory, MemoryStore}; +pub use namespace::{Namespace, NamespaceBuf, ParseNamespaceError}; +pub use queue::*; +pub use scope::Scope; +pub use segment::{ParseSegmentError, Segment, SegmentBuf}; +pub use types::Storable; + +pub use self::kv::{KeyValueError, KeyValueStore, KeyValueStoreDispatcher, StorageResult}; +mod disk; +mod key; +mod kv; +mod memory; +mod namespace; +mod queue; +mod scope; +mod segment; mod types; -pub use types::*; diff --git a/src/commons/storage/namespace.rs b/src/commons/storage/namespace.rs new file mode 100644 index 000000000..0d6490f48 --- /dev/null +++ b/src/commons/storage/namespace.rs @@ -0,0 +1,184 @@ +use std::{ + borrow::Borrow, + fmt::{Display, Formatter}, + ops::Deref, + str::FromStr, +}; + +/// An owned Namespace. +/// +/// This is the owned variant of [`Namespace`] +#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] +#[repr(transparent)] +pub struct NamespaceBuf(String); + +impl NamespaceBuf { + pub fn parse_lossy(value: &str) -> Self { + match Namespace::parse(value) { + Ok(ns) => ns.to_owned(), + Err(error) => { + let mut sanitized = value + .trim() + .chars() + .map(|c| { + if c.is_alphanumeric() || c == '-' || c == '_' { + c + } else { + '_' + } + }) + .collect::(); + + sanitized.truncate(255); + + let nonempty = sanitized.is_empty().then(|| "EMPTY".to_owned()).unwrap_or(sanitized); + let namespace = Namespace::parse(&nonempty).unwrap(); // cannot panic as all checks are performed above + warn!("{value} is not a valid Namespace: {error}\nusing {namespace} instead"); + namespace.to_owned() + } + } + } +} + +impl AsRef for NamespaceBuf { + fn as_ref(&self) -> &Namespace { + self + } +} + +impl Borrow for NamespaceBuf { + fn borrow(&self) -> &Namespace { + self + } +} + +impl Deref for NamespaceBuf { + type Target = Namespace; + + fn deref(&self) -> &Self::Target { + unsafe { Namespace::from_str_unchecked(&self.0) } + } +} + +impl Display for NamespaceBuf { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl FromStr for NamespaceBuf { + type Err = ParseNamespaceError; + + fn from_str(s: &str) -> Result { + Ok(Namespace::parse(s)?.to_owned()) + } +} + +impl From<&Namespace> for NamespaceBuf { + fn from(value: &Namespace) -> Self { + value.to_owned() + } +} + +/// A string slice representing a namespace. +/// +/// Namespaces are used by KeyValueStore to separate +/// different instances that use a shared storage. 
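+//
+// For instance (illustrative), two Krill instances sharing one storage
+// location could use the namespaces "krill-a" and "krill-b" to keep their
+// keys fully separated.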
+///
+/// A Namespace MUST NOT contain any characters
+/// other than a-z A-Z 0-9 - or _.
+///
+/// For the owned variant, see [`NamespaceBuf`]
+#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
+pub struct Namespace(str);
+
+impl Namespace {
+    /// Parse a Namespace from a string.
+    pub const fn parse(value: &str) -> Result<&Self, ParseNamespaceError> {
+        if value.is_empty() {
+            Err(ParseNamespaceError::Empty)
+        } else if value.len() > 255 {
+            Err(ParseNamespaceError::TooLong)
+        } else if Self::contains_only_legal_chars(value.as_bytes()) {
+            unsafe { Ok(Namespace::from_str_unchecked(value)) }
+        } else {
+            Err(ParseNamespaceError::IllegalCharacter)
+        }
+    }
+
+    /// Return the encapsulated string.
+    pub fn as_str(&self) -> &str {
+        &self.0
+    }
+
+    /// Creates a Namespace from a string without performing any checks.
+    ///
+    /// # Safety
+    /// This should only be called for const values, where we know that
+    /// the input is safe, or in case the input was thoroughly checked
+    /// in another way.
+    pub const unsafe fn from_str_unchecked(s: &str) -> &Self {
+        &*(s as *const _ as *const Self)
+    }
+
+    /// We need a const function for checking the bytes we parse
+    const fn contains_only_legal_chars(bytes: &[u8]) -> bool {
+        let mut index = 0;
+
+        while index < bytes.len() {
+            let b = bytes[index];
+            if b.is_ascii_alphanumeric() || b == b'-' || b == b'_' {
+                index += 1;
+            } else {
+                return false;
+            }
+        }
+
+        true
+    }
+}
+
+impl Display for Namespace {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}", &self.0)
+    }
+}
+
+impl ToOwned for Namespace {
+    type Owned = NamespaceBuf;
+
+    fn to_owned(&self) -> Self::Owned {
+        NamespaceBuf(self.0.to_owned())
+    }
+}
+
+#[derive(Debug, Eq, PartialEq)]
+pub enum ParseNamespaceError {
+    Empty,
+    TooLong,
+    IllegalCharacter,
+}
+
+impl Display for ParseNamespaceError {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        match self {
+            ParseNamespaceError::Empty => "namespaces must be nonempty",
+            ParseNamespaceError::TooLong => "namespaces must not be longer than 255 characters",
+            ParseNamespaceError::IllegalCharacter => "namespaces may only contain alphanumeric characters and - or _",
+        }
+        .fmt(f)
+    }
+}
+
+impl std::error::Error for ParseNamespaceError {}
+
+#[cfg(test)]
+mod tests {
+
+    use super::*;
+
+    #[test]
+    fn test_empty_namespace_fails() {
+        assert_eq!(Namespace::parse(""), Err(ParseNamespaceError::Empty))
+    }
+}
diff --git a/src/commons/storage/queue.rs b/src/commons/storage/queue.rs
new file mode 100644
index 000000000..dafe9a2dd
--- /dev/null
+++ b/src/commons/storage/queue.rs
@@ -0,0 +1,732 @@
+use std::{
+    borrow::Cow,
+    convert::TryFrom,
+    fmt::{Display, Formatter},
+    str::FromStr,
+    time::{Duration, SystemTime, UNIX_EPOCH},
+};
+
+use crate::commons::storage::{Key, KeyValueError, KeyValueStore, Scope, Segment, SegmentBuf, StorageResult};
+
+use super::KeyValueStoreDispatcher;
+
+const SEPARATOR: char = '-';
+
+fn now() -> u128 {
+    SystemTime::now()
+        .duration_since(UNIX_EPOCH)
+        .expect("time-travel is not supported")
+        .as_millis()
+}
+
+struct TaskKey<'a> {
+    pub name: Cow<'a, SegmentBuf>,
+    pub timestamp_millis: u128,
+}
+
+impl<'a> TaskKey<'a> {
+    fn key(&self) -> Key {
+        Key::from_str(&format!("{}{}{}", self.timestamp_millis, SEPARATOR, self.name)).unwrap()
+    }
+
+    fn running_key(&self) -> Key {
+        let mut key = self.key();
+        key.add_super_scope(RunningTask::SEGMENT);
+        key
+    }
+
+    fn pending_key(&self) -> Key {
+        let mut key = self.key();
+        key.add_super_scope(PendingTask::SEGMENT);
+        key
+    }
+}
+
+impl
TryFrom<&Key> for TaskKey<'_> { + type Error = KeyValueError; + + fn try_from(key: &Key) -> Result { + let (ts, name) = key + .name() + .as_str() + .split_once(SEPARATOR) + .ok_or(KeyValueError::InvalidTaskKey(key.clone()))?; + + let name = Segment::parse(name) + .map_err(|_| KeyValueError::InvalidTaskKey(key.clone()))? + .to_owned(); + + let timestamp_millis: u128 = ts.parse().map_err(|_| KeyValueError::InvalidTaskKey(key.clone()))?; + + Ok(TaskKey { + name: Cow::Owned(name), + timestamp_millis, + }) + } +} + +impl From<&PendingTask> for Key { + fn from(p: &PendingTask) -> Self { + let mut key = Key::from_str(&p.to_string()).unwrap(); + key.add_super_scope(PendingTask::SEGMENT); + key + } +} + +impl From<&RunningTask> for Key { + fn from(p: &RunningTask) -> Self { + let mut key = Key::from_str(&p.to_string()).unwrap(); + key.add_super_scope(RunningTask::SEGMENT); + key + } +} + +#[derive(Clone, Debug)] +pub struct PendingTask { + pub name: SegmentBuf, + pub timestamp_millis: u128, + pub value: serde_json::Value, +} + +impl PendingTask { + const SEGMENT: &Segment = unsafe { Segment::from_str_unchecked("pending") }; +} + +impl PartialEq for PendingTask { + fn eq(&self, other: &Self) -> bool { + self.name == other.name + } +} + +impl Display for PendingTask { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}{}{}", + self.timestamp_millis, + SEPARATOR.encode_utf8(&mut [0; 4]), + self.name, + ) + } +} + +#[derive(Clone, Debug)] +pub struct RunningTask { + pub name: SegmentBuf, + pub timestamp_millis: u128, + pub value: serde_json::Value, +} + +impl RunningTask { + const SEGMENT: &Segment = unsafe { Segment::from_str_unchecked("running") }; +} + +impl Display for RunningTask { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}{}{}", + self.timestamp_millis, + SEPARATOR.encode_utf8(&mut [0; 4]), + self.name, + ) + } +} + +/// Defines scheduling behaviour in case a task by the same name already exists. +#[derive(Clone, Copy, Debug)] +pub enum ScheduleMode { + /// Store new task: + /// - replace old task if it exists + /// - do NOT finish old task + ReplaceExisting, + + /// Store new task: + /// - replace old task if it exists + /// - use the soonest scheduled time if old task exists + /// - do NOT finish old task if it is running + ReplaceExistingSoonest, + + /// Store new task: + /// - replace old task if it exists + /// - finish old task if it is running + FinishOrReplaceExisting, + + /// Store new task: + /// - replace old task if it exists + /// - use the soonest scheduled time if old task exists + /// - finish old task if it is running + FinishOrReplaceExistingSoonest, + + /// Keep existing pending or running task and in that case do not + /// add the new task. Otherwise just add the new task. + IfMissing, +} + +pub trait Queue { + const RESCHEDULE_AFTER: Duration = Duration::from_secs(15 * 60); + + fn lock_scope() -> Scope { + Scope::global() + } + + fn pending_scope() -> Scope { + Scope::from_segment(PendingTask::SEGMENT) + } + + fn running_scope() -> Scope { + Scope::from_segment(RunningTask::SEGMENT) + } + + /// Returns the number of pending tasks remaining + fn pending_tasks_remaining(&self) -> StorageResult; + + /// Returns the number of running tasks + fn running_tasks_remaining(&self) -> StorageResult; + + /// Returns the currently running tasks + fn running_tasks_keys(&self) -> StorageResult>; + + /// Schedule a task. 
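+    // Task keys encode the due time in the key name as
+    // "{timestamp_millis}-{name}", under the "pending" or "running" scope
+    // (see TaskKey above). A scheduling sketch with illustrative names:
+    //
+    //   queue.schedule_task(
+    //       SegmentBuf::parse_lossy("republish"),
+    //       serde_json::json!({ "ca": "ta" }),
+    //       None,                    // due immediately
+    //       ScheduleMode::IfMissing, // keep any existing task instead
+    //   )?;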
+ fn schedule_task( + &self, + name: SegmentBuf, + value: serde_json::Value, + timestamp_millis: Option, + existing: ScheduleMode, + ) -> StorageResult<()>; + + /// Returns the scheduled timestamp in ms for the named task, if any. + fn pending_task_scheduled(&self, name: SegmentBuf) -> StorageResult>; + + /// Marks a running task as finished. Fails if the task is not running. + fn finish_running_task(&self, running: &Key) -> StorageResult<()>; + + /// Reschedules a running task as pending. Fails if the task is not running. + fn reschedule_running_task(&self, running: &Key, timestamp_millis: Option) -> StorageResult<()>; + + /// Claims the next scheduled pending task, if any. + fn claim_scheduled_pending_task(&self) -> StorageResult>; + + /// Reschedules running tasks that have timed out. + fn reschedule_long_running_tasks(&self, reschedule_after: Option<&Duration>) -> StorageResult<()>; +} + +impl Queue for KeyValueStore { + fn pending_tasks_remaining(&self) -> StorageResult { + self.execute(&Self::lock_scope(), |kv| { + kv.list_keys(&Self::pending_scope()).map(|list| list.len()) + }) + } + + fn running_tasks_remaining(&self) -> StorageResult { + self.execute(&Self::lock_scope(), |kv| { + kv.list_keys(&Self::running_scope()).map(|list| list.len()) + }) + } + + fn running_tasks_keys(&self) -> StorageResult> { + self.execute(&Self::lock_scope(), |kv| kv.list_keys(&Self::running_scope())) + } + + fn schedule_task( + &self, + name: SegmentBuf, + value: serde_json::Value, + timestamp_millis: Option, + mode: ScheduleMode, + ) -> StorageResult<()> { + let mut new_task = PendingTask { + name, + timestamp_millis: timestamp_millis.unwrap_or(now()), + value, + }; + let new_task_key = Key::from(&new_task); + + self.execute(&Self::lock_scope(), |s: &KeyValueStoreDispatcher| { + let running_key_opt = s + .list_keys(&Self::running_scope())? + .into_iter() + .filter_map(|k| TaskKey::try_from(&k).ok()) + .find(|running| running.name.as_ref() == &new_task.name) + .map(|tk| tk.running_key()); + + let pending_key_opt = s + .list_keys(&Self::pending_scope())? 
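+                // Keys that do not parse as task keys are skipped silently
+                // here (filter_map), so stray entries cannot wedge the queue.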
+                .into_iter()
+                .filter_map(|k| TaskKey::try_from(&k).ok())
+                .find(|p| p.name.as_ref() == &new_task.name)
+                .map(|tk| tk.pending_key());
+
+            match mode {
+                ScheduleMode::IfMissing => {
+                    if pending_key_opt.is_some() || running_key_opt.is_some() {
+                        // nothing to do, a task with this name already exists
+                        Ok(())
+                    } else {
+                        // no pending or running task exists, just add the new task
+                        s.store(&new_task_key, new_task.value.clone())
+                    }
+                }
+                ScheduleMode::ReplaceExisting => {
+                    if let Some(pending) = pending_key_opt {
+                        s.delete(&pending)?;
+                    }
+                    s.store(&new_task_key, new_task.value.clone())
+                }
+                ScheduleMode::ReplaceExistingSoonest => {
+                    if let Some(pending) = pending_key_opt {
+                        if let Ok(tk) = TaskKey::try_from(&pending) {
+                            new_task.timestamp_millis = new_task.timestamp_millis.min(tk.timestamp_millis);
+                        }
+                        s.delete(&pending)?;
+                    }
+
+                    let new_task_key = Key::from(&new_task);
+                    s.store(&new_task_key, new_task.value.clone())
+                }
+                ScheduleMode::FinishOrReplaceExisting => {
+                    if let Some(running) = running_key_opt {
+                        s.delete(&running)?;
+                    }
+                    if let Some(pending) = pending_key_opt {
+                        s.delete(&pending)?;
+                    }
+                    s.store(&new_task_key, new_task.value.clone())
+                }
+                ScheduleMode::FinishOrReplaceExistingSoonest => {
+                    if let Some(running) = running_key_opt {
+                        s.delete(&running)?;
+                    }
+
+                    if let Some(pending) = pending_key_opt {
+                        if let Ok(tk) = TaskKey::try_from(&pending) {
+                            new_task.timestamp_millis = new_task.timestamp_millis.min(tk.timestamp_millis);
+                        }
+                        s.delete(&pending)?;
+                    }
+
+                    let new_task_key = Key::from(&new_task);
+                    s.store(&new_task_key, new_task.value.clone())
+                }
+            }
+        })
+    }
+
+    fn finish_running_task(&self, running_key: &Key) -> StorageResult<()> {
+        self.execute(&Self::lock_scope(), |kv| {
+            if kv.has(running_key)? {
+                kv.delete(running_key)
+            } else {
+                Err(KeyValueError::Other(format!(
+                    "Cannot finish task {}. It is not running.",
+                    running_key
+                )))
+            }
+        })
+    }
+
+    fn reschedule_running_task(&self, running: &Key, timestamp_millis: Option<u128>) -> StorageResult<()> {
+        let pending_key = {
+            let mut task_key = TaskKey::try_from(running)?;
+            task_key.timestamp_millis = timestamp_millis.unwrap_or_else(now);
+
+            task_key.pending_key()
+        };
+
+        self.execute(&Self::lock_scope(), |kv| kv.move_value(running, &pending_key))
+    }
+
+    fn claim_scheduled_pending_task(&self) -> StorageResult<Option<RunningTask>> {
+        self.execute(&Self::lock_scope(), |kv| {
+            let tasks_before = now();
+
+            if let Some(pending) = kv
+                .list_keys(&Self::pending_scope())?
+                .into_iter()
+                .filter_map(|k| TaskKey::try_from(&k).ok())
+                .filter(|tk| tk.timestamp_millis <= tasks_before)
+                .min_by_key(|tk| tk.timestamp_millis)
+            {
+                let pending_key = pending.pending_key();
+
+                if let Some(value) = kv.get(&pending_key)? {
+                    let mut running_task = RunningTask {
+                        name: pending.name.into_owned(),
+                        timestamp_millis: tasks_before,
+                        value,
+                    };
+                    let mut running_key = Key::from(&running_task);
+
+                    if kv.has(&running_key)? {
+                        // It's not pretty to sleep blocking, even if it's
+                        // for 1 ms, but if we don't, then we get a name collision
+                        // with an existing running task.
+ std::thread::sleep(Duration::from_millis(1)); + running_task.timestamp_millis = now(); + running_key = Key::from(&running_task); + } + + kv.move_value(&pending_key, &running_key)?; + + Ok(Some(running_task)) + } else { + Ok(None) + } + } else { + Ok(None) + } + }) + } + + fn reschedule_long_running_tasks(&self, reschedule_after: Option<&Duration>) -> StorageResult<()> { + let now = now(); + + let reschedule_after = reschedule_after.unwrap_or(&KeyValueStore::RESCHEDULE_AFTER); + let reschedule_timeout = now - reschedule_after.as_millis(); + + self.execute(&Self::lock_scope(), |s: &KeyValueStoreDispatcher| { + s.list_keys(&Self::running_scope())? + .into_iter() + .filter_map(|k| { + let task = TaskKey::try_from(&k).ok()?; + if task.timestamp_millis <= reschedule_timeout { + Some(task) + } else { + None + } + }) + .for_each(|tk| { + let running_key = tk.running_key(); + + let pending_key = TaskKey { + name: Cow::Borrowed(&tk.name), + timestamp_millis: now, + } + .pending_key(); + + let _ = s.move_value(&running_key, &pending_key); + }); + + Ok(()) + }) + } + + fn pending_task_scheduled(&self, name: SegmentBuf) -> StorageResult> { + self.execute(&Self::lock_scope(), |kv| { + kv.list_keys(&Self::pending_scope()).map(|keys| { + keys.into_iter() + .filter_map(|k| TaskKey::try_from(&k).ok()) + .find(|p| p.name.as_ref() == &name) + .map(|p| p.timestamp_millis) + }) + }) + } +} + +#[cfg(test)] +mod tests { + use std::{thread, time::Duration}; + + use serde_json::Value; + + use super::{PendingTask, Queue}; + use crate::commons::storage::{ + queue::{now, ScheduleMode}, + Key, KeyValueStore, Namespace, Scope, Segment, SegmentBuf, + }; + + fn queue_store(ns: &str) -> KeyValueStore { + let storage_url = crate::test::mem_storage(); + + KeyValueStore::create(&storage_url, Namespace::parse(ns).unwrap()).unwrap() + } + + #[test] + fn queue_thread_workers() { + let queue = queue_store("test_queue"); + queue.wipe().unwrap(); + + thread::scope(|s| { + let create = s.spawn(|| { + for i in 1..=10 { + let name = &format!("job-{i}"); + let segment = Segment::parse(name).unwrap(); + let value = Value::from("value"); + + queue + .schedule_task(segment.into(), value, None, ScheduleMode::FinishOrReplaceExisting) + .unwrap(); + println!("> Scheduled job {}", &name); + } + }); + + create.join().unwrap(); + let keys = queue.list_keys(&Scope::from_segment(PendingTask::SEGMENT)).unwrap(); + assert_eq!(keys.len(), 10); + + for _i in 1..=10 { + s.spawn(|| { + while queue.pending_tasks_remaining().unwrap() > 0 { + if let Some(running_task) = queue.claim_scheduled_pending_task().unwrap() { + queue.finish_running_task(&Key::from(&running_task)).unwrap(); + } + + std::thread::sleep(std::time::Duration::from_millis(5)); + } + }); + } + }); + + let pending = queue.pending_tasks_remaining().unwrap(); + assert_eq!(pending, 0); + + let running = queue.running_tasks_remaining().unwrap(); + assert_eq!(running, 0); + } + + #[test] + fn test_reschedule_long_running() { + let queue = queue_store("test_cleanup_queue"); + queue.wipe().unwrap(); + + let name = "job"; + let segment = Segment::parse(name).unwrap(); + let value = Value::from("value"); + + queue + .schedule_task(segment.into(), value, None, ScheduleMode::FinishOrReplaceExisting) + .unwrap(); + + assert_eq!(queue.pending_tasks_remaining().unwrap(), 1); + + let job = queue.claim_scheduled_pending_task().unwrap(); + + assert!(job.is_some()); + assert_eq!(queue.pending_tasks_remaining().unwrap(), 0); + + let job = queue.claim_scheduled_pending_task().unwrap(); + + 
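+        // claim_scheduled_pending_task returns Ok(None) when no pending task
+        // is due yet, so a worker can poll it in a loop, e.g. (sketch):
+        //
+        //   while let Some(task) = queue.claim_scheduled_pending_task()? {
+        //       // ... perform the task ...
+        //       queue.finish_running_task(&Key::from(&task))?;
+        //   }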
        assert!(job.is_none());
+
+        queue
+            .reschedule_long_running_tasks(Some(&Duration::from_secs(0)))
+            .unwrap();
+
+        let existing = queue.pending_task_scheduled(segment.into()).unwrap();
+
+        assert!(existing.is_some());
+        assert_eq!(queue.pending_tasks_remaining().unwrap(), 1);
+
+        let job = queue.claim_scheduled_pending_task().unwrap();
+
+        assert!(job.is_some());
+        assert_eq!(queue.pending_tasks_remaining().unwrap(), 0);
+    }
+
+    #[test]
+    fn test_reschedule_finished_task() {
+        let queue = queue_store("test_cleanup_queue");
+        queue.wipe().unwrap();
+
+        let name = "task";
+        let segment = Segment::parse(name).unwrap();
+        let value = Value::from("value");
+
+        // Schedule the task
+        queue
+            .schedule_task(segment.into(), value, None, ScheduleMode::FinishOrReplaceExisting)
+            .unwrap();
+        assert_eq!(queue.pending_tasks_remaining().unwrap(), 1);
+
+        // Get the task
+        let running_task = queue.claim_scheduled_pending_task().unwrap().unwrap();
+        assert_eq!(queue.pending_tasks_remaining().unwrap(), 0);
+        assert_eq!(queue.running_tasks_remaining().unwrap(), 1);
+
+        // Finish the task and reschedule it as a new pending task
+        queue
+            .schedule_task(
+                running_task.name,
+                running_task.value,
+                Some(now()),
+                ScheduleMode::FinishOrReplaceExisting,
+            )
+            .unwrap();
+
+        // There should now be a new pending task, and the
+        // running task should be removed.
+        assert_eq!(queue.pending_tasks_remaining().unwrap(), 1);
+        assert_eq!(queue.running_tasks_remaining().unwrap(), 0);
+
+        // Get and finish the pending task, but do not reschedule it
+        let running_task = queue.claim_scheduled_pending_task().unwrap().unwrap();
+        assert_eq!(queue.pending_tasks_remaining().unwrap(), 0);
+        queue.finish_running_task(&Key::from(&running_task)).unwrap();
+
+        // There should not be a new pending task
+        assert_eq!(queue.pending_tasks_remaining().unwrap(), 0);
+    }
+
+    #[test]
+    fn test_schedule_with_existing_task() {
+        let queue = queue_store("test_cleanup_queue");
+        queue.wipe().unwrap();
+
+        let name: SegmentBuf = SegmentBuf::parse_lossy("task");
+        let value_1 = Value::from("value_1");
+        let value_2 = Value::from("value_2");
+
+        let in_a_while = now() + 3 * 60 * 1000; // three minutes from now, in millis
+
+        // Schedule a task, and then schedule again replacing the old
+        {
+            queue
+                .schedule_task(
+                    name.clone(),
+                    value_1.clone(),
+                    None,
+                    ScheduleMode::FinishOrReplaceExisting,
+                )
+                .unwrap();
+            assert_eq!(queue.pending_tasks_remaining().unwrap(), 1);
+
+            // Schedule again, replacing the existing task
+            queue
+                .schedule_task(
+                    name.clone(),
+                    value_2.clone(),
+                    None,
+                    ScheduleMode::FinishOrReplaceExisting,
+                )
+                .unwrap();
+            assert_eq!(queue.pending_tasks_remaining().unwrap(), 1);
+
+            // We should have one task and the value should match the new task.
+            let task = queue.claim_scheduled_pending_task().unwrap().unwrap();
+            assert_eq!(task.value, value_2);
+
+            assert_eq!(queue.running_tasks_remaining().unwrap(), 1);
+            queue.finish_running_task(&Key::from(&task)).unwrap();
+        }
+
+        // Schedule a task, and then schedule again keeping the old
+        {
+            queue
+                .schedule_task(
+                    name.clone(),
+                    value_1.clone(),
+                    None,
+                    ScheduleMode::FinishOrReplaceExisting,
+                )
+                .unwrap();
+            queue
+                .schedule_task(name.clone(), value_2.clone(), Some(in_a_while), ScheduleMode::IfMissing)
+                .unwrap();
+
+            // there should be only one task, it should not be rescheduled,
+            // so when we claim it, its value should match the old one.
+ assert_eq!(queue.pending_tasks_remaining().unwrap(), 1); + let task = queue.claim_scheduled_pending_task().unwrap().unwrap(); + assert_eq!(task.value, value_1); + } + + // Schedule a task, and then schedule again rescheduling it + { + queue + .schedule_task( + name.clone(), + value_1.clone(), + None, + ScheduleMode::FinishOrReplaceExisting, + ) + .unwrap(); + + // we expect one pending task + assert_eq!(queue.pending_tasks_remaining().unwrap(), 1); + + // reschedule that task to 3 minutes from now, keeping the + // soonest value + queue + .schedule_task( + name.clone(), + value_2.clone(), + Some(in_a_while), + ScheduleMode::FinishOrReplaceExistingSoonest, + ) + .unwrap(); + + // we still expect one pending task with the earlier + // time and the new value. + assert_eq!(queue.pending_tasks_remaining().unwrap(), 1); + let task = queue.claim_scheduled_pending_task().unwrap().unwrap(); + assert_eq!(task.value, value_2); + + // But if we now schedule a task and then reschedule + // it to 3 minutes from now NOT using the soonest. Then + // we should see 1 pending task that we cannot claim + // because it is not due. + queue + .schedule_task( + name.clone(), + value_1.clone(), + None, + ScheduleMode::FinishOrReplaceExisting, + ) + .unwrap(); + queue + .schedule_task( + name.clone(), + value_1.clone(), + Some(in_a_while), + ScheduleMode::FinishOrReplaceExisting, + ) + .unwrap(); + + assert_eq!(queue.pending_tasks_remaining().unwrap(), 1); + assert!(queue.claim_scheduled_pending_task().unwrap().is_none()); + } + + // Schedule a task, claim it, and then finish and schedule a new task + { + // schedule a task + queue + .schedule_task( + name.clone(), + value_1.clone(), + None, + ScheduleMode::FinishOrReplaceExisting, + ) + .unwrap(); + + // there should be 1 pending task, and 0 running + assert_eq!(queue.pending_tasks_remaining().unwrap(), 1); + assert_eq!(queue.running_tasks_remaining().unwrap(), 0); + + // claim the task + let task = queue.claim_scheduled_pending_task().unwrap().unwrap(); + assert_eq!(task.value, value_1); + assert_eq!(queue.pending_tasks_remaining().unwrap(), 0); + assert_eq!(queue.running_tasks_remaining().unwrap(), 1); + + // schedule a new task + queue + .schedule_task( + name.clone(), + value_2.clone(), + None, + ScheduleMode::FinishOrReplaceExisting, + ) + .unwrap(); + + // the running task should now be finished, and there should be 1 new pending task + assert_eq!(queue.running_tasks_remaining().unwrap(), 0); + assert_eq!(queue.pending_tasks_remaining().unwrap(), 1); + + // claim the task, it should match the new task + let task = queue.claim_scheduled_pending_task().unwrap().unwrap(); + assert_eq!(task.value, value_2); + } + } +} diff --git a/src/commons/storage/scope.rs b/src/commons/storage/scope.rs new file mode 100644 index 000000000..0b8769fcb --- /dev/null +++ b/src/commons/storage/scope.rs @@ -0,0 +1,210 @@ +use std::{ + cmp, + fmt::{Display, Formatter}, + iter::FromIterator, + str::FromStr, +}; + +use crate::commons::storage::{ParseSegmentError, SegmentBuf}; + +/// Used to scope a [`Key`]. Consists of a vector of zero or more +/// [`SegmentBuf`]s. +/// +/// [`Key`]: crate::Key +#[derive(Clone, Debug, Default, Eq, PartialEq, Ord, PartialOrd, Hash)] +#[cfg_attr(feature = "postgres", derive(postgres::types::ToSql, postgres::types::FromSql))] +pub struct Scope { + segments: Vec, +} + +impl Scope { + /// Character used to split on when parsing a Scope from a string. + pub const SEPARATOR: char = '/'; + + /// Create a `Scope` from a single [`Segment`]. 
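+    // Sketch: scopes display as '/'-joined segments, e.g. (using
+    // with_sub_scope, defined further down):
+    //
+    //   let scope = Scope::from_segment(SegmentBuf::parse_lossy("cas"))
+    //       .with_sub_scope(SegmentBuf::parse_lossy("archived"));
+    //   assert_eq!(scope.to_string(), "cas/archived");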
+ pub fn from_segment(segment: impl Into) -> Self { + Scope::new(vec![segment.into()]) + } + + /// Create an empty `Scope`. + pub fn global() -> Self { + Scope::new(Vec::new()) + } + + /// Create a `Scope` from a vector of [`SegmentBuf`]s. + pub fn new(segments: Vec) -> Self { + Scope { segments } + } + + /// Returns the underlying vector of [`SegmentBuf`]s. + pub fn as_vec(&self) -> &Vec { + &self.segments + } + + /// Returns the length of the underlying vector. + pub fn len(&self) -> i32 { + self.segments.len() as i32 + } + + /// Returns `true` if the segment contains no elements. + pub fn is_empty(&self) -> bool { + self.segments.is_empty() + } + + /// Returns whether the underlying vector is empty. + pub fn is_global(&self) -> bool { + self.is_empty() + } + + /// Two scopes match if the longest of the two contains all [`Segment`]s + /// of the other. + pub fn matches(&self, other: &Self) -> bool { + let min_len = cmp::min(self.segments.len(), other.segments.len()); + self.segments[0..min_len] == other.segments[0..min_len] + } + + /// Returns whether the encapsulated vector starts with a certain prefix. + pub fn starts_with(&self, prefix: &Self) -> bool { + if prefix.segments.len() <= self.segments.len() { + self.segments[0..prefix.segments.len()] == prefix.segments + } else { + false + } + } + + /// Returns a vector of all prefixes of the scope. + pub fn sub_scopes(&self) -> Vec { + self.segments + .iter() + .scan(Scope::default(), |state, segment| { + state.segments.push(segment.clone()); + Some(state.clone()) + }) + .collect() + } + + /// Create a new [`Scope`] and add a [`Segment`] to the end of it. + pub fn with_sub_scope(&self, sub_scope: impl Into) -> Self { + let mut clone = self.clone(); + clone.add_sub_scope(sub_scope); + clone + } + + /// Add a [`Segment`] to the end of the scope. + pub fn add_sub_scope(&mut self, sub_scope: impl Into) { + self.segments.push(sub_scope.into()); + } + + /// Create a new [`Scope`] and add a [`Segment`] to the front of it. + pub fn with_super_scope(&self, super_scope: impl Into) -> Self { + let mut clone = self.clone(); + clone.add_super_scope(super_scope); + clone + } + + /// Add a [`Segment`] to the front of the scope. 
+ pub fn add_super_scope(&mut self, super_scope: impl Into) { + self.segments.insert(0, super_scope.into()); + } +} + +impl Display for Scope { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}", + self.segments + .iter() + .map(|segment| segment.as_str()) + .collect::>() + .join(Self::SEPARATOR.encode_utf8(&mut [0; 4])) + ) + } +} + +impl FromStr for Scope { + type Err = ParseSegmentError; + + fn from_str(s: &str) -> Result { + let s = s.strip_suffix(Self::SEPARATOR).unwrap_or(s); + let segments = s + .split(Self::SEPARATOR) + .map(SegmentBuf::from_str) + .collect::>()?; + Ok(Scope { segments }) + } +} + +impl IntoIterator for Scope { + type IntoIter = as IntoIterator>::IntoIter; + type Item = as IntoIterator>::Item; + + fn into_iter(self) -> Self::IntoIter { + self.segments.into_iter() + } +} + +impl<'a> IntoIterator for &'a Scope { + type IntoIter = <&'a Vec as IntoIterator>::IntoIter; + type Item = <&'a Vec as IntoIterator>::Item; + + fn into_iter(self) -> Self::IntoIter { + self.segments.iter() + } +} + +impl Extend for Scope { + fn extend>(&mut self, iter: T) { + self.segments.extend(iter) + } +} + +impl FromIterator for Scope { + fn from_iter>(iter: T) -> Self { + let segments = iter.into_iter().collect(); + Scope { segments } + } +} + +impl From> for Scope { + fn from(segments: Vec) -> Self { + Scope { segments } + } +} + +#[cfg(test)] +mod tests { + use super::Scope; + + #[test] + fn test_matches() { + let full: Scope = format!("this{sep}is{sep}a{sep}beautiful{sep}scope", sep = Scope::SEPARATOR) + .parse() + .unwrap(); + let partial: Scope = format!("this{sep}is{sep}a", sep = Scope::SEPARATOR).parse().unwrap(); + let wrong: Scope = format!("this{sep}is{sep}b", sep = Scope::SEPARATOR).parse().unwrap(); + + assert!(full.matches(&partial)); + assert!(partial.matches(&full)); + assert!(!partial.matches(&wrong)); + assert!(!wrong.matches(&partial)); + assert!(!full.matches(&wrong)); + assert!(!wrong.matches(&full)); + } + + #[test] + fn test_starts_with() { + let full: Scope = format!("this{sep}is{sep}a{sep}beautiful{sep}scope", sep = Scope::SEPARATOR) + .parse() + .unwrap(); + let partial: Scope = format!("this{sep}is{sep}a", sep = Scope::SEPARATOR).parse().unwrap(); + let wrong: Scope = format!("this{sep}is{sep}b", sep = Scope::SEPARATOR).parse().unwrap(); + + assert!(full.starts_with(&partial)); + assert!(!partial.starts_with(&full)); + assert!(!partial.starts_with(&wrong)); + assert!(!wrong.starts_with(&partial)); + assert!(!full.starts_with(&wrong)); + assert!(!wrong.starts_with(&full)); + } +} diff --git a/src/commons/storage/segment.rs b/src/commons/storage/segment.rs new file mode 100644 index 000000000..c482bb220 --- /dev/null +++ b/src/commons/storage/segment.rs @@ -0,0 +1,246 @@ +use std::{ + borrow::Borrow, + fmt::{Display, Formatter}, + ops::Deref, + str::FromStr, +}; + +use crate::commons::storage::Scope; + +/// A nonempty string that does not start or end with whitespace and does not +/// contain any instances of [`Scope::SEPARATOR`]. +/// +/// This is the owned variant of [`Segment`]. 
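+// Sketch of the lossy parsing below: invalid input is sanitized rather than
+// rejected, e.g. (expected results):
+//
+//   SegmentBuf::parse_lossy("a/b") -> "a+b"   (separator replaced)
+//   SegmentBuf::parse_lossy("   ") -> "EMPTY" (empty after trimming)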
+#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] +#[repr(transparent)] +pub struct SegmentBuf(String); + +impl SegmentBuf { + pub fn parse_lossy(value: &str) -> Self { + match Segment::parse(value) { + Ok(segment) => segment.to_owned(), + Err(error) => { + let sanitized = value.trim().replace(Scope::SEPARATOR, "+"); + let nonempty = sanitized.is_empty().then(|| "EMPTY".to_owned()).unwrap_or(sanitized); + let segment = Segment::parse(&nonempty).unwrap(); // cannot panic as all checks are performed above + warn!("{value} is not a valid Segment: {error}\nusing {segment} instead"); + segment.to_owned() + } + } + } + + pub fn concat(lhs: impl Into, rhs: impl Into) -> Self { + Segment::parse(&format!("{}{}", lhs.into(), rhs.into())) + .unwrap() + .to_owned() + } +} + +impl AsRef for SegmentBuf { + fn as_ref(&self) -> &Segment { + self + } +} + +impl Borrow for SegmentBuf { + fn borrow(&self) -> &Segment { + self + } +} + +impl Deref for SegmentBuf { + type Target = Segment; + + fn deref(&self) -> &Self::Target { + unsafe { Segment::from_str_unchecked(&self.0) } + } +} + +impl Display for SegmentBuf { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl FromStr for SegmentBuf { + type Err = ParseSegmentError; + + fn from_str(s: &str) -> Result { + Ok(Segment::parse(s)?.to_owned()) + } +} + +impl From<&Segment> for SegmentBuf { + fn from(value: &Segment) -> Self { + value.to_owned() + } +} + +/// A nonempty string slice that does not start or end with whitespace and does +/// not contain any instances of [`Scope::SEPARATOR`]. +/// +/// For the owned variant, see [`SegmentBuf`]. +#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] +#[repr(transparent)] +pub struct Segment(str); + +impl Segment { + /// Parse a Segment from a string. + /// + /// # Errors + /// If the string is empty, starts or ends with whitespace, or contains a + /// [`Scope::SEPARATOR`] a [`ParseSegmentError`] variant will be returned. + pub const fn parse(value: &str) -> Result<&Self, ParseSegmentError> { + if value.is_empty() { + Err(ParseSegmentError::Empty) + } else { + let bytes = value.as_bytes(); + if Self::leading_whitespace(bytes) || Self::trailing_whitespace(bytes) { + Err(ParseSegmentError::TrailingWhitespace) + } else if Self::contains_separator(bytes) { + Err(ParseSegmentError::ContainsSeparator) + } else { + unsafe { Ok(Segment::from_str_unchecked(value)) } + } + } + } + + /// Return the encapsulated string. + pub fn as_str(&self) -> &str { + &self.0 + } + + /// Creates a Segment from a string without performing any checks. + /// + /// # Safety + /// This should only be called for const values, where we know that + /// the input is safe, or in case the input was thoroughly checked + /// in another way. 
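+    // Typical const use, mirroring the constants further below in this patch:
+    //
+    //   const EXAMPLE: &Segment = unsafe { Segment::from_str_unchecked("example") };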
+ pub const unsafe fn from_str_unchecked(s: &str) -> &Self { + &*(s as *const _ as *const Self) + } + + const fn leading_whitespace(bytes: &[u8]) -> bool { + matches!(bytes[0], 9 | 10 | 32) + } + + const fn trailing_whitespace(bytes: &[u8]) -> bool { + matches!(bytes[bytes.len() - 1], 9 | 10 | 32) + } + + const fn contains_separator(bytes: &[u8]) -> bool { + let mut index = 0; + + while index < bytes.len() { + if bytes[index] == Scope::SEPARATOR as u8 { + return true; + } + index += 1; + } + + false + } +} + +impl Display for Segment { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", &self.0) + } +} + +impl ToOwned for Segment { + type Owned = SegmentBuf; + + fn to_owned(&self) -> Self::Owned { + SegmentBuf(self.0.to_owned()) + } +} + +/// Represents all ways parsing a string as a [`Segment`] can fail. +#[derive(Debug)] +pub enum ParseSegmentError { + TrailingWhitespace, + Empty, + ContainsSeparator, +} + +impl Display for ParseSegmentError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + ParseSegmentError::TrailingWhitespace => "segments must not start or end with whitespace", + ParseSegmentError::Empty => "segments must be nonempty", + ParseSegmentError::ContainsSeparator => "segments must not contain scope separators", + } + .fmt(f) + } +} + +#[cfg(test)] +mod tests { + use super::{Scope, Segment}; + + #[test] + fn test_trailing_separator_fails() { + assert!(Segment::parse(&format!("test{}", Scope::SEPARATOR)).is_err()); + } + + #[test] + fn test_trailing_space_fails() { + assert!(Segment::parse("test ").is_err()); + } + + #[test] + fn test_trailing_tab_fails() { + assert!(Segment::parse("test\t").is_err()); + } + + #[test] + fn test_trailing_newline_fails() { + assert!(Segment::parse("test\n").is_err()); + } + + #[test] + fn test_leading_separator_fails() { + assert!(Segment::parse(&format!("{}test", Scope::SEPARATOR)).is_err()); + } + + #[test] + fn test_leading_space_fails() { + assert!(Segment::parse(" test").is_err()); + } + + #[test] + fn test_leading_tab_fails() { + assert!(Segment::parse("\ttest").is_err()); + } + + #[test] + fn test_leading_newline_fails() { + assert!(Segment::parse("\ntest").is_err()); + } + + #[test] + fn test_containing_separator_fails() { + assert!(Segment::parse(&format!("te{}st", Scope::SEPARATOR)).is_err()); + } + + #[test] + fn test_containing_space_succeeds() { + assert!(Segment::parse("te st").is_ok()); + } + + #[test] + fn test_containing_tab_succeeds() { + assert!(Segment::parse("te\tst").is_ok()); + } + + #[test] + fn test_containing_newline_succeeds() { + assert!(Segment::parse("te\nst").is_ok()); + } + + #[test] + fn test_segment_succeeds() { + assert!(Segment::parse("test").is_ok()) + } +} diff --git a/src/constants.rs b/src/constants.rs index 189d50b86..e891b8696 100644 --- a/src/constants.rs +++ b/src/constants.rs @@ -1,7 +1,5 @@ -use kvx::Namespace; - use crate::{ - commons::{actor::ActorDef, storage::namespace}, + commons::{actor::ActorDef, storage::Namespace}, daemon::auth::common::NoResourceType, }; @@ -45,17 +43,17 @@ pub fn test_announcements_enabled() -> bool { // until const fn's are more versatile for str's, we need to use lazy_static to be able to expand the segment macro at // compile time, while running the expanded code, which actually makes it a Segment, at runtime -pub const TASK_QUEUE_NS: &Namespace = namespace!("tasks"); -pub const CASERVER_NS: &Namespace = namespace!("cas"); -pub const CA_OBJECTS_NS: &Namespace = namespace!("ca_objects"); -pub const KEYS_NS: 
&Namespace = namespace!("keys"); -pub const PUBSERVER_CONTENT_NS: &Namespace = namespace!("pubd_objects"); -pub const PUBSERVER_NS: &Namespace = namespace!("pubd"); -pub const PROPERTIES_NS: &Namespace = namespace!("properties"); -pub const SIGNERS_NS: &Namespace = namespace!("signers"); -pub const STATUS_NS: &Namespace = namespace!("status"); -pub const TA_PROXY_SERVER_NS: &Namespace = namespace!("ta_proxy"); -pub const TA_SIGNER_SERVER_NS: &Namespace = namespace!("ta_signer"); +pub const TASK_QUEUE_NS: &Namespace = unsafe { Namespace::from_str_unchecked("tasks") }; +pub const CASERVER_NS: &Namespace = unsafe { Namespace::from_str_unchecked("cas") }; +pub const CA_OBJECTS_NS: &Namespace = unsafe { Namespace::from_str_unchecked("ca_objects") }; +pub const KEYS_NS: &Namespace = unsafe { Namespace::from_str_unchecked("keys") }; +pub const PUBSERVER_CONTENT_NS: &Namespace = unsafe { Namespace::from_str_unchecked("pubd_objects") }; +pub const PUBSERVER_NS: &Namespace = unsafe { Namespace::from_str_unchecked("pubd") }; +pub const PROPERTIES_NS: &Namespace = unsafe { Namespace::from_str_unchecked("properties") }; +pub const SIGNERS_NS: &Namespace = unsafe { Namespace::from_str_unchecked("signers") }; +pub const STATUS_NS: &Namespace = unsafe { Namespace::from_str_unchecked("status") }; +pub const TA_PROXY_SERVER_NS: &Namespace = unsafe { Namespace::from_str_unchecked("ta_proxy") }; +pub const TA_SIGNER_SERVER_NS: &Namespace = unsafe { Namespace::from_str_unchecked("ta_signer") }; pub const PROPERTIES_DFLT_NAME: &str = "main"; diff --git a/src/daemon/auth/common/crypt.rs b/src/daemon/auth/common/crypt.rs index a74f2cc05..56977be6e 100644 --- a/src/daemon/auth/common/crypt.rs +++ b/src/daemon/auth/common/crypt.rs @@ -18,10 +18,14 @@ use std::sync::atomic::{AtomicU64, Ordering}; -use kvx::{namespace, segment, Key, Namespace, Segment}; - use crate::{ - commons::{error::Error, util::ext_serde, KrillResult}, + commons::storage::Key, + commons::{ + error::Error, + storage::{Namespace, Segment}, + util::ext_serde, + KrillResult, + }, daemon::config::Config, }; @@ -34,8 +38,8 @@ const POLY1305_TAG_BYTE_LEN: usize = POLY1305_TAG_BIT_LEN / 8; const CLEARTEXT_PREFIX_LEN: usize = CHACHA20_NONCE_BYTE_LEN + POLY1305_TAG_BYTE_LEN; const UNUSED_AAD: [u8; 0] = [0; 0]; -const CRYPT_STATE_NS: &Namespace = namespace!("login_sessions"); -const CRYPT_STATE_KEY: &Segment = segment!("main_key"); +const CRYPT_STATE_NS: &Namespace = unsafe { Namespace::from_str_unchecked("login_sessions") }; +const CRYPT_STATE_KEY: &Segment = unsafe { Segment::from_str_unchecked("main_key") }; #[derive(Debug, Deserialize, Serialize)] pub struct NonceState { diff --git a/src/daemon/ca/publishing.rs b/src/daemon/ca/publishing.rs index fe53fee8e..198303a88 100644 --- a/src/daemon/ca/publishing.rs +++ b/src/daemon/ca/publishing.rs @@ -26,7 +26,7 @@ use crate::{ crypto::KrillSigner, error::Error, eventsourcing::PreSaveEventListener, - storage::{Key, KeyValueStore, Scope, Segment, SegmentExt}, + storage::{Key, KeyValueStore, Scope, SegmentBuf}, KrillResult, }, constants::CA_OBJECTS_NS, @@ -170,7 +170,7 @@ impl PreSaveEventListener for CaObjectsStore { impl CaObjectsStore { fn key(ca: &CaHandle) -> Key { - Key::new_global(Segment::parse_lossy(&format!("{}.json", ca))) // ca should always be a valid Segment + Key::new_global(SegmentBuf::parse_lossy(&format!("{}.json", ca))) // ca should always be a valid Segment } pub fn cas(&self) -> KrillResult<Vec<CaHandle>> {
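The publishing change above relies on SegmentBuf::parse_lossy from the new segment.rs: where the strict Segment::parse rejects input, parse_lossy sanitizes it and logs a warning. A small sketch of the difference, under the same in-crate assumption (segment_parsing is an illustrative name):

    use crate::commons::storage::{Scope, Segment, SegmentBuf};

    #[test]
    fn segment_parsing() {
        let sep = Scope::SEPARATOR;
        // Strict parsing rejects separators and surrounding whitespace.
        assert!(Segment::parse("ca1.json").is_ok());
        assert!(Segment::parse(&format!("ca1{sep}key")).is_err());
        assert!(Segment::parse(" ca1 ").is_err());
        // parse_lossy never fails: it trims, swaps separators for '+',
        // and falls back to "EMPTY", logging a warning each time.
        assert_eq!(SegmentBuf::parse_lossy(&format!("ca1{sep}key")).as_str(), "ca1+key");
        assert_eq!(SegmentBuf::parse_lossy(" ca1 ").as_str(), "ca1");
        assert_eq!(SegmentBuf::parse_lossy("").as_str(), "EMPTY");
    }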
diff --git a/src/daemon/ca/status.rs b/src/daemon/ca/status.rs index afa2af352..bc6758e92 100644 --- a/src/daemon/ca/status.rs +++ b/src/daemon/ca/status.rs @@ -1,6 +1,5 @@ use std::{collections::HashMap, str::FromStr, sync::RwLock}; -use kvx::Namespace; use rpki::ca::{ idexchange::{CaHandle, ChildHandle, ParentHandle, ServiceUri}, provisioning::ResourceClassListResponse as Entitlements, @@ -14,13 +13,13 @@ use crate::commons::{ RepoStatus, }, error::Error, - storage::{segment, Key, KeyValueStore, Scope, Segment, SegmentExt}, + storage::{Key, KeyValueStore, Namespace, Scope, Segment, SegmentBuf}, util::httpclient, KrillResult, }; -const PARENTS_PREFIX: &Segment = segment!("parents-"); -const CHILDREN_PREFIX: &Segment = segment!("children-"); +const PARENTS_PREFIX: &Segment = unsafe { Segment::from_str_unchecked("parents-") }; +const CHILDREN_PREFIX: &Segment = unsafe { Segment::from_str_unchecked("children-") }; const JSON_SUFFIX: &str = ".json"; //------------ CaStatus ------------------------------------------------------ @@ -105,7 +104,7 @@ impl StatusStore { // parents let mut parents = ParentStatuses::default(); let keys = self.store.keys( - &Scope::from_segment(Segment::parse_lossy(ca.as_str())), // ca should always be a valid Segment + &Scope::from_segment(SegmentBuf::parse_lossy(ca.as_str())), // ca should always be a valid Segment PARENTS_PREFIX.as_str(), )?; for parent_key in keys { @@ -133,7 +132,7 @@ impl StatusStore { // children let mut children = HashMap::new(); let keys = self.store.keys( - &Scope::from_segment(Segment::parse_lossy(ca.as_str())), // ca should always be a valid Segment + &Scope::from_segment(SegmentBuf::parse_lossy(ca.as_str())), // ca should always be a valid Segment CHILDREN_PREFIX.as_str(), )?; for child_key in keys { @@ -171,8 +170,8 @@ impl StatusStore { fn convert_pre_0_9_5_full_status_if_present(&self, ca: &CaHandle) -> KrillResult<()> { let key = Key::new_scoped( - Scope::from_segment(Segment::parse_lossy(ca.as_str())), - segment!("status.json"), + Scope::from_segment(SegmentBuf::parse_lossy(ca.as_str())), + SegmentBuf::parse_lossy("status.json"), ); // ca should always be a valid Segment let status = self.store.get::<CaStatus>(&key).ok().flatten(); @@ -203,22 +202,22 @@ impl StatusStore { fn repo_status_key(ca: &CaHandle) -> Key { // we may need to support multiple repos in future Key::new_scoped( - Scope::from_segment(Segment::parse_lossy(ca.as_str())), // ca should always be a valid Segment - segment!("repos-main.json"), + Scope::from_segment(SegmentBuf::parse_lossy(ca.as_str())), // ca should always be a valid Segment + SegmentBuf::parse_lossy("repos-main.json"), ) } fn parent_status_key(ca: &CaHandle, parent: &ParentHandle) -> Key { Key::new_scoped( - Scope::from_segment(Segment::parse_lossy(ca.as_str())), // ca should always be a valid Segment - Segment::parse_lossy(&format!("{}{}{}", PARENTS_PREFIX, parent, JSON_SUFFIX)), + Scope::from_segment(SegmentBuf::parse_lossy(ca.as_str())), // ca should always be a valid Segment + SegmentBuf::parse_lossy(&format!("{}{}{}", PARENTS_PREFIX, parent, JSON_SUFFIX)), ) } fn child_status_key(ca: &CaHandle, child: &ChildHandle) -> Key { Key::new_scoped( - Scope::from_segment(Segment::parse_lossy(ca.as_str())), // ca should always be a valid Segment - Segment::parse_lossy(&format!("{}{}{}", CHILDREN_PREFIX, child, JSON_SUFFIX)), + Scope::from_segment(SegmentBuf::parse_lossy(ca.as_str())), // ca should always be a valid Segment + SegmentBuf::parse_lossy(&format!("{}{}{}", CHILDREN_PREFIX, child, JSON_SUFFIX)), ) } @@ -290,8 +289,8 @@ impl StatusStore { self.cache.write().unwrap().remove(ca); self.store -
.drop_scope(&Scope::from_segment(Segment::parse_lossy(ca.as_str())))?; // will only fail if scope is present and cannot be removed - // ca should always be a valid Segment + .drop_scope(&Scope::from_segment(SegmentBuf::parse_lossy(ca.as_str())))?; // will only fail if scope is present and cannot be removed + // ca should always be a valid Segment Ok(()) } @@ -402,7 +401,7 @@ impl StatusStore { mod tests { use super::*; - use crate::{constants::STATUS_NS, test}; + use crate::{commons::storage::Segment, constants::STATUS_NS, test}; #[test] fn read_save_status() { @@ -421,7 +420,7 @@ // using the copied the data - that will be done next and start // a migration. let testbed_status_key = Key::new_scoped( - Scope::from_segment(segment!("testbed")), + Scope::from_segment(SegmentBuf::parse_lossy("testbed")), Segment::parse("status.json").unwrap(), ); let status_testbed_before_migration: CaStatus = status_kv_store.get(&testbed_status_key).unwrap().unwrap(); diff --git a/src/daemon/config.rs b/src/daemon/config.rs index 9e414730a..b4080cd93 100644 --- a/src/daemon/config.rs +++ b/src/daemon/config.rs @@ -8,7 +8,6 @@ use std::{ }; use chrono::Duration; -use kvx::Namespace; use log::{error, LevelFilter}; use rpki::{ ca::idexchange::PublisherHandle, @@ -26,7 +25,7 @@ use crate::{ api::{PublicationServerUris, Token}, crypto::{OpenSslSignerConfig, SignSupport}, error::{Error, KrillIoError}, - storage::KeyValueStore, + storage::{KeyValueStore, Namespace}, util::ext_serde, KrillResult, }, diff --git a/src/daemon/mq.rs b/src/daemon/mq.rs index dbfae9670..67ea763cb 100644 --- a/src/daemon/mq.rs +++ b/src/daemon/mq.rs @@ -3,15 +3,10 @@ //! signed material, or asking a newly added parent for resource //! entitlements. -use std::{fmt, str::FromStr}; +use std::fmt; use url::Url; -use kvx::{ - queue::{Queue, RunningTask, ScheduleMode}, - segment, Segment, SegmentBuf, -}; - use rpki::{ ca::{ idexchange::{CaHandle, ParentHandle}, @@ -21,9 +16,12 @@ use rpki::{ }; use crate::{ - commons::api::Timestamp, - commons::eventsourcing, - commons::{eventsourcing::Aggregate, Error, KrillResult}, + commons::{ + api::Timestamp, + eventsourcing::{self, Aggregate}, + storage::{Key, KeyValueStore, Queue, RunningTask, ScheduleMode, SegmentBuf}, + {Error, KrillResult}, + }, constants::TASK_QUEUE_NS, daemon::ca::{CertAuth, CertAuthEvent}, ta::{ta_handle, TrustAnchorProxy, TrustAnchorProxyEvent}, @@ -100,23 +98,23 @@ pub enum Task { } impl Task { - fn name(&self) -> KrillResult<SegmentBuf> { + fn name(&self) -> SegmentBuf { match self { - Task::SyncRepo { ca_handle: ca, .. } => SegmentBuf::from_str(&format!("sync_repo_{}", ca)), + Task::SyncRepo { ca_handle: ca, .. } => SegmentBuf::parse_lossy(&format!("sync_repo_{}", ca)), Task::SyncParent { ca_handle: ca, parent, .. - } => SegmentBuf::from_str(&format!("sync_{}_with_parent_{}", ca, parent)), + } => SegmentBuf::parse_lossy(&format!("sync_{}_with_parent_{}", ca, parent)), Task::SuspendChildrenIfNeeded { ca_handle: ca } => { - SegmentBuf::from_str(&format!("suspend_children_if_needed_{}", ca)) + SegmentBuf::parse_lossy(&format!("suspend_children_if_needed_{}", ca)) } - Task::RepublishIfNeeded => Ok(segment!("all_cas_republish_if_needed").to_owned()), - Task::RenewObjectsIfNeeded => Ok(segment!("all_cas_renew_objects_if_needed").to_owned()), + Task::RepublishIfNeeded => SegmentBuf::parse_lossy("all_cas_republish_if_needed"), + Task::RenewObjectsIfNeeded => SegmentBuf::parse_lossy("all_cas_renew_objects_if_needed"), Task::ResourceClassRemoved { ca_handle: ca, parent, rcn, ..
- } => SegmentBuf::from_str(&format!( + } => SegmentBuf::parse_lossy(&format!( "resource_class_removed_ca_{}_parent_{}_rcn_{}", ca, parent, rcn )), @@ -125,22 +123,21 @@ rcn, revocation_request, .. - } => SegmentBuf::from_str(&format!( + } => SegmentBuf::parse_lossy(&format!( "unexpected_key_{}_ca_{}_rcn_{}", revocation_request.key(), ca, rcn )), - Task::RefreshAnnouncementsInfo => Ok(segment!("refresh_bgp_announcements_info").to_owned()), - Task::UpdateSnapshots => Ok(segment!("update_stored_snapshots").to_owned()), - Task::RrdpUpdateIfNeeded => Ok(segment!("update_rrdp_if_needed").to_owned()), + Task::RefreshAnnouncementsInfo => SegmentBuf::parse_lossy("refresh_bgp_announcements_info"), + Task::UpdateSnapshots => SegmentBuf::parse_lossy("update_stored_snapshots"), + Task::RrdpUpdateIfNeeded => SegmentBuf::parse_lossy("update_rrdp_if_needed"), #[cfg(feature = "multi-user")] - Task::SweepLoginCache => Ok(segment!("sweep_login_cache").to_owned()), - Task::RenewTestbedTa => Ok(segment!("renew_testbed_ta").to_owned()), - Task::SyncTrustAnchorProxySignerIfPossible => Ok(segment!("sync_ta_proxy_signer").to_owned()), - Task::QueueStartTasks => Ok(segment!("queue_start_tasks").to_owned()), + Task::SweepLoginCache => SegmentBuf::parse_lossy("sweep_login_cache"), + Task::RenewTestbedTa => SegmentBuf::parse_lossy("renew_testbed_ta"), + Task::SyncTrustAnchorProxySignerIfPossible => SegmentBuf::parse_lossy("sync_ta_proxy_signer"), + Task::QueueStartTasks => SegmentBuf::parse_lossy("queue_start_tasks"), } - .map_err(|e| Error::Custom(format!("could not create name: {}", e))) } } @@ -185,12 +182,12 @@ pub enum TaskResult { #[derive(Debug)] pub struct TaskQueue { - q: kvx::KeyValueStore, + q: KeyValueStore, } impl TaskQueue { pub fn new(storage_uri: &Url) -> KrillResult<Self> { - kvx::KeyValueStore::new(storage_uri, TASK_QUEUE_NS) + KeyValueStore::create(storage_uri, TASK_QUEUE_NS) .map(|q| TaskQueue { q }) .map_err(Error::from) } @@ -245,7 +242,7 @@ } fn schedule_task(&self, task: Task, mode: ScheduleMode, priority: Priority) -> KrillResult<()> { - let task_name = task.name()?; + let task_name = task.name(); debug!("add task: {} with priority: {}", task_name, priority.to_string()); let json = serde_json::to_value(&task) .map_err(|e| Error::Custom(format!("could not serialize task {}. error: {}", task_name, e)))?; @@ -256,13 +253,13 @@ } /// Finish a running task, without rescheduling it. - pub fn finish(&self, task: &kvx::Key) -> KrillResult<()> { + pub fn finish(&self, task: &Key) -> KrillResult<()> { debug!("Finish task: {}", task); self.q.finish_running_task(task).map_err(Error::from) } /// Reschedule a running task, without finishing it.
- pub fn reschedule(&self, task: &kvx::Key, priority: Priority) -> KrillResult<()> { + pub fn reschedule(&self, task: &Key, priority: Priority) -> KrillResult<()> { debug!("Reschedule task: {} to: {}", task, priority); self.q .reschedule_running_task(task, Some(priority.to_millis())) @@ -274,7 +271,7 @@ pub fn reschedule_tasks_at_startup(&self) -> KrillResult<()> { let keys = self.q.running_tasks_keys()?; - let queue_started_key_name = Task::QueueStartTasks.name()?; + let queue_started_key_name = Task::QueueStartTasks.name(); if keys.len() > 1 { warn!("Rescheduling running tasks at startup, note that multi-node Krill servers are not yet supported."); diff --git a/src/daemon/scheduler.rs b/src/daemon/scheduler.rs index 7bd6c2782..cc65eb4f9 100644 --- a/src/daemon/scheduler.rs +++ b/src/daemon/scheduler.rs @@ -3,7 +3,6 @@ use std::{collections::HashMap, sync::Arc, time::Duration}; -use kvx::Namespace; use tokio::time::sleep; use rpki::ca::{ @@ -20,6 +19,7 @@ use crate::{ crypto::dispatch::signerinfo::SignerInfo, error::FatalError, eventsourcing::{Aggregate, AggregateStore, WalStore, WalSupport}, + storage::{Key, Namespace}, util::KrillVersion, }, constants::{ @@ -83,7 +83,7 @@ impl Scheduler { loop { while let Some(running_task) = self.tasks.pop() { // remember the key so we can finish or re-schedule the task. - let task_key = kvx::Key::from(&running_task); + let task_key = Key::from(&running_task); match serde_json::from_value(running_task.value) { Err(e) => { diff --git a/src/ta/mod.rs b/src/ta/mod.rs index 2563e96ee..9a1f60cbc 100644 --- a/src/ta/mod.rs +++ b/src/ta/mod.rs @@ -43,7 +43,7 @@ mod tests { api::{PublicationServerInfo, RepositoryContact}, crypto::KrillSignerBuilder, eventsourcing::AggregateStore, - storage::{namespace, Namespace}, + storage::NamespaceBuf, }, daemon::config::ConfigDefaults, test, @@ -55,9 +55,9 @@ mod tests { let cleanup = test::init_logging(); let ta_signer_store: AggregateStore<TrustAnchorSigner> = - AggregateStore::create(storage_uri, namespace!("ta_signer"), false).unwrap(); + AggregateStore::create(storage_uri, NamespaceBuf::parse_lossy("ta_signer").as_ref(), false).unwrap(); let ta_proxy_store: AggregateStore<TrustAnchorProxy> = - AggregateStore::create(storage_uri, namespace!("ta_proxy"), false).unwrap(); + AggregateStore::create(storage_uri, NamespaceBuf::parse_lossy("ta_proxy").as_ref(), false).unwrap(); // We will import a TA key - this is only (supposed to be) supported for the openssl signer let signers = ConfigDefaults::openssl_signer_only(); diff --git a/src/test.rs b/src/test.rs index 45277c125..7ecd36ac5 100644 --- a/src/test.rs +++ b/src/test.rs @@ -888,7 +888,7 @@ pub fn tmp_dir() -> (PathBuf, impl FnOnce()) { }) } -fn random_hex_string() -> String { +pub fn random_hex_string() -> String { hex::encode(random_bytes()) } @@ -899,9 +899,6 @@ pub fn random_bytes() -> [u8; 8] { } pub fn mem_storage() -> Url { - let mut bytes = [0; 8]; - openssl::rand::rand_bytes(&mut bytes).unwrap(); - Url::parse(&format!("memory://{}", random_hex_string())).unwrap() }
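With random_hex_string now public and mem_storage reduced to a one-liner, each call hands out a unique memory:// URL. A sketch of the test pattern this enables, assuming the storage API shown in this patch (isolated_mem_store is an illustrative name):

    use crate::commons::storage::KeyValueStore;
    use crate::constants::STATUS_NS;
    use crate::test;

    #[test]
    fn isolated_mem_store() {
        // Every call yields a fresh URL such as memory://1a2b3c4d5e6f7a8b,
        // so parallel tests never share state.
        let storage_uri = test::mem_storage();
        let store = KeyValueStore::create(&storage_uri, STATUS_NS).unwrap();
        assert!(store.is_empty().unwrap());
    }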
diff --git a/src/upgrades/data_migration.rs b/src/upgrades/data_migration.rs index 5df111bd0..e66015c26 100644 --- a/src/upgrades/data_migration.rs +++ b/src/upgrades/data_migration.rs @@ -2,7 +2,6 @@ use std::{str::FromStr, sync::Arc}; -use kvx::{Namespace, Scope}; use rpki::crypto::KeyIdentifier; use url::Url; @@ -10,7 +9,7 @@ use crate::{ commons::{ crypto::{dispatch::signerinfo::SignerInfo, KrillSignerBuilder, OpenSslSigner}, eventsourcing::{Aggregate, AggregateStore, WalStore, WalSupport}, - storage::KeyValueStore, + storage::{KeyValueStore, Namespace, Scope}, }, constants::{ CASERVER_NS, KEYS_NS, PROPERTIES_NS, PUBSERVER_CONTENT_NS, PUBSERVER_NS, SIGNERS_NS, TA_PROXY_SERVER_NS, diff --git a/src/upgrades/mod.rs b/src/upgrades/mod.rs index c7438957b..a12321a2b 100644 --- a/src/upgrades/mod.rs +++ b/src/upgrades/mod.rs @@ -20,7 +20,7 @@ use crate::{ Aggregate, AggregateStore, AggregateStoreError, StoredCommand, WalStore, WalStoreError, WithStorableDetails, }, storage::Storable, - storage::{segment, Key, KeyValueError, KeyValueStore, Scope, Segment, SegmentExt}, + storage::{Key, KeyValueError, KeyValueStore, Scope, Segment, SegmentBuf}, util::KrillVersion, KrillResult, }, @@ -548,7 +548,7 @@ pub trait UpgradeAggregateStorePre0_14 { /// version set as the target. fn preparation_store_prepare(&self) -> UpgradeResult<()> { let code_version = KrillVersion::code_version(); - let version_key = Key::new_global(segment!("version")); + let version_key = Key::new_global(SegmentBuf::parse_lossy("version")); if let Ok(Some(existing_migration_version)) = self.preparation_key_value_store().get::<KrillVersion>(&version_key) @@ -596,7 +596,7 @@ } fn data_upgrade_info_key(scope: Scope) -> Key { - Key::new_scoped(scope, segment!("upgrade_info.json")) + Key::new_scoped(scope, SegmentBuf::parse_lossy("upgrade_info.json")) } /// Return the DataUpgradeInfo telling us to where we got to with this migration. @@ -619,7 +619,7 @@ /// Clean up keys used for tracking migration progress fn clean_migration_help_files(&self) -> UpgradeResult<()> { - let version_key = Key::new_global(segment!("version")); + let version_key = Key::new_global(SegmentBuf::parse_lossy("version")); self.preparation_key_value_store() .drop_key(&version_key) .map_err(UpgradeError::KeyStoreError)?; @@ -648,7 +648,7 @@ cmd_keys.sort_by_key(|k| k.sequence); let cmd_keys = cmd_keys .into_iter() - .map(|ck| Key::new_scoped(scope.clone(), Segment::parse_lossy(&format!("{}.json", ck)))) // ck should always be a valid Segment + .map(|ck| Key::new_scoped(scope.clone(), SegmentBuf::parse_lossy(&format!("{}.json", ck)))) // ck should always be a valid Segment .collect(); Ok(cmd_keys) @@ -709,7 +709,7 @@ pub fn prepare_upgrade_data_migrations( // be migrated to the new setup in 0.13.0. Well.. it could be done, if there would be a strong use // case to put in the effort, but there really isn't. let ca_kv_store = KeyValueStore::create(&config.storage_uri, CASERVER_NS)?; - if ca_kv_store.has_scope(&Scope::from_segment(segment!("ta")))? { + if ca_kv_store.has_scope(&Scope::from_segment(SegmentBuf::parse_lossy("ta")))? { return Err(UpgradeError::OldTaMigration); } @@ -795,7 +795,10 @@ fn migrate_0_12_pubd_objects(config: &Config) -> KrillResult<bool> { if old_store.has(&repo_content_handle)? { let old_repo_content = old_store.get_latest(&repo_content_handle)?.as_ref().clone(); let repo_content: pubd::RepositoryContent = old_repo_content.try_into()?; - let new_key = Key::new_scoped(Scope::from_segment(segment!("0")), segment!("snapshot.json")); + let new_key = Key::new_scoped( + Scope::from_segment(SegmentBuf::parse_lossy("0")), + SegmentBuf::parse_lossy("snapshot.json"), + ); let upgrade_store = KeyValueStore::create_upgrade_store(&config.storage_uri, PUBSERVER_CONTENT_NS)?; upgrade_store.store(&new_key, &repo_content)?; Ok(true)
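For reference, the 0.12 migration above and the pre-0.12 migration below write the repository content snapshot to the same new location: a "snapshot.json" key under scope "0" instead of one global "0.json" key. A sketch of just the key construction, mirroring the calls in the surrounding hunks:

    use crate::commons::storage::{Key, Scope, SegmentBuf};

    // Old layout: one global key "0.json" holding the snapshot.
    fn old_snapshot_key() -> Key {
        Key::new_global(SegmentBuf::parse_lossy("0.json"))
    }

    // New layout: a "snapshot.json" key inside scope "0".
    fn new_snapshot_key() -> Key {
        Key::new_scoped(
            Scope::from_segment(SegmentBuf::parse_lossy("0")),
            SegmentBuf::parse_lossy("snapshot.json"),
        )
    }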
@@ -808,12 +811,15 @@ /// the location and way of storing it did. So, migrate if present. fn migrate_pre_0_12_pubd_objects(config: &Config) -> KrillResult<()> { let old_store = KeyValueStore::create(&config.storage_uri, PUBSERVER_CONTENT_NS)?; - let old_key = Key::new_global(segment!("0.json")); + let old_key = Key::new_global(SegmentBuf::parse_lossy("0.json")); if let Ok(Some(old_repo_content)) = old_store.get::<OldRepositoryContent>(&old_key) { info!("Found pre 0.12.0 RC2 publication server data. Migrating.."); let repo_content: pubd::RepositoryContent = old_repo_content.try_into()?; - let new_key = Key::new_scoped(Scope::from_segment(segment!("0")), segment!("snapshot.json")); + let new_key = Key::new_scoped( + Scope::from_segment(SegmentBuf::parse_lossy("0")), + SegmentBuf::parse_lossy("snapshot.json"), + ); let upgrade_store = KeyValueStore::create_upgrade_store(&config.storage_uri, PUBSERVER_CONTENT_NS)?; upgrade_store.store(&new_key, &repo_content)?; } @@ -871,7 +877,7 @@ pub fn finalise_data_migration( // for this namespace that still includes a version file. If // so, remove it. let current_store = KeyValueStore::create(&config.storage_uri, ns)?; - let version_key = Key::new_global(segment!("version")); + let version_key = Key::new_global(SegmentBuf::parse_lossy("version")); if current_store.has(&version_key)? { debug!("Removing excess version key in ns: {}", ns); current_store.drop_key(&version_key)?; }
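The global "version" marker key seen here is pre-0.14 upgrade bookkeeping: finalise_data_migration drops any leftover marker, while upgrade_versions in the next hunk reads it to find the data version last seen. A hedged sketch of such a read, assuming KeyValueStore::get returns Result<Option<_>, KeyValueError> as its use with the ? operator here suggests:

    use crate::commons::{
        storage::{Key, KeyValueError, KeyValueStore, SegmentBuf},
        util::KrillVersion,
    };

    fn leftover_version(store: &KeyValueStore) -> Result<Option<KrillVersion>, KeyValueError> {
        let version_key = Key::new_global(SegmentBuf::parse_lossy("version"));
        // None: the namespace was never touched by a pre-0.14 upgrade,
        // or the marker was already cleaned up.
        store.get::<KrillVersion>(&version_key)
    }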
@@ -1019,7 +1025,8 @@ fn upgrade_versions( // latest version (if any) that counts here. for ns in &[CASERVER_NS, CA_OBJECTS_NS, PUBSERVER_NS, PUBSERVER_CONTENT_NS] { let kv_store = KeyValueStore::create(&config.storage_uri, ns)?; - let key = Key::new_global(segment!("version")); + trace!("checking for version in key value store: {}", kv_store); + let key = Key::new_global(SegmentBuf::parse_lossy("version")); if let Some(key_store_version) = kv_store.get::<KrillVersion>(&key)? { if let Some(last_seen) = &current { @@ -1045,11 +1052,10 @@ mod tests { use std::path::PathBuf; - use kvx::Namespace; use log::LevelFilter; use url::Url; - use crate::test; + use crate::{commons::storage::NamespaceBuf, test}; use super::*; @@ -1063,9 +1069,9 @@ let source_url = Url::parse(&format!("local://{}", base_dir)).unwrap(); for ns in namespaces { - let namespace = Namespace::parse(ns).unwrap(); - let source_store = KeyValueStore::create(&source_url, namespace).unwrap(); - let target_store = KeyValueStore::create(&mem_storage_base_uri, namespace).unwrap(); + let namespace = NamespaceBuf::parse_lossy(ns); + let source_store = KeyValueStore::create(&source_url, namespace.as_ref()).unwrap(); + let target_store = KeyValueStore::create(&mem_storage_base_uri, namespace.as_ref()).unwrap(); target_store.import(&source_store).unwrap(); } @@ -1133,7 +1139,7 @@ } #[test] - fn prepare_then_upgrade_0_13_1() { + fn prepare_then_upgrade_0_13_1_cas() { test_upgrade( "test-resources/migrations/v0_13_1/", &["ca_objects", "cas", "keys", "pubd", "pubd_objects", "signers", "status"], diff --git a/src/upgrades/pre_0_10_0/cas_migration.rs b/src/upgrades/pre_0_10_0/cas_migration.rs index b9ed15ebf..4a244aa7d 100644 --- a/src/upgrades/pre_0_10_0/cas_migration.rs +++ b/src/upgrades/pre_0_10_0/cas_migration.rs @@ -1,32 +1,30 @@ use std::convert::TryInto; -use rpki::ca::idexchange::MyHandle; -use rpki::{ca::idexchange::CaHandle, repository::x509::Time}; +use rpki::{ + ca::idexchange::{CaHandle, MyHandle}, + repository::x509::Time, +}; -use crate::commons::api::ProviderAsn; -use crate::commons::eventsourcing::StoredCommandBuilder; -use crate::daemon::ca::CaObjects; -use crate::upgrades::{AspaMigrationConfigUpdates, AspaMigrationConfigs, CommandMigrationEffect, UnconvertedEffect}; use crate::{ commons::{ - api::CertAuthStorableCommand, - eventsourcing::AggregateStore, - storage::{Key, KeyValueStore, Segment, SegmentExt}, + api::{CertAuthStorableCommand, ProviderAsn}, + eventsourcing::{AggregateStore, StoredCommandBuilder}, + storage::{Key, KeyValueStore, SegmentBuf}, }, constants::{CASERVER_NS, CA_OBJECTS_NS}, daemon::{ - ca::{CertAuth, CertAuthEvent, CertAuthInitEvent}, + ca::{CaObjects, CertAuth, CertAuthEvent, CertAuthInitEvent}, config::Config, }, + upgrades::pre_0_10_0::{OldCaObjects, Pre0_10_0CertAuthStorableCommand}, upgrades::{ pre_0_10_0::{Pre0_10CertAuthEvent, Pre0_10CertAuthInitEvent}, pre_0_14_0::OldStoredCommand, UpgradeAggregateStorePre0_14, UpgradeError, UpgradeMode, UpgradeResult, }, + upgrades::{AspaMigrationConfigUpdates, AspaMigrationConfigs, CommandMigrationEffect, UnconvertedEffect}, }; -use super::{OldCaObjects, Pre0_10_0CertAuthStorableCommand}; - /// Migrates the CaObjects for a given CA. /// /// i.e. the CA content which is NOT event-sourced. @@ -46,7 +44,7 @@ impl CaObjectsMigration { } fn prepare_new_data_for(&self, ca: &CaHandle) -> Result<(), UpgradeError> { - let key = Key::new_global(Segment::parse_lossy(&format!("{}.json", ca))); // ca should always be a valid Segment + let key = Key::new_global(SegmentBuf::parse_lossy(&format!("{}.json", ca))); // ca should always be a valid Segment if let Some(old_objects) = self.current_store.get::<OldCaObjects>(&key)?
{ let converted: CaObjects = old_objects.try_into()?; diff --git a/src/upgrades/pre_0_10_0/pubd_migration.rs b/src/upgrades/pre_0_10_0/pubd_migration.rs index 1c742d91f..17420e149 100644 --- a/src/upgrades/pre_0_10_0/pubd_migration.rs +++ b/src/upgrades/pre_0_10_0/pubd_migration.rs @@ -4,7 +4,7 @@ use crate::{ commons::{ api::StorableRepositoryCommand, eventsourcing::{AggregateStore, StoredCommandBuilder}, - storage::{segment, KeyValueStore, Scope, Segment}, + storage::{KeyValueStore, Scope, SegmentBuf}, util::KrillVersion, }, constants::PUBSERVER_NS, @@ -38,7 +38,7 @@ impl PublicationServerRepositoryAccessMigration { if store_migration .current_kv_store - .has_scope(&Scope::from_segment(segment!("0")))? + .has_scope(&Scope::from_segment(SegmentBuf::parse_lossy("0")))? && versions.from >= KrillVersion::release(0, 9, 0) && versions.from < KrillVersion::candidate(0, 10, 0, 1) { diff --git a/src/upgrades/pre_0_14_0/mod.rs b/src/upgrades/pre_0_14_0/mod.rs index f51480c2b..eb4190c5c 100644 --- a/src/upgrades/pre_0_14_0/mod.rs +++ b/src/upgrades/pre_0_14_0/mod.rs @@ -1,6 +1,5 @@ use std::{fmt, str::FromStr}; -use kvx::Namespace; use rpki::{ ca::{idexchange::MyHandle, publication::Base64}, repository::{ @@ -17,8 +16,7 @@ use crate::{ api::{AspaDefinition, CustomerAsn}, crypto::dispatch::signerinfo::{SignerInfo, SignerInfoEvent, SignerInfoInitEvent}, eventsourcing::{Aggregate, AggregateStore, StoredCommand, StoredCommandBuilder, WithStorableDetails}, - storage::KeyValueStore, - storage::Storable, + storage::{KeyValueStore, Namespace, Storable}, }, daemon::{ ca::{CertAuthEvent, CertAuthInitEvent}, From e638d8632d56a73ee8d0b71a67c1de6e03164883 Mon Sep 17 00:00:00 2001 From: Tim Bruijnzeels Date: Tue, 14 Nov 2023 16:05:01 +0100 Subject: [PATCH 4/4] Fix feature frenzy use --- .../crypto/signing/dispatch/krillsigner.rs | 14 +++----------- src/upgrades/mod.rs | 16 ++++++---------- 2 files changed, 9 insertions(+), 21 deletions(-) diff --git a/src/commons/crypto/signing/dispatch/krillsigner.rs b/src/commons/crypto/signing/dispatch/krillsigner.rs index d0d0c3d28..105cce2e9 100644 --- a/src/commons/crypto/signing/dispatch/krillsigner.rs +++ b/src/commons/crypto/signing/dispatch/krillsigner.rs @@ -1,4 +1,4 @@ -use std::{sync::Arc, time::Duration}; +use std::{collections::HashMap, sync::Arc, time::Duration}; use bytes::Bytes; use rpki::{ @@ -34,7 +34,7 @@ use crate::{ signerprovider::{SignerFlags, SignerProvider}, signerrouter::SignerRouter, }, - CryptoResult, OpenSslSigner, SignSupport, + CryptoResult, OpenSslSigner, SignSupport, SignerHandle, }, error::Error, KrillResult, @@ -44,13 +44,7 @@ use crate::{ }; #[cfg(feature = "hsm")] -use std::collections::HashMap; - -#[cfg(feature = "hsm")] -use crate::commons::crypto::{ - signers::{kmip::KmipSigner, pkcs11::Pkcs11Signer}, - SignerHandle, -}; +use crate::commons::crypto::signers::{kmip::KmipSigner, pkcs11::Pkcs11Signer}; /// High level signing interface between Krill and the [SignerRouter]. 
/// @@ -186,12 +180,10 @@ impl KrillSigner { Ok(KrillSigner { router }) } - #[cfg(feature = "hsm")] pub fn get_mapper(&self) -> Option<Arc<SignerMapper>> { self.router.get_mapper() } - #[cfg(feature = "hsm")] pub fn get_active_signers(&self) -> HashMap<SignerHandle, Arc<SignerProvider>> { self.router.get_active_signers() } diff --git a/src/upgrades/mod.rs b/src/upgrades/mod.rs index a12321a2b..7c4c17daf 100644 --- a/src/upgrades/mod.rs +++ b/src/upgrades/mod.rs @@ -8,6 +8,7 @@ use serde::{de::DeserializeOwned, Deserialize}; use rpki::{ ca::idexchange::{CaHandle, MyHandle}, + crypto::Signer, repository::x509::Time, }; @@ -33,12 +34,6 @@ use crate::{ upgrades::pre_0_14_0::{OldStoredCommand, OldStoredEffect, OldStoredEvent}, }; -#[cfg(feature = "hsm")] -use rpki::crypto::KeyIdentifier; - -#[cfg(feature = "hsm")] -use crate::{commons::crypto::SignerHandle, rpki::crypto::Signer}; - use self::{pre_0_13_0::OldRepositoryContent, pre_0_14_0::OldCommandKey}; pub mod data_migration; @@ -904,7 +899,7 @@ pub fn finalise_data_migration( /// never created. So we detect the case that the signer store SIGNERS_DIR directory has not yet been created, i.e. no /// signers have been registered and no key mappings have been recorded, and then walk KEYS_NS adding the keys one by /// one to the mapping in the signer store, if any. -#[cfg(feature = "hsm")] +#[allow(dead_code)] // Remove when the hsm feature is removed. fn record_preexisting_openssl_keys_in_signer_mapper(config: &Config) -> Result<(), UpgradeError> { let signers_key_store = KeyValueStore::create(&config.storage_uri, SIGNERS_NS)?; if signers_key_store.is_empty()? { @@ -926,12 +921,12 @@ fn record_preexisting_openssl_keys_in_signer_mapper(config: &Config) -> Result<( // For every file (key) in the legacy OpenSSL signer keys directory - let mut openssl_signer_handle: Option<SignerHandle> = None; + let mut openssl_signer_handle: Option<crate::commons::crypto::SignerHandle> = None; for key in keys_key_store.keys(&Scope::global(), "")? { debug!("Found key: {}", key); // Is it a key identifier? - if let Ok(key_id) = KeyIdentifier::from_str(key.name().as_str()) { + if let Ok(key_id) = rpki::crypto::KeyIdentifier::from_str(key.name().as_str()) { // Is the key already recorded in the mapper? It shouldn't be, but asking will cause the initial // registration of the OpenSSL signer to occur and for it to be assigned a handle. We need the // handle so that we can register keys with the mapper. @@ -1173,7 +1168,8 @@ mod tests { // #[cfg(all(feature = "hsm", not(any(feature = "hsm-tests-kmip", feature = "hsm-tests-pkcs11"))))] #[allow(dead_code)] // this only looks dead because of complex features. fn unmapped_keys_test_core(do_upgrade: bool) { - let expected_key_id = KeyIdentifier::from_str("5CBCAB14B810C864F3EEA8FD102B79F4E53FCC70").unwrap(); + let expected_key_id = + rpki::crypto::KeyIdentifier::from_str("5CBCAB14B810C864F3EEA8FD102B79F4E53FCC70").unwrap(); // Copy test data into test storage let mem_storage_base_uri = test::mem_storage();