From 5f8d4dbdfff6405f9ae0b7fd3bf5df98fd3ed2aa Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Wed, 11 Oct 2023 12:42:18 +0200 Subject: [PATCH 01/98] Add `nostr-sdk-db` crate --- Cargo.toml | 4 +- crates/nostr-sdk-db/Cargo.toml | 18 ++++++ crates/nostr-sdk-db/src/error.rs | 27 ++++++++ crates/nostr-sdk-db/src/lib.rs | 102 ++++++++++++++++++++++++++++++ crates/nostr-sdk-db/src/memory.rs | 84 ++++++++++++++++++++++++ 5 files changed, 232 insertions(+), 3 deletions(-) create mode 100644 crates/nostr-sdk-db/Cargo.toml create mode 100644 crates/nostr-sdk-db/src/error.rs create mode 100644 crates/nostr-sdk-db/src/lib.rs create mode 100644 crates/nostr-sdk-db/src/memory.rs diff --git a/Cargo.toml b/Cargo.toml index 0482f1475..0e3048587 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,9 +5,7 @@ members = [ "bindings/nostr-sdk-ffi", "bindings/nostr-sdk-js", "bindings/uniffi-bindgen", - "crates/nostr", - "crates/nostr-sdk", - "crates/nostr-sdk-net", + "crates/*", ] default-members = ["crates/*"] resolver = "2" diff --git a/crates/nostr-sdk-db/Cargo.toml b/crates/nostr-sdk-db/Cargo.toml new file mode 100644 index 000000000..36ebc56c1 --- /dev/null +++ b/crates/nostr-sdk-db/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "nostr-sdk-db" +version = "0.1.0" +edition = "2021" +description = "Nostr SDK Database" +authors = ["Yuki Kishimoto "] +homepage.workspace = true +repository.workspace = true +license.workspace = true +readme = "README.md" +rust-version.workspace = true +keywords = ["nostr", "sdk", "db"] + +[dependencies] +async-trait = "0.1" +nostr = { version = "0.24", path = "../nostr", default-features = false, features = ["std"] } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["sync"] } diff --git a/crates/nostr-sdk-db/src/error.rs b/crates/nostr-sdk-db/src/error.rs new file mode 100644 index 000000000..95209a2f2 --- /dev/null +++ b/crates/nostr-sdk-db/src/error.rs @@ -0,0 +1,27 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed 
under the MIT software license + +//! Database Error + +use thiserror::Error; + +/// Database Error +#[derive(Debug, Error)] +pub enum DatabaseError { + /// An error happened in the underlying database backend. + #[error(transparent)] + Backend(Box), +} + +impl DatabaseError { + /// Create a new [`Backend`][Self::Backend] error. + /// + /// Shorthand for `Error::Backend(Box::new(error))`. + #[inline] + pub fn backend(error: E) -> Self + where + E: std::error::Error + Send + Sync + 'static, + { + Self::Backend(Box::new(error)) + } +} diff --git a/crates/nostr-sdk-db/src/lib.rs b/crates/nostr-sdk-db/src/lib.rs new file mode 100644 index 000000000..e10466dd4 --- /dev/null +++ b/crates/nostr-sdk-db/src/lib.rs @@ -0,0 +1,102 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +//! Nostr SDK Database + +#![warn(missing_docs)] +#![warn(rustdoc::bare_urls)] + +use async_trait::async_trait; +use nostr::{Event, EventId, Filter, Url}; + +mod error; +pub mod memory; + +pub use self::error::DatabaseError; + +/// Backend +pub enum Backend { + /// Memory + Memory, + /// RocksDB + RocksDB, + /// Lightning Memory-Mapped Database + LMDB, + /// SQLite + SQLite, + /// IndexedDB + IndexedDB, + /// Custom + Custom(String), +} + +/// A type-erased [`StateStore`]. +pub type DynNostrDatabase = dyn NostrDatabase; + +/// Nostr SDK Database +#[async_trait] +pub trait NostrDatabase: AsyncTraitDeps { + /// Error + type Err; + + /// Name of the backend database used (ex. rocksdb, lmdb, sqlite, indexeddb, ...) 
+ fn backend(&self) -> Backend; + + /// Save [`Event`] into store + async fn save_event(&self, event: &Event) -> Result<(), Self::Err>; + + /// Save [`EventId`] seen by relay + /// + /// Useful for NIP65 (gossip) + async fn save_event_id_seen_by_relay( + &self, + event_id: EventId, + relay_url: Url, + ) -> Result<(), Self::Err>; + + /// Get list of relays that have seen the [`EventId`] + async fn event_recently_seen_on_relays(&self, event_id: EventId) + -> Result, Self::Err>; + + /// Query store with filters + async fn query(&self, filters: Vec) -> Result, Self::Err>; + + /// Get event IDs by filters + /// + /// Uuseful for negentropy reconciliation + async fn event_ids_by_filters(&self, filters: Vec) -> Result, Self::Err>; +} + +/// Alias for `Send` on non-wasm, empty trait (implemented by everything) on +/// wasm. +#[cfg(not(target_arch = "wasm32"))] +pub trait SendOutsideWasm: Send {} +#[cfg(not(target_arch = "wasm32"))] +impl SendOutsideWasm for T {} + +/// Alias for `Send` on non-wasm, empty trait (implemented by everything) on +/// wasm. +#[cfg(target_arch = "wasm32")] +pub trait SendOutsideWasm {} +#[cfg(target_arch = "wasm32")] +impl SendOutsideWasm for T {} + +/// Alias for `Sync` on non-wasm, empty trait (implemented by everything) on +/// wasm. +#[cfg(not(target_arch = "wasm32"))] +pub trait SyncOutsideWasm: Sync {} +#[cfg(not(target_arch = "wasm32"))] +impl SyncOutsideWasm for T {} + +/// Alias for `Sync` on non-wasm, empty trait (implemented by everything) on +/// wasm. +#[cfg(target_arch = "wasm32")] +pub trait SyncOutsideWasm {} +#[cfg(target_arch = "wasm32")] +impl SyncOutsideWasm for T {} + +/// Super trait that is used for our store traits, this trait will differ if +/// it's used on WASM. WASM targets will not require `Send` and `Sync` to have +/// implemented, while other targets will. 
+pub trait AsyncTraitDeps: std::fmt::Debug + SendOutsideWasm + SyncOutsideWasm {} +impl AsyncTraitDeps for T {} diff --git a/crates/nostr-sdk-db/src/memory.rs b/crates/nostr-sdk-db/src/memory.rs new file mode 100644 index 000000000..2119f6a6a --- /dev/null +++ b/crates/nostr-sdk-db/src/memory.rs @@ -0,0 +1,84 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +//! Nostr SDK Database + +use std::collections::{HashMap, HashSet}; +use std::sync::Arc; + +use async_trait::async_trait; +use nostr::{Event, EventId, Filter, Url}; +use thiserror::Error; +use tokio::sync::RwLock; + +use crate::{Backend, DatabaseError, NostrDatabase}; + +/// Memory Database Error +#[derive(Debug, Error)] +pub enum Error {} + +impl From for DatabaseError { + fn from(e: Error) -> Self { + DatabaseError::backend(e) + } +} + +/// Memory Database (RAM) +#[derive(Debug, Default)] +pub struct MemoryDatabase { + seen_event_ids: Arc>>>, +} + +impl MemoryDatabase { + /// New Memory database + pub fn new() -> Self { + Self::default() + } +} + +#[async_trait] +impl NostrDatabase for MemoryDatabase { + type Err = DatabaseError; + + fn backend(&self) -> Backend { + Backend::Memory + } + + async fn save_event(&self, _event: &Event) -> Result<(), Self::Err> { + Ok(()) + } + + async fn save_event_id_seen_by_relay( + &self, + event_id: EventId, + relay_url: Url, + ) -> Result<(), Self::Err> { + let mut seen_event_ids = self.seen_event_ids.write().await; + seen_event_ids + .entry(event_id) + .and_modify(|set| { + set.insert(relay_url.clone()); + }) + .or_insert_with(|| { + let mut set = HashSet::with_capacity(1); + set.insert(relay_url); + set + }); + Ok(()) + } + + async fn event_recently_seen_on_relays( + &self, + _event_id: EventId, + ) -> Result, Self::Err> { + todo!() + } + + async fn query(&self, _filters: Vec) -> Result, Self::Err> { + Ok(Vec::new()) + } + + async fn event_ids_by_filters(&self, _filters: Vec) -> Result, Self::Err> { + Ok(Vec::new()) + } +} From 
441c9ae2530ba242fe6348b657964729af274d2a Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Thu, 12 Oct 2023 14:29:29 +0200 Subject: [PATCH 02/98] db: add `event_id_already_seen` method --- crates/nostr-sdk-db/src/lib.rs | 3 +++ crates/nostr-sdk-db/src/memory.rs | 5 +++++ 2 files changed, 8 insertions(+) diff --git a/crates/nostr-sdk-db/src/lib.rs b/crates/nostr-sdk-db/src/lib.rs index e10466dd4..24616611f 100644 --- a/crates/nostr-sdk-db/src/lib.rs +++ b/crates/nostr-sdk-db/src/lib.rs @@ -45,6 +45,9 @@ pub trait NostrDatabase: AsyncTraitDeps { /// Save [`Event`] into store async fn save_event(&self, event: &Event) -> Result<(), Self::Err>; + /// Check if [`EventId`] was already seen + async fn event_id_already_seen(&self, event_id: EventId) -> Result; + /// Save [`EventId`] seen by relay /// /// Useful for NIP65 (gossip) diff --git a/crates/nostr-sdk-db/src/memory.rs b/crates/nostr-sdk-db/src/memory.rs index 2119f6a6a..bc20380b5 100644 --- a/crates/nostr-sdk-db/src/memory.rs +++ b/crates/nostr-sdk-db/src/memory.rs @@ -48,6 +48,11 @@ impl NostrDatabase for MemoryDatabase { Ok(()) } + async fn event_id_already_seen(&self, event_id: EventId) -> Result { + let seen_event_ids = self.seen_event_ids.read().await; + Ok(seen_event_ids.contains_key(&event_id)) + } + async fn save_event_id_seen_by_relay( &self, event_id: EventId, From 46adec32adb53974605a7e86d77b43454b92207c Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Thu, 12 Oct 2023 15:07:13 +0200 Subject: [PATCH 03/98] db: add `save_events` and rename `event_id_already_seen` and `save_event_id_seen_by_relay` methods --- crates/nostr-sdk-db/src/error.rs | 3 ++ crates/nostr-sdk-db/src/lib.rs | 32 +++++++++++---- crates/nostr-sdk-db/src/memory.rs | 68 +++++++++++++++++++++++-------- 3 files changed, 77 insertions(+), 26 deletions(-) diff --git a/crates/nostr-sdk-db/src/error.rs b/crates/nostr-sdk-db/src/error.rs index 95209a2f2..ac5c6c864 100644 --- a/crates/nostr-sdk-db/src/error.rs +++ 
b/crates/nostr-sdk-db/src/error.rs @@ -11,6 +11,9 @@ pub enum DatabaseError { /// An error happened in the underlying database backend. #[error(transparent)] Backend(Box), + /// Not supported + #[error("method not supported by current backend")] + NotSupported, } impl DatabaseError { diff --git a/crates/nostr-sdk-db/src/lib.rs b/crates/nostr-sdk-db/src/lib.rs index 24616611f..c4fbe7ca4 100644 --- a/crates/nostr-sdk-db/src/lib.rs +++ b/crates/nostr-sdk-db/src/lib.rs @@ -6,6 +6,8 @@ #![warn(missing_docs)] #![warn(rustdoc::bare_urls)] +use std::collections::HashSet; + use async_trait::async_trait; use nostr::{Event, EventId, Filter, Url}; @@ -45,21 +47,35 @@ pub trait NostrDatabase: AsyncTraitDeps { /// Save [`Event`] into store async fn save_event(&self, event: &Event) -> Result<(), Self::Err>; - /// Check if [`EventId`] was already seen - async fn event_id_already_seen(&self, event_id: EventId) -> Result; + /// Save multiple [`Event`] into store + async fn save_events(&self, events: Vec) -> Result<(), Self::Err>; + + /// Check if [`EventId`] has already been seen + async fn has_event_already_been_seen(&self, event_id: EventId) -> Result; - /// Save [`EventId`] seen by relay + /// Set [`EventId`] as seen /// - /// Useful for NIP65 (gossip) - async fn save_event_id_seen_by_relay( + /// Optionally, save also the relay url where the event has been seen (useful for NIP65, aka gossip) + async fn event_id_seen( &self, event_id: EventId, - relay_url: Url, + relay_url: Option, + ) -> Result<(), Self::Err>; + + /// Set multiple [`EventId`] as seen + /// + /// Optionally, save also the relay url where the event has been seen (useful for NIP65, aka gossip) + async fn event_ids_seen( + &self, + event_ids: Vec, + relay_url: Option, ) -> Result<(), Self::Err>; /// Get list of relays that have seen the [`EventId`] - async fn event_recently_seen_on_relays(&self, event_id: EventId) - -> Result, Self::Err>; + async fn event_recently_seen_on_relays( + &self, + event_id: EventId, + ) -> 
Result>, Self::Err>; /// Query store with filters async fn query(&self, filters: Vec) -> Result, Self::Err>; diff --git a/crates/nostr-sdk-db/src/memory.rs b/crates/nostr-sdk-db/src/memory.rs index bc20380b5..bd2c4ceed 100644 --- a/crates/nostr-sdk-db/src/memory.rs +++ b/crates/nostr-sdk-db/src/memory.rs @@ -34,6 +34,29 @@ impl MemoryDatabase { pub fn new() -> Self { Self::default() } + + fn _event_id_seen( + &self, + seen_event_ids: &mut HashMap>, + event_id: EventId, + relay_url: Option, + ) { + seen_event_ids + .entry(event_id) + .and_modify(|set| { + if let Some(relay_url) = &relay_url { + set.insert(relay_url.clone()); + } + }) + .or_insert_with(|| match relay_url { + Some(relay_url) => { + let mut set = HashSet::with_capacity(1); + set.insert(relay_url); + set + } + None => HashSet::with_capacity(0), + }); + } } #[async_trait] @@ -48,42 +71,51 @@ impl NostrDatabase for MemoryDatabase { Ok(()) } - async fn event_id_already_seen(&self, event_id: EventId) -> Result { + async fn save_events(&self, _events: Vec) -> Result<(), Self::Err> { + Ok(()) + } + + async fn has_event_already_been_seen(&self, event_id: EventId) -> Result { let seen_event_ids = self.seen_event_ids.read().await; Ok(seen_event_ids.contains_key(&event_id)) } - async fn save_event_id_seen_by_relay( + async fn event_id_seen( &self, event_id: EventId, - relay_url: Url, + relay_url: Option, ) -> Result<(), Self::Err> { let mut seen_event_ids = self.seen_event_ids.write().await; - seen_event_ids - .entry(event_id) - .and_modify(|set| { - set.insert(relay_url.clone()); - }) - .or_insert_with(|| { - let mut set = HashSet::with_capacity(1); - set.insert(relay_url); - set - }); + self._event_id_seen(&mut seen_event_ids, event_id, relay_url); + Ok(()) + } + + async fn event_ids_seen( + &self, + event_ids: Vec, + relay_url: Option, + ) -> Result<(), Self::Err> { + let mut seen_event_ids = self.seen_event_ids.write().await; + for event_id in event_ids.into_iter() { + self._event_id_seen(&mut seen_event_ids, 
event_id, relay_url.clone()); + } + Ok(()) } async fn event_recently_seen_on_relays( &self, - _event_id: EventId, - ) -> Result, Self::Err> { - todo!() + event_id: EventId, + ) -> Result>, Self::Err> { + let seen_event_ids = self.seen_event_ids.read().await; + Ok(seen_event_ids.get(&event_id).cloned()) } async fn query(&self, _filters: Vec) -> Result, Self::Err> { - Ok(Vec::new()) + Err(DatabaseError::NotSupported) } async fn event_ids_by_filters(&self, _filters: Vec) -> Result, Self::Err> { - Ok(Vec::new()) + Err(DatabaseError::NotSupported) } } From e211026012221d42e47995a4546fb5c61ed37ed4 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Thu, 12 Oct 2023 15:08:05 +0200 Subject: [PATCH 04/98] sdk: init integration of `NostrDatabase` --- Cargo.lock | 22 ++++++ crates/nostr-sdk/Cargo.toml | 1 + crates/nostr-sdk/src/client/blocking.rs | 5 -- crates/nostr-sdk/src/client/mod.rs | 5 +- crates/nostr-sdk/src/relay/mod.rs | 9 ++- crates/nostr-sdk/src/relay/options.rs | 6 -- crates/nostr-sdk/src/relay/pool.rs | 95 ++++++++++++------------- 7 files changed, 76 insertions(+), 67 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e89e9dff8..250070df6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -123,6 +123,17 @@ version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "619743e34b5ba4e9703bba34deac3427c72507c7159f5fd030aea8cac0cfe341" +[[package]] +name = "async-trait" +version = "0.1.74" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "async-utility" version = "0.1.1" @@ -1105,6 +1116,7 @@ version = "0.25.0" dependencies = [ "async-utility", "nostr", + "nostr-sdk-db", "nostr-sdk-net", "once_cell", "thiserror", @@ -1113,6 +1125,16 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "nostr-sdk-db" +version = "0.1.0" +dependencies = [ + 
"async-trait", + "nostr", + "thiserror", + "tokio", +] + [[package]] name = "nostr-sdk-ffi" version = "0.1.0" diff --git a/crates/nostr-sdk/Cargo.toml b/crates/nostr-sdk/Cargo.toml index 34fd62fb2..9b6c36264 100644 --- a/crates/nostr-sdk/Cargo.toml +++ b/crates/nostr-sdk/Cargo.toml @@ -31,6 +31,7 @@ nip47 = ["nostr/nip47"] [dependencies] async-utility = "0.1" nostr = { version = "0.25", path = "../nostr", default-features = false, features = ["std"] } +nostr-sdk-db = { version = "0.1", path = "../nostr-sdk-db" } nostr-sdk-net = { version = "0.25", path = "../nostr-sdk-net" } once_cell = { workspace = true } thiserror = { workspace = true } diff --git a/crates/nostr-sdk/src/client/blocking.rs b/crates/nostr-sdk/src/client/blocking.rs index 005b67207..25c729d55 100644 --- a/crates/nostr-sdk/src/client/blocking.rs +++ b/crates/nostr-sdk/src/client/blocking.rs @@ -99,11 +99,6 @@ impl Client { RUNTIME.block_on(async { self.client.shutdown().await }) } - /// Clear already seen events - pub fn clear_already_seen_events(&self) { - RUNTIME.block_on(async { self.client.clear_already_seen_events().await }) - } - pub fn notifications(&self) -> broadcast::Receiver { self.client.notifications() } diff --git a/crates/nostr-sdk/src/client/mod.rs b/crates/nostr-sdk/src/client/mod.rs index c244724f5..1fd381ee9 100644 --- a/crates/nostr-sdk/src/client/mod.rs +++ b/crates/nostr-sdk/src/client/mod.rs @@ -266,9 +266,8 @@ impl Client { } /// Clear already seen events - pub async fn clear_already_seen_events(&self) { - self.pool.clear_already_seen_events().await; - } + #[deprecated] + pub async fn clear_already_seen_events(&self) {} /// Get new notification listener pub fn notifications(&self) -> broadcast::Receiver { diff --git a/crates/nostr-sdk/src/relay/mod.rs b/crates/nostr-sdk/src/relay/mod.rs index 7032f59fc..f628122cd 100644 --- a/crates/nostr-sdk/src/relay/mod.rs +++ b/crates/nostr-sdk/src/relay/mod.rs @@ -24,6 +24,7 @@ use nostr::{ ClientMessage, Event, EventId, Filter, 
JsonUtil, Keys, RawRelayMessage, RelayMessage, SubscriptionId, Timestamp, Url, }; +use nostr_sdk_db::DynNostrDatabase; use nostr_sdk_net::futures_util::{Future, SinkExt, StreamExt}; use nostr_sdk_net::{self as net, WsMessage}; use thiserror::Error; @@ -256,7 +257,7 @@ pub struct Relay { document: Arc>, opts: RelayOptions, stats: RelayConnectionStats, - // auto_connect_loop_running: Arc, + database: Arc, scheduled_for_stop: Arc, scheduled_for_termination: Arc, pool_sender: Sender, @@ -278,6 +279,7 @@ impl Relay { #[cfg(not(target_arch = "wasm32"))] pub fn new( url: Url, + database: Arc, pool_sender: Sender, notification_sender: broadcast::Sender, proxy: Option, @@ -294,7 +296,7 @@ impl Relay { document: Arc::new(RwLock::new(RelayInformationDocument::new())), opts, stats: RelayConnectionStats::new(), - // auto_connect_loop_running: Arc::new(AtomicBool::new(false)), + database, scheduled_for_stop: Arc::new(AtomicBool::new(false)), scheduled_for_termination: Arc::new(AtomicBool::new(false)), pool_sender, @@ -310,6 +312,7 @@ impl Relay { #[cfg(target_arch = "wasm32")] pub fn new( url: Url, + database: Arc, pool_sender: Sender, notification_sender: broadcast::Sender, opts: RelayOptions, @@ -324,7 +327,7 @@ impl Relay { document: Arc::new(RwLock::new(RelayInformationDocument::new())), opts, stats: RelayConnectionStats::new(), - // auto_connect_loop_running: Arc::new(AtomicBool::new(false)), + database, scheduled_for_stop: Arc::new(AtomicBool::new(false)), scheduled_for_termination: Arc::new(AtomicBool::new(false)), pool_sender, diff --git a/crates/nostr-sdk/src/relay/options.rs b/crates/nostr-sdk/src/relay/options.rs index a2357a921..8a35d3a61 100644 --- a/crates/nostr-sdk/src/relay/options.rs +++ b/crates/nostr-sdk/src/relay/options.rs @@ -215,11 +215,6 @@ pub struct RelayPoolOptions { pub notification_channel_size: usize, /// Task channel size (default: 1024) pub task_channel_size: usize, - /// Max seen events by Task thread (default: 1_000_000) - /// - /// A lower 
number can cause receiving in notification channel - /// the same event multiple times - pub task_max_seen_events: usize, /// Shutdown on [RelayPool](super::pool::RelayPool) drop pub shutdown_on_drop: bool, } @@ -229,7 +224,6 @@ impl Default for RelayPoolOptions { Self { notification_channel_size: 1024, task_channel_size: 1024, - task_max_seen_events: 1_000_000, shutdown_on_drop: false, } } diff --git a/crates/nostr-sdk/src/relay/pool.rs b/crates/nostr-sdk/src/relay/pool.rs index ff520578d..6b7e231a5 100644 --- a/crates/nostr-sdk/src/relay/pool.rs +++ b/crates/nostr-sdk/src/relay/pool.rs @@ -3,7 +3,7 @@ //! Relay Pool -use std::collections::{HashMap, VecDeque}; +use std::collections::HashMap; #[cfg(not(target_arch = "wasm32"))] use std::net::SocketAddr; use std::sync::atomic::{AtomicBool, Ordering}; @@ -16,6 +16,8 @@ use nostr::{ event, ClientMessage, Event, EventId, Filter, JsonUtil, MissingPartialEvent, PartialEvent, RawRelayMessage, RelayMessage, SubscriptionId, Timestamp, Url, }; +use nostr_sdk_db::memory::MemoryDatabase; +use nostr_sdk_db::DynNostrDatabase; use thiserror::Error; use tokio::sync::mpsc::{self, Receiver, Sender}; use tokio::sync::{broadcast, Mutex, RwLock}; @@ -118,25 +120,23 @@ pub enum RelayPoolNotification { #[derive(Debug, Clone)] struct RelayPoolTask { + database: Arc, receiver: Arc>>, notification_sender: broadcast::Sender, - events: Arc>>, running: Arc, - max_seen_events: usize, } impl RelayPoolTask { pub fn new( + database: Arc, pool_task_receiver: Receiver, notification_sender: broadcast::Sender, - max_seen_events: usize, ) -> Self { Self { + database, receiver: Arc::new(Mutex::new(pool_task_receiver)), - events: Arc::new(Mutex::new(VecDeque::new())), notification_sender, running: Arc::new(AtomicBool::new(false)), - max_seen_events, } } @@ -150,11 +150,6 @@ impl RelayPoolTask { .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| Some(value)); } - pub async fn clear_already_seen_events(&self) { - let mut events = 
self.events.lock().await; - events.clear(); - } - pub fn run(&self) { if self.is_running() { tracing::warn!("Relay Pool Task is already running!") @@ -179,12 +174,32 @@ impl RelayPoolTask { match msg { RelayMessage::Event { event, .. } => { // Check if event was already seen - if this.add_event(event.id).await { - let notification = RelayPoolNotification::Event( - relay_url, - event.as_ref().clone(), + match this.database.has_event_already_been_seen(event.id).await + { + Ok(seen) => { + if !seen { + let notification = RelayPoolNotification::Event( + relay_url.clone(), + event.as_ref().clone(), + ); + let _ = + this.notification_sender.send(notification); + } + } + Err(e) => tracing::error!( + "Impossible to check if event {} was already seen: {e}", + event.id + ), + } + + // Set event as seen by relay + if let Err(e) = + this.database.event_id_seen(event.id, Some(relay_url)).await + { + tracing::error!( + "Impossible to set event {} as seen by relay: {e}", + event.id ); - let _ = this.notification_sender.send(notification); } } RelayMessage::Notice { message } => { @@ -199,7 +214,9 @@ impl RelayPoolTask { } } RelayPoolMessage::BatchEvent(ids) => { - this.add_events(ids).await; + if let Err(e) = this.database.event_ids_seen(ids, None).await { + tracing::error!("Impossible to set events as seen: {e}"); + } } RelayPoolMessage::RelayStatus { url, status } => { let _ = this @@ -272,38 +289,12 @@ impl RelayPoolTask { m => Ok(RelayMessage::try_from(m)?), } } - - async fn add_event(&self, event_id: EventId) -> bool { - let mut events = self.events.lock().await; - if events.contains(&event_id) { - false - } else { - while events.len() >= self.max_seen_events { - events.pop_front(); - } - events.push_back(event_id); - true - } - } - - async fn add_events(&self, ids: Vec) { - if !ids.is_empty() { - let mut events = self.events.lock().await; - for event_id in ids.into_iter() { - if !events.contains(&event_id) { - while events.len() >= self.max_seen_events { - 
events.pop_front(); - } - events.push_back(event_id); - } - } - } - } } /// Relay Pool #[derive(Debug, Clone)] pub struct RelayPool { + database: Arc, relays: Arc>>, pool_task_sender: Sender, notification_sender: broadcast::Sender, @@ -340,13 +331,16 @@ impl RelayPool { let (notification_sender, _) = broadcast::channel(opts.notification_channel_size); let (pool_task_sender, pool_task_receiver) = mpsc::channel(opts.task_channel_size); + let database = Arc::new(MemoryDatabase::new()); + let relay_pool_task = RelayPoolTask::new( + database.clone(), pool_task_receiver, notification_sender.clone(), - opts.task_max_seen_events, ); let pool = Self { + database, relays: Arc::new(RwLock::new(HashMap::new())), pool_task_sender, notification_sender, @@ -393,16 +387,16 @@ impl RelayPool { Ok(()) } - /// Clear already seen events - pub async fn clear_already_seen_events(&self) { - self.pool_task.clear_already_seen_events().await; - } - /// Get new notification listener pub fn notifications(&self) -> broadcast::Receiver { self.notification_sender.subscribe() } + /// Get database + pub fn database(&self) -> Arc { + self.database.clone() + } + /// Get relays pub async fn relays(&self) -> HashMap { let relays = self.relays.read().await; @@ -448,6 +442,7 @@ impl RelayPool { if !relays.contains_key(&url) { let relay = Relay::new( url, + self.database.clone(), self.pool_task_sender.clone(), self.notification_sender.clone(), proxy, From 9ba7b9f66399ad9f1fe4a5a7862464ae5330ed79 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Thu, 12 Oct 2023 16:21:44 +0200 Subject: [PATCH 05/98] ffi: remove `clear_already_seen_events` method --- bindings/nostr-sdk-ffi/src/client/mod.rs | 4 ---- bindings/nostr-sdk-ffi/src/nostr_sdk.udl | 1 - 2 files changed, 5 deletions(-) diff --git a/bindings/nostr-sdk-ffi/src/client/mod.rs b/bindings/nostr-sdk-ffi/src/client/mod.rs index 5b0793921..f22620e19 100644 --- a/bindings/nostr-sdk-ffi/src/client/mod.rs +++ b/bindings/nostr-sdk-ffi/src/client/mod.rs @@ 
-72,10 +72,6 @@ impl Client { Ok(self.inner.clone().shutdown()?) } - pub fn clear_already_seen_events(&self) { - self.inner.clear_already_seen_events() - } - pub fn relays(&self) -> HashMap> { self.inner .relays() diff --git a/bindings/nostr-sdk-ffi/src/nostr_sdk.udl b/bindings/nostr-sdk-ffi/src/nostr_sdk.udl index e95b1b12c..3938b0a71 100644 --- a/bindings/nostr-sdk-ffi/src/nostr_sdk.udl +++ b/bindings/nostr-sdk-ffi/src/nostr_sdk.udl @@ -627,7 +627,6 @@ interface Client { boolean is_running(); [Throws=NostrSdkError] void shutdown(); - void clear_already_seen_events(); record relays(); [Throws=NostrSdkError] From ced3c6733e86da79a296a06528002deefda38a49 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Thu, 12 Oct 2023 16:23:43 +0200 Subject: [PATCH 06/98] sdk: save event in database if never seen before --- crates/nostr-sdk-db/src/memory.rs | 1 + crates/nostr-sdk/src/relay/mod.rs | 16 +++++----------- crates/nostr-sdk/src/relay/pool.rs | 11 ++++++++++- 3 files changed, 16 insertions(+), 12 deletions(-) diff --git a/crates/nostr-sdk-db/src/memory.rs b/crates/nostr-sdk-db/src/memory.rs index bd2c4ceed..f15c4f84d 100644 --- a/crates/nostr-sdk-db/src/memory.rs +++ b/crates/nostr-sdk-db/src/memory.rs @@ -27,6 +27,7 @@ impl From for DatabaseError { #[derive(Debug, Default)] pub struct MemoryDatabase { seen_event_ids: Arc>>>, + // TODO: add messages queue? 
(messages not sent) } impl MemoryDatabase { diff --git a/crates/nostr-sdk/src/relay/mod.rs b/crates/nostr-sdk/src/relay/mod.rs index f628122cd..d27f35d45 100644 --- a/crates/nostr-sdk/src/relay/mod.rs +++ b/crates/nostr-sdk/src/relay/mod.rs @@ -433,16 +433,6 @@ impl Relay { self.relay_sender.max_capacity() - self.relay_sender.capacity() } - /* fn is_auto_connect_loop_running(&self) -> bool { - self.auto_connect_loop_running.load(Ordering::SeqCst) - } - - fn set_auto_connect_loop_running(&self, value: bool) { - let _ = - self.auto_connect_loop_running - .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| Some(value)); - } */ - fn is_scheduled_for_stop(&self) -> bool { self.scheduled_for_stop.load(Ordering::SeqCst) } @@ -787,7 +777,11 @@ impl Relay { if size <= max_size { match RawRelayMessage::from_json(&data) { Ok(msg) => { - tracing::trace!("Received message to {}: {:?}", relay.url, msg); + tracing::trace!( + "Received message from {}: {:?}", + relay.url, + msg + ); if let Err(err) = relay .pool_sender .send(RelayPoolMessage::ReceivedMsg { diff --git a/crates/nostr-sdk/src/relay/pool.rs b/crates/nostr-sdk/src/relay/pool.rs index 6b7e231a5..c3453fe46 100644 --- a/crates/nostr-sdk/src/relay/pool.rs +++ b/crates/nostr-sdk/src/relay/pool.rs @@ -178,9 +178,18 @@ impl RelayPoolTask { { Ok(seen) => { if !seen { + if let Err(e) = + this.database.save_event(&event).await + { + tracing::error!( + "Impossible to save event {}: {e}", + event.id + ); + } + let notification = RelayPoolNotification::Event( relay_url.clone(), - event.as_ref().clone(), + *event.clone(), ); let _ = this.notification_sender.send(notification); From d2e478dd52d93c9ed502c19c745d9be3f071bb77 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 16 Oct 2023 14:43:27 +0200 Subject: [PATCH 07/98] sdk: add `with_database` constructor to `RelayPool` --- crates/nostr-sdk/src/lib.rs | 1 + crates/nostr-sdk/src/relay/mod.rs | 1 + crates/nostr-sdk/src/relay/pool.rs | 14 ++++++++++---- 3 files changed, 12 
insertions(+), 4 deletions(-) diff --git a/crates/nostr-sdk/src/lib.rs b/crates/nostr-sdk/src/lib.rs index 309f8884a..ed5b29854 100644 --- a/crates/nostr-sdk/src/lib.rs +++ b/crates/nostr-sdk/src/lib.rs @@ -4,6 +4,7 @@ #![forbid(unsafe_code)] #![warn(missing_docs)] #![warn(rustdoc::bare_urls)] +#![allow(clippy::arc_with_non_send_sync)] //! High level Nostr client library. diff --git a/crates/nostr-sdk/src/relay/mod.rs b/crates/nostr-sdk/src/relay/mod.rs index d27f35d45..0bb64df30 100644 --- a/crates/nostr-sdk/src/relay/mod.rs +++ b/crates/nostr-sdk/src/relay/mod.rs @@ -257,6 +257,7 @@ pub struct Relay { document: Arc>, opts: RelayOptions, stats: RelayConnectionStats, + #[allow(dead_code)] database: Arc, scheduled_for_stop: Arc, scheduled_for_termination: Arc, diff --git a/crates/nostr-sdk/src/relay/pool.rs b/crates/nostr-sdk/src/relay/pool.rs index c3453fe46..535c77263 100644 --- a/crates/nostr-sdk/src/relay/pool.rs +++ b/crates/nostr-sdk/src/relay/pool.rs @@ -202,8 +202,10 @@ impl RelayPoolTask { } // Set event as seen by relay - if let Err(e) = - this.database.event_id_seen(event.id, Some(relay_url)).await + if let Err(e) = this + .database + .event_id_seen(event.id, Some(relay_url)) + .await { tracing::error!( "Impossible to set event {} as seen by relay: {e}", @@ -337,11 +339,14 @@ impl Drop for RelayPool { impl RelayPool { /// Create new `RelayPool` pub fn new(opts: RelayPoolOptions) -> Self { + Self::with_database(opts, Arc::new(MemoryDatabase::new())) + } + + /// New with database + pub fn with_database(opts: RelayPoolOptions, database: Arc) -> Self { let (notification_sender, _) = broadcast::channel(opts.notification_channel_size); let (pool_task_sender, pool_task_receiver) = mpsc::channel(opts.task_channel_size); - let database = Arc::new(MemoryDatabase::new()); - let relay_pool_task = RelayPoolTask::new( database.clone(), pool_task_receiver, @@ -477,6 +482,7 @@ impl RelayPool { if !relays.contains_key(&url) { let relay = Relay::new( url, + 
self.database.clone(), self.pool_task_sender.clone(), self.notification_sender.clone(), opts, From 24876c036c7af6c5e340ab175943c4f0f4590d1d Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 16 Oct 2023 15:14:04 +0200 Subject: [PATCH 08/98] sdk: add `ClientBuilder` --- crates/nostr-sdk/src/client/builder.rs | 60 ++++++++++++++++++++++++++ crates/nostr-sdk/src/client/mod.rs | 28 ++++++------ crates/nostr-sdk/src/lib.rs | 2 +- 3 files changed, 77 insertions(+), 13 deletions(-) create mode 100644 crates/nostr-sdk/src/client/builder.rs diff --git a/crates/nostr-sdk/src/client/builder.rs b/crates/nostr-sdk/src/client/builder.rs new file mode 100644 index 000000000..1f5019983 --- /dev/null +++ b/crates/nostr-sdk/src/client/builder.rs @@ -0,0 +1,60 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +//! Client builder + +use std::sync::Arc; + +use nostr::Keys; +use nostr_sdk_db::memory::MemoryDatabase; +use nostr_sdk_db::DynNostrDatabase; + +#[cfg(feature = "nip46")] +use super::RemoteSigner; +use crate::{Client, Options}; + +/// Client builder +pub struct ClientBuilder { + pub(super) keys: Keys, + pub(super) database: Arc, + pub(super) opts: Options, + #[cfg(feature = "nip46")] + pub(super) remote_signer: Option, +} + +impl ClientBuilder { + /// New client builder + pub fn new(keys: &Keys) -> Self { + Self { + keys: keys.clone(), + database: Arc::new(MemoryDatabase::new()), + opts: Options::default(), + #[cfg(feature = "nip46")] + remote_signer: None, + } + } + + /// Set database + pub fn database(mut self, database: Arc) -> Self { + self.database = database; + self + } + + /// Set opts + pub fn opts(mut self, opts: Options) -> Self { + self.opts = opts; + self + } + + /// Set remote signer + #[cfg(feature = "nip46")] + pub fn remote_signer(mut self, remote_signer: RemoteSigner) -> Self { + self.remote_signer = Some(remote_signer); + self + } + + /// Build [`Client`] + pub fn build(self) -> Client { + 
Client::from_builder(self) + } +} diff --git a/crates/nostr-sdk/src/client/mod.rs b/crates/nostr-sdk/src/client/mod.rs index 1fd381ee9..93479fa41 100644 --- a/crates/nostr-sdk/src/client/mod.rs +++ b/crates/nostr-sdk/src/client/mod.rs @@ -27,10 +27,12 @@ use tokio::sync::{broadcast, RwLock}; #[cfg(feature = "blocking")] pub mod blocking; +pub mod builder; pub mod options; #[cfg(feature = "nip46")] pub mod signer; +pub use self::builder::ClientBuilder; pub use self::options::Options; #[cfg(feature = "nip46")] pub use self::signer::remote::RemoteSigner; @@ -164,14 +166,7 @@ impl Client { /// let client = Client::with_opts(&my_keys, opts); /// ``` pub fn with_opts(keys: &Keys, opts: Options) -> Self { - Self { - pool: RelayPool::new(opts.pool), - keys: Arc::new(RwLock::new(keys.clone())), - opts, - dropped: Arc::new(AtomicBool::new(false)), - #[cfg(feature = "nip46")] - remote_signer: None, - } + ClientBuilder::new(keys).opts(opts).build() } /// Create a new NIP46 Client @@ -187,12 +182,21 @@ impl Client { remote_signer: RemoteSigner, opts: Options, ) -> Self { + ClientBuilder::new(app_keys) + .remote_signer(remote_signer) + .opts(opts) + .build() + } + + /// Compose [`Client`] from [`ClientBuilder`] + pub fn from_builder(builder: ClientBuilder) -> Self { Self { - pool: RelayPool::new(opts.pool), - keys: Arc::new(RwLock::new(app_keys.clone())), - opts, + pool: RelayPool::with_database(builder.opts.pool, builder.database), + keys: Arc::new(RwLock::new(builder.keys)), + opts: builder.opts, dropped: Arc::new(AtomicBool::new(false)), - remote_signer: Some(remote_signer), + #[cfg(feature = "nip46")] + remote_signer: builder.remote_signer, } } diff --git a/crates/nostr-sdk/src/lib.rs b/crates/nostr-sdk/src/lib.rs index ed5b29854..35cab6374 100644 --- a/crates/nostr-sdk/src/lib.rs +++ b/crates/nostr-sdk/src/lib.rs @@ -31,7 +31,7 @@ pub mod util; #[cfg(feature = "blocking")] pub use self::client::blocking; -pub use self::client::{Client, Options}; +pub use 
self::client::{Client, ClientBuilder, Options}; pub use self::relay::{ ActiveSubscription, FilterOptions, InternalSubscriptionId, Relay, RelayConnectionStats, RelayOptions, RelayPoolNotification, RelayPoolOptions, RelaySendOptions, RelayStatus, From 19cfd244f00bb3f5d3812365e4d248ab92731171 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Tue, 17 Oct 2023 11:43:10 +0200 Subject: [PATCH 09/98] db: save events in `MemoryDatabase` --- crates/nostr-sdk-db/src/memory.rs | 35 ++++++++++++++++++++++++------- 1 file changed, 28 insertions(+), 7 deletions(-) diff --git a/crates/nostr-sdk-db/src/memory.rs b/crates/nostr-sdk-db/src/memory.rs index f15c4f84d..672027d11 100644 --- a/crates/nostr-sdk-db/src/memory.rs +++ b/crates/nostr-sdk-db/src/memory.rs @@ -7,7 +7,7 @@ use std::collections::{HashMap, HashSet}; use std::sync::Arc; use async_trait::async_trait; -use nostr::{Event, EventId, Filter, Url}; +use nostr::{Event, EventId, Filter, FiltersMatchEvent, Url}; use thiserror::Error; use tokio::sync::RwLock; @@ -27,6 +27,7 @@ impl From for DatabaseError { #[derive(Debug, Default)] pub struct MemoryDatabase { seen_event_ids: Arc>>>, + events: Arc>>, // TODO: add messages queue? 
(messages not sent) } @@ -68,11 +69,17 @@ impl NostrDatabase for MemoryDatabase { Backend::Memory } - async fn save_event(&self, _event: &Event) -> Result<(), Self::Err> { + async fn save_event(&self, event: &Event) -> Result<(), Self::Err> { + let mut events = self.events.write().await; + events.insert(event.id, event.clone()); Ok(()) } - async fn save_events(&self, _events: Vec) -> Result<(), Self::Err> { + async fn save_events(&self, list: Vec) -> Result<(), Self::Err> { + let mut events = self.events.write().await; + for event in list.into_iter() { + events.insert(event.id, event); + } Ok(()) } @@ -112,11 +119,25 @@ impl NostrDatabase for MemoryDatabase { Ok(seen_event_ids.get(&event_id).cloned()) } - async fn query(&self, _filters: Vec) -> Result, Self::Err> { - Err(DatabaseError::NotSupported) + async fn query(&self, filters: Vec) -> Result, Self::Err> { + let events = self.events.read().await; + let mut list: Vec = Vec::new(); + for event in events.values() { + if filters.match_event(event) { + list.push(event.clone()); + } + } + Ok(list) } - async fn event_ids_by_filters(&self, _filters: Vec) -> Result, Self::Err> { - Err(DatabaseError::NotSupported) + async fn event_ids_by_filters(&self, filters: Vec) -> Result, Self::Err> { + let events = self.events.read().await; + let mut list: Vec = Vec::new(); + for event in events.values() { + if filters.match_event(event) { + list.push(event.id); + } + } + Ok(list) } } From 7e44cc3c05e01738eb23a8ef0246b095cf64ec96 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Tue, 17 Oct 2023 11:47:40 +0200 Subject: [PATCH 10/98] db: add `event_by_id` method --- crates/nostr-sdk-db/src/error.rs | 3 +++ crates/nostr-sdk-db/src/lib.rs | 3 +++ crates/nostr-sdk-db/src/memory.rs | 8 ++++++++ 3 files changed, 14 insertions(+) diff --git a/crates/nostr-sdk-db/src/error.rs b/crates/nostr-sdk-db/src/error.rs index ac5c6c864..4be1307c6 100644 --- a/crates/nostr-sdk-db/src/error.rs +++ b/crates/nostr-sdk-db/src/error.rs @@ -14,6 +14,9 @@ 
pub enum DatabaseError { /// Not supported #[error("method not supported by current backend")] NotSupported, + /// Not found + #[error("not found")] + NotFound, } impl DatabaseError { diff --git a/crates/nostr-sdk-db/src/lib.rs b/crates/nostr-sdk-db/src/lib.rs index c4fbe7ca4..61df4e40c 100644 --- a/crates/nostr-sdk-db/src/lib.rs +++ b/crates/nostr-sdk-db/src/lib.rs @@ -77,6 +77,9 @@ pub trait NostrDatabase: AsyncTraitDeps { event_id: EventId, ) -> Result>, Self::Err>; + /// Get [`Event`] by [`EventId`] + async fn event_by_id(&self, event_id: EventId) -> Result; + /// Query store with filters async fn query(&self, filters: Vec) -> Result, Self::Err>; diff --git a/crates/nostr-sdk-db/src/memory.rs b/crates/nostr-sdk-db/src/memory.rs index 672027d11..15ff70d89 100644 --- a/crates/nostr-sdk-db/src/memory.rs +++ b/crates/nostr-sdk-db/src/memory.rs @@ -119,6 +119,14 @@ impl NostrDatabase for MemoryDatabase { Ok(seen_event_ids.get(&event_id).cloned()) } + async fn event_by_id(&self, event_id: EventId) -> Result { + let events = self.events.read().await; + events + .get(&event_id) + .cloned() + .ok_or(DatabaseError::NotFound) + } + async fn query(&self, filters: Vec) -> Result, Self::Err> { let events = self.events.read().await; let mut list: Vec = Vec::new(); From 22ad6d44995e265b321e57cb4973bd0e0f2c663b Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Tue, 17 Oct 2023 14:27:51 +0200 Subject: [PATCH 11/98] sdk: fix clippy --- crates/nostr-sdk/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/nostr-sdk/src/lib.rs b/crates/nostr-sdk/src/lib.rs index 35cab6374..1516a6460 100644 --- a/crates/nostr-sdk/src/lib.rs +++ b/crates/nostr-sdk/src/lib.rs @@ -4,6 +4,7 @@ #![forbid(unsafe_code)] #![warn(missing_docs)] #![warn(rustdoc::bare_urls)] +#![allow(unknown_lints)] #![allow(clippy::arc_with_non_send_sync)] //! High level Nostr client library. 
From 019603aa6e97ae29b0b4deaf2cee3ffbf085b43a Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Tue, 17 Oct 2023 16:20:33 +0200 Subject: [PATCH 12/98] sdk: add `database` method to `Client` --- crates/nostr-sdk/src/client/mod.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/crates/nostr-sdk/src/client/mod.rs b/crates/nostr-sdk/src/client/mod.rs index 93479fa41..9dd48efab 100644 --- a/crates/nostr-sdk/src/client/mod.rs +++ b/crates/nostr-sdk/src/client/mod.rs @@ -22,6 +22,7 @@ use nostr::{ ChannelId, ClientMessage, Contact, Event, EventBuilder, EventId, Filter, JsonUtil, Keys, Kind, Metadata, Result, Tag, }; +use nostr_sdk_db::DynNostrDatabase; use nostr_sdk_net::futures_util::Future; use tokio::sync::{broadcast, RwLock}; @@ -222,6 +223,11 @@ impl Client { self.pool.clone() } + /// Get database + pub fn database(&self) -> Arc { + self.pool.database() + } + /// Get NIP46 uri #[cfg(feature = "nip46")] pub async fn nostr_connect_uri( From f502a9a0c99b7346c82c93bc1801732bafe07ecc Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Tue, 17 Oct 2023 16:46:39 +0200 Subject: [PATCH 13/98] db: improve `save_event` method --- crates/nostr-sdk-db/src/lib.rs | 4 +- crates/nostr-sdk-db/src/memory.rs | 84 +++++++++++++++++++++++++++---- 2 files changed, 76 insertions(+), 12 deletions(-) diff --git a/crates/nostr-sdk-db/src/lib.rs b/crates/nostr-sdk-db/src/lib.rs index 61df4e40c..41bf91ad6 100644 --- a/crates/nostr-sdk-db/src/lib.rs +++ b/crates/nostr-sdk-db/src/lib.rs @@ -45,7 +45,9 @@ pub trait NostrDatabase: AsyncTraitDeps { fn backend(&self) -> Backend; /// Save [`Event`] into store - async fn save_event(&self, event: &Event) -> Result<(), Self::Err>; + /// + /// Return `true` if event was successfully saved into database. 
+ async fn save_event(&self, event: &Event) -> Result; /// Save multiple [`Event`] into store async fn save_events(&self, events: Vec) -> Result<(), Self::Err>; diff --git a/crates/nostr-sdk-db/src/memory.rs b/crates/nostr-sdk-db/src/memory.rs index 15ff70d89..96b86146a 100644 --- a/crates/nostr-sdk-db/src/memory.rs +++ b/crates/nostr-sdk-db/src/memory.rs @@ -59,6 +59,75 @@ impl MemoryDatabase { None => HashSet::with_capacity(0), }); } + + async fn _query( + &self, + events: &HashMap, + filters: Vec, + ) -> Result, DatabaseError> { + let mut list: Vec = Vec::new(); + for event in events.values() { + if filters.match_event(event) { + list.push(event.clone()); + } + } + Ok(list) + } + + async fn _save_event( + &self, + events: &mut HashMap, + event: Event, + ) -> Result { + self.event_id_seen(event.id, None).await?; + + if event.is_expired() || event.is_ephemeral() { + tracing::warn!("Event {} not saved: expired or ephemeral", event.id); + return Ok(false); + } + + let mut should_insert: bool = true; + + if event.is_replaceable() { + let filter: Filter = Filter::new() + .author(event.pubkey.to_string()) + .kind(event.kind); + let res: Vec = self._query(events, vec![filter]).await?; + if let Some(ev) = res.into_iter().next() { + if ev.created_at >= event.created_at { + should_insert = false; + } else if ev.created_at < event.created_at { + events.remove(&ev.id); + } + } + } else if event.is_parameterized_replaceable() { + match event.identifier() { + Some(identifier) => { + let filter: Filter = Filter::new() + .author(event.pubkey.to_string()) + .kind(event.kind) + .identifier(identifier); + let res: Vec = self._query(events, vec![filter]).await?; + if let Some(ev) = res.into_iter().next() { + if ev.created_at >= event.created_at { + should_insert = false; + } else if ev.created_at < event.created_at { + events.remove(&ev.id); + } + } + } + None => should_insert = false, + } + } + + if should_insert { + events.insert(event.id, event); + Ok(true) + } else { + 
tracing::warn!("Event {} not saved: unknown", event.id); + Ok(false) + } + } } #[async_trait] @@ -69,16 +138,15 @@ impl NostrDatabase for MemoryDatabase { Backend::Memory } - async fn save_event(&self, event: &Event) -> Result<(), Self::Err> { + async fn save_event(&self, event: &Event) -> Result { let mut events = self.events.write().await; - events.insert(event.id, event.clone()); - Ok(()) + self._save_event(&mut events, event.clone()).await } async fn save_events(&self, list: Vec) -> Result<(), Self::Err> { let mut events = self.events.write().await; for event in list.into_iter() { - events.insert(event.id, event); + self._save_event(&mut events, event).await?; } Ok(()) } @@ -129,13 +197,7 @@ impl NostrDatabase for MemoryDatabase { async fn query(&self, filters: Vec) -> Result, Self::Err> { let events = self.events.read().await; - let mut list: Vec = Vec::new(); - for event in events.values() { - if filters.match_event(event) { - list.push(event.clone()); - } - } - Ok(list) + self._query(&events, filters).await } async fn event_ids_by_filters(&self, filters: Vec) -> Result, Self::Err> { From 4d9f52de8f5ddcf060a91e251d3a80686beb3f34 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Tue, 17 Oct 2023 16:56:18 +0200 Subject: [PATCH 14/98] db: remove `save_events` method --- crates/nostr-sdk-db/Cargo.toml | 1 + crates/nostr-sdk-db/src/lib.rs | 3 --- crates/nostr-sdk-db/src/memory.rs | 8 -------- 3 files changed, 1 insertion(+), 11 deletions(-) diff --git a/crates/nostr-sdk-db/Cargo.toml b/crates/nostr-sdk-db/Cargo.toml index 36ebc56c1..67915a985 100644 --- a/crates/nostr-sdk-db/Cargo.toml +++ b/crates/nostr-sdk-db/Cargo.toml @@ -16,3 +16,4 @@ async-trait = "0.1" nostr = { version = "0.24", path = "../nostr", default-features = false, features = ["std"] } thiserror = { workspace = true } tokio = { workspace = true, features = ["sync"] } +tracing = { workspace = true, features = ["std"] } diff --git a/crates/nostr-sdk-db/src/lib.rs b/crates/nostr-sdk-db/src/lib.rs 
index 41bf91ad6..db3af4f07 100644 --- a/crates/nostr-sdk-db/src/lib.rs +++ b/crates/nostr-sdk-db/src/lib.rs @@ -49,9 +49,6 @@ pub trait NostrDatabase: AsyncTraitDeps { /// Return `true` if event was successfully saved into database. async fn save_event(&self, event: &Event) -> Result; - /// Save multiple [`Event`] into store - async fn save_events(&self, events: Vec) -> Result<(), Self::Err>; - /// Check if [`EventId`] has already been seen async fn has_event_already_been_seen(&self, event_id: EventId) -> Result; diff --git a/crates/nostr-sdk-db/src/memory.rs b/crates/nostr-sdk-db/src/memory.rs index 96b86146a..c2b0d2a63 100644 --- a/crates/nostr-sdk-db/src/memory.rs +++ b/crates/nostr-sdk-db/src/memory.rs @@ -143,14 +143,6 @@ impl NostrDatabase for MemoryDatabase { self._save_event(&mut events, event.clone()).await } - async fn save_events(&self, list: Vec) -> Result<(), Self::Err> { - let mut events = self.events.write().await; - for event in list.into_iter() { - self._save_event(&mut events, event).await?; - } - Ok(()) - } - async fn has_event_already_been_seen(&self, event_id: EventId) -> Result { let seen_event_ids = self.seen_event_ids.read().await; Ok(seen_event_ids.contains_key(&event_id)) From e7eb9856879f77d995193907093795edf9023b6f Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Tue, 17 Oct 2023 16:57:23 +0200 Subject: [PATCH 15/98] sdk: remove `RelayPoolMessage::BatchEvent` --- crates/nostr-sdk/src/relay/pool.rs | 60 +++++++++++++----------------- 1 file changed, 25 insertions(+), 35 deletions(-) diff --git a/crates/nostr-sdk/src/relay/pool.rs b/crates/nostr-sdk/src/relay/pool.rs index 535c77263..078af20e7 100644 --- a/crates/nostr-sdk/src/relay/pool.rs +++ b/crates/nostr-sdk/src/relay/pool.rs @@ -17,7 +17,7 @@ use nostr::{ RawRelayMessage, RelayMessage, SubscriptionId, Timestamp, Url, }; use nostr_sdk_db::memory::MemoryDatabase; -use nostr_sdk_db::DynNostrDatabase; +use nostr_sdk_db::{DatabaseError, DynNostrDatabase}; use thiserror::Error; use 
tokio::sync::mpsc::{self, Receiver, Sender}; use tokio::sync::{broadcast, Mutex, RwLock}; @@ -47,6 +47,9 @@ pub enum Error { /// Message handler error #[error(transparent)] MessageHandler(#[from] MessageHandleError), + /// Database error + #[error(transparent)] + Database(#[from] DatabaseError), /// Thread error #[error(transparent)] Thread(#[from] thread::Error), @@ -83,8 +86,6 @@ pub enum RelayPoolMessage { /// Relay message msg: RawRelayMessage, }, - /// Events sent - BatchEvent(Vec), /// Relay status changed RelayStatus { /// Relay url @@ -216,6 +217,13 @@ impl RelayPoolTask { RelayMessage::Notice { message } => { tracing::warn!("Notice from {relay_url}: {message}") } + RelayMessage::Ok { + event_id, + status, + message, + } => { + tracing::debug!("Received OK from {relay_url} for event {event_id}: status={status}, message={message}"); + } _ => (), } } @@ -224,11 +232,6 @@ impl RelayPoolTask { ), } } - RelayPoolMessage::BatchEvent(ids) => { - if let Err(e) = this.database.event_ids_seen(ids, None).await { - tracing::error!("Impossible to set events as seen: {e}"); - } - } RelayPoolMessage::RelayStatus { url, status } => { let _ = this .notification_sender @@ -509,16 +512,6 @@ impl RelayPool { Ok(()) } - async fn set_events_as_sent(&self, ids: Vec) { - if let Err(e) = self - .pool_task_sender - .send(RelayPoolMessage::BatchEvent(ids)) - .await - { - tracing::error!("{e}"); - }; - } - /// Send client message pub async fn send_msg(&self, msg: ClientMessage, wait: Option) -> Result<(), Error> { let relays = self.relays().await; @@ -528,7 +521,7 @@ impl RelayPool { } if let ClientMessage::Event(event) = &msg { - self.set_events_as_sent(vec![event.id]).await; + self.database.save_event(event).await?; } let sent_to_at_least_one_relay: Arc = Arc::new(AtomicBool::new(false)); @@ -572,17 +565,12 @@ impl RelayPool { return Err(Error::NoRelays); } - let ids: Vec = msgs - .iter() - .filter_map(|msg| { - if let ClientMessage::Event(event) = msg { - Some(event.id) - } else { 
- None - } - }) - .collect(); - self.set_events_as_sent(ids).await; + // Save events into database + for msg in msgs.iter() { + if let ClientMessage::Event(event) = msg { + self.database.save_event(event).await?; + } + } let sent_to_at_least_one_relay: Arc = Arc::new(AtomicBool::new(false)); let mut handles = Vec::new(); @@ -628,7 +616,7 @@ impl RelayPool { let url: Url = url.try_into_url()?; if let ClientMessage::Event(event) = &msg { - self.set_events_as_sent(vec![event.id]).await; + self.database.save_event(event).await?; } let relays = self.relays().await; @@ -648,7 +636,7 @@ impl RelayPool { return Err(Error::NoRelays); } - self.set_events_as_sent(vec![event.id]).await; + self.database.save_event(&event).await?; let sent_to_at_least_one_relay: Arc = Arc::new(AtomicBool::new(false)); let mut handles = Vec::new(); @@ -693,8 +681,10 @@ impl RelayPool { return Err(Error::NoRelays); } - let ids: Vec = events.iter().map(|e| e.id).collect(); - self.set_events_as_sent(ids).await; + // Save events into database + for event in events.iter() { + self.database.save_event(event).await?; + } let sent_to_at_least_one_relay: Arc = Arc::new(AtomicBool::new(false)); let mut handles = Vec::new(); @@ -738,7 +728,7 @@ impl RelayPool { Error: From<::Err>, { let url: Url = url.try_into_url()?; - self.set_events_as_sent(vec![event.id]).await; + self.database.save_event(&event).await?; let relays = self.relays().await; if let Some(relay) = relays.get(&url) { Ok(relay.send_event(event, opts).await?) 
From 287cdfa830b33ebd5432f34228f66963bc1412ed Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Fri, 20 Oct 2023 14:15:04 +0200 Subject: [PATCH 16/98] db: add `memory` example --- Cargo.lock | 1 + crates/nostr-sdk-db/Cargo.toml | 3 ++ crates/nostr-sdk-db/examples/memory.rs | 51 ++++++++++++++++++++++++++ 3 files changed, 55 insertions(+) create mode 100644 crates/nostr-sdk-db/examples/memory.rs diff --git a/Cargo.lock b/Cargo.lock index 250070df6..698804262 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1133,6 +1133,7 @@ dependencies = [ "nostr", "thiserror", "tokio", + "tracing", ] [[package]] diff --git a/crates/nostr-sdk-db/Cargo.toml b/crates/nostr-sdk-db/Cargo.toml index 67915a985..d400b09f9 100644 --- a/crates/nostr-sdk-db/Cargo.toml +++ b/crates/nostr-sdk-db/Cargo.toml @@ -17,3 +17,6 @@ nostr = { version = "0.24", path = "../nostr", default-features = false, feature thiserror = { workspace = true } tokio = { workspace = true, features = ["sync"] } tracing = { workspace = true, features = ["std"] } + +[dev-dependencies] +tokio = { workspace = true, features = ["macros", "rt-multi-thread", "time"] } diff --git a/crates/nostr-sdk-db/examples/memory.rs b/crates/nostr-sdk-db/examples/memory.rs new file mode 100644 index 000000000..2cfc06e63 --- /dev/null +++ b/crates/nostr-sdk-db/examples/memory.rs @@ -0,0 +1,51 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +use std::time::{Duration, Instant}; + +use nostr::{EventBuilder, Filter, Keys, Kind, Metadata, Tag}; +use nostr_sdk_db::memory::MemoryDatabase; +use nostr_sdk_db::NostrDatabase; + +#[tokio::main] +async fn main() { + let keys = Keys::generate(); + let database = MemoryDatabase::new(); + + for i in 0..50_000 { + let event = EventBuilder::new_text_note(format!("Event #{i}"), &[]) + .to_event(&keys) + .unwrap(); + database.save_event(&event).await.unwrap(); + + let event = EventBuilder::new_text_note( + format!("Reply to event #{i}"), + &[ + Tag::Event(event.id, 
None, None), + Tag::PubKey(event.pubkey, None), + ], + ) + .to_event(&keys) + .unwrap(); + database.save_event(&event).await.unwrap(); + } + + for i in 0..10 { + let metadata = Metadata::new().name(format!("Name #{i}")); + let event = EventBuilder::set_metadata(metadata) + .to_event(&keys) + .unwrap(); + database.save_event(&event).await.unwrap(); + tokio::time::sleep(Duration::from_secs(1)).await; + } + + let now = Instant::now(); + let events = database + .query(vec![Filter::new() + .kind(Kind::Metadata) + .author(keys.public_key().to_string())]) + .await + .unwrap(); + println!("{events:?}"); + println!("Time: {} ns", now.elapsed().as_nanos()); +} From ebb84e516a403dfe024fdefb1dd7110d778663f3 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 23 Oct 2023 14:08:29 +0200 Subject: [PATCH 17/98] db: add `wipe` method --- crates/nostr-sdk-db/src/lib.rs | 3 +++ crates/nostr-sdk-db/src/memory.rs | 8 ++++++++ 2 files changed, 11 insertions(+) diff --git a/crates/nostr-sdk-db/src/lib.rs b/crates/nostr-sdk-db/src/lib.rs index db3af4f07..398b004d3 100644 --- a/crates/nostr-sdk-db/src/lib.rs +++ b/crates/nostr-sdk-db/src/lib.rs @@ -86,6 +86,9 @@ pub trait NostrDatabase: AsyncTraitDeps { /// /// Uuseful for negentropy reconciliation async fn event_ids_by_filters(&self, filters: Vec) -> Result, Self::Err>; + + /// Wipe all data + async fn wipe(&self) -> Result<(), Self::Err>; } /// Alias for `Send` on non-wasm, empty trait (implemented by everything) on diff --git a/crates/nostr-sdk-db/src/memory.rs b/crates/nostr-sdk-db/src/memory.rs index c2b0d2a63..734dd1983 100644 --- a/crates/nostr-sdk-db/src/memory.rs +++ b/crates/nostr-sdk-db/src/memory.rs @@ -202,4 +202,12 @@ impl NostrDatabase for MemoryDatabase { } Ok(list) } + + async fn wipe(&self) -> Result<(), Self::Err> { + let mut seen_event_ids = self.seen_event_ids.write().await; + seen_event_ids.clear(); + let mut events = self.events.write().await; + events.clear(); + Ok(()) + } } From 
26163ec5b6c96ac20152207c07f7701dcd362988 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 23 Oct 2023 14:24:49 +0200 Subject: [PATCH 18/98] db: allow to disable `events` store in `MemoryDatabase` --- crates/nostr-sdk-db/examples/memory.rs | 2 +- crates/nostr-sdk-db/src/error.rs | 3 + crates/nostr-sdk-db/src/memory.rs | 134 +++++++++++++++---------- crates/nostr-sdk/src/client/builder.rs | 2 +- crates/nostr-sdk/src/relay/pool.rs | 2 +- 5 files changed, 88 insertions(+), 55 deletions(-) diff --git a/crates/nostr-sdk-db/examples/memory.rs b/crates/nostr-sdk-db/examples/memory.rs index 2cfc06e63..711de3ac3 100644 --- a/crates/nostr-sdk-db/examples/memory.rs +++ b/crates/nostr-sdk-db/examples/memory.rs @@ -10,7 +10,7 @@ use nostr_sdk_db::NostrDatabase; #[tokio::main] async fn main() { let keys = Keys::generate(); - let database = MemoryDatabase::new(); + let database = MemoryDatabase::new(true); for i in 0..50_000 { let event = EventBuilder::new_text_note(format!("Event #{i}"), &[]) diff --git a/crates/nostr-sdk-db/src/error.rs b/crates/nostr-sdk-db/src/error.rs index 4be1307c6..796684d27 100644 --- a/crates/nostr-sdk-db/src/error.rs +++ b/crates/nostr-sdk-db/src/error.rs @@ -14,6 +14,9 @@ pub enum DatabaseError { /// Not supported #[error("method not supported by current backend")] NotSupported, + /// Feature disabled + #[error("feature disabled for current backend")] + FeatureDisabled, /// Not found #[error("not found")] NotFound, diff --git a/crates/nostr-sdk-db/src/memory.rs b/crates/nostr-sdk-db/src/memory.rs index 734dd1983..b5f7d0ac3 100644 --- a/crates/nostr-sdk-db/src/memory.rs +++ b/crates/nostr-sdk-db/src/memory.rs @@ -24,17 +24,31 @@ impl From for DatabaseError { } /// Memory Database (RAM) -#[derive(Debug, Default)] +#[derive(Debug)] pub struct MemoryDatabase { + store_events: bool, seen_event_ids: Arc>>>, events: Arc>>, // TODO: add messages queue? 
(messages not sent) } +impl Default for MemoryDatabase { + fn default() -> Self { + Self::new(false) + } +} + impl MemoryDatabase { /// New Memory database - pub fn new() -> Self { - Self::default() + /// + /// If `store_events` arg is set to `true`, the seen events will be stored in memory (a lot of it could be used). + /// If it's set to `false`, only the [`EventId`] will be stored (instead of the full [`Event`]) + pub fn new(store_events: bool) -> Self { + Self { + store_events, + seen_event_ids: Arc::new(RwLock::new(HashMap::new())), + events: Arc::new(RwLock::new(HashMap::new())), + } } fn _event_id_seen( @@ -81,50 +95,54 @@ impl MemoryDatabase { ) -> Result { self.event_id_seen(event.id, None).await?; - if event.is_expired() || event.is_ephemeral() { - tracing::warn!("Event {} not saved: expired or ephemeral", event.id); - return Ok(false); - } + if self.store_events { + if event.is_expired() || event.is_ephemeral() { + tracing::warn!("Event {} not saved: expired or ephemeral", event.id); + return Ok(false); + } - let mut should_insert: bool = true; - - if event.is_replaceable() { - let filter: Filter = Filter::new() - .author(event.pubkey.to_string()) - .kind(event.kind); - let res: Vec = self._query(events, vec![filter]).await?; - if let Some(ev) = res.into_iter().next() { - if ev.created_at >= event.created_at { - should_insert = false; - } else if ev.created_at < event.created_at { - events.remove(&ev.id); + let mut should_insert: bool = true; + + if event.is_replaceable() { + let filter: Filter = Filter::new() + .author(event.pubkey.to_string()) + .kind(event.kind); + let res: Vec = self._query(events, vec![filter]).await?; + if let Some(ev) = res.into_iter().next() { + if ev.created_at >= event.created_at { + should_insert = false; + } else if ev.created_at < event.created_at { + events.remove(&ev.id); + } } - } - } else if event.is_parameterized_replaceable() { - match event.identifier() { - Some(identifier) => { - let filter: Filter = Filter::new() - 
.author(event.pubkey.to_string()) - .kind(event.kind) - .identifier(identifier); - let res: Vec = self._query(events, vec![filter]).await?; - if let Some(ev) = res.into_iter().next() { - if ev.created_at >= event.created_at { - should_insert = false; - } else if ev.created_at < event.created_at { - events.remove(&ev.id); + } else if event.is_parameterized_replaceable() { + match event.identifier() { + Some(identifier) => { + let filter: Filter = Filter::new() + .author(event.pubkey.to_string()) + .kind(event.kind) + .identifier(identifier); + let res: Vec = self._query(events, vec![filter]).await?; + if let Some(ev) = res.into_iter().next() { + if ev.created_at >= event.created_at { + should_insert = false; + } else if ev.created_at < event.created_at { + events.remove(&ev.id); + } } } + None => should_insert = false, } - None => should_insert = false, } - } - if should_insert { - events.insert(event.id, event); - Ok(true) + if should_insert { + events.insert(event.id, event); + Ok(true) + } else { + tracing::warn!("Event {} not saved: unknown", event.id); + Ok(false) + } } else { - tracing::warn!("Event {} not saved: unknown", event.id); Ok(false) } } @@ -180,27 +198,39 @@ impl NostrDatabase for MemoryDatabase { } async fn event_by_id(&self, event_id: EventId) -> Result { - let events = self.events.read().await; - events - .get(&event_id) - .cloned() - .ok_or(DatabaseError::NotFound) + if self.store_events { + let events = self.events.read().await; + events + .get(&event_id) + .cloned() + .ok_or(DatabaseError::NotFound) + } else { + Err(DatabaseError::FeatureDisabled) + } } async fn query(&self, filters: Vec) -> Result, Self::Err> { - let events = self.events.read().await; - self._query(&events, filters).await + if self.store_events { + let events = self.events.read().await; + self._query(&events, filters).await + } else { + Err(DatabaseError::FeatureDisabled) + } } async fn event_ids_by_filters(&self, filters: Vec) -> Result, Self::Err> { - let events = 
self.events.read().await; - let mut list: Vec = Vec::new(); - for event in events.values() { - if filters.match_event(event) { - list.push(event.id); + if self.store_events { + let events = self.events.read().await; + let mut list: Vec = Vec::new(); + for event in events.values() { + if filters.match_event(event) { + list.push(event.id); + } } + Ok(list) + } else { + Err(DatabaseError::FeatureDisabled) } - Ok(list) } async fn wipe(&self) -> Result<(), Self::Err> { diff --git a/crates/nostr-sdk/src/client/builder.rs b/crates/nostr-sdk/src/client/builder.rs index 1f5019983..a74a55c03 100644 --- a/crates/nostr-sdk/src/client/builder.rs +++ b/crates/nostr-sdk/src/client/builder.rs @@ -27,7 +27,7 @@ impl ClientBuilder { pub fn new(keys: &Keys) -> Self { Self { keys: keys.clone(), - database: Arc::new(MemoryDatabase::new()), + database: Arc::new(MemoryDatabase::default()), opts: Options::default(), #[cfg(feature = "nip46")] remote_signer: None, diff --git a/crates/nostr-sdk/src/relay/pool.rs b/crates/nostr-sdk/src/relay/pool.rs index 078af20e7..4b9c1d980 100644 --- a/crates/nostr-sdk/src/relay/pool.rs +++ b/crates/nostr-sdk/src/relay/pool.rs @@ -342,7 +342,7 @@ impl Drop for RelayPool { impl RelayPool { /// Create new `RelayPool` pub fn new(opts: RelayPoolOptions) -> Self { - Self::with_database(opts, Arc::new(MemoryDatabase::new())) + Self::with_database(opts, Arc::new(MemoryDatabase::default())) } /// New with database From d7d07e58c9b0eba35aed4547ba0f819d6f4eafec Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 23 Oct 2023 14:47:45 +0200 Subject: [PATCH 19/98] db: add `DatabaseOptions` --- crates/nostr-sdk-db/examples/memory.rs | 5 +++-- crates/nostr-sdk-db/src/lib.rs | 5 +++++ crates/nostr-sdk-db/src/memory.rs | 25 +++++++++++++------------ crates/nostr-sdk-db/src/options.rs | 24 ++++++++++++++++++++++++ 4 files changed, 45 insertions(+), 14 deletions(-) create mode 100644 crates/nostr-sdk-db/src/options.rs diff --git a/crates/nostr-sdk-db/examples/memory.rs 
b/crates/nostr-sdk-db/examples/memory.rs index 711de3ac3..12bd92bba 100644 --- a/crates/nostr-sdk-db/examples/memory.rs +++ b/crates/nostr-sdk-db/examples/memory.rs @@ -5,12 +5,13 @@ use std::time::{Duration, Instant}; use nostr::{EventBuilder, Filter, Keys, Kind, Metadata, Tag}; use nostr_sdk_db::memory::MemoryDatabase; -use nostr_sdk_db::NostrDatabase; +use nostr_sdk_db::{DatabaseOptions, NostrDatabase}; #[tokio::main] async fn main() { let keys = Keys::generate(); - let database = MemoryDatabase::new(true); + let opts = DatabaseOptions::default(); + let database = MemoryDatabase::new(opts); for i in 0..50_000 { let event = EventBuilder::new_text_note(format!("Event #{i}"), &[]) diff --git a/crates/nostr-sdk-db/src/lib.rs b/crates/nostr-sdk-db/src/lib.rs index 398b004d3..4744169c2 100644 --- a/crates/nostr-sdk-db/src/lib.rs +++ b/crates/nostr-sdk-db/src/lib.rs @@ -13,8 +13,10 @@ use nostr::{Event, EventId, Filter, Url}; mod error; pub mod memory; +mod options; pub use self::error::DatabaseError; +pub use self::options::DatabaseOptions; /// Backend pub enum Backend { @@ -44,6 +46,9 @@ pub trait NostrDatabase: AsyncTraitDeps { /// Name of the backend database used (ex. rocksdb, lmdb, sqlite, indexeddb, ...) fn backend(&self) -> Backend; + /// Database options + fn opts(&self) -> DatabaseOptions; + /// Save [`Event`] into store /// /// Return `true` if event was successfully saved into database. 
diff --git a/crates/nostr-sdk-db/src/memory.rs b/crates/nostr-sdk-db/src/memory.rs index b5f7d0ac3..32034f72f 100644 --- a/crates/nostr-sdk-db/src/memory.rs +++ b/crates/nostr-sdk-db/src/memory.rs @@ -11,7 +11,7 @@ use nostr::{Event, EventId, Filter, FiltersMatchEvent, Url}; use thiserror::Error; use tokio::sync::RwLock; -use crate::{Backend, DatabaseError, NostrDatabase}; +use crate::{Backend, DatabaseError, DatabaseOptions, NostrDatabase}; /// Memory Database Error #[derive(Debug, Error)] @@ -26,7 +26,7 @@ impl From for DatabaseError { /// Memory Database (RAM) #[derive(Debug)] pub struct MemoryDatabase { - store_events: bool, + opts: DatabaseOptions, seen_event_ids: Arc>>>, events: Arc>>, // TODO: add messages queue? (messages not sent) @@ -34,18 +34,15 @@ pub struct MemoryDatabase { impl Default for MemoryDatabase { fn default() -> Self { - Self::new(false) + Self::new(DatabaseOptions { events: false }) } } impl MemoryDatabase { /// New Memory database - /// - /// If `store_events` arg is set to `true`, the seen events will be stored in memory (a lot of it could be used). 
- /// If it's set to `false`, only the [`EventId`] will be stored (instead of the full [`Event`]) - pub fn new(store_events: bool) -> Self { + pub fn new(opts: DatabaseOptions) -> Self { Self { - store_events, + opts, seen_event_ids: Arc::new(RwLock::new(HashMap::new())), events: Arc::new(RwLock::new(HashMap::new())), } @@ -95,7 +92,7 @@ impl MemoryDatabase { ) -> Result { self.event_id_seen(event.id, None).await?; - if self.store_events { + if self.opts.events { if event.is_expired() || event.is_ephemeral() { tracing::warn!("Event {} not saved: expired or ephemeral", event.id); return Ok(false); @@ -156,6 +153,10 @@ impl NostrDatabase for MemoryDatabase { Backend::Memory } + fn opts(&self) -> DatabaseOptions { + self.opts + } + async fn save_event(&self, event: &Event) -> Result { let mut events = self.events.write().await; self._save_event(&mut events, event.clone()).await @@ -198,7 +199,7 @@ impl NostrDatabase for MemoryDatabase { } async fn event_by_id(&self, event_id: EventId) -> Result { - if self.store_events { + if self.opts.events { let events = self.events.read().await; events .get(&event_id) @@ -210,7 +211,7 @@ impl NostrDatabase for MemoryDatabase { } async fn query(&self, filters: Vec) -> Result, Self::Err> { - if self.store_events { + if self.opts.events { let events = self.events.read().await; self._query(&events, filters).await } else { @@ -219,7 +220,7 @@ impl NostrDatabase for MemoryDatabase { } async fn event_ids_by_filters(&self, filters: Vec) -> Result, Self::Err> { - if self.store_events { + if self.opts.events { let events = self.events.read().await; let mut list: Vec = Vec::new(); for event in events.values() { diff --git a/crates/nostr-sdk-db/src/options.rs b/crates/nostr-sdk-db/src/options.rs new file mode 100644 index 000000000..038682de2 --- /dev/null +++ b/crates/nostr-sdk-db/src/options.rs @@ -0,0 +1,24 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +//! 
Database options + +/// Database options +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct DatabaseOptions { + /// Store events (?) + pub events: bool, +} + +impl Default for DatabaseOptions { + fn default() -> Self { + Self { events: true } + } +} + +impl DatabaseOptions { + /// New default database options + pub fn new() -> Self { + Self::default() + } +} From 625bcb81110c791e8d18bf62063b583af2ad8573 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 23 Oct 2023 15:05:15 +0200 Subject: [PATCH 20/98] db: add `negentropy_items` database --- crates/nostr-sdk-db/src/lib.rs | 10 +++++++--- crates/nostr-sdk-db/src/memory.rs | 25 ++++++++++++++++++++++--- 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/crates/nostr-sdk-db/src/lib.rs b/crates/nostr-sdk-db/src/lib.rs index 4744169c2..41c9aa0b8 100644 --- a/crates/nostr-sdk-db/src/lib.rs +++ b/crates/nostr-sdk-db/src/lib.rs @@ -9,7 +9,7 @@ use std::collections::HashSet; use async_trait::async_trait; -use nostr::{Event, EventId, Filter, Url}; +use nostr::{Event, EventId, Filter, Timestamp, Url}; mod error; pub mod memory; @@ -88,10 +88,14 @@ pub trait NostrDatabase: AsyncTraitDeps { async fn query(&self, filters: Vec) -> Result, Self::Err>; /// Get event IDs by filters - /// - /// Uuseful for negentropy reconciliation async fn event_ids_by_filters(&self, filters: Vec) -> Result, Self::Err>; + /// Get `negentropy` items + async fn negentropy_items( + &self, + filter: &Filter, + ) -> Result, Self::Err>; + /// Wipe all data async fn wipe(&self) -> Result<(), Self::Err>; } diff --git a/crates/nostr-sdk-db/src/memory.rs b/crates/nostr-sdk-db/src/memory.rs index 32034f72f..86ba78c1f 100644 --- a/crates/nostr-sdk-db/src/memory.rs +++ b/crates/nostr-sdk-db/src/memory.rs @@ -7,7 +7,7 @@ use std::collections::{HashMap, HashSet}; use std::sync::Arc; use async_trait::async_trait; -use nostr::{Event, EventId, Filter, FiltersMatchEvent, Url}; +use nostr::{Event, EventId, Filter, 
FiltersMatchEvent, Timestamp, Url}; use thiserror::Error; use tokio::sync::RwLock; @@ -28,10 +28,11 @@ impl From for DatabaseError { pub struct MemoryDatabase { opts: DatabaseOptions, seen_event_ids: Arc>>>, - events: Arc>>, - // TODO: add messages queue? (messages not sent) + events: Arc>>, // TODO: order by timestamp (DESC)? } +// TODO: add queue field? + impl Default for MemoryDatabase { fn default() -> Self { Self::new(DatabaseOptions { events: false }) @@ -234,6 +235,24 @@ impl NostrDatabase for MemoryDatabase { } } + async fn negentropy_items( + &self, + filter: &Filter, + ) -> Result, Self::Err> { + if self.opts.events { + let events = self.events.read().await; + let mut items: Vec<(EventId, Timestamp)> = Vec::new(); + for event in events.values() { + if filter.match_event(event) { + items.push((event.id, event.created_at)); + } + } + Ok(items) + } else { + Err(DatabaseError::FeatureDisabled) + } + } + async fn wipe(&self) -> Result<(), Self::Err> { let mut seen_event_ids = self.seen_event_ids.write().await; seen_event_ids.clear(); From 65e6976b4a7239f9f502cd8136109c375e146bbf Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 23 Oct 2023 15:06:21 +0200 Subject: [PATCH 21/98] sdk: add `reconcile` method to `Client` Rename `reconcile` to `reconcilie_with_items` in `RelayPool` --- crates/nostr-sdk/src/client/mod.rs | 5 +++++ crates/nostr-sdk/src/relay/mod.rs | 4 ++-- crates/nostr-sdk/src/relay/pool.rs | 12 +++++++++--- 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/crates/nostr-sdk/src/client/mod.rs b/crates/nostr-sdk/src/client/mod.rs index 9dd48efab..e4476a5e2 100644 --- a/crates/nostr-sdk/src/client/mod.rs +++ b/crates/nostr-sdk/src/client/mod.rs @@ -1314,6 +1314,11 @@ impl Client { self.send_event_builder(builder).await } + /// Negentropy reconciliation + pub async fn reconcilie(&self, filter: Filter, timeout: Duration) -> Result<(), Error> { + Ok(self.pool.reconcilie(filter, timeout).await?) 
+ } + /// Get a list of channels pub async fn get_channels(&self, timeout: Option) -> Result, Error> { self.get_events_of(vec![Filter::new().kind(Kind::ChannelCreation)], timeout) diff --git a/crates/nostr-sdk/src/relay/mod.rs b/crates/nostr-sdk/src/relay/mod.rs index 0bb64df30..46164dc84 100644 --- a/crates/nostr-sdk/src/relay/mod.rs +++ b/crates/nostr-sdk/src/relay/mod.rs @@ -1473,7 +1473,7 @@ impl Relay { pub async fn reconcilie( &self, filter: Filter, - my_items: Vec<(EventId, Timestamp)>, + items: Vec<(EventId, Timestamp)>, timeout: Duration, ) -> Result<(), Error> { if !self.opts.get_read() { @@ -1484,7 +1484,7 @@ impl Relay { let mut negentropy = Negentropy::new(id_size, Some(2_500))?; - for (id, timestamp) in my_items.into_iter() { + for (id, timestamp) in items.into_iter() { let id = Bytes::from_slice(id.as_bytes()); negentropy.add_item(timestamp.as_u64(), id)?; } diff --git a/crates/nostr-sdk/src/relay/pool.rs b/crates/nostr-sdk/src/relay/pool.rs index 4b9c1d980..90b969e75 100644 --- a/crates/nostr-sdk/src/relay/pool.rs +++ b/crates/nostr-sdk/src/relay/pool.rs @@ -861,17 +861,23 @@ impl RelayPool { } /// Negentropy reconciliation - pub async fn reconcilie( + pub async fn reconcilie(&self, filter: Filter, timeout: Duration) -> Result<(), Error> { + let items: Vec<(EventId, Timestamp)> = self.database.negentropy_items(&filter).await?; + self.reconcilie_with_items(filter, items, timeout).await + } + + /// Negentropy reconciliation with custom items + pub async fn reconcilie_with_items( &self, filter: Filter, - my_items: Vec<(EventId, Timestamp)>, + items: Vec<(EventId, Timestamp)>, timeout: Duration, ) -> Result<(), Error> { let mut handles = Vec::new(); let relays = self.relays().await; for (url, relay) in relays.into_iter() { let filter = filter.clone(); - let my_items = my_items.clone(); + let my_items = items.clone(); let handle = thread::spawn(async move { if let Err(e) = relay.reconcilie(filter, my_items, timeout).await { tracing::error!("Failed to get 
reconcilie with {url}: {e}"); From 3d49b845b8153b42077ddb9459c19d8585394f5e Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 23 Oct 2023 15:21:45 +0200 Subject: [PATCH 22/98] db: update `negentropy_items` method for `MemoryDatabase` --- crates/nostr-sdk-db/src/memory.rs | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/crates/nostr-sdk-db/src/memory.rs b/crates/nostr-sdk-db/src/memory.rs index 86ba78c1f..5910d5840 100644 --- a/crates/nostr-sdk-db/src/memory.rs +++ b/crates/nostr-sdk-db/src/memory.rs @@ -237,20 +237,9 @@ impl NostrDatabase for MemoryDatabase { async fn negentropy_items( &self, - filter: &Filter, + _filter: &Filter, ) -> Result, Self::Err> { - if self.opts.events { - let events = self.events.read().await; - let mut items: Vec<(EventId, Timestamp)> = Vec::new(); - for event in events.values() { - if filter.match_event(event) { - items.push((event.id, event.created_at)); - } - } - Ok(items) - } else { - Err(DatabaseError::FeatureDisabled) - } + Err(DatabaseError::NotSupported) } async fn wipe(&self) -> Result<(), Self::Err> { From b1c386f344eb45bd175293928181a96c2403e436 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 23 Oct 2023 15:27:07 +0200 Subject: [PATCH 23/98] sdk: add `reconcile_with_items` method to `Client` Fix typo --- crates/nostr-sdk/examples/negentropy.rs | 2 +- crates/nostr-sdk/src/client/mod.rs | 19 ++++++++++++++++--- crates/nostr-sdk/src/relay/mod.rs | 4 ++-- crates/nostr-sdk/src/relay/pool.rs | 10 +++++----- 4 files changed, 24 insertions(+), 11 deletions(-) diff --git a/crates/nostr-sdk/examples/negentropy.rs b/crates/nostr-sdk/examples/negentropy.rs index 7cf8eca96..9d122a1c2 100644 --- a/crates/nostr-sdk/examples/negentropy.rs +++ b/crates/nostr-sdk/examples/negentropy.rs @@ -23,7 +23,7 @@ async fn main() -> Result<()> { let filter = Filter::new().author(my_keys.public_key()).limit(10); let relay = client.relay("wss://relay.damus.io").await?; relay - .reconcilie(filter, my_items, 
Duration::from_secs(30)) + .reconcile(filter, my_items, Duration::from_secs(30)) .await?; client diff --git a/crates/nostr-sdk/src/client/mod.rs b/crates/nostr-sdk/src/client/mod.rs index e4476a5e2..336db5201 100644 --- a/crates/nostr-sdk/src/client/mod.rs +++ b/crates/nostr-sdk/src/client/mod.rs @@ -20,7 +20,7 @@ use nostr::types::metadata::Error as MetadataError; use nostr::url::Url; use nostr::{ ChannelId, ClientMessage, Contact, Event, EventBuilder, EventId, Filter, JsonUtil, Keys, Kind, - Metadata, Result, Tag, + Metadata, Result, Tag, Timestamp, }; use nostr_sdk_db::DynNostrDatabase; use nostr_sdk_net::futures_util::Future; @@ -1315,8 +1315,21 @@ impl Client { } /// Negentropy reconciliation - pub async fn reconcilie(&self, filter: Filter, timeout: Duration) -> Result<(), Error> { - Ok(self.pool.reconcilie(filter, timeout).await?) + pub async fn reconcile(&self, filter: Filter, timeout: Duration) -> Result<(), Error> { + Ok(self.pool.reconcile(filter, timeout).await?) + } + + /// Negentropy reconciliation with items + pub async fn reconcile_with_items( + &self, + filter: Filter, + items: Vec<(EventId, Timestamp)>, + timeout: Duration, + ) -> Result<(), Error> { + Ok(self + .pool + .reconcile_with_items(filter, items, timeout) + .await?) 
} /// Get a list of channels diff --git a/crates/nostr-sdk/src/relay/mod.rs b/crates/nostr-sdk/src/relay/mod.rs index 46164dc84..4013796b0 100644 --- a/crates/nostr-sdk/src/relay/mod.rs +++ b/crates/nostr-sdk/src/relay/mod.rs @@ -1470,7 +1470,7 @@ impl Relay { } /// Negentropy reconciliation - pub async fn reconcilie( + pub async fn reconcile( &self, filter: Filter, items: Vec<(EventId, Timestamp)>, @@ -1585,7 +1585,7 @@ impl Relay { let pk = Keys::generate(); let filter = Filter::new().author(pk.public_key()); match self - .reconcilie(filter, Vec::new(), Duration::from_secs(5)) + .reconcile(filter, Vec::new(), Duration::from_secs(5)) .await { Ok(_) => Ok(true), diff --git a/crates/nostr-sdk/src/relay/pool.rs b/crates/nostr-sdk/src/relay/pool.rs index 90b969e75..83b979c9f 100644 --- a/crates/nostr-sdk/src/relay/pool.rs +++ b/crates/nostr-sdk/src/relay/pool.rs @@ -861,13 +861,13 @@ impl RelayPool { } /// Negentropy reconciliation - pub async fn reconcilie(&self, filter: Filter, timeout: Duration) -> Result<(), Error> { + pub async fn reconcile(&self, filter: Filter, timeout: Duration) -> Result<(), Error> { let items: Vec<(EventId, Timestamp)> = self.database.negentropy_items(&filter).await?; - self.reconcilie_with_items(filter, items, timeout).await + self.reconcile_with_items(filter, items, timeout).await } /// Negentropy reconciliation with custom items - pub async fn reconcilie_with_items( + pub async fn reconcile_with_items( &self, filter: Filter, items: Vec<(EventId, Timestamp)>, @@ -879,8 +879,8 @@ impl RelayPool { let filter = filter.clone(); let my_items = items.clone(); let handle = thread::spawn(async move { - if let Err(e) = relay.reconcilie(filter, my_items, timeout).await { - tracing::error!("Failed to get reconcilie with {url}: {e}"); + if let Err(e) = relay.reconcile(filter, my_items, timeout).await { + tracing::error!("Failed to get reconcile with {url}: {e}"); } }); handles.push(handle); From 2e7b06ccb1f0a48f48bfede4f5b4edc71fa85c08 Mon Sep 17 
00:00:00 2001 From: Yuki Kishimoto Date: Tue, 24 Oct 2023 10:46:37 +0200 Subject: [PATCH 24/98] db: add `#![forbid(unsafe_code)]` --- crates/nostr-sdk-db/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/nostr-sdk-db/src/lib.rs b/crates/nostr-sdk-db/src/lib.rs index 41c9aa0b8..1b55b018c 100644 --- a/crates/nostr-sdk-db/src/lib.rs +++ b/crates/nostr-sdk-db/src/lib.rs @@ -3,6 +3,7 @@ //! Nostr SDK Database +#![forbid(unsafe_code)] #![warn(missing_docs)] #![warn(rustdoc::bare_urls)] From 81eaddc24db8a6db9a781599b3dfa9733b629b15 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Tue, 24 Oct 2023 12:31:11 +0200 Subject: [PATCH 25/98] nostr: add `to_bytes` method to `EventId` --- crates/nostr/src/event/id.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/crates/nostr/src/event/id.rs b/crates/nostr/src/event/id.rs index 0f12fe69b..0a268db68 100644 --- a/crates/nostr/src/event/id.rs +++ b/crates/nostr/src/event/id.rs @@ -99,6 +99,11 @@ impl EventId { self.as_ref() } + /// Consume and get bytes + pub fn to_bytes(self) -> [u8; 32] { + self.0.to_byte_array() + } + /// Get as hex string pub fn to_hex(&self) -> String { self.0.to_string() From 7baab9e6f7cc1cac7be84676f46fa569219d70e0 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Tue, 24 Oct 2023 12:41:02 +0200 Subject: [PATCH 26/98] Init `nostr-sdk-fbs` crate --- Makefile | 3 + crates/nostr-sdk-fbs/Cargo.toml | 11 + crates/nostr-sdk-fbs/Makefile | 4 + crates/nostr-sdk-fbs/fbs/event.fbs | 21 + crates/nostr-sdk-fbs/src/event_generated.rs | 492 ++++++++++++++++++++ crates/nostr-sdk-fbs/src/lib.rs | 36 ++ 6 files changed, 567 insertions(+) create mode 100644 crates/nostr-sdk-fbs/Cargo.toml create mode 100644 crates/nostr-sdk-fbs/Makefile create mode 100644 crates/nostr-sdk-fbs/fbs/event.fbs create mode 100644 crates/nostr-sdk-fbs/src/event_generated.rs create mode 100644 crates/nostr-sdk-fbs/src/lib.rs diff --git a/Makefile b/Makefile index c21344608..c2d7fb8de 100644 --- a/Makefile +++ b/Makefile 
@@ -12,5 +12,8 @@ clean: book: cd book && make build +flatbuffers: + cd crates/nostr-sdk-fbs && make + loc: @echo "--- Counting lines of .rs files (LOC):" && find crates/ bindings/ -type f -name "*.rs" -exec cat {} \; | wc -l \ No newline at end of file diff --git a/crates/nostr-sdk-fbs/Cargo.toml b/crates/nostr-sdk-fbs/Cargo.toml new file mode 100644 index 000000000..19fbfdaf3 --- /dev/null +++ b/crates/nostr-sdk-fbs/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "nostr-sdk-fbs" +version = "0.1.0" +edition = "2021" +homepage.workspace = true +repository.workspace = true +license.workspace = true + +[dependencies] +flatbuffers = "23.5" +nostr = { version = "0.24", path = "../nostr", default-features = false, features = ["std"] } diff --git a/crates/nostr-sdk-fbs/Makefile b/crates/nostr-sdk-fbs/Makefile new file mode 100644 index 000000000..5f4a44898 --- /dev/null +++ b/crates/nostr-sdk-fbs/Makefile @@ -0,0 +1,4 @@ +all: build + +build: + flatc --rust -o ./src/ ./fbs/event.fbs \ No newline at end of file diff --git a/crates/nostr-sdk-fbs/fbs/event.fbs b/crates/nostr-sdk-fbs/fbs/event.fbs new file mode 100644 index 000000000..a2c0230a7 --- /dev/null +++ b/crates/nostr-sdk-fbs/fbs/event.fbs @@ -0,0 +1,21 @@ +namespace EventFbs; + +struct Fixed32Bytes { + val: [ubyte:32]; +} + +struct Fixed64Bytes { + val: [ubyte:64]; +} + +table Event { + id: Fixed32Bytes; + pubkey: Fixed32Bytes; + created_at: ulong; + kind: ulong; + tags: [string]; + content: string; + sig: Fixed64Bytes; +} + +root_type Event; \ No newline at end of file diff --git a/crates/nostr-sdk-fbs/src/event_generated.rs b/crates/nostr-sdk-fbs/src/event_generated.rs new file mode 100644 index 000000000..5bcfa10cd --- /dev/null +++ b/crates/nostr-sdk-fbs/src/event_generated.rs @@ -0,0 +1,492 @@ +// automatically generated by the FlatBuffers compiler, do not modify + +// @generated + +use core::cmp::Ordering; +use core::mem; + +extern crate flatbuffers; +use self::flatbuffers::{EndianScalar, Follow}; + 
+#[allow(unused_imports, dead_code)] +pub mod event_fbs { + + use core::cmp::Ordering; + use core::mem; + + extern crate flatbuffers; + use self::flatbuffers::{EndianScalar, Follow}; + + // struct Fixed32Bytes, aligned to 1 + #[repr(transparent)] + #[derive(Clone, Copy, PartialEq)] + pub struct Fixed32Bytes(pub [u8; 32]); + impl Default for Fixed32Bytes { + fn default() -> Self { + Self([0; 32]) + } + } + impl core::fmt::Debug for Fixed32Bytes { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + f.debug_struct("Fixed32Bytes") + .field("val", &self.val()) + .finish() + } + } + + impl flatbuffers::SimpleToVerifyInSlice for Fixed32Bytes {} + impl<'a> flatbuffers::Follow<'a> for Fixed32Bytes { + type Inner = &'a Fixed32Bytes; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + <&'a Fixed32Bytes>::follow(buf, loc) + } + } + impl<'a> flatbuffers::Follow<'a> for &'a Fixed32Bytes { + type Inner = &'a Fixed32Bytes; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + flatbuffers::follow_cast_ref::(buf, loc) + } + } + impl<'b> flatbuffers::Push for Fixed32Bytes { + type Output = Fixed32Bytes; + #[inline] + unsafe fn push(&self, dst: &mut [u8], _written_len: usize) { + let src = ::core::slice::from_raw_parts( + self as *const Fixed32Bytes as *const u8, + Self::size(), + ); + dst.copy_from_slice(src); + } + } + + impl<'a> flatbuffers::Verifiable for Fixed32Bytes { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.in_buffer::(pos) + } + } + + impl<'a> Fixed32Bytes { + #[allow(clippy::too_many_arguments)] + pub fn new(val: &[u8; 32]) -> Self { + let mut s = Self([0; 32]); + s.set_val(val); + s + } + + pub fn val(&'a self) -> flatbuffers::Array<'a, u8, 32> { + // Safety: + // Created from a valid Table for this object + // Which contains a valid array in this slot + unsafe { 
flatbuffers::Array::follow(&self.0, 0) } + } + + pub fn set_val(&mut self, items: &[u8; 32]) { + // Safety: + // Created from a valid Table for this object + // Which contains a valid array in this slot + unsafe { flatbuffers::emplace_scalar_array(&mut self.0, 0, items) }; + } + } + + // struct Fixed64Bytes, aligned to 1 + #[repr(transparent)] + #[derive(Clone, Copy, PartialEq)] + pub struct Fixed64Bytes(pub [u8; 64]); + impl Default for Fixed64Bytes { + fn default() -> Self { + Self([0; 64]) + } + } + impl core::fmt::Debug for Fixed64Bytes { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + f.debug_struct("Fixed64Bytes") + .field("val", &self.val()) + .finish() + } + } + + impl flatbuffers::SimpleToVerifyInSlice for Fixed64Bytes {} + impl<'a> flatbuffers::Follow<'a> for Fixed64Bytes { + type Inner = &'a Fixed64Bytes; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + <&'a Fixed64Bytes>::follow(buf, loc) + } + } + impl<'a> flatbuffers::Follow<'a> for &'a Fixed64Bytes { + type Inner = &'a Fixed64Bytes; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + flatbuffers::follow_cast_ref::(buf, loc) + } + } + impl<'b> flatbuffers::Push for Fixed64Bytes { + type Output = Fixed64Bytes; + #[inline] + unsafe fn push(&self, dst: &mut [u8], _written_len: usize) { + let src = ::core::slice::from_raw_parts( + self as *const Fixed64Bytes as *const u8, + Self::size(), + ); + dst.copy_from_slice(src); + } + } + + impl<'a> flatbuffers::Verifiable for Fixed64Bytes { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.in_buffer::(pos) + } + } + + impl<'a> Fixed64Bytes { + #[allow(clippy::too_many_arguments)] + pub fn new(val: &[u8; 64]) -> Self { + let mut s = Self([0; 64]); + s.set_val(val); + s + } + + pub fn val(&'a self) -> flatbuffers::Array<'a, u8, 64> { + // Safety: + // Created from a valid 
Table for this object + // Which contains a valid array in this slot + unsafe { flatbuffers::Array::follow(&self.0, 0) } + } + + pub fn set_val(&mut self, items: &[u8; 64]) { + // Safety: + // Created from a valid Table for this object + // Which contains a valid array in this slot + unsafe { flatbuffers::emplace_scalar_array(&mut self.0, 0, items) }; + } + } + + pub enum EventOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct Event<'a> { + pub _tab: flatbuffers::Table<'a>, + } + + impl<'a> flatbuffers::Follow<'a> for Event<'a> { + type Inner = Event<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: flatbuffers::Table::new(buf, loc), + } + } + } + + impl<'a> Event<'a> { + pub const VT_ID: flatbuffers::VOffsetT = 4; + pub const VT_PUBKEY: flatbuffers::VOffsetT = 6; + pub const VT_CREATED_AT: flatbuffers::VOffsetT = 8; + pub const VT_KIND: flatbuffers::VOffsetT = 10; + pub const VT_TAGS: flatbuffers::VOffsetT = 12; + pub const VT_CONTENT: flatbuffers::VOffsetT = 14; + pub const VT_SIG: flatbuffers::VOffsetT = 16; + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + Event { _tab: table } + } + #[allow(unused_mut)] + pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, + args: &'args EventArgs<'args>, + ) -> flatbuffers::WIPOffset> { + let mut builder = EventBuilder::new(_fbb); + builder.add_kind(args.kind); + builder.add_created_at(args.created_at); + if let Some(x) = args.sig { + builder.add_sig(x); + } + if let Some(x) = args.content { + builder.add_content(x); + } + if let Some(x) = args.tags { + builder.add_tags(x); + } + if let Some(x) = args.pubkey { + builder.add_pubkey(x); + } + if let Some(x) = args.id { + builder.add_id(x); + } + builder.finish() + } + + #[inline] + pub fn id(&self) -> Option<&'a Fixed32Bytes> { + // Safety: + // Created from valid Table for this object + // which contains a valid 
value in this slot + unsafe { self._tab.get::(Event::VT_ID, None) } + } + #[inline] + pub fn pubkey(&self) -> Option<&'a Fixed32Bytes> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { self._tab.get::(Event::VT_PUBKEY, None) } + } + #[inline] + pub fn created_at(&self) -> u64 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { self._tab.get::(Event::VT_CREATED_AT, Some(0)).unwrap() } + } + #[inline] + pub fn kind(&self) -> u64 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { self._tab.get::(Event::VT_KIND, Some(0)).unwrap() } + } + #[inline] + pub fn tags( + &self, + ) -> Option>> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::>, + >>(Event::VT_TAGS, None) + } + } + #[inline] + pub fn content(&self) -> Option<&'a str> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>(Event::VT_CONTENT, None) + } + } + #[inline] + pub fn sig(&self) -> Option<&'a Fixed64Bytes> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { self._tab.get::(Event::VT_SIG, None) } + } + } + + impl flatbuffers::Verifiable for Event<'_> { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.visit_table(pos)? + .visit_field::("id", Self::VT_ID, false)? + .visit_field::("pubkey", Self::VT_PUBKEY, false)? + .visit_field::("created_at", Self::VT_CREATED_AT, false)? + .visit_field::("kind", Self::VT_KIND, false)? + .visit_field::>, + >>("tags", Self::VT_TAGS, false)? + .visit_field::>( + "content", + Self::VT_CONTENT, + false, + )? 
+ .visit_field::("sig", Self::VT_SIG, false)? + .finish(); + Ok(()) + } + } + pub struct EventArgs<'a> { + pub id: Option<&'a Fixed32Bytes>, + pub pubkey: Option<&'a Fixed32Bytes>, + pub created_at: u64, + pub kind: u64, + pub tags: Option< + flatbuffers::WIPOffset>>, + >, + pub content: Option>, + pub sig: Option<&'a Fixed64Bytes>, + } + impl<'a> Default for EventArgs<'a> { + #[inline] + fn default() -> Self { + EventArgs { + id: None, + pubkey: None, + created_at: 0, + kind: 0, + tags: None, + content: None, + sig: None, + } + } + } + + pub struct EventBuilder<'a: 'b, 'b> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, + start_: flatbuffers::WIPOffset, + } + impl<'a: 'b, 'b> EventBuilder<'a, 'b> { + #[inline] + pub fn add_id(&mut self, id: &Fixed32Bytes) { + self.fbb_ + .push_slot_always::<&Fixed32Bytes>(Event::VT_ID, id); + } + #[inline] + pub fn add_pubkey(&mut self, pubkey: &Fixed32Bytes) { + self.fbb_ + .push_slot_always::<&Fixed32Bytes>(Event::VT_PUBKEY, pubkey); + } + #[inline] + pub fn add_created_at(&mut self, created_at: u64) { + self.fbb_ + .push_slot::(Event::VT_CREATED_AT, created_at, 0); + } + #[inline] + pub fn add_kind(&mut self, kind: u64) { + self.fbb_.push_slot::(Event::VT_KIND, kind, 0); + } + #[inline] + pub fn add_tags( + &mut self, + tags: flatbuffers::WIPOffset< + flatbuffers::Vector<'b, flatbuffers::ForwardsUOffset<&'b str>>, + >, + ) { + self.fbb_ + .push_slot_always::>(Event::VT_TAGS, tags); + } + #[inline] + pub fn add_content(&mut self, content: flatbuffers::WIPOffset<&'b str>) { + self.fbb_ + .push_slot_always::>(Event::VT_CONTENT, content); + } + #[inline] + pub fn add_sig(&mut self, sig: &Fixed64Bytes) { + self.fbb_ + .push_slot_always::<&Fixed64Bytes>(Event::VT_SIG, sig); + } + #[inline] + pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> EventBuilder<'a, 'b> { + let start = _fbb.start_table(); + EventBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { 
+ let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } + } + + impl core::fmt::Debug for Event<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("Event"); + ds.field("id", &self.id()); + ds.field("pubkey", &self.pubkey()); + ds.field("created_at", &self.created_at()); + ds.field("kind", &self.kind()); + ds.field("tags", &self.tags()); + ds.field("content", &self.content()); + ds.field("sig", &self.sig()); + ds.finish() + } + } + #[inline] + /// Verifies that a buffer of bytes contains a `Event` + /// and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_event_unchecked`. + pub fn root_as_event(buf: &[u8]) -> Result { + flatbuffers::root::(buf) + } + #[inline] + /// Verifies that a buffer of bytes contains a size prefixed + /// `Event` and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `size_prefixed_root_as_event_unchecked`. + pub fn size_prefixed_root_as_event( + buf: &[u8], + ) -> Result { + flatbuffers::size_prefixed_root::(buf) + } + #[inline] + /// Verifies, with the given options, that a buffer of bytes + /// contains a `Event` and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_event_unchecked`. + pub fn root_as_event_with_opts<'b, 'o>( + opts: &'o flatbuffers::VerifierOptions, + buf: &'b [u8], + ) -> Result, flatbuffers::InvalidFlatbuffer> { + flatbuffers::root_with_opts::>(opts, buf) + } + #[inline] + /// Verifies, with the given verifier options, that a buffer of + /// bytes contains a size prefixed `Event` and returns + /// it. 
Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_event_unchecked`. + pub fn size_prefixed_root_as_event_with_opts<'b, 'o>( + opts: &'o flatbuffers::VerifierOptions, + buf: &'b [u8], + ) -> Result, flatbuffers::InvalidFlatbuffer> { + flatbuffers::size_prefixed_root_with_opts::>(opts, buf) + } + #[inline] + /// Assumes, without verification, that a buffer of bytes contains a Event and returns it. + /// # Safety + /// Callers must trust the given bytes do indeed contain a valid `Event`. + pub unsafe fn root_as_event_unchecked(buf: &[u8]) -> Event { + flatbuffers::root_unchecked::(buf) + } + #[inline] + /// Assumes, without verification, that a buffer of bytes contains a size prefixed Event and returns it. + /// # Safety + /// Callers must trust the given bytes do indeed contain a valid size prefixed `Event`. + pub unsafe fn size_prefixed_root_as_event_unchecked(buf: &[u8]) -> Event { + flatbuffers::size_prefixed_root_unchecked::(buf) + } + #[inline] + pub fn finish_event_buffer<'a, 'b>( + fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>, + root: flatbuffers::WIPOffset>, + ) { + fbb.finish(root, None); + } + + #[inline] + pub fn finish_size_prefixed_event_buffer<'a, 'b>( + fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>, + root: flatbuffers::WIPOffset>, + ) { + fbb.finish_size_prefixed(root, None); + } +} // pub mod EventFbs diff --git a/crates/nostr-sdk-fbs/src/lib.rs b/crates/nostr-sdk-fbs/src/lib.rs new file mode 100644 index 000000000..ca18dc077 --- /dev/null +++ b/crates/nostr-sdk-fbs/src/lib.rs @@ -0,0 +1,36 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +//! 
Nostr SDK Flatbuffers + +use event_generated::event_fbs::{Fixed32Bytes, Fixed64Bytes}; +pub use flatbuffers::FlatBufferBuilder; +use nostr::Event; + +#[allow(unused_imports, dead_code)] +mod event_generated; + +pub use self::event_generated::event_fbs; + +pub fn serialize_event<'a>(fbb: &'a mut FlatBufferBuilder, event: &Event) -> &'a [u8] { + fbb.reset(); + + let id = Fixed32Bytes::new(&event.id.to_bytes()); + let pubkey = Fixed32Bytes::new(&event.pubkey.serialize()); + let sig = Fixed64Bytes::new(event.sig.as_ref()); + let args = event_fbs::EventArgs { + id: Some(&id), + pubkey: Some(&pubkey), + created_at: event.created_at.as_u64(), + kind: event.kind.as_u64(), + tags: None, // TODO + content: None, // TODO + sig: Some(&sig), + }; + + let offset = event_fbs::Event::create(fbb, &args); + + event_fbs::finish_event_buffer(fbb, offset); + + fbb.finished_data() +} From d1feda4b39f93986622b54d10eb5d9135dc8e40a Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Tue, 24 Oct 2023 13:05:21 +0200 Subject: [PATCH 27/98] fbs: add `FlatBufferUtils` trait --- Cargo.lock | 27 ++++++++++++- crates/nostr-sdk-fbs/Cargo.toml | 1 + crates/nostr-sdk-fbs/src/lib.rs | 72 ++++++++++++++++++++++++--------- 3 files changed, 81 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 698804262..b6dbc823d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -257,6 +257,12 @@ dependencies = [ "serde", ] +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + [[package]] name = "bitflags" version = "2.4.0" @@ -555,6 +561,16 @@ dependencies = [ "libc", ] +[[package]] +name = "flatbuffers" +version = "23.5.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dac53e22462d78c16d64a1cd22371b54cc3fe94aa15e7886a2fa6e5d1ab8640" +dependencies = [ + "bitflags 1.3.2", + "rustc_version", +] + [[package]] name = "flate2" 
version = "1.0.27" @@ -1136,6 +1152,15 @@ dependencies = [ "tracing", ] +[[package]] +name = "nostr-sdk-fbs" +version = "0.1.0" +dependencies = [ + "flatbuffers", + "nostr", + "thiserror", +] + [[package]] name = "nostr-sdk-ffi" version = "0.1.0" @@ -1450,7 +1475,7 @@ version = "0.38.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67ce50cb2e16c2903e30d1cbccfd8387a74b9d4c938b6a4c5ec6cc7556f7a8a0" dependencies = [ - "bitflags", + "bitflags 2.4.0", "errno", "libc", "linux-raw-sys", diff --git a/crates/nostr-sdk-fbs/Cargo.toml b/crates/nostr-sdk-fbs/Cargo.toml index 19fbfdaf3..494c0b42c 100644 --- a/crates/nostr-sdk-fbs/Cargo.toml +++ b/crates/nostr-sdk-fbs/Cargo.toml @@ -9,3 +9,4 @@ license.workspace = true [dependencies] flatbuffers = "23.5" nostr = { version = "0.24", path = "../nostr", default-features = false, features = ["std"] } +thiserror = { workspace = true } diff --git a/crates/nostr-sdk-fbs/src/lib.rs b/crates/nostr-sdk-fbs/src/lib.rs index ca18dc077..6d38b888d 100644 --- a/crates/nostr-sdk-fbs/src/lib.rs +++ b/crates/nostr-sdk-fbs/src/lib.rs @@ -5,32 +5,68 @@ use event_generated::event_fbs::{Fixed32Bytes, Fixed64Bytes}; pub use flatbuffers::FlatBufferBuilder; -use nostr::Event; +use flatbuffers::InvalidFlatbuffer; +use nostr::secp256k1::schnorr::Signature; +use nostr::secp256k1::{self, XOnlyPublicKey}; +use nostr::{Event, EventId, Kind, Timestamp}; +use thiserror::Error; #[allow(unused_imports, dead_code)] mod event_generated; pub use self::event_generated::event_fbs; -pub fn serialize_event<'a>(fbb: &'a mut FlatBufferBuilder, event: &Event) -> &'a [u8] { - fbb.reset(); +#[derive(Debug, Error)] +pub enum Error { + #[error(transparent)] + InvalidFlatbuffer(#[from] InvalidFlatbuffer), + #[error(transparent)] + EventId(#[from] nostr::event::id::Error), + #[error(transparent)] + Secp256k1(#[from] secp256k1::Error), + #[error("not found")] + NotFound, +} + +pub trait FlatBufferUtils: Sized { + fn encode<'a>(&self, fbb: &'a mut 
FlatBufferBuilder) -> &'a [u8]; + fn decode(buf: &[u8]) -> Result; +} + +impl FlatBufferUtils for Event { + fn encode<'a>(&self, fbb: &'a mut FlatBufferBuilder) -> &'a [u8] { + fbb.reset(); + + let id = Fixed32Bytes::new(&self.id.to_bytes()); + let pubkey = Fixed32Bytes::new(&self.pubkey.serialize()); + let sig = Fixed64Bytes::new(self.sig.as_ref()); + let args = event_fbs::EventArgs { + id: Some(&id), + pubkey: Some(&pubkey), + created_at: self.created_at.as_u64(), + kind: self.kind.as_u64(), + tags: None, // TODO + content: None, // TODO + sig: Some(&sig), + }; - let id = Fixed32Bytes::new(&event.id.to_bytes()); - let pubkey = Fixed32Bytes::new(&event.pubkey.serialize()); - let sig = Fixed64Bytes::new(event.sig.as_ref()); - let args = event_fbs::EventArgs { - id: Some(&id), - pubkey: Some(&pubkey), - created_at: event.created_at.as_u64(), - kind: event.kind.as_u64(), - tags: None, // TODO - content: None, // TODO - sig: Some(&sig), - }; + let offset = event_fbs::Event::create(fbb, &args); - let offset = event_fbs::Event::create(fbb, &args); + event_fbs::finish_event_buffer(fbb, offset); - event_fbs::finish_event_buffer(fbb, offset); + fbb.finished_data() + } - fbb.finished_data() + fn decode(buf: &[u8]) -> Result { + let ev = event_fbs::root_as_event(buf)?; + Ok(Self { + id: EventId::from_slice(&ev.id().ok_or(Error::NotFound)?.0)?, + pubkey: XOnlyPublicKey::from_slice(&ev.pubkey().ok_or(Error::NotFound)?.0)?, + created_at: Timestamp::from(ev.created_at()), + kind: Kind::from(ev.kind()), + tags: Vec::new(), // TODO + content: String::new(), // TODO + sig: Signature::from_slice(&ev.sig().ok_or(Error::NotFound)?.0)?, + }) + } } From b96cc70df1efcd3fe53a2dfede2a670b380b6273 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Thu, 26 Oct 2023 13:10:27 +0200 Subject: [PATCH 28/98] Add `nostr` dep to `workspace` --- Cargo.lock | 1 + Cargo.toml | 1 + crates/nostr-sdk-db/Cargo.toml | 2 +- crates/nostr-sdk-fbs/Cargo.toml | 3 ++- crates/nostr-sdk/Cargo.toml | 2 +- 5 files 
changed, 6 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b6dbc823d..8804dadae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1159,6 +1159,7 @@ dependencies = [ "flatbuffers", "nostr", "thiserror", + "tracing", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 0e3048587..32648e1ef 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,6 +17,7 @@ license = "MIT" rust-version = "1.64.0" [workspace.dependencies] +nostr = { version = "0.25", path = "./crates/nostr", default-features = false } once_cell = "1.18" thiserror = "1.0" tokio = { version = "1.32", default-features = false } diff --git a/crates/nostr-sdk-db/Cargo.toml b/crates/nostr-sdk-db/Cargo.toml index d400b09f9..cab02c3c4 100644 --- a/crates/nostr-sdk-db/Cargo.toml +++ b/crates/nostr-sdk-db/Cargo.toml @@ -13,7 +13,7 @@ keywords = ["nostr", "sdk", "db"] [dependencies] async-trait = "0.1" -nostr = { version = "0.24", path = "../nostr", default-features = false, features = ["std"] } +nostr = { workspace = true, features = ["std"] } thiserror = { workspace = true } tokio = { workspace = true, features = ["sync"] } tracing = { workspace = true, features = ["std"] } diff --git a/crates/nostr-sdk-fbs/Cargo.toml b/crates/nostr-sdk-fbs/Cargo.toml index 494c0b42c..68517aba7 100644 --- a/crates/nostr-sdk-fbs/Cargo.toml +++ b/crates/nostr-sdk-fbs/Cargo.toml @@ -8,5 +8,6 @@ license.workspace = true [dependencies] flatbuffers = "23.5" -nostr = { version = "0.24", path = "../nostr", default-features = false, features = ["std"] } +nostr = { workspace = true, features = ["std"] } thiserror = { workspace = true } +tracing = { workspace = true, features = ["std", "attributes"] } diff --git a/crates/nostr-sdk/Cargo.toml b/crates/nostr-sdk/Cargo.toml index 9b6c36264..2318e03a0 100644 --- a/crates/nostr-sdk/Cargo.toml +++ b/crates/nostr-sdk/Cargo.toml @@ -30,7 +30,7 @@ nip47 = ["nostr/nip47"] [dependencies] async-utility = "0.1" -nostr = { version = "0.25", path = "../nostr", default-features = false, 
features = ["std"] } +nostr = { workspace = true, features = ["std"] } nostr-sdk-db = { version = "0.1", path = "../nostr-sdk-db" } nostr-sdk-net = { version = "0.25", path = "../nostr-sdk-net" } once_cell = { workspace = true } From 0796758977afbb6ade506a012e9d1452db63d0a9 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Thu, 26 Oct 2023 13:12:15 +0200 Subject: [PATCH 29/98] fbs: complete `Event` encoding/decoding --- .gitignore | 1 + crates/nostr-sdk-fbs/fbs/event.fbs | 6 +- crates/nostr-sdk-fbs/src/event_generated.rs | 130 +++++++++++++++++++- crates/nostr-sdk-fbs/src/lib.rs | 43 +++++-- 4 files changed, 167 insertions(+), 13 deletions(-) diff --git a/.gitignore b/.gitignore index b27f2f59b..147cd5cd8 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ target/ +db/ .DS_Store *.db *.db-shm diff --git a/crates/nostr-sdk-fbs/fbs/event.fbs b/crates/nostr-sdk-fbs/fbs/event.fbs index a2c0230a7..f705135b4 100644 --- a/crates/nostr-sdk-fbs/fbs/event.fbs +++ b/crates/nostr-sdk-fbs/fbs/event.fbs @@ -8,12 +8,16 @@ struct Fixed64Bytes { val: [ubyte:64]; } +table StringVector { + data: [string]; +} + table Event { id: Fixed32Bytes; pubkey: Fixed32Bytes; created_at: ulong; kind: ulong; - tags: [string]; + tags: [StringVector]; content: string; sig: Fixed64Bytes; } diff --git a/crates/nostr-sdk-fbs/src/event_generated.rs b/crates/nostr-sdk-fbs/src/event_generated.rs index 5bcfa10cd..240878f6f 100644 --- a/crates/nostr-sdk-fbs/src/event_generated.rs +++ b/crates/nostr-sdk-fbs/src/event_generated.rs @@ -173,6 +173,123 @@ pub mod event_fbs { } } + pub enum StringVectorOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct StringVector<'a> { + pub _tab: flatbuffers::Table<'a>, + } + + impl<'a> flatbuffers::Follow<'a> for StringVector<'a> { + type Inner = StringVector<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: flatbuffers::Table::new(buf, loc), + } + } + } + + impl<'a> StringVector<'a> { + pub const VT_DATA: 
flatbuffers::VOffsetT = 4; + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + StringVector { _tab: table } + } + #[allow(unused_mut)] + pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, + args: &'args StringVectorArgs<'args>, + ) -> flatbuffers::WIPOffset> { + let mut builder = StringVectorBuilder::new(_fbb); + if let Some(x) = args.data { + builder.add_data(x); + } + builder.finish() + } + + #[inline] + pub fn data( + &self, + ) -> Option>> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::>, + >>(StringVector::VT_DATA, None) + } + } + } + + impl flatbuffers::Verifiable for StringVector<'_> { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.visit_table(pos)? + .visit_field::>, + >>("data", Self::VT_DATA, false)? 
+ .finish(); + Ok(()) + } + } + pub struct StringVectorArgs<'a> { + pub data: Option< + flatbuffers::WIPOffset>>, + >, + } + impl<'a> Default for StringVectorArgs<'a> { + #[inline] + fn default() -> Self { + StringVectorArgs { data: None } + } + } + + pub struct StringVectorBuilder<'a: 'b, 'b> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, + start_: flatbuffers::WIPOffset, + } + impl<'a: 'b, 'b> StringVectorBuilder<'a, 'b> { + #[inline] + pub fn add_data( + &mut self, + data: flatbuffers::WIPOffset< + flatbuffers::Vector<'b, flatbuffers::ForwardsUOffset<&'b str>>, + >, + ) { + self.fbb_ + .push_slot_always::>(StringVector::VT_DATA, data); + } + #[inline] + pub fn new( + _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>, + ) -> StringVectorBuilder<'a, 'b> { + let start = _fbb.start_table(); + StringVectorBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } + } + + impl core::fmt::Debug for StringVector<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("StringVector"); + ds.field("data", &self.data()); + ds.finish() + } + } pub enum EventOffset {} #[derive(Copy, Clone, PartialEq)] @@ -260,13 +377,14 @@ pub mod event_fbs { #[inline] pub fn tags( &self, - ) -> Option>> { + ) -> Option>>> + { // Safety: // Created from valid Table for this object // which contains a valid value in this slot unsafe { self._tab.get::>, + flatbuffers::Vector<'a, flatbuffers::ForwardsUOffset>, >>(Event::VT_TAGS, None) } } @@ -302,7 +420,7 @@ pub mod event_fbs { .visit_field::("created_at", Self::VT_CREATED_AT, false)? .visit_field::("kind", Self::VT_KIND, false)? .visit_field::>, + flatbuffers::Vector<'_, flatbuffers::ForwardsUOffset>, >>("tags", Self::VT_TAGS, false)? 
.visit_field::>( "content", @@ -320,7 +438,9 @@ pub mod event_fbs { pub created_at: u64, pub kind: u64, pub tags: Option< - flatbuffers::WIPOffset>>, + flatbuffers::WIPOffset< + flatbuffers::Vector<'a, flatbuffers::ForwardsUOffset>>, + >, >, pub content: Option>, pub sig: Option<&'a Fixed64Bytes>, @@ -368,7 +488,7 @@ pub mod event_fbs { pub fn add_tags( &mut self, tags: flatbuffers::WIPOffset< - flatbuffers::Vector<'b, flatbuffers::ForwardsUOffset<&'b str>>, + flatbuffers::Vector<'b, flatbuffers::ForwardsUOffset>>, >, ) { self.fbb_ diff --git a/crates/nostr-sdk-fbs/src/lib.rs b/crates/nostr-sdk-fbs/src/lib.rs index 6d38b888d..a53c9a7ef 100644 --- a/crates/nostr-sdk-fbs/src/lib.rs +++ b/crates/nostr-sdk-fbs/src/lib.rs @@ -3,15 +3,15 @@ //! Nostr SDK Flatbuffers -use event_generated::event_fbs::{Fixed32Bytes, Fixed64Bytes}; +use event_generated::event_fbs::{Fixed32Bytes, Fixed64Bytes, StringVectorArgs}; pub use flatbuffers::FlatBufferBuilder; use flatbuffers::InvalidFlatbuffer; use nostr::secp256k1::schnorr::Signature; use nostr::secp256k1::{self, XOnlyPublicKey}; -use nostr::{Event, EventId, Kind, Timestamp}; +use nostr::{Event, EventId, Kind, Tag, Timestamp}; use thiserror::Error; -#[allow(unused_imports, dead_code)] +#[allow(unused_imports, dead_code, clippy::all)] mod event_generated; pub use self::event_generated::event_fbs; @@ -23,6 +23,8 @@ pub enum Error { #[error(transparent)] EventId(#[from] nostr::event::id::Error), #[error(transparent)] + Tag(#[from] nostr::event::tag::Error), + #[error(transparent)] Secp256k1(#[from] secp256k1::Error), #[error("not found")] NotFound, @@ -34,19 +36,35 @@ pub trait FlatBufferUtils: Sized { } impl FlatBufferUtils for Event { + #[tracing::instrument(skip_all)] fn encode<'a>(&self, fbb: &'a mut FlatBufferBuilder) -> &'a [u8] { fbb.reset(); let id = Fixed32Bytes::new(&self.id.to_bytes()); let pubkey = Fixed32Bytes::new(&self.pubkey.serialize()); let sig = Fixed64Bytes::new(self.sig.as_ref()); + let tags = self + .tags + 
.iter() + .map(|t| { + let tags = t + .as_vec() + .iter() + .map(|t| fbb.create_string(t)) + .collect::>(); + let args = StringVectorArgs { + data: Some(fbb.create_vector(&tags)), + }; + event_fbs::StringVector::create(fbb, &args) + }) + .collect::>(); let args = event_fbs::EventArgs { id: Some(&id), pubkey: Some(&pubkey), created_at: self.created_at.as_u64(), kind: self.kind.as_u64(), - tags: None, // TODO - content: None, // TODO + tags: Some(fbb.create_vector(&tags)), + content: Some(fbb.create_string(&self.content)), sig: Some(&sig), }; @@ -57,15 +75,26 @@ impl FlatBufferUtils for Event { fbb.finished_data() } + #[tracing::instrument(skip_all)] fn decode(buf: &[u8]) -> Result { let ev = event_fbs::root_as_event(buf)?; + let tags = ev + .tags() + .ok_or(Error::NotFound)? + .into_iter() + .filter_map(|tag| { + tag.data() + .map(|tag| Tag::parse(tag.into_iter().collect::>())) + }) + .collect::, _>>()?; + Ok(Self { id: EventId::from_slice(&ev.id().ok_or(Error::NotFound)?.0)?, pubkey: XOnlyPublicKey::from_slice(&ev.pubkey().ok_or(Error::NotFound)?.0)?, created_at: Timestamp::from(ev.created_at()), kind: Kind::from(ev.kind()), - tags: Vec::new(), // TODO - content: String::new(), // TODO + tags, + content: ev.content().ok_or(Error::NotFound)?.to_owned(), sig: Signature::from_slice(&ev.sig().ok_or(Error::NotFound)?.0)?, }) } From a370fd567c2bf6a4971d7a118bcbde11bdff0f3e Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Fri, 27 Oct 2023 10:07:22 +0200 Subject: [PATCH 30/98] fbs: add `index` schema --- crates/nostr-sdk-fbs/Makefile | 3 +- crates/nostr-sdk-fbs/fbs/index.fbs | 11 + crates/nostr-sdk-fbs/src/index_generated.rs | 288 ++++++++++++++++++++ crates/nostr-sdk-fbs/src/lib.rs | 49 +++- 4 files changed, 342 insertions(+), 9 deletions(-) create mode 100644 crates/nostr-sdk-fbs/fbs/index.fbs create mode 100644 crates/nostr-sdk-fbs/src/index_generated.rs diff --git a/crates/nostr-sdk-fbs/Makefile b/crates/nostr-sdk-fbs/Makefile index 5f4a44898..f426f25f9 100644 --- 
a/crates/nostr-sdk-fbs/Makefile +++ b/crates/nostr-sdk-fbs/Makefile @@ -1,4 +1,5 @@ all: build build: - flatc --rust -o ./src/ ./fbs/event.fbs \ No newline at end of file + flatc --rust -o ./src/ ./fbs/event.fbs + flatc --rust -o ./src/ ./fbs/index.fbs \ No newline at end of file diff --git a/crates/nostr-sdk-fbs/fbs/index.fbs b/crates/nostr-sdk-fbs/fbs/index.fbs new file mode 100644 index 000000000..03c965768 --- /dev/null +++ b/crates/nostr-sdk-fbs/fbs/index.fbs @@ -0,0 +1,11 @@ +namespace IndexFbs; + +struct Fixed32Bytes { + val: [ubyte:32]; +} + +table IndexSet { + data: [Fixed32Bytes]; +} + +root_type IndexSet; \ No newline at end of file diff --git a/crates/nostr-sdk-fbs/src/index_generated.rs b/crates/nostr-sdk-fbs/src/index_generated.rs new file mode 100644 index 000000000..3aae754e3 --- /dev/null +++ b/crates/nostr-sdk-fbs/src/index_generated.rs @@ -0,0 +1,288 @@ +// automatically generated by the FlatBuffers compiler, do not modify + +// @generated + +use core::cmp::Ordering; +use core::mem; + +extern crate flatbuffers; +use self::flatbuffers::{EndianScalar, Follow}; + +#[allow(unused_imports, dead_code)] +pub mod index_fbs { + + use core::cmp::Ordering; + use core::mem; + + extern crate flatbuffers; + use self::flatbuffers::{EndianScalar, Follow}; + + // struct Fixed32Bytes, aligned to 1 + #[repr(transparent)] + #[derive(Clone, Copy, PartialEq)] + pub struct Fixed32Bytes(pub [u8; 32]); + impl Default for Fixed32Bytes { + fn default() -> Self { + Self([0; 32]) + } + } + impl core::fmt::Debug for Fixed32Bytes { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + f.debug_struct("Fixed32Bytes") + .field("val", &self.val()) + .finish() + } + } + + impl flatbuffers::SimpleToVerifyInSlice for Fixed32Bytes {} + impl<'a> flatbuffers::Follow<'a> for Fixed32Bytes { + type Inner = &'a Fixed32Bytes; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + <&'a Fixed32Bytes>::follow(buf, loc) + } + } + impl<'a> 
flatbuffers::Follow<'a> for &'a Fixed32Bytes { + type Inner = &'a Fixed32Bytes; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + flatbuffers::follow_cast_ref::(buf, loc) + } + } + impl<'b> flatbuffers::Push for Fixed32Bytes { + type Output = Fixed32Bytes; + #[inline] + unsafe fn push(&self, dst: &mut [u8], _written_len: usize) { + let src = ::core::slice::from_raw_parts( + self as *const Fixed32Bytes as *const u8, + Self::size(), + ); + dst.copy_from_slice(src); + } + } + + impl<'a> flatbuffers::Verifiable for Fixed32Bytes { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.in_buffer::(pos) + } + } + + impl<'a> Fixed32Bytes { + #[allow(clippy::too_many_arguments)] + pub fn new(val: &[u8; 32]) -> Self { + let mut s = Self([0; 32]); + s.set_val(val); + s + } + + pub fn val(&'a self) -> flatbuffers::Array<'a, u8, 32> { + // Safety: + // Created from a valid Table for this object + // Which contains a valid array in this slot + unsafe { flatbuffers::Array::follow(&self.0, 0) } + } + + pub fn set_val(&mut self, items: &[u8; 32]) { + // Safety: + // Created from a valid Table for this object + // Which contains a valid array in this slot + unsafe { flatbuffers::emplace_scalar_array(&mut self.0, 0, items) }; + } + } + + pub enum IndexSetOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct IndexSet<'a> { + pub _tab: flatbuffers::Table<'a>, + } + + impl<'a> flatbuffers::Follow<'a> for IndexSet<'a> { + type Inner = IndexSet<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: flatbuffers::Table::new(buf, loc), + } + } + } + + impl<'a> IndexSet<'a> { + pub const VT_DATA: flatbuffers::VOffsetT = 4; + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + IndexSet { _tab: table } + } + #[allow(unused_mut)] + pub fn create<'bldr: 'args, 'args: 
'mut_bldr, 'mut_bldr>( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, + args: &'args IndexSetArgs<'args>, + ) -> flatbuffers::WIPOffset> { + let mut builder = IndexSetBuilder::new(_fbb); + if let Some(x) = args.data { + builder.add_data(x); + } + builder.finish() + } + + #[inline] + pub fn data(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>>( + IndexSet::VT_DATA, + None, + ) + } + } + } + + impl flatbuffers::Verifiable for IndexSet<'_> { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.visit_table(pos)? + .visit_field::>>( + "data", + Self::VT_DATA, + false, + )? + .finish(); + Ok(()) + } + } + pub struct IndexSetArgs<'a> { + pub data: Option>>, + } + impl<'a> Default for IndexSetArgs<'a> { + #[inline] + fn default() -> Self { + IndexSetArgs { data: None } + } + } + + pub struct IndexSetBuilder<'a: 'b, 'b> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, + start_: flatbuffers::WIPOffset, + } + impl<'a: 'b, 'b> IndexSetBuilder<'a, 'b> { + #[inline] + pub fn add_data( + &mut self, + data: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>(IndexSet::VT_DATA, data); + } + #[inline] + pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> IndexSetBuilder<'a, 'b> { + let start = _fbb.start_table(); + IndexSetBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } + } + + impl core::fmt::Debug for IndexSet<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("IndexSet"); + ds.field("data", &self.data()); + ds.finish() + } + } + #[inline] + /// Verifies that a buffer of bytes contains a `IndexSet` + /// 
and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_index_set_unchecked`. + pub fn root_as_index_set(buf: &[u8]) -> Result { + flatbuffers::root::(buf) + } + #[inline] + /// Verifies that a buffer of bytes contains a size prefixed + /// `IndexSet` and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `size_prefixed_root_as_index_set_unchecked`. + pub fn size_prefixed_root_as_index_set( + buf: &[u8], + ) -> Result { + flatbuffers::size_prefixed_root::(buf) + } + #[inline] + /// Verifies, with the given options, that a buffer of bytes + /// contains a `IndexSet` and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_index_set_unchecked`. + pub fn root_as_index_set_with_opts<'b, 'o>( + opts: &'o flatbuffers::VerifierOptions, + buf: &'b [u8], + ) -> Result, flatbuffers::InvalidFlatbuffer> { + flatbuffers::root_with_opts::>(opts, buf) + } + #[inline] + /// Verifies, with the given verifier options, that a buffer of + /// bytes contains a size prefixed `IndexSet` and returns + /// it. Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_index_set_unchecked`. + pub fn size_prefixed_root_as_index_set_with_opts<'b, 'o>( + opts: &'o flatbuffers::VerifierOptions, + buf: &'b [u8], + ) -> Result, flatbuffers::InvalidFlatbuffer> { + flatbuffers::size_prefixed_root_with_opts::>(opts, buf) + } + #[inline] + /// Assumes, without verification, that a buffer of bytes contains a IndexSet and returns it. 
+ /// # Safety + /// Callers must trust the given bytes do indeed contain a valid `IndexSet`. + pub unsafe fn root_as_index_set_unchecked(buf: &[u8]) -> IndexSet { + flatbuffers::root_unchecked::(buf) + } + #[inline] + /// Assumes, without verification, that a buffer of bytes contains a size prefixed IndexSet and returns it. + /// # Safety + /// Callers must trust the given bytes do indeed contain a valid size prefixed `IndexSet`. + pub unsafe fn size_prefixed_root_as_index_set_unchecked(buf: &[u8]) -> IndexSet { + flatbuffers::size_prefixed_root_unchecked::(buf) + } + #[inline] + pub fn finish_index_set_buffer<'a, 'b>( + fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>, + root: flatbuffers::WIPOffset>, + ) { + fbb.finish(root, None); + } + + #[inline] + pub fn finish_size_prefixed_index_set_buffer<'a, 'b>( + fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>, + root: flatbuffers::WIPOffset>, + ) { + fbb.finish_size_prefixed(root, None); + } +} // pub mod IndexFbs diff --git a/crates/nostr-sdk-fbs/src/lib.rs b/crates/nostr-sdk-fbs/src/lib.rs index a53c9a7ef..f1339d3cd 100644 --- a/crates/nostr-sdk-fbs/src/lib.rs +++ b/crates/nostr-sdk-fbs/src/lib.rs @@ -3,7 +3,8 @@ //! 
Nostr SDK Flatbuffers -use event_generated::event_fbs::{Fixed32Bytes, Fixed64Bytes, StringVectorArgs}; +use std::collections::HashSet; + pub use flatbuffers::FlatBufferBuilder; use flatbuffers::InvalidFlatbuffer; use nostr::secp256k1::schnorr::Signature; @@ -13,8 +14,11 @@ use thiserror::Error; #[allow(unused_imports, dead_code, clippy::all)] mod event_generated; +#[allow(unused_imports, dead_code, clippy::all)] +mod index_generated; -pub use self::event_generated::event_fbs; +use self::event_generated::event_fbs; +use self::index_generated::index_fbs; #[derive(Debug, Error)] pub enum Error { @@ -36,13 +40,13 @@ pub trait FlatBufferUtils: Sized { } impl FlatBufferUtils for Event { - #[tracing::instrument(skip_all)] + #[tracing::instrument(skip_all, level = "trace")] fn encode<'a>(&self, fbb: &'a mut FlatBufferBuilder) -> &'a [u8] { fbb.reset(); - let id = Fixed32Bytes::new(&self.id.to_bytes()); - let pubkey = Fixed32Bytes::new(&self.pubkey.serialize()); - let sig = Fixed64Bytes::new(self.sig.as_ref()); + let id = event_fbs::Fixed32Bytes::new(&self.id.to_bytes()); + let pubkey = event_fbs::Fixed32Bytes::new(&self.pubkey.serialize()); + let sig = event_fbs::Fixed64Bytes::new(self.sig.as_ref()); let tags = self .tags .iter() @@ -52,7 +56,7 @@ impl FlatBufferUtils for Event { .iter() .map(|t| fbb.create_string(t)) .collect::>(); - let args = StringVectorArgs { + let args = event_fbs::StringVectorArgs { data: Some(fbb.create_vector(&tags)), }; event_fbs::StringVector::create(fbb, &args) @@ -75,7 +79,7 @@ impl FlatBufferUtils for Event { fbb.finished_data() } - #[tracing::instrument(skip_all)] + #[tracing::instrument(skip_all, level = "trace")] fn decode(buf: &[u8]) -> Result { let ev = event_fbs::root_as_event(buf)?; let tags = ev @@ -99,3 +103,32 @@ impl FlatBufferUtils for Event { }) } } + +impl FlatBufferUtils for HashSet<[u8; 32]> { + #[tracing::instrument(skip_all, level = "trace")] + fn encode<'a>(&self, fbb: &'a mut FlatBufferBuilder) -> &'a [u8] { + fbb.reset(); 
+ + let list: Vec = + self.iter().map(index_fbs::Fixed32Bytes::new).collect(); + let args = index_fbs::IndexSetArgs { + data: Some(fbb.create_vector(&list)), + }; + + let offset = index_fbs::IndexSet::create(fbb, &args); + + index_fbs::finish_index_set_buffer(fbb, offset); + + fbb.finished_data() + } + + #[tracing::instrument(skip_all, level = "trace")] + fn decode(buf: &[u8]) -> Result { + Ok(index_fbs::root_as_index_set(buf)? + .data() + .ok_or(Error::NotFound)? + .into_iter() + .map(|bytes| bytes.0) + .collect()) + } +} From 31e15696f46519988b974908598b36fb0d152fd3 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Fri, 27 Oct 2023 10:09:53 +0200 Subject: [PATCH 31/98] Init `nostr-sdk-rocksdb` crate --- Cargo.lock | 170 +++++++++++- crates/nostr-sdk-rocksdb/Cargo.toml | 24 ++ crates/nostr-sdk-rocksdb/examples/rocksdb.rs | 64 +++++ crates/nostr-sdk-rocksdb/src/lib.rs | 268 +++++++++++++++++++ crates/nostr-sdk-rocksdb/src/ops.rs | 35 +++ 5 files changed, 559 insertions(+), 2 deletions(-) create mode 100644 crates/nostr-sdk-rocksdb/Cargo.toml create mode 100644 crates/nostr-sdk-rocksdb/examples/rocksdb.rs create mode 100644 crates/nostr-sdk-rocksdb/src/lib.rs create mode 100644 crates/nostr-sdk-rocksdb/src/ops.rs diff --git a/Cargo.lock b/Cargo.lock index 8804dadae..71548aa3f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -208,6 +208,27 @@ dependencies = [ "serde", ] +[[package]] +name = "bindgen" +version = "0.65.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5" +dependencies = [ + "bitflags 1.3.2", + "cexpr", + "clang-sys", + "lazy_static", + "lazycell", + "peeking_take_while", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn", +] + [[package]] name = "bip39" version = "2.0.0" @@ -305,6 +326,17 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +[[package]] +name = "bzip2-sys" +version = "0.1.11+1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" +dependencies = [ + "cc", + "libc", + "pkg-config", +] + [[package]] name = "camino" version = "1.1.6" @@ -352,9 +384,19 @@ version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" dependencies = [ + "jobserver", "libc", ] +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + [[package]] name = "cfg-if" version = "1.0.0" @@ -382,6 +424,17 @@ dependencies = [ "inout", ] +[[package]] +name = "clang-sys" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" +dependencies = [ + "glob", + "libc", + "libloading", +] + [[package]] name = "clap" version = "4.4.5" @@ -948,6 +1001,15 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +[[package]] +name = "jobserver" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" +dependencies = [ + "libc", +] + [[package]] name = "js-sys" version = "0.3.64" @@ -963,12 +1025,53 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + [[package]] name = "libc" version = "0.2.148" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" +[[package]] +name = "libloading" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" +dependencies = [ + "cfg-if", + "winapi", +] + +[[package]] +name = "librocksdb-sys" +version = "0.11.0+8.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3386f101bcb4bd252d8e9d2fb41ec3b0862a15a62b478c355b2982efa469e3e" +dependencies = [ + "bindgen", + "bzip2-sys", + "cc", + "glob", + "libc", + "libz-sys", +] + +[[package]] +name = "libz-sys" +version = "1.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + [[package]] name = "linux-raw-sys" version = "0.4.10" @@ -1201,6 +1304,19 @@ dependencies = [ "ws_stream_wasm", ] +[[package]] +name = "nostr-sdk-rocksdb" +version = "0.1.0" +dependencies = [ + "nostr", + "nostr-sdk-db", + "nostr-sdk-fbs", + "rocksdb", + "tokio", + "tracing", + "tracing-subscriber", +] + [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -1268,6 +1384,12 @@ version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +[[package]] +name = "peeking_take_while" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" + [[package]] name = "percent-encoding" version = "2.3.0" @@ -1296,6 +1418,12 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkg-config" +version = "0.3.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" + [[package]] name = "plain" version = "0.2.3" @@ -1308,11 +1436,21 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "prettyplease" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" +dependencies = [ + "proc-macro2", + "syn", +] + [[package]] name = "proc-macro2" -version = "1.0.67" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" +checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" dependencies = [ "unicode-ident", ] @@ -1455,12 +1593,28 @@ dependencies = [ "winapi", ] +[[package]] +name = "rocksdb" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb6f170a4041d50a0ce04b0d2e14916d6ca863ea2e422689a5b694395d299ffe" +dependencies = [ + "libc", + "librocksdb-sys", +] + [[package]] name = "rustc-demangle" version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + [[package]] name = "rustc_version" version = "0.4.0" @@ -1682,6 +1836,12 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "shlex" +version = "1.2.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7cee0529a6d40f580e7a5e6c495c8fbfe21b7b52795ed4bb5e62cdf92bc6380" + [[package]] name = "siphasher" version = "0.3.11" @@ -2227,6 +2387,12 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + [[package]] name = "version_check" version = "0.9.4" diff --git a/crates/nostr-sdk-rocksdb/Cargo.toml b/crates/nostr-sdk-rocksdb/Cargo.toml new file mode 100644 index 000000000..a9e867207 --- /dev/null +++ b/crates/nostr-sdk-rocksdb/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "nostr-sdk-rocksdb" +version = "0.1.0" +edition = "2021" +description = "TODO" +authors = ["Yuki Kishimoto "] +homepage.workspace = true +repository.workspace = true +license.workspace = true +readme = "README.md" +rust-version.workspace = true +keywords = ["nostr", "sdk", "db", "redb"] + +[dependencies] +nostr = { workspace = true, features = ["std"] } +nostr-sdk-db = { version = "0.1", path = "../nostr-sdk-db" } +nostr-sdk-fbs = { version = "0.1", path = "../nostr-sdk-fbs" } +rocksdb = { version = "0.21", default-features = false, features = ["multi-threaded-cf", "snappy"] } +tokio = { workspace = true, features = ["rt-multi-thread", "sync"] } +tracing = { workspace = true, features = ["std"] } + +[dev-dependencies] +tokio = { workspace = true, features = ["macros", "rt-multi-thread", "time"] } +tracing-subscriber = { workspace = true, features = ["env-filter"] } diff --git a/crates/nostr-sdk-rocksdb/examples/rocksdb.rs b/crates/nostr-sdk-rocksdb/examples/rocksdb.rs new file mode 100644 index 000000000..b26c63df2 --- /dev/null +++ b/crates/nostr-sdk-rocksdb/examples/rocksdb.rs @@ -0,0 +1,64 @@ +// Copyright (c) 2022-2023 Yuki 
Kishimoto +// Distributed under the MIT software license + +use std::time::{Duration, Instant}; + +use nostr::prelude::*; +use nostr_sdk_db::NostrDatabase; +use nostr_sdk_rocksdb::RocksDatabase; +use tracing_subscriber::fmt::format::FmtSpan; + +#[tokio::main] +async fn main() { + tracing_subscriber::fmt::fmt() + .with_span_events(FmtSpan::CLOSE) + .init(); + + let secret_key = + SecretKey::from_bech32("nsec1j4c6269y9w0q2er2xjw8sv2ehyrtfxq3jwgdlxj6qfn8z4gjsq5qfvfk99") + .unwrap(); + let keys = Keys::new(secret_key); + let database = RocksDatabase::new("./db/rocksdb").unwrap(); + + /* for i in 0..50_000 { + let event = EventBuilder::new_text_note(format!("Event #{i}"), &[]) + .to_event(&keys) + .unwrap(); + database.save_event(&event).await.unwrap(); + + let event = EventBuilder::new_text_note( + format!("Reply to event #{i}"), + &[ + Tag::Event(event.id, None, None), + Tag::PubKey(event.pubkey, None), + ], + ) + .to_event(&keys) + .unwrap(); + database.save_event(&event).await.unwrap(); + println!("{}", event.id); + } + + for i in 0..10 { + let metadata = Metadata::new().name(format!("Name #{i}")); + let event = EventBuilder::set_metadata(metadata) + .to_event(&keys) + .unwrap(); + database.save_event(&event).await.unwrap(); + tokio::time::sleep(Duration::from_secs(1)).await; + } */ + + /* let event_id = + EventId::from_hex("b02c1c57a7c5b0e10245df8c26b429ad1a2cbf91d7cada3ecdb524b7e1d984b6") + .unwrap(); + let event = database.event_by_id(event_id).await.unwrap(); + println!("{event:?}"); */ + + let events = database + .query(vec![Filter::new() + .kind(Kind::Metadata) + .author(keys.public_key().to_string())]) + .await + .unwrap(); + println!("Got {} events", events.len()); +} diff --git a/crates/nostr-sdk-rocksdb/src/lib.rs b/crates/nostr-sdk-rocksdb/src/lib.rs new file mode 100644 index 000000000..51ee373fc --- /dev/null +++ b/crates/nostr-sdk-rocksdb/src/lib.rs @@ -0,0 +1,268 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software 
license + +use std::collections::HashSet; +use std::path::Path; +use std::sync::Arc; + +use nostr::FiltersMatchEvent; +use nostr_sdk_db::nostr::{Event, EventId, Filter, Timestamp, Url}; +use nostr_sdk_db::{async_trait, Backend, DatabaseError, DatabaseOptions, NostrDatabase}; +use nostr_sdk_fbs::{FlatBufferBuilder, FlatBufferUtils}; +use rocksdb::{ + BoundColumnFamily, ColumnFamilyDescriptor, DBCompactionStyle, DBCompressionType, + OptimisticTransactionDB, Options, WriteBatchWithTransaction, +}; +use tokio::sync::RwLock; + +mod ops; + +use self::ops::indexes_merge_operator; + +const EVENTS_CF: &str = "events"; +const PUBKEY_INDEX_CF: &str = "pubkey_index"; +const KIND_INDEX_CF: &str = "kind_index"; + +/// RocksDB Nostr Database +#[derive(Debug, Clone)] +pub struct RocksDatabase { + db: Arc, + fbb: Arc>>, +} + +fn default_opts() -> rocksdb::Options { + let mut opts = Options::default(); + opts.set_keep_log_file_num(10); + opts.set_max_open_files(100); + opts.set_compaction_style(DBCompactionStyle::Level); + opts.set_compression_type(DBCompressionType::Snappy); + opts.set_target_file_size_base(256 << 20); + opts.set_write_buffer_size(256 << 20); + opts.set_enable_write_thread_adaptive_yield(true); + opts.set_disable_auto_compactions(false); + opts.increase_parallelism(2); + opts +} + +fn column_families() -> Vec { + let mut index_opts: Options = default_opts(); + index_opts.set_merge_operator_associative("index_merge_operator", indexes_merge_operator); + + vec![ + ColumnFamilyDescriptor::new(EVENTS_CF, default_opts()), + ColumnFamilyDescriptor::new(PUBKEY_INDEX_CF, index_opts.clone()), + ColumnFamilyDescriptor::new(KIND_INDEX_CF, index_opts), + ] +} + +impl RocksDatabase { + pub fn new

(path: P) -> Result + where + P: AsRef, + { + let path: &Path = path.as_ref(); + + tracing::debug!("Opening {}", path.display()); + + let mut db_opts = default_opts(); + db_opts.create_if_missing(true); + db_opts.create_missing_column_families(true); + + let db = OptimisticTransactionDB::open_cf_descriptors(&db_opts, path, column_families()) + .map_err(DatabaseError::backend)?; + + match db.live_files() { + Ok(live_files) => tracing::info!( + "{}: {} SST files, {} GB, {} Grows", + path.display(), + live_files.len(), + live_files.iter().map(|f| f.size).sum::() as f64 / 1e9, + live_files.iter().map(|f| f.num_entries).sum::() as f64 / 1e9 + ), + Err(_) => tracing::warn!("Impossible to get live files"), + }; + + Ok(Self { + db: Arc::new(db), + fbb: Arc::new(RwLock::new(FlatBufferBuilder::with_capacity(70_000))), + }) + } + + fn cf_handle(&self, name: &str) -> Result, DatabaseError> { + self.db.cf_handle(name).ok_or(DatabaseError::NotFound) + } +} + +#[async_trait] +impl NostrDatabase for RocksDatabase { + type Err = DatabaseError; + + fn backend(&self) -> Backend { + Backend::RocksDB + } + + fn opts(&self) -> DatabaseOptions { + DatabaseOptions::default() + } + + #[tracing::instrument(skip_all)] + async fn save_event(&self, event: &Event) -> Result { + // Acquire FlatBuffers Builder + let mut fbb = self.fbb.write().await; + + // Get Column Families + let events_cf = self.cf_handle(EVENTS_CF)?; + let pubkey_index_cf = self.cf_handle(PUBKEY_INDEX_CF)?; + let kind_index_cf = self.cf_handle(KIND_INDEX_CF)?; + + // Serialize key and value + let key: &[u8] = event.id.as_bytes(); + let value: &[u8] = event.encode(&mut fbb); + + // Prepare write batch + let mut batch = WriteBatchWithTransaction::default(); + + // Save event + batch.put_cf(&events_cf, key, value); + + // Save pubkey index + batch.merge_cf(&pubkey_index_cf, event.pubkey.serialize(), key); + + // Save kind index + batch.merge_cf(&kind_index_cf, event.kind.as_u64().to_be_bytes(), key); + + // Write batch changes + 
self.db.write(batch).map_err(DatabaseError::backend)?; + + // Return status + Ok(true) + } + + async fn has_event_already_been_seen(&self, _event_id: EventId) -> Result { + todo!() + } + + async fn event_id_seen( + &self, + _event_id: EventId, + _relay_url: Option, + ) -> Result<(), Self::Err> { + todo!() + } + + async fn event_ids_seen( + &self, + _event_ids: Vec, + _relay_url: Option, + ) -> Result<(), Self::Err> { + todo!() + } + + async fn event_recently_seen_on_relays( + &self, + _event_id: EventId, + ) -> Result>, Self::Err> { + todo!() + } + + #[tracing::instrument(skip_all)] + async fn event_by_id(&self, event_id: EventId) -> Result { + let this = self.clone(); + tokio::task::spawn_blocking(move || { + let cf = this.cf_handle(EVENTS_CF)?; + match this + .db + .get_pinned_cf(&cf, event_id.as_bytes()) + .map_err(DatabaseError::backend)? + { + Some(event) => Event::decode(&event).map_err(DatabaseError::backend), + None => Err(DatabaseError::NotFound), + } + }) + .await + .map_err(DatabaseError::backend)? 
+ } + + #[tracing::instrument(skip_all)] + async fn query(&self, filters: Vec) -> Result, Self::Err> { + let this = self.clone(); + tokio::task::spawn_blocking(move || { + let mut events: Vec = Vec::new(); + + let cf = this.cf_handle(EVENTS_CF)?; + let kind_index_cf = this.cf_handle(KIND_INDEX_CF)?; + + let mut ids_to_get: HashSet<[u8; 32]> = HashSet::new(); + + let filter = filters.first().unwrap(); + if !filter.kinds.is_empty() { + let keys = filter.kinds.iter().map(|k| k.as_u64().to_be_bytes()); + for v in this + .db + .batched_multi_get_cf(&kind_index_cf, keys, false) + .into_iter() + .flatten() + .flatten() + { + let set: HashSet<[u8; 32]> = + HashSet::decode(&v).map_err(DatabaseError::backend)?; + ids_to_get.extend(set); + } + } else { + tracing::debug!("No kinds set to query"); + } + + for v in this + .db + .batched_multi_get_cf(&cf, ids_to_get, false) + .into_iter() + .flatten() + .flatten() + { + let event: Event = Event::decode(&v).map_err(DatabaseError::backend)?; + events.push(event); + } + + /* let iter = this.db.full_iterator_cf(&cf, IteratorMode::Start); + + for i in iter { + if let Ok((_key, value)) = i { + let event: Event = Event::decode(&value).map_err(DatabaseError::backend)?; + if filters.match_event(&event) { + events.push(event); + } + } + } */ + + /* iter.seek_to_first(); + while iter.valid() { + if let Some(value) = iter.value() { + let event: Event = Event::decode(value).map_err(DatabaseError::backend)?; + if filters.match_event(&event) { + events.push(event); + } + }; + iter.next(); + } */ + + Ok(events) + }) + .await + .map_err(DatabaseError::backend)? 
+ } + + async fn event_ids_by_filters(&self, _filters: Vec) -> Result, Self::Err> { + todo!() + } + + async fn negentropy_items( + &self, + _filter: &Filter, + ) -> Result, Self::Err> { + todo!() + } + + async fn wipe(&self) -> Result<(), Self::Err> { + todo!() + } +} diff --git a/crates/nostr-sdk-rocksdb/src/ops.rs b/crates/nostr-sdk-rocksdb/src/ops.rs new file mode 100644 index 000000000..fe2397e87 --- /dev/null +++ b/crates/nostr-sdk-rocksdb/src/ops.rs @@ -0,0 +1,35 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +//! RocksDB Custom Operators + +use std::collections::HashSet; + +use nostr_sdk_fbs::{FlatBufferBuilder, FlatBufferUtils}; +use rocksdb::MergeOperands; + +pub(crate) fn indexes_merge_operator( + _new_key: &[u8], + existing: Option<&[u8]>, + operands: &MergeOperands, +) -> Option> { + // Create a HashSet to store the event IDs for the author. + let mut existing: HashSet<[u8; 32]> = match existing { + Some(val) => HashSet::decode(val).ok()?, + None => HashSet::with_capacity(operands.len()), + }; + + // Merge in the new event IDs. 
+ for operand in operands.into_iter() { + if operand.len() == 32 { + let mut event_id = [0u8; 32]; + event_id.copy_from_slice(operand); + existing.insert(event_id); + } else { + tracing::warn!("Wrong operand slice len: {}", operand.len()); + } + } + + let mut fbb = FlatBufferBuilder::with_capacity(existing.len() * 32 * 2); + Some(existing.encode(&mut fbb).to_vec()) +} From 5dedf037ffe7a7853af714d74003d5c389cdf1dc Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Fri, 27 Oct 2023 10:21:07 +0200 Subject: [PATCH 32/98] Add `async-trait` to `workspace` --- Cargo.lock | 5 +++-- Cargo.toml | 1 + crates/nostr-sdk-db/Cargo.toml | 2 +- crates/nostr-sdk-db/src/lib.rs | 2 +- crates/nostr-sdk-rocksdb/Cargo.toml | 1 + crates/nostr-sdk-rocksdb/examples/rocksdb.rs | 2 +- crates/nostr-sdk-rocksdb/src/lib.rs | 6 +++--- 7 files changed, 11 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 71548aa3f..20a544c20 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1308,6 +1308,7 @@ dependencies = [ name = "nostr-sdk-rocksdb" version = "0.1.0" dependencies = [ + "async-trait", "nostr", "nostr-sdk-db", "nostr-sdk-fbs", @@ -1448,9 +1449,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.69" +version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" +checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" dependencies = [ "unicode-ident", ] diff --git a/Cargo.toml b/Cargo.toml index 32648e1ef..7ef36a6a6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,6 +17,7 @@ license = "MIT" rust-version = "1.64.0" [workspace.dependencies] +async-trait = "0.1" nostr = { version = "0.25", path = "./crates/nostr", default-features = false } once_cell = "1.18" thiserror = "1.0" diff --git a/crates/nostr-sdk-db/Cargo.toml b/crates/nostr-sdk-db/Cargo.toml index cab02c3c4..671e9e89e 100644 --- a/crates/nostr-sdk-db/Cargo.toml +++ 
b/crates/nostr-sdk-db/Cargo.toml @@ -12,7 +12,7 @@ rust-version.workspace = true keywords = ["nostr", "sdk", "db"] [dependencies] -async-trait = "0.1" +async-trait = { workspace = true } nostr = { workspace = true, features = ["std"] } thiserror = { workspace = true } tokio = { workspace = true, features = ["sync"] } diff --git a/crates/nostr-sdk-db/src/lib.rs b/crates/nostr-sdk-db/src/lib.rs index 1b55b018c..39fffedb4 100644 --- a/crates/nostr-sdk-db/src/lib.rs +++ b/crates/nostr-sdk-db/src/lib.rs @@ -9,7 +9,7 @@ use std::collections::HashSet; -use async_trait::async_trait; +pub use async_trait::async_trait; use nostr::{Event, EventId, Filter, Timestamp, Url}; mod error; diff --git a/crates/nostr-sdk-rocksdb/Cargo.toml b/crates/nostr-sdk-rocksdb/Cargo.toml index a9e867207..f2b07285e 100644 --- a/crates/nostr-sdk-rocksdb/Cargo.toml +++ b/crates/nostr-sdk-rocksdb/Cargo.toml @@ -12,6 +12,7 @@ rust-version.workspace = true keywords = ["nostr", "sdk", "db", "redb"] [dependencies] +async-trait = { workspace = true } nostr = { workspace = true, features = ["std"] } nostr-sdk-db = { version = "0.1", path = "../nostr-sdk-db" } nostr-sdk-fbs = { version = "0.1", path = "../nostr-sdk-fbs" } diff --git a/crates/nostr-sdk-rocksdb/examples/rocksdb.rs b/crates/nostr-sdk-rocksdb/examples/rocksdb.rs index b26c63df2..fdfd50aa6 100644 --- a/crates/nostr-sdk-rocksdb/examples/rocksdb.rs +++ b/crates/nostr-sdk-rocksdb/examples/rocksdb.rs @@ -1,7 +1,7 @@ // Copyright (c) 2022-2023 Yuki Kishimoto // Distributed under the MIT software license -use std::time::{Duration, Instant}; +// use std::time::{Duration, Instant}; use nostr::prelude::*; use nostr_sdk_db::NostrDatabase; diff --git a/crates/nostr-sdk-rocksdb/src/lib.rs b/crates/nostr-sdk-rocksdb/src/lib.rs index 51ee373fc..88224704c 100644 --- a/crates/nostr-sdk-rocksdb/src/lib.rs +++ b/crates/nostr-sdk-rocksdb/src/lib.rs @@ -5,9 +5,9 @@ use std::collections::HashSet; use std::path::Path; use std::sync::Arc; -use 
nostr::FiltersMatchEvent; -use nostr_sdk_db::nostr::{Event, EventId, Filter, Timestamp, Url}; -use nostr_sdk_db::{async_trait, Backend, DatabaseError, DatabaseOptions, NostrDatabase}; +use async_trait::async_trait; +use nostr::{Event, EventId, Filter, Timestamp, Url}; +use nostr_sdk_db::{Backend, DatabaseError, DatabaseOptions, NostrDatabase}; use nostr_sdk_fbs::{FlatBufferBuilder, FlatBufferUtils}; use rocksdb::{ BoundColumnFamily, ColumnFamilyDescriptor, DBCompactionStyle, DBCompressionType, From b7ff933b5c3bb7faddee08494f710435bb2cbadc Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Sat, 28 Oct 2023 11:21:20 +0200 Subject: [PATCH 33/98] rocksdb: fix `indexes_merge_operator` func --- crates/nostr-sdk-rocksdb/src/ops.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/crates/nostr-sdk-rocksdb/src/ops.rs b/crates/nostr-sdk-rocksdb/src/ops.rs index fe2397e87..27f390240 100644 --- a/crates/nostr-sdk-rocksdb/src/ops.rs +++ b/crates/nostr-sdk-rocksdb/src/ops.rs @@ -13,23 +13,22 @@ pub(crate) fn indexes_merge_operator( existing: Option<&[u8]>, operands: &MergeOperands, ) -> Option> { - // Create a HashSet to store the event IDs for the author. let mut existing: HashSet<[u8; 32]> = match existing { Some(val) => HashSet::decode(val).ok()?, None => HashSet::with_capacity(operands.len()), }; - // Merge in the new event IDs. 
for operand in operands.into_iter() { + // Check size of operand if operand.len() == 32 { - let mut event_id = [0u8; 32]; + let mut event_id: [u8; 32] = [0u8; 32]; event_id.copy_from_slice(operand); existing.insert(event_id); } else { - tracing::warn!("Wrong operand slice len: {}", operand.len()); + existing.extend(HashSet::decode(operand).ok()?); } } - let mut fbb = FlatBufferBuilder::with_capacity(existing.len() * 32 * 2); + let mut fbb = FlatBufferBuilder::with_capacity(existing.len() * 32 * 2); // Check capacity size if correct Some(existing.encode(&mut fbb).to_vec()) } From 941e753346fb60a6d50545f6a447a4ac6bf0eeb3 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Sat, 28 Oct 2023 11:21:56 +0200 Subject: [PATCH 34/98] rocksdb: add `query_single_filter` method --- crates/nostr-sdk-rocksdb/src/lib.rs | 58 +++++++++++++++++++---------- 1 file changed, 39 insertions(+), 19 deletions(-) diff --git a/crates/nostr-sdk-rocksdb/src/lib.rs b/crates/nostr-sdk-rocksdb/src/lib.rs index 88224704c..a1acf3107 100644 --- a/crates/nostr-sdk-rocksdb/src/lib.rs +++ b/crates/nostr-sdk-rocksdb/src/lib.rs @@ -6,7 +6,7 @@ use std::path::Path; use std::sync::Arc; use async_trait::async_trait; -use nostr::{Event, EventId, Filter, Timestamp, Url}; +use nostr::{Event, EventId, Filter, FiltersMatchEvent, Timestamp, Url}; use nostr_sdk_db::{Backend, DatabaseError, DatabaseOptions, NostrDatabase}; use nostr_sdk_fbs::{FlatBufferBuilder, FlatBufferUtils}; use rocksdb::{ @@ -91,6 +91,29 @@ impl RocksDatabase { fn cf_handle(&self, name: &str) -> Result, DatabaseError> { self.db.cf_handle(name).ok_or(DatabaseError::NotFound) } + + fn query_single_filter( + &self, + filter: &Filter, + ids_to_get: &mut HashSet<[u8; 32]>, + ) -> Result<(), DatabaseError> { + if !filter.kinds.is_empty() { + let kind_index_cf = self.cf_handle(KIND_INDEX_CF)?; + let keys = filter.kinds.iter().map(|k| k.as_u64().to_be_bytes()); + for v in self + .db + .batched_multi_get_cf(&kind_index_cf, keys, false) + .into_iter() + 
.flatten() + .flatten() + { + let set: HashSet<[u8; 32]> = HashSet::decode(&v).map_err(DatabaseError::backend)?; + ids_to_get.extend(set); + } + } + + Ok(()) + } } #[async_trait] @@ -190,28 +213,15 @@ impl NostrDatabase for RocksDatabase { let mut events: Vec = Vec::new(); let cf = this.cf_handle(EVENTS_CF)?; - let kind_index_cf = this.cf_handle(KIND_INDEX_CF)?; let mut ids_to_get: HashSet<[u8; 32]> = HashSet::new(); - let filter = filters.first().unwrap(); - if !filter.kinds.is_empty() { - let keys = filter.kinds.iter().map(|k| k.as_u64().to_be_bytes()); - for v in this - .db - .batched_multi_get_cf(&kind_index_cf, keys, false) - .into_iter() - .flatten() - .flatten() - { - let set: HashSet<[u8; 32]> = - HashSet::decode(&v).map_err(DatabaseError::backend)?; - ids_to_get.extend(set); - } - } else { - tracing::debug!("No kinds set to query"); + for filter in filters.iter() { + this.query_single_filter(filter, &mut ids_to_get)?; } + //let mut counter = 0; + for v in this .db .batched_multi_get_cf(&cf, ids_to_get, false) @@ -219,8 +229,18 @@ impl NostrDatabase for RocksDatabase { .flatten() .flatten() { + /* if let Some(limit) = filter.limit { + if counter >= limit && limit != 0 { + break; + } + } */ + let event: Event = Event::decode(&v).map_err(DatabaseError::backend)?; - events.push(event); + if filters.match_event(&event) { + events.push(event); + } + + //counter += 1; } /* let iter = this.db.full_iterator_cf(&cf, IteratorMode::Start); From a8a1364c034319b2d255811f454cc9963ef3a73e Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Sat, 28 Oct 2023 18:34:24 +0200 Subject: [PATCH 35/98] db: init `DatabaseIndexes` struct --- crates/nostr-sdk-db/examples/memory.rs | 2 +- crates/nostr-sdk-db/src/index.rs | 227 +++++++++++++++++++ crates/nostr-sdk-db/src/lib.rs | 2 + crates/nostr-sdk-db/src/memory.rs | 6 +- crates/nostr-sdk-rocksdb/examples/rocksdb.rs | 14 +- crates/nostr-sdk-rocksdb/src/lib.rs | 100 ++++---- 6 files changed, 294 insertions(+), 57 deletions(-) create 
mode 100644 crates/nostr-sdk-db/src/index.rs diff --git a/crates/nostr-sdk-db/examples/memory.rs b/crates/nostr-sdk-db/examples/memory.rs index 12bd92bba..ea0cb2eee 100644 --- a/crates/nostr-sdk-db/examples/memory.rs +++ b/crates/nostr-sdk-db/examples/memory.rs @@ -44,7 +44,7 @@ async fn main() { let events = database .query(vec![Filter::new() .kind(Kind::Metadata) - .author(keys.public_key().to_string())]) + .author(keys.public_key())]) .await .unwrap(); println!("{events:?}"); diff --git a/crates/nostr-sdk-db/src/index.rs b/crates/nostr-sdk-db/src/index.rs new file mode 100644 index 000000000..050b24393 --- /dev/null +++ b/crates/nostr-sdk-db/src/index.rs @@ -0,0 +1,227 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +//! Indexes + +use std::collections::{HashMap, HashSet}; +use std::sync::Arc; + +use nostr::secp256k1::XOnlyPublicKey; +use nostr::{Event, EventId, Filter, Kind, Timestamp}; +use tokio::sync::RwLock; + +/// Event Index Result +#[derive(Debug, Clone, Default, PartialEq, Eq)] +pub struct EventIndexResult { + /// Handled event should be stored into database? 
+ pub to_store: bool, + /// List of events that should be removed from database + pub to_discard: HashSet, +} + +/// Events Indexes +#[derive(Debug, Clone, Default)] +pub struct DatabaseIndexes { + ids_index: Arc>>, + kinds_index: Arc>>>, + authors_index: Arc>>>, + created_at_index: Arc>>>, +} + +impl DatabaseIndexes { + /// New empty indexes + pub fn new() -> Self { + Self::default() + } + + /// Index [`Event`] + pub async fn index_event(&self, event: &Event) -> EventIndexResult { + // Check if it's expired or ephemeral + if event.is_expired() || event.is_ephemeral() { + return EventIndexResult::default(); + } + + let should_insert: bool = true; + let mut created_at_index = self.created_at_index.write().await; + + /* if event.is_replaceable() { + let filter: Filter = Filter::new() + .author(event.pubkey.to_string()) + .kind(event.kind); + let res: HashSet = self.query(&filter).await; + } else if event.is_parameterized_replaceable() { + /* match event.identifier() { + Some(identifier) => { + let filter: Filter = Filter::new() + .author(event.pubkey.to_string()) + .kind(event.kind) + .identifier(identifier); + let res: Vec = self._query(events, vec![filter]).await?; + if let Some(ev) = res.into_iter().next() { + if ev.created_at >= event.created_at { + should_insert = false; + } else if ev.created_at < event.created_at { + events.remove(&ev.id); + } + } + } + None => should_insert = false, + } */ + } */ + + if should_insert { + // Index id + let mut ids_index = self.ids_index.write().await; + self.index_event_id(&mut ids_index, event).await; + + // Index kind + let mut kinds_index = self.kinds_index.write().await; + self.index_event_kind(&mut kinds_index, event).await; + + // Index author + let mut authors_index = self.authors_index.write().await; + self.index_event_author(&mut authors_index, event).await; + + // Index created at + self.index_event_created_at(&mut created_at_index, event) + .await; + } + + EventIndexResult { + to_store: should_insert, + to_discard: 
HashSet::new(), + } + } + + /// Index id + async fn index_event_id(&self, ids_index: &mut HashMap, event: &Event) { + ids_index.insert(event.id, event.created_at); + } + + /// Index kind + async fn index_event_kind( + &self, + kinds_index: &mut HashMap>, + event: &Event, + ) { + kinds_index + .entry(event.kind) + .and_modify(|set| { + set.insert(event.id); + }) + .or_insert_with(|| { + let mut set = HashSet::with_capacity(1); + set.insert(event.id); + set + }); + } + + /// Index author + async fn index_event_author( + &self, + authors_index: &mut HashMap>, + event: &Event, + ) { + authors_index + .entry(event.pubkey) + .and_modify(|set| { + set.insert(event.id); + }) + .or_insert_with(|| { + let mut set = HashSet::with_capacity(1); + set.insert(event.id); + set + }); + } + + /// Index created at + async fn index_event_created_at( + &self, + created_at_index: &mut HashMap>, + event: &Event, + ) { + created_at_index + .entry(event.created_at) + .and_modify(|set| { + set.insert(event.id); + }) + .or_insert_with(|| { + let mut set = HashSet::with_capacity(1); + set.insert(event.id); + set + }); + } + + /// Query + pub async fn query(&self, filter: &Filter) -> HashSet { + let mut matching_event_ids = HashSet::new(); + + let kinds_index = self.kinds_index.read().await; + let authors_index = self.authors_index.read().await; + let created_at_index = self.created_at_index.read().await; + + if !filter.kinds.is_empty() { + let mut temp = HashSet::new(); + for kind in filter.kinds.iter() { + if let Some(ids) = kinds_index.get(kind) { + temp.extend(ids); + } + } + intersect_or_extend(&mut matching_event_ids, &temp); + } + + if !filter.authors.is_empty() { + let mut temp = HashSet::new(); + for author in filter.authors.iter() { + if let Some(ids) = authors_index.get(author) { + temp.extend(ids); + } + } + intersect_or_extend(&mut matching_event_ids, &temp); + } + + if let Some(since) = filter.since { + let mut temp = HashSet::new(); + for (timestamp, ids) in 
created_at_index.iter() { + if *timestamp >= since { + temp.extend(ids); + } + } + intersect_or_extend(&mut matching_event_ids, &temp); + } + + if let Some(until) = filter.until { + let mut temp = HashSet::new(); + for (timestamp, ids) in created_at_index.iter() { + if *timestamp <= until { + temp.extend(ids); + } + } + intersect_or_extend(&mut matching_event_ids, &temp); + } + + // TODO: sort by timestamp and use limit + + matching_event_ids + } + + /// Clear indexes + pub async fn clear(&self) { + let mut kinds_index = self.kinds_index.write().await; + kinds_index.clear(); + + let mut authors_index = self.authors_index.write().await; + authors_index.clear(); + + let mut created_at_index = self.created_at_index.write().await; + created_at_index.clear(); + } +} + +fn intersect_or_extend(main: &mut HashSet, second: &HashSet) { + if main.is_empty() { + main.extend(second); + } else { + *main = main.intersection(second).copied().collect(); + } +} diff --git a/crates/nostr-sdk-db/src/lib.rs b/crates/nostr-sdk-db/src/lib.rs index 39fffedb4..ba2185b5f 100644 --- a/crates/nostr-sdk-db/src/lib.rs +++ b/crates/nostr-sdk-db/src/lib.rs @@ -13,10 +13,12 @@ pub use async_trait::async_trait; use nostr::{Event, EventId, Filter, Timestamp, Url}; mod error; +pub mod index; pub mod memory; mod options; pub use self::error::DatabaseError; +pub use self::index::{DatabaseIndexes, EventIndexResult}; pub use self::options::DatabaseOptions; /// Backend diff --git a/crates/nostr-sdk-db/src/memory.rs b/crates/nostr-sdk-db/src/memory.rs index 5910d5840..33125850c 100644 --- a/crates/nostr-sdk-db/src/memory.rs +++ b/crates/nostr-sdk-db/src/memory.rs @@ -102,9 +102,7 @@ impl MemoryDatabase { let mut should_insert: bool = true; if event.is_replaceable() { - let filter: Filter = Filter::new() - .author(event.pubkey.to_string()) - .kind(event.kind); + let filter: Filter = Filter::new().author(event.pubkey).kind(event.kind); let res: Vec = self._query(events, vec![filter]).await?; if let Some(ev) 
= res.into_iter().next() { if ev.created_at >= event.created_at { @@ -117,7 +115,7 @@ impl MemoryDatabase { match event.identifier() { Some(identifier) => { let filter: Filter = Filter::new() - .author(event.pubkey.to_string()) + .author(event.pubkey) .kind(event.kind) .identifier(identifier); let res: Vec = self._query(events, vec![filter]).await?; diff --git a/crates/nostr-sdk-rocksdb/examples/rocksdb.rs b/crates/nostr-sdk-rocksdb/examples/rocksdb.rs index fdfd50aa6..fa577b289 100644 --- a/crates/nostr-sdk-rocksdb/examples/rocksdb.rs +++ b/crates/nostr-sdk-rocksdb/examples/rocksdb.rs @@ -1,7 +1,7 @@ // Copyright (c) 2022-2023 Yuki Kishimoto // Distributed under the MIT software license -// use std::time::{Duration, Instant}; +// use std::time::Duration; use nostr::prelude::*; use nostr_sdk_db::NostrDatabase; @@ -18,7 +18,9 @@ async fn main() { SecretKey::from_bech32("nsec1j4c6269y9w0q2er2xjw8sv2ehyrtfxq3jwgdlxj6qfn8z4gjsq5qfvfk99") .unwrap(); let keys = Keys::new(secret_key); + println!("Pubkey: {}", keys.public_key()); let database = RocksDatabase::new("./db/rocksdb").unwrap(); + database.build_indexes().await.unwrap(); /* for i in 0..50_000 { let event = EventBuilder::new_text_note(format!("Event #{i}"), &[]) @@ -37,16 +39,16 @@ async fn main() { .unwrap(); database.save_event(&event).await.unwrap(); println!("{}", event.id); - } + } */ - for i in 0..10 { + /* for i in 0..10 { let metadata = Metadata::new().name(format!("Name #{i}")); let event = EventBuilder::set_metadata(metadata) .to_event(&keys) .unwrap(); database.save_event(&event).await.unwrap(); tokio::time::sleep(Duration::from_secs(1)).await; - } */ + } */ /* let event_id = EventId::from_hex("b02c1c57a7c5b0e10245df8c26b429ad1a2cbf91d7cada3ecdb524b7e1d984b6") @@ -57,8 +59,10 @@ async fn main() { let events = database .query(vec![Filter::new() .kind(Kind::Metadata) - .author(keys.public_key().to_string())]) + //.kind(Kind::Custom(123)) + .author(keys.public_key())]) .await .unwrap(); + 
//println!("{events:?}"); println!("Got {} events", events.len()); } diff --git a/crates/nostr-sdk-rocksdb/src/lib.rs b/crates/nostr-sdk-rocksdb/src/lib.rs index a1acf3107..78db90973 100644 --- a/crates/nostr-sdk-rocksdb/src/lib.rs +++ b/crates/nostr-sdk-rocksdb/src/lib.rs @@ -7,10 +7,13 @@ use std::sync::Arc; use async_trait::async_trait; use nostr::{Event, EventId, Filter, FiltersMatchEvent, Timestamp, Url}; -use nostr_sdk_db::{Backend, DatabaseError, DatabaseOptions, NostrDatabase}; +use nostr_sdk_db::{ + index::DatabaseIndexes, Backend, DatabaseError, DatabaseOptions, EventIndexResult, + NostrDatabase, +}; use nostr_sdk_fbs::{FlatBufferBuilder, FlatBufferUtils}; use rocksdb::{ - BoundColumnFamily, ColumnFamilyDescriptor, DBCompactionStyle, DBCompressionType, + BoundColumnFamily, ColumnFamilyDescriptor, DBCompactionStyle, DBCompressionType, IteratorMode, OptimisticTransactionDB, Options, WriteBatchWithTransaction, }; use tokio::sync::RwLock; @@ -27,6 +30,7 @@ const KIND_INDEX_CF: &str = "kind_index"; #[derive(Debug, Clone)] pub struct RocksDatabase { db: Arc, + indexes: DatabaseIndexes, fbb: Arc>>, } @@ -84,6 +88,7 @@ impl RocksDatabase { Ok(Self { db: Arc::new(db), + indexes: DatabaseIndexes::new(), fbb: Arc::new(RwLock::new(FlatBufferBuilder::with_capacity(70_000))), }) } @@ -92,23 +97,15 @@ impl RocksDatabase { self.db.cf_handle(name).ok_or(DatabaseError::NotFound) } - fn query_single_filter( - &self, - filter: &Filter, - ids_to_get: &mut HashSet<[u8; 32]>, - ) -> Result<(), DatabaseError> { - if !filter.kinds.is_empty() { - let kind_index_cf = self.cf_handle(KIND_INDEX_CF)?; - let keys = filter.kinds.iter().map(|k| k.as_u64().to_be_bytes()); - for v in self - .db - .batched_multi_get_cf(&kind_index_cf, keys, false) - .into_iter() - .flatten() - .flatten() - { - let set: HashSet<[u8; 32]> = HashSet::decode(&v).map_err(DatabaseError::backend)?; - ids_to_get.extend(set); + #[tracing::instrument(skip_all)] + pub async fn build_indexes(&self) -> Result<(), 
DatabaseError> { + let cf = self.cf_handle(EVENTS_CF)?; + let iter = self.db.full_iterator_cf(&cf, IteratorMode::Start); + + for i in iter { + if let Ok((_key, value)) = i { + let event = Event::decode(&value).map_err(DatabaseError::backend)?; + self.indexes.index_event(&event).await; } } @@ -130,35 +127,43 @@ impl NostrDatabase for RocksDatabase { #[tracing::instrument(skip_all)] async fn save_event(&self, event: &Event) -> Result { - // Acquire FlatBuffers Builder - let mut fbb = self.fbb.write().await; + // Index event + let EventIndexResult { + to_store, + to_discard, + } = self.indexes.index_event(&event).await; - // Get Column Families - let events_cf = self.cf_handle(EVENTS_CF)?; - let pubkey_index_cf = self.cf_handle(PUBKEY_INDEX_CF)?; - let kind_index_cf = self.cf_handle(KIND_INDEX_CF)?; + if to_store { + // Acquire FlatBuffers Builder + let mut fbb = self.fbb.write().await; - // Serialize key and value - let key: &[u8] = event.id.as_bytes(); - let value: &[u8] = event.encode(&mut fbb); + tokio::task::block_in_place(|| { + // Get Column Families + let events_cf = self.cf_handle(EVENTS_CF)?; - // Prepare write batch - let mut batch = WriteBatchWithTransaction::default(); + // Serialize key and value + let key: &[u8] = event.id.as_bytes(); + let value: &[u8] = event.encode(&mut fbb); - // Save event - batch.put_cf(&events_cf, key, value); + // Prepare write batch + let mut batch = WriteBatchWithTransaction::default(); - // Save pubkey index - batch.merge_cf(&pubkey_index_cf, event.pubkey.serialize(), key); + // Save event + batch.put_cf(&events_cf, key, value); - // Save kind index - batch.merge_cf(&kind_index_cf, event.kind.as_u64().to_be_bytes(), key); + // Discard events no longer needed + for event_id in to_discard.into_iter() { + batch.delete_cf(&events_cf, event_id.as_bytes()); + } - // Write batch changes - self.db.write(batch).map_err(DatabaseError::backend)?; + // Write batch changes + self.db.write(batch).map_err(DatabaseError::backend) + })?; - // 
Return status - Ok(true) + Ok(true) + } else { + Ok(false) + } } async fn has_event_already_been_seen(&self, _event_id: EventId) -> Result { @@ -208,17 +213,18 @@ impl NostrDatabase for RocksDatabase { #[tracing::instrument(skip_all)] async fn query(&self, filters: Vec) -> Result, Self::Err> { + let mut ids_to_get: HashSet = HashSet::new(); + + for filter in filters.iter() { + let ids = self.indexes.query(filter).await; + ids_to_get.extend(ids); + } + let this = self.clone(); tokio::task::spawn_blocking(move || { - let mut events: Vec = Vec::new(); - let cf = this.cf_handle(EVENTS_CF)?; - let mut ids_to_get: HashSet<[u8; 32]> = HashSet::new(); - - for filter in filters.iter() { - this.query_single_filter(filter, &mut ids_to_get)?; - } + let mut events: Vec = Vec::new(); //let mut counter = 0; From 220fd4c092691b68ac7dfad74848b820f39cb9b5 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Sat, 28 Oct 2023 18:46:18 +0200 Subject: [PATCH 36/98] nostr: add `build_tags_index` method to `Event` --- crates/nostr/src/event/mod.rs | 28 +++++++- crates/nostr/src/message/subscription.rs | 88 ++++++++++-------------- 2 files changed, 65 insertions(+), 51 deletions(-) diff --git a/crates/nostr/src/event/mod.rs b/crates/nostr/src/event/mod.rs index 41960d458..e6641654a 100644 --- a/crates/nostr/src/event/mod.rs +++ b/crates/nostr/src/event/mod.rs @@ -4,9 +4,13 @@ //! 
Event +#[cfg(not(feature = "std"))] +use alloc::collections::{BTreeMap as AllocMap, BTreeSet as AllocSet}; use alloc::string::String; use alloc::vec::Vec; use core::fmt; +#[cfg(feature = "std")] +use std::collections::{HashMap as AllocMap, HashSet as AllocSet}; use bitcoin::secp256k1::schnorr::Signature; use bitcoin::secp256k1::{self, Message, Secp256k1, Verification, XOnlyPublicKey}; @@ -30,7 +34,7 @@ use crate::types::time::Instant; use crate::types::time::TimeSupplier; #[cfg(feature = "std")] use crate::SECP256K1; -use crate::{JsonUtil, Timestamp}; +use crate::{Alphabet, JsonUtil, Timestamp}; /// [`Event`] error #[derive(Debug)] @@ -282,6 +286,28 @@ impl Event { _ => None, }) } + + /// Build tags index + pub fn build_tags_index(&self) -> AllocMap> { + fn single_char_tagname(tagname: &str) -> Option { + tagname + .chars() + .next() + .and_then(|first| Alphabet::try_from(first).ok()) + } + + self.tags + .iter() + .map(|t| t.as_vec()) + .filter(|t| t.len() > 1) + .filter_map(|t| { + single_char_tagname(&t[0]).map(|tagnamechar| (tagnamechar, t[1].clone())) + }) + .fold(AllocMap::new(), |mut idx, (tagnamechar, tagval)| { + idx.entry(tagnamechar).or_default().insert(tagval); + idx + }) + } } impl JsonUtil for Event { diff --git a/crates/nostr/src/message/subscription.rs b/crates/nostr/src/message/subscription.rs index 68acc0d5c..1a809bf13 100644 --- a/crates/nostr/src/message/subscription.rs +++ b/crates/nostr/src/message/subscription.rs @@ -109,38 +109,46 @@ impl fmt::Display for Alphabet { } } +impl TryFrom for Alphabet { + type Error = AlphabetError; + fn try_from(c: char) -> Result { + match c { + 'a' => Ok(Self::A), + 'b' => Ok(Self::B), + 'c' => Ok(Self::C), + 'd' => Ok(Self::D), + 'e' => Ok(Self::E), + 'f' => Ok(Self::F), + 'g' => Ok(Self::G), + 'h' => Ok(Self::H), + 'i' => Ok(Self::I), + 'j' => Ok(Self::J), + 'k' => Ok(Self::K), + 'l' => Ok(Self::L), + 'm' => Ok(Self::M), + 'n' => Ok(Self::N), + 'o' => Ok(Self::O), + 'p' => Ok(Self::P), + 'q' => Ok(Self::Q), + 
'r' => Ok(Self::R), + 's' => Ok(Self::S), + 't' => Ok(Self::T), + 'u' => Ok(Self::U), + 'v' => Ok(Self::V), + 'w' => Ok(Self::W), + 'x' => Ok(Self::X), + 'y' => Ok(Self::Y), + 'z' => Ok(Self::Z), + _ => Err(AlphabetError::InvalidChar), + } + } +} + impl FromStr for Alphabet { type Err = AlphabetError; fn from_str(s: &str) -> Result { - match s { - "a" => Ok(Self::A), - "b" => Ok(Self::B), - "c" => Ok(Self::C), - "d" => Ok(Self::D), - "e" => Ok(Self::E), - "f" => Ok(Self::F), - "g" => Ok(Self::G), - "h" => Ok(Self::H), - "i" => Ok(Self::I), - "j" => Ok(Self::J), - "k" => Ok(Self::K), - "l" => Ok(Self::L), - "m" => Ok(Self::M), - "n" => Ok(Self::N), - "o" => Ok(Self::O), - "p" => Ok(Self::P), - "q" => Ok(Self::Q), - "r" => Ok(Self::R), - "s" => Ok(Self::S), - "t" => Ok(Self::T), - "u" => Ok(Self::U), - "v" => Ok(Self::V), - "w" => Ok(Self::W), - "x" => Ok(Self::X), - "y" => Ok(Self::Y), - "z" => Ok(Self::Z), - _ => Err(AlphabetError::InvalidChar), - } + let c: char = s.chars().next().ok_or(AlphabetError::InvalidChar)?; + Self::try_from(c) } } @@ -566,26 +574,6 @@ impl Filter { } } -fn single_char_tagname(tagname: &str) -> Option { - tagname - .chars() - .next() - .and_then(|first| Alphabet::from_str(&first.to_string()).ok()) -} - -fn tag_idx(event: &Event) -> AllocMap> { - event - .tags - .iter() - .map(|t| t.as_vec()) - .filter(|t| t.len() > 1) - .filter_map(|t| single_char_tagname(&t[0]).map(|tagnamechar| (tagnamechar, t[1].clone()))) - .fold(AllocMap::new(), |mut idx, (tagnamechar, tagval)| { - idx.entry(tagnamechar).or_default().insert(tagval); - idx - }) -} - impl Filter { fn ids_match(&self, event: &Event) -> bool { self.ids.is_empty() || self.ids.contains(&event.id) @@ -600,7 +588,7 @@ impl Filter { return true; } - let idx: AllocMap> = tag_idx(event); + let idx: AllocMap> = event.build_tags_index(); self.generic_tags.iter().all(|(tagname, set)| { idx.get(tagname) .map(|valset| valset.intersection(set).count() > 0) From 7c5b07942485f01cc8882cbdd19349cad1691160 
Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Sat, 28 Oct 2023 19:33:23 +0200 Subject: [PATCH 37/98] db: index event tags --- crates/nostr-sdk-db/Cargo.toml | 2 +- crates/nostr-sdk-db/src/index.rs | 102 +++++++++++++++++++------------ 2 files changed, 63 insertions(+), 41 deletions(-) diff --git a/crates/nostr-sdk-db/Cargo.toml b/crates/nostr-sdk-db/Cargo.toml index 671e9e89e..3c4459e02 100644 --- a/crates/nostr-sdk-db/Cargo.toml +++ b/crates/nostr-sdk-db/Cargo.toml @@ -16,7 +16,7 @@ async-trait = { workspace = true } nostr = { workspace = true, features = ["std"] } thiserror = { workspace = true } tokio = { workspace = true, features = ["sync"] } -tracing = { workspace = true, features = ["std"] } +tracing = { workspace = true, features = ["std", "attributes"] } [dev-dependencies] tokio = { workspace = true, features = ["macros", "rt-multi-thread", "time"] } diff --git a/crates/nostr-sdk-db/src/index.rs b/crates/nostr-sdk-db/src/index.rs index 050b24393..b72a1ae42 100644 --- a/crates/nostr-sdk-db/src/index.rs +++ b/crates/nostr-sdk-db/src/index.rs @@ -4,12 +4,15 @@ //! Indexes use std::collections::{HashMap, HashSet}; +use std::hash::Hash; use std::sync::Arc; use nostr::secp256k1::XOnlyPublicKey; -use nostr::{Event, EventId, Filter, Kind, Timestamp}; +use nostr::{Alphabet, Event, EventId, Filter, Kind, Timestamp}; use tokio::sync::RwLock; +type TagIndex = HashMap>>; + /// Event Index Result #[derive(Debug, Clone, Default, PartialEq, Eq)] pub struct EventIndexResult { @@ -25,7 +28,8 @@ pub struct DatabaseIndexes { ids_index: Arc>>, kinds_index: Arc>>>, authors_index: Arc>>>, - created_at_index: Arc>>>, + created_at_index: Arc>>>, // TODO: remove this and use BTreeMap? 
+ tags_index: Arc>, } impl DatabaseIndexes { @@ -35,6 +39,7 @@ impl DatabaseIndexes { } /// Index [`Event`] + #[tracing::instrument(skip_all, level = "trace")] pub async fn index_event(&self, event: &Event) -> EventIndexResult { // Check if it's expired or ephemeral if event.is_expired() || event.is_ephemeral() { @@ -42,32 +47,8 @@ impl DatabaseIndexes { } let should_insert: bool = true; - let mut created_at_index = self.created_at_index.write().await; - - /* if event.is_replaceable() { - let filter: Filter = Filter::new() - .author(event.pubkey.to_string()) - .kind(event.kind); - let res: HashSet = self.query(&filter).await; - } else if event.is_parameterized_replaceable() { - /* match event.identifier() { - Some(identifier) => { - let filter: Filter = Filter::new() - .author(event.pubkey.to_string()) - .kind(event.kind) - .identifier(identifier); - let res: Vec = self._query(events, vec![filter]).await?; - if let Some(ev) = res.into_iter().next() { - if ev.created_at >= event.created_at { - should_insert = false; - } else if ev.created_at < event.created_at { - events.remove(&ev.id); - } - } - } - None => should_insert = false, - } */ - } */ + + // TODO: check if it's a [parametrized] replaceable event if should_insert { // Index id @@ -83,8 +64,13 @@ impl DatabaseIndexes { self.index_event_author(&mut authors_index, event).await; // Index created at + let mut created_at_index = self.created_at_index.write().await; self.index_event_created_at(&mut created_at_index, event) .await; + + // Index tags + let mut tags_index = self.tags_index.write().await; + self.index_event_tags(&mut tags_index, event).await; } EventIndexResult { @@ -152,34 +138,39 @@ impl DatabaseIndexes { }); } + /// Index tags + async fn index_event_tags( + &self, + tags_index: &mut TagIndex, + event: &Event, + ) { + if !event.tags.is_empty() { + tags_index.insert(event.id, event.build_tags_index()); + } + } + /// Query + #[tracing::instrument(skip_all)] pub async fn query(&self, filter: &Filter) 
-> HashSet { let mut matching_event_ids = HashSet::new(); let kinds_index = self.kinds_index.read().await; let authors_index = self.authors_index.read().await; let created_at_index = self.created_at_index.read().await; + let tags_index = self.tags_index.read().await; if !filter.kinds.is_empty() { - let mut temp = HashSet::new(); - for kind in filter.kinds.iter() { - if let Some(ids) = kinds_index.get(kind) { - temp.extend(ids); - } - } + let temp = self.query_index(&kinds_index, &filter.kinds).await; intersect_or_extend(&mut matching_event_ids, &temp); } if !filter.authors.is_empty() { - let mut temp = HashSet::new(); - for author in filter.authors.iter() { - if let Some(ids) = authors_index.get(author) { - temp.extend(ids); - } - } + let temp = self.query_index(&authors_index, &filter.authors).await; intersect_or_extend(&mut matching_event_ids, &temp); } + // TODO: check if since >= until + if let Some(since) = filter.since { let mut temp = HashSet::new(); for (timestamp, ids) in created_at_index.iter() { @@ -200,11 +191,42 @@ impl DatabaseIndexes { intersect_or_extend(&mut matching_event_ids, &temp); } + if !filter.generic_tags.is_empty() { + let mut temp = HashSet::new(); + for (id, idx) in tags_index.iter() { + if filter.generic_tags.iter().all(|(tagname, set)| { + idx.get(tagname) + .map(|valset| valset.intersection(set).count() > 0) + .unwrap_or(false) + }) { + temp.insert(*id); + } + } + intersect_or_extend(&mut matching_event_ids, &temp); + } + // TODO: sort by timestamp and use limit matching_event_ids } + async fn query_index( + &self, + index: &HashMap>, + keys: &HashSet, + ) -> HashSet + where + K: Eq + Hash, + { + let mut result: HashSet = HashSet::new(); + for key in keys.iter() { + if let Some(ids) = index.get(key) { + result.extend(ids); + } + } + result + } + /// Clear indexes pub async fn clear(&self) { let mut kinds_index = self.kinds_index.write().await; From d5fbaa64a71fddc050d705bd79f69d1509161a70 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto 
Date: Sun, 29 Oct 2023 09:25:48 +0100 Subject: [PATCH 38/98] db: use `BTreeMap` for `created_at_index` --- crates/nostr-sdk-db/src/index.rs | 60 ++++++++++++++++++-------------- 1 file changed, 34 insertions(+), 26 deletions(-) diff --git a/crates/nostr-sdk-db/src/index.rs b/crates/nostr-sdk-db/src/index.rs index b72a1ae42..a526b61a7 100644 --- a/crates/nostr-sdk-db/src/index.rs +++ b/crates/nostr-sdk-db/src/index.rs @@ -3,7 +3,7 @@ //! Indexes -use std::collections::{HashMap, HashSet}; +use std::collections::{BTreeMap, HashMap, HashSet}; use std::hash::Hash; use std::sync::Arc; @@ -25,10 +25,10 @@ pub struct EventIndexResult { /// Events Indexes #[derive(Debug, Clone, Default)] pub struct DatabaseIndexes { - ids_index: Arc>>, + //ids_index: Arc>>, kinds_index: Arc>>>, authors_index: Arc>>>, - created_at_index: Arc>>>, // TODO: remove this and use BTreeMap? + created_at_index: Arc>>>, tags_index: Arc>, } @@ -47,13 +47,13 @@ impl DatabaseIndexes { } let should_insert: bool = true; - + // TODO: check if it's a [parametrized] replaceable event if should_insert { // Index id - let mut ids_index = self.ids_index.write().await; - self.index_event_id(&mut ids_index, event).await; + /* let mut ids_index = self.ids_index.write().await; + self.index_event_id(&mut ids_index, event).await; */ // Index kind let mut kinds_index = self.kinds_index.write().await; @@ -79,10 +79,10 @@ impl DatabaseIndexes { } } - /// Index id + /* /// Index id async fn index_event_id(&self, ids_index: &mut HashMap, event: &Event) { ids_index.insert(event.id, event.created_at); - } + } */ /// Index kind async fn index_event_kind( @@ -123,7 +123,7 @@ impl DatabaseIndexes { /// Index created at async fn index_event_created_at( &self, - created_at_index: &mut HashMap>, + created_at_index: &mut BTreeMap>, event: &Event, ) { created_at_index @@ -139,11 +139,7 @@ impl DatabaseIndexes { } /// Index tags - async fn index_event_tags( - &self, - tags_index: &mut TagIndex, - event: &Event, - ) { + async fn 
index_event_tags(&self, tags_index: &mut TagIndex, event: &Event) { if !event.tags.is_empty() { tags_index.insert(event.id, event.build_tags_index()); } @@ -152,6 +148,16 @@ impl DatabaseIndexes { /// Query #[tracing::instrument(skip_all)] pub async fn query(&self, filter: &Filter) -> HashSet { + if !filter.ids.is_empty() { + return filter.ids.clone(); + } + + if let (Some(since), Some(until)) = (filter.since, filter.until) { + if since > until { + return HashSet::new(); + } + } + let mut matching_event_ids = HashSet::new(); let kinds_index = self.kinds_index.read().await; @@ -169,26 +175,28 @@ impl DatabaseIndexes { intersect_or_extend(&mut matching_event_ids, &temp); } - // TODO: check if since >= until - - if let Some(since) = filter.since { + if let (Some(since), Some(until)) = (filter.since, filter.until) { let mut temp = HashSet::new(); - for (timestamp, ids) in created_at_index.iter() { - if *timestamp >= since { + for ids in created_at_index.range(since..=until).map(|(_, ids)| ids) { + temp.extend(ids); + } + intersect_or_extend(&mut matching_event_ids, &temp); + } else { + if let Some(since) = filter.since { + let mut temp = HashSet::new(); + for (_, ids) in created_at_index.range(since..) 
{ temp.extend(ids); } + intersect_or_extend(&mut matching_event_ids, &temp); } - intersect_or_extend(&mut matching_event_ids, &temp); - } - if let Some(until) = filter.until { - let mut temp = HashSet::new(); - for (timestamp, ids) in created_at_index.iter() { - if *timestamp <= until { + if let Some(until) = filter.until { + let mut temp = HashSet::new(); + for (_, ids) in created_at_index.range(..=until) { temp.extend(ids); } + intersect_or_extend(&mut matching_event_ids, &temp); } - intersect_or_extend(&mut matching_event_ids, &temp); } if !filter.generic_tags.is_empty() { From 8c7d7877258e37bc777c0d9f4706a5e19d041fca Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Sun, 29 Oct 2023 15:03:20 +0100 Subject: [PATCH 39/98] db: add `MappingIdentifier` --- crates/nostr-sdk-db/src/index.rs | 198 +++++++++++++++++++++---------- 1 file changed, 138 insertions(+), 60 deletions(-) diff --git a/crates/nostr-sdk-db/src/index.rs b/crates/nostr-sdk-db/src/index.rs index a526b61a7..e20ae4e73 100644 --- a/crates/nostr-sdk-db/src/index.rs +++ b/crates/nostr-sdk-db/src/index.rs @@ -3,15 +3,21 @@ //! 
Indexes -use std::collections::{BTreeMap, HashMap, HashSet}; +use std::cmp::Ordering; +use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use std::hash::Hash; +use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering}; use std::sync::Arc; use nostr::secp256k1::XOnlyPublicKey; use nostr::{Alphabet, Event, EventId, Filter, Kind, Timestamp}; use tokio::sync::RwLock; -type TagIndex = HashMap>>; +type Mapping = HashMap; +type KindIndex = HashMap>; +type AuthorIndex = HashMap>; +type CreatedAtIndex = BTreeMap>; +type TagIndex = HashMap>>; /// Event Index Result #[derive(Debug, Clone, Default, PartialEq, Eq)] @@ -22,13 +28,46 @@ pub struct EventIndexResult { pub to_discard: HashSet, } -/// Events Indexes +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +struct SmallerIdentifier([u8; 8]); + +impl SmallerIdentifier { + pub fn new(sid: [u8; 8]) -> Self { + Self(sid) + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash, Copy)] +struct MappingIdentifier { + pub timestamp: Timestamp, + pub sid: SmallerIdentifier, +} + +impl PartialOrd for MappingIdentifier { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for MappingIdentifier { + fn cmp(&self, other: &Self) -> Ordering { + let timestamp_cmp = other.timestamp.cmp(&self.timestamp); + if timestamp_cmp != Ordering::Equal { + return timestamp_cmp; + } + + self.sid.cmp(&other.sid) + } +} + +/// Database Indexes #[derive(Debug, Clone, Default)] pub struct DatabaseIndexes { - //ids_index: Arc>>, - kinds_index: Arc>>>, - authors_index: Arc>>>, - created_at_index: Arc>>>, + counter: Arc, + mapping: Arc>, + kinds_index: Arc>, + authors_index: Arc>, + created_at_index: Arc>, tags_index: Arc>, } @@ -51,26 +90,33 @@ impl DatabaseIndexes { // TODO: check if it's a [parametrized] replaceable event if should_insert { - // Index id - /* let mut ids_index = self.ids_index.write().await; - self.index_event_id(&mut ids_index, event).await; */ + let mapping_id = 
MappingIdentifier { + sid: self.next_sid(), + timestamp: event.created_at, + }; + + let mut mapping = self.mapping.write().await; + mapping.insert(mapping_id.sid, event.id); // Index kind let mut kinds_index = self.kinds_index.write().await; - self.index_event_kind(&mut kinds_index, event).await; + self.index_event_kind(&mut kinds_index, mapping_id, event) + .await; // Index author let mut authors_index = self.authors_index.write().await; - self.index_event_author(&mut authors_index, event).await; + self.index_event_author(&mut authors_index, mapping_id, event) + .await; // Index created at let mut created_at_index = self.created_at_index.write().await; - self.index_event_created_at(&mut created_at_index, event) + self.index_event_created_at(&mut created_at_index, mapping_id, event) .await; // Index tags let mut tags_index = self.tags_index.write().await; - self.index_event_tags(&mut tags_index, event).await; + self.index_event_tags(&mut tags_index, mapping_id, event) + .await; } EventIndexResult { @@ -79,25 +125,26 @@ impl DatabaseIndexes { } } - /* /// Index id - async fn index_event_id(&self, ids_index: &mut HashMap, event: &Event) { - ids_index.insert(event.id, event.created_at); - } */ + fn next_sid(&self) -> SmallerIdentifier { + let next_id: u64 = self.counter.fetch_add(1, AtomicOrdering::SeqCst); + SmallerIdentifier::new(next_id.to_be_bytes()) + } /// Index kind async fn index_event_kind( &self, - kinds_index: &mut HashMap>, + kinds_index: &mut KindIndex, + mid: MappingIdentifier, event: &Event, ) { kinds_index .entry(event.kind) .and_modify(|set| { - set.insert(event.id); + set.insert(mid); }) .or_insert_with(|| { let mut set = HashSet::with_capacity(1); - set.insert(event.id); + set.insert(mid); set }); } @@ -105,17 +152,18 @@ impl DatabaseIndexes { /// Index author async fn index_event_author( &self, - authors_index: &mut HashMap>, + authors_index: &mut AuthorIndex, + mid: MappingIdentifier, event: &Event, ) { authors_index .entry(event.pubkey) 
.and_modify(|set| { - set.insert(event.id); + set.insert(mid); }) .or_insert_with(|| { let mut set = HashSet::with_capacity(1); - set.insert(event.id); + set.insert(mid); set }); } @@ -123,42 +171,57 @@ impl DatabaseIndexes { /// Index created at async fn index_event_created_at( &self, - created_at_index: &mut BTreeMap>, + created_at_index: &mut CreatedAtIndex, + mid: MappingIdentifier, event: &Event, ) { created_at_index .entry(event.created_at) .and_modify(|set| { - set.insert(event.id); + set.insert(mid); }) .or_insert_with(|| { let mut set = HashSet::with_capacity(1); - set.insert(event.id); + set.insert(mid); set }); } /// Index tags - async fn index_event_tags(&self, tags_index: &mut TagIndex, event: &Event) { - if !event.tags.is_empty() { - tags_index.insert(event.id, event.build_tags_index()); + async fn index_event_tags( + &self, + tags_index: &mut TagIndex, + mid: MappingIdentifier, + event: &Event, + ) { + for (a, set) in event.build_tags_index().into_iter() { + tags_index + .entry(a) + .and_modify(|map| { + map.insert(mid, set.clone()); + }) + .or_insert_with(|| { + let mut map = HashMap::with_capacity(1); + map.insert(mid, set); + map + }); } } /// Query #[tracing::instrument(skip_all)] - pub async fn query(&self, filter: &Filter) -> HashSet { + pub async fn query(&self, filter: &Filter) -> Vec { if !filter.ids.is_empty() { - return filter.ids.clone(); + return filter.ids.iter().copied().collect(); } if let (Some(since), Some(until)) = (filter.since, filter.until) { if since > until { - return HashSet::new(); + return Vec::new(); } } - let mut matching_event_ids = HashSet::new(); + let mut matching_sids: BTreeSet = BTreeSet::new(); let kinds_index = self.kinds_index.read().await; let authors_index = self.authors_index.read().await; @@ -167,66 +230,78 @@ impl DatabaseIndexes { if !filter.kinds.is_empty() { let temp = self.query_index(&kinds_index, &filter.kinds).await; - intersect_or_extend(&mut matching_event_ids, &temp); + intersect_or_extend(&mut 
matching_sids, &temp); } if !filter.authors.is_empty() { let temp = self.query_index(&authors_index, &filter.authors).await; - intersect_or_extend(&mut matching_event_ids, &temp); + intersect_or_extend(&mut matching_sids, &temp); } if let (Some(since), Some(until)) = (filter.since, filter.until) { - let mut temp = HashSet::new(); + let mut temp = BTreeSet::new(); for ids in created_at_index.range(since..=until).map(|(_, ids)| ids) { temp.extend(ids); } - intersect_or_extend(&mut matching_event_ids, &temp); + intersect_or_extend(&mut matching_sids, &temp); } else { if let Some(since) = filter.since { - let mut temp = HashSet::new(); + let mut temp = BTreeSet::new(); for (_, ids) in created_at_index.range(since..) { temp.extend(ids); } - intersect_or_extend(&mut matching_event_ids, &temp); + intersect_or_extend(&mut matching_sids, &temp); } if let Some(until) = filter.until { - let mut temp = HashSet::new(); + let mut temp = BTreeSet::new(); for (_, ids) in created_at_index.range(..=until) { temp.extend(ids); } - intersect_or_extend(&mut matching_event_ids, &temp); + intersect_or_extend(&mut matching_sids, &temp); } } if !filter.generic_tags.is_empty() { - let mut temp = HashSet::new(); - for (id, idx) in tags_index.iter() { - if filter.generic_tags.iter().all(|(tagname, set)| { - idx.get(tagname) - .map(|valset| valset.intersection(set).count() > 0) - .unwrap_or(false) - }) { - temp.insert(*id); + let mut temp = BTreeSet::new(); + + for (tagname, set) in filter.generic_tags.iter() { + if let Some(tag_map) = tags_index.get(tagname) { + for (id, tag_values) in tag_map { + if set.iter().all(|value| tag_values.contains(value)) { + temp.insert(*id); + } + } } } - intersect_or_extend(&mut matching_event_ids, &temp); + + intersect_or_extend(&mut matching_sids, &temp); } - // TODO: sort by timestamp and use limit + let mapping = self.mapping.read().await; + + let limit: usize = filter.limit.unwrap_or(matching_sids.len()); + let mut matching_event_ids: Vec = 
Vec::with_capacity(limit); + + for mid in matching_sids.into_iter().take(limit).rev() { + match mapping.get(&mid.sid) { + Some(event_id) => matching_event_ids.push(*event_id), + None => tracing::warn!("Event ID not found for {mid:?}"), + } + } matching_event_ids } async fn query_index( &self, - index: &HashMap>, + index: &HashMap>, keys: &HashSet, - ) -> HashSet + ) -> BTreeSet where K: Eq + Hash, { - let mut result: HashSet = HashSet::new(); + let mut result: BTreeSet = BTreeSet::new(); for key in keys.iter() { if let Some(ids) = index.get(key) { result.extend(ids); @@ -243,15 +318,18 @@ impl DatabaseIndexes { let mut authors_index = self.authors_index.write().await; authors_index.clear(); - let mut created_at_index = self.created_at_index.write().await; - created_at_index.clear(); + /* let mut created_at_index = self.created_at_index.write().await; + created_at_index.clear(); */ } } -fn intersect_or_extend(main: &mut HashSet, second: &HashSet) { +fn intersect_or_extend(main: &mut BTreeSet, other: &BTreeSet) +where + T: Eq + Ord + Copy, +{ if main.is_empty() { - main.extend(second); + main.extend(other); } else { - *main = main.intersection(second).copied().collect(); + *main = main.intersection(other).copied().collect(); } } From ce526b4b8ca445a8366613b8dcff5096e5f9cc79 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Sun, 29 Oct 2023 15:51:50 +0100 Subject: [PATCH 40/98] db: enable queriying of indexes by list of filter --- crates/nostr-sdk-db/src/index.rs | 205 ++++++++++++++++++------------- 1 file changed, 121 insertions(+), 84 deletions(-) diff --git a/crates/nostr-sdk-db/src/index.rs b/crates/nostr-sdk-db/src/index.rs index e20ae4e73..c216f6d6e 100644 --- a/crates/nostr-sdk-db/src/index.rs +++ b/crates/nostr-sdk-db/src/index.rs @@ -85,9 +85,57 @@ impl DatabaseIndexes { return EventIndexResult::default(); } - let should_insert: bool = true; + let mut should_insert: bool = true; + let mut to_discard = HashSet::new(); - // TODO: check if it's a 
[parametrized] replaceable event + let mut mapping = self.mapping.write().await; + let mut kinds_index = self.kinds_index.write().await; + let mut authors_index = self.authors_index.write().await; + + if event.is_replaceable() { + // Query event + let mut matching_sids: BTreeSet = BTreeSet::new(); + let mut kinds = HashSet::with_capacity(1); + let mut authors = HashSet::with_capacity(1); + kinds.insert(event.kind); + authors.insert(event.pubkey); + let temp = self.query_index(&kinds_index, &kinds); + intersect_or_extend(&mut matching_sids, &temp); + let temp = self.query_index(&authors_index, &authors); + intersect_or_extend(&mut matching_sids, &temp); + + if let Some(mid) = matching_sids.into_iter().next() { + match mapping.get(&mid.sid) { + Some(event_id) => { + if mid.timestamp >= event.created_at { + should_insert = false; + } else if mid.timestamp < event.created_at { + to_discard.insert(*event_id); + } + } + None => tracing::warn!("Event ID not found for {mid:?}"), + } + } + } else if event.is_parameterized_replaceable() { + match event.identifier() { + Some(_identifier) => { + should_insert = false; + /* let filter: Filter = Filter::new() + .author(event.pubkey) + .kind(event.kind) + .identifier(identifier); + let res: Vec = self._query(events, vec![filter]).await?; + if let Some(ev) = res.into_iter().next() { + if ev.created_at >= event.created_at { + should_insert = false; + } else if ev.created_at < event.created_at { + events.remove(&ev.id); + } + } */ + } + None => should_insert = false, + } + } if should_insert { let mapping_id = MappingIdentifier { @@ -95,48 +143,36 @@ impl DatabaseIndexes { timestamp: event.created_at, }; - let mut mapping = self.mapping.write().await; mapping.insert(mapping_id.sid, event.id); // Index kind - let mut kinds_index = self.kinds_index.write().await; - self.index_event_kind(&mut kinds_index, mapping_id, event) - .await; + self.index_event_kind(&mut kinds_index, mapping_id, event); // Index author - let mut authors_index = 
self.authors_index.write().await; - self.index_event_author(&mut authors_index, mapping_id, event) - .await; + self.index_event_author(&mut authors_index, mapping_id, event); // Index created at let mut created_at_index = self.created_at_index.write().await; - self.index_event_created_at(&mut created_at_index, mapping_id, event) - .await; + self.index_event_created_at(&mut created_at_index, mapping_id, event); // Index tags let mut tags_index = self.tags_index.write().await; - self.index_event_tags(&mut tags_index, mapping_id, event) - .await; + self.index_event_tags(&mut tags_index, mapping_id, event); } EventIndexResult { to_store: should_insert, - to_discard: HashSet::new(), + to_discard, } } fn next_sid(&self) -> SmallerIdentifier { - let next_id: u64 = self.counter.fetch_add(1, AtomicOrdering::SeqCst); + let next_id = self.counter.fetch_add(1, AtomicOrdering::SeqCst); SmallerIdentifier::new(next_id.to_be_bytes()) } /// Index kind - async fn index_event_kind( - &self, - kinds_index: &mut KindIndex, - mid: MappingIdentifier, - event: &Event, - ) { + fn index_event_kind(&self, kinds_index: &mut KindIndex, mid: MappingIdentifier, event: &Event) { kinds_index .entry(event.kind) .and_modify(|set| { @@ -150,7 +186,7 @@ impl DatabaseIndexes { } /// Index author - async fn index_event_author( + fn index_event_author( &self, authors_index: &mut AuthorIndex, mid: MappingIdentifier, @@ -169,7 +205,7 @@ impl DatabaseIndexes { } /// Index created at - async fn index_event_created_at( + fn index_event_created_at( &self, created_at_index: &mut CreatedAtIndex, mid: MappingIdentifier, @@ -188,12 +224,7 @@ impl DatabaseIndexes { } /// Index tags - async fn index_event_tags( - &self, - tags_index: &mut TagIndex, - mid: MappingIdentifier, - event: &Event, - ) { + fn index_event_tags(&self, tags_index: &mut TagIndex, mid: MappingIdentifier, event: &Event) { for (a, set) in event.build_tags_index().into_iter() { tags_index .entry(a) @@ -210,90 +241,96 @@ impl DatabaseIndexes { /// 
Query #[tracing::instrument(skip_all)] - pub async fn query(&self, filter: &Filter) -> Vec { - if !filter.ids.is_empty() { - return filter.ids.iter().copied().collect(); - } - - if let (Some(since), Some(until)) = (filter.since, filter.until) { - if since > until { - return Vec::new(); - } - } - - let mut matching_sids: BTreeSet = BTreeSet::new(); + pub async fn query(&self, filters: Vec) -> HashSet { + let mut matching_event_ids: HashSet = HashSet::new(); let kinds_index = self.kinds_index.read().await; let authors_index = self.authors_index.read().await; let created_at_index = self.created_at_index.read().await; let tags_index = self.tags_index.read().await; + let mapping = self.mapping.read().await; - if !filter.kinds.is_empty() { - let temp = self.query_index(&kinds_index, &filter.kinds).await; - intersect_or_extend(&mut matching_sids, &temp); - } + for filter in filters.into_iter() { + if !filter.ids.is_empty() { + matching_event_ids.extend(filter.ids.iter().copied()); + continue; + } - if !filter.authors.is_empty() { - let temp = self.query_index(&authors_index, &filter.authors).await; - intersect_or_extend(&mut matching_sids, &temp); - } + if let (Some(since), Some(until)) = (filter.since, filter.until) { + if since > until { + continue; + } + } - if let (Some(since), Some(until)) = (filter.since, filter.until) { - let mut temp = BTreeSet::new(); - for ids in created_at_index.range(since..=until).map(|(_, ids)| ids) { - temp.extend(ids); + let mut matching_sids: BTreeSet = BTreeSet::new(); + + if !filter.kinds.is_empty() { + let temp = self.query_index(&kinds_index, &filter.kinds); + intersect_or_extend(&mut matching_sids, &temp); } - intersect_or_extend(&mut matching_sids, &temp); - } else { - if let Some(since) = filter.since { - let mut temp = BTreeSet::new(); - for (_, ids) in created_at_index.range(since..) 
{ - temp.extend(ids); - } + + if !filter.authors.is_empty() { + let temp = self.query_index(&authors_index, &filter.authors); intersect_or_extend(&mut matching_sids, &temp); } - if let Some(until) = filter.until { + if let (Some(since), Some(until)) = (filter.since, filter.until) { let mut temp = BTreeSet::new(); - for (_, ids) in created_at_index.range(..=until) { + for ids in created_at_index.range(since..=until).map(|(_, ids)| ids) { temp.extend(ids); } intersect_or_extend(&mut matching_sids, &temp); + } else { + if let Some(since) = filter.since { + let mut temp = BTreeSet::new(); + for (_, ids) in created_at_index.range(since..) { + temp.extend(ids); + } + intersect_or_extend(&mut matching_sids, &temp); + } + + if let Some(until) = filter.until { + let mut temp = BTreeSet::new(); + for (_, ids) in created_at_index.range(..=until) { + temp.extend(ids); + } + intersect_or_extend(&mut matching_sids, &temp); + } } - } - if !filter.generic_tags.is_empty() { - let mut temp = BTreeSet::new(); + if !filter.generic_tags.is_empty() { + let mut temp = BTreeSet::new(); - for (tagname, set) in filter.generic_tags.iter() { - if let Some(tag_map) = tags_index.get(tagname) { - for (id, tag_values) in tag_map { - if set.iter().all(|value| tag_values.contains(value)) { - temp.insert(*id); + for (tagname, set) in filter.generic_tags.iter() { + if let Some(tag_map) = tags_index.get(tagname) { + for (id, tag_values) in tag_map { + if set.iter().all(|value| tag_values.contains(value)) { + temp.insert(*id); + } } } } - } - - intersect_or_extend(&mut matching_sids, &temp); - } - let mapping = self.mapping.read().await; + intersect_or_extend(&mut matching_sids, &temp); + } - let limit: usize = filter.limit.unwrap_or(matching_sids.len()); - let mut matching_event_ids: Vec = Vec::with_capacity(limit); + let limit: usize = filter.limit.unwrap_or(matching_sids.len()); + let mut ids: Vec = Vec::with_capacity(limit); - for mid in matching_sids.into_iter().take(limit).rev() { - match 
mapping.get(&mid.sid) { - Some(event_id) => matching_event_ids.push(*event_id), - None => tracing::warn!("Event ID not found for {mid:?}"), + for mid in matching_sids.into_iter().take(limit) { + match mapping.get(&mid.sid) { + Some(event_id) => ids.push(*event_id), + None => tracing::warn!("Event ID not found for {mid:?}"), + } } + + matching_event_ids.extend(ids); } matching_event_ids } - async fn query_index( + fn query_index( &self, index: &HashMap>, keys: &HashSet, @@ -318,8 +355,8 @@ impl DatabaseIndexes { let mut authors_index = self.authors_index.write().await; authors_index.clear(); - /* let mut created_at_index = self.created_at_index.write().await; - created_at_index.clear(); */ + let mut created_at_index = self.created_at_index.write().await; + created_at_index.clear(); } } From a3bbd60674d64dab6556fc2ce987e74c1ddfa800 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Sun, 29 Oct 2023 15:52:55 +0100 Subject: [PATCH 41/98] rocksdb: remove not needed CFs --- crates/nostr-sdk-rocksdb/src/lib.rs | 56 ++--------------------------- 1 file changed, 3 insertions(+), 53 deletions(-) diff --git a/crates/nostr-sdk-rocksdb/src/lib.rs b/crates/nostr-sdk-rocksdb/src/lib.rs index 78db90973..490384ab8 100644 --- a/crates/nostr-sdk-rocksdb/src/lib.rs +++ b/crates/nostr-sdk-rocksdb/src/lib.rs @@ -18,13 +18,7 @@ use rocksdb::{ }; use tokio::sync::RwLock; -mod ops; - -use self::ops::indexes_merge_operator; - const EVENTS_CF: &str = "events"; -const PUBKEY_INDEX_CF: &str = "pubkey_index"; -const KIND_INDEX_CF: &str = "kind_index"; /// RocksDB Nostr Database #[derive(Debug, Clone)] @@ -49,14 +43,7 @@ fn default_opts() -> rocksdb::Options { } fn column_families() -> Vec { - let mut index_opts: Options = default_opts(); - index_opts.set_merge_operator_associative("index_merge_operator", indexes_merge_operator); - - vec![ - ColumnFamilyDescriptor::new(EVENTS_CF, default_opts()), - ColumnFamilyDescriptor::new(PUBKEY_INDEX_CF, index_opts.clone()), - 
ColumnFamilyDescriptor::new(KIND_INDEX_CF, index_opts), - ] + vec![ColumnFamilyDescriptor::new(EVENTS_CF, default_opts())] } impl RocksDatabase { @@ -213,12 +200,7 @@ impl NostrDatabase for RocksDatabase { #[tracing::instrument(skip_all)] async fn query(&self, filters: Vec) -> Result, Self::Err> { - let mut ids_to_get: HashSet = HashSet::new(); - - for filter in filters.iter() { - let ids = self.indexes.query(filter).await; - ids_to_get.extend(ids); - } + let ids = self.indexes.query(filters.clone()).await; let this = self.clone(); tokio::task::spawn_blocking(move || { @@ -226,51 +208,19 @@ impl NostrDatabase for RocksDatabase { let mut events: Vec = Vec::new(); - //let mut counter = 0; - for v in this .db - .batched_multi_get_cf(&cf, ids_to_get, false) + .batched_multi_get_cf(&cf, ids, false) .into_iter() .flatten() .flatten() { - /* if let Some(limit) = filter.limit { - if counter >= limit && limit != 0 { - break; - } - } */ - let event: Event = Event::decode(&v).map_err(DatabaseError::backend)?; if filters.match_event(&event) { events.push(event); } - - //counter += 1; } - /* let iter = this.db.full_iterator_cf(&cf, IteratorMode::Start); - - for i in iter { - if let Ok((_key, value)) = i { - let event: Event = Event::decode(&value).map_err(DatabaseError::backend)?; - if filters.match_event(&event) { - events.push(event); - } - } - } */ - - /* iter.seek_to_first(); - while iter.valid() { - if let Some(value) = iter.value() { - let event: Event = Event::decode(value).map_err(DatabaseError::backend)?; - if filters.match_event(&event) { - events.push(event); - } - }; - iter.next(); - } */ - Ok(events) }) .await From babad78d5a5ef770d806cb4336195de574aeaea5 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Sun, 29 Oct 2023 15:53:23 +0100 Subject: [PATCH 42/98] Add `nostr-sdk-db` and `nostr-sdk-fbs` to `pre-push` script --- .githooks/pre-push | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.githooks/pre-push b/.githooks/pre-push index 3bb6cd118..edc1dcba1 100755 
--- a/.githooks/pre-push +++ b/.githooks/pre-push @@ -11,6 +11,8 @@ buildargs=( "-p nostr-sdk" "-p nostr-sdk --no-default-features" "-p nostr-sdk --features blocking" + "-p nostr-sdk-db" + "-p nostr-sdk-fbs" "-p nostr-ffi" "-p nostr-sdk-ffi" ) From e4d365c63eb7f7bee2bf81f5bd013adf1463d01a Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Sun, 29 Oct 2023 15:54:07 +0100 Subject: [PATCH 43/98] rocksdb: update example --- crates/nostr-sdk-rocksdb/examples/rocksdb.rs | 43 ++++++++++++++------ 1 file changed, 30 insertions(+), 13 deletions(-) diff --git a/crates/nostr-sdk-rocksdb/examples/rocksdb.rs b/crates/nostr-sdk-rocksdb/examples/rocksdb.rs index fa577b289..bed273884 100644 --- a/crates/nostr-sdk-rocksdb/examples/rocksdb.rs +++ b/crates/nostr-sdk-rocksdb/examples/rocksdb.rs @@ -1,7 +1,7 @@ // Copyright (c) 2022-2023 Yuki Kishimoto // Distributed under the MIT software license -// use std::time::Duration; +use std::time::Duration; use nostr::prelude::*; use nostr_sdk_db::NostrDatabase; @@ -17,14 +17,21 @@ async fn main() { let secret_key = SecretKey::from_bech32("nsec1j4c6269y9w0q2er2xjw8sv2ehyrtfxq3jwgdlxj6qfn8z4gjsq5qfvfk99") .unwrap(); - let keys = Keys::new(secret_key); - println!("Pubkey: {}", keys.public_key()); + let keys_a = Keys::new(secret_key); + println!("Pubkey A: {}", keys_a.public_key()); + + let secret_key = + SecretKey::from_bech32("nsec1ufnus6pju578ste3v90xd5m2decpuzpql2295m3sknqcjzyys9ls0qlc85") + .unwrap(); + let keys_b = Keys::new(secret_key); + println!("Pubkey B: {}", keys_b.public_key()); + let database = RocksDatabase::new("./db/rocksdb").unwrap(); database.build_indexes().await.unwrap(); - /* for i in 0..50_000 { + /* for i in 0..100_000 { let event = EventBuilder::new_text_note(format!("Event #{i}"), &[]) - .to_event(&keys) + .to_event(&keys_a) .unwrap(); database.save_event(&event).await.unwrap(); @@ -35,20 +42,24 @@ async fn main() { Tag::PubKey(event.pubkey, None), ], ) - .to_event(&keys) + .to_event(&keys_b) .unwrap(); - 
database.save_event(&event).await.unwrap(); - println!("{}", event.id); - } */ + database.save_event(&event).await.unwrap(); + } - /* for i in 0..10 { + for i in 0..10 { let metadata = Metadata::new().name(format!("Name #{i}")); let event = EventBuilder::set_metadata(metadata) - .to_event(&keys) + .to_event(&keys_a) .unwrap(); database.save_event(&event).await.unwrap(); tokio::time::sleep(Duration::from_secs(1)).await; - } */ + } */ + + /* let event = EventBuilder::new(Kind::Custom(123), "Custom with d tag", &[Tag::Identifier(String::from("myid"))]) + .to_event(&keys) + .unwrap(); + database.save_event(&event).await.unwrap(); */ /* let event_id = EventId::from_hex("b02c1c57a7c5b0e10245df8c26b429ad1a2cbf91d7cada3ecdb524b7e1d984b6") @@ -59,10 +70,16 @@ async fn main() { let events = database .query(vec![Filter::new() .kind(Kind::Metadata) + //.limit(1) //.kind(Kind::Custom(123)) - .author(keys.public_key())]) + //.identifier("myid") + .author(keys_a.public_key())]) .await .unwrap(); //println!("{events:?}"); println!("Got {} events", events.len()); + + loop { + tokio::time::sleep(Duration::from_secs(30)).await + } } From 14edb87a3202f6f9d8acf4fbecb5e69b8f06bcf0 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Fri, 3 Nov 2023 15:46:42 +0100 Subject: [PATCH 44/98] db: improve indexes query --- Cargo.lock | 1 + crates/nostr-sdk-db/Cargo.toml | 1 + crates/nostr-sdk-db/examples/indexes.rs | 76 +++++++++++++++++++++++++ crates/nostr-sdk-db/src/index.rs | 71 ++++++++++++----------- 4 files changed, 117 insertions(+), 32 deletions(-) create mode 100644 crates/nostr-sdk-db/examples/indexes.rs diff --git a/Cargo.lock b/Cargo.lock index 20a544c20..38a70fa14 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1253,6 +1253,7 @@ dependencies = [ "thiserror", "tokio", "tracing", + "tracing-subscriber", ] [[package]] diff --git a/crates/nostr-sdk-db/Cargo.toml b/crates/nostr-sdk-db/Cargo.toml index 3c4459e02..9c78a36a1 100644 --- a/crates/nostr-sdk-db/Cargo.toml +++ 
b/crates/nostr-sdk-db/Cargo.toml @@ -20,3 +20,4 @@ tracing = { workspace = true, features = ["std", "attributes"] } [dev-dependencies] tokio = { workspace = true, features = ["macros", "rt-multi-thread", "time"] } +tracing-subscriber = { workspace = true, features = ["env-filter"] } diff --git a/crates/nostr-sdk-db/examples/indexes.rs b/crates/nostr-sdk-db/examples/indexes.rs new file mode 100644 index 000000000..00fe3871e --- /dev/null +++ b/crates/nostr-sdk-db/examples/indexes.rs @@ -0,0 +1,76 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +use nostr::prelude::*; +use nostr_sdk_db::DatabaseIndexes; +use tracing_subscriber::fmt::format::FmtSpan; + +#[tokio::main] +async fn main() { + tracing_subscriber::fmt::fmt() + .with_span_events(FmtSpan::CLOSE) + .init(); + + let secret_key = + SecretKey::from_bech32("nsec1j4c6269y9w0q2er2xjw8sv2ehyrtfxq3jwgdlxj6qfn8z4gjsq5qfvfk99") + .unwrap(); + let keys_a = Keys::new(secret_key); + + let secret_key = + SecretKey::from_bech32("nsec1ufnus6pju578ste3v90xd5m2decpuzpql2295m3sknqcjzyys9ls0qlc85") + .unwrap(); + let keys_b = Keys::new(secret_key); + + let index = DatabaseIndexes::new(); + + for i in 0..100_000 { + let event = EventBuilder::new_text_note(format!("Event #{i}"), &[]) + .to_event(&keys_a) + .unwrap(); + index.index_event(&event).await; + + let event = EventBuilder::new_text_note( + format!("Reply to event #{i}"), + &[ + Tag::Event(event.id, None, None), + Tag::PubKey(event.pubkey, None), + ], + ) + .to_event(&keys_b) + .unwrap(); + index.index_event(&event).await; + } + + for i in 0..10 { + let metadata = Metadata::new().name(format!("Name #{i}")); + let event = EventBuilder::set_metadata(metadata) + .to_event(&keys_a) + .unwrap(); + index.index_event(&event).await; + } + + for i in 0..50_000 { + let event = EventBuilder::new( + Kind::Custom(123), + "Custom with d tag", + &[Tag::Identifier(format!("myid{i}"))], + ) + .to_event(&keys_a) + .unwrap(); + 
index.index_event(&event).await; + } + + let ids = index + .query(vec![Filter::new() + .kind(Kind::Metadata) + //.limit(1) + //.kind(Kind::Custom(123)) + //.identifier("myid5000") + .author(keys_a.public_key())]) + .await; + println!("Got {} ids", ids.len()); + + /* loop { + tokio::time::sleep(Duration::from_secs(30)).await + } */ +} diff --git a/crates/nostr-sdk-db/src/index.rs b/crates/nostr-sdk-db/src/index.rs index c216f6d6e..3febf1782 100644 --- a/crates/nostr-sdk-db/src/index.rs +++ b/crates/nostr-sdk-db/src/index.rs @@ -94,17 +94,16 @@ impl DatabaseIndexes { if event.is_replaceable() { // Query event - let mut matching_sids: BTreeSet = BTreeSet::new(); let mut kinds = HashSet::with_capacity(1); let mut authors = HashSet::with_capacity(1); kinds.insert(event.kind); authors.insert(event.pubkey); - let temp = self.query_index(&kinds_index, &kinds); - intersect_or_extend(&mut matching_sids, &temp); - let temp = self.query_index(&authors_index, &authors); - intersect_or_extend(&mut matching_sids, &temp); - if let Some(mid) = matching_sids.into_iter().next() { + let res1 = self.query_index(&kinds_index, &kinds); + let res2 = self.query_index(&authors_index, &authors); + let matching_sids: HashSet = multi_intersection(vec![res1, res2]); + + for mid in matching_sids.into_iter() { match mapping.get(&mid.sid) { Some(event_id) => { if mid.timestamp >= event.created_at { @@ -242,17 +241,17 @@ impl DatabaseIndexes { /// Query #[tracing::instrument(skip_all)] pub async fn query(&self, filters: Vec) -> HashSet { - let mut matching_event_ids: HashSet = HashSet::new(); - let kinds_index = self.kinds_index.read().await; let authors_index = self.authors_index.read().await; let created_at_index = self.created_at_index.read().await; let tags_index = self.tags_index.read().await; let mapping = self.mapping.read().await; + let mut matching_event_ids: HashSet = HashSet::new(); + for filter in filters.into_iter() { if !filter.ids.is_empty() { - 
matching_event_ids.extend(filter.ids.iter().copied()); + matching_event_ids.extend(filter.ids); continue; } @@ -262,48 +261,46 @@ impl DatabaseIndexes { } } - let mut matching_sids: BTreeSet = BTreeSet::new(); + let mut sets: Vec> = Vec::new(); if !filter.kinds.is_empty() { - let temp = self.query_index(&kinds_index, &filter.kinds); - intersect_or_extend(&mut matching_sids, &temp); + sets.push(self.query_index(&kinds_index, &filter.kinds)); } if !filter.authors.is_empty() { - let temp = self.query_index(&authors_index, &filter.authors); - intersect_or_extend(&mut matching_sids, &temp); + sets.push(self.query_index(&authors_index, &filter.authors)); } if let (Some(since), Some(until)) = (filter.since, filter.until) { - let mut temp = BTreeSet::new(); + let mut temp: HashSet = HashSet::new(); for ids in created_at_index.range(since..=until).map(|(_, ids)| ids) { temp.extend(ids); } - intersect_or_extend(&mut matching_sids, &temp); + sets.push(temp); } else { if let Some(since) = filter.since { - let mut temp = BTreeSet::new(); + let mut temp: HashSet = HashSet::new(); for (_, ids) in created_at_index.range(since..) 
{ temp.extend(ids); } - intersect_or_extend(&mut matching_sids, &temp); + sets.push(temp); } if let Some(until) = filter.until { - let mut temp = BTreeSet::new(); + let mut temp: HashSet = HashSet::new(); for (_, ids) in created_at_index.range(..=until) { temp.extend(ids); } - intersect_or_extend(&mut matching_sids, &temp); + sets.push(temp); } } if !filter.generic_tags.is_empty() { - let mut temp = BTreeSet::new(); + let mut temp: HashSet = HashSet::new(); for (tagname, set) in filter.generic_tags.iter() { if let Some(tag_map) = tags_index.get(tagname) { - for (id, tag_values) in tag_map { + for (id, tag_values) in tag_map.iter() { if set.iter().all(|value| tag_values.contains(value)) { temp.insert(*id); } @@ -311,15 +308,21 @@ impl DatabaseIndexes { } } - intersect_or_extend(&mut matching_sids, &temp); + sets.push(temp); } + // Intersection + let matching_sids: HashSet = multi_intersection(sets); + let matching_sids: BTreeSet = matching_sids.into_iter().collect(); + + // Limit let limit: usize = filter.limit.unwrap_or(matching_sids.len()); let mut ids: Vec = Vec::with_capacity(limit); + // Get ids for mid in matching_sids.into_iter().take(limit) { - match mapping.get(&mid.sid) { - Some(event_id) => ids.push(*event_id), + match mapping.get(&mid.sid).copied() { + Some(event_id) => ids.push(event_id), None => tracing::warn!("Event ID not found for {mid:?}"), } } @@ -334,11 +337,11 @@ impl DatabaseIndexes { &self, index: &HashMap>, keys: &HashSet, - ) -> BTreeSet + ) -> HashSet where K: Eq + Hash, { - let mut result: BTreeSet = BTreeSet::new(); + let mut result: HashSet = HashSet::new(); for key in keys.iter() { if let Some(ids) = index.get(key) { result.extend(ids); @@ -360,13 +363,17 @@ impl DatabaseIndexes { } } -fn intersect_or_extend(main: &mut BTreeSet, other: &BTreeSet) +fn multi_intersection(mut sets: Vec>) -> HashSet where - T: Eq + Ord + Copy, + T: Eq + Hash, { - if main.is_empty() { - main.extend(other); + if let Some(mut result) = sets.pop() { + if 
!sets.is_empty() { + result.retain(|item| sets.iter().all(|set| set.contains(item))); + } + + result } else { - *main = main.intersection(other).copied().collect(); + HashSet::new() } } From c00032a79dfbbf9b995f1e2740e3b01ab47b39b9 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Fri, 3 Nov 2023 15:47:04 +0100 Subject: [PATCH 45/98] db: add Makefile --- crates/nostr-sdk-db/.gitignore | 3 +++ crates/nostr-sdk-db/Makefile | 2 ++ 2 files changed, 5 insertions(+) create mode 100644 crates/nostr-sdk-db/.gitignore create mode 100644 crates/nostr-sdk-db/Makefile diff --git a/crates/nostr-sdk-db/.gitignore b/crates/nostr-sdk-db/.gitignore new file mode 100644 index 000000000..73c384529 --- /dev/null +++ b/crates/nostr-sdk-db/.gitignore @@ -0,0 +1,3 @@ +*.svg +perf.data +perf.data.old \ No newline at end of file diff --git a/crates/nostr-sdk-db/Makefile b/crates/nostr-sdk-db/Makefile new file mode 100644 index 000000000..499f8182c --- /dev/null +++ b/crates/nostr-sdk-db/Makefile @@ -0,0 +1,2 @@ +graph: + CARGO_PROFILE_RELEASE_DEBUG=true cargo flamegraph --release --example indexes -o flamegraph.svg \ No newline at end of file From b97a622267b2cad94cf866079130c20c1a647441 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Sat, 4 Nov 2023 15:38:00 +0100 Subject: [PATCH 46/98] db: add `AuthorAndKindIndex` --- crates/nostr-sdk-db/examples/indexes.rs | 12 +- crates/nostr-sdk-db/examples/memory.rs | 55 +++- crates/nostr-sdk-db/src/index.rs | 267 +++++++++++-------- crates/nostr-sdk-db/src/lib.rs | 2 +- crates/nostr-sdk-db/src/memory.rs | 80 ++---- crates/nostr-sdk-rocksdb/examples/rocksdb.rs | 28 +- crates/nostr-sdk/src/relay/pool.rs | 3 +- 7 files changed, 249 insertions(+), 198 deletions(-) diff --git a/crates/nostr-sdk-db/examples/indexes.rs b/crates/nostr-sdk-db/examples/indexes.rs index 00fe3871e..30372d26e 100644 --- a/crates/nostr-sdk-db/examples/indexes.rs +++ b/crates/nostr-sdk-db/examples/indexes.rs @@ -41,7 +41,7 @@ async fn main() { 
index.index_event(&event).await; } - for i in 0..10 { + for i in 0..1000 { let metadata = Metadata::new().name(format!("Name #{i}")); let event = EventBuilder::set_metadata(metadata) .to_event(&keys_a) @@ -49,7 +49,7 @@ async fn main() { index.index_event(&event).await; } - for i in 0..50_000 { + for i in 0..500_000 { let event = EventBuilder::new( Kind::Custom(123), "Custom with d tag", @@ -62,15 +62,13 @@ async fn main() { let ids = index .query(vec![Filter::new() - .kind(Kind::Metadata) - //.limit(1) + .kinds(vec![Kind::Metadata]) + //.limit(20) //.kind(Kind::Custom(123)) //.identifier("myid5000") .author(keys_a.public_key())]) .await; println!("Got {} ids", ids.len()); - /* loop { - tokio::time::sleep(Duration::from_secs(30)).await - } */ + loop {} } diff --git a/crates/nostr-sdk-db/examples/memory.rs b/crates/nostr-sdk-db/examples/memory.rs index ea0cb2eee..6e6db2e5d 100644 --- a/crates/nostr-sdk-db/examples/memory.rs +++ b/crates/nostr-sdk-db/examples/memory.rs @@ -1,21 +1,35 @@ // Copyright (c) 2022-2023 Yuki Kishimoto // Distributed under the MIT software license -use std::time::{Duration, Instant}; - +use nostr::prelude::*; use nostr::{EventBuilder, Filter, Keys, Kind, Metadata, Tag}; use nostr_sdk_db::memory::MemoryDatabase; use nostr_sdk_db::{DatabaseOptions, NostrDatabase}; +use tracing_subscriber::fmt::format::FmtSpan; #[tokio::main] async fn main() { - let keys = Keys::generate(); - let opts = DatabaseOptions::default(); + tracing_subscriber::fmt::fmt() + .with_span_events(FmtSpan::CLOSE) + .init(); + + let secret_key = + SecretKey::from_bech32("nsec1j4c6269y9w0q2er2xjw8sv2ehyrtfxq3jwgdlxj6qfn8z4gjsq5qfvfk99") + .unwrap(); + let keys_a = Keys::new(secret_key); + + let secret_key = + SecretKey::from_bech32("nsec1ufnus6pju578ste3v90xd5m2decpuzpql2295m3sknqcjzyys9ls0qlc85") + .unwrap(); + let keys_b = Keys::new(secret_key); + + let mut opts = DatabaseOptions::default(); + opts.events = true; let database = MemoryDatabase::new(opts); - for i in 0..50_000 
{ + for i in 0..100_000 { let event = EventBuilder::new_text_note(format!("Event #{i}"), &[]) - .to_event(&keys) + .to_event(&keys_a) .unwrap(); database.save_event(&event).await.unwrap(); @@ -26,7 +40,7 @@ async fn main() { Tag::PubKey(event.pubkey, None), ], ) - .to_event(&keys) + .to_event(&keys_b) .unwrap(); database.save_event(&event).await.unwrap(); } @@ -34,19 +48,32 @@ async fn main() { for i in 0..10 { let metadata = Metadata::new().name(format!("Name #{i}")); let event = EventBuilder::set_metadata(metadata) - .to_event(&keys) + .to_event(&keys_a) .unwrap(); database.save_event(&event).await.unwrap(); - tokio::time::sleep(Duration::from_secs(1)).await; } - let now = Instant::now(); + for i in 0..500_000 { + let event = EventBuilder::new( + Kind::Custom(123), + "Custom with d tag", + &[Tag::Identifier(format!("myid{i}"))], + ) + .to_event(&keys_a) + .unwrap(); + database.save_event(&event).await.unwrap(); + } + let events = database .query(vec![Filter::new() - .kind(Kind::Metadata) - .author(keys.public_key())]) + .kinds(vec![Kind::Metadata, Kind::Custom(123), Kind::TextNote]) + .limit(20) + //.kind(Kind::Custom(123)) + //.identifier("myid5000") + .author(keys_a.public_key())]) .await .unwrap(); - println!("{events:?}"); - println!("Time: {} ns", now.elapsed().as_nanos()); + println!("Got {} events", events.len()); + + loop {} } diff --git a/crates/nostr-sdk-db/src/index.rs b/crates/nostr-sdk-db/src/index.rs index 3febf1782..960f3859c 100644 --- a/crates/nostr-sdk-db/src/index.rs +++ b/crates/nostr-sdk-db/src/index.rs @@ -3,20 +3,20 @@ //! 
Indexes -use std::cmp::Ordering; +use std::cmp::{Ordering, Reverse}; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use std::hash::Hash; -use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering}; use std::sync::Arc; use nostr::secp256k1::XOnlyPublicKey; use nostr::{Alphabet, Event, EventId, Filter, Kind, Timestamp}; use tokio::sync::RwLock; -type Mapping = HashMap; -type KindIndex = HashMap>; -type AuthorIndex = HashMap>; -type CreatedAtIndex = BTreeMap>; +//type Mapping = HashMap; +type KindIndex = HashMap>; +type AuthorIndex = HashMap>; +type AuthorAndKindIndex = HashMap<(PublicKeyPrefix, Kind), BTreeSet>; +type CreatedAtIndex = BTreeMap>; type TagIndex = HashMap>>; /// Event Index Result @@ -29,18 +29,21 @@ pub struct EventIndexResult { } #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -struct SmallerIdentifier([u8; 8]); - -impl SmallerIdentifier { - pub fn new(sid: [u8; 8]) -> Self { - Self(sid) +struct PublicKeyPrefix([u8; 8]); + +impl From for PublicKeyPrefix { + fn from(pk: XOnlyPublicKey) -> Self { + let pk = pk.serialize(); + let mut prefix = [0u8; 8]; + prefix.copy_from_slice(&pk[..8]); + Self(prefix) } } -#[derive(Debug, Clone, PartialEq, Eq, Hash, Copy)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] struct MappingIdentifier { pub timestamp: Timestamp, - pub sid: SmallerIdentifier, + pub eid: EventId, } impl PartialOrd for MappingIdentifier { @@ -51,24 +54,28 @@ impl PartialOrd for MappingIdentifier { impl Ord for MappingIdentifier { fn cmp(&self, other: &Self) -> Ordering { - let timestamp_cmp = other.timestamp.cmp(&self.timestamp); - if timestamp_cmp != Ordering::Equal { - return timestamp_cmp; + if self.timestamp != other.timestamp { + other.timestamp.cmp(&self.timestamp) + } else { + self.eid.cmp(&other.eid) } - - self.sid.cmp(&other.sid) } } +#[derive(Debug, Default)] +struct DatabaseIndexesInner { + //mapping: Mapping, + kinds_index: KindIndex, + authors_index: AuthorIndex, + author_and_kind_index: 
AuthorAndKindIndex, + created_at_index: CreatedAtIndex, + tags_index: TagIndex, +} + /// Database Indexes #[derive(Debug, Clone, Default)] pub struct DatabaseIndexes { - counter: Arc, - mapping: Arc>, - kinds_index: Arc>, - authors_index: Arc>, - created_at_index: Arc>, - tags_index: Arc>, + inner: Arc>, } impl DatabaseIndexes { @@ -88,9 +95,7 @@ impl DatabaseIndexes { let mut should_insert: bool = true; let mut to_discard = HashSet::new(); - let mut mapping = self.mapping.write().await; - let mut kinds_index = self.kinds_index.write().await; - let mut authors_index = self.authors_index.write().await; + let mut inner = self.inner.write().await; if event.is_replaceable() { // Query event @@ -99,20 +104,42 @@ impl DatabaseIndexes { kinds.insert(event.kind); authors.insert(event.pubkey); - let res1 = self.query_index(&kinds_index, &kinds); - let res2 = self.query_index(&authors_index, &authors); - let matching_sids: HashSet = multi_intersection(vec![res1, res2]); + let res1 = self.query_index(&inner.kinds_index, &kinds.into_iter().collect()); + let res2 = self.query_index( + &inner.authors_index, + &authors.into_iter().map(|pk| pk.into()).collect(), + ); + let matching_sids: BTreeSet<&MappingIdentifier> = multi_intersection(vec![res1, res2]); + + let mut mids_to_discard = HashSet::new(); for mid in matching_sids.into_iter() { - match mapping.get(&mid.sid) { - Some(event_id) => { - if mid.timestamp >= event.created_at { - should_insert = false; - } else if mid.timestamp < event.created_at { - to_discard.insert(*event_id); - } - } - None => tracing::warn!("Event ID not found for {mid:?}"), + if mid.timestamp >= event.created_at { + should_insert = false; + } else if mid.timestamp < event.created_at { + to_discard.insert(mid.eid); + mids_to_discard.insert(*mid); + } + } + + for mid in mids_to_discard.iter() { + if let Some(set) = inner.kinds_index.get_mut(&event.kind) { + set.remove(mid); + } + if let Some(set) = inner.authors_index.get_mut(&event.pubkey.into()) { + 
set.remove(mid); + } + if let Some(set) = inner + .author_and_kind_index + .get_mut(&(event.pubkey.into(), event.kind)) + { + set.remove(mid); + } + if let Some(set) = inner.created_at_index.get_mut(&mid.timestamp) { + set.remove(mid); + } + for (_, map) in inner.tags_index.iter_mut() { + map.remove(mid); } } } else if event.is_parameterized_replaceable() { @@ -138,25 +165,33 @@ impl DatabaseIndexes { if should_insert { let mapping_id = MappingIdentifier { - sid: self.next_sid(), + eid: event.id, timestamp: event.created_at, }; - mapping.insert(mapping_id.sid, event.id); + //inner.mapping.insert(mapping_id.sid, event.id); + + let pk: PublicKeyPrefix = event.pubkey.into(); // Index kind - self.index_event_kind(&mut kinds_index, mapping_id, event); + self.index_event_kind(&mut inner.kinds_index, mapping_id, event.kind); // Index author - self.index_event_author(&mut authors_index, mapping_id, event); + self.index_event_author(&mut inner.authors_index, mapping_id, pk); + + // Index author and kind + self.index_event_author_and_kind( + &mut inner.author_and_kind_index, + mapping_id, + pk, + event.kind, + ); // Index created at - let mut created_at_index = self.created_at_index.write().await; - self.index_event_created_at(&mut created_at_index, mapping_id, event); + self.index_event_created_at(&mut inner.created_at_index, mapping_id, event); // Index tags - let mut tags_index = self.tags_index.write().await; - self.index_event_tags(&mut tags_index, mapping_id, event); + self.index_event_tags(&mut inner.tags_index, mapping_id, event); } EventIndexResult { @@ -165,20 +200,15 @@ impl DatabaseIndexes { } } - fn next_sid(&self) -> SmallerIdentifier { - let next_id = self.counter.fetch_add(1, AtomicOrdering::SeqCst); - SmallerIdentifier::new(next_id.to_be_bytes()) - } - /// Index kind - fn index_event_kind(&self, kinds_index: &mut KindIndex, mid: MappingIdentifier, event: &Event) { + fn index_event_kind(&self, kinds_index: &mut KindIndex, mid: MappingIdentifier, kind: Kind) { 
kinds_index - .entry(event.kind) + .entry(kind) .and_modify(|set| { set.insert(mid); }) .or_insert_with(|| { - let mut set = HashSet::with_capacity(1); + let mut set = BTreeSet::new(); set.insert(mid); set }); @@ -189,15 +219,34 @@ impl DatabaseIndexes { &self, authors_index: &mut AuthorIndex, mid: MappingIdentifier, - event: &Event, + pk: PublicKeyPrefix, ) { authors_index - .entry(event.pubkey) + .entry(pk) .and_modify(|set| { set.insert(mid); }) .or_insert_with(|| { - let mut set = HashSet::with_capacity(1); + let mut set = BTreeSet::new(); + set.insert(mid); + set + }); + } + + fn index_event_author_and_kind( + &self, + author_and_kind_index: &mut AuthorAndKindIndex, + mid: MappingIdentifier, + pk: PublicKeyPrefix, + kind: Kind, + ) { + author_and_kind_index + .entry((pk, kind)) + .and_modify(|set| { + set.insert(mid); + }) + .or_insert_with(|| { + let mut set = BTreeSet::new(); set.insert(mid); set }); @@ -216,7 +265,7 @@ impl DatabaseIndexes { set.insert(mid); }) .or_insert_with(|| { - let mut set = HashSet::with_capacity(1); + let mut set = BTreeSet::new(); set.insert(mid); set }); @@ -241,11 +290,7 @@ impl DatabaseIndexes { /// Query #[tracing::instrument(skip_all)] pub async fn query(&self, filters: Vec) -> HashSet { - let kinds_index = self.kinds_index.read().await; - let authors_index = self.authors_index.read().await; - let created_at_index = self.created_at_index.read().await; - let tags_index = self.tags_index.read().await; - let mapping = self.mapping.read().await; + let inner = self.inner.read().await; let mut matching_event_ids: HashSet = HashSet::new(); @@ -261,34 +306,51 @@ impl DatabaseIndexes { } } - let mut sets: Vec> = Vec::new(); + let mut sets: Vec> = Vec::new(); - if !filter.kinds.is_empty() { - sets.push(self.query_index(&kinds_index, &filter.kinds)); - } + if !filter.kinds.is_empty() && !filter.authors.is_empty() { + let mut set = HashSet::new(); + for author in filter.authors.iter() { + for kind in filter.kinds.iter() { + 
set.insert(((*author).into(), *kind)); + } + } + sets.push(self.query_index(&inner.author_and_kind_index, &set)); + } else { + if !filter.kinds.is_empty() { + sets.push(self.query_index(&inner.kinds_index, &filter.kinds)); + } - if !filter.authors.is_empty() { - sets.push(self.query_index(&authors_index, &filter.authors)); + if !filter.authors.is_empty() { + sets.push(self.query_index( + &inner.authors_index, + &filter.authors.into_iter().map(|pk| pk.into()).collect(), + )); + } } if let (Some(since), Some(until)) = (filter.since, filter.until) { - let mut temp: HashSet = HashSet::new(); - for ids in created_at_index.range(since..=until).map(|(_, ids)| ids) { + let mut temp = BTreeSet::new(); + for ids in inner + .created_at_index + .range(since..=until) + .map(|(_, ids)| ids) + { temp.extend(ids); } sets.push(temp); } else { if let Some(since) = filter.since { - let mut temp: HashSet = HashSet::new(); - for (_, ids) in created_at_index.range(since..) { + let mut temp = BTreeSet::new(); + for (_, ids) in inner.created_at_index.range(since..) 
{ temp.extend(ids); } sets.push(temp); } if let Some(until) = filter.until { - let mut temp: HashSet = HashSet::new(); - for (_, ids) in created_at_index.range(..=until) { + let mut temp = BTreeSet::new(); + for (_, ids) in inner.created_at_index.range(..=until) { temp.extend(ids); } sets.push(temp); @@ -296,13 +358,13 @@ impl DatabaseIndexes { } if !filter.generic_tags.is_empty() { - let mut temp: HashSet = HashSet::new(); + let mut temp = BTreeSet::new(); for (tagname, set) in filter.generic_tags.iter() { - if let Some(tag_map) = tags_index.get(tagname) { + if let Some(tag_map) = inner.tags_index.get(tagname) { for (id, tag_values) in tag_map.iter() { if set.iter().all(|value| tag_values.contains(value)) { - temp.insert(*id); + temp.insert(id); } } } @@ -312,39 +374,31 @@ impl DatabaseIndexes { } // Intersection - let matching_sids: HashSet = multi_intersection(sets); - let matching_sids: BTreeSet = matching_sids.into_iter().collect(); + let matching_sids: BTreeSet<&MappingIdentifier> = multi_intersection(sets); // Limit let limit: usize = filter.limit.unwrap_or(matching_sids.len()); - let mut ids: Vec = Vec::with_capacity(limit); // Get ids - for mid in matching_sids.into_iter().take(limit) { - match mapping.get(&mid.sid).copied() { - Some(event_id) => ids.push(event_id), - None => tracing::warn!("Event ID not found for {mid:?}"), - } - } - - matching_event_ids.extend(ids); + matching_event_ids.extend(matching_sids.into_iter().take(limit).map(|mid| mid.eid)); } matching_event_ids } - fn query_index( + #[tracing::instrument(skip_all)] + fn query_index<'a, K>( &self, - index: &HashMap>, + index: &'a HashMap>, keys: &HashSet, - ) -> HashSet + ) -> BTreeSet<&'a MappingIdentifier> where K: Eq + Hash, { - let mut result: HashSet = HashSet::new(); + let mut result: BTreeSet<&MappingIdentifier> = BTreeSet::new(); for key in keys.iter() { if let Some(ids) = index.get(key) { - result.extend(ids); + result.extend(ids.iter()); } } result @@ -352,28 +406,27 @@ impl 
DatabaseIndexes { /// Clear indexes pub async fn clear(&self) { - let mut kinds_index = self.kinds_index.write().await; - kinds_index.clear(); - - let mut authors_index = self.authors_index.write().await; - authors_index.clear(); - - let mut created_at_index = self.created_at_index.write().await; - created_at_index.clear(); + let mut inner = self.inner.write().await; + inner.kinds_index.clear(); + inner.authors_index.clear(); + inner.created_at_index.clear(); } } -fn multi_intersection(mut sets: Vec>) -> HashSet +#[tracing::instrument(skip_all)] +fn multi_intersection(mut sets: Vec>) -> BTreeSet<&T> where - T: Eq + Hash, + T: Ord, { + // Sort by len (DESC) + sets.sort_by_cached_key(|set| Reverse(set.len())); + if let Some(mut result) = sets.pop() { if !sets.is_empty() { result.retain(|item| sets.iter().all(|set| set.contains(item))); } - result } else { - HashSet::new() + BTreeSet::new() } } diff --git a/crates/nostr-sdk-db/src/lib.rs b/crates/nostr-sdk-db/src/lib.rs index ba2185b5f..78ee46ebe 100644 --- a/crates/nostr-sdk-db/src/lib.rs +++ b/crates/nostr-sdk-db/src/lib.rs @@ -96,7 +96,7 @@ pub trait NostrDatabase: AsyncTraitDeps { /// Get `negentropy` items async fn negentropy_items( &self, - filter: &Filter, + filter: Filter, ) -> Result, Self::Err>; /// Wipe all data diff --git a/crates/nostr-sdk-db/src/memory.rs b/crates/nostr-sdk-db/src/memory.rs index 33125850c..c6ec15484 100644 --- a/crates/nostr-sdk-db/src/memory.rs +++ b/crates/nostr-sdk-db/src/memory.rs @@ -11,7 +11,9 @@ use nostr::{Event, EventId, Filter, FiltersMatchEvent, Timestamp, Url}; use thiserror::Error; use tokio::sync::RwLock; -use crate::{Backend, DatabaseError, DatabaseOptions, NostrDatabase}; +use crate::{ + Backend, DatabaseError, DatabaseIndexes, DatabaseOptions, EventIndexResult, NostrDatabase, +}; /// Memory Database Error #[derive(Debug, Error)] @@ -28,7 +30,8 @@ impl From for DatabaseError { pub struct MemoryDatabase { opts: DatabaseOptions, seen_event_ids: Arc>>>, - events: Arc>>, // 
TODO: order by timestamp (DESC)? + events: Arc>>, + indexes: DatabaseIndexes, } // TODO: add queue field? @@ -46,6 +49,7 @@ impl MemoryDatabase { opts, seen_event_ids: Arc::new(RwLock::new(HashMap::new())), events: Arc::new(RwLock::new(HashMap::new())), + indexes: DatabaseIndexes::new(), } } @@ -72,20 +76,6 @@ impl MemoryDatabase { }); } - async fn _query( - &self, - events: &HashMap, - filters: Vec, - ) -> Result, DatabaseError> { - let mut list: Vec = Vec::new(); - for event in events.values() { - if filters.match_event(event) { - list.push(event.clone()); - } - } - Ok(list) - } - async fn _save_event( &self, events: &mut HashMap, @@ -94,45 +84,18 @@ impl MemoryDatabase { self.event_id_seen(event.id, None).await?; if self.opts.events { - if event.is_expired() || event.is_ephemeral() { - tracing::warn!("Event {} not saved: expired or ephemeral", event.id); - return Ok(false); - } + let EventIndexResult { + to_store, + to_discard, + } = self.indexes.index_event(&event).await; - let mut should_insert: bool = true; + if to_store { + events.insert(event.id, event); - if event.is_replaceable() { - let filter: Filter = Filter::new().author(event.pubkey).kind(event.kind); - let res: Vec = self._query(events, vec![filter]).await?; - if let Some(ev) = res.into_iter().next() { - if ev.created_at >= event.created_at { - should_insert = false; - } else if ev.created_at < event.created_at { - events.remove(&ev.id); - } + for event_id in to_discard.into_iter() { + events.remove(&event_id); } - } else if event.is_parameterized_replaceable() { - match event.identifier() { - Some(identifier) => { - let filter: Filter = Filter::new() - .author(event.pubkey) - .kind(event.kind) - .identifier(identifier); - let res: Vec = self._query(events, vec![filter]).await?; - if let Some(ev) = res.into_iter().next() { - if ev.created_at >= event.created_at { - should_insert = false; - } else if ev.created_at < event.created_at { - events.remove(&ev.id); - } - } - } - None => should_insert = 
false, - } - } - if should_insert { - events.insert(event.id, event); Ok(true) } else { tracing::warn!("Event {} not saved: unknown", event.id); @@ -209,10 +172,21 @@ impl NostrDatabase for MemoryDatabase { } } + #[tracing::instrument(skip_all)] async fn query(&self, filters: Vec) -> Result, Self::Err> { if self.opts.events { + let ids = self.indexes.query(filters.clone()).await; let events = self.events.read().await; - self._query(&events, filters).await + + let mut list: Vec = Vec::new(); + for event_id in ids.into_iter() { + if let Some(event) = events.get(&event_id) { + if filters.match_event(event) { + list.push(event.clone()); + } + } + } + Ok(list) } else { Err(DatabaseError::FeatureDisabled) } @@ -235,7 +209,7 @@ impl NostrDatabase for MemoryDatabase { async fn negentropy_items( &self, - _filter: &Filter, + _filter: Filter, ) -> Result, Self::Err> { Err(DatabaseError::NotSupported) } diff --git a/crates/nostr-sdk-rocksdb/examples/rocksdb.rs b/crates/nostr-sdk-rocksdb/examples/rocksdb.rs index bed273884..ea86c7b06 100644 --- a/crates/nostr-sdk-rocksdb/examples/rocksdb.rs +++ b/crates/nostr-sdk-rocksdb/examples/rocksdb.rs @@ -53,30 +53,28 @@ async fn main() { .to_event(&keys_a) .unwrap(); database.save_event(&event).await.unwrap(); - tokio::time::sleep(Duration::from_secs(1)).await; - } */ + } - /* let event = EventBuilder::new(Kind::Custom(123), "Custom with d tag", &[Tag::Identifier(String::from("myid"))]) - .to_event(&keys) + for i in 0..500_000 { + let event = EventBuilder::new( + Kind::Custom(123), + "Custom with d tag", + &[Tag::Identifier(format!("myid{i}"))], + ) + .to_event(&keys_a) .unwrap(); - database.save_event(&event).await.unwrap(); */ - - /* let event_id = - EventId::from_hex("b02c1c57a7c5b0e10245df8c26b429ad1a2cbf91d7cada3ecdb524b7e1d984b6") - .unwrap(); - let event = database.event_by_id(event_id).await.unwrap(); - println!("{event:?}"); */ + database.save_event(&event).await.unwrap(); + } */ let events = database .query(vec![Filter::new() - 
.kind(Kind::Metadata) - //.limit(1) + .kinds(vec![Kind::Metadata, Kind::Custom(123), Kind::TextNote]) + .limit(20) //.kind(Kind::Custom(123)) - //.identifier("myid") + //.identifier("myid5000") .author(keys_a.public_key())]) .await .unwrap(); - //println!("{events:?}"); println!("Got {} events", events.len()); loop { diff --git a/crates/nostr-sdk/src/relay/pool.rs b/crates/nostr-sdk/src/relay/pool.rs index 83b979c9f..e23aea827 100644 --- a/crates/nostr-sdk/src/relay/pool.rs +++ b/crates/nostr-sdk/src/relay/pool.rs @@ -862,7 +862,8 @@ impl RelayPool { /// Negentropy reconciliation pub async fn reconcile(&self, filter: Filter, timeout: Duration) -> Result<(), Error> { - let items: Vec<(EventId, Timestamp)> = self.database.negentropy_items(&filter).await?; + let items: Vec<(EventId, Timestamp)> = + self.database.negentropy_items(filter.clone()).await?; self.reconcile_with_items(filter, items, timeout).await } From 1e8f1ade007057cf7821525d99115b5b9efdb860 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Sat, 4 Nov 2023 15:38:44 +0100 Subject: [PATCH 47/98] rocksdb: partially complete missing methods for `NostrDatabase` trait --- crates/nostr-sdk-rocksdb/src/lib.rs | 69 ++++++++++++++++++++++++----- 1 file changed, 59 insertions(+), 10 deletions(-) diff --git a/crates/nostr-sdk-rocksdb/src/lib.rs b/crates/nostr-sdk-rocksdb/src/lib.rs index 490384ab8..ae270ecf7 100644 --- a/crates/nostr-sdk-rocksdb/src/lib.rs +++ b/crates/nostr-sdk-rocksdb/src/lib.rs @@ -19,6 +19,7 @@ use rocksdb::{ use tokio::sync::RwLock; const EVENTS_CF: &str = "events"; +//const EVENTS_SEEN_BY_RELAYS: &str = "event-seen-by-relays"; /// RocksDB Nostr Database #[derive(Debug, Clone)] @@ -34,8 +35,7 @@ fn default_opts() -> rocksdb::Options { opts.set_max_open_files(100); opts.set_compaction_style(DBCompactionStyle::Level); opts.set_compression_type(DBCompressionType::Snappy); - opts.set_target_file_size_base(256 << 20); - opts.set_write_buffer_size(256 << 20); + opts.set_write_buffer_size(5 * 1024 * 
1024); // 10 MB opts.set_enable_write_thread_adaptive_yield(true); opts.set_disable_auto_compactions(false); opts.increase_parallelism(2); @@ -112,7 +112,7 @@ impl NostrDatabase for RocksDatabase { DatabaseOptions::default() } - #[tracing::instrument(skip_all)] + #[tracing::instrument(skip_all, level = "trace")] async fn save_event(&self, event: &Event) -> Result { // Index event let EventIndexResult { @@ -153,8 +153,9 @@ impl NostrDatabase for RocksDatabase { } } - async fn has_event_already_been_seen(&self, _event_id: EventId) -> Result { - todo!() + async fn has_event_already_been_seen(&self, event_id: EventId) -> Result { + let cf = self.cf_handle(EVENTS_CF)?; + Ok(self.db.key_may_exist_cf(&cf, event_id.as_bytes())) } async fn event_id_seen( @@ -227,18 +228,66 @@ impl NostrDatabase for RocksDatabase { .map_err(DatabaseError::backend)? } - async fn event_ids_by_filters(&self, _filters: Vec) -> Result, Self::Err> { - todo!() + async fn event_ids_by_filters(&self, filters: Vec) -> Result, Self::Err> { + let ids = self.indexes.query(filters.clone()).await; + + let this = self.clone(); + tokio::task::spawn_blocking(move || { + let cf = this.cf_handle(EVENTS_CF)?; + + let mut event_ids: Vec = Vec::new(); + + for v in this + .db + .batched_multi_get_cf(&cf, ids, false) + .into_iter() + .flatten() + .flatten() + { + let event: Event = Event::decode(&v).map_err(DatabaseError::backend)?; + if filters.match_event(&event) { + event_ids.push(event.id); + } + } + + Ok(event_ids) + }) + .await + .map_err(DatabaseError::backend)? 
} async fn negentropy_items( &self, - _filter: &Filter, + filter: Filter, ) -> Result, Self::Err> { - todo!() + let ids = self.indexes.query(vec![filter.clone()]).await; + + let this = self.clone(); + tokio::task::spawn_blocking(move || { + let cf = this.cf_handle(EVENTS_CF)?; + + let mut event_ids: Vec<(EventId, Timestamp)> = Vec::new(); + + for v in this + .db + .batched_multi_get_cf(&cf, ids, false) + .into_iter() + .flatten() + .flatten() + { + let event: Event = Event::decode(&v).map_err(DatabaseError::backend)?; + if filter.match_event(&event) { + event_ids.push((event.id, event.created_at)); + } + } + + Ok(event_ids) + }) + .await + .map_err(DatabaseError::backend)? } async fn wipe(&self) -> Result<(), Self::Err> { - todo!() + Err(DatabaseError::NotSupported) } } From e9100a93b41de276523733217a52aee48c2b4d70 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Sun, 5 Nov 2023 15:54:40 +0100 Subject: [PATCH 48/98] nostr: add `TagIndexes` struct --- crates/nostr/src/event/mod.rs | 29 +---- crates/nostr/src/event/tag/indexes.rs | 102 ++++++++++++++++++ crates/nostr/src/event/{tag.rs => tag/mod.rs} | 3 + crates/nostr/src/lib.rs | 2 +- crates/nostr/src/message/subscription.rs | 6 +- 5 files changed, 114 insertions(+), 28 deletions(-) create mode 100644 crates/nostr/src/event/tag/indexes.rs rename crates/nostr/src/event/{tag.rs => tag/mod.rs} (99%) diff --git a/crates/nostr/src/event/mod.rs b/crates/nostr/src/event/mod.rs index e6641654a..1379c3bfc 100644 --- a/crates/nostr/src/event/mod.rs +++ b/crates/nostr/src/event/mod.rs @@ -4,13 +4,9 @@ //! 
Event -#[cfg(not(feature = "std"))] -use alloc::collections::{BTreeMap as AllocMap, BTreeSet as AllocSet}; use alloc::string::String; use alloc::vec::Vec; use core::fmt; -#[cfg(feature = "std")] -use std::collections::{HashMap as AllocMap, HashSet as AllocSet}; use bitcoin::secp256k1::schnorr::Signature; use bitcoin::secp256k1::{self, Message, Secp256k1, Verification, XOnlyPublicKey}; @@ -27,14 +23,14 @@ pub use self::builder::EventBuilder; pub use self::id::EventId; pub use self::kind::Kind; pub use self::partial::{MissingPartialEvent, PartialEvent}; -pub use self::tag::{Marker, Tag, TagKind}; +pub use self::tag::{Marker, Tag, TagIndexValues, TagIndexes, TagKind}; pub use self::unsigned::UnsignedEvent; #[cfg(feature = "std")] use crate::types::time::Instant; use crate::types::time::TimeSupplier; #[cfg(feature = "std")] use crate::SECP256K1; -use crate::{Alphabet, JsonUtil, Timestamp}; +use crate::{JsonUtil, Timestamp}; /// [`Event`] error #[derive(Debug)] @@ -288,25 +284,8 @@ impl Event { } /// Build tags index - pub fn build_tags_index(&self) -> AllocMap> { - fn single_char_tagname(tagname: &str) -> Option { - tagname - .chars() - .next() - .and_then(|first| Alphabet::try_from(first).ok()) - } - - self.tags - .iter() - .map(|t| t.as_vec()) - .filter(|t| t.len() > 1) - .filter_map(|t| { - single_char_tagname(&t[0]).map(|tagnamechar| (tagnamechar, t[1].clone())) - }) - .fold(AllocMap::new(), |mut idx, (tagnamechar, tagval)| { - idx.entry(tagnamechar).or_default().insert(tagval); - idx - }) + pub fn build_tags_index(&self) -> TagIndexes { + TagIndexes::from(self.tags.iter().map(|t| t.as_vec())) } } diff --git a/crates/nostr/src/event/tag/indexes.rs b/crates/nostr/src/event/tag/indexes.rs new file mode 100644 index 000000000..4af7df2a7 --- /dev/null +++ b/crates/nostr/src/event/tag/indexes.rs @@ -0,0 +1,102 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +//! 
Tag Indexes + +use alloc::string::String; +use alloc::vec::Vec; + +#[cfg(not(feature = "std"))] +use alloc::collections::{BTreeMap as AllocMap, BTreeSet as AllocSet}; +use core::ops::{Deref, DerefMut}; +#[cfg(feature = "std")] +use std::collections::{HashMap as AllocMap, HashSet as AllocSet}; + +use bitcoin::hashes::sha256::Hash as Sha256Hash; +use bitcoin::hashes::Hash; + +use crate::Alphabet; + +/// Tag Index Value Size +pub const TAG_INDEX_VALUE_SIZE: usize = 8; + +/// Tag Indexes +#[derive(Debug, Default, PartialEq, Eq)] +pub struct TagIndexes { + inner: AllocMap, +} + +impl Deref for TagIndexes { + type Target = AllocMap; + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl DerefMut for TagIndexes { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} + +impl From for TagIndexes +where + I: Iterator>, + S: AsRef, +{ + fn from(iter: I) -> Self { + let mut tag_index: TagIndexes = TagIndexes::default(); + for t in iter.filter(|t| t.len() > 1) { + if let Some(tagnamechar) = single_char_tagname(t[0].as_ref()) { + let mut inner: [u8; TAG_INDEX_VALUE_SIZE] = [0u8; TAG_INDEX_VALUE_SIZE]; + let hash = Sha256Hash::hash(t[1].as_ref().as_bytes()); + inner.copy_from_slice(&hash[..TAG_INDEX_VALUE_SIZE]); + tag_index.entry(tagnamechar).or_default().insert(inner); + } + } + tag_index + } +} + +#[inline] +fn single_char_tagname(tagname: &str) -> Option { + tagname + .chars() + .next() + .and_then(|first| Alphabet::try_from(first).ok()) +} + +/// Tag Index Values +#[derive(Debug, Default, PartialEq, Eq)] +pub struct TagIndexValues { + inner: AllocSet<[u8; TAG_INDEX_VALUE_SIZE]>, +} + +impl Deref for TagIndexValues { + type Target = AllocSet<[u8; TAG_INDEX_VALUE_SIZE]>; + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl DerefMut for TagIndexValues { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} + +impl From<&AllocSet> for TagIndexValues { + fn from(value: &AllocSet) -> Self { + Self { + inner: value + 
.iter() + .map(|s| { + let mut inner = [0u8; TAG_INDEX_VALUE_SIZE]; + let hash = Sha256Hash::hash(s.as_bytes()); + inner.copy_from_slice(&hash[..TAG_INDEX_VALUE_SIZE]); + inner + }) + .collect(), + } + } +} diff --git a/crates/nostr/src/event/tag.rs b/crates/nostr/src/event/tag/mod.rs similarity index 99% rename from crates/nostr/src/event/tag.rs rename to crates/nostr/src/event/tag/mod.rs index 747636741..74fbe4d1c 100644 --- a/crates/nostr/src/event/tag.rs +++ b/crates/nostr/src/event/tag/mod.rs @@ -18,6 +18,9 @@ use serde::ser::SerializeSeq; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use url_fork::{ParseError, Url}; +pub mod indexes; + +pub use self::indexes::{TagIndexValues, TagIndexes}; use super::id::{self, EventId}; use crate::nips::nip26::{Conditions, Error as Nip26Error}; use crate::nips::nip48::Protocol; diff --git a/crates/nostr/src/lib.rs b/crates/nostr/src/lib.rs index c144c961e..6261fc5c6 100644 --- a/crates/nostr/src/lib.rs +++ b/crates/nostr/src/lib.rs @@ -50,7 +50,7 @@ pub mod util; pub use self::event::tag::{ ExternalIdentity, HttpMethod, Identity, ImageDimensions, Marker, RelayMetadata, Report, Tag, - TagKind, + TagIndexValues, TagIndexes, TagKind, }; pub use self::event::{ Event, EventBuilder, EventId, Kind, MissingPartialEvent, PartialEvent, UnsignedEvent, diff --git a/crates/nostr/src/message/subscription.rs b/crates/nostr/src/message/subscription.rs index 1a809bf13..5358f217a 100644 --- a/crates/nostr/src/message/subscription.rs +++ b/crates/nostr/src/message/subscription.rs @@ -24,6 +24,7 @@ use serde::ser::{SerializeMap, Serializer}; use serde::{Deserialize, Serialize}; use serde_json::Value; +use crate::event::{TagIndexValues, TagIndexes}; use crate::{Event, EventId, JsonUtil, Kind, Timestamp}; /// Alphabet Error @@ -588,10 +589,11 @@ impl Filter { return true; } - let idx: AllocMap> = event.build_tags_index(); + let idx: TagIndexes = event.build_tags_index(); self.generic_tags.iter().all(|(tagname, set)| { + let set = 
TagIndexValues::from(set); idx.get(tagname) - .map(|valset| valset.intersection(set).count() > 0) + .map(|valset| valset.intersection(&set).count() > 0) .unwrap_or(false) }) } From 2916703f1d31965e2a4da46b1e0fd81ddb5917ee Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Sun, 5 Nov 2023 15:54:59 +0100 Subject: [PATCH 49/98] nostr: add `expiration` method to `Event` --- crates/nostr/src/event/mod.rs | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/crates/nostr/src/event/mod.rs b/crates/nostr/src/event/mod.rs index 1379c3bfc..9edc68d08 100644 --- a/crates/nostr/src/event/mod.rs +++ b/crates/nostr/src/event/mod.rs @@ -173,6 +173,16 @@ impl Event { .map_err(|_| Error::InvalidSignature) } + /// Get [`Timestamp`] expiration if set + pub fn expiration(&self) -> Option<&Timestamp> { + for tag in self.tags.iter() { + if let Tag::Expiration(timestamp) = tag { + return Some(timestamp); + } + } + None + } + /// Returns `true` if the event has an expiration tag that is expired. /// If an event has no `Expiration` tag, then it will return `false`. 
/// @@ -191,11 +201,9 @@ impl Event { where T: TimeSupplier, { - let now: Timestamp = Timestamp::now_with_supplier(supplier); - for tag in self.tags.iter() { - if let Tag::Expiration(timestamp) = tag { - return timestamp < &now; - } + if let Some(timestamp) = self.expiration() { + let now: Timestamp = Timestamp::now_with_supplier(supplier); + return timestamp < &now; } false } From 0f8fa637520e499112e91e6adde3cfeb284c9d2a Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Sun, 5 Nov 2023 16:02:29 +0100 Subject: [PATCH 50/98] fbs: remove `index.fbs` --- crates/nostr-sdk-fbs/fbs/index.fbs | 11 - crates/nostr-sdk-fbs/src/index_generated.rs | 288 -------------------- crates/nostr-sdk-fbs/src/lib.rs | 45 +-- 3 files changed, 8 insertions(+), 336 deletions(-) delete mode 100644 crates/nostr-sdk-fbs/fbs/index.fbs delete mode 100644 crates/nostr-sdk-fbs/src/index_generated.rs diff --git a/crates/nostr-sdk-fbs/fbs/index.fbs b/crates/nostr-sdk-fbs/fbs/index.fbs deleted file mode 100644 index 03c965768..000000000 --- a/crates/nostr-sdk-fbs/fbs/index.fbs +++ /dev/null @@ -1,11 +0,0 @@ -namespace IndexFbs; - -struct Fixed32Bytes { - val: [ubyte:32]; -} - -table IndexSet { - data: [Fixed32Bytes]; -} - -root_type IndexSet; \ No newline at end of file diff --git a/crates/nostr-sdk-fbs/src/index_generated.rs b/crates/nostr-sdk-fbs/src/index_generated.rs deleted file mode 100644 index 3aae754e3..000000000 --- a/crates/nostr-sdk-fbs/src/index_generated.rs +++ /dev/null @@ -1,288 +0,0 @@ -// automatically generated by the FlatBuffers compiler, do not modify - -// @generated - -use core::cmp::Ordering; -use core::mem; - -extern crate flatbuffers; -use self::flatbuffers::{EndianScalar, Follow}; - -#[allow(unused_imports, dead_code)] -pub mod index_fbs { - - use core::cmp::Ordering; - use core::mem; - - extern crate flatbuffers; - use self::flatbuffers::{EndianScalar, Follow}; - - // struct Fixed32Bytes, aligned to 1 - #[repr(transparent)] - #[derive(Clone, Copy, PartialEq)] - pub 
struct Fixed32Bytes(pub [u8; 32]); - impl Default for Fixed32Bytes { - fn default() -> Self { - Self([0; 32]) - } - } - impl core::fmt::Debug for Fixed32Bytes { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - f.debug_struct("Fixed32Bytes") - .field("val", &self.val()) - .finish() - } - } - - impl flatbuffers::SimpleToVerifyInSlice for Fixed32Bytes {} - impl<'a> flatbuffers::Follow<'a> for Fixed32Bytes { - type Inner = &'a Fixed32Bytes; - #[inline] - unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { - <&'a Fixed32Bytes>::follow(buf, loc) - } - } - impl<'a> flatbuffers::Follow<'a> for &'a Fixed32Bytes { - type Inner = &'a Fixed32Bytes; - #[inline] - unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { - flatbuffers::follow_cast_ref::(buf, loc) - } - } - impl<'b> flatbuffers::Push for Fixed32Bytes { - type Output = Fixed32Bytes; - #[inline] - unsafe fn push(&self, dst: &mut [u8], _written_len: usize) { - let src = ::core::slice::from_raw_parts( - self as *const Fixed32Bytes as *const u8, - Self::size(), - ); - dst.copy_from_slice(src); - } - } - - impl<'a> flatbuffers::Verifiable for Fixed32Bytes { - #[inline] - fn run_verifier( - v: &mut flatbuffers::Verifier, - pos: usize, - ) -> Result<(), flatbuffers::InvalidFlatbuffer> { - use self::flatbuffers::Verifiable; - v.in_buffer::(pos) - } - } - - impl<'a> Fixed32Bytes { - #[allow(clippy::too_many_arguments)] - pub fn new(val: &[u8; 32]) -> Self { - let mut s = Self([0; 32]); - s.set_val(val); - s - } - - pub fn val(&'a self) -> flatbuffers::Array<'a, u8, 32> { - // Safety: - // Created from a valid Table for this object - // Which contains a valid array in this slot - unsafe { flatbuffers::Array::follow(&self.0, 0) } - } - - pub fn set_val(&mut self, items: &[u8; 32]) { - // Safety: - // Created from a valid Table for this object - // Which contains a valid array in this slot - unsafe { flatbuffers::emplace_scalar_array(&mut self.0, 0, items) }; - } - } - - pub enum 
IndexSetOffset {} - #[derive(Copy, Clone, PartialEq)] - - pub struct IndexSet<'a> { - pub _tab: flatbuffers::Table<'a>, - } - - impl<'a> flatbuffers::Follow<'a> for IndexSet<'a> { - type Inner = IndexSet<'a>; - #[inline] - unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { - Self { - _tab: flatbuffers::Table::new(buf, loc), - } - } - } - - impl<'a> IndexSet<'a> { - pub const VT_DATA: flatbuffers::VOffsetT = 4; - - #[inline] - pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { - IndexSet { _tab: table } - } - #[allow(unused_mut)] - pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( - _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, - args: &'args IndexSetArgs<'args>, - ) -> flatbuffers::WIPOffset> { - let mut builder = IndexSetBuilder::new(_fbb); - if let Some(x) = args.data { - builder.add_data(x); - } - builder.finish() - } - - #[inline] - pub fn data(&self) -> Option> { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { - self._tab - .get::>>( - IndexSet::VT_DATA, - None, - ) - } - } - } - - impl flatbuffers::Verifiable for IndexSet<'_> { - #[inline] - fn run_verifier( - v: &mut flatbuffers::Verifier, - pos: usize, - ) -> Result<(), flatbuffers::InvalidFlatbuffer> { - use self::flatbuffers::Verifiable; - v.visit_table(pos)? - .visit_field::>>( - "data", - Self::VT_DATA, - false, - )? 
- .finish(); - Ok(()) - } - } - pub struct IndexSetArgs<'a> { - pub data: Option>>, - } - impl<'a> Default for IndexSetArgs<'a> { - #[inline] - fn default() -> Self { - IndexSetArgs { data: None } - } - } - - pub struct IndexSetBuilder<'a: 'b, 'b> { - fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, - start_: flatbuffers::WIPOffset, - } - impl<'a: 'b, 'b> IndexSetBuilder<'a, 'b> { - #[inline] - pub fn add_data( - &mut self, - data: flatbuffers::WIPOffset>, - ) { - self.fbb_ - .push_slot_always::>(IndexSet::VT_DATA, data); - } - #[inline] - pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> IndexSetBuilder<'a, 'b> { - let start = _fbb.start_table(); - IndexSetBuilder { - fbb_: _fbb, - start_: start, - } - } - #[inline] - pub fn finish(self) -> flatbuffers::WIPOffset> { - let o = self.fbb_.end_table(self.start_); - flatbuffers::WIPOffset::new(o.value()) - } - } - - impl core::fmt::Debug for IndexSet<'_> { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - let mut ds = f.debug_struct("IndexSet"); - ds.field("data", &self.data()); - ds.finish() - } - } - #[inline] - /// Verifies that a buffer of bytes contains a `IndexSet` - /// and returns it. - /// Note that verification is still experimental and may not - /// catch every error, or be maximally performant. For the - /// previous, unchecked, behavior use - /// `root_as_index_set_unchecked`. - pub fn root_as_index_set(buf: &[u8]) -> Result { - flatbuffers::root::(buf) - } - #[inline] - /// Verifies that a buffer of bytes contains a size prefixed - /// `IndexSet` and returns it. - /// Note that verification is still experimental and may not - /// catch every error, or be maximally performant. For the - /// previous, unchecked, behavior use - /// `size_prefixed_root_as_index_set_unchecked`. 
- pub fn size_prefixed_root_as_index_set( - buf: &[u8], - ) -> Result { - flatbuffers::size_prefixed_root::(buf) - } - #[inline] - /// Verifies, with the given options, that a buffer of bytes - /// contains a `IndexSet` and returns it. - /// Note that verification is still experimental and may not - /// catch every error, or be maximally performant. For the - /// previous, unchecked, behavior use - /// `root_as_index_set_unchecked`. - pub fn root_as_index_set_with_opts<'b, 'o>( - opts: &'o flatbuffers::VerifierOptions, - buf: &'b [u8], - ) -> Result, flatbuffers::InvalidFlatbuffer> { - flatbuffers::root_with_opts::>(opts, buf) - } - #[inline] - /// Verifies, with the given verifier options, that a buffer of - /// bytes contains a size prefixed `IndexSet` and returns - /// it. Note that verification is still experimental and may not - /// catch every error, or be maximally performant. For the - /// previous, unchecked, behavior use - /// `root_as_index_set_unchecked`. - pub fn size_prefixed_root_as_index_set_with_opts<'b, 'o>( - opts: &'o flatbuffers::VerifierOptions, - buf: &'b [u8], - ) -> Result, flatbuffers::InvalidFlatbuffer> { - flatbuffers::size_prefixed_root_with_opts::>(opts, buf) - } - #[inline] - /// Assumes, without verification, that a buffer of bytes contains a IndexSet and returns it. - /// # Safety - /// Callers must trust the given bytes do indeed contain a valid `IndexSet`. - pub unsafe fn root_as_index_set_unchecked(buf: &[u8]) -> IndexSet { - flatbuffers::root_unchecked::(buf) - } - #[inline] - /// Assumes, without verification, that a buffer of bytes contains a size prefixed IndexSet and returns it. - /// # Safety - /// Callers must trust the given bytes do indeed contain a valid size prefixed `IndexSet`. 
- pub unsafe fn size_prefixed_root_as_index_set_unchecked(buf: &[u8]) -> IndexSet { - flatbuffers::size_prefixed_root_unchecked::(buf) - } - #[inline] - pub fn finish_index_set_buffer<'a, 'b>( - fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>, - root: flatbuffers::WIPOffset>, - ) { - fbb.finish(root, None); - } - - #[inline] - pub fn finish_size_prefixed_index_set_buffer<'a, 'b>( - fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>, - root: flatbuffers::WIPOffset>, - ) { - fbb.finish_size_prefixed(root, None); - } -} // pub mod IndexFbs diff --git a/crates/nostr-sdk-fbs/src/lib.rs b/crates/nostr-sdk-fbs/src/lib.rs index f1339d3cd..ded882570 100644 --- a/crates/nostr-sdk-fbs/src/lib.rs +++ b/crates/nostr-sdk-fbs/src/lib.rs @@ -3,8 +3,6 @@ //! Nostr SDK Flatbuffers -use std::collections::HashSet; - pub use flatbuffers::FlatBufferBuilder; use flatbuffers::InvalidFlatbuffer; use nostr::secp256k1::schnorr::Signature; @@ -14,11 +12,8 @@ use thiserror::Error; #[allow(unused_imports, dead_code, clippy::all)] mod event_generated; -#[allow(unused_imports, dead_code, clippy::all)] -mod index_generated; -use self::event_generated::event_fbs; -use self::index_generated::index_fbs; +pub use self::event_generated::event_fbs; #[derive(Debug, Error)] pub enum Error { @@ -34,12 +29,15 @@ pub enum Error { NotFound, } -pub trait FlatBufferUtils: Sized { +pub trait FlatBufferEncode { fn encode<'a>(&self, fbb: &'a mut FlatBufferBuilder) -> &'a [u8]; +} + +pub trait FlatBufferDecode: Sized { fn decode(buf: &[u8]) -> Result; } -impl FlatBufferUtils for Event { +impl FlatBufferEncode for Event { #[tracing::instrument(skip_all, level = "trace")] fn encode<'a>(&self, fbb: &'a mut FlatBufferBuilder) -> &'a [u8] { fbb.reset(); @@ -78,7 +76,9 @@ impl FlatBufferUtils for Event { fbb.finished_data() } +} +impl FlatBufferDecode for Event { #[tracing::instrument(skip_all, level = "trace")] fn decode(buf: &[u8]) -> Result { let ev = event_fbs::root_as_event(buf)?; @@ -103,32 +103,3 @@ impl 
FlatBufferUtils for Event { }) } } - -impl FlatBufferUtils for HashSet<[u8; 32]> { - #[tracing::instrument(skip_all, level = "trace")] - fn encode<'a>(&self, fbb: &'a mut FlatBufferBuilder) -> &'a [u8] { - fbb.reset(); - - let list: Vec = - self.iter().map(index_fbs::Fixed32Bytes::new).collect(); - let args = index_fbs::IndexSetArgs { - data: Some(fbb.create_vector(&list)), - }; - - let offset = index_fbs::IndexSet::create(fbb, &args); - - index_fbs::finish_index_set_buffer(fbb, offset); - - fbb.finished_data() - } - - #[tracing::instrument(skip_all, level = "trace")] - fn decode(buf: &[u8]) -> Result { - Ok(index_fbs::root_as_index_set(buf)? - .data() - .ok_or(Error::NotFound)? - .into_iter() - .map(|bytes| bytes.0) - .collect()) - } -} From 37f5220252ad25dec26de03003782e9543e50e65 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Sun, 5 Nov 2023 16:08:41 +0100 Subject: [PATCH 51/98] db: rework `DatabaseIndexes` --- Cargo.lock | 1 + crates/nostr-sdk-db/Cargo.toml | 5 + crates/nostr-sdk-db/examples/indexes.rs | 4 +- crates/nostr-sdk-db/src/index.rs | 477 ++++++++---------------- 4 files changed, 166 insertions(+), 321 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 38a70fa14..d91dea8f1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1250,6 +1250,7 @@ version = "0.1.0" dependencies = [ "async-trait", "nostr", + "nostr-sdk-fbs", "thiserror", "tokio", "tracing", diff --git a/crates/nostr-sdk-db/Cargo.toml b/crates/nostr-sdk-db/Cargo.toml index 9c78a36a1..040cd7d77 100644 --- a/crates/nostr-sdk-db/Cargo.toml +++ b/crates/nostr-sdk-db/Cargo.toml @@ -11,9 +11,14 @@ readme = "README.md" rust-version.workspace = true keywords = ["nostr", "sdk", "db"] +[features] +default = [] +flatbuffers = ["dep:nostr-sdk-fbs"] + [dependencies] async-trait = { workspace = true } nostr = { workspace = true, features = ["std"] } +nostr-sdk-fbs = { version = "0.1", path = "../nostr-sdk-fbs", optional = true } thiserror = { workspace = true } tokio = { workspace = true, features = 
["sync"] } tracing = { workspace = true, features = ["std", "attributes"] } diff --git a/crates/nostr-sdk-db/examples/indexes.rs b/crates/nostr-sdk-db/examples/indexes.rs index 30372d26e..cdc66ea84 100644 --- a/crates/nostr-sdk-db/examples/indexes.rs +++ b/crates/nostr-sdk-db/examples/indexes.rs @@ -62,8 +62,8 @@ async fn main() { let ids = index .query(vec![Filter::new() - .kinds(vec![Kind::Metadata]) - //.limit(20) + .kinds(vec![Kind::Metadata, Kind::Custom(123), Kind::TextNote]) + .limit(20) //.kind(Kind::Custom(123)) //.identifier("myid5000") .author(keys_a.public_key())]) diff --git a/crates/nostr-sdk-db/src/index.rs b/crates/nostr-sdk-db/src/index.rs index 960f3859c..069fb2893 100644 --- a/crates/nostr-sdk-db/src/index.rs +++ b/crates/nostr-sdk-db/src/index.rs @@ -3,43 +3,130 @@ //! Indexes -use std::cmp::{Ordering, Reverse}; -use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; -use std::hash::Hash; +use std::cmp::Ordering; +use std::collections::{BTreeSet, HashSet}; use std::sync::Arc; use nostr::secp256k1::XOnlyPublicKey; -use nostr::{Alphabet, Event, EventId, Filter, Kind, Timestamp}; +use nostr::{Event, EventId, Filter, Kind, TagIndexValues, TagIndexes, Timestamp}; +#[cfg(feature = "flatbuffers")] +use nostr_sdk_fbs::{event_fbs, Error as FlatBuffersError, FlatBufferDecode}; use tokio::sync::RwLock; -//type Mapping = HashMap; -type KindIndex = HashMap>; -type AuthorIndex = HashMap>; -type AuthorAndKindIndex = HashMap<(PublicKeyPrefix, Kind), BTreeSet>; -type CreatedAtIndex = BTreeMap>; -type TagIndex = HashMap>>; +const PUBLIC_KEY_PREFIX_SIZE: usize = 8; -/// Event Index Result -#[derive(Debug, Clone, Default, PartialEq, Eq)] -pub struct EventIndexResult { - /// Handled event should be stored into database? 
- pub to_store: bool, - /// List of events that should be removed from database - pub to_discard: HashSet, +/// Event Index +#[derive(Debug, PartialEq, Eq)] +pub struct EventIndex { + created_at: Timestamp, + event_id: EventId, + pubkey: PublicKeyPrefix, + kind: Kind, + tags: TagIndexes, +} + +impl PartialOrd for EventIndex { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for EventIndex { + fn cmp(&self, other: &Self) -> Ordering { + if self.created_at != other.created_at { + other.created_at.cmp(&self.created_at) + } else { + self.event_id.cmp(&other.event_id) + } + } +} + +impl From<&Event> for EventIndex { + fn from(e: &Event) -> Self { + Self { + created_at: e.created_at, + event_id: e.id, + pubkey: PublicKeyPrefix::from(e.pubkey), + kind: e.kind, + tags: e.build_tags_index(), + } + } +} + +impl EventIndex { + fn filter_tags_match(&self, filter: &Filter) -> bool { + if filter.generic_tags.is_empty() || self.tags.is_empty() { + return true; + } + + filter.generic_tags.iter().all(|(tagname, set)| { + let set = TagIndexValues::from(set); + self.tags + .get(tagname) + .map(|valset| valset.intersection(&set).count() > 0) + .unwrap_or(false) + }) + } } -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -struct PublicKeyPrefix([u8; 8]); +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +struct PublicKeyPrefix([u8; PUBLIC_KEY_PREFIX_SIZE]); impl From for PublicKeyPrefix { fn from(pk: XOnlyPublicKey) -> Self { let pk = pk.serialize(); - let mut prefix = [0u8; 8]; - prefix.copy_from_slice(&pk[..8]); + let mut prefix = [0u8; PUBLIC_KEY_PREFIX_SIZE]; + prefix.copy_from_slice(&pk[..PUBLIC_KEY_PREFIX_SIZE]); Self(prefix) } } +#[cfg(feature = "flatbuffers")] +impl FlatBufferDecode for EventIndex { + fn decode(buf: &[u8]) -> Result { + let ev = event_fbs::root_as_event(buf)?; + + // Compose Public Key prefix + let pk = ev.pubkey().ok_or(FlatBuffersError::NotFound)?.0; + let mut pubkey = [0u8; 
PUBLIC_KEY_PREFIX_SIZE]; + pubkey.copy_from_slice(&pk[..PUBLIC_KEY_PREFIX_SIZE]); + + // Compose tags + let iter = ev + .tags() + .ok_or(FlatBuffersError::NotFound)? + .into_iter() + .filter_map(|tag| match tag.data() { + Some(t) => { + if t.len() > 1 { + Some(t.into_iter().collect::>()) + } else { + None + } + } + None => None, + }); + let tags = TagIndexes::from(iter); + + Ok(Self { + event_id: EventId::from_slice(&ev.id().ok_or(FlatBuffersError::NotFound)?.0)?, + pubkey: PublicKeyPrefix(pubkey), + created_at: Timestamp::from(ev.created_at()), + kind: Kind::from(ev.kind()), + tags, + }) + } +} + +/// Event Index Result +#[derive(Debug, Clone, Default, PartialEq, Eq)] +pub struct EventIndexResult { + /// Handled event should be stored into database? + pub to_store: bool, + /// List of events that should be removed from database + pub to_discard: HashSet, +} + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] struct MappingIdentifier { pub timestamp: Timestamp, @@ -62,20 +149,10 @@ impl Ord for MappingIdentifier { } } -#[derive(Debug, Default)] -struct DatabaseIndexesInner { - //mapping: Mapping, - kinds_index: KindIndex, - authors_index: AuthorIndex, - author_and_kind_index: AuthorAndKindIndex, - created_at_index: CreatedAtIndex, - tags_index: TagIndex, -} - /// Database Indexes #[derive(Debug, Clone, Default)] pub struct DatabaseIndexes { - inner: Arc>, + index: Arc>>, } impl DatabaseIndexes { @@ -84,6 +161,16 @@ impl DatabaseIndexes { Self::default() } + /// Bulk load + #[tracing::instrument(skip_all)] + pub async fn bulk_load(&self, events: I) + where + I: IntoIterator, + { + let mut index = self.index.write().await; + index.extend(events); + } + /// Index [`Event`] #[tracing::instrument(skip_all, level = "trace")] pub async fn index_event(&self, event: &Event) -> EventIndexResult { @@ -92,61 +179,23 @@ impl DatabaseIndexes { return EventIndexResult::default(); } - let mut should_insert: bool = true; - let mut to_discard = HashSet::new(); - - let mut inner = 
self.inner.write().await; - - if event.is_replaceable() { - // Query event - let mut kinds = HashSet::with_capacity(1); - let mut authors = HashSet::with_capacity(1); - kinds.insert(event.kind); - authors.insert(event.pubkey); - - let res1 = self.query_index(&inner.kinds_index, &kinds.into_iter().collect()); - let res2 = self.query_index( - &inner.authors_index, - &authors.into_iter().map(|pk| pk.into()).collect(), - ); - let matching_sids: BTreeSet<&MappingIdentifier> = multi_intersection(vec![res1, res2]); + let should_insert: bool = true; + let to_discard = HashSet::new(); - let mut mids_to_discard = HashSet::new(); - - for mid in matching_sids.into_iter() { - if mid.timestamp >= event.created_at { + /* if event.is_replaceable() { + let filter: Filter = Filter::new().author(event.pubkey).kind(event.kind); + let res = self.query(events, vec![filter]).await; + if let Some(ev) = res.into_iter().next() { + if ev.created_at >= event.created_at { should_insert = false; - } else if mid.timestamp < event.created_at { - to_discard.insert(mid.eid); - mids_to_discard.insert(*mid); - } - } - - for mid in mids_to_discard.iter() { - if let Some(set) = inner.kinds_index.get_mut(&event.kind) { - set.remove(mid); - } - if let Some(set) = inner.authors_index.get_mut(&event.pubkey.into()) { - set.remove(mid); - } - if let Some(set) = inner - .author_and_kind_index - .get_mut(&(event.pubkey.into(), event.kind)) - { - set.remove(mid); - } - if let Some(set) = inner.created_at_index.get_mut(&mid.timestamp) { - set.remove(mid); - } - for (_, map) in inner.tags_index.iter_mut() { - map.remove(mid); + } else if ev.created_at < event.created_at { + events.remove(&ev.id); } } } else if event.is_parameterized_replaceable() { match event.identifier() { - Some(_identifier) => { - should_insert = false; - /* let filter: Filter = Filter::new() + Some(identifier) => { + let filter: Filter = Filter::new() .author(event.pubkey) .kind(event.kind) .identifier(identifier); @@ -157,41 +206,15 @@ impl 
DatabaseIndexes { } else if ev.created_at < event.created_at { events.remove(&ev.id); } - } */ + } } None => should_insert = false, } - } + } */ if should_insert { - let mapping_id = MappingIdentifier { - eid: event.id, - timestamp: event.created_at, - }; - - //inner.mapping.insert(mapping_id.sid, event.id); - - let pk: PublicKeyPrefix = event.pubkey.into(); - - // Index kind - self.index_event_kind(&mut inner.kinds_index, mapping_id, event.kind); - - // Index author - self.index_event_author(&mut inner.authors_index, mapping_id, pk); - - // Index author and kind - self.index_event_author_and_kind( - &mut inner.author_and_kind_index, - mapping_id, - pk, - event.kind, - ); - - // Index created at - self.index_event_created_at(&mut inner.created_at_index, mapping_id, event); - - // Index tags - self.index_event_tags(&mut inner.tags_index, mapping_id, event); + let mut index = self.index.write().await; + index.insert(EventIndex::from(event)); } EventIndexResult { @@ -200,233 +223,49 @@ impl DatabaseIndexes { } } - /// Index kind - fn index_event_kind(&self, kinds_index: &mut KindIndex, mid: MappingIdentifier, kind: Kind) { - kinds_index - .entry(kind) - .and_modify(|set| { - set.insert(mid); - }) - .or_insert_with(|| { - let mut set = BTreeSet::new(); - set.insert(mid); - set - }); - } - - /// Index author - fn index_event_author( - &self, - authors_index: &mut AuthorIndex, - mid: MappingIdentifier, - pk: PublicKeyPrefix, - ) { - authors_index - .entry(pk) - .and_modify(|set| { - set.insert(mid); - }) - .or_insert_with(|| { - let mut set = BTreeSet::new(); - set.insert(mid); - set - }); - } - - fn index_event_author_and_kind( - &self, - author_and_kind_index: &mut AuthorAndKindIndex, - mid: MappingIdentifier, - pk: PublicKeyPrefix, - kind: Kind, - ) { - author_and_kind_index - .entry((pk, kind)) - .and_modify(|set| { - set.insert(mid); - }) - .or_insert_with(|| { - let mut set = BTreeSet::new(); - set.insert(mid); - set - }); - } - - /// Index created at - fn 
index_event_created_at( - &self, - created_at_index: &mut CreatedAtIndex, - mid: MappingIdentifier, - event: &Event, - ) { - created_at_index - .entry(event.created_at) - .and_modify(|set| { - set.insert(mid); - }) - .or_insert_with(|| { - let mut set = BTreeSet::new(); - set.insert(mid); - set - }); - } - - /// Index tags - fn index_event_tags(&self, tags_index: &mut TagIndex, mid: MappingIdentifier, event: &Event) { - for (a, set) in event.build_tags_index().into_iter() { - tags_index - .entry(a) - .and_modify(|map| { - map.insert(mid, set.clone()); - }) - .or_insert_with(|| { - let mut map = HashMap::with_capacity(1); - map.insert(mid, set); - map - }); - } - } - /// Query #[tracing::instrument(skip_all)] pub async fn query(&self, filters: Vec) -> HashSet { - let inner = self.inner.read().await; + let index = self.index.read().await; - let mut matching_event_ids: HashSet = HashSet::new(); + let mut matching_ids: HashSet = HashSet::new(); for filter in filters.into_iter() { - if !filter.ids.is_empty() { - matching_event_ids.extend(filter.ids); - continue; - } - if let (Some(since), Some(until)) = (filter.since, filter.until) { if since > until { continue; } } - let mut sets: Vec> = Vec::new(); - - if !filter.kinds.is_empty() && !filter.authors.is_empty() { - let mut set = HashSet::new(); - for author in filter.authors.iter() { - for kind in filter.kinds.iter() { - set.insert(((*author).into(), *kind)); - } - } - sets.push(self.query_index(&inner.author_and_kind_index, &set)); - } else { - if !filter.kinds.is_empty() { - sets.push(self.query_index(&inner.kinds_index, &filter.kinds)); - } - - if !filter.authors.is_empty() { - sets.push(self.query_index( - &inner.authors_index, - &filter.authors.into_iter().map(|pk| pk.into()).collect(), - )); - } - } - - if let (Some(since), Some(until)) = (filter.since, filter.until) { - let mut temp = BTreeSet::new(); - for ids in inner - .created_at_index - .range(since..=until) - .map(|(_, ids)| ids) - { - temp.extend(ids); - } 
- sets.push(temp); + let authors: HashSet = filter + .authors + .iter() + .map(|p| PublicKeyPrefix::from(*p)) + .collect(); + let iter = index + .iter() + .filter(|m| { + (filter.ids.is_empty() || filter.ids.contains(&m.event_id)) + && filter.since.map_or(true, |t| m.created_at >= t) + && filter.until.map_or(true, |t| m.created_at <= t) + && (filter.authors.is_empty() || authors.contains(&m.pubkey)) + && (filter.kinds.is_empty() || filter.kinds.contains(&m.kind)) + && m.filter_tags_match(&filter) + }) + .map(|m| m.event_id); + if let Some(limit) = filter.limit { + matching_ids.extend(iter.take(limit)) } else { - if let Some(since) = filter.since { - let mut temp = BTreeSet::new(); - for (_, ids) in inner.created_at_index.range(since..) { - temp.extend(ids); - } - sets.push(temp); - } - - if let Some(until) = filter.until { - let mut temp = BTreeSet::new(); - for (_, ids) in inner.created_at_index.range(..=until) { - temp.extend(ids); - } - sets.push(temp); - } + matching_ids.extend(iter) } - - if !filter.generic_tags.is_empty() { - let mut temp = BTreeSet::new(); - - for (tagname, set) in filter.generic_tags.iter() { - if let Some(tag_map) = inner.tags_index.get(tagname) { - for (id, tag_values) in tag_map.iter() { - if set.iter().all(|value| tag_values.contains(value)) { - temp.insert(id); - } - } - } - } - - sets.push(temp); - } - - // Intersection - let matching_sids: BTreeSet<&MappingIdentifier> = multi_intersection(sets); - - // Limit - let limit: usize = filter.limit.unwrap_or(matching_sids.len()); - - // Get ids - matching_event_ids.extend(matching_sids.into_iter().take(limit).map(|mid| mid.eid)); } - matching_event_ids - } - - #[tracing::instrument(skip_all)] - fn query_index<'a, K>( - &self, - index: &'a HashMap>, - keys: &HashSet, - ) -> BTreeSet<&'a MappingIdentifier> - where - K: Eq + Hash, - { - let mut result: BTreeSet<&MappingIdentifier> = BTreeSet::new(); - for key in keys.iter() { - if let Some(ids) = index.get(key) { - result.extend(ids.iter()); - 
} - } - result + matching_ids } /// Clear indexes pub async fn clear(&self) { - let mut inner = self.inner.write().await; - inner.kinds_index.clear(); - inner.authors_index.clear(); - inner.created_at_index.clear(); - } -} - -#[tracing::instrument(skip_all)] -fn multi_intersection(mut sets: Vec>) -> BTreeSet<&T> -where - T: Ord, -{ - // Sort by len (DESC) - sets.sort_by_cached_key(|set| Reverse(set.len())); - - if let Some(mut result) = sets.pop() { - if !sets.is_empty() { - result.retain(|item| sets.iter().all(|set| set.contains(item))); - } - result - } else { - BTreeSet::new() + let mut index = self.index.write().await; + index.clear(); } } From 631e73bc0e2144702072738ad3f7b20ce6e21538 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Sun, 5 Nov 2023 16:10:23 +0100 Subject: [PATCH 52/98] rocksdb: improve `build_indexes` performance --- crates/nostr-sdk-rocksdb/Cargo.toml | 2 +- crates/nostr-sdk-rocksdb/src/lib.rs | 23 ++++++++++------------- 2 files changed, 11 insertions(+), 14 deletions(-) diff --git a/crates/nostr-sdk-rocksdb/Cargo.toml b/crates/nostr-sdk-rocksdb/Cargo.toml index f2b07285e..b90a5954f 100644 --- a/crates/nostr-sdk-rocksdb/Cargo.toml +++ b/crates/nostr-sdk-rocksdb/Cargo.toml @@ -14,7 +14,7 @@ keywords = ["nostr", "sdk", "db", "redb"] [dependencies] async-trait = { workspace = true } nostr = { workspace = true, features = ["std"] } -nostr-sdk-db = { version = "0.1", path = "../nostr-sdk-db" } +nostr-sdk-db = { version = "0.1", path = "../nostr-sdk-db", features = ["flatbuffers"] } nostr-sdk-fbs = { version = "0.1", path = "../nostr-sdk-fbs" } rocksdb = { version = "0.21", default-features = false, features = ["multi-threaded-cf", "snappy"] } tokio = { workspace = true, features = ["rt-multi-thread", "sync"] } diff --git a/crates/nostr-sdk-rocksdb/src/lib.rs b/crates/nostr-sdk-rocksdb/src/lib.rs index ae270ecf7..d27a88a88 100644 --- a/crates/nostr-sdk-rocksdb/src/lib.rs +++ b/crates/nostr-sdk-rocksdb/src/lib.rs @@ -8,10 +8,10 @@ use 
std::sync::Arc; use async_trait::async_trait; use nostr::{Event, EventId, Filter, FiltersMatchEvent, Timestamp, Url}; use nostr_sdk_db::{ - index::DatabaseIndexes, Backend, DatabaseError, DatabaseOptions, EventIndexResult, - NostrDatabase, + index::{DatabaseIndexes, EventIndex}, + Backend, DatabaseError, DatabaseOptions, EventIndexResult, NostrDatabase, }; -use nostr_sdk_fbs::{FlatBufferBuilder, FlatBufferUtils}; +use nostr_sdk_fbs::{FlatBufferBuilder, FlatBufferDecode, FlatBufferEncode}; use rocksdb::{ BoundColumnFamily, ColumnFamilyDescriptor, DBCompactionStyle, DBCompressionType, IteratorMode, OptimisticTransactionDB, Options, WriteBatchWithTransaction, @@ -87,15 +87,12 @@ impl RocksDatabase { #[tracing::instrument(skip_all)] pub async fn build_indexes(&self) -> Result<(), DatabaseError> { let cf = self.cf_handle(EVENTS_CF)?; - let iter = self.db.full_iterator_cf(&cf, IteratorMode::Start); - - for i in iter { - if let Ok((_key, value)) = i { - let event = Event::decode(&value).map_err(DatabaseError::backend)?; - self.indexes.index_event(&event).await; - } - } - + let events = self + .db + .full_iterator_cf(&cf, IteratorMode::Start) + .flatten() + .filter_map(|(_, value)| EventIndex::decode(&value).ok()); + self.indexes.bulk_load(events).await; Ok(()) } } @@ -118,7 +115,7 @@ impl NostrDatabase for RocksDatabase { let EventIndexResult { to_store, to_discard, - } = self.indexes.index_event(&event).await; + } = self.indexes.index_event(event).await; if to_store { // Acquire FlatBuffers Builder From 96ea2fcc2df92600670d43da6fe6b95ab5ed2428 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 6 Nov 2023 12:12:43 +0100 Subject: [PATCH 53/98] nostr: add `RawEvent` --- crates/nostr/src/event/mod.rs | 8 +-- crates/nostr/src/event/raw.rs | 97 +++++++++++++++++++++++++++++++++++ 2 files changed, 101 insertions(+), 4 deletions(-) create mode 100644 crates/nostr/src/event/raw.rs diff --git a/crates/nostr/src/event/mod.rs b/crates/nostr/src/event/mod.rs index 
9edc68d08..e3c97527d 100644 --- a/crates/nostr/src/event/mod.rs +++ b/crates/nostr/src/event/mod.rs @@ -16,6 +16,7 @@ pub mod builder; pub mod id; pub mod kind; pub mod partial; +pub mod raw; pub mod tag; pub mod unsigned; @@ -372,6 +373,7 @@ mod tests { assert_eq!(Kind::Custom(123), e.kind); assert_eq!(Kind::Custom(123), deserialized.kind); } + #[test] #[cfg(feature = "std")] fn test_event_expired() { @@ -389,10 +391,8 @@ mod tests { #[test] #[cfg(feature = "std")] fn test_event_not_expired() { - let now = Timestamp::now().as_i64(); - - // To make sure it is never considered expired - let expiry_date: u64 = (now * 2).try_into().unwrap(); + let now = Timestamp::now(); + let expiry_date: u64 = now.as_u64() * 2; let my_keys = Keys::generate(); let event = EventBuilder::new_text_note( diff --git a/crates/nostr/src/event/raw.rs b/crates/nostr/src/event/raw.rs new file mode 100644 index 000000000..55085f50b --- /dev/null +++ b/crates/nostr/src/event/raw.rs @@ -0,0 +1,97 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +//! Raw Event + +use alloc::string::String; +use alloc::vec::Vec; +use core::str::FromStr; + +use crate::Timestamp; + +use super::kind::EPHEMERAL_RANGE; + +/// Raw Event +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct RawEvent { + /// Id + pub id: [u8; 32], + /// Author + pub pubkey: [u8; 32], + /// Timestamp (seconds) + pub created_at: u64, + /// Kind + pub kind: u64, + /// Vector of [`Tag`] + pub tags: Vec>, + /// Content + pub content: String, + /// Signature + pub sig: [u8; 64], +} + +impl RawEvent { + /// Returns `true` if the event has an expiration tag that is expired. + /// If an event has no `Expiration` tag, then it will return `false`. 
+ /// + /// + pub fn is_expired(&self, now: &Timestamp) -> bool { + for tag in self.tags.iter() { + if tag.len() == 2 && tag[0] == "expiration" { + if let Ok(timestamp) = Timestamp::from_str(&tag[1]) { + return ×tamp < now; + } + break; + } + } + false + } + + /// Check if event [`Kind`] is `Ephemeral` + /// + /// + pub fn is_ephemeral(&self) -> bool { + EPHEMERAL_RANGE.contains(&self.kind) + } +} + +#[cfg(test)] +mod tests { + #[cfg(feature = "std")] + use super::*; + + #[test] + #[cfg(feature = "std")] + fn test_event_expired() { + let raw = RawEvent { + id: [0u8; 32], + pubkey: [0u8; 32], + created_at: 0, + kind: 1, + tags: vec![vec!["expiration".to_string(), "12345".to_string()]], + content: String::new(), + sig: [0u8; 64], + }; + let now = Timestamp::now(); + assert!(raw.is_expired(&now)); + } + + #[test] + #[cfg(feature = "std")] + fn test_event_not_expired() { + let now = Timestamp::now(); + let expiry_date: u64 = now.as_u64() * 2; + + let raw = RawEvent { + id: [0u8; 32], + pubkey: [0u8; 32], + created_at: 0, + kind: 1, + tags: vec![vec!["expiration".to_string(), expiry_date.to_string()]], + content: String::new(), + sig: [0u8; 64], + }; + + assert!(!raw.is_expired(&now)); + } +} From 2488e4d82c89d89e903277383ef5bbd8d5305709 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 6 Nov 2023 12:13:43 +0100 Subject: [PATCH 54/98] nostr: impl `Clone` for `TagIndexes` --- crates/nostr/src/event/tag/indexes.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/nostr/src/event/tag/indexes.rs b/crates/nostr/src/event/tag/indexes.rs index 4af7df2a7..a744d23ff 100644 --- a/crates/nostr/src/event/tag/indexes.rs +++ b/crates/nostr/src/event/tag/indexes.rs @@ -21,7 +21,7 @@ use crate::Alphabet; pub const TAG_INDEX_VALUE_SIZE: usize = 8; /// Tag Indexes -#[derive(Debug, Default, PartialEq, Eq)] +#[derive(Debug, Clone, Default, PartialEq, Eq)] pub struct TagIndexes { inner: AllocMap, } @@ -67,7 +67,7 @@ fn single_char_tagname(tagname: &str) -> 
Option { } /// Tag Index Values -#[derive(Debug, Default, PartialEq, Eq)] +#[derive(Debug, Clone, Default, PartialEq, Eq)] pub struct TagIndexValues { inner: AllocSet<[u8; TAG_INDEX_VALUE_SIZE]>, } From fbd0115031ea9d5da1874708bfb59fa684d3344e Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 6 Nov 2023 12:17:04 +0100 Subject: [PATCH 55/98] db: move `nostr-sdk-fbs` into `flatbuffers` module --- .githooks/pre-push | 1 - Cargo.lock | 13 +- Makefile | 4 +- crates/nostr-sdk-db/Cargo.toml | 4 +- crates/nostr-sdk-db/Makefile | 5 + .../fbs/event.fbs | 0 .../src/flatbuffers}/event_generated.rs | 0 .../src/flatbuffers/mod.rs} | 45 ++++- crates/nostr-sdk-db/src/index.rs | 174 ++++++++---------- crates/nostr-sdk-db/src/lib.rs | 9 +- crates/nostr-sdk-fbs/Cargo.toml | 13 -- crates/nostr-sdk-fbs/Makefile | 5 - crates/nostr-sdk-rocksdb/Cargo.toml | 3 +- crates/nostr-sdk-rocksdb/src/lib.rs | 8 +- 14 files changed, 144 insertions(+), 140 deletions(-) rename crates/{nostr-sdk-fbs => nostr-sdk-db}/fbs/event.fbs (100%) rename crates/{nostr-sdk-fbs/src => nostr-sdk-db/src/flatbuffers}/event_generated.rs (100%) rename crates/{nostr-sdk-fbs/src/lib.rs => nostr-sdk-db/src/flatbuffers/mod.rs} (69%) delete mode 100644 crates/nostr-sdk-fbs/Cargo.toml delete mode 100644 crates/nostr-sdk-fbs/Makefile diff --git a/.githooks/pre-push b/.githooks/pre-push index edc1dcba1..b38a6ae32 100755 --- a/.githooks/pre-push +++ b/.githooks/pre-push @@ -12,7 +12,6 @@ buildargs=( "-p nostr-sdk --no-default-features" "-p nostr-sdk --features blocking" "-p nostr-sdk-db" - "-p nostr-sdk-fbs" "-p nostr-ffi" "-p nostr-sdk-ffi" ) diff --git a/Cargo.lock b/Cargo.lock index d91dea8f1..e9cf7da2f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1249,24 +1249,14 @@ name = "nostr-sdk-db" version = "0.1.0" dependencies = [ "async-trait", + "flatbuffers", "nostr", - "nostr-sdk-fbs", "thiserror", "tokio", "tracing", "tracing-subscriber", ] -[[package]] -name = "nostr-sdk-fbs" -version = "0.1.0" -dependencies = [ - 
"flatbuffers", - "nostr", - "thiserror", - "tracing", -] - [[package]] name = "nostr-sdk-ffi" version = "0.1.0" @@ -1313,7 +1303,6 @@ dependencies = [ "async-trait", "nostr", "nostr-sdk-db", - "nostr-sdk-fbs", "rocksdb", "tokio", "tracing", diff --git a/Makefile b/Makefile index c2d7fb8de..4146bc5fa 100644 --- a/Makefile +++ b/Makefile @@ -12,8 +12,8 @@ clean: book: cd book && make build -flatbuffers: - cd crates/nostr-sdk-fbs && make +flatbuf: + cd crates/nostr-sdk-db && make flatbuf loc: @echo "--- Counting lines of .rs files (LOC):" && find crates/ bindings/ -type f -name "*.rs" -exec cat {} \; | wc -l \ No newline at end of file diff --git a/crates/nostr-sdk-db/Cargo.toml b/crates/nostr-sdk-db/Cargo.toml index 040cd7d77..c8e5665f2 100644 --- a/crates/nostr-sdk-db/Cargo.toml +++ b/crates/nostr-sdk-db/Cargo.toml @@ -13,12 +13,12 @@ keywords = ["nostr", "sdk", "db"] [features] default = [] -flatbuffers = ["dep:nostr-sdk-fbs"] +flatbuf = ["dep:flatbuffers"] [dependencies] async-trait = { workspace = true } +flatbuffers = { version = "23.5", optional = true } nostr = { workspace = true, features = ["std"] } -nostr-sdk-fbs = { version = "0.1", path = "../nostr-sdk-fbs", optional = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["sync"] } tracing = { workspace = true, features = ["std", "attributes"] } diff --git a/crates/nostr-sdk-db/Makefile b/crates/nostr-sdk-db/Makefile index 499f8182c..5c5682c04 100644 --- a/crates/nostr-sdk-db/Makefile +++ b/crates/nostr-sdk-db/Makefile @@ -1,2 +1,7 @@ +all: build + +flatbuf: + flatc --rust -o ./src/flatbuffers ./fbs/event.fbs + graph: CARGO_PROFILE_RELEASE_DEBUG=true cargo flamegraph --release --example indexes -o flamegraph.svg \ No newline at end of file diff --git a/crates/nostr-sdk-fbs/fbs/event.fbs b/crates/nostr-sdk-db/fbs/event.fbs similarity index 100% rename from crates/nostr-sdk-fbs/fbs/event.fbs rename to crates/nostr-sdk-db/fbs/event.fbs diff --git 
a/crates/nostr-sdk-fbs/src/event_generated.rs b/crates/nostr-sdk-db/src/flatbuffers/event_generated.rs similarity index 100% rename from crates/nostr-sdk-fbs/src/event_generated.rs rename to crates/nostr-sdk-db/src/flatbuffers/event_generated.rs diff --git a/crates/nostr-sdk-fbs/src/lib.rs b/crates/nostr-sdk-db/src/flatbuffers/mod.rs similarity index 69% rename from crates/nostr-sdk-fbs/src/lib.rs rename to crates/nostr-sdk-db/src/flatbuffers/mod.rs index ded882570..4c8d3d9f9 100644 --- a/crates/nostr-sdk-fbs/src/lib.rs +++ b/crates/nostr-sdk-db/src/flatbuffers/mod.rs @@ -1,39 +1,50 @@ // Copyright (c) 2022-2023 Yuki Kishimoto // Distributed under the MIT software license -//! Nostr SDK Flatbuffers +//! Flatbuffers pub use flatbuffers::FlatBufferBuilder; use flatbuffers::InvalidFlatbuffer; +use nostr::event::raw::RawEvent; use nostr::secp256k1::schnorr::Signature; use nostr::secp256k1::{self, XOnlyPublicKey}; use nostr::{Event, EventId, Kind, Tag, Timestamp}; use thiserror::Error; -#[allow(unused_imports, dead_code, clippy::all)] +#[allow(unused_imports, dead_code, clippy::all, unsafe_code, missing_docs)] mod event_generated; pub use self::event_generated::event_fbs; +/// FlatBuffers Error #[derive(Debug, Error)] pub enum Error { + /// Invalid FlatBuffer #[error(transparent)] InvalidFlatbuffer(#[from] InvalidFlatbuffer), #[error(transparent)] + /// Event ID error EventId(#[from] nostr::event::id::Error), + /// Tag error #[error(transparent)] Tag(#[from] nostr::event::tag::Error), + /// Secp256k1 error #[error(transparent)] Secp256k1(#[from] secp256k1::Error), + /// Not found #[error("not found")] NotFound, } +/// FlatBuffer Encode trait pub trait FlatBufferEncode { + /// FlatBuffer encode fn encode<'a>(&self, fbb: &'a mut FlatBufferBuilder) -> &'a [u8]; } +/// FlatBuffer Decode trait pub trait FlatBufferDecode: Sized { + /// FlatBuffer decode fn decode(buf: &[u8]) -> Result; } @@ -103,3 +114,33 @@ impl FlatBufferDecode
for RawEvent { + #[tracing::instrument(skip_all, level = "trace")] + fn decode(buf: &[u8]) -> Result { + let ev = event_fbs::root_as_event(buf)?; + Ok(Self { + id: ev.id().ok_or(Error::NotFound)?.0, + pubkey: ev.pubkey().ok_or(Error::NotFound)?.0, + created_at: ev.created_at(), + kind: ev.kind(), + tags: ev + .tags() + .ok_or(Error::NotFound)? + .into_iter() + .filter_map(|tag| match tag.data() { + Some(t) => { + if t.len() > 1 { + Some(t.into_iter().map(|s| s.to_owned()).collect::>()) + } else { + None + } + } + None => None, + }) + .collect(), + content: ev.content().ok_or(Error::NotFound)?.to_owned(), + sig: ev.sig().ok_or(Error::NotFound)?.0, + }) + } +} diff --git a/crates/nostr-sdk-db/src/index.rs b/crates/nostr-sdk-db/src/index.rs index 069fb2893..8a74e3464 100644 --- a/crates/nostr-sdk-db/src/index.rs +++ b/crates/nostr-sdk-db/src/index.rs @@ -7,21 +7,26 @@ use std::cmp::Ordering; use std::collections::{BTreeSet, HashSet}; use std::sync::Arc; +use nostr::event::raw::RawEvent; use nostr::secp256k1::XOnlyPublicKey; use nostr::{Event, EventId, Filter, Kind, TagIndexValues, TagIndexes, Timestamp}; -#[cfg(feature = "flatbuffers")] -use nostr_sdk_fbs::{event_fbs, Error as FlatBuffersError, FlatBufferDecode}; use tokio::sync::RwLock; +/// Public Key Prefix Size const PUBLIC_KEY_PREFIX_SIZE: usize = 8; /// Event Index -#[derive(Debug, PartialEq, Eq)] -pub struct EventIndex { +#[derive(Debug, Clone, PartialEq, Eq)] +struct EventIndex { + /// Timestamp (seconds) created_at: Timestamp, + /// Event ID event_id: EventId, + /// Public key prefix pubkey: PublicKeyPrefix, + /// Kind kind: Kind, + /// Tag indexes tags: TagIndexes, } @@ -34,13 +39,26 @@ impl PartialOrd for EventIndex { impl Ord for EventIndex { fn cmp(&self, other: &Self) -> Ordering { if self.created_at != other.created_at { - other.created_at.cmp(&self.created_at) + self.created_at.cmp(&other.created_at).reverse() } else { self.event_id.cmp(&other.event_id) } } } +impl TryFrom for EventIndex { + type Error 
= nostr::event::id::Error; + fn try_from(raw: RawEvent) -> Result { + Ok(Self { + created_at: Timestamp::from(raw.created_at), + event_id: EventId::from_slice(&raw.id)?, + pubkey: PublicKeyPrefix::from(raw.pubkey), + kind: Kind::from(raw.kind), + tags: TagIndexes::from(raw.tags.into_iter()), + }) + } +} + impl From<&Event> for EventIndex { fn from(e: &Event) -> Self { Self { @@ -69,52 +87,22 @@ impl EventIndex { } } -#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -struct PublicKeyPrefix([u8; PUBLIC_KEY_PREFIX_SIZE]); +/// Public Key prefix +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct PublicKeyPrefix([u8; PUBLIC_KEY_PREFIX_SIZE]); impl From for PublicKeyPrefix { fn from(pk: XOnlyPublicKey) -> Self { - let pk = pk.serialize(); - let mut prefix = [0u8; PUBLIC_KEY_PREFIX_SIZE]; - prefix.copy_from_slice(&pk[..PUBLIC_KEY_PREFIX_SIZE]); - Self(prefix) + let pk: [u8; 32] = pk.serialize(); + Self::from(pk) } } -#[cfg(feature = "flatbuffers")] -impl FlatBufferDecode for EventIndex { - fn decode(buf: &[u8]) -> Result { - let ev = event_fbs::root_as_event(buf)?; - - // Compose Public Key prefix - let pk = ev.pubkey().ok_or(FlatBuffersError::NotFound)?.0; +impl From<[u8; 32]> for PublicKeyPrefix { + fn from(pk: [u8; 32]) -> Self { let mut pubkey = [0u8; PUBLIC_KEY_PREFIX_SIZE]; pubkey.copy_from_slice(&pk[..PUBLIC_KEY_PREFIX_SIZE]); - - // Compose tags - let iter = ev - .tags() - .ok_or(FlatBuffersError::NotFound)? 
- .into_iter() - .filter_map(|tag| match tag.data() { - Some(t) => { - if t.len() > 1 { - Some(t.into_iter().collect::>()) - } else { - None - } - } - None => None, - }); - let tags = TagIndexes::from(iter); - - Ok(Self { - event_id: EventId::from_slice(&ev.id().ok_or(FlatBuffersError::NotFound)?.0)?, - pubkey: PublicKeyPrefix(pubkey), - created_at: Timestamp::from(ev.created_at()), - kind: Kind::from(ev.kind()), - tags, - }) + Self(pubkey) } } @@ -127,28 +115,6 @@ pub struct EventIndexResult { pub to_discard: HashSet, } -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -struct MappingIdentifier { - pub timestamp: Timestamp, - pub eid: EventId, -} - -impl PartialOrd for MappingIdentifier { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for MappingIdentifier { - fn cmp(&self, other: &Self) -> Ordering { - if self.timestamp != other.timestamp { - other.timestamp.cmp(&self.timestamp) - } else { - self.eid.cmp(&other.eid) - } - } -} - /// Database Indexes #[derive(Debug, Clone, Default)] pub struct DatabaseIndexes { @@ -165,10 +131,17 @@ impl DatabaseIndexes { #[tracing::instrument(skip_all)] pub async fn bulk_load(&self, events: I) where - I: IntoIterator, + I: IntoIterator, { let mut index = self.index.write().await; - index.extend(events); + let now = Timestamp::now(); + // TODO: check if it's expired or ephemeral, check if it's replaceable or param replaceable + index.extend( + events + .into_iter() + .filter(|raw| !raw.is_expired(&now) && !raw.is_ephemeral()) + .filter_map(|raw| EventIndex::try_from(raw).ok()), + ); } /// Index [`Event`] @@ -179,17 +152,18 @@ impl DatabaseIndexes { return EventIndexResult::default(); } - let should_insert: bool = true; - let to_discard = HashSet::new(); + let mut index = self.index.write().await; + + let mut should_insert: bool = true; + let mut to_discard: HashSet = HashSet::new(); - /* if event.is_replaceable() { + if event.is_replaceable() { let filter: Filter = 
Filter::new().author(event.pubkey).kind(event.kind); - let res = self.query(events, vec![filter]).await; - if let Some(ev) = res.into_iter().next() { - if ev.created_at >= event.created_at { + for ev in self.internal_query(&index, &filter).await { + if ev.created_at > event.created_at { should_insert = false; - } else if ev.created_at < event.created_at { - events.remove(&ev.id); + } else if ev.created_at <= event.created_at { + to_discard.insert(ev.event_id); } } } else if event.is_parameterized_replaceable() { @@ -199,21 +173,23 @@ impl DatabaseIndexes { .author(event.pubkey) .kind(event.kind) .identifier(identifier); - let res: Vec = self._query(events, vec![filter]).await?; - if let Some(ev) = res.into_iter().next() { + for ev in self.internal_query(&index, &filter).await { if ev.created_at >= event.created_at { should_insert = false; } else if ev.created_at < event.created_at { - events.remove(&ev.id); + to_discard.insert(ev.event_id); } } } None => should_insert = false, } - } */ + } + // Remove events + index.retain(|e| !to_discard.contains(&e.event_id)); + + // Insert event if should_insert { - let mut index = self.index.write().await; index.insert(EventIndex::from(event)); } @@ -223,6 +199,26 @@ impl DatabaseIndexes { } } + async fn internal_query<'a>( + &self, + index: &'a BTreeSet, + filter: &'a Filter, + ) -> impl Iterator { + let authors: HashSet = filter + .authors + .iter() + .map(|p| PublicKeyPrefix::from(*p)) + .collect(); + index.iter().filter(move |m| { + (filter.ids.is_empty() || filter.ids.contains(&m.event_id)) + && filter.since.map_or(true, |t| m.created_at >= t) + && filter.until.map_or(true, |t| m.created_at <= t) + && (filter.authors.is_empty() || authors.contains(&m.pubkey)) + && (filter.kinds.is_empty() || filter.kinds.contains(&m.kind)) + && m.filter_tags_match(filter) + }) + } + /// Query #[tracing::instrument(skip_all)] pub async fn query(&self, filters: Vec) -> HashSet { @@ -237,21 +233,9 @@ impl DatabaseIndexes { } } - let authors: 
HashSet = filter - .authors - .iter() - .map(|p| PublicKeyPrefix::from(*p)) - .collect(); - let iter = index - .iter() - .filter(|m| { - (filter.ids.is_empty() || filter.ids.contains(&m.event_id)) - && filter.since.map_or(true, |t| m.created_at >= t) - && filter.until.map_or(true, |t| m.created_at <= t) - && (filter.authors.is_empty() || authors.contains(&m.pubkey)) - && (filter.kinds.is_empty() || filter.kinds.contains(&m.kind)) - && m.filter_tags_match(&filter) - }) + let iter = self + .internal_query(&index, &filter) + .await .map(|m| m.event_id); if let Some(limit) = filter.limit { matching_ids.extend(iter.take(limit)) diff --git a/crates/nostr-sdk-db/src/lib.rs b/crates/nostr-sdk-db/src/lib.rs index 78ee46ebe..abca1f5b7 100644 --- a/crates/nostr-sdk-db/src/lib.rs +++ b/crates/nostr-sdk-db/src/lib.rs @@ -3,21 +3,26 @@ //! Nostr SDK Database -#![forbid(unsafe_code)] +#![deny(unsafe_code)] #![warn(missing_docs)] #![warn(rustdoc::bare_urls)] use std::collections::HashSet; pub use async_trait::async_trait; -use nostr::{Event, EventId, Filter, Timestamp, Url}; +use nostr::secp256k1::XOnlyPublicKey; +use nostr::{Event, EventId, Filter, Metadata, Timestamp, Url}; mod error; +#[cfg(feature = "flatbuf")] +pub mod flatbuffers; pub mod index; pub mod memory; mod options; pub use self::error::DatabaseError; +#[cfg(feature = "flatbuf")] +pub use self::flatbuffers::{FlatBufferBuilder, FlatBufferDecode, FlatBufferEncode}; pub use self::index::{DatabaseIndexes, EventIndexResult}; pub use self::options::DatabaseOptions; diff --git a/crates/nostr-sdk-fbs/Cargo.toml b/crates/nostr-sdk-fbs/Cargo.toml deleted file mode 100644 index 68517aba7..000000000 --- a/crates/nostr-sdk-fbs/Cargo.toml +++ /dev/null @@ -1,13 +0,0 @@ -[package] -name = "nostr-sdk-fbs" -version = "0.1.0" -edition = "2021" -homepage.workspace = true -repository.workspace = true -license.workspace = true - -[dependencies] -flatbuffers = "23.5" -nostr = { workspace = true, features = ["std"] } -thiserror = { 
workspace = true } -tracing = { workspace = true, features = ["std", "attributes"] } diff --git a/crates/nostr-sdk-fbs/Makefile b/crates/nostr-sdk-fbs/Makefile deleted file mode 100644 index f426f25f9..000000000 --- a/crates/nostr-sdk-fbs/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -all: build - -build: - flatc --rust -o ./src/ ./fbs/event.fbs - flatc --rust -o ./src/ ./fbs/index.fbs \ No newline at end of file diff --git a/crates/nostr-sdk-rocksdb/Cargo.toml b/crates/nostr-sdk-rocksdb/Cargo.toml index b90a5954f..deec535cb 100644 --- a/crates/nostr-sdk-rocksdb/Cargo.toml +++ b/crates/nostr-sdk-rocksdb/Cargo.toml @@ -14,8 +14,7 @@ keywords = ["nostr", "sdk", "db", "redb"] [dependencies] async-trait = { workspace = true } nostr = { workspace = true, features = ["std"] } -nostr-sdk-db = { version = "0.1", path = "../nostr-sdk-db", features = ["flatbuffers"] } -nostr-sdk-fbs = { version = "0.1", path = "../nostr-sdk-fbs" } +nostr-sdk-db = { version = "0.1", path = "../nostr-sdk-db", features = ["flatbuf"] } rocksdb = { version = "0.21", default-features = false, features = ["multi-threaded-cf", "snappy"] } tokio = { workspace = true, features = ["rt-multi-thread", "sync"] } tracing = { workspace = true, features = ["std"] } diff --git a/crates/nostr-sdk-rocksdb/src/lib.rs b/crates/nostr-sdk-rocksdb/src/lib.rs index d27a88a88..792d64ab7 100644 --- a/crates/nostr-sdk-rocksdb/src/lib.rs +++ b/crates/nostr-sdk-rocksdb/src/lib.rs @@ -6,12 +6,12 @@ use std::path::Path; use std::sync::Arc; use async_trait::async_trait; +use nostr::event::raw::RawEvent; use nostr::{Event, EventId, Filter, FiltersMatchEvent, Timestamp, Url}; use nostr_sdk_db::{ - index::{DatabaseIndexes, EventIndex}, - Backend, DatabaseError, DatabaseOptions, EventIndexResult, NostrDatabase, + Backend, DatabaseError, DatabaseIndexes, DatabaseOptions, EventIndexResult, FlatBufferBuilder, + FlatBufferDecode, FlatBufferEncode, NostrDatabase, }; -use nostr_sdk_fbs::{FlatBufferBuilder, FlatBufferDecode, 
FlatBufferEncode}; use rocksdb::{ BoundColumnFamily, ColumnFamilyDescriptor, DBCompactionStyle, DBCompressionType, IteratorMode, OptimisticTransactionDB, Options, WriteBatchWithTransaction, @@ -91,7 +91,7 @@ impl RocksDatabase { .db .full_iterator_cf(&cf, IteratorMode::Start) .flatten() - .filter_map(|(_, value)| EventIndex::decode(&value).ok()); + .filter_map(|(_, value)| RawEvent::decode(&value).ok()); self.indexes.bulk_load(events).await; Ok(()) } From 4de26496c210d9fd151f87f711abba12c1829d83 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 6 Nov 2023 12:46:53 +0100 Subject: [PATCH 56/98] db: add `profile` method to `NostrDatabase` trait --- crates/nostr-sdk-db/src/error.rs | 16 +++++++++++++++- crates/nostr-sdk-db/src/lib.rs | 18 ++++++++++++++++-- 2 files changed, 31 insertions(+), 3 deletions(-) diff --git a/crates/nostr-sdk-db/src/error.rs b/crates/nostr-sdk-db/src/error.rs index 796684d27..8686d4772 100644 --- a/crates/nostr-sdk-db/src/error.rs +++ b/crates/nostr-sdk-db/src/error.rs @@ -9,8 +9,11 @@ use thiserror::Error; #[derive(Debug, Error)] pub enum DatabaseError { /// An error happened in the underlying database backend. - #[error(transparent)] + #[error("backend: {0}")] Backend(Box), + /// Nostr error + #[error("nostr: {0}")] + Nostr(Box), /// Not supported #[error("method not supported by current backend")] NotSupported, @@ -33,4 +36,15 @@ impl DatabaseError { { Self::Backend(Box::new(error)) } + + /// Create a new [`Nostr`][Self::Nostr] error. + /// + /// Shorthand for `Error::Nostr(Box::new(error))`. 
+ #[inline] + pub fn nostr(error: E) -> Self + where + E: std::error::Error + Send + Sync + 'static, + { + Self::Nostr(Box::new(error)) + } } diff --git a/crates/nostr-sdk-db/src/lib.rs b/crates/nostr-sdk-db/src/lib.rs index abca1f5b7..1f2032919 100644 --- a/crates/nostr-sdk-db/src/lib.rs +++ b/crates/nostr-sdk-db/src/lib.rs @@ -11,7 +11,7 @@ use std::collections::HashSet; pub use async_trait::async_trait; use nostr::secp256k1::XOnlyPublicKey; -use nostr::{Event, EventId, Filter, Metadata, Timestamp, Url}; +use nostr::{Event, EventId, Filter, JsonUtil, Kind, Metadata, Timestamp, Url}; mod error; #[cfg(feature = "flatbuf")] @@ -49,7 +49,7 @@ pub type DynNostrDatabase = dyn NostrDatabase; #[async_trait] pub trait NostrDatabase: AsyncTraitDeps { /// Error - type Err; + type Err: From; /// Name of the backend database used (ex. rocksdb, lmdb, sqlite, indexeddb, ...) fn backend(&self) -> Backend; @@ -106,6 +106,20 @@ pub trait NostrDatabase: AsyncTraitDeps { /// Wipe all data async fn wipe(&self) -> Result<(), Self::Err>; + + /// Get profile metadata + #[tracing::instrument(skip_all)] + async fn profile(&self, public_key: XOnlyPublicKey) -> Result { + let filter = Filter::new() + .author(public_key) + .kind(Kind::Metadata) + .limit(1); + let events: Vec = self.query(vec![filter]).await?; + match events.first() { + Some(event) => Ok(Metadata::from_json(&event.content).map_err(DatabaseError::nostr)?), + None => Ok(Metadata::default()), // TODO: return an Option? 
+ } + } } /// Alias for `Send` on non-wasm, empty trait (implemented by everything) on From 497f7f4dd79f128caa7470bd48fa1d65213d3bb2 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 6 Nov 2023 16:43:02 +0100 Subject: [PATCH 57/98] db: fix `index_event` low performance --- crates/nostr-sdk-db/src/index.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/nostr-sdk-db/src/index.rs b/crates/nostr-sdk-db/src/index.rs index 8a74e3464..61cc89dc7 100644 --- a/crates/nostr-sdk-db/src/index.rs +++ b/crates/nostr-sdk-db/src/index.rs @@ -135,7 +135,6 @@ impl DatabaseIndexes { { let mut index = self.index.write().await; let now = Timestamp::now(); - // TODO: check if it's expired or ephemeral, check if it's replaceable or param replaceable index.extend( events .into_iter() @@ -186,7 +185,9 @@ impl DatabaseIndexes { } // Remove events - index.retain(|e| !to_discard.contains(&e.event_id)); + if !to_discard.is_empty() { + index.retain(|e| !to_discard.contains(&e.event_id)); + } // Insert event if should_insert { From 61b2a2391b47adc715c24e9370aefb8a41b1f47f Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 6 Nov 2023 16:43:47 +0100 Subject: [PATCH 58/98] db: add `count` method to `NostrDatabase` --- crates/nostr-sdk-db/src/lib.rs | 4 +++- crates/nostr-sdk-db/src/memory.rs | 5 +++++ crates/nostr-sdk-rocksdb/examples/rocksdb.rs | 3 +++ crates/nostr-sdk-rocksdb/src/lib.rs | 18 ++++++++++++++++-- 4 files changed, 27 insertions(+), 3 deletions(-) diff --git a/crates/nostr-sdk-db/src/lib.rs b/crates/nostr-sdk-db/src/lib.rs index 1f2032919..26f5b8685 100644 --- a/crates/nostr-sdk-db/src/lib.rs +++ b/crates/nostr-sdk-db/src/lib.rs @@ -57,6 +57,9 @@ pub trait NostrDatabase: AsyncTraitDeps { /// Database options fn opts(&self) -> DatabaseOptions; + /// Count number of [`Event`] stored + async fn count(&self) -> Result; + /// Save [`Event`] into store /// /// Return `true` if event was successfully saved into database. 
@@ -108,7 +111,6 @@ pub trait NostrDatabase: AsyncTraitDeps { async fn wipe(&self) -> Result<(), Self::Err>; /// Get profile metadata - #[tracing::instrument(skip_all)] async fn profile(&self, public_key: XOnlyPublicKey) -> Result { let filter = Filter::new() .author(public_key) diff --git a/crates/nostr-sdk-db/src/memory.rs b/crates/nostr-sdk-db/src/memory.rs index c6ec15484..d735926f8 100644 --- a/crates/nostr-sdk-db/src/memory.rs +++ b/crates/nostr-sdk-db/src/memory.rs @@ -119,6 +119,11 @@ impl NostrDatabase for MemoryDatabase { self.opts } + async fn count(&self) -> Result { + let events = self.events.read().await; + Ok(events.len()) + } + async fn save_event(&self, event: &Event) -> Result { let mut events = self.events.write().await; self._save_event(&mut events, event.clone()).await diff --git a/crates/nostr-sdk-rocksdb/examples/rocksdb.rs b/crates/nostr-sdk-rocksdb/examples/rocksdb.rs index ea86c7b06..47b86131a 100644 --- a/crates/nostr-sdk-rocksdb/examples/rocksdb.rs +++ b/crates/nostr-sdk-rocksdb/examples/rocksdb.rs @@ -27,6 +27,9 @@ async fn main() { println!("Pubkey B: {}", keys_b.public_key()); let database = RocksDatabase::new("./db/rocksdb").unwrap(); + + println!("Events stored: {}", database.count().await.unwrap()); + database.build_indexes().await.unwrap(); /* for i in 0..100_000 { diff --git a/crates/nostr-sdk-rocksdb/src/lib.rs b/crates/nostr-sdk-rocksdb/src/lib.rs index 792d64ab7..7e4c38400 100644 --- a/crates/nostr-sdk-rocksdb/src/lib.rs +++ b/crates/nostr-sdk-rocksdb/src/lib.rs @@ -35,7 +35,7 @@ fn default_opts() -> rocksdb::Options { opts.set_max_open_files(100); opts.set_compaction_style(DBCompactionStyle::Level); opts.set_compression_type(DBCompressionType::Snappy); - opts.set_write_buffer_size(5 * 1024 * 1024); // 10 MB + opts.set_write_buffer_size(64 * 1024 * 1024); // 64 MB opts.set_enable_write_thread_adaptive_yield(true); opts.set_disable_auto_compactions(false); opts.increase_parallelism(2); @@ -109,6 +109,20 @@ impl NostrDatabase 
for RocksDatabase { DatabaseOptions::default() } + async fn count(&self) -> Result { + let this = self.clone(); + tokio::task::spawn_blocking(move || { + let cf = this.cf_handle(EVENTS_CF)?; + Ok(this + .db + .full_iterator_cf(&cf, IteratorMode::Start) + .flatten() + .count()) + }) + .await + .unwrap() + } + #[tracing::instrument(skip_all, level = "trace")] async fn save_event(&self, event: &Event) -> Result { // Index event @@ -137,7 +151,7 @@ impl NostrDatabase for RocksDatabase { // Discard events no longer needed for event_id in to_discard.into_iter() { - batch.delete_cf(&events_cf, event_id.as_bytes()); + batch.delete_cf(&events_cf, event_id); } // Write batch changes From 7cc5072c634554baca7f2647008da0350fba1b8a Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 6 Nov 2023 17:07:17 +0100 Subject: [PATCH 59/98] rocksdb: add `num_cpus` dep --- Cargo.lock | 1 + crates/nostr-sdk-rocksdb/Cargo.toml | 1 + crates/nostr-sdk-rocksdb/src/lib.rs | 23 ++++++++++++++--------- 3 files changed, 16 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e9cf7da2f..a6bbfb439 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1303,6 +1303,7 @@ dependencies = [ "async-trait", "nostr", "nostr-sdk-db", + "num_cpus", "rocksdb", "tokio", "tracing", diff --git a/crates/nostr-sdk-rocksdb/Cargo.toml b/crates/nostr-sdk-rocksdb/Cargo.toml index deec535cb..07fb3e10b 100644 --- a/crates/nostr-sdk-rocksdb/Cargo.toml +++ b/crates/nostr-sdk-rocksdb/Cargo.toml @@ -15,6 +15,7 @@ keywords = ["nostr", "sdk", "db", "redb"] async-trait = { workspace = true } nostr = { workspace = true, features = ["std"] } nostr-sdk-db = { version = "0.1", path = "../nostr-sdk-db", features = ["flatbuf"] } +num_cpus = "1.16" rocksdb = { version = "0.21", default-features = false, features = ["multi-threaded-cf", "snappy"] } tokio = { workspace = true, features = ["rt-multi-thread", "sync"] } tracing = { workspace = true, features = ["std"] } diff --git a/crates/nostr-sdk-rocksdb/src/lib.rs 
b/crates/nostr-sdk-rocksdb/src/lib.rs index 7e4c38400..a5fc7b619 100644 --- a/crates/nostr-sdk-rocksdb/src/lib.rs +++ b/crates/nostr-sdk-rocksdb/src/lib.rs @@ -19,7 +19,9 @@ use rocksdb::{ use tokio::sync::RwLock; const EVENTS_CF: &str = "events"; -//const EVENTS_SEEN_BY_RELAYS: &str = "event-seen-by-relays"; +const EVENTS_SEEN_BY_RELAYS: &str = "event-seen-by-relays"; + +const COLUMN_FAMILIES: &[&str] = &[EVENTS_CF, EVENTS_SEEN_BY_RELAYS]; /// RocksDB Nostr Database #[derive(Debug, Clone)] @@ -32,20 +34,17 @@ pub struct RocksDatabase { fn default_opts() -> rocksdb::Options { let mut opts = Options::default(); opts.set_keep_log_file_num(10); - opts.set_max_open_files(100); + opts.set_max_open_files(16); opts.set_compaction_style(DBCompactionStyle::Level); opts.set_compression_type(DBCompressionType::Snappy); + opts.set_target_file_size_base(64 * 1024 * 1024); // 64 MB opts.set_write_buffer_size(64 * 1024 * 1024); // 64 MB opts.set_enable_write_thread_adaptive_yield(true); opts.set_disable_auto_compactions(false); - opts.increase_parallelism(2); + opts.increase_parallelism(num_cpus::get() as i32); opts } -fn column_families() -> Vec { - vec![ColumnFamilyDescriptor::new(EVENTS_CF, default_opts())] -} - impl RocksDatabase { pub fn new

(path: P) -> Result where @@ -59,8 +58,14 @@ impl RocksDatabase { db_opts.create_if_missing(true); db_opts.create_missing_column_families(true); - let db = OptimisticTransactionDB::open_cf_descriptors(&db_opts, path, column_families()) - .map_err(DatabaseError::backend)?; + let db = OptimisticTransactionDB::open_cf_descriptors( + &db_opts, + path, + COLUMN_FAMILIES + .iter() + .map(|&name| ColumnFamilyDescriptor::new(name, default_opts())), + ) + .map_err(DatabaseError::backend)?; match db.live_files() { Ok(live_files) => tracing::info!( From ef7a87f6064bb08d5d7d65b38921135ad574bb35 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 6 Nov 2023 18:20:19 +0100 Subject: [PATCH 60/98] db: add `event_seen_by` flatbuffers schema --- crates/nostr-sdk-db/Makefile | 1 + crates/nostr-sdk-db/fbs/event_seen_by.fbs | 7 + .../flatbuffers/event_seen_by_generated.rs | 216 ++++++++++++++++++ crates/nostr-sdk-db/src/flatbuffers/mod.rs | 43 +++- 4 files changed, 265 insertions(+), 2 deletions(-) create mode 100644 crates/nostr-sdk-db/fbs/event_seen_by.fbs create mode 100644 crates/nostr-sdk-db/src/flatbuffers/event_seen_by_generated.rs diff --git a/crates/nostr-sdk-db/Makefile b/crates/nostr-sdk-db/Makefile index 5c5682c04..1d7b2edb8 100644 --- a/crates/nostr-sdk-db/Makefile +++ b/crates/nostr-sdk-db/Makefile @@ -2,6 +2,7 @@ all: build flatbuf: flatc --rust -o ./src/flatbuffers ./fbs/event.fbs + flatc --rust -o ./src/flatbuffers ./fbs/event_seen_by.fbs graph: CARGO_PROFILE_RELEASE_DEBUG=true cargo flamegraph --release --example indexes -o flamegraph.svg \ No newline at end of file diff --git a/crates/nostr-sdk-db/fbs/event_seen_by.fbs b/crates/nostr-sdk-db/fbs/event_seen_by.fbs new file mode 100644 index 000000000..11ec0c306 --- /dev/null +++ b/crates/nostr-sdk-db/fbs/event_seen_by.fbs @@ -0,0 +1,7 @@ +namespace EventSeenByFbs; + +table EventSeenBy { + relay_urls: [string]; +} + +root_type EventSeenBy; \ No newline at end of file diff --git 
a/crates/nostr-sdk-db/src/flatbuffers/event_seen_by_generated.rs b/crates/nostr-sdk-db/src/flatbuffers/event_seen_by_generated.rs new file mode 100644 index 000000000..eda0ecf71 --- /dev/null +++ b/crates/nostr-sdk-db/src/flatbuffers/event_seen_by_generated.rs @@ -0,0 +1,216 @@ +// automatically generated by the FlatBuffers compiler, do not modify + +// @generated + +use core::cmp::Ordering; +use core::mem; + +extern crate flatbuffers; +use self::flatbuffers::{EndianScalar, Follow}; + +#[allow(unused_imports, dead_code)] +pub mod event_seen_by_fbs { + + use core::cmp::Ordering; + use core::mem; + + extern crate flatbuffers; + use self::flatbuffers::{EndianScalar, Follow}; + + pub enum EventSeenByOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct EventSeenBy<'a> { + pub _tab: flatbuffers::Table<'a>, + } + + impl<'a> flatbuffers::Follow<'a> for EventSeenBy<'a> { + type Inner = EventSeenBy<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: flatbuffers::Table::new(buf, loc), + } + } + } + + impl<'a> EventSeenBy<'a> { + pub const VT_RELAY_URLS: flatbuffers::VOffsetT = 4; + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + EventSeenBy { _tab: table } + } + #[allow(unused_mut)] + pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, + args: &'args EventSeenByArgs<'args>, + ) -> flatbuffers::WIPOffset> { + let mut builder = EventSeenByBuilder::new(_fbb); + if let Some(x) = args.relay_urls { + builder.add_relay_urls(x); + } + builder.finish() + } + + #[inline] + pub fn relay_urls( + &self, + ) -> Option>> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::>, + >>(EventSeenBy::VT_RELAY_URLS, None) + } + } + } + + impl flatbuffers::Verifiable for EventSeenBy<'_> { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: 
usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.visit_table(pos)? + .visit_field::>, + >>("relay_urls", Self::VT_RELAY_URLS, false)? + .finish(); + Ok(()) + } + } + pub struct EventSeenByArgs<'a> { + pub relay_urls: Option< + flatbuffers::WIPOffset>>, + >, + } + impl<'a> Default for EventSeenByArgs<'a> { + #[inline] + fn default() -> Self { + EventSeenByArgs { relay_urls: None } + } + } + + pub struct EventSeenByBuilder<'a: 'b, 'b> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, + start_: flatbuffers::WIPOffset, + } + impl<'a: 'b, 'b> EventSeenByBuilder<'a, 'b> { + #[inline] + pub fn add_relay_urls( + &mut self, + relay_urls: flatbuffers::WIPOffset< + flatbuffers::Vector<'b, flatbuffers::ForwardsUOffset<&'b str>>, + >, + ) { + self.fbb_.push_slot_always::>( + EventSeenBy::VT_RELAY_URLS, + relay_urls, + ); + } + #[inline] + pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> EventSeenByBuilder<'a, 'b> { + let start = _fbb.start_table(); + EventSeenByBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } + } + + impl core::fmt::Debug for EventSeenBy<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("EventSeenBy"); + ds.field("relay_urls", &self.relay_urls()); + ds.finish() + } + } + #[inline] + /// Verifies that a buffer of bytes contains a `EventSeenBy` + /// and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_event_seen_by_unchecked`. + pub fn root_as_event_seen_by( + buf: &[u8], + ) -> Result { + flatbuffers::root::(buf) + } + #[inline] + /// Verifies that a buffer of bytes contains a size prefixed + /// `EventSeenBy` and returns it. 
+ /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `size_prefixed_root_as_event_seen_by_unchecked`. + pub fn size_prefixed_root_as_event_seen_by( + buf: &[u8], + ) -> Result { + flatbuffers::size_prefixed_root::(buf) + } + #[inline] + /// Verifies, with the given options, that a buffer of bytes + /// contains a `EventSeenBy` and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_event_seen_by_unchecked`. + pub fn root_as_event_seen_by_with_opts<'b, 'o>( + opts: &'o flatbuffers::VerifierOptions, + buf: &'b [u8], + ) -> Result, flatbuffers::InvalidFlatbuffer> { + flatbuffers::root_with_opts::>(opts, buf) + } + #[inline] + /// Verifies, with the given verifier options, that a buffer of + /// bytes contains a size prefixed `EventSeenBy` and returns + /// it. Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_event_seen_by_unchecked`. + pub fn size_prefixed_root_as_event_seen_by_with_opts<'b, 'o>( + opts: &'o flatbuffers::VerifierOptions, + buf: &'b [u8], + ) -> Result, flatbuffers::InvalidFlatbuffer> { + flatbuffers::size_prefixed_root_with_opts::>(opts, buf) + } + #[inline] + /// Assumes, without verification, that a buffer of bytes contains a EventSeenBy and returns it. + /// # Safety + /// Callers must trust the given bytes do indeed contain a valid `EventSeenBy`. + pub unsafe fn root_as_event_seen_by_unchecked(buf: &[u8]) -> EventSeenBy { + flatbuffers::root_unchecked::(buf) + } + #[inline] + /// Assumes, without verification, that a buffer of bytes contains a size prefixed EventSeenBy and returns it. 
+ /// # Safety + /// Callers must trust the given bytes do indeed contain a valid size prefixed `EventSeenBy`. + pub unsafe fn size_prefixed_root_as_event_seen_by_unchecked(buf: &[u8]) -> EventSeenBy { + flatbuffers::size_prefixed_root_unchecked::(buf) + } + #[inline] + pub fn finish_event_seen_by_buffer<'a, 'b>( + fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>, + root: flatbuffers::WIPOffset>, + ) { + fbb.finish(root, None); + } + + #[inline] + pub fn finish_size_prefixed_event_seen_by_buffer<'a, 'b>( + fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>, + root: flatbuffers::WIPOffset>, + ) { + fbb.finish_size_prefixed(root, None); + } +} // pub mod EventSeenByFbs diff --git a/crates/nostr-sdk-db/src/flatbuffers/mod.rs b/crates/nostr-sdk-db/src/flatbuffers/mod.rs index 4c8d3d9f9..8350c3d05 100644 --- a/crates/nostr-sdk-db/src/flatbuffers/mod.rs +++ b/crates/nostr-sdk-db/src/flatbuffers/mod.rs @@ -3,18 +3,23 @@ //! Flatbuffers +use std::collections::HashSet; + pub use flatbuffers::FlatBufferBuilder; use flatbuffers::InvalidFlatbuffer; use nostr::event::raw::RawEvent; use nostr::secp256k1::schnorr::Signature; use nostr::secp256k1::{self, XOnlyPublicKey}; -use nostr::{Event, EventId, Kind, Tag, Timestamp}; +use nostr::{Event, EventId, Kind, Tag, Timestamp, Url}; use thiserror::Error; #[allow(unused_imports, dead_code, clippy::all, unsafe_code, missing_docs)] mod event_generated; +#[allow(unused_imports, dead_code, clippy::all, unsafe_code, missing_docs)] +mod event_seen_by_generated; -pub use self::event_generated::event_fbs; +use self::event_generated::event_fbs; +use self::event_seen_by_generated::event_seen_by_fbs; /// FlatBuffers Error #[derive(Debug, Error)] @@ -144,3 +149,37 @@ impl FlatBufferDecode for RawEvent { }) } } + +impl FlatBufferEncode for HashSet { + #[tracing::instrument(skip_all, level = "trace")] + fn encode<'a>(&self, fbb: &'a mut FlatBufferBuilder) -> &'a [u8] { + fbb.reset(); + + let urls: Vec<_> = self + .into_iter() + .map(|url| 
fbb.create_string(&url.to_string())) + .collect(); + let args = event_seen_by_fbs::EventSeenByArgs { + relay_urls: Some(fbb.create_vector(&urls)), + }; + + let offset = event_seen_by_fbs::EventSeenBy::create(fbb, &args); + + event_seen_by_fbs::finish_event_seen_by_buffer(fbb, offset); + + fbb.finished_data() + } +} + +impl FlatBufferDecode for HashSet { + #[tracing::instrument(skip_all, level = "trace")] + fn decode(buf: &[u8]) -> Result { + let ev = event_seen_by_fbs::root_as_event_seen_by(buf)?; + Ok(ev + .relay_urls() + .ok_or(Error::NotFound)? + .into_iter() + .filter_map(|url| Url::parse(url).ok()) + .collect::>()) + } +} From 7fae6033c43b6fe1fa2a493a6d74c9ff75c5aed5 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 6 Nov 2023 18:22:27 +0100 Subject: [PATCH 61/98] rocksdb: complete `NostrDatabase` impl --- crates/nostr-sdk-rocksdb/examples/rocksdb.rs | 8 +++ crates/nostr-sdk-rocksdb/src/lib.rs | 57 ++++++++++++++------ crates/nostr-sdk-rocksdb/src/ops.rs | 16 ++---- 3 files changed, 55 insertions(+), 26 deletions(-) diff --git a/crates/nostr-sdk-rocksdb/examples/rocksdb.rs b/crates/nostr-sdk-rocksdb/examples/rocksdb.rs index 47b86131a..b5370e845 100644 --- a/crates/nostr-sdk-rocksdb/examples/rocksdb.rs +++ b/crates/nostr-sdk-rocksdb/examples/rocksdb.rs @@ -69,6 +69,14 @@ async fn main() { database.save_event(&event).await.unwrap(); } */ + /* let event_id = EventId::all_zeros(); + database.event_id_seen(event_id, Some(Url::parse("wss://relay.damus.io").unwrap())).await.unwrap(); + database.event_id_seen(event_id, Some(Url::parse("wss://relay.nostr.info").unwrap())).await.unwrap(); + database.event_id_seen(event_id, Some(Url::parse("wss://relay.damus.io").unwrap())).await.unwrap(); + + let relays = database.event_recently_seen_on_relays(event_id).await.unwrap(); + println!("Seen on: {relays:?}"); */ + let events = database .query(vec![Filter::new() .kinds(vec![Kind::Metadata, Kind::Custom(123), Kind::TextNote]) diff --git 
a/crates/nostr-sdk-rocksdb/src/lib.rs b/crates/nostr-sdk-rocksdb/src/lib.rs index a5fc7b619..0aaf6a9d5 100644 --- a/crates/nostr-sdk-rocksdb/src/lib.rs +++ b/crates/nostr-sdk-rocksdb/src/lib.rs @@ -18,11 +18,11 @@ use rocksdb::{ }; use tokio::sync::RwLock; +mod ops; + const EVENTS_CF: &str = "events"; const EVENTS_SEEN_BY_RELAYS: &str = "event-seen-by-relays"; -const COLUMN_FAMILIES: &[&str] = &[EVENTS_CF, EVENTS_SEEN_BY_RELAYS]; - /// RocksDB Nostr Database #[derive(Debug, Clone)] pub struct RocksDatabase { @@ -45,6 +45,19 @@ fn default_opts() -> rocksdb::Options { opts } +fn column_families() -> Vec { + let mut relay_urls_opts: Options = default_opts(); + relay_urls_opts.set_merge_operator_associative( + "relay_urls_merge_operator", + ops::relay_urls_merge_operator, + ); + + vec![ + ColumnFamilyDescriptor::new(EVENTS_CF, default_opts()), + ColumnFamilyDescriptor::new(EVENTS_SEEN_BY_RELAYS, relay_urls_opts), + ] +} + impl RocksDatabase { pub fn new

(path: P) -> Result where @@ -58,14 +71,8 @@ impl RocksDatabase { db_opts.create_if_missing(true); db_opts.create_missing_column_families(true); - let db = OptimisticTransactionDB::open_cf_descriptors( - &db_opts, - path, - COLUMN_FAMILIES - .iter() - .map(|&name| ColumnFamilyDescriptor::new(name, default_opts())), - ) - .map_err(DatabaseError::backend)?; + let db = OptimisticTransactionDB::open_cf_descriptors(&db_opts, path, column_families()) + .map_err(DatabaseError::backend)?; match db.live_files() { Ok(live_files) => tracing::info!( @@ -176,10 +183,22 @@ impl NostrDatabase for RocksDatabase { async fn event_id_seen( &self, - _event_id: EventId, - _relay_url: Option, + event_id: EventId, + relay_url: Option, ) -> Result<(), Self::Err> { - todo!() + let mut fbb = self.fbb.write().await; + let cf = self.cf_handle(EVENTS_SEEN_BY_RELAYS)?; + let value: HashSet = match relay_url { + Some(relay_url) => { + let mut set = HashSet::with_capacity(1); + set.insert(relay_url); + set + } + None => HashSet::new(), + }; + self.db + .merge_cf(&cf, event_id, value.encode(&mut fbb)) + .map_err(DatabaseError::backend) } async fn event_ids_seen( @@ -192,9 +211,17 @@ impl NostrDatabase for RocksDatabase { async fn event_recently_seen_on_relays( &self, - _event_id: EventId, + event_id: EventId, ) -> Result>, Self::Err> { - todo!() + let cf = self.cf_handle(EVENTS_SEEN_BY_RELAYS)?; + match self + .db + .get_pinned_cf(&cf, event_id) + .map_err(DatabaseError::backend)? 
+ { + Some(val) => Ok(Some(HashSet::decode(&val).map_err(DatabaseError::backend)?)), + None => Ok(None), + } } #[tracing::instrument(skip_all)] diff --git a/crates/nostr-sdk-rocksdb/src/ops.rs b/crates/nostr-sdk-rocksdb/src/ops.rs index 27f390240..1d1d433fd 100644 --- a/crates/nostr-sdk-rocksdb/src/ops.rs +++ b/crates/nostr-sdk-rocksdb/src/ops.rs @@ -5,28 +5,22 @@ use std::collections::HashSet; -use nostr_sdk_fbs::{FlatBufferBuilder, FlatBufferUtils}; +use nostr::Url; +use nostr_sdk_db::{FlatBufferBuilder, FlatBufferDecode, FlatBufferEncode}; use rocksdb::MergeOperands; -pub(crate) fn indexes_merge_operator( +pub(crate) fn relay_urls_merge_operator( _new_key: &[u8], existing: Option<&[u8]>, operands: &MergeOperands, ) -> Option> { - let mut existing: HashSet<[u8; 32]> = match existing { + let mut existing: HashSet = match existing { Some(val) => HashSet::decode(val).ok()?, None => HashSet::with_capacity(operands.len()), }; for operand in operands.into_iter() { - // Check size of operand - if operand.len() == 32 { - let mut event_id: [u8; 32] = [0u8; 32]; - event_id.copy_from_slice(operand); - existing.insert(event_id); - } else { - existing.extend(HashSet::decode(operand).ok()?); - } + existing.extend(HashSet::decode(operand).ok()?); } let mut fbb = FlatBufferBuilder::with_capacity(existing.len() * 32 * 2); // Check capacity size if correct From 40beeaea557cbac294d40d0fa2b73a15b24539d4 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 6 Nov 2023 18:23:29 +0100 Subject: [PATCH 62/98] db: remove `event_ids_seen` method from `NostrDatabase` --- crates/nostr-sdk-db/src/lib.rs | 9 --------- crates/nostr-sdk-db/src/memory.rs | 13 ------------- crates/nostr-sdk-rocksdb/src/lib.rs | 8 -------- 3 files changed, 30 deletions(-) diff --git a/crates/nostr-sdk-db/src/lib.rs b/crates/nostr-sdk-db/src/lib.rs index 26f5b8685..d8f098106 100644 --- a/crates/nostr-sdk-db/src/lib.rs +++ b/crates/nostr-sdk-db/src/lib.rs @@ -77,15 +77,6 @@ pub trait NostrDatabase: AsyncTraitDeps 
{ relay_url: Option, ) -> Result<(), Self::Err>; - /// Set multiple [`EventId`] as seen - /// - /// Optionally, save also the relay url where the event has been seen (useful for NIP65, aka gossip) - async fn event_ids_seen( - &self, - event_ids: Vec, - relay_url: Option, - ) -> Result<(), Self::Err>; - /// Get list of relays that have seen the [`EventId`] async fn event_recently_seen_on_relays( &self, diff --git a/crates/nostr-sdk-db/src/memory.rs b/crates/nostr-sdk-db/src/memory.rs index d735926f8..1fa56a248 100644 --- a/crates/nostr-sdk-db/src/memory.rs +++ b/crates/nostr-sdk-db/src/memory.rs @@ -144,19 +144,6 @@ impl NostrDatabase for MemoryDatabase { Ok(()) } - async fn event_ids_seen( - &self, - event_ids: Vec, - relay_url: Option, - ) -> Result<(), Self::Err> { - let mut seen_event_ids = self.seen_event_ids.write().await; - for event_id in event_ids.into_iter() { - self._event_id_seen(&mut seen_event_ids, event_id, relay_url.clone()); - } - - Ok(()) - } - async fn event_recently_seen_on_relays( &self, event_id: EventId, diff --git a/crates/nostr-sdk-rocksdb/src/lib.rs b/crates/nostr-sdk-rocksdb/src/lib.rs index 0aaf6a9d5..b6ab8a908 100644 --- a/crates/nostr-sdk-rocksdb/src/lib.rs +++ b/crates/nostr-sdk-rocksdb/src/lib.rs @@ -201,14 +201,6 @@ impl NostrDatabase for RocksDatabase { .map_err(DatabaseError::backend) } - async fn event_ids_seen( - &self, - _event_ids: Vec, - _relay_url: Option, - ) -> Result<(), Self::Err> { - todo!() - } - async fn event_recently_seen_on_relays( &self, event_id: EventId, From 0045e7db50a8d977933e4515c3c216d44acc5d5f Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Tue, 7 Nov 2023 11:10:41 +0100 Subject: [PATCH 63/98] db: add WASM support to `NostrDatabase` trait --- crates/nostr-sdk-db/src/lib.rs | 3 +- crates/nostr-sdk-db/src/memory.rs | 55 ++++++++++++++----------------- 2 files changed, 27 insertions(+), 31 deletions(-) diff --git a/crates/nostr-sdk-db/src/lib.rs b/crates/nostr-sdk-db/src/lib.rs index d8f098106..d59cb1e79 
100644 --- a/crates/nostr-sdk-db/src/lib.rs +++ b/crates/nostr-sdk-db/src/lib.rs @@ -46,7 +46,8 @@ pub enum Backend { pub type DynNostrDatabase = dyn NostrDatabase; /// Nostr SDK Database -#[async_trait] +#[cfg_attr(target_arch = "wasm32", async_trait(?Send))] +#[cfg_attr(not(target_arch = "wasm32"), async_trait)] pub trait NostrDatabase: AsyncTraitDeps { /// Error type Err: From; diff --git a/crates/nostr-sdk-db/src/memory.rs b/crates/nostr-sdk-db/src/memory.rs index 1fa56a248..534179833 100644 --- a/crates/nostr-sdk-db/src/memory.rs +++ b/crates/nostr-sdk-db/src/memory.rs @@ -75,22 +75,40 @@ impl MemoryDatabase { None => HashSet::with_capacity(0), }); } +} - async fn _save_event( - &self, - events: &mut HashMap, - event: Event, - ) -> Result { +#[cfg_attr(target_arch = "wasm32", async_trait(?Send))] +#[cfg_attr(not(target_arch = "wasm32"), async_trait)] +impl NostrDatabase for MemoryDatabase { + type Err = DatabaseError; + + fn backend(&self) -> Backend { + Backend::Memory + } + + fn opts(&self) -> DatabaseOptions { + self.opts + } + + async fn count(&self) -> Result { + let events = self.events.read().await; + Ok(events.len()) + } + + async fn save_event(&self, event: &Event) -> Result { + // Set event as seen self.event_id_seen(event.id, None).await?; if self.opts.events { let EventIndexResult { to_store, to_discard, - } = self.indexes.index_event(&event).await; + } = self.indexes.index_event(event).await; if to_store { - events.insert(event.id, event); + let mut events = self.events.write().await; + + events.insert(event.id, event.clone()); for event_id in to_discard.into_iter() { events.remove(&event_id); @@ -105,29 +123,6 @@ impl MemoryDatabase { Ok(false) } } -} - -#[async_trait] -impl NostrDatabase for MemoryDatabase { - type Err = DatabaseError; - - fn backend(&self) -> Backend { - Backend::Memory - } - - fn opts(&self) -> DatabaseOptions { - self.opts - } - - async fn count(&self) -> Result { - let events = self.events.read().await; - Ok(events.len()) - 
} - - async fn save_event(&self, event: &Event) -> Result { - let mut events = self.events.write().await; - self._save_event(&mut events, event.clone()).await - } async fn has_event_already_been_seen(&self, event_id: EventId) -> Result { let seen_event_ids = self.seen_event_ids.read().await; From 7a46df978e5f25c4fb24d8ab54052f97e8c2cef6 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Tue, 7 Nov 2023 14:27:46 +0100 Subject: [PATCH 64/98] db: update `event_ids_by_filters` method output --- crates/nostr-sdk-db/src/lib.rs | 5 +++- crates/nostr-sdk-db/src/memory.rs | 14 ++++------- crates/nostr-sdk-rocksdb/Cargo.toml | 6 ++--- crates/nostr-sdk-rocksdb/src/lib.rs | 39 +++++++---------------------- 4 files changed, 21 insertions(+), 43 deletions(-) diff --git a/crates/nostr-sdk-db/src/lib.rs b/crates/nostr-sdk-db/src/lib.rs index d59cb1e79..5d88dae4d 100644 --- a/crates/nostr-sdk-db/src/lib.rs +++ b/crates/nostr-sdk-db/src/lib.rs @@ -91,7 +91,10 @@ pub trait NostrDatabase: AsyncTraitDeps { async fn query(&self, filters: Vec) -> Result, Self::Err>; /// Get event IDs by filters - async fn event_ids_by_filters(&self, filters: Vec) -> Result, Self::Err>; + async fn event_ids_by_filters( + &self, + filters: Vec, + ) -> Result, Self::Err>; /// Get `negentropy` items async fn negentropy_items( diff --git a/crates/nostr-sdk-db/src/memory.rs b/crates/nostr-sdk-db/src/memory.rs index 534179833..d58d01dd5 100644 --- a/crates/nostr-sdk-db/src/memory.rs +++ b/crates/nostr-sdk-db/src/memory.rs @@ -179,16 +179,12 @@ impl NostrDatabase for MemoryDatabase { } } - async fn event_ids_by_filters(&self, filters: Vec) -> Result, Self::Err> { + async fn event_ids_by_filters( + &self, + filters: Vec, + ) -> Result, Self::Err> { if self.opts.events { - let events = self.events.read().await; - let mut list: Vec = Vec::new(); - for event in events.values() { - if filters.match_event(event) { - list.push(event.id); - } - } - Ok(list) + Ok(self.indexes.query(filters).await) } else { 
Err(DatabaseError::FeatureDisabled) } diff --git a/crates/nostr-sdk-rocksdb/Cargo.toml b/crates/nostr-sdk-rocksdb/Cargo.toml index 07fb3e10b..9e67939a2 100644 --- a/crates/nostr-sdk-rocksdb/Cargo.toml +++ b/crates/nostr-sdk-rocksdb/Cargo.toml @@ -2,14 +2,14 @@ name = "nostr-sdk-rocksdb" version = "0.1.0" edition = "2021" -description = "TODO" +description = "RocksDB Storage backend for Nostr SDK" authors = ["Yuki Kishimoto "] homepage.workspace = true repository.workspace = true license.workspace = true readme = "README.md" rust-version.workspace = true -keywords = ["nostr", "sdk", "db", "redb"] +keywords = ["nostr", "sdk", "db", "rocksdb"] [dependencies] async-trait = { workspace = true } @@ -18,7 +18,7 @@ nostr-sdk-db = { version = "0.1", path = "../nostr-sdk-db", features = ["flatbuf num_cpus = "1.16" rocksdb = { version = "0.21", default-features = false, features = ["multi-threaded-cf", "snappy"] } tokio = { workspace = true, features = ["rt-multi-thread", "sync"] } -tracing = { workspace = true, features = ["std"] } +tracing = { workspace = true, features = ["std", "attributes"] } [dev-dependencies] tokio = { workspace = true, features = ["macros", "rt-multi-thread", "time"] } diff --git a/crates/nostr-sdk-rocksdb/src/lib.rs b/crates/nostr-sdk-rocksdb/src/lib.rs index b6ab8a908..321560c0c 100644 --- a/crates/nostr-sdk-rocksdb/src/lib.rs +++ b/crates/nostr-sdk-rocksdb/src/lib.rs @@ -21,7 +21,7 @@ use tokio::sync::RwLock; mod ops; const EVENTS_CF: &str = "events"; -const EVENTS_SEEN_BY_RELAYS: &str = "event-seen-by-relays"; +const EVENTS_SEEN_BY_RELAYS_CF: &str = "event-seen-by-relays"; /// RocksDB Nostr Database #[derive(Debug, Clone)] @@ -54,7 +54,7 @@ fn column_families() -> Vec { vec![ ColumnFamilyDescriptor::new(EVENTS_CF, default_opts()), - ColumnFamilyDescriptor::new(EVENTS_SEEN_BY_RELAYS, relay_urls_opts), + ColumnFamilyDescriptor::new(EVENTS_SEEN_BY_RELAYS_CF, relay_urls_opts), ] } @@ -187,7 +187,7 @@ impl NostrDatabase for RocksDatabase { relay_url: 
Option, ) -> Result<(), Self::Err> { let mut fbb = self.fbb.write().await; - let cf = self.cf_handle(EVENTS_SEEN_BY_RELAYS)?; + let cf = self.cf_handle(EVENTS_SEEN_BY_RELAYS_CF)?; let value: HashSet = match relay_url { Some(relay_url) => { let mut set = HashSet::with_capacity(1); @@ -205,7 +205,7 @@ impl NostrDatabase for RocksDatabase { &self, event_id: EventId, ) -> Result>, Self::Err> { - let cf = self.cf_handle(EVENTS_SEEN_BY_RELAYS)?; + let cf = self.cf_handle(EVENTS_SEEN_BY_RELAYS_CF)?; match self .db .get_pinned_cf(&cf, event_id) @@ -263,32 +263,11 @@ impl NostrDatabase for RocksDatabase { .map_err(DatabaseError::backend)? } - async fn event_ids_by_filters(&self, filters: Vec) -> Result, Self::Err> { - let ids = self.indexes.query(filters.clone()).await; - - let this = self.clone(); - tokio::task::spawn_blocking(move || { - let cf = this.cf_handle(EVENTS_CF)?; - - let mut event_ids: Vec = Vec::new(); - - for v in this - .db - .batched_multi_get_cf(&cf, ids, false) - .into_iter() - .flatten() - .flatten() - { - let event: Event = Event::decode(&v).map_err(DatabaseError::backend)?; - if filters.match_event(&event) { - event_ids.push(event.id); - } - } - - Ok(event_ids) - }) - .await - .map_err(DatabaseError::backend)? 
+ async fn event_ids_by_filters( + &self, + filters: Vec, + ) -> Result, Self::Err> { + Ok(self.indexes.query(filters).await) } async fn negentropy_items( From 13f129fad755a4fc8a13f4f44be0045a39c5a3da Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Tue, 7 Nov 2023 14:32:15 +0100 Subject: [PATCH 65/98] db: fix clippy --- crates/nostr-sdk-db/src/flatbuffers/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/nostr-sdk-db/src/flatbuffers/mod.rs b/crates/nostr-sdk-db/src/flatbuffers/mod.rs index 8350c3d05..44c726e8a 100644 --- a/crates/nostr-sdk-db/src/flatbuffers/mod.rs +++ b/crates/nostr-sdk-db/src/flatbuffers/mod.rs @@ -156,8 +156,8 @@ impl FlatBufferEncode for HashSet { fbb.reset(); let urls: Vec<_> = self - .into_iter() - .map(|url| fbb.create_string(&url.to_string())) + .iter() + .map(|url| fbb.create_string(url.as_ref())) .collect(); let args = event_seen_by_fbs::EventSeenByArgs { relay_urls: Some(fbb.create_vector(&urls)), From b3af5617b76c07e22ee26544700825eb041f9ac5 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Tue, 7 Nov 2023 14:38:07 +0100 Subject: [PATCH 66/98] Add `nostr-sdk-indexeddb` crate --- Cargo.lock | 149 +++++++++ crates/nostr-sdk-indexeddb/Cargo.toml | 25 ++ crates/nostr-sdk-indexeddb/README.md | 17 ++ crates/nostr-sdk-indexeddb/src/error.rs | 33 ++ crates/nostr-sdk-indexeddb/src/hex.rs | 139 +++++++++ crates/nostr-sdk-indexeddb/src/lib.rs | 384 ++++++++++++++++++++++++ 6 files changed, 747 insertions(+) create mode 100644 crates/nostr-sdk-indexeddb/Cargo.toml create mode 100644 crates/nostr-sdk-indexeddb/README.md create mode 100644 crates/nostr-sdk-indexeddb/src/error.rs create mode 100644 crates/nostr-sdk-indexeddb/src/hex.rs create mode 100644 crates/nostr-sdk-indexeddb/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index a6bbfb439..e432c9060 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,18 @@ # It is not intended for manual editing. 
version = 3 +[[package]] +name = "accessory" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fb0baead5bf1d7f3429259f02076dba82d2f617af7128ca4b288051edd80e93" +dependencies = [ + "macroific", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "addr2line" version = "0.21.0" @@ -555,6 +567,18 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" +[[package]] +name = "delegate-display" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98a85201f233142ac819bbf6226e36d0b5e129a47bd325084674261c82d4cd66" +dependencies = [ + "macroific", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "digest" version = "0.10.7" @@ -614,6 +638,18 @@ dependencies = [ "libc", ] +[[package]] +name = "fancy_constructor" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f71f317e4af73b2f8f608fac190c52eac4b1879d2145df1db2fe48881ca69435" +dependencies = [ + "macroific", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "flatbuffers" version = "23.5.26" @@ -946,6 +982,23 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "indexed_db_futures" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6cc2083760572ee02385ab8b7c02c20925d2dd1f97a1a25a8737a238608f1152" +dependencies = [ + "accessory", + "cfg-if", + "delegate-display", + "fancy_constructor", + "js-sys", + "uuid", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "indexmap" version = "1.9.3" @@ -1098,6 +1151,53 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "macroific" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a304d83e1ef544546dec28cafb9fcb72bfd281da112042d9279808b2ef6be600" +dependencies = [ + 
"macroific_attr_parse", + "macroific_core", + "macroific_macro", +] + +[[package]] +name = "macroific_attr_parse" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd34ba6db76d16ae96fbb873ea972524d6b83577040e2758582217aaa2527546" +dependencies = [ + "cfg-if", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "macroific_core" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5a4902dfaf37c1480d6573bbc009b41e50e859acd11f5dade360a0a17ecbefb" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "macroific_macro" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0c9853143cbed7f1e41dc39fee95f9b361bec65c8dc2a01bf609be01b61f5ae" +dependencies = [ + "macroific_attr_parse", + "macroific_core", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "matchers" version = "0.1.0" @@ -1268,6 +1368,21 @@ dependencies = [ "uniffi", ] +[[package]] +name = "nostr-sdk-indexeddb" +version = "0.1.0" +dependencies = [ + "async-trait", + "indexed_db_futures", + "nostr", + "nostr-sdk-db", + "thiserror", + "tokio", + "tracing", + "wasm-bindgen", + "wasm-bindgen-test", +] + [[package]] name = "nostr-sdk-js" version = "0.1.0" @@ -2374,6 +2489,16 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" +[[package]] +name = "uuid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88ad59a7560b41a70d191093a945f0b87bc1deeda46fb237479708a1d6b6cdfc" +dependencies = [ + "getrandom", + "wasm-bindgen", +] + [[package]] name = "valuable" version = "0.1.0" @@ -2473,6 +2598,30 @@ version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +[[package]] +name = 
"wasm-bindgen-test" +version = "0.3.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e6e302a7ea94f83a6d09e78e7dc7d9ca7b186bc2829c24a22d0753efd680671" +dependencies = [ + "console_error_panic_hook", + "js-sys", + "scoped-tls", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-bindgen-test-macro", +] + +[[package]] +name = "wasm-bindgen-test-macro" +version = "0.3.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecb993dd8c836930ed130e020e77d9b2e65dd0fbab1b67c790b0f5d80b11a575" +dependencies = [ + "proc-macro2", + "quote", +] + [[package]] name = "wasm-logger" version = "0.2.0" diff --git a/crates/nostr-sdk-indexeddb/Cargo.toml b/crates/nostr-sdk-indexeddb/Cargo.toml new file mode 100644 index 000000000..62ed9180b --- /dev/null +++ b/crates/nostr-sdk-indexeddb/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "nostr-sdk-indexeddb" +version = "0.1.0" +edition = "2021" +description = "Web's IndexedDB Storage backend for Nostr SDK" +authors = ["Yuki Kishimoto "] +homepage.workspace = true +repository.workspace = true +license.workspace = true +readme = "README.md" +rust-version.workspace = true +keywords = ["nostr", "sdk", "db", "indexeddb"] + +[dependencies] +async-trait = { workspace = true } +indexed_db_futures = "0.4" +nostr = { workspace = true, features = ["std"] } +nostr-sdk-db = { version = "0.1", path = "../nostr-sdk-db", features = ["flatbuf"] } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["sync"] } +tracing = { workspace = true, features = ["std", "attributes"] } +wasm-bindgen = "0.2" + +[dev-dependencies] +wasm-bindgen-test = "0.3" diff --git a/crates/nostr-sdk-indexeddb/README.md b/crates/nostr-sdk-indexeddb/README.md new file mode 100644 index 000000000..8a114938a --- /dev/null +++ b/crates/nostr-sdk-indexeddb/README.md @@ -0,0 +1,17 @@ +# Nostr SDK IndexedDB + +This crate implements a storage backend on IndexedDB for web environments. 
+ +## State + +**This library is in an ALPHA state**, things that are implemented generally work but the API will change in breaking ways. + +## License + +This project is distributed under the MIT software license - see the [LICENSE](../../LICENSE) file for details + +## Donations + +⚡ Tips: + +⚡ Lightning Address: yuki@getalby.com \ No newline at end of file diff --git a/crates/nostr-sdk-indexeddb/src/error.rs b/crates/nostr-sdk-indexeddb/src/error.rs new file mode 100644 index 000000000..07216895f --- /dev/null +++ b/crates/nostr-sdk-indexeddb/src/error.rs @@ -0,0 +1,33 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +use nostr_sdk_db::DatabaseError; +use thiserror::Error; + +/// IndexedDB error +#[derive(Debug, Error)] +pub enum IndexedDBError { + /// DOM error + #[error("DomException {name} ({code}): {message}")] + DomException { + /// DomException code + code: u16, + /// Specific name of the DomException + name: String, + /// Message given to the DomException + message: String, + }, + /// Database error + #[error(transparent)] + Database(#[from] DatabaseError), +} + +impl From for IndexedDBError { + fn from(frm: indexed_db_futures::web_sys::DomException) -> Self { + Self::DomException { + name: frm.name(), + message: frm.message(), + code: frm.code(), + } + } +} diff --git a/crates/nostr-sdk-indexeddb/src/hex.rs b/crates/nostr-sdk-indexeddb/src/hex.rs new file mode 100644 index 000000000..49e231cf7 --- /dev/null +++ b/crates/nostr-sdk-indexeddb/src/hex.rs @@ -0,0 +1,139 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +//! Hex + +use std::fmt; +use std::string::String; +use std::vec::Vec; + +/// Hex error +#[derive(Debug, PartialEq, Eq)] +pub enum Error { + /// An invalid character was found + InvalidHexCharacter { + /// Char + c: char, + /// Char index + index: usize, + }, + /// A hex string's length needs to be even, as two digits correspond to + /// one byte. 
+ OddLength, +} + +impl std::error::Error for Error {} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::InvalidHexCharacter { c, index } => { + write!(f, "Invalid character {} at position {}", c, index) + } + Self::OddLength => write!(f, "Odd number of digits"), + } + } +} + +#[inline] +fn from_digit(num: u8) -> char { + if num < 10 { + (b'0' + num) as char + } else { + (b'a' + num - 10) as char + } +} + +/// Hex encode +pub fn encode(data: T) -> String +where + T: AsRef<[u8]>, +{ + let bytes: &[u8] = data.as_ref(); + let mut hex: String = String::with_capacity(2 * bytes.len()); + for byte in bytes.iter() { + hex.push(from_digit(byte >> 4)); + hex.push(from_digit(byte & 0xF)); + } + hex +} + +const fn val(c: u8, idx: usize) -> Result { + match c { + b'A'..=b'F' => Ok(c - b'A' + 10), + b'a'..=b'f' => Ok(c - b'a' + 10), + b'0'..=b'9' => Ok(c - b'0'), + _ => Err(Error::InvalidHexCharacter { + c: c as char, + index: idx, + }), + } +} + +/// Hex decode +pub fn decode(hex: T) -> Result, Error> +where + T: AsRef<[u8]>, +{ + let hex = hex.as_ref(); + let len = hex.len(); + + if len % 2 != 0 { + return Err(Error::OddLength); + } + + let mut bytes: Vec = Vec::with_capacity(len / 2); + + for i in (0..len).step_by(2) { + let high = val(hex[i], i)?; + let low = val(hex[i + 1], i + 1)?; + bytes.push(high << 4 | low); + } + + Ok(bytes) +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_encode() { + assert_eq!(encode("foobar"), "666f6f626172"); + } + + #[test] + fn test_decode() { + assert_eq!( + decode("666f6f626172"), + Ok(String::from("foobar").into_bytes()) + ); + } + + #[test] + pub fn test_invalid_length() { + assert_eq!(decode("1").unwrap_err(), Error::OddLength); + assert_eq!(decode("666f6f6261721").unwrap_err(), Error::OddLength); + } + + #[test] + pub fn test_invalid_char() { + assert_eq!( + decode("66ag").unwrap_err(), + Error::InvalidHexCharacter { c: 'g', index: 3 } + ); + } +} + 
+#[cfg(bench)] +mod benches { + use super::*; + use crate::test::{black_box, Bencher}; + + #[bench] + pub fn hex_encode(bh: &mut Bencher) { + bh.iter(|| { + black_box(encode("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")); + }); + } +} diff --git a/crates/nostr-sdk-indexeddb/src/lib.rs b/crates/nostr-sdk-indexeddb/src/lib.rs new file mode 100644 index 000000000..a223ca681 --- /dev/null +++ b/crates/nostr-sdk-indexeddb/src/lib.rs @@ -0,0 +1,384 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +//! + +#![forbid(unsafe_code)] +#![warn(missing_docs)] +#![warn(rustdoc::bare_urls)] +#![allow(clippy::arc_with_non_send_sync)] +#![cfg_attr(not(target_arch = "wasm32"), allow(unused))] + +use std::collections::{HashMap, HashSet}; +use std::fmt; +use std::future::IntoFuture; +use std::sync::Arc; + +#[cfg(target_arch = "wasm32")] +use async_trait::async_trait; +use indexed_db_futures::request::{IdbOpenDbRequestLike, OpenDbRequest}; +use indexed_db_futures::web_sys::IdbTransactionMode; +use indexed_db_futures::{IdbDatabase, IdbQuerySource, IdbVersionChangeEvent}; +use nostr::event::raw::RawEvent; +use nostr::{Event, EventId, Filter, Timestamp, Url}; +#[cfg(target_arch = "wasm32")] +use nostr_sdk_db::NostrDatabase; +use nostr_sdk_db::{ + Backend, DatabaseError, DatabaseIndexes, DatabaseOptions, EventIndexResult, FlatBufferBuilder, + FlatBufferDecode, FlatBufferEncode, +}; +use tokio::sync::Mutex; +use wasm_bindgen::JsValue; + +mod error; +mod hex; + +pub use self::error::IndexedDBError; + +const CURRENT_DB_VERSION: u32 = 2; +const EVENTS_CF: &str = "events"; +const EVENTS_SEEN_BY_RELAYS_CF: &str = "event-seen-by-relays"; +const ALL_STORES: [&str; 2] = [EVENTS_CF, EVENTS_SEEN_BY_RELAYS_CF]; + +/// Helper struct for upgrading the inner DB. +#[derive(Debug, Clone, Default)] +pub struct OngoingMigration { + /// Names of stores to drop. + drop_stores: HashSet<&'static str>, + /// Names of stores to create. 
+ create_stores: HashSet<&'static str>, + /// Store name => key-value data to add. + data: HashMap<&'static str, Vec<(JsValue, JsValue)>>, +} + +/// IndexedDB Nostr Database +#[derive(Clone)] +pub struct WebDatabase { + db: Arc, + indexes: DatabaseIndexes, + fbb: Arc>>, +} + +impl fmt::Debug for WebDatabase { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("WebDatabase") + .field("name", &self.db.name()) + .finish() + } +} + +impl WebDatabase { + /// Open IndexedDB store + pub async fn open(name: S) -> Result + where + S: AsRef, + { + let mut this = Self { + db: Arc::new(IdbDatabase::open(name.as_ref())?.into_future().await?), + indexes: DatabaseIndexes::new(), + fbb: Arc::new(Mutex::new(FlatBufferBuilder::with_capacity(70_000))), + }; + + this.migration().await?; + this.build_indexes().await?; + + Ok(this) + } + + async fn migration(&mut self) -> Result<(), IndexedDBError> { + let name: String = self.db.name(); + let mut old_version: u32 = self.db.version() as u32; + + if old_version < CURRENT_DB_VERSION { + // Inside the `onupgradeneeded` callback we would know whether it's a new DB + // because the old version would be set to 0, here it is already set to 1 so we + // check if the stores exist. 
+ if old_version == 1 && self.db.object_store_names().next().is_none() { + old_version = 0; + } + + if old_version == 0 { + tracing::info!("Initializing database schemas..."); + let migration = OngoingMigration { + create_stores: ALL_STORES.into_iter().collect(), + ..Default::default() + }; + self.apply_migration(CURRENT_DB_VERSION, migration).await?; + tracing::info!("Database schemas initialized."); + } else { + /* if old_version < 3 { + db = migrate_to_v3(db, store_cipher).await?; + } + if old_version < 4 { + db = migrate_to_v4(db, store_cipher).await?; + } */ + } + + self.db.close(); + + let mut db_req: OpenDbRequest = IdbDatabase::open_u32(&name, CURRENT_DB_VERSION)?; + db_req.set_on_upgrade_needed(Some( + move |evt: &IdbVersionChangeEvent| -> Result<(), JsValue> { + // Sanity check. + // There should be no upgrade needed since the database should have already been + // upgraded to the latest version. + panic!( + "Opening database that was not fully upgraded: \ + DB version: {}; latest version: {CURRENT_DB_VERSION}", + evt.old_version() + ) + }, + )); + + self.db = Arc::new(db_req.into_future().await?); + } + + Ok(()) + } + + async fn apply_migration( + &mut self, + version: u32, + migration: OngoingMigration, + ) -> Result<(), IndexedDBError> { + let name: String = self.db.name(); + self.db.close(); + + let mut db_req: OpenDbRequest = IdbDatabase::open_u32(&name, version)?; + db_req.set_on_upgrade_needed(Some( + move |evt: &IdbVersionChangeEvent| -> Result<(), JsValue> { + // Changing the format can only happen in the upgrade procedure + for store in &migration.drop_stores { + evt.db().delete_object_store(store)?; + } + for store in &migration.create_stores { + evt.db().create_object_store(store)?; + tracing::debug!("Created '{store}' object store"); + } + + Ok(()) + }, + )); + + self.db = Arc::new(db_req.into_future().await?); + + // Finally, we can add data to the newly created tables if needed. 
+ if !migration.data.is_empty() { + let stores: Vec<_> = migration.data.keys().copied().collect(); + let tx = self + .db + .transaction_on_multi_with_mode(&stores, IdbTransactionMode::Readwrite)?; + + for (name, data) in migration.data { + let store = tx.object_store(name)?; + for (key, value) in data { + store.put_key_val(&key, &value)?; + } + } + + tx.await.into_result()?; + } + + Ok(()) + } + + async fn build_indexes(&self) -> Result<(), IndexedDBError> { + tracing::debug!("Building database indexes..."); + let tx = self + .db + .transaction_on_one_with_mode(EVENTS_CF, IdbTransactionMode::Readonly)?; + let store = tx.object_store(EVENTS_CF)?; + let events = store + .get_all()? + .await? + .into_iter() + .filter_map(|v| v.as_string()) + .filter_map(|v| { + let bytes = hex::decode(v).ok()?; + RawEvent::decode(&bytes).ok() + }); + self.indexes.bulk_load(events).await; + tracing::info!("Database indexes loaded"); + Ok(()) + } +} + +// Small hack to have the following macro invocation act as the appropriate +// trait impl block on wasm, but still be compiled on non-wasm as a regular +// impl block otherwise. +// +// The trait impl doesn't compile on non-wasm due to unfulfilled trait bounds, +// this hack allows us to still have most of rust-analyzer's IDE functionality +// within the impl block without having to set it up to check things against +// the wasm target (which would disable many other parts of the codebase). +#[cfg(target_arch = "wasm32")] +macro_rules! impl_nostr_database { + ({ $($body:tt)* }) => { + #[async_trait(?Send)] + impl NostrDatabase for WebDatabase { + type Err = IndexedDBError; + + $($body)* + } + }; +} + +#[cfg(not(target_arch = "wasm32"))] +macro_rules! 
impl_nostr_database { + ({ $($body:tt)* }) => { + impl WebDatabase { + $($body)* + } + }; +} + +impl_nostr_database!({ + fn backend(&self) -> Backend { + Backend::IndexedDB + } + + fn opts(&self) -> DatabaseOptions { + DatabaseOptions::default() + } + + async fn count(&self) -> Result { + Err(DatabaseError::NotSupported.into()) + } + + #[tracing::instrument(skip_all, level = "trace")] + async fn save_event(&self, event: &Event) -> Result { + // Index event + let EventIndexResult { + to_store, + to_discard, + } = self.indexes.index_event(event).await; + + if to_store { + // Acquire FlatBuffers Builder + let mut fbb = self.fbb.lock().await; + + let tx = self + .db + .transaction_on_one_with_mode(EVENTS_CF, IdbTransactionMode::Readwrite)?; + let store = tx.object_store(EVENTS_CF)?; + let key = JsValue::from(event.id.to_hex()); + let value = JsValue::from(hex::encode(event.encode(&mut fbb))); + store.put_key_val(&key, &value)?; + + // Discard events no longer needed + for event_id in to_discard.into_iter() { + let key = JsValue::from(event_id.to_hex()); + store.delete(&key)?; + } + + tx.await.into_result()?; + + Ok(true) + } else { + Ok(false) + } + } + + async fn has_event_already_been_seen( + &self, + _event_id: EventId, + ) -> Result { + todo!() + } + + async fn event_id_seen( + &self, + _event_id: EventId, + _relay_url: Option, + ) -> Result<(), IndexedDBError> { + todo!() + } + + async fn event_recently_seen_on_relays( + &self, + _event_id: EventId, + ) -> Result>, IndexedDBError> { + todo!() + } + + #[tracing::instrument(skip_all)] + async fn event_by_id(&self, event_id: EventId) -> Result { + let tx = self + .db + .transaction_on_one_with_mode(EVENTS_CF, IdbTransactionMode::Readonly)?; + let store = tx.object_store(EVENTS_CF)?; + let key = JsValue::from(event_id.to_hex()); + match store.get(&key)?.await? 
{ + Some(jsvalue) => { + let event_hex = jsvalue + .as_string() + .ok_or(IndexedDBError::Database(DatabaseError::NotFound))?; + let bytes = hex::decode(event_hex).map_err(DatabaseError::backend)?; + Ok(Event::decode(&bytes).map_err(DatabaseError::backend)?) + } + None => Err(IndexedDBError::Database(DatabaseError::NotFound)), + } + } + + #[tracing::instrument(skip_all)] + async fn query(&self, filters: Vec) -> Result, IndexedDBError> { + let ids = self.indexes.query(filters.clone()).await; + + let tx = self + .db + .transaction_on_one_with_mode(EVENTS_CF, IdbTransactionMode::Readonly)?; + let store = tx.object_store(EVENTS_CF)?; + + let mut events: Vec = Vec::new(); + + for event_id in ids.into_iter() { + let key = JsValue::from(event_id.to_hex()); + if let Some(jsvalue) = store.get(&key)?.await? { + let event_hex = jsvalue.as_string().ok_or(DatabaseError::NotFound)?; + let bytes = hex::decode(event_hex).map_err(DatabaseError::backend)?; + let event = Event::decode(&bytes).map_err(DatabaseError::backend)?; + events.push(event); + } + } + + Ok(events) + } + + async fn event_ids_by_filters( + &self, + filters: Vec, + ) -> Result, IndexedDBError> { + Ok(self.indexes.query(filters).await) + } + + async fn negentropy_items( + &self, + filter: Filter, + ) -> Result, IndexedDBError> { + let ids = self.indexes.query(vec![filter]).await; + + let tx = self + .db + .transaction_on_one_with_mode(EVENTS_CF, IdbTransactionMode::Readonly)?; + let store = tx.object_store(EVENTS_CF)?; + + let mut events: Vec<(EventId, Timestamp)> = Vec::new(); + + for event_id in ids.into_iter() { + let key = JsValue::from(event_id.to_hex()); + if let Some(jsvalue) = store.get(&key)?.await? 
{ + let event_hex = jsvalue.as_string().ok_or(DatabaseError::NotFound)?; + let bytes = hex::decode(event_hex).map_err(DatabaseError::backend)?; + let raw = RawEvent::decode(&bytes).map_err(DatabaseError::backend)?; + let event_id = EventId::from_slice(&raw.id).map_err(DatabaseError::nostr)?; + events.push((event_id, Timestamp::from(raw.created_at))); + } + } + + Ok(events) + } + + async fn wipe(&self) -> Result<(), IndexedDBError> { + Err(DatabaseError::NotSupported.into()) + } +}); From bdf339fe54c5eb7c67c07bbb3ce5a83d4bb9597b Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Tue, 7 Nov 2023 14:39:16 +0100 Subject: [PATCH 67/98] rocksdb: auto build indexes when opening store --- crates/nostr-sdk-rocksdb/examples/rocksdb.rs | 4 +--- crates/nostr-sdk-rocksdb/src/lib.rs | 19 +++++++++++++++---- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/crates/nostr-sdk-rocksdb/examples/rocksdb.rs b/crates/nostr-sdk-rocksdb/examples/rocksdb.rs index b5370e845..1e364aa8f 100644 --- a/crates/nostr-sdk-rocksdb/examples/rocksdb.rs +++ b/crates/nostr-sdk-rocksdb/examples/rocksdb.rs @@ -26,12 +26,10 @@ async fn main() { let keys_b = Keys::new(secret_key); println!("Pubkey B: {}", keys_b.public_key()); - let database = RocksDatabase::new("./db/rocksdb").unwrap(); + let database = RocksDatabase::open("./db/rocksdb").await.unwrap(); println!("Events stored: {}", database.count().await.unwrap()); - database.build_indexes().await.unwrap(); - /* for i in 0..100_000 { let event = EventBuilder::new_text_note(format!("Event #{i}"), &[]) .to_event(&keys_a) diff --git a/crates/nostr-sdk-rocksdb/src/lib.rs b/crates/nostr-sdk-rocksdb/src/lib.rs index 321560c0c..42261444a 100644 --- a/crates/nostr-sdk-rocksdb/src/lib.rs +++ b/crates/nostr-sdk-rocksdb/src/lib.rs @@ -1,6 +1,12 @@ // Copyright (c) 2022-2023 Yuki Kishimoto // Distributed under the MIT software license +//! 
RocksDB Storage backend for Nostr SDK + +#![forbid(unsafe_code)] +#![warn(missing_docs)] +#![warn(rustdoc::bare_urls)] + use std::collections::HashSet; use std::path::Path; use std::sync::Arc; @@ -59,7 +65,8 @@ fn column_families() -> Vec { } impl RocksDatabase { - pub fn new

(path: P) -> Result + /// Open RocksDB store + pub async fn open

(path: P) -> Result where P: AsRef, { @@ -85,11 +92,15 @@ impl RocksDatabase { Err(_) => tracing::warn!("Impossible to get live files"), }; - Ok(Self { + let this = Self { db: Arc::new(db), indexes: DatabaseIndexes::new(), fbb: Arc::new(RwLock::new(FlatBufferBuilder::with_capacity(70_000))), - }) + }; + + this.build_indexes().await?; + + Ok(this) } fn cf_handle(&self, name: &str) -> Result, DatabaseError> { @@ -97,7 +108,7 @@ impl RocksDatabase { } #[tracing::instrument(skip_all)] - pub async fn build_indexes(&self) -> Result<(), DatabaseError> { + async fn build_indexes(&self) -> Result<(), DatabaseError> { let cf = self.cf_handle(EVENTS_CF)?; let events = self .db From 327286459d511800c8077b6b3592bf07dc5e9403 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Tue, 7 Nov 2023 14:42:53 +0100 Subject: [PATCH 68/98] indexeddb: add webapp example --- .../examples/webapp/.cargo/config.toml | 2 + .../examples/webapp/.gitignore | 4 ++ .../examples/webapp/Cargo.toml | 18 ++++++++ .../examples/webapp/Makefile | 5 +++ .../examples/webapp/README.md | 32 ++++++++++++++ .../examples/webapp/index.html | 8 ++++ .../examples/webapp/index.scss | 35 +++++++++++++++ .../examples/webapp/src/app.rs | 44 +++++++++++++++++++ .../examples/webapp/src/main.rs | 12 +++++ 9 files changed, 160 insertions(+) create mode 100644 crates/nostr-sdk-indexeddb/examples/webapp/.cargo/config.toml create mode 100644 crates/nostr-sdk-indexeddb/examples/webapp/.gitignore create mode 100644 crates/nostr-sdk-indexeddb/examples/webapp/Cargo.toml create mode 100644 crates/nostr-sdk-indexeddb/examples/webapp/Makefile create mode 100644 crates/nostr-sdk-indexeddb/examples/webapp/README.md create mode 100644 crates/nostr-sdk-indexeddb/examples/webapp/index.html create mode 100644 crates/nostr-sdk-indexeddb/examples/webapp/index.scss create mode 100644 crates/nostr-sdk-indexeddb/examples/webapp/src/app.rs create mode 100644 crates/nostr-sdk-indexeddb/examples/webapp/src/main.rs diff --git 
a/crates/nostr-sdk-indexeddb/examples/webapp/.cargo/config.toml b/crates/nostr-sdk-indexeddb/examples/webapp/.cargo/config.toml new file mode 100644 index 000000000..435ed755e --- /dev/null +++ b/crates/nostr-sdk-indexeddb/examples/webapp/.cargo/config.toml @@ -0,0 +1,2 @@ +[build] +target = "wasm32-unknown-unknown" \ No newline at end of file diff --git a/crates/nostr-sdk-indexeddb/examples/webapp/.gitignore b/crates/nostr-sdk-indexeddb/examples/webapp/.gitignore new file mode 100644 index 000000000..d5ae108a3 --- /dev/null +++ b/crates/nostr-sdk-indexeddb/examples/webapp/.gitignore @@ -0,0 +1,4 @@ +dist/ +target/ +Cargo.lock +.DS_Store diff --git a/crates/nostr-sdk-indexeddb/examples/webapp/Cargo.toml b/crates/nostr-sdk-indexeddb/examples/webapp/Cargo.toml new file mode 100644 index 000000000..65600cffd --- /dev/null +++ b/crates/nostr-sdk-indexeddb/examples/webapp/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "nostr-sdk-indexeddb-example" +version = "0.1.0" +edition = "2021" +publish = false + +# Prevent this from interfering with workspaces +[workspace] +members = ["."] + +[dependencies] +nostr = { path = "../../../nostr", features = ["std"] } +nostr-sdk-db = { path = "../../../nostr-sdk-db" } +nostr-sdk-indexeddb = { path = "../../" } +tracing-wasm = "0.2" +wasm-bindgen-futures = "0.4" +web-sys = "0.3" +yew = { version = "0.21", features = ["csr"] } diff --git a/crates/nostr-sdk-indexeddb/examples/webapp/Makefile b/crates/nostr-sdk-indexeddb/examples/webapp/Makefile new file mode 100644 index 000000000..36bf72d2f --- /dev/null +++ b/crates/nostr-sdk-indexeddb/examples/webapp/Makefile @@ -0,0 +1,5 @@ +init: + cargo install --locked trunk + +serve: + trunk serve \ No newline at end of file diff --git a/crates/nostr-sdk-indexeddb/examples/webapp/README.md b/crates/nostr-sdk-indexeddb/examples/webapp/README.md new file mode 100644 index 000000000..ffd66c779 --- /dev/null +++ b/crates/nostr-sdk-indexeddb/examples/webapp/README.md @@ -0,0 +1,32 @@ +# Yew Trunk 
Template + +### Installation + +If you don't already have it installed, it's time to install Rust: . +The rest of this guide assumes a typical Rust installation which contains both `rustup` and Cargo. + +To compile Rust to WASM, we need to have the `wasm32-unknown-unknown` target installed. +If you don't already have it, install it with the following command: + +```bash +rustup target add wasm32-unknown-unknown +``` + +Now that we have our basics covered, it's time to install the star of the show: [Trunk]. +Simply run the following command to install it: + +```bash +cargo install trunk wasm-bindgen-cli +``` + +That's it, we're done! + +### Running + +```bash +trunk serve +``` + +Rebuilds the app whenever a change is detected and runs a local server to host it. + +There's also the `trunk watch` command which does the same thing but without hosting it. diff --git a/crates/nostr-sdk-indexeddb/examples/webapp/index.html b/crates/nostr-sdk-indexeddb/examples/webapp/index.html new file mode 100644 index 000000000..4d13cf1a8 --- /dev/null +++ b/crates/nostr-sdk-indexeddb/examples/webapp/index.html @@ -0,0 +1,8 @@ + + + + + Trunk Template + + + diff --git a/crates/nostr-sdk-indexeddb/examples/webapp/index.scss b/crates/nostr-sdk-indexeddb/examples/webapp/index.scss new file mode 100644 index 000000000..710545f3a --- /dev/null +++ b/crates/nostr-sdk-indexeddb/examples/webapp/index.scss @@ -0,0 +1,35 @@ +html, +body { + height: 100%; + margin: 0; +} + +body { + align-items: center; + display: flex; + justify-content: center; + + background: linear-gradient(to bottom right, #444444, #009a5b); + font-size: 1.5rem; +} + +main { + color: #fff6d5; + font-family: sans-serif; + text-align: center; +} + +.logo { + height: 20em; +} + +.heart:after { + content: "❤️"; + + font-size: 1.75em; +} + +h1 + .subtitle { + display: block; + margin-top: -1em; +} diff --git a/crates/nostr-sdk-indexeddb/examples/webapp/src/app.rs b/crates/nostr-sdk-indexeddb/examples/webapp/src/app.rs new file 
mode 100644 index 000000000..1ae297993 --- /dev/null +++ b/crates/nostr-sdk-indexeddb/examples/webapp/src/app.rs @@ -0,0 +1,44 @@ +use nostr::prelude::*; +use nostr_sdk_db::NostrDatabase; +use nostr_sdk_indexeddb::WebDatabase; +use wasm_bindgen_futures::spawn_local; +use web_sys::console; +use yew::prelude::*; + +#[function_component(App)] +pub fn app() -> Html { + spawn_local(async { + let secret_key = SecretKey::from_bech32( + "nsec1j4c6269y9w0q2er2xjw8sv2ehyrtfxq3jwgdlxj6qfn8z4gjsq5qfvfk99", + ) + .unwrap(); + let keys_a = Keys::new(secret_key); + console::log_1(&format!("Pubkey A: {}", keys_a.public_key()).into()); + + let database = WebDatabase::open("nostr-sdk-indexeddb-test").await.unwrap(); + + let metadata = Metadata::new().name("Name"); + let event = EventBuilder::set_metadata(metadata) + .to_event(&keys_a) + .unwrap(); + database.save_event(&event).await.unwrap(); + + let events = database + .query(vec![Filter::new() + .kinds(vec![Kind::Metadata, Kind::Custom(123), Kind::TextNote]) + .limit(20) + .author(keys_a.public_key())]) + .await + .unwrap(); + console::log_1(&format!("Events: {events:?}").into()); + console::log_1(&format!("Got {} events", events.len()).into()); + }); + + html! { +

+ +

{ "Hello World!" }

+ { "from Yew with " } +
+ } +} diff --git a/crates/nostr-sdk-indexeddb/examples/webapp/src/main.rs b/crates/nostr-sdk-indexeddb/examples/webapp/src/main.rs new file mode 100644 index 000000000..b4b65d646 --- /dev/null +++ b/crates/nostr-sdk-indexeddb/examples/webapp/src/main.rs @@ -0,0 +1,12 @@ +mod app; + +use app::App; + +fn main() { + // Init logger + //wasm_logger::init(wasm_logger::Config::default()); + tracing_wasm::set_as_global_default(); + + // Start WASM app + yew::Renderer::::new().render(); +} From 859a55f02fc0169ccd3741b06bfdeb9ff1f72c81 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Tue, 7 Nov 2023 14:44:11 +0100 Subject: [PATCH 69/98] ci: add `nostr-sdk-indexeddb` --- .githooks/pre-push | 1 + .github/workflows/ci.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/.githooks/pre-push b/.githooks/pre-push index b38a6ae32..4a5fcb01a 100755 --- a/.githooks/pre-push +++ b/.githooks/pre-push @@ -26,6 +26,7 @@ done buildargs=( "-p nostr-js --target wasm32-unknown-unknown" "-p nostr-sdk-js --target wasm32-unknown-unknown" + "-p nostr-sdk-indexeddb --target wasm32-unknown-unknown" ) for arg in "${buildargs[@]}"; do diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 55002ceb8..a426db171 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -69,6 +69,7 @@ jobs: [ -p nostr, -p nostr-sdk, + -p nostr-sdk-indexeddb, -p nostr-js, ] steps: From 0bdcf7a9f2dc7e4ab832396dc92892f605255c86 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Tue, 7 Nov 2023 14:45:40 +0100 Subject: [PATCH 70/98] Update README.md files --- README.md | 3 +++ crates/nostr-sdk-db/README.md | 15 +++++++++++++++ 2 files changed, 18 insertions(+) create mode 100644 crates/nostr-sdk-db/README.md diff --git a/README.md b/README.md index 710e95b62..22f41fadb 100644 --- a/README.md +++ b/README.md @@ -7,6 +7,9 @@ The project is split up into several crates in the `crates/` directory: * [**nostr**](./crates/nostr/): Rust implementation of Nostr protocol. 
* [**nostr-sdk**](./crates/nostr-sdk/): High level client library. * [**nostr-sdk-net**](./crates/nostr-sdk-net/): Network library for [**nostr-sdk**](./crates/nostr-sdk/) +* [**nostr-sdk-db**](./crates/nostr-sdk-db/): Database for [**nostr-sdk**](./crates/nostr-sdk/) +* [**nostr-sdk-rocksdb**](./crates/nostr-sdk-rocksdb/): RocksDB Storage backend for [**nostr-sdk**](./crates/nostr-sdk/) +* [**nostr-sdk-indexeddb**](./crates/nostr-sdk-indexeddb/): IndexedDB Storage backend for [**nostr-sdk**](./crates/nostr-sdk/) ### Bindings diff --git a/crates/nostr-sdk-db/README.md b/crates/nostr-sdk-db/README.md new file mode 100644 index 000000000..fc2706f20 --- /dev/null +++ b/crates/nostr-sdk-db/README.md @@ -0,0 +1,15 @@ +# Nostr SDK Database + +## State + +**This library is in an ALPHA state**, things that are implemented generally work but the API will change in breaking ways. + +## License + +This project is distributed under the MIT software license - see the [LICENSE](../../LICENSE) file for details + +## Donations + +⚡ Tips: + +⚡ Lightning Address: yuki@getalby.com \ No newline at end of file From f14ac47652c1779ecae43163ef0795951d7b2d12 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Tue, 7 Nov 2023 14:58:07 +0100 Subject: [PATCH 71/98] indexeddb: update webapp's README.md --- .../nostr-sdk-indexeddb/examples/webapp/Makefile | 1 + .../examples/webapp/README.md | 16 +++------------- 2 files changed, 4 insertions(+), 13 deletions(-) diff --git a/crates/nostr-sdk-indexeddb/examples/webapp/Makefile b/crates/nostr-sdk-indexeddb/examples/webapp/Makefile index 36bf72d2f..ac0a4858c 100644 --- a/crates/nostr-sdk-indexeddb/examples/webapp/Makefile +++ b/crates/nostr-sdk-indexeddb/examples/webapp/Makefile @@ -1,4 +1,5 @@ init: + rustup target add wasm32-unknown-unknown cargo install --locked trunk serve: diff --git a/crates/nostr-sdk-indexeddb/examples/webapp/README.md b/crates/nostr-sdk-indexeddb/examples/webapp/README.md index ffd66c779..9827add57 100644 --- 
a/crates/nostr-sdk-indexeddb/examples/webapp/README.md +++ b/crates/nostr-sdk-indexeddb/examples/webapp/README.md @@ -5,18 +5,10 @@ If you don't already have it installed, it's time to install Rust: . The rest of this guide assumes a typical Rust installation which contains both `rustup` and Cargo. -To compile Rust to WASM, we need to have the `wasm32-unknown-unknown` target installed. -If you don't already have it, install it with the following command: +### Initalization ```bash -rustup target add wasm32-unknown-unknown -``` - -Now that we have our basics covered, it's time to install the star of the show: [Trunk]. -Simply run the following command to install it: - -```bash -cargo install trunk wasm-bindgen-cli +make init ``` That's it, we're done! @@ -24,9 +16,7 @@ That's it, we're done! ### Running ```bash -trunk serve +make serve ``` Rebuilds the app whenever a change is detected and runs a local server to host it. - -There's also the `trunk watch` command which does the same thing but without hosting it. 
From 0cb7ab0b8a1b35d4c39a2359e9cd10cb511ff72d Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Tue, 7 Nov 2023 15:05:50 +0100 Subject: [PATCH 72/98] Re-export `nostr` and `nostr-sdk-db` from `nostr-sdk-rocksdb` and `nostr-sdk-indexeddb` crates --- crates/nostr-sdk-indexeddb/examples/webapp/Cargo.toml | 2 -- crates/nostr-sdk-indexeddb/examples/webapp/src/app.rs | 7 +++++-- crates/nostr-sdk-indexeddb/examples/webapp/src/main.rs | 3 +++ crates/nostr-sdk-indexeddb/src/lib.rs | 5 ++++- crates/nostr-sdk-rocksdb/src/lib.rs | 3 +++ 5 files changed, 15 insertions(+), 5 deletions(-) diff --git a/crates/nostr-sdk-indexeddb/examples/webapp/Cargo.toml b/crates/nostr-sdk-indexeddb/examples/webapp/Cargo.toml index 65600cffd..6ec74248a 100644 --- a/crates/nostr-sdk-indexeddb/examples/webapp/Cargo.toml +++ b/crates/nostr-sdk-indexeddb/examples/webapp/Cargo.toml @@ -9,8 +9,6 @@ publish = false members = ["."] [dependencies] -nostr = { path = "../../../nostr", features = ["std"] } -nostr-sdk-db = { path = "../../../nostr-sdk-db" } nostr-sdk-indexeddb = { path = "../../" } tracing-wasm = "0.2" wasm-bindgen-futures = "0.4" diff --git a/crates/nostr-sdk-indexeddb/examples/webapp/src/app.rs b/crates/nostr-sdk-indexeddb/examples/webapp/src/app.rs index 1ae297993..dc9dceda8 100644 --- a/crates/nostr-sdk-indexeddb/examples/webapp/src/app.rs +++ b/crates/nostr-sdk-indexeddb/examples/webapp/src/app.rs @@ -1,5 +1,8 @@ -use nostr::prelude::*; -use nostr_sdk_db::NostrDatabase; +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +use nostr_sdk_indexeddb::database::NostrDatabase; +use nostr_sdk_indexeddb::nostr::prelude::*; use nostr_sdk_indexeddb::WebDatabase; use wasm_bindgen_futures::spawn_local; use web_sys::console; diff --git a/crates/nostr-sdk-indexeddb/examples/webapp/src/main.rs b/crates/nostr-sdk-indexeddb/examples/webapp/src/main.rs index b4b65d646..3fe8b9ea2 100644 --- a/crates/nostr-sdk-indexeddb/examples/webapp/src/main.rs +++ 
b/crates/nostr-sdk-indexeddb/examples/webapp/src/main.rs @@ -1,3 +1,6 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + mod app; use app::App; diff --git a/crates/nostr-sdk-indexeddb/src/lib.rs b/crates/nostr-sdk-indexeddb/src/lib.rs index a223ca681..e7bea1962 100644 --- a/crates/nostr-sdk-indexeddb/src/lib.rs +++ b/crates/nostr-sdk-indexeddb/src/lib.rs @@ -1,7 +1,7 @@ // Copyright (c) 2022-2023 Yuki Kishimoto // Distributed under the MIT software license -//! +//! Web's IndexedDB Storage backend for Nostr SDK #![forbid(unsafe_code)] #![warn(missing_docs)] @@ -14,6 +14,9 @@ use std::fmt; use std::future::IntoFuture; use std::sync::Arc; +pub extern crate nostr; +pub extern crate nostr_sdk_db as database; + #[cfg(target_arch = "wasm32")] use async_trait::async_trait; use indexed_db_futures::request::{IdbOpenDbRequestLike, OpenDbRequest}; diff --git a/crates/nostr-sdk-rocksdb/src/lib.rs b/crates/nostr-sdk-rocksdb/src/lib.rs index 42261444a..44d9a9ed4 100644 --- a/crates/nostr-sdk-rocksdb/src/lib.rs +++ b/crates/nostr-sdk-rocksdb/src/lib.rs @@ -11,6 +11,9 @@ use std::collections::HashSet; use std::path::Path; use std::sync::Arc; +pub extern crate nostr; +pub extern crate nostr_sdk_db as database; + use async_trait::async_trait; use nostr::event::raw::RawEvent; use nostr::{Event, EventId, Filter, FiltersMatchEvent, Timestamp, Url}; From 3e325b947d4e8a84a2e58af5de6966e0aa883004 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Tue, 7 Nov 2023 15:38:23 +0100 Subject: [PATCH 73/98] indexeddb: allow unknown lints --- crates/nostr-sdk-indexeddb/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/nostr-sdk-indexeddb/src/lib.rs b/crates/nostr-sdk-indexeddb/src/lib.rs index e7bea1962..0447b6fe9 100644 --- a/crates/nostr-sdk-indexeddb/src/lib.rs +++ b/crates/nostr-sdk-indexeddb/src/lib.rs @@ -6,7 +6,7 @@ #![forbid(unsafe_code)] #![warn(missing_docs)] #![warn(rustdoc::bare_urls)] 
-#![allow(clippy::arc_with_non_send_sync)] +#![allow(unknown_lints, clippy::arc_with_non_send_sync)] #![cfg_attr(not(target_arch = "wasm32"), allow(unused))] use std::collections::{HashMap, HashSet}; From 0a206a7eb133bc38b6c8ee11859a4dfe2f929d02 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Tue, 7 Nov 2023 15:39:25 +0100 Subject: [PATCH 74/98] db: allow `where-clauses-object-safety` --- crates/nostr-sdk-db/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/nostr-sdk-db/src/lib.rs b/crates/nostr-sdk-db/src/lib.rs index 5d88dae4d..3bde8acea 100644 --- a/crates/nostr-sdk-db/src/lib.rs +++ b/crates/nostr-sdk-db/src/lib.rs @@ -6,6 +6,7 @@ #![deny(unsafe_code)] #![warn(missing_docs)] #![warn(rustdoc::bare_urls)] +#![allow(where_clauses_object_safety)] use std::collections::HashSet; From 499c78193c96663e0f2d0a41a2682e779e2f2d5b Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Tue, 7 Nov 2023 15:56:55 +0100 Subject: [PATCH 75/98] sdk: update `database` method in `ClientBuilder` --- crates/nostr-sdk/src/client/builder.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/crates/nostr-sdk/src/client/builder.rs b/crates/nostr-sdk/src/client/builder.rs index a74a55c03..f0ca8cddd 100644 --- a/crates/nostr-sdk/src/client/builder.rs +++ b/crates/nostr-sdk/src/client/builder.rs @@ -7,7 +7,7 @@ use std::sync::Arc; use nostr::Keys; use nostr_sdk_db::memory::MemoryDatabase; -use nostr_sdk_db::DynNostrDatabase; +use nostr_sdk_db::{DatabaseError, DynNostrDatabase, NostrDatabase}; #[cfg(feature = "nip46")] use super::RemoteSigner; @@ -35,8 +35,11 @@ impl ClientBuilder { } /// Set database - pub fn database(mut self, database: Arc) -> Self { - self.database = database; + pub fn database(mut self, database: D) -> Self + where + D: NostrDatabase + 'static, + { + self.database = Arc::new(database); self } From cbfd3673822408e78d6c7c92e885ffd9d3828f94 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Tue, 7 Nov 2023 15:58:49 +0100 Subject: 
[PATCH 76/98] sdk: add `rocksdb` example --- Cargo.lock | 2 ++ crates/nostr-sdk/Cargo.toml | 8 +++++++ crates/nostr-sdk/examples/rocksdb.rs | 32 ++++++++++++++++++++++++++++ crates/nostr-sdk/src/lib.rs | 4 ++++ 4 files changed, 46 insertions(+) create mode 100644 crates/nostr-sdk/examples/rocksdb.rs diff --git a/Cargo.lock b/Cargo.lock index e432c9060..43c7119e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1336,7 +1336,9 @@ dependencies = [ "async-utility", "nostr", "nostr-sdk-db", + "nostr-sdk-indexeddb", "nostr-sdk-net", + "nostr-sdk-rocksdb", "once_cell", "thiserror", "tokio", diff --git a/crates/nostr-sdk/Cargo.toml b/crates/nostr-sdk/Cargo.toml index 2318e03a0..b3c9cab33 100644 --- a/crates/nostr-sdk/Cargo.toml +++ b/crates/nostr-sdk/Cargo.toml @@ -18,6 +18,8 @@ rustdoc-args = ["--cfg", "docsrs"] [features] default = ["all-nips"] blocking = ["async-utility/blocking", "nostr/blocking"] +rocksdb = ["dep:nostr-sdk-rocksdb"] +indexeddb = ["dep:nostr-sdk-indexeddb"] all-nips = ["nip04", "nip05", "nip06", "nip11", "nip46", "nip47"] nip03 = ["nostr/nip03"] nip04 = ["nostr/nip04"] @@ -38,9 +40,11 @@ thiserror = { workspace = true } tracing = { workspace = true, features = ["std"] } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] +nostr-sdk-rocksdb = { version = "0.1", path = "../nostr-sdk-rocksdb", optional = true } tokio = { workspace = true, features = ["rt-multi-thread", "time", "macros", "sync"] } [target.'cfg(target_arch = "wasm32")'.dependencies] +nostr-sdk-indexeddb = { version = "0.1", path = "../nostr-sdk-indexeddb", optional = true } tokio = { workspace = true, features = ["rt", "macros", "sync"] } [dev-dependencies] @@ -74,6 +78,10 @@ required-features = ["all-nips"] name = "client-stop" required-features = ["all-nips"] +[[example]] +name = "rocksdb" +required-features = ["all-nips", "rocksdb"] + [[example]] name = "shutdown-on-drop" diff --git a/crates/nostr-sdk/examples/rocksdb.rs b/crates/nostr-sdk/examples/rocksdb.rs new file mode 100644 index 
000000000..1f8135f92 --- /dev/null +++ b/crates/nostr-sdk/examples/rocksdb.rs @@ -0,0 +1,32 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +use nostr_sdk::prelude::*; + +const BECH32_SK: &str = "nsec1ufnus6pju578ste3v90xd5m2decpuzpql2295m3sknqcjzyys9ls0qlc85"; + +#[tokio::main] +async fn main() -> Result<()> { + tracing_subscriber::fmt::init(); + + let secret_key = SecretKey::from_bech32(BECH32_SK)?; + let my_keys = Keys::new(secret_key); + + let database = RocksDatabase::open("./db/rocksdb").await?; + let client: Client = ClientBuilder::new(&my_keys).database(database).build(); + + client.add_relay("wss://relay.damus.io", None).await?; + client.add_relay("wss://nostr.wine", None).await?; + + client.connect().await; + + // Publish a text note + client.publish_text_note("Hello world", &[]).await?; + + // Query events from database + let filter = Filter::new().author(my_keys.public_key()).limit(10); + let events = client.database().query(vec![filter]).await?; + println!("Events: {events:?}"); + + Ok(()) +} diff --git a/crates/nostr-sdk/src/lib.rs b/crates/nostr-sdk/src/lib.rs index 1516a6460..5534625e3 100644 --- a/crates/nostr-sdk/src/lib.rs +++ b/crates/nostr-sdk/src/lib.rs @@ -18,8 +18,12 @@ compile_error!("`blocking` feature can't be enabled for WASM targets"); pub use nostr::{self, *}; +#[cfg(feature = "indexeddb")] +pub use nostr_sdk_indexeddb::WebDatabase; #[cfg(feature = "blocking")] use nostr_sdk_net::futures_util::Future; +#[cfg(feature = "rocksdb")] +pub use nostr_sdk_rocksdb::RocksDatabase; #[cfg(feature = "blocking")] use once_cell::sync::Lazy; #[cfg(feature = "blocking")] From f0d2cadf52a9b507af09d768dd3a674082dfdc2a Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Tue, 7 Nov 2023 16:00:42 +0100 Subject: [PATCH 77/98] sdk: update README.md features --- crates/nostr-sdk/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/nostr-sdk/README.md b/crates/nostr-sdk/README.md index 
2b2b25d79..c03eeaa89 100644 --- a/crates/nostr-sdk/README.md +++ b/crates/nostr-sdk/README.md @@ -123,6 +123,8 @@ The following crate feature flags are available: | Feature | Default | Description | | ------------------- | :-----: | ---------------------------------------------------------------------------------------- | | `blocking` | No | Needed to use `NIP-05` and `NIP-11` features in not async/await context | +| `rocksdb` | No | Enable RocksDB Storage backend | +| `indexeddb` | No | Enable Web's IndexedDb Storage backend | | `all-nips` | Yes | Enable all NIPs | | `nip03` | No | Enable NIP-03: OpenTimestamps Attestations for Events | | `nip04` | Yes | Enable NIP-04: Encrypted Direct Message | From 26aadfe63ef1e814264f6b6a41d10d6307b5bb79 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Tue, 7 Nov 2023 18:07:34 +0100 Subject: [PATCH 78/98] sdk: add `NegentropyOptions` Fix negentropy reconciliation timeout when client already have all events --- crates/nostr-sdk/examples/negentropy.rs | 11 ++--- crates/nostr-sdk/examples/rocksdb.rs | 11 ++++- crates/nostr-sdk/src/client/mod.rs | 15 ++++--- crates/nostr-sdk/src/lib.rs | 5 ++- crates/nostr-sdk/src/relay/mod.rs | 54 ++++++++++++++++++++----- crates/nostr-sdk/src/relay/options.rs | 43 ++++++++++++++++++++ crates/nostr-sdk/src/relay/pool.rs | 12 +++--- 7 files changed, 116 insertions(+), 35 deletions(-) diff --git a/crates/nostr-sdk/examples/negentropy.rs b/crates/nostr-sdk/examples/negentropy.rs index 9d122a1c2..e81957844 100644 --- a/crates/nostr-sdk/examples/negentropy.rs +++ b/crates/nostr-sdk/examples/negentropy.rs @@ -1,8 +1,6 @@ // Copyright (c) 2022-2023 Yuki Kishimoto // Distributed under the MIT software license -use std::time::Duration; - use nostr_sdk::prelude::*; const BECH32_SK: &str = "nsec1ufnus6pju578ste3v90xd5m2decpuzpql2295m3sknqcjzyys9ls0qlc85"; @@ -15,16 +13,15 @@ async fn main() -> Result<()> { let my_keys = Keys::new(secret_key); let client = Client::new(&my_keys); - 
client.add_relay("wss://relay.damus.io", None).await?; + client.add_relay("wss://atl.purplerelay.com", None).await?; client.connect().await; let my_items = Vec::new(); let filter = Filter::new().author(my_keys.public_key()).limit(10); - let relay = client.relay("wss://relay.damus.io").await?; - relay - .reconcile(filter, my_items, Duration::from_secs(30)) - .await?; + let relay = client.relay("wss://atl.purplerelay.com").await?; + let opts = NegentropyOptions::default().syncrounous(false); + relay.reconcile(filter, my_items, opts).await?; client .handle_notifications(|notification| async { diff --git a/crates/nostr-sdk/examples/rocksdb.rs b/crates/nostr-sdk/examples/rocksdb.rs index 1f8135f92..fb936243f 100644 --- a/crates/nostr-sdk/examples/rocksdb.rs +++ b/crates/nostr-sdk/examples/rocksdb.rs @@ -17,11 +17,18 @@ async fn main() -> Result<()> { client.add_relay("wss://relay.damus.io", None).await?; client.add_relay("wss://nostr.wine", None).await?; + client.add_relay("wss://atl.purplerelay.com", None).await?; client.connect().await; - // Publish a text note - client.publish_text_note("Hello world", &[]).await?; + /* // Publish a text note + client.publish_text_note("Hello world", &[]).await?; */ + + // Negentropy reconcile + let filter = Filter::new().author(my_keys.public_key()); + client + .reconcile(filter, NegentropyOptions::default()) + .await?; // Query events from database let filter = Filter::new().author(my_keys.public_key()).limit(10); diff --git a/crates/nostr-sdk/src/client/mod.rs b/crates/nostr-sdk/src/client/mod.rs index 336db5201..4dd8433cc 100644 --- a/crates/nostr-sdk/src/client/mod.rs +++ b/crates/nostr-sdk/src/client/mod.rs @@ -38,7 +38,9 @@ pub use self::options::Options; #[cfg(feature = "nip46")] pub use self::signer::remote::RemoteSigner; use crate::relay::pool::{self, Error as RelayPoolError, RelayPool}; -use crate::relay::{FilterOptions, Relay, RelayOptions, RelayPoolNotification, RelaySendOptions}; +use crate::relay::{ + FilterOptions, 
NegentropyOptions, Relay, RelayOptions, RelayPoolNotification, RelaySendOptions, +}; use crate::util::TryIntoUrl; /// [`Client`] error @@ -1315,8 +1317,8 @@ impl Client { } /// Negentropy reconciliation - pub async fn reconcile(&self, filter: Filter, timeout: Duration) -> Result<(), Error> { - Ok(self.pool.reconcile(filter, timeout).await?) + pub async fn reconcile(&self, filter: Filter, opts: NegentropyOptions) -> Result<(), Error> { + Ok(self.pool.reconcile(filter, opts).await?) } /// Negentropy reconciliation with items @@ -1324,12 +1326,9 @@ impl Client { &self, filter: Filter, items: Vec<(EventId, Timestamp)>, - timeout: Duration, + opts: NegentropyOptions, ) -> Result<(), Error> { - Ok(self - .pool - .reconcile_with_items(filter, items, timeout) - .await?) + Ok(self.pool.reconcile_with_items(filter, items, opts).await?) } /// Get a list of channels diff --git a/crates/nostr-sdk/src/lib.rs b/crates/nostr-sdk/src/lib.rs index 5534625e3..551b5004a 100644 --- a/crates/nostr-sdk/src/lib.rs +++ b/crates/nostr-sdk/src/lib.rs @@ -38,8 +38,9 @@ pub mod util; pub use self::client::blocking; pub use self::client::{Client, ClientBuilder, Options}; pub use self::relay::{ - ActiveSubscription, FilterOptions, InternalSubscriptionId, Relay, RelayConnectionStats, - RelayOptions, RelayPoolNotification, RelayPoolOptions, RelaySendOptions, RelayStatus, + ActiveSubscription, FilterOptions, InternalSubscriptionId, NegentropyOptions, Relay, + RelayConnectionStats, RelayOptions, RelayPoolNotification, RelayPoolOptions, RelaySendOptions, + RelayStatus, }; #[cfg(feature = "blocking")] diff --git a/crates/nostr-sdk/src/relay/mod.rs b/crates/nostr-sdk/src/relay/mod.rs index 4013796b0..cd78c48ee 100644 --- a/crates/nostr-sdk/src/relay/mod.rs +++ b/crates/nostr-sdk/src/relay/mod.rs @@ -37,7 +37,9 @@ pub mod pool; mod stats; pub use self::limits::Limits; -pub use self::options::{FilterOptions, RelayOptions, RelayPoolOptions, RelaySendOptions}; +pub use self::options::{ + FilterOptions, 
NegentropyOptions, RelayOptions, RelayPoolOptions, RelaySendOptions, +}; use self::options::{MAX_ADJ_RETRY_SEC, MIN_RETRY_SEC}; pub use self::pool::{RelayPoolMessage, RelayPoolNotification}; pub use self::stats::RelayConnectionStats; @@ -1474,15 +1476,22 @@ impl Relay { &self, filter: Filter, items: Vec<(EventId, Timestamp)>, - timeout: Duration, + opts: NegentropyOptions, ) -> Result<(), Error> { if !self.opts.get_read() { return Err(Error::ReadDisabled); } + if !self.is_connected().await + && self.stats.attempts() > 1 + && self.stats.uptime() < MIN_UPTIME + { + return Err(Error::NotConnected); + } + let id_size: usize = 32; - let mut negentropy = Negentropy::new(id_size, Some(2_500))?; + let mut negentropy = Negentropy::new(id_size, Some(4_096))?; for (id, timestamp) in items.into_iter() { let id = Bytes::from_slice(id.as_bytes()); @@ -1497,8 +1506,9 @@ impl Relay { self.send_msg(open_msg, Some(Duration::from_secs(10))) .await?; + // TODO: improve timeouts let mut notifications = self.notification_sender.subscribe(); - time::timeout(Some(timeout), async { + time::timeout(Some(opts.timeout), async { while let Ok(notification) = notifications.recv().await { if let RelayPoolNotification::Message(url, msg) = notification { if url == self.url { @@ -1516,15 +1526,33 @@ impl Relay { &mut need_ids, )?; + if need_ids.is_empty() { + tracing::info!("Reconciliation terminated"); + break; + } + let ids = need_ids .into_iter() .filter_map(|id| EventId::from_slice(&id).ok()); let filter = Filter::new().ids(ids); - self.req_events_of( - vec![filter], - Duration::from_secs(120), - FilterOptions::ExitOnEOSE, - ); + if !filter.ids.is_empty() { + if opts.syncrounous { + self.get_events_of( + vec![filter], + Duration::from_secs(30), + FilterOptions::ExitOnEOSE, + ) + .await?; + } else { + self.req_events_of( + vec![filter], + Duration::from_secs(30), + FilterOptions::ExitOnEOSE, + ); + } + } else { + tracing::warn!("negentropy reconciliation: tried to send empty filters to {}", 
self.url); + } match msg { Some(query) => { @@ -1585,7 +1613,13 @@ impl Relay { let pk = Keys::generate(); let filter = Filter::new().author(pk.public_key()); match self - .reconcile(filter, Vec::new(), Duration::from_secs(5)) + .reconcile( + filter, + Vec::new(), + NegentropyOptions::new() + .timeout(Duration::from_secs(5)) + .syncrounous(false), + ) .await { Ok(_) => Ok(true), diff --git a/crates/nostr-sdk/src/relay/options.rs b/crates/nostr-sdk/src/relay/options.rs index 8a35d3a61..a43c1edac 100644 --- a/crates/nostr-sdk/src/relay/options.rs +++ b/crates/nostr-sdk/src/relay/options.rs @@ -243,3 +243,46 @@ impl RelayPoolOptions { } } } + +/// Negentropy reconciliation options +#[derive(Debug, Clone, Copy)] +pub struct NegentropyOptions { + /// Timeout for sending event (default: 30 secs) + pub timeout: Duration, + /// Syncronous (default: true) + /// + /// If `true`, request events and wait that relay send them. + /// If `false`, request events but continue the reconciliation + pub syncrounous: bool, +} + +impl Default for NegentropyOptions { + fn default() -> Self { + Self { + timeout: Duration::from_secs(30), + syncrounous: true, + } + } +} + +impl NegentropyOptions { + /// New default [`NegentropyOptions`] + pub fn new() -> Self { + Self::default() + } + + /// Timeout for sending event (default: 30 secs) + pub fn timeout(mut self, timeout: Duration) -> Self { + self.timeout = timeout; + self + } + + /// Syncronous (default: true) + /// + /// If `true`, request events and wait that relay send them. 
+ /// If `false`, request events but continue the reconciliation + pub fn syncrounous(mut self, syncrounous: bool) -> Self { + self.syncrounous = syncrounous; + self + } +} diff --git a/crates/nostr-sdk/src/relay/pool.rs b/crates/nostr-sdk/src/relay/pool.rs index e23aea827..8d8ffcaa6 100644 --- a/crates/nostr-sdk/src/relay/pool.rs +++ b/crates/nostr-sdk/src/relay/pool.rs @@ -24,8 +24,8 @@ use tokio::sync::{broadcast, Mutex, RwLock}; use super::options::RelayPoolOptions; use super::{ - Error as RelayError, FilterOptions, InternalSubscriptionId, Limits, Relay, RelayOptions, - RelaySendOptions, RelayStatus, + Error as RelayError, FilterOptions, InternalSubscriptionId, Limits, NegentropyOptions, Relay, + RelayOptions, RelaySendOptions, RelayStatus, }; use crate::util::TryIntoUrl; @@ -861,10 +861,10 @@ impl RelayPool { } /// Negentropy reconciliation - pub async fn reconcile(&self, filter: Filter, timeout: Duration) -> Result<(), Error> { + pub async fn reconcile(&self, filter: Filter, opts: NegentropyOptions) -> Result<(), Error> { let items: Vec<(EventId, Timestamp)> = self.database.negentropy_items(filter.clone()).await?; - self.reconcile_with_items(filter, items, timeout).await + self.reconcile_with_items(filter, items, opts).await } /// Negentropy reconciliation with custom items @@ -872,7 +872,7 @@ impl RelayPool { &self, filter: Filter, items: Vec<(EventId, Timestamp)>, - timeout: Duration, + opts: NegentropyOptions, ) -> Result<(), Error> { let mut handles = Vec::new(); let relays = self.relays().await; @@ -880,7 +880,7 @@ impl RelayPool { let filter = filter.clone(); let my_items = items.clone(); let handle = thread::spawn(async move { - if let Err(e) = relay.reconcile(filter, my_items, timeout).await { + if let Err(e) = relay.reconcile(filter, my_items, opts).await { tracing::error!("Failed to get reconcile with {url}: {e}"); } }); From f64be691195ab79952eca21ae62474ca81f1605d Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Thu, 9 Nov 2023 13:52:40 +0100 
Subject: [PATCH 79/98] database: rename `nostr-sdk-db` to `nostr-database` --- .githooks/pre-push | 4 +- .github/workflows/ci.yml | 3 +- Cargo.lock | 84 +++++++++---------- Makefile | 2 +- README.md | 6 +- .../.gitignore | 0 .../Cargo.toml | 4 +- .../{nostr-sdk-db => nostr-database}/Makefile | 0 .../README.md | 4 +- .../examples/indexes.rs | 2 +- .../examples/memory.rs | 4 +- .../fbs/event.fbs | 0 .../fbs/event_seen_by.fbs | 0 .../src/error.rs | 2 +- .../src/flatbuffers/event_generated.rs | 0 .../flatbuffers/event_seen_by_generated.rs | 0 .../src/flatbuffers/mod.rs | 2 +- .../src/index.rs | 2 +- .../src/lib.rs | 3 +- .../src/memory.rs | 2 +- .../src/options.rs | 2 +- .../Cargo.toml | 8 +- .../README.md | 2 +- .../examples/webapp/.cargo/config.toml | 0 .../examples/webapp/.gitignore | 0 .../examples/webapp/Cargo.toml | 4 +- .../examples/webapp/Makefile | 0 .../examples/webapp/README.md | 0 .../examples/webapp/index.html | 0 .../examples/webapp/index.scss | 0 .../examples/webapp/src/app.rs | 6 +- .../examples/webapp/src/main.rs | 0 .../src/error.rs | 2 +- .../src/hex.rs | 0 .../src/lib.rs | 6 +- .../Cargo.toml | 6 +- .../examples/rocksdb.rs | 4 +- .../src/lib.rs | 4 +- .../src/ops.rs | 2 +- crates/nostr-sdk/Cargo.toml | 10 +-- crates/nostr-sdk/src/client/builder.rs | 4 +- crates/nostr-sdk/src/client/mod.rs | 2 +- crates/nostr-sdk/src/relay/mod.rs | 2 +- crates/nostr-sdk/src/relay/pool.rs | 3 +- 44 files changed, 97 insertions(+), 94 deletions(-) rename crates/{nostr-sdk-db => nostr-database}/.gitignore (100%) rename crates/{nostr-sdk-db => nostr-database}/Cargo.toml (92%) rename crates/{nostr-sdk-db => nostr-database}/Makefile (100%) rename crates/{nostr-sdk-db => nostr-database}/README.md (89%) rename crates/{nostr-sdk-db => nostr-database}/examples/indexes.rs (98%) rename crates/{nostr-sdk-db => nostr-database}/examples/memory.rs (95%) rename crates/{nostr-sdk-db => nostr-database}/fbs/event.fbs (100%) rename crates/{nostr-sdk-db => 
nostr-database}/fbs/event_seen_by.fbs (100%) rename crates/{nostr-sdk-db => nostr-database}/src/error.rs (98%) rename crates/{nostr-sdk-db => nostr-database}/src/flatbuffers/event_generated.rs (100%) rename crates/{nostr-sdk-db => nostr-database}/src/flatbuffers/event_seen_by_generated.rs (100%) rename crates/{nostr-sdk-db => nostr-database}/src/flatbuffers/mod.rs (99%) rename crates/{nostr-sdk-db => nostr-database}/src/index.rs (99%) rename crates/{nostr-sdk-db => nostr-database}/src/lib.rs (98%) rename crates/{nostr-sdk-db => nostr-database}/src/memory.rs (99%) rename crates/{nostr-sdk-db => nostr-database}/src/options.rs (94%) rename crates/{nostr-sdk-indexeddb => nostr-indexeddb}/Cargo.toml (72%) rename crates/{nostr-sdk-indexeddb => nostr-indexeddb}/README.md (95%) rename crates/{nostr-sdk-indexeddb => nostr-indexeddb}/examples/webapp/.cargo/config.toml (100%) rename crates/{nostr-sdk-indexeddb => nostr-indexeddb}/examples/webapp/.gitignore (100%) rename crates/{nostr-sdk-indexeddb => nostr-indexeddb}/examples/webapp/Cargo.toml (77%) rename crates/{nostr-sdk-indexeddb => nostr-indexeddb}/examples/webapp/Makefile (100%) rename crates/{nostr-sdk-indexeddb => nostr-indexeddb}/examples/webapp/README.md (100%) rename crates/{nostr-sdk-indexeddb => nostr-indexeddb}/examples/webapp/index.html (100%) rename crates/{nostr-sdk-indexeddb => nostr-indexeddb}/examples/webapp/index.scss (100%) rename crates/{nostr-sdk-indexeddb => nostr-indexeddb}/examples/webapp/src/app.rs (91%) rename crates/{nostr-sdk-indexeddb => nostr-indexeddb}/examples/webapp/src/main.rs (100%) rename crates/{nostr-sdk-indexeddb => nostr-indexeddb}/src/error.rs (96%) rename crates/{nostr-sdk-indexeddb => nostr-indexeddb}/src/hex.rs (100%) rename crates/{nostr-sdk-indexeddb => nostr-indexeddb}/src/lib.rs (99%) rename crates/{nostr-sdk-rocksdb => nostr-rocksdb}/Cargo.toml (82%) rename crates/{nostr-sdk-rocksdb => nostr-rocksdb}/examples/rocksdb.rs (97%) rename crates/{nostr-sdk-rocksdb => 
nostr-rocksdb}/src/lib.rs (99%) rename crates/{nostr-sdk-rocksdb => nostr-rocksdb}/src/ops.rs (90%) diff --git a/.githooks/pre-push b/.githooks/pre-push index 4a5fcb01a..91692c763 100755 --- a/.githooks/pre-push +++ b/.githooks/pre-push @@ -7,11 +7,11 @@ buildargs=( "-p nostr --no-default-features --features alloc" "-p nostr --no-default-features --features alloc,all-nips" "-p nostr --features blocking" + "-p nostr-database" "-p nostr-sdk-net" "-p nostr-sdk" "-p nostr-sdk --no-default-features" "-p nostr-sdk --features blocking" - "-p nostr-sdk-db" "-p nostr-ffi" "-p nostr-sdk-ffi" ) @@ -25,8 +25,8 @@ done buildargs=( "-p nostr-js --target wasm32-unknown-unknown" + "-p nostr-indexeddb --target wasm32-unknown-unknown" "-p nostr-sdk-js --target wasm32-unknown-unknown" - "-p nostr-sdk-indexeddb --target wasm32-unknown-unknown" ) for arg in "${buildargs[@]}"; do diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a426db171..80e4c05b9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -31,6 +31,7 @@ jobs: -p nostr --no-default-features --features alloc, -p nostr --no-default-features --features "alloc all-nips", -p nostr --features blocking, + -p nostr-database -p nostr-sdk, -p nostr-sdk --no-default-features, -p nostr-sdk --features blocking, @@ -68,8 +69,8 @@ jobs: build-args: [ -p nostr, + -p nostr-indexeddb, -p nostr-sdk, - -p nostr-sdk-indexeddb, -p nostr-js, ] steps: diff --git a/Cargo.lock b/Cargo.lock index 43c7119e8..90690fc65 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1296,6 +1296,19 @@ dependencies = [ "url-fork", ] +[[package]] +name = "nostr-database" +version = "0.1.0" +dependencies = [ + "async-trait", + "flatbuffers", + "nostr", + "thiserror", + "tokio", + "tracing", + "tracing-subscriber", +] + [[package]] name = "nostr-ffi" version = "0.1.0" @@ -1304,6 +1317,21 @@ dependencies = [ "uniffi", ] +[[package]] +name = "nostr-indexeddb" +version = "0.1.0" +dependencies = [ + "async-trait", + "indexed_db_futures", + 
"nostr", + "nostr-database", + "thiserror", + "tokio", + "tracing", + "wasm-bindgen", + "wasm-bindgen-test", +] + [[package]] name = "nostr-js" version = "0.1.0" @@ -1330,29 +1358,30 @@ dependencies = [ ] [[package]] -name = "nostr-sdk" -version = "0.25.0" +name = "nostr-rocksdb" +version = "0.1.0" dependencies = [ - "async-utility", + "async-trait", "nostr", - "nostr-sdk-db", - "nostr-sdk-indexeddb", - "nostr-sdk-net", - "nostr-sdk-rocksdb", - "once_cell", - "thiserror", + "nostr-database", + "num_cpus", + "rocksdb", "tokio", "tracing", "tracing-subscriber", ] [[package]] -name = "nostr-sdk-db" -version = "0.1.0" +name = "nostr-sdk" +version = "0.25.0" dependencies = [ - "async-trait", - "flatbuffers", + "async-utility", "nostr", + "nostr-database", + "nostr-indexeddb", + "nostr-rocksdb", + "nostr-sdk-net", + "once_cell", "thiserror", "tokio", "tracing", @@ -1370,21 +1399,6 @@ dependencies = [ "uniffi", ] -[[package]] -name = "nostr-sdk-indexeddb" -version = "0.1.0" -dependencies = [ - "async-trait", - "indexed_db_futures", - "nostr", - "nostr-sdk-db", - "thiserror", - "tokio", - "tracing", - "wasm-bindgen", - "wasm-bindgen-test", -] - [[package]] name = "nostr-sdk-js" version = "0.1.0" @@ -1413,20 +1427,6 @@ dependencies = [ "ws_stream_wasm", ] -[[package]] -name = "nostr-sdk-rocksdb" -version = "0.1.0" -dependencies = [ - "async-trait", - "nostr", - "nostr-sdk-db", - "num_cpus", - "rocksdb", - "tokio", - "tracing", - "tracing-subscriber", -] - [[package]] name = "nu-ansi-term" version = "0.46.0" diff --git a/Makefile b/Makefile index 4146bc5fa..dc6edf046 100644 --- a/Makefile +++ b/Makefile @@ -13,7 +13,7 @@ book: cd book && make build flatbuf: - cd crates/nostr-sdk-db && make flatbuf + cd crates/nostr-database && make flatbuf loc: @echo "--- Counting lines of .rs files (LOC):" && find crates/ bindings/ -type f -name "*.rs" -exec cat {} \; | wc -l \ No newline at end of file diff --git a/README.md b/README.md index 22f41fadb..e8123d1bb 100644 --- a/README.md +++ 
b/README.md @@ -5,11 +5,11 @@ The project is split up into several crates in the `crates/` directory: * [**nostr**](./crates/nostr/): Rust implementation of Nostr protocol. +* [**nostr-database**](./crates/nostr-database/): Database for Nostr apps + * [**nostr-rocksdb**](./crates/nostr-rocksdb/): RocksDB Storage backend for Nostr apps + * [**nostr-indexeddb**](./crates/nostr-indexeddb/): IndexedDB Storage backend for Nostr apps * [**nostr-sdk**](./crates/nostr-sdk/): High level client library. * [**nostr-sdk-net**](./crates/nostr-sdk-net/): Network library for [**nostr-sdk**](./crates/nostr-sdk/) -* [**nostr-sdk-db**](./crates/nostr-sdk-db/): Database for [**nostr-sdk**](./crates/nostr-sdk/) -* [**nostr-sdk-rocksdb**](./crates/nostr-sdk-rocksdb/): RocksDB Storage backend for [**nostr-sdk**](./crates/nostr-sdk/) -* [**nostr-sdk-indexeddb**](./crates/nostr-sdk-indexeddb/): IndexedDB Storage backend for [**nostr-sdk**](./crates/nostr-sdk/) ### Bindings diff --git a/crates/nostr-sdk-db/.gitignore b/crates/nostr-database/.gitignore similarity index 100% rename from crates/nostr-sdk-db/.gitignore rename to crates/nostr-database/.gitignore diff --git a/crates/nostr-sdk-db/Cargo.toml b/crates/nostr-database/Cargo.toml similarity index 92% rename from crates/nostr-sdk-db/Cargo.toml rename to crates/nostr-database/Cargo.toml index c8e5665f2..f1fa47fde 100644 --- a/crates/nostr-sdk-db/Cargo.toml +++ b/crates/nostr-database/Cargo.toml @@ -1,8 +1,8 @@ [package] -name = "nostr-sdk-db" +name = "nostr-database" version = "0.1.0" edition = "2021" -description = "Nostr SDK Database" +description = "Database for Nostr apps" authors = ["Yuki Kishimoto "] homepage.workspace = true repository.workspace = true diff --git a/crates/nostr-sdk-db/Makefile b/crates/nostr-database/Makefile similarity index 100% rename from crates/nostr-sdk-db/Makefile rename to crates/nostr-database/Makefile diff --git a/crates/nostr-sdk-db/README.md b/crates/nostr-database/README.md similarity index 89% 
rename from crates/nostr-sdk-db/README.md rename to crates/nostr-database/README.md index fc2706f20..61c7e1139 100644 --- a/crates/nostr-sdk-db/README.md +++ b/crates/nostr-database/README.md @@ -1,4 +1,6 @@ -# Nostr SDK Database +# Nostr Database + +Database for Nostr apps ## State diff --git a/crates/nostr-sdk-db/examples/indexes.rs b/crates/nostr-database/examples/indexes.rs similarity index 98% rename from crates/nostr-sdk-db/examples/indexes.rs rename to crates/nostr-database/examples/indexes.rs index cdc66ea84..e22242cbc 100644 --- a/crates/nostr-sdk-db/examples/indexes.rs +++ b/crates/nostr-database/examples/indexes.rs @@ -2,7 +2,7 @@ // Distributed under the MIT software license use nostr::prelude::*; -use nostr_sdk_db::DatabaseIndexes; +use nostr_database::DatabaseIndexes; use tracing_subscriber::fmt::format::FmtSpan; #[tokio::main] diff --git a/crates/nostr-sdk-db/examples/memory.rs b/crates/nostr-database/examples/memory.rs similarity index 95% rename from crates/nostr-sdk-db/examples/memory.rs rename to crates/nostr-database/examples/memory.rs index 6e6db2e5d..42ca6e51d 100644 --- a/crates/nostr-sdk-db/examples/memory.rs +++ b/crates/nostr-database/examples/memory.rs @@ -3,8 +3,8 @@ use nostr::prelude::*; use nostr::{EventBuilder, Filter, Keys, Kind, Metadata, Tag}; -use nostr_sdk_db::memory::MemoryDatabase; -use nostr_sdk_db::{DatabaseOptions, NostrDatabase}; +use nostr_database::memory::MemoryDatabase; +use nostr_database::{DatabaseOptions, NostrDatabase}; use tracing_subscriber::fmt::format::FmtSpan; #[tokio::main] diff --git a/crates/nostr-sdk-db/fbs/event.fbs b/crates/nostr-database/fbs/event.fbs similarity index 100% rename from crates/nostr-sdk-db/fbs/event.fbs rename to crates/nostr-database/fbs/event.fbs diff --git a/crates/nostr-sdk-db/fbs/event_seen_by.fbs b/crates/nostr-database/fbs/event_seen_by.fbs similarity index 100% rename from crates/nostr-sdk-db/fbs/event_seen_by.fbs rename to crates/nostr-database/fbs/event_seen_by.fbs diff --git 
a/crates/nostr-sdk-db/src/error.rs b/crates/nostr-database/src/error.rs similarity index 98% rename from crates/nostr-sdk-db/src/error.rs rename to crates/nostr-database/src/error.rs index 8686d4772..71945864a 100644 --- a/crates/nostr-sdk-db/src/error.rs +++ b/crates/nostr-database/src/error.rs @@ -1,7 +1,7 @@ // Copyright (c) 2022-2023 Yuki Kishimoto // Distributed under the MIT software license -//! Database Error +//! Nostr Database Error use thiserror::Error; diff --git a/crates/nostr-sdk-db/src/flatbuffers/event_generated.rs b/crates/nostr-database/src/flatbuffers/event_generated.rs similarity index 100% rename from crates/nostr-sdk-db/src/flatbuffers/event_generated.rs rename to crates/nostr-database/src/flatbuffers/event_generated.rs diff --git a/crates/nostr-sdk-db/src/flatbuffers/event_seen_by_generated.rs b/crates/nostr-database/src/flatbuffers/event_seen_by_generated.rs similarity index 100% rename from crates/nostr-sdk-db/src/flatbuffers/event_seen_by_generated.rs rename to crates/nostr-database/src/flatbuffers/event_seen_by_generated.rs diff --git a/crates/nostr-sdk-db/src/flatbuffers/mod.rs b/crates/nostr-database/src/flatbuffers/mod.rs similarity index 99% rename from crates/nostr-sdk-db/src/flatbuffers/mod.rs rename to crates/nostr-database/src/flatbuffers/mod.rs index 44c726e8a..afcc24fa6 100644 --- a/crates/nostr-sdk-db/src/flatbuffers/mod.rs +++ b/crates/nostr-database/src/flatbuffers/mod.rs @@ -1,7 +1,7 @@ // Copyright (c) 2022-2023 Yuki Kishimoto // Distributed under the MIT software license -//! Flatbuffers +//! 
Nostr Database Flatbuffers use std::collections::HashSet; diff --git a/crates/nostr-sdk-db/src/index.rs b/crates/nostr-database/src/index.rs similarity index 99% rename from crates/nostr-sdk-db/src/index.rs rename to crates/nostr-database/src/index.rs index 61cc89dc7..9f70b0d70 100644 --- a/crates/nostr-sdk-db/src/index.rs +++ b/crates/nostr-database/src/index.rs @@ -1,7 +1,7 @@ // Copyright (c) 2022-2023 Yuki Kishimoto // Distributed under the MIT software license -//! Indexes +//! Nostr Database Indexes use std::cmp::Ordering; use std::collections::{BTreeSet, HashSet}; diff --git a/crates/nostr-sdk-db/src/lib.rs b/crates/nostr-database/src/lib.rs similarity index 98% rename from crates/nostr-sdk-db/src/lib.rs rename to crates/nostr-database/src/lib.rs index 3bde8acea..1ab264b8a 100644 --- a/crates/nostr-sdk-db/src/lib.rs +++ b/crates/nostr-database/src/lib.rs @@ -1,7 +1,7 @@ // Copyright (c) 2022-2023 Yuki Kishimoto // Distributed under the MIT software license -//! Nostr SDK Database +//! Nostr Database #![deny(unsafe_code)] #![warn(missing_docs)] @@ -25,6 +25,7 @@ pub use self::error::DatabaseError; #[cfg(feature = "flatbuf")] pub use self::flatbuffers::{FlatBufferBuilder, FlatBufferDecode, FlatBufferEncode}; pub use self::index::{DatabaseIndexes, EventIndexResult}; +pub use self::memory::MemoryDatabase; pub use self::options::DatabaseOptions; /// Backend diff --git a/crates/nostr-sdk-db/src/memory.rs b/crates/nostr-database/src/memory.rs similarity index 99% rename from crates/nostr-sdk-db/src/memory.rs rename to crates/nostr-database/src/memory.rs index d58d01dd5..6a199c1fa 100644 --- a/crates/nostr-sdk-db/src/memory.rs +++ b/crates/nostr-database/src/memory.rs @@ -1,7 +1,7 @@ // Copyright (c) 2022-2023 Yuki Kishimoto // Distributed under the MIT software license -//! Nostr SDK Database +//! 
Memory (RAM) Storage backend for Nostr apps use std::collections::{HashMap, HashSet}; use std::sync::Arc; diff --git a/crates/nostr-sdk-db/src/options.rs b/crates/nostr-database/src/options.rs similarity index 94% rename from crates/nostr-sdk-db/src/options.rs rename to crates/nostr-database/src/options.rs index 038682de2..2ee9cf72e 100644 --- a/crates/nostr-sdk-db/src/options.rs +++ b/crates/nostr-database/src/options.rs @@ -1,7 +1,7 @@ // Copyright (c) 2022-2023 Yuki Kishimoto // Distributed under the MIT software license -//! Database options +//! Nostr Database options /// Database options #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] diff --git a/crates/nostr-sdk-indexeddb/Cargo.toml b/crates/nostr-indexeddb/Cargo.toml similarity index 72% rename from crates/nostr-sdk-indexeddb/Cargo.toml rename to crates/nostr-indexeddb/Cargo.toml index 62ed9180b..bd704aba8 100644 --- a/crates/nostr-sdk-indexeddb/Cargo.toml +++ b/crates/nostr-indexeddb/Cargo.toml @@ -1,21 +1,21 @@ [package] -name = "nostr-sdk-indexeddb" +name = "nostr-indexeddb" version = "0.1.0" edition = "2021" -description = "Web's IndexedDB Storage backend for Nostr SDK" +description = "Web's IndexedDB Storage backend for Nostr apps" authors = ["Yuki Kishimoto "] homepage.workspace = true repository.workspace = true license.workspace = true readme = "README.md" rust-version.workspace = true -keywords = ["nostr", "sdk", "db", "indexeddb"] +keywords = ["nostr", "database", "indexeddb"] [dependencies] async-trait = { workspace = true } indexed_db_futures = "0.4" nostr = { workspace = true, features = ["std"] } -nostr-sdk-db = { version = "0.1", path = "../nostr-sdk-db", features = ["flatbuf"] } +nostr-database = { version = "0.1", path = "../nostr-database", features = ["flatbuf"] } thiserror = { workspace = true } tokio = { workspace = true, features = ["sync"] } tracing = { workspace = true, features = ["std", "attributes"] } diff --git a/crates/nostr-sdk-indexeddb/README.md 
b/crates/nostr-indexeddb/README.md similarity index 95% rename from crates/nostr-sdk-indexeddb/README.md rename to crates/nostr-indexeddb/README.md index 8a114938a..bb0a7ac7e 100644 --- a/crates/nostr-sdk-indexeddb/README.md +++ b/crates/nostr-indexeddb/README.md @@ -1,4 +1,4 @@ -# Nostr SDK IndexedDB +# Nostr IndexedDB This crate implements a storage backend on IndexedDB for web environments. diff --git a/crates/nostr-sdk-indexeddb/examples/webapp/.cargo/config.toml b/crates/nostr-indexeddb/examples/webapp/.cargo/config.toml similarity index 100% rename from crates/nostr-sdk-indexeddb/examples/webapp/.cargo/config.toml rename to crates/nostr-indexeddb/examples/webapp/.cargo/config.toml diff --git a/crates/nostr-sdk-indexeddb/examples/webapp/.gitignore b/crates/nostr-indexeddb/examples/webapp/.gitignore similarity index 100% rename from crates/nostr-sdk-indexeddb/examples/webapp/.gitignore rename to crates/nostr-indexeddb/examples/webapp/.gitignore diff --git a/crates/nostr-sdk-indexeddb/examples/webapp/Cargo.toml b/crates/nostr-indexeddb/examples/webapp/Cargo.toml similarity index 77% rename from crates/nostr-sdk-indexeddb/examples/webapp/Cargo.toml rename to crates/nostr-indexeddb/examples/webapp/Cargo.toml index 6ec74248a..a3c820d48 100644 --- a/crates/nostr-sdk-indexeddb/examples/webapp/Cargo.toml +++ b/crates/nostr-indexeddb/examples/webapp/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "nostr-sdk-indexeddb-example" +name = "nostr-indexeddb-example" version = "0.1.0" edition = "2021" publish = false @@ -9,7 +9,7 @@ publish = false members = ["."] [dependencies] -nostr-sdk-indexeddb = { path = "../../" } +nostr-indexeddb = { path = "../../" } tracing-wasm = "0.2" wasm-bindgen-futures = "0.4" web-sys = "0.3" diff --git a/crates/nostr-sdk-indexeddb/examples/webapp/Makefile b/crates/nostr-indexeddb/examples/webapp/Makefile similarity index 100% rename from crates/nostr-sdk-indexeddb/examples/webapp/Makefile rename to crates/nostr-indexeddb/examples/webapp/Makefile 
diff --git a/crates/nostr-sdk-indexeddb/examples/webapp/README.md b/crates/nostr-indexeddb/examples/webapp/README.md similarity index 100% rename from crates/nostr-sdk-indexeddb/examples/webapp/README.md rename to crates/nostr-indexeddb/examples/webapp/README.md diff --git a/crates/nostr-sdk-indexeddb/examples/webapp/index.html b/crates/nostr-indexeddb/examples/webapp/index.html similarity index 100% rename from crates/nostr-sdk-indexeddb/examples/webapp/index.html rename to crates/nostr-indexeddb/examples/webapp/index.html diff --git a/crates/nostr-sdk-indexeddb/examples/webapp/index.scss b/crates/nostr-indexeddb/examples/webapp/index.scss similarity index 100% rename from crates/nostr-sdk-indexeddb/examples/webapp/index.scss rename to crates/nostr-indexeddb/examples/webapp/index.scss diff --git a/crates/nostr-sdk-indexeddb/examples/webapp/src/app.rs b/crates/nostr-indexeddb/examples/webapp/src/app.rs similarity index 91% rename from crates/nostr-sdk-indexeddb/examples/webapp/src/app.rs rename to crates/nostr-indexeddb/examples/webapp/src/app.rs index dc9dceda8..385301304 100644 --- a/crates/nostr-sdk-indexeddb/examples/webapp/src/app.rs +++ b/crates/nostr-indexeddb/examples/webapp/src/app.rs @@ -1,9 +1,9 @@ // Copyright (c) 2022-2023 Yuki Kishimoto // Distributed under the MIT software license -use nostr_sdk_indexeddb::database::NostrDatabase; -use nostr_sdk_indexeddb::nostr::prelude::*; -use nostr_sdk_indexeddb::WebDatabase; +use nostr_indexeddb::database::NostrDatabase; +use nostr_indexeddb::nostr::prelude::*; +use nostr_indexeddb::WebDatabase; use wasm_bindgen_futures::spawn_local; use web_sys::console; use yew::prelude::*; diff --git a/crates/nostr-sdk-indexeddb/examples/webapp/src/main.rs b/crates/nostr-indexeddb/examples/webapp/src/main.rs similarity index 100% rename from crates/nostr-sdk-indexeddb/examples/webapp/src/main.rs rename to crates/nostr-indexeddb/examples/webapp/src/main.rs diff --git a/crates/nostr-sdk-indexeddb/src/error.rs 
b/crates/nostr-indexeddb/src/error.rs similarity index 96% rename from crates/nostr-sdk-indexeddb/src/error.rs rename to crates/nostr-indexeddb/src/error.rs index 07216895f..f390a66c6 100644 --- a/crates/nostr-sdk-indexeddb/src/error.rs +++ b/crates/nostr-indexeddb/src/error.rs @@ -1,7 +1,7 @@ // Copyright (c) 2022-2023 Yuki Kishimoto // Distributed under the MIT software license -use nostr_sdk_db::DatabaseError; +use nostr_database::DatabaseError; use thiserror::Error; /// IndexedDB error diff --git a/crates/nostr-sdk-indexeddb/src/hex.rs b/crates/nostr-indexeddb/src/hex.rs similarity index 100% rename from crates/nostr-sdk-indexeddb/src/hex.rs rename to crates/nostr-indexeddb/src/hex.rs diff --git a/crates/nostr-sdk-indexeddb/src/lib.rs b/crates/nostr-indexeddb/src/lib.rs similarity index 99% rename from crates/nostr-sdk-indexeddb/src/lib.rs rename to crates/nostr-indexeddb/src/lib.rs index 0447b6fe9..db3186931 100644 --- a/crates/nostr-sdk-indexeddb/src/lib.rs +++ b/crates/nostr-indexeddb/src/lib.rs @@ -15,7 +15,7 @@ use std::future::IntoFuture; use std::sync::Arc; pub extern crate nostr; -pub extern crate nostr_sdk_db as database; +pub extern crate nostr_database as database; #[cfg(target_arch = "wasm32")] use async_trait::async_trait; @@ -25,8 +25,8 @@ use indexed_db_futures::{IdbDatabase, IdbQuerySource, IdbVersionChangeEvent}; use nostr::event::raw::RawEvent; use nostr::{Event, EventId, Filter, Timestamp, Url}; #[cfg(target_arch = "wasm32")] -use nostr_sdk_db::NostrDatabase; -use nostr_sdk_db::{ +use nostr_database::NostrDatabase; +use nostr_database::{ Backend, DatabaseError, DatabaseIndexes, DatabaseOptions, EventIndexResult, FlatBufferBuilder, FlatBufferDecode, FlatBufferEncode, }; diff --git a/crates/nostr-sdk-rocksdb/Cargo.toml b/crates/nostr-rocksdb/Cargo.toml similarity index 82% rename from crates/nostr-sdk-rocksdb/Cargo.toml rename to crates/nostr-rocksdb/Cargo.toml index 9e67939a2..949380bb0 100644 --- a/crates/nostr-sdk-rocksdb/Cargo.toml +++ 
b/crates/nostr-rocksdb/Cargo.toml @@ -1,8 +1,8 @@ [package] -name = "nostr-sdk-rocksdb" +name = "nostr-rocksdb" version = "0.1.0" edition = "2021" -description = "RocksDB Storage backend for Nostr SDK" +description = "RocksDB Storage backend for Nostr apps" authors = ["Yuki Kishimoto "] homepage.workspace = true repository.workspace = true @@ -14,7 +14,7 @@ keywords = ["nostr", "sdk", "db", "rocksdb"] [dependencies] async-trait = { workspace = true } nostr = { workspace = true, features = ["std"] } -nostr-sdk-db = { version = "0.1", path = "../nostr-sdk-db", features = ["flatbuf"] } +nostr-database = { version = "0.1", path = "../nostr-database", features = ["flatbuf"] } num_cpus = "1.16" rocksdb = { version = "0.21", default-features = false, features = ["multi-threaded-cf", "snappy"] } tokio = { workspace = true, features = ["rt-multi-thread", "sync"] } diff --git a/crates/nostr-sdk-rocksdb/examples/rocksdb.rs b/crates/nostr-rocksdb/examples/rocksdb.rs similarity index 97% rename from crates/nostr-sdk-rocksdb/examples/rocksdb.rs rename to crates/nostr-rocksdb/examples/rocksdb.rs index 1e364aa8f..fe835a3e4 100644 --- a/crates/nostr-sdk-rocksdb/examples/rocksdb.rs +++ b/crates/nostr-rocksdb/examples/rocksdb.rs @@ -4,8 +4,8 @@ use std::time::Duration; use nostr::prelude::*; -use nostr_sdk_db::NostrDatabase; -use nostr_sdk_rocksdb::RocksDatabase; +use nostr_database::NostrDatabase; +use nostr_rocksdb::RocksDatabase; use tracing_subscriber::fmt::format::FmtSpan; #[tokio::main] diff --git a/crates/nostr-sdk-rocksdb/src/lib.rs b/crates/nostr-rocksdb/src/lib.rs similarity index 99% rename from crates/nostr-sdk-rocksdb/src/lib.rs rename to crates/nostr-rocksdb/src/lib.rs index 44d9a9ed4..3b95db1b3 100644 --- a/crates/nostr-sdk-rocksdb/src/lib.rs +++ b/crates/nostr-rocksdb/src/lib.rs @@ -12,12 +12,12 @@ use std::path::Path; use std::sync::Arc; pub extern crate nostr; -pub extern crate nostr_sdk_db as database; +pub extern crate nostr_database as database; use 
async_trait::async_trait; use nostr::event::raw::RawEvent; use nostr::{Event, EventId, Filter, FiltersMatchEvent, Timestamp, Url}; -use nostr_sdk_db::{ +use nostr_database::{ Backend, DatabaseError, DatabaseIndexes, DatabaseOptions, EventIndexResult, FlatBufferBuilder, FlatBufferDecode, FlatBufferEncode, NostrDatabase, }; diff --git a/crates/nostr-sdk-rocksdb/src/ops.rs b/crates/nostr-rocksdb/src/ops.rs similarity index 90% rename from crates/nostr-sdk-rocksdb/src/ops.rs rename to crates/nostr-rocksdb/src/ops.rs index 1d1d433fd..50a1ff7ae 100644 --- a/crates/nostr-sdk-rocksdb/src/ops.rs +++ b/crates/nostr-rocksdb/src/ops.rs @@ -6,7 +6,7 @@ use std::collections::HashSet; use nostr::Url; -use nostr_sdk_db::{FlatBufferBuilder, FlatBufferDecode, FlatBufferEncode}; +use nostr_database::{FlatBufferBuilder, FlatBufferDecode, FlatBufferEncode}; use rocksdb::MergeOperands; pub(crate) fn relay_urls_merge_operator( diff --git a/crates/nostr-sdk/Cargo.toml b/crates/nostr-sdk/Cargo.toml index b3c9cab33..eb22c5b70 100644 --- a/crates/nostr-sdk/Cargo.toml +++ b/crates/nostr-sdk/Cargo.toml @@ -18,8 +18,8 @@ rustdoc-args = ["--cfg", "docsrs"] [features] default = ["all-nips"] blocking = ["async-utility/blocking", "nostr/blocking"] -rocksdb = ["dep:nostr-sdk-rocksdb"] -indexeddb = ["dep:nostr-sdk-indexeddb"] +rocksdb = ["dep:nostr-rocksdb"] +indexeddb = ["dep:nostr-indexeddb"] all-nips = ["nip04", "nip05", "nip06", "nip11", "nip46", "nip47"] nip03 = ["nostr/nip03"] nip04 = ["nostr/nip04"] @@ -33,18 +33,18 @@ nip47 = ["nostr/nip47"] [dependencies] async-utility = "0.1" nostr = { workspace = true, features = ["std"] } -nostr-sdk-db = { version = "0.1", path = "../nostr-sdk-db" } +nostr-database = { version = "0.1", path = "../nostr-database" } nostr-sdk-net = { version = "0.25", path = "../nostr-sdk-net" } once_cell = { workspace = true } thiserror = { workspace = true } tracing = { workspace = true, features = ["std"] } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] 
-nostr-sdk-rocksdb = { version = "0.1", path = "../nostr-sdk-rocksdb", optional = true } +nostr-rocksdb = { version = "0.1", path = "../nostr-rocksdb", optional = true } tokio = { workspace = true, features = ["rt-multi-thread", "time", "macros", "sync"] } [target.'cfg(target_arch = "wasm32")'.dependencies] -nostr-sdk-indexeddb = { version = "0.1", path = "../nostr-sdk-indexeddb", optional = true } +nostr-indexeddb = { version = "0.1", path = "../nostr-indexeddb", optional = true } tokio = { workspace = true, features = ["rt", "macros", "sync"] } [dev-dependencies] diff --git a/crates/nostr-sdk/src/client/builder.rs b/crates/nostr-sdk/src/client/builder.rs index f0ca8cddd..94953ec53 100644 --- a/crates/nostr-sdk/src/client/builder.rs +++ b/crates/nostr-sdk/src/client/builder.rs @@ -6,8 +6,8 @@ use std::sync::Arc; use nostr::Keys; -use nostr_sdk_db::memory::MemoryDatabase; -use nostr_sdk_db::{DatabaseError, DynNostrDatabase, NostrDatabase}; +use nostr_database::memory::MemoryDatabase; +use nostr_database::{DatabaseError, DynNostrDatabase, NostrDatabase}; #[cfg(feature = "nip46")] use super::RemoteSigner; diff --git a/crates/nostr-sdk/src/client/mod.rs b/crates/nostr-sdk/src/client/mod.rs index 4dd8433cc..48b607dc5 100644 --- a/crates/nostr-sdk/src/client/mod.rs +++ b/crates/nostr-sdk/src/client/mod.rs @@ -22,7 +22,7 @@ use nostr::{ ChannelId, ClientMessage, Contact, Event, EventBuilder, EventId, Filter, JsonUtil, Keys, Kind, Metadata, Result, Tag, Timestamp, }; -use nostr_sdk_db::DynNostrDatabase; +use nostr_database::DynNostrDatabase; use nostr_sdk_net::futures_util::Future; use tokio::sync::{broadcast, RwLock}; diff --git a/crates/nostr-sdk/src/relay/mod.rs b/crates/nostr-sdk/src/relay/mod.rs index cd78c48ee..4b33b3eca 100644 --- a/crates/nostr-sdk/src/relay/mod.rs +++ b/crates/nostr-sdk/src/relay/mod.rs @@ -24,7 +24,7 @@ use nostr::{ ClientMessage, Event, EventId, Filter, JsonUtil, Keys, RawRelayMessage, RelayMessage, SubscriptionId, Timestamp, Url, }; -use 
nostr_sdk_db::DynNostrDatabase; +use nostr_database::DynNostrDatabase; use nostr_sdk_net::futures_util::{Future, SinkExt, StreamExt}; use nostr_sdk_net::{self as net, WsMessage}; use thiserror::Error; diff --git a/crates/nostr-sdk/src/relay/pool.rs b/crates/nostr-sdk/src/relay/pool.rs index 8d8ffcaa6..7976bff13 100644 --- a/crates/nostr-sdk/src/relay/pool.rs +++ b/crates/nostr-sdk/src/relay/pool.rs @@ -16,8 +16,7 @@ use nostr::{ event, ClientMessage, Event, EventId, Filter, JsonUtil, MissingPartialEvent, PartialEvent, RawRelayMessage, RelayMessage, SubscriptionId, Timestamp, Url, }; -use nostr_sdk_db::memory::MemoryDatabase; -use nostr_sdk_db::{DatabaseError, DynNostrDatabase}; +use nostr_database::{DatabaseError, DynNostrDatabase, MemoryDatabase}; use thiserror::Error; use tokio::sync::mpsc::{self, Receiver, Sender}; use tokio::sync::{broadcast, Mutex, RwLock}; From 01c6bf7bfbc348d28e82a60c970ef7acd24c9955 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Thu, 9 Nov 2023 14:59:07 +0100 Subject: [PATCH 80/98] database: add `NostrDatabaseExt` trait --- crates/nostr-database/src/lib.rs | 12 ++++++++++-- crates/nostr-database/src/memory.rs | 11 ----------- 2 files changed, 10 insertions(+), 13 deletions(-) diff --git a/crates/nostr-database/src/lib.rs b/crates/nostr-database/src/lib.rs index 1ab264b8a..6fab53364 100644 --- a/crates/nostr-database/src/lib.rs +++ b/crates/nostr-database/src/lib.rs @@ -6,7 +6,6 @@ #![deny(unsafe_code)] #![warn(missing_docs)] #![warn(rustdoc::bare_urls)] -#![allow(where_clauses_object_safety)] use std::collections::HashSet; @@ -47,7 +46,7 @@ pub enum Backend { /// A type-erased [`StateStore`]. 
pub type DynNostrDatabase = dyn NostrDatabase; -/// Nostr SDK Database +/// Nostr Database #[cfg_attr(target_arch = "wasm32", async_trait(?Send))] #[cfg_attr(not(target_arch = "wasm32"), async_trait)] pub trait NostrDatabase: AsyncTraitDeps { @@ -106,7 +105,12 @@ pub trait NostrDatabase: AsyncTraitDeps { /// Wipe all data async fn wipe(&self) -> Result<(), Self::Err>; +} +/// Nostr Database Extension +#[cfg_attr(target_arch = "wasm32", async_trait(?Send))] +#[cfg_attr(not(target_arch = "wasm32"), async_trait)] +pub trait NostrDatabaseExt: NostrDatabase { /// Get profile metadata async fn profile(&self, public_key: XOnlyPublicKey) -> Result { let filter = Filter::new() @@ -121,6 +125,10 @@ pub trait NostrDatabase: AsyncTraitDeps { } } +#[cfg_attr(target_arch = "wasm32", async_trait(?Send))] +#[cfg_attr(not(target_arch = "wasm32"), async_trait)] +impl NostrDatabaseExt for T {} + /// Alias for `Send` on non-wasm, empty trait (implemented by everything) on /// wasm. #[cfg(not(target_arch = "wasm32"))] diff --git a/crates/nostr-database/src/memory.rs b/crates/nostr-database/src/memory.rs index 6a199c1fa..90ed4f7ad 100644 --- a/crates/nostr-database/src/memory.rs +++ b/crates/nostr-database/src/memory.rs @@ -8,23 +8,12 @@ use std::sync::Arc; use async_trait::async_trait; use nostr::{Event, EventId, Filter, FiltersMatchEvent, Timestamp, Url}; -use thiserror::Error; use tokio::sync::RwLock; use crate::{ Backend, DatabaseError, DatabaseIndexes, DatabaseOptions, EventIndexResult, NostrDatabase, }; -/// Memory Database Error -#[derive(Debug, Error)] -pub enum Error {} - -impl From for DatabaseError { - fn from(e: Error) -> Self { - DatabaseError::backend(e) - } -} - /// Memory Database (RAM) #[derive(Debug)] pub struct MemoryDatabase { From 834e6471711e800a5c8862863d3b671771a7fc3b Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Thu, 9 Nov 2023 15:14:42 +0100 Subject: [PATCH 81/98] Update README.md files --- crates/nostr-database/README.md | 10 ++++++++++ 
crates/nostr-sdk/README.md | 5 +++-- crates/nostr/README.md | 4 +++- 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/crates/nostr-database/README.md b/crates/nostr-database/README.md index 61c7e1139..e3b7d98bc 100644 --- a/crates/nostr-database/README.md +++ b/crates/nostr-database/README.md @@ -2,6 +2,16 @@ Database for Nostr apps +## Nostr Database Trait + +This library contains the `NostrDatabase` and `NostrDatabaseExt` traits. You can use the [default backends](#default-backends) or implement your own (PostgreSQL, SQLite, ...). + +## Default backends + +* Memory (RAM), available in this library +* RocksDB (desktop, server and mobile devices), available at [`nostr-rocksdb`](https://crates.io/crates/nostr-rocksdb) +* IndexedDB (web), available at [`nostr-indexeddb`](https://crates.io/crates/nostr-indexeddb) + ## State **This library is in an ALPHA state**, things that are implemented generally work but the API will change in breaking ways. diff --git a/crates/nostr-sdk/README.md b/crates/nostr-sdk/README.md index c03eeaa89..a08b1a1c5 100644 --- a/crates/nostr-sdk/README.md +++ b/crates/nostr-sdk/README.md @@ -17,8 +17,9 @@ If you're writing a typical Nostr client or bot, this is likely the crate you ne However, the crate is designed in a modular way and depends on several other lower-level crates.
If you're attempting something more custom, you might be interested in these: -- [`nostr`](https://crates.io/crates/nostr): Rust implementation of Nostr protocol -- [`nostr-sdk-net`](https://crates.io/crates/nostr-sdk-net): Nostr SDK Network library +* [`nostr`](https://crates.io/crates/nostr): Rust implementation of Nostr protocol +* [`nostr-database`](https://crates.io/crates/nostr-database): Database for Nostr apps +* [`nostr-sdk-net`](https://crates.io/crates/nostr-sdk-net): Nostr SDK Network library ## Getting started diff --git a/crates/nostr/README.md b/crates/nostr/README.md index d3e69b348..5c9d55250 100644 --- a/crates/nostr/README.md +++ b/crates/nostr/README.md @@ -12,7 +12,9 @@ Rust implementation of Nostr protocol. -If you're writing a typical Nostr client or bot, you may be interested in [nostr-sdk](https://crates.io/crates/nostr-sdk). +You may be interested in: +* [`nostr-sdk`](https://crates.io/crates/nostr-sdk) if you want to write a typical Nostr client or bot +* [`nostr-database`](https://crates.io/crates/nostr-database) if you need a database for your Nostr app (native or web) ## Getting started From f3cd63e77790fee804082d31231dfb59c4c4424f Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Thu, 9 Nov 2023 15:22:10 +0100 Subject: [PATCH 82/98] Add `nostr-database` to workspace --- Cargo.toml | 1 + crates/nostr-indexeddb/Cargo.toml | 2 +- crates/nostr-rocksdb/Cargo.toml | 2 +- crates/nostr-sdk/Cargo.toml | 4 ++-- crates/nostr-sdk/src/lib.rs | 7 ++++--- 5 files changed, 9 insertions(+), 7 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 7ef36a6a6..2c9c0670f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,6 +19,7 @@ rust-version = "1.64.0" [workspace.dependencies] async-trait = "0.1" nostr = { version = "0.25", path = "./crates/nostr", default-features = false } +nostr-database = { version = "0.1", path = "./crates/nostr-database", default-features = false } once_cell = "1.18" thiserror = "1.0" tokio = { version = "1.32", default-features 
= false } diff --git a/crates/nostr-indexeddb/Cargo.toml b/crates/nostr-indexeddb/Cargo.toml index bd704aba8..1dd058e36 100644 --- a/crates/nostr-indexeddb/Cargo.toml +++ b/crates/nostr-indexeddb/Cargo.toml @@ -15,7 +15,7 @@ keywords = ["nostr", "database", "indexeddb"] async-trait = { workspace = true } indexed_db_futures = "0.4" nostr = { workspace = true, features = ["std"] } -nostr-database = { version = "0.1", path = "../nostr-database", features = ["flatbuf"] } +nostr-database = { workspace = true, features = ["flatbuf"] } thiserror = { workspace = true } tokio = { workspace = true, features = ["sync"] } tracing = { workspace = true, features = ["std", "attributes"] } diff --git a/crates/nostr-rocksdb/Cargo.toml b/crates/nostr-rocksdb/Cargo.toml index 949380bb0..4aa84205f 100644 --- a/crates/nostr-rocksdb/Cargo.toml +++ b/crates/nostr-rocksdb/Cargo.toml @@ -14,7 +14,7 @@ keywords = ["nostr", "sdk", "db", "rocksdb"] [dependencies] async-trait = { workspace = true } nostr = { workspace = true, features = ["std"] } -nostr-database = { version = "0.1", path = "../nostr-database", features = ["flatbuf"] } +nostr-database = { workspace = true, features = ["flatbuf"] } num_cpus = "1.16" rocksdb = { version = "0.21", default-features = false, features = ["multi-threaded-cf", "snappy"] } tokio = { workspace = true, features = ["rt-multi-thread", "sync"] } diff --git a/crates/nostr-sdk/Cargo.toml b/crates/nostr-sdk/Cargo.toml index eb22c5b70..9c2cbbfac 100644 --- a/crates/nostr-sdk/Cargo.toml +++ b/crates/nostr-sdk/Cargo.toml @@ -3,7 +3,7 @@ name = "nostr-sdk" version = "0.25.0" edition = "2021" description = "High level Nostr client library." 
-authors = ["Yuki Kishimoto "] +authors = ["Yuki Kishimoto "] homepage.workspace = true repository.workspace = true license.workspace = true @@ -33,7 +33,7 @@ nip47 = ["nostr/nip47"] [dependencies] async-utility = "0.1" nostr = { workspace = true, features = ["std"] } -nostr-database = { version = "0.1", path = "../nostr-database" } +nostr-database = { workspace = true } nostr-sdk-net = { version = "0.25", path = "../nostr-sdk-net" } once_cell = { workspace = true } thiserror = { workspace = true } diff --git a/crates/nostr-sdk/src/lib.rs b/crates/nostr-sdk/src/lib.rs index 551b5004a..335278828 100644 --- a/crates/nostr-sdk/src/lib.rs +++ b/crates/nostr-sdk/src/lib.rs @@ -18,12 +18,13 @@ compile_error!("`blocking` feature can't be enabled for WASM targets"); pub use nostr::{self, *}; +pub use nostr_database as database; #[cfg(feature = "indexeddb")] -pub use nostr_sdk_indexeddb::WebDatabase; +pub use nostr_indexeddb::WebDatabase; +#[cfg(feature = "rocksdb")] +pub use nostr_rocksdb::RocksDatabase; #[cfg(feature = "blocking")] use nostr_sdk_net::futures_util::Future; -#[cfg(feature = "rocksdb")] -pub use nostr_sdk_rocksdb::RocksDatabase; #[cfg(feature = "blocking")] use once_cell::sync::Lazy; #[cfg(feature = "blocking")] From 065ce2bdebeb3ce57ee42061dcddffd6f871b613 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Thu, 9 Nov 2023 15:22:25 +0100 Subject: [PATCH 83/98] database: re-export `nostr` crate --- crates/nostr-database/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/nostr-database/src/lib.rs b/crates/nostr-database/src/lib.rs index 6fab53364..b1ae4e4e2 100644 --- a/crates/nostr-database/src/lib.rs +++ b/crates/nostr-database/src/lib.rs @@ -10,6 +10,7 @@ use std::collections::HashSet; pub use async_trait::async_trait; +pub use nostr; use nostr::secp256k1::XOnlyPublicKey; use nostr::{Event, EventId, Filter, JsonUtil, Kind, Metadata, Timestamp, Url}; From 3bce5f5722b5541454816d7edc0ef64681dab36c Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto 
Date: Thu, 9 Nov 2023 15:22:44 +0100 Subject: [PATCH 84/98] Update email --- bindings/nostr-js/Cargo.toml | 2 +- bindings/nostr-sdk-ffi/Cargo.toml | 2 +- bindings/nostr-sdk-js/Cargo.toml | 2 +- crates/nostr-sdk-net/Cargo.toml | 2 +- crates/nostr/Cargo.toml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/bindings/nostr-js/Cargo.toml b/bindings/nostr-js/Cargo.toml index 115ad3c39..7e00a42f6 100644 --- a/bindings/nostr-js/Cargo.toml +++ b/bindings/nostr-js/Cargo.toml @@ -3,7 +3,7 @@ name = "nostr-js" version = "0.1.0" edition = "2021" description = "Nostr protocol implementation, for JavaScript" -authors = ["Yuki Kishimoto "] +authors = ["Yuki Kishimoto "] publish = false [lib] diff --git a/bindings/nostr-sdk-ffi/Cargo.toml b/bindings/nostr-sdk-ffi/Cargo.toml index 53a55f4c2..f9d41d218 100644 --- a/bindings/nostr-sdk-ffi/Cargo.toml +++ b/bindings/nostr-sdk-ffi/Cargo.toml @@ -2,7 +2,7 @@ name = "nostr-sdk-ffi" version = "0.1.0" edition = "2021" -authors = ["Yuki Kishimoto "] +authors = ["Yuki Kishimoto "] publish = false [lib] diff --git a/bindings/nostr-sdk-js/Cargo.toml b/bindings/nostr-sdk-js/Cargo.toml index 0ae35b570..a4137ab4d 100644 --- a/bindings/nostr-sdk-js/Cargo.toml +++ b/bindings/nostr-sdk-js/Cargo.toml @@ -2,7 +2,7 @@ name = "nostr-sdk-js" version = "0.1.0" edition = "2021" -authors = ["Yuki Kishimoto "] +authors = ["Yuki Kishimoto "] publish = false [lib] diff --git a/crates/nostr-sdk-net/Cargo.toml b/crates/nostr-sdk-net/Cargo.toml index 54ac975c5..6f40a2406 100644 --- a/crates/nostr-sdk-net/Cargo.toml +++ b/crates/nostr-sdk-net/Cargo.toml @@ -3,7 +3,7 @@ name = "nostr-sdk-net" version = "0.25.0" edition = "2021" description = "Nostr SDK Network library." 
-authors = ["Yuki Kishimoto "] +authors = ["Yuki Kishimoto "] homepage.workspace = true repository.workspace = true license.workspace = true diff --git a/crates/nostr/Cargo.toml b/crates/nostr/Cargo.toml index 1041f287b..3120c5c00 100644 --- a/crates/nostr/Cargo.toml +++ b/crates/nostr/Cargo.toml @@ -3,7 +3,7 @@ name = "nostr" version = "0.25.0" edition = "2021" description = "Rust implementation of the Nostr protocol." -authors = ["Yuki Kishimoto "] +authors = ["Yuki Kishimoto "] homepage.workspace = true repository.workspace = true license.workspace = true From 70d894161223c1f34f4274d21dee746b961dd4cd Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Thu, 9 Nov 2023 15:23:08 +0100 Subject: [PATCH 85/98] database: add feature flags list --- crates/nostr-database/README.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/crates/nostr-database/README.md b/crates/nostr-database/README.md index e3b7d98bc..ee54207a0 100644 --- a/crates/nostr-database/README.md +++ b/crates/nostr-database/README.md @@ -12,6 +12,14 @@ This library cointains the `NostrDatabase` and `NostrDatabaseExt` traits. You ca * RocksDB (desktop, server and mobile devices), available at [`nostr-rocksdb`](https://crates.io/crates/nostr-rocksdb) * IndexedDB (web), available at [`nostr-indexeddb`](https://crates.io/crates/nostr-indexeddb) +## Crate Feature Flags + +The following crate feature flags are available: + +| Feature | Default | Description | +| ------------------- | :-----: | ---------------------------------------------------------------------------------------- | +| `flatbuf` | No | Enable `flatbuffers` de/serialization for nostr events | + ## State **This library is in an ALPHA state**, things that are implemented generally work but the API will change in breaking ways. 
From f9244670843ccc0ef9037ce6c3e13d268114628e Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Thu, 9 Nov 2023 15:26:19 +0100 Subject: [PATCH 86/98] ci: update build-args --- .githooks/pre-push | 3 ++- .github/workflows/ci.yml | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.githooks/pre-push b/.githooks/pre-push index 91692c763..45136d426 100755 --- a/.githooks/pre-push +++ b/.githooks/pre-push @@ -12,6 +12,7 @@ buildargs=( "-p nostr-sdk" "-p nostr-sdk --no-default-features" "-p nostr-sdk --features blocking" + #"-p nostr-sdk --features rocksdb" "-p nostr-ffi" "-p nostr-sdk-ffi" ) @@ -24,8 +25,8 @@ for arg in "${buildargs[@]}"; do done buildargs=( + "-p nostr-sdk --features indexeddb --target wasm32-unknown-unknown" "-p nostr-js --target wasm32-unknown-unknown" - "-p nostr-indexeddb --target wasm32-unknown-unknown" "-p nostr-sdk-js --target wasm32-unknown-unknown" ) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 80e4c05b9..024e41168 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -35,6 +35,7 @@ jobs: -p nostr-sdk, -p nostr-sdk --no-default-features, -p nostr-sdk --features blocking, + -p nostr-sdk --features rocksdb, ] steps: - name: Checkout @@ -69,8 +70,8 @@ jobs: build-args: [ -p nostr, - -p nostr-indexeddb, -p nostr-sdk, + -p nostr-sdk --features indexeddb, -p nostr-js, ] steps: From 6c3c08d8cb728bc62e36e084d5d88fb01247738c Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Thu, 9 Nov 2023 16:31:10 +0100 Subject: [PATCH 87/98] Set MSRV per crate --- .github/workflows/ci.yml | 35 ++- Cargo.lock | 404 ++++++++++++++++-------------- Cargo.toml | 1 - README.md | 4 - crates/nostr-database/Cargo.toml | 4 +- crates/nostr-indexeddb/Cargo.toml | 2 +- crates/nostr-rocksdb/Cargo.toml | 4 +- crates/nostr-sdk-net/Cargo.toml | 2 +- crates/nostr-sdk/Cargo.toml | 2 +- crates/nostr/Cargo.toml | 2 +- 10 files changed, 252 insertions(+), 208 deletions(-) diff --git a/.github/workflows/ci.yml 
b/.github/workflows/ci.yml index 024e41168..81eb886db 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -35,7 +35,6 @@ jobs: -p nostr-sdk, -p nostr-sdk --no-default-features, -p nostr-sdk --features blocking, - -p nostr-sdk --features rocksdb, ] steps: - name: Checkout @@ -59,6 +58,40 @@ jobs: - name: Clippy run: cargo clippy ${{ matrix.build-args }} -- -D warnings + build-msrv-1660: + name: Build + runs-on: ubuntu-latest + strategy: + matrix: + rust: + - version: stable # STABLE + - version: 1.66.0 # MSRV + build-args: + [ + -p nostr-sdk --features rocksdb, + ] + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Cache + uses: actions/cache@v3 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-msrv-1.66.0-${{ hashFiles('**/Cargo.toml','**/Cargo.lock') }} + - name: Set default toolchain + run: rustup default ${{ matrix.rust.version }} + - name: Set profile + run: rustup set profile minimal && rustup component add clippy + - name: Build + run: cargo build ${{ matrix.build-args }} + - name: Tests + run: cargo test ${{ matrix.build-args }} + - name: Clippy + run: cargo clippy ${{ matrix.build-args }} -- -D warnings + build-wasm: name: Build WASM runs-on: ubuntu-latest diff --git a/Cargo.lock b/Cargo.lock index 90690fc65..c54e9951d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -42,18 +42,18 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea5d730647d4fadd988536d06fecce94b7b4f2a7efdae548f1cf4b63205518ab" +checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" dependencies = [ "memchr", ] [[package]] name = "anstream" -version = "0.5.0" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f58811cfac344940f1a400b6e6231ce35171f614f26439e80f8c1465c5cc0c" +checksum = 
"2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44" dependencies = [ "anstyle", "anstyle-parse", @@ -65,15 +65,15 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b84bf0a05bbb2a83e5eb6fa36bb6e87baa08193c35ff52bbf6b38d8af2890e46" +checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" [[package]] name = "anstyle-parse" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" +checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140" dependencies = [ "utf8parse", ] @@ -89,9 +89,9 @@ dependencies = [ [[package]] name = "anstyle-wincon" -version = "2.1.0" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58f54d10c6dfa51283a066ceab3ec1ab78d13fae00aa49243a45e4571fb79dfd" +checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628" dependencies = [ "anstyle", "windows-sys", @@ -105,9 +105,9 @@ checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" [[package]] name = "askama" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47cbc3cf73fa8d9833727bbee4835ba5c421a0d65b72daf9a7b5d0e0f9cfb57e" +checksum = "b79091df18a97caea757e28cd2d5fda49c6cd4bd01ddffd7ff01ace0c0ad2c28" dependencies = [ "askama_derive", "askama_escape", @@ -115,14 +115,14 @@ dependencies = [ [[package]] name = "askama_derive" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c22fbe0413545c098358e56966ff22cdd039e10215ae213cfbd65032b119fc94" +checksum = "9a0fc7dcf8bd4ead96b1d36b41df47c14beedf7b0301fc543d8f2384e66a2ec0" dependencies = [ + "askama_parser", "basic-toml", "mime", "mime_guess", - "nom", 
"proc-macro2", "quote", "serde", @@ -135,6 +135,15 @@ version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "619743e34b5ba4e9703bba34deac3427c72507c7159f5fd030aea8cac0cfe341" +[[package]] +name = "askama_parser" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c268a96e01a4c47c8c5c2472aaa570707e006a875ea63e819f75474ceedaf7b4" +dependencies = [ + "nom", +] + [[package]] name = "async-trait" version = "0.1.74" @@ -192,15 +201,15 @@ dependencies = [ [[package]] name = "base64" -version = "0.21.4" +version = "0.21.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" +checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" [[package]] name = "basic-toml" -version = "0.1.4" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bfc506e7a2370ec239e1d072507b2a80c833083699d3c6fa176fbb4de8448c6" +checksum = "2f2139706359229bfa8f19142ac1155b4b80beafb7a60471ac5dd109d4a19778" dependencies = [ "serde", ] @@ -298,9 +307,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" [[package]] name = "block-buffer" @@ -328,9 +337,9 @@ checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" [[package]] name = "byteorder" -version = "1.4.3" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" @@ 
-360,9 +369,9 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cfa25e60aea747ec7e1124f238816749faa93759c6ff5b31f1ccdda137f4479" +checksum = "12024c4645c97566567129c204f65d5815a8c9aecf30fcbe682b2fe034996d36" dependencies = [ "serde", ] @@ -449,9 +458,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.5" +version = "4.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "824956d0dca8334758a5b7f7e50518d66ea319330cbceedcf76905c2f6ab30e3" +checksum = "ac495e00dcec98c83465d5ad66c5c4fabd652fd6686e7c6269b117e729a6f17b" dependencies = [ "clap_builder", "clap_derive", @@ -459,9 +468,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.5" +version = "4.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "122ec64120a49b4563ccaedcbea7818d069ed8e9aa6d829b82d8a4128936b2ab" +checksum = "c77ed9a32a62e6ca27175d00d29d05ca32e396ea1eb5fb01d8256b669cec7663" dependencies = [ "anstream", "anstyle", @@ -471,9 +480,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.4.2" +version = "4.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0862016ff20d69b84ef8247369fabf5c008a7417002411897d40ee1f4532b873" +checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" dependencies = [ "heck", "proc-macro2", @@ -483,9 +492,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961" +checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" [[package]] name = "colorchoice" @@ -503,6 +512,22 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "core-foundation" +version = "0.9.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" + [[package]] name = "core2" version = "0.3.3" @@ -514,9 +539,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.9" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" +checksum = "ce420fe07aecd3e67c5f910618fe65e94158f6dcc0adf44e00d69ce2bdfe0fd0" dependencies = [ "libc", ] @@ -542,9 +567,9 @@ dependencies = [ [[package]] name = "csv" -version = "1.2.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "626ae34994d3d8d668f4269922248239db4ae42d538b14c398b74a52208e8086" +checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" dependencies = [ "csv-core", "itoa", @@ -554,9 +579,9 @@ dependencies = [ [[package]] name = "csv-core" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" +checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" dependencies = [ "memchr", ] @@ -619,25 +644,14 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.3" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "136526188508e25c6fef639d7927dfb3e0e3084488bf202267829cf7fc23dbdd" +checksum = "7c18ee0ed65a5f1f81cac6b1d213b69c35fa47d4252ad41f1486dbd8226fe36e" dependencies = [ - "errno-dragonfly", "libc", "windows-sys", ] -[[package]] -name = "errno-dragonfly" -version = "0.1.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" -dependencies = [ - "cc", - "libc", -] - [[package]] name = "fancy_constructor" version = "1.2.2" @@ -662,9 +676,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.27" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010" +checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" dependencies = [ "crc32fast", "miniz_oxide", @@ -693,9 +707,9 @@ checksum = "0845fa252299212f0389d64ba26f34fa32cfe41588355f21ed507c59a0f64541" [[package]] name = "futures" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" +checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335" dependencies = [ "futures-channel", "futures-core", @@ -708,9 +722,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" +checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" dependencies = [ "futures-core", "futures-sink", @@ -718,15 +732,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" +checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" [[package]] name = "futures-executor" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" 
+checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc" dependencies = [ "futures-core", "futures-task", @@ -735,15 +749,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" +checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" [[package]] name = "futures-macro" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" dependencies = [ "proc-macro2", "quote", @@ -752,21 +766,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" +checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" [[package]] name = "futures-task" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" [[package]] name = "futures-util" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" +checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" dependencies = [ "futures-channel", "futures-core", @@ -805,9 +819,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" +checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" dependencies = [ "cfg-if", "js-sys", @@ -951,7 +965,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.9", + "socket2 0.4.10", "tokio", "tower-service", "tracing", @@ -960,9 +974,9 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http", @@ -1033,9 +1047,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" +checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" [[package]] name = "is-terminal" @@ -1065,9 +1079,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.64" +version = "0.3.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +checksum = "54c0c35952f67de54bb584e9fd912b3023117cbafc0a77d8f3dee1fb5f572fe8" dependencies = [ "wasm-bindgen", ] @@ -1086,9 +1100,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.148" +version = "0.2.150" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" +checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" [[package]] name = "libloading" @@ -1127,9 +1141,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.4.10" +version = "0.4.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" +checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829" [[package]] name = "log" @@ -1209,9 +1223,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.6.3" +version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" [[package]] name = "mime" @@ -1246,9 +1260,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" +checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0" dependencies = [ "libc", "wasi", @@ -1423,7 +1437,7 @@ dependencies = [ "tokio-socks", "tokio-tungstenite", "url-fork", - "webpki-roots 0.25.2", + "webpki-roots", "ws_stream_wasm", ] @@ -1558,9 +1572,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.67" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" +checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" dependencies = [ "unicode-ident", ] @@ -1606,13 +1620,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.5" +version = "1.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" +checksum = "ebee201405406dbf528b8b672104ae6d6d63e6d118cb10e4d51abbc7b58044ff" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.3.8", + "regex-automata 0.3.9", "regex-syntax 0.7.5", ] @@ -1627,9 +1641,9 @@ dependencies = [ [[package]] name = "regex-automata" 
-version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" +checksum = "59b23e92ee4318893fa3fe3e6fb365258efbfe6ac6ab30f090cdcbb7aa37efa9" dependencies = [ "aho-corasick", "memchr", @@ -1650,9 +1664,9 @@ checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" [[package]] name = "reqwest" -version = "0.11.20" +version = "0.11.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" +checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" dependencies = [ "base64", "bytes", @@ -1676,6 +1690,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", + "system-configuration", "tokio", "tokio-rustls", "tokio-socks", @@ -1684,23 +1699,22 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots 0.25.2", + "webpki-roots", "winreg", ] [[package]] name = "ring" -version = "0.16.20" +version = "0.17.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +checksum = "fb0205304757e5d899b9c2e448b867ffd03ae7f988002e47cd24954391394d0b" dependencies = [ "cc", + "getrandom", "libc", - "once_cell", "spin", "untrusted", - "web-sys", - "winapi", + "windows-sys", ] [[package]] @@ -1736,11 +1750,11 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.20" +version = "0.38.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67ce50cb2e16c2903e30d1cbccfd8387a74b9d4c938b6a4c5ec6cc7556f7a8a0" +checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.1", "errno", "libc", "linux-raw-sys", @@ -1749,13 +1763,13 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.7" +version = "0.21.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" +checksum = "446e14c5cda4f3f30fe71863c34ec70f5ac79d6087097ad0bb433e1be5edf04c" dependencies = [ "log", "ring", - "rustls-webpki 0.101.6", + "rustls-webpki", "sct", ] @@ -1770,19 +1784,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.100.3" +version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6a5fc258f1c1276dfe3016516945546e2d5383911efc0fc4f1cdc5df3a4ae3" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "rustls-webpki" -version = "0.101.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c7d5dece342910d9ba34d259310cae3e0154b873b35408b787b59bce53d34fe" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ "ring", "untrusted", @@ -1828,9 +1832,9 @@ dependencies = [ [[package]] name = "sct" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ "ring", "untrusted", @@ -1859,9 +1863,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad977052201c6de01a8ef2aa3378c4bd23217a056337d1d6da40468d267a4fb0" +checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" dependencies = [ "serde", ] @@ -1874,9 +1878,9 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "serde" -version = "1.0.188" +version = "1.0.192" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" +checksum = 
"bca2a08484b285dcb282d0f67b26cadc0df8b19f8c12502c13d966bf9482f001" dependencies = [ "serde_derive", ] @@ -1894,9 +1898,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.188" +version = "1.0.192" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" +checksum = "d6c7207fbec9faa48073f3e3074cbe553af6ea512d7c21ba46e434e70ea9fbc1" dependencies = [ "proc-macro2", "quote", @@ -1905,9 +1909,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.107" +version = "1.0.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" +checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" dependencies = [ "itoa", "ryu", @@ -1939,9 +1943,9 @@ dependencies = [ [[package]] name = "sharded-slab" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1b21f559e07218024e7e9f90f96f601825397de0e25420135f7f952453fed0b" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" dependencies = [ "lazy_static", ] @@ -1975,9 +1979,9 @@ checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a" [[package]] name = "socket2" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" dependencies = [ "libc", "winapi", @@ -1985,9 +1989,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" +checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" dependencies = [ "libc", 
"windows-sys", @@ -1995,9 +1999,9 @@ dependencies = [ [[package]] name = "spin" -version = "0.5.2" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" [[package]] name = "static_assertions" @@ -2013,15 +2017,36 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "syn" -version = "2.0.37" +version = "2.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7303ef2c05cd654186cb250d29049a24840ca25d2747c25c0381c8d9e2f582e8" +checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] +[[package]] +name = "system-configuration" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "termcolor" version = "1.3.0" @@ -2033,18 +2058,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.49" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1177e8c6d7ede7afde3585fd2513e611227efd6481bd78d2e82ba1ce16557ed4" +checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.49" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc" +checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ "proc-macro2", "quote", @@ -2078,9 +2103,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.32.0" +version = "1.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ed6077ed6cd6c74735e21f37eb16dc3935f96878b1fe961074089cc80893f9" +checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653" dependencies = [ "backtrace", "bytes", @@ -2088,7 +2113,7 @@ dependencies = [ "mio", "num_cpus", "pin-project-lite", - "socket2 0.5.4", + "socket2 0.5.5", "tokio-macros", "windows-sys", ] @@ -2138,14 +2163,14 @@ dependencies = [ "tokio", "tokio-rustls", "tungstenite", - "webpki-roots 0.25.2", + "webpki-roots", ] [[package]] name = "tokio-util" -version = "0.7.9" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d68074620f57a0b21594d9735eb2e98ab38b17f80d3fcb189fca266771ca60d" +checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" dependencies = [ "bytes", "futures-core", @@ -2172,11 +2197,10 @@ checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.37" +version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ - "cfg-if", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -2195,9 +2219,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.31" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" +checksum = 
"c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", "valuable", @@ -2205,12 +2229,12 @@ dependencies = [ [[package]] name = "tracing-log" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" +checksum = "f751112709b4e791d8ce53e32c4ed2d353565a795ce84da2285393f41557bdf2" dependencies = [ - "lazy_static", "log", + "once_cell", "tracing-core", ] @@ -2435,24 +2459,24 @@ dependencies = [ [[package]] name = "untrusted" -version = "0.7.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "ureq" -version = "2.7.1" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b11c96ac7ee530603dcdf68ed1557050f374ce55a5a07193ebf8cbc9f8927e9" +checksum = "f5ccd538d4a604753ebc2f17cd9946e89b77bf87f6a8e2309667c6f2e87855e3" dependencies = [ "base64", "flate2", "log", "once_cell", "rustls", - "rustls-webpki 0.100.3", + "rustls-webpki", "url", - "webpki-roots 0.23.1", + "webpki-roots", ] [[package]] @@ -2536,9 +2560,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +checksum = "7daec296f25a1bae309c0cd5c29c4b260e510e6d813c286b19eaadf409d40fce" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -2546,9 +2570,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +checksum = "e397f4664c0e4e428e8313a469aaa58310d302159845980fd23b0f22a847f217" dependencies = [ "bumpalo", "log", @@ -2561,9 +2585,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.37" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" +checksum = "9afec9963e3d0994cac82455b2b3502b81a7f40f9a0d32181f7528d9f4b43e02" dependencies = [ "cfg-if", "js-sys", @@ -2573,9 +2597,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +checksum = "5961017b3b08ad5f3fe39f1e79877f8ee7c23c5e5fd5eb80de95abc41f1f16b2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2583,9 +2607,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +checksum = "c5353b8dab669f5e10f5bd76df26a9360c748f054f862ff5f3f8aae0c7fb3907" dependencies = [ "proc-macro2", "quote", @@ -2596,15 +2620,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +checksum = "0d046c5d029ba91a1ed14da14dca44b68bf2f124cfbaf741c54151fdb3e0750b" [[package]] name = "wasm-bindgen-test" -version = "0.3.37" +version = "0.3.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e6e302a7ea94f83a6d09e78e7dc7d9ca7b186bc2829c24a22d0753efd680671" +checksum = "c6433b7c56db97397842c46b67e11873eda263170afeb3a2dc74a7cb370fee0d" 
dependencies = [ "console_error_panic_hook", "js-sys", @@ -2616,12 +2640,13 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.37" +version = "0.3.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecb993dd8c836930ed130e020e77d9b2e65dd0fbab1b67c790b0f5d80b11a575" +checksum = "493fcbab756bb764fa37e6bee8cec2dd709eb4273d06d0c282a5e74275ded735" dependencies = [ "proc-macro2", "quote", + "syn", ] [[package]] @@ -2637,23 +2662,14 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.64" +version = "0.3.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" +checksum = "5db499c5f66323272151db0e666cd34f78617522fb0c1604d31a27c50c206a85" dependencies = [ "js-sys", "wasm-bindgen", ] -[[package]] -name = "webpki-roots" -version = "0.23.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338" -dependencies = [ - "rustls-webpki 0.100.3", -] - [[package]] name = "webpki-roots" version = "0.25.2" diff --git a/Cargo.toml b/Cargo.toml index 2c9c0670f..71c051021 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,6 @@ resolver = "2" homepage = "https://github.com/rust-nostr/nostr" repository = "https://github.com/rust-nostr/nostr.git" license = "MIT" -rust-version = "1.64.0" [workspace.dependencies] async-trait = "0.1" diff --git a/README.md b/README.md index e8123d1bb..532591c21 100644 --- a/README.md +++ b/README.md @@ -36,10 +36,6 @@ Check the example in the [`embedded/`](./crates/nostr/examples/embedded/) direct * Swift: https://github.com/rust-nostr/nostr-sdk-swift * JavaScript: TODO -## Minimum Supported Rust Version (MSRV) - -These crates are built with the Rust language version 2021 and require a minimum compiler version of `1.64.0` - ## State **These libraries are in ALPHA state**, things that are implemented generally 
work but the API will change in breaking ways. diff --git a/crates/nostr-database/Cargo.toml b/crates/nostr-database/Cargo.toml index f1fa47fde..a0abb27ae 100644 --- a/crates/nostr-database/Cargo.toml +++ b/crates/nostr-database/Cargo.toml @@ -8,8 +8,8 @@ homepage.workspace = true repository.workspace = true license.workspace = true readme = "README.md" -rust-version.workspace = true -keywords = ["nostr", "sdk", "db"] +rust-version = "1.64.0" +keywords = ["nostr", "database"] [features] default = [] diff --git a/crates/nostr-indexeddb/Cargo.toml b/crates/nostr-indexeddb/Cargo.toml index 1dd058e36..8e9bac43a 100644 --- a/crates/nostr-indexeddb/Cargo.toml +++ b/crates/nostr-indexeddb/Cargo.toml @@ -8,7 +8,7 @@ homepage.workspace = true repository.workspace = true license.workspace = true readme = "README.md" -rust-version.workspace = true +rust-version = "1.64.0" keywords = ["nostr", "database", "indexeddb"] [dependencies] diff --git a/crates/nostr-rocksdb/Cargo.toml b/crates/nostr-rocksdb/Cargo.toml index 4aa84205f..bb6dfa26f 100644 --- a/crates/nostr-rocksdb/Cargo.toml +++ b/crates/nostr-rocksdb/Cargo.toml @@ -8,8 +8,8 @@ homepage.workspace = true repository.workspace = true license.workspace = true readme = "README.md" -rust-version.workspace = true -keywords = ["nostr", "sdk", "db", "rocksdb"] +rust-version = "1.66.0" +keywords = ["nostr", "database", "rocksdb"] [dependencies] async-trait = { workspace = true } diff --git a/crates/nostr-sdk-net/Cargo.toml b/crates/nostr-sdk-net/Cargo.toml index 6f40a2406..b306f0478 100644 --- a/crates/nostr-sdk-net/Cargo.toml +++ b/crates/nostr-sdk-net/Cargo.toml @@ -8,7 +8,7 @@ homepage.workspace = true repository.workspace = true license.workspace = true readme = "README.md" -rust-version.workspace = true +rust-version = "1.64.0" keywords = ["nostr", "sdk", "net"] [dependencies] diff --git a/crates/nostr-sdk/Cargo.toml b/crates/nostr-sdk/Cargo.toml index 9c2cbbfac..eb4bf0927 100644 --- a/crates/nostr-sdk/Cargo.toml +++ 
b/crates/nostr-sdk/Cargo.toml @@ -8,7 +8,7 @@ homepage.workspace = true repository.workspace = true license.workspace = true readme = "README.md" -rust-version.workspace = true +rust-version = "1.64.0" keywords = ["nostr", "sdk"] [package.metadata.docs.rs] diff --git a/crates/nostr/Cargo.toml b/crates/nostr/Cargo.toml index 3120c5c00..bef7ea5c2 100644 --- a/crates/nostr/Cargo.toml +++ b/crates/nostr/Cargo.toml @@ -8,7 +8,7 @@ homepage.workspace = true repository.workspace = true license.workspace = true readme = "README.md" -rust-version.workspace = true +rust-version = "1.64.0" keywords = ["nostr", "protocol", "no_std"] [package.metadata.docs.rs] From 85a227ea387ffc81389c6318d3f7aa98b5288287 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Fri, 10 Nov 2023 11:59:24 +0100 Subject: [PATCH 88/98] sdk: allow `bidirectional` negentropy reconciliation --- crates/nostr-sdk/src/relay/mod.rs | 18 +++++++++++++++--- crates/nostr-sdk/src/relay/options.rs | 15 ++++++++++++++- 2 files changed, 29 insertions(+), 4 deletions(-) diff --git a/crates/nostr-sdk/src/relay/mod.rs b/crates/nostr-sdk/src/relay/mod.rs index 4b33b3eca..0ceec7a49 100644 --- a/crates/nostr-sdk/src/relay/mod.rs +++ b/crates/nostr-sdk/src/relay/mod.rs @@ -24,7 +24,7 @@ use nostr::{ ClientMessage, Event, EventId, Filter, JsonUtil, Keys, RawRelayMessage, RelayMessage, SubscriptionId, Timestamp, Url, }; -use nostr_database::DynNostrDatabase; +use nostr_database::{DatabaseError, DynNostrDatabase}; use nostr_sdk_net::futures_util::{Future, SinkExt, StreamExt}; use nostr_sdk_net::{self as net, WsMessage}; use thiserror::Error; @@ -58,6 +58,9 @@ pub enum Error { /// Negentropy error #[error(transparent)] Negentropy(#[from] negentropy::Error), + /// Database error + #[error(transparent)] + Database(#[from] DatabaseError), /// Channel timeout #[error("channel timeout")] ChannelTimeout, @@ -259,7 +262,6 @@ pub struct Relay { document: Arc>, opts: RelayOptions, stats: RelayConnectionStats, - #[allow(dead_code)] 
database: Arc, scheduled_for_stop: Arc, scheduled_for_termination: Arc, @@ -1519,13 +1521,23 @@ impl Relay { } => { if subscription_id == sub_id { let query: Bytes = Bytes::from_hex(message)?; + let mut have_ids: Vec = Vec::new(); let mut need_ids: Vec = Vec::new(); let msg: Option = negentropy.reconcile_with_ids( &query, - &mut Vec::new(), + &mut have_ids, &mut need_ids, )?; + if opts.bidirectional { + let ids = have_ids.into_iter().filter_map(|id| EventId::from_slice(&id).ok()); + let filter = Filter::new().ids(ids); + let events: Vec = self.database.query(vec![filter]).await?; + if let Err(e) = self.batch_event(events, RelaySendOptions::default()).await { + tracing::error!("Impossible to batch events to {}: {e}", self.url); + } + } + if need_ids.is_empty() { tracing::info!("Reconciliation terminated"); break; diff --git a/crates/nostr-sdk/src/relay/options.rs b/crates/nostr-sdk/src/relay/options.rs index a43c1edac..439221325 100644 --- a/crates/nostr-sdk/src/relay/options.rs +++ b/crates/nostr-sdk/src/relay/options.rs @@ -247,13 +247,17 @@ impl RelayPoolOptions { /// Negentropy reconciliation options #[derive(Debug, Clone, Copy)] pub struct NegentropyOptions { - /// Timeout for sending event (default: 30 secs) + /// Timeout for reconciliation (default: 30 secs) pub timeout: Duration, /// Syncronous (default: true) /// /// If `true`, request events and wait that relay send them. /// If `false`, request events but continue the reconciliation pub syncrounous: bool, + /// Bidirectional Sync (default: false) + /// + /// If `true`, perform the set reconciliation on each side. 
+ pub bidirectional: bool, } impl Default for NegentropyOptions { @@ -261,6 +265,7 @@ impl Default for NegentropyOptions { Self { timeout: Duration::from_secs(30), syncrounous: true, + bidirectional: false, } } } @@ -285,4 +290,12 @@ impl NegentropyOptions { self.syncrounous = syncrounous; self } + + /// Bidirectional Sync (default: false) + /// + /// If `true`, perform the set reconciliation on each side. + pub fn bidirectional(mut self, bidirectional: bool) -> Self { + self.bidirectional = bidirectional; + self + } } From 62c406ac3e57fc66c6d3ac6db7cdd1707eefc4b9 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Fri, 10 Nov 2023 12:00:25 +0100 Subject: [PATCH 89/98] db: add `NostrDatabase::has_event_already_been_saved` method --- crates/nostr-database/src/lib.rs | 3 +++ crates/nostr-database/src/memory.rs | 12 +++++++++--- crates/nostr-indexeddb/src/lib.rs | 27 +++++++++++++++++++++++---- crates/nostr-rocksdb/src/lib.rs | 7 ++++++- 4 files changed, 41 insertions(+), 8 deletions(-) diff --git a/crates/nostr-database/src/lib.rs b/crates/nostr-database/src/lib.rs index b1ae4e4e2..9c1d8e437 100644 --- a/crates/nostr-database/src/lib.rs +++ b/crates/nostr-database/src/lib.rs @@ -68,6 +68,9 @@ pub trait NostrDatabase: AsyncTraitDeps { /// Return `true` if event was successfully saved into database. 
async fn save_event(&self, event: &Event) -> Result; + /// Check if [`Event`] has already been saved + async fn has_event_already_been_saved(&self, event_id: EventId) -> Result; + /// Check if [`EventId`] has already been seen async fn has_event_already_been_seen(&self, event_id: EventId) -> Result; diff --git a/crates/nostr-database/src/memory.rs b/crates/nostr-database/src/memory.rs index 90ed4f7ad..7bc5da179 100644 --- a/crates/nostr-database/src/memory.rs +++ b/crates/nostr-database/src/memory.rs @@ -85,9 +85,6 @@ impl NostrDatabase for MemoryDatabase { } async fn save_event(&self, event: &Event) -> Result { - // Set event as seen - self.event_id_seen(event.id, None).await?; - if self.opts.events { let EventIndexResult { to_store, @@ -113,6 +110,15 @@ impl NostrDatabase for MemoryDatabase { } } + async fn has_event_already_been_saved(&self, event_id: EventId) -> Result { + if self.opts.events { + let events = self.events.read().await; + Ok(events.contains_key(&event_id)) + } else { + Ok(false) + } + } + async fn has_event_already_been_seen(&self, event_id: EventId) -> Result { let seen_event_ids = self.seen_event_ids.read().await; Ok(seen_event_ids.contains_key(&event_id)) diff --git a/crates/nostr-indexeddb/src/lib.rs b/crates/nostr-indexeddb/src/lib.rs index db3186931..9debb96cf 100644 --- a/crates/nostr-indexeddb/src/lib.rs +++ b/crates/nostr-indexeddb/src/lib.rs @@ -245,7 +245,12 @@ impl_nostr_database!({ } async fn count(&self) -> Result { - Err(DatabaseError::NotSupported.into()) + let tx = self + .db + .transaction_on_one_with_mode(EVENTS_CF, IdbTransactionMode::Readonly)?; + let store = tx.object_store(EVENTS_CF)?; + let count: u32 = store.count()?.await?; + Ok(count as usize) } #[tracing::instrument(skip_all, level = "trace")] @@ -282,11 +287,25 @@ impl_nostr_database!({ } } - async fn has_event_already_been_seen( + async fn has_event_already_been_saved( &self, - _event_id: EventId, + event_id: EventId, ) -> Result { - todo!() + let tx = self + .db + 
.transaction_on_one_with_mode(EVENTS_CF, IdbTransactionMode::Readonly)?; + let store = tx.object_store(EVENTS_CF)?; + let key = JsValue::from(event_id.to_hex()); + Ok(store.get(&key)?.await?.is_some()) + } + + async fn has_event_already_been_seen(&self, event_id: EventId) -> Result { + let tx = self + .db + .transaction_on_one_with_mode(EVENTS_SEEN_BY_RELAYS_CF, IdbTransactionMode::Readonly)?; + let store = tx.object_store(EVENTS_SEEN_BY_RELAYS_CF)?; + let key = JsValue::from(event_id.to_hex()); + Ok(store.get(&key)?.await?.is_some()) } async fn event_id_seen( diff --git a/crates/nostr-rocksdb/src/lib.rs b/crates/nostr-rocksdb/src/lib.rs index 3b95db1b3..ae6718d90 100644 --- a/crates/nostr-rocksdb/src/lib.rs +++ b/crates/nostr-rocksdb/src/lib.rs @@ -190,11 +190,16 @@ impl NostrDatabase for RocksDatabase { } } - async fn has_event_already_been_seen(&self, event_id: EventId) -> Result { + async fn has_event_already_been_saved(&self, event_id: EventId) -> Result { let cf = self.cf_handle(EVENTS_CF)?; Ok(self.db.key_may_exist_cf(&cf, event_id.as_bytes())) } + async fn has_event_already_been_seen(&self, event_id: EventId) -> Result { + let cf = self.cf_handle(EVENTS_SEEN_BY_RELAYS_CF)?; + Ok(self.db.key_may_exist_cf(&cf, event_id.as_bytes())) + } + async fn event_id_seen( &self, event_id: EventId, From df062df9f51f2987b2cbe9651380948de80d442b Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 13 Nov 2023 11:11:33 +0100 Subject: [PATCH 90/98] sdk: improve `handle_relay_message` method --- crates/nostr-sdk/src/relay/pool.rs | 85 ++++++++++++++++++------------ 1 file changed, 51 insertions(+), 34 deletions(-) diff --git a/crates/nostr-sdk/src/relay/pool.rs b/crates/nostr-sdk/src/relay/pool.rs index 7976bff13..53e0b20ba 100644 --- a/crates/nostr-sdk/src/relay/pool.rs +++ b/crates/nostr-sdk/src/relay/pool.rs @@ -162,8 +162,8 @@ impl RelayPoolTask { while let Some(msg) = receiver.recv().await { match msg { RelayPoolMessage::ReceivedMsg { relay_url, msg } => { - match 
this.handle_relay_message(msg).await { - Ok(msg) => { + match this.handle_relay_message(relay_url.clone(), msg).await { + Ok(Some(msg)) => { let _ = this.notification_sender.send( RelayPoolNotification::Message( relay_url.clone(), @@ -178,21 +178,11 @@ impl RelayPoolTask { { Ok(seen) => { if !seen { - if let Err(e) = - this.database.save_event(&event).await - { - tracing::error!( - "Impossible to save event {}: {e}", - event.id - ); - } - - let notification = RelayPoolNotification::Event( - relay_url.clone(), - *event.clone(), - ); let _ = - this.notification_sender.send(notification); + this.notification_sender.send(RelayPoolNotification::Event( + relay_url, + *event.clone(), + )); } } Err(e) => tracing::error!( @@ -200,18 +190,6 @@ impl RelayPoolTask { event.id ), } - - // Set event as seen by relay - if let Err(e) = this - .database - .event_id_seen(event.id, Some(relay_url)) - .await - { - tracing::error!( - "Impossible to set event {} as seen by relay: {e}", - event.id - ); - } } RelayMessage::Notice { message } => { tracing::warn!("Notice from {relay_url}: {message}") @@ -226,6 +204,7 @@ impl RelayPoolTask { _ => (), } } + Ok(None) => (), Err(e) => tracing::error!( "Impossible to handle relay message from {relay_url}: {e}" ), @@ -266,7 +245,11 @@ impl RelayPoolTask { } } - async fn handle_relay_message(&self, msg: RawRelayMessage) -> Result { + async fn handle_relay_message( + &self, + relay_url: Url, + msg: RawRelayMessage, + ) -> Result, Error> { match msg { RawRelayMessage::Event { subscription_id, @@ -275,6 +258,28 @@ impl RelayPoolTask { // Deserialize partial event (id, pubkey and sig) let partial_event: PartialEvent = PartialEvent::from_json(event.to_string())?; + // Set event as seen by relay + if let Err(e) = self + .database + .event_id_seen(partial_event.id, Some(relay_url)) + .await + { + tracing::error!( + "Impossible to set event {} as seen by relay: {e}", + partial_event.id + ); + } + + // Check if event was already saved + if self + .database 
+ .has_event_already_been_saved(partial_event.id) + .await? + { + tracing::trace!("Event {} already saved into database", partial_event.id); + return Ok(None); + } + // Verify signature partial_event.verify_signature()?; @@ -293,13 +298,16 @@ impl RelayPoolTask { // Verify event ID event.verify_id()?; + // Save event + self.database.save_event(&event).await?; + // Compose RelayMessage - Ok(RelayMessage::Event { + Ok(Some(RelayMessage::Event { subscription_id: SubscriptionId::new(subscription_id), event: Box::new(event), - }) + })) } - m => Ok(RelayMessage::try_from(m)?), + m => Ok(Some(RelayMessage::try_from(m)?)), } } } @@ -764,13 +772,20 @@ impl RelayPool { } /// Get events of filters + /// + /// Get events from local database and relays pub async fn get_events_of( &self, filters: Vec, timeout: Duration, opts: FilterOptions, ) -> Result, Error> { - let events: Arc>> = Arc::new(Mutex::new(Vec::new())); + let stored_events: Vec = self + .database + .query(filters.clone()) + .await + .unwrap_or_default(); + let events: Arc>> = Arc::new(Mutex::new(stored_events)); let mut handles = Vec::new(); let relays = self.relays().await; for (url, relay) in relays.into_iter() { @@ -796,7 +811,9 @@ impl RelayPool { Ok(events.lock_owned().await.clone()) } - /// Request events of filter. All events will be sent to notification listener + /// Request events of filter. + /// + /// If the events aren't already stored in the database, will be sent to notification listener /// until the EOSE "end of stored events" message is received from the relay. 
pub async fn req_events_of( &self, From 6fd954954a2862fac6638f3ae59f9fcfe9d24732 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 13 Nov 2023 11:13:56 +0100 Subject: [PATCH 91/98] sdk: update `Realy::get_events_of` method --- crates/nostr-sdk/src/relay/mod.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/crates/nostr-sdk/src/relay/mod.rs b/crates/nostr-sdk/src/relay/mod.rs index 0ceec7a49..b6e53f515 100644 --- a/crates/nostr-sdk/src/relay/mod.rs +++ b/crates/nostr-sdk/src/relay/mod.rs @@ -1349,7 +1349,7 @@ impl Relay { } /// Get events of filters with custom callback - pub async fn get_events_of_with_callback( + async fn get_events_of_with_callback( &self, filters: Vec, timeout: Duration, @@ -1378,13 +1378,20 @@ impl Relay { } /// Get events of filters + /// + /// Get events from local database and relay pub async fn get_events_of( &self, filters: Vec, timeout: Duration, opts: FilterOptions, ) -> Result, Error> { - let events: Mutex> = Mutex::new(Vec::new()); + let stored_events: Vec = self + .database + .query(filters.clone()) + .await + .unwrap_or_default(); + let events: Mutex> = Mutex::new(stored_events); self.get_events_of_with_callback(filters, timeout, opts, |event| async { let mut events = events.lock().await; events.push(event); From ed714f2799fe8c15b456267b8cfc6309bdd68982 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 13 Nov 2023 15:39:00 +0100 Subject: [PATCH 92/98] database: update `event_id_seen` method --- crates/nostr-database/src/lib.rs | 10 +++------- crates/nostr-database/src/memory.rs | 23 +++++++---------------- crates/nostr-indexeddb/src/lib.rs | 4 ++-- crates/nostr-rocksdb/src/lib.rs | 19 ++++++------------- crates/nostr-sdk/src/relay/pool.rs | 2 +- 5 files changed, 19 insertions(+), 39 deletions(-) diff --git a/crates/nostr-database/src/lib.rs b/crates/nostr-database/src/lib.rs index 9c1d8e437..61aeedc3b 100644 --- a/crates/nostr-database/src/lib.rs +++ b/crates/nostr-database/src/lib.rs @@ 
-74,14 +74,10 @@ pub trait NostrDatabase: AsyncTraitDeps { /// Check if [`EventId`] has already been seen async fn has_event_already_been_seen(&self, event_id: EventId) -> Result; - /// Set [`EventId`] as seen + /// Set [`EventId`] as seen by relay /// - /// Optionally, save also the relay url where the event has been seen (useful for NIP65, aka gossip) - async fn event_id_seen( - &self, - event_id: EventId, - relay_url: Option, - ) -> Result<(), Self::Err>; + /// Useful for NIP65 (aka gossip) + async fn event_id_seen(&self, event_id: EventId, relay_url: Url) -> Result<(), Self::Err>; /// Get list of relays that have seen the [`EventId`] async fn event_recently_seen_on_relays( diff --git a/crates/nostr-database/src/memory.rs b/crates/nostr-database/src/memory.rs index 7bc5da179..25996dfeb 100644 --- a/crates/nostr-database/src/memory.rs +++ b/crates/nostr-database/src/memory.rs @@ -46,22 +46,17 @@ impl MemoryDatabase { &self, seen_event_ids: &mut HashMap>, event_id: EventId, - relay_url: Option, + relay_url: Url, ) { seen_event_ids .entry(event_id) .and_modify(|set| { - if let Some(relay_url) = &relay_url { - set.insert(relay_url.clone()); - } + set.insert(relay_url.clone()); }) - .or_insert_with(|| match relay_url { - Some(relay_url) => { - let mut set = HashSet::with_capacity(1); - set.insert(relay_url); - set - } - None => HashSet::with_capacity(0), + .or_insert_with(|| { + let mut set = HashSet::with_capacity(1); + set.insert(relay_url); + set }); } } @@ -124,11 +119,7 @@ impl NostrDatabase for MemoryDatabase { Ok(seen_event_ids.contains_key(&event_id)) } - async fn event_id_seen( - &self, - event_id: EventId, - relay_url: Option, - ) -> Result<(), Self::Err> { + async fn event_id_seen(&self, event_id: EventId, relay_url: Url) -> Result<(), Self::Err> { let mut seen_event_ids = self.seen_event_ids.write().await; self._event_id_seen(&mut seen_event_ids, event_id, relay_url); Ok(()) diff --git a/crates/nostr-indexeddb/src/lib.rs 
b/crates/nostr-indexeddb/src/lib.rs index 9debb96cf..368676f3c 100644 --- a/crates/nostr-indexeddb/src/lib.rs +++ b/crates/nostr-indexeddb/src/lib.rs @@ -311,7 +311,7 @@ impl_nostr_database!({ async fn event_id_seen( &self, _event_id: EventId, - _relay_url: Option, + _relay_url: Url, ) -> Result<(), IndexedDBError> { todo!() } @@ -351,7 +351,7 @@ impl_nostr_database!({ .transaction_on_one_with_mode(EVENTS_CF, IdbTransactionMode::Readonly)?; let store = tx.object_store(EVENTS_CF)?; - let mut events: Vec = Vec::new(); + let mut events: Vec = Vec::with_capacity(ids.len()); for event_id in ids.into_iter() { let key = JsValue::from(event_id.to_hex()); diff --git a/crates/nostr-rocksdb/src/lib.rs b/crates/nostr-rocksdb/src/lib.rs index ae6718d90..6f5a4ec19 100644 --- a/crates/nostr-rocksdb/src/lib.rs +++ b/crates/nostr-rocksdb/src/lib.rs @@ -200,20 +200,13 @@ impl NostrDatabase for RocksDatabase { Ok(self.db.key_may_exist_cf(&cf, event_id.as_bytes())) } - async fn event_id_seen( - &self, - event_id: EventId, - relay_url: Option, - ) -> Result<(), Self::Err> { + async fn event_id_seen(&self, event_id: EventId, relay_url: Url) -> Result<(), Self::Err> { let mut fbb = self.fbb.write().await; let cf = self.cf_handle(EVENTS_SEEN_BY_RELAYS_CF)?; - let value: HashSet = match relay_url { - Some(relay_url) => { - let mut set = HashSet::with_capacity(1); - set.insert(relay_url); - set - } - None => HashSet::new(), + let value: HashSet = { + let mut set = HashSet::with_capacity(1); + set.insert(relay_url); + set }; self.db .merge_cf(&cf, event_id, value.encode(&mut fbb)) @@ -261,7 +254,7 @@ impl NostrDatabase for RocksDatabase { tokio::task::spawn_blocking(move || { let cf = this.cf_handle(EVENTS_CF)?; - let mut events: Vec = Vec::new(); + let mut events: Vec = Vec::with_capacity(ids.len()); for v in this .db diff --git a/crates/nostr-sdk/src/relay/pool.rs b/crates/nostr-sdk/src/relay/pool.rs index 53e0b20ba..4fe0b80e2 100644 --- a/crates/nostr-sdk/src/relay/pool.rs +++ 
b/crates/nostr-sdk/src/relay/pool.rs @@ -261,7 +261,7 @@ impl RelayPoolTask { // Set event as seen by relay if let Err(e) = self .database - .event_id_seen(partial_event.id, Some(relay_url)) + .event_id_seen(partial_event.id, relay_url) .await { tracing::error!( From d462939be5dc0b9959f57a8e4c68d84be3afcb6b Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 13 Nov 2023 16:15:40 +0100 Subject: [PATCH 93/98] database: add `EraseNostrDatabaseError` and `IntoNostrDatabase` --- crates/nostr-database/src/lib.rs | 132 ++++++++++++++++++++++++- crates/nostr-sdk/src/client/builder.rs | 6 +- 2 files changed, 132 insertions(+), 6 deletions(-) diff --git a/crates/nostr-database/src/lib.rs b/crates/nostr-database/src/lib.rs index 61aeedc3b..76f20cc90 100644 --- a/crates/nostr-database/src/lib.rs +++ b/crates/nostr-database/src/lib.rs @@ -3,11 +3,12 @@ //! Nostr Database -#![deny(unsafe_code)] #![warn(missing_docs)] #![warn(rustdoc::bare_urls)] +use core::fmt; use std::collections::HashSet; +use std::sync::Arc; pub use async_trait::async_trait; pub use nostr; @@ -44,15 +45,49 @@ pub enum Backend { Custom(String), } -/// A type-erased [`StateStore`]. +/// A type-erased [`NostrDatabase`]. pub type DynNostrDatabase = dyn NostrDatabase; +/// A type that can be type-erased into `Arc`. +/// +/// This trait is not meant to be implemented directly outside +/// `nostr-database`, but it is automatically implemented for everything that +/// implements `NostrDatabase`. +pub trait IntoNostrDatabase { + #[doc(hidden)] + fn into_nostr_database(self) -> Arc; +} + +impl IntoNostrDatabase for T +where + T: NostrDatabase + Sized + 'static, +{ + fn into_nostr_database(self) -> Arc { + Arc::new(EraseNostrDatabaseError(self)) + } +} + +// Turns a given `Arc` into `Arc` by attaching the +// NostrDatabase impl vtable of `EraseNostrDatabaseError`. 
+impl IntoNostrDatabase for Arc +where + T: NostrDatabase + 'static, +{ + fn into_nostr_database(self) -> Arc { + let ptr: *const T = Arc::into_raw(self); + let ptr_erased = ptr as *const EraseNostrDatabaseError; + // SAFETY: EraseNostrDatabaseError is repr(transparent) so T and + // EraseNostrDatabaseError have the same layout and ABI + unsafe { Arc::from_raw(ptr_erased) } + } +} + /// Nostr Database #[cfg_attr(target_arch = "wasm32", async_trait(?Send))] #[cfg_attr(not(target_arch = "wasm32"), async_trait)] pub trait NostrDatabase: AsyncTraitDeps { /// Error - type Err: From; + type Err: From + Into; /// Name of the backend database used (ex. rocksdb, lmdb, sqlite, indexeddb, ...) fn backend(&self) -> Backend; @@ -129,6 +164,97 @@ pub trait NostrDatabaseExt: NostrDatabase { #[cfg_attr(not(target_arch = "wasm32"), async_trait)] impl NostrDatabaseExt for T {} +#[repr(transparent)] +struct EraseNostrDatabaseError(T); + +impl fmt::Debug for EraseNostrDatabaseError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +#[cfg_attr(target_arch = "wasm32", async_trait(?Send))] +#[cfg_attr(not(target_arch = "wasm32"), async_trait)] +impl NostrDatabase for EraseNostrDatabaseError { + type Err = DatabaseError; + + fn backend(&self) -> Backend { + self.0.backend() + } + + fn opts(&self) -> DatabaseOptions { + self.0.opts() + } + + async fn count(&self) -> Result { + self.0.count().await.map_err(Into::into) + } + + async fn save_event(&self, event: &Event) -> Result { + self.0.save_event(event).await.map_err(Into::into) + } + + async fn has_event_already_been_saved(&self, event_id: EventId) -> Result { + self.0 + .has_event_already_been_saved(event_id) + .await + .map_err(Into::into) + } + + async fn has_event_already_been_seen(&self, event_id: EventId) -> Result { + self.0 + .has_event_already_been_seen(event_id) + .await + .map_err(Into::into) + } + + async fn event_id_seen(&self, event_id: EventId, relay_url: Url) -> Result<(), Self::Err> 
{ + self.0 + .event_id_seen(event_id, relay_url) + .await + .map_err(Into::into) + } + + async fn event_recently_seen_on_relays( + &self, + event_id: EventId, + ) -> Result>, Self::Err> { + self.0 + .event_recently_seen_on_relays(event_id) + .await + .map_err(Into::into) + } + + async fn event_by_id(&self, event_id: EventId) -> Result { + self.0.event_by_id(event_id).await.map_err(Into::into) + } + + async fn query(&self, filters: Vec) -> Result, Self::Err> { + self.0.query(filters).await.map_err(Into::into) + } + + async fn event_ids_by_filters( + &self, + filters: Vec, + ) -> Result, Self::Err> { + self.0 + .event_ids_by_filters(filters) + .await + .map_err(Into::into) + } + + async fn negentropy_items( + &self, + filter: Filter, + ) -> Result, Self::Err> { + self.0.negentropy_items(filter).await.map_err(Into::into) + } + + async fn wipe(&self) -> Result<(), Self::Err> { + self.0.wipe().await.map_err(Into::into) + } +} + /// Alias for `Send` on non-wasm, empty trait (implemented by everything) on /// wasm. 
#[cfg(not(target_arch = "wasm32"))] diff --git a/crates/nostr-sdk/src/client/builder.rs b/crates/nostr-sdk/src/client/builder.rs index 94953ec53..fa224bdd5 100644 --- a/crates/nostr-sdk/src/client/builder.rs +++ b/crates/nostr-sdk/src/client/builder.rs @@ -7,7 +7,7 @@ use std::sync::Arc; use nostr::Keys; use nostr_database::memory::MemoryDatabase; -use nostr_database::{DatabaseError, DynNostrDatabase, NostrDatabase}; +use nostr_database::{DynNostrDatabase, IntoNostrDatabase}; #[cfg(feature = "nip46")] use super::RemoteSigner; @@ -37,9 +37,9 @@ impl ClientBuilder { /// Set database pub fn database(mut self, database: D) -> Self where - D: NostrDatabase + 'static, + D: IntoNostrDatabase, { - self.database = Arc::new(database); + self.database = database.into_nostr_database(); self } From cedbc96907dee928db7ce9d513e6d0569fc6b90e Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 13 Nov 2023 16:17:19 +0100 Subject: [PATCH 94/98] Add `nostr-sqlite` --- crates/nostr-database/README.md | 3 +- crates/nostr-sqlite/Cargo.toml | 26 ++ crates/nostr-sqlite/README.md | 15 + crates/nostr-sqlite/examples/sqlite.rs | 104 +++++++ crates/nostr-sqlite/migrations/001_init.sql | 20 ++ crates/nostr-sqlite/src/error.rs | 52 ++++ crates/nostr-sqlite/src/lib.rs | 317 ++++++++++++++++++++ crates/nostr-sqlite/src/migration.rs | 115 +++++++ 8 files changed, 651 insertions(+), 1 deletion(-) create mode 100644 crates/nostr-sqlite/Cargo.toml create mode 100644 crates/nostr-sqlite/README.md create mode 100644 crates/nostr-sqlite/examples/sqlite.rs create mode 100644 crates/nostr-sqlite/migrations/001_init.sql create mode 100644 crates/nostr-sqlite/src/error.rs create mode 100644 crates/nostr-sqlite/src/lib.rs create mode 100644 crates/nostr-sqlite/src/migration.rs diff --git a/crates/nostr-database/README.md b/crates/nostr-database/README.md index ee54207a0..d5f09dc7f 100644 --- a/crates/nostr-database/README.md +++ b/crates/nostr-database/README.md @@ -4,11 +4,12 @@ Database for Nostr apps 
## Nostr Database Trait -This library cointains the `NostrDatabase` and `NostrDatabaseExt` traits. You can use the [default backends](#default-backends) or implement your one (PostgreSQL, SQLite, ...). +This library contains the `NostrDatabase` and `NostrDatabaseExt` traits. You can use the [default backends](#default-backends) or implement your own (like PostgreSQL, ...). ## Default backends * Memory (RAM), available in this library +* SQLite (desktop, server and mobile devices), available at [`nostr-sqlite`](https://crates.io/crates/nostr-sqlite) * RocksDB (desktop, server and mobile devices), available at [`nostr-rocksdb`](https://crates.io/crates/nostr-rocksdb) * IndexedDB (web), available at [`nostr-indexeddb`](https://crates.io/crates/nostr-indexeddb) diff --git a/crates/nostr-sqlite/Cargo.toml b/crates/nostr-sqlite/Cargo.toml new file mode 100644 index 000000000..06964aa91 --- /dev/null +++ b/crates/nostr-sqlite/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "nostr-sqlite" +version = "0.1.0" +edition = "2021" +description = "SQLite Storage backend for Nostr apps" +authors = ["Yuki Kishimoto "] +homepage.workspace = true +repository.workspace = true +license.workspace = true +readme = "README.md" +rust-version = "1.64.0" +keywords = ["nostr", "database", "sqlite"] + +[dependencies] +async-trait = { workspace = true } +deadpool-sqlite = "0.5" +nostr = { workspace = true, features = ["std"] } +nostr-database = { workspace = true, features = ["flatbuf"] } +rusqlite = { version = "0.28", features = ["bundled"] } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["rt-multi-thread", "sync"] } +tracing = { workspace = true, features = ["std", "attributes"] } + +[dev-dependencies] +tokio = { workspace = true, features = ["macros", "rt-multi-thread", "time"] } +tracing-subscriber = { workspace = true, features = ["env-filter"] } diff --git a/crates/nostr-sqlite/README.md b/crates/nostr-sqlite/README.md new file mode 100644 index 
000000000..0d0a64f11 --- /dev/null +++ b/crates/nostr-sqlite/README.md @@ -0,0 +1,15 @@ +# Nostr SQLite + +## State + +**This library is in an ALPHA state**, things that are implemented generally work but the API will change in breaking ways. + +## License + +This project is distributed under the MIT software license - see the [LICENSE](../../LICENSE) file for details + +## Donations + +⚡ Tips: + +⚡ Lightning Address: yuki@getalby.com \ No newline at end of file diff --git a/crates/nostr-sqlite/examples/sqlite.rs b/crates/nostr-sqlite/examples/sqlite.rs new file mode 100644 index 000000000..f91d5dc11 --- /dev/null +++ b/crates/nostr-sqlite/examples/sqlite.rs @@ -0,0 +1,104 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +use std::time::Duration; + +use nostr::prelude::*; +use nostr_database::NostrDatabase; +use nostr_sqlite::SQLiteDatabase; +use tracing_subscriber::fmt::format::FmtSpan; + +#[tokio::main] +async fn main() { + tracing_subscriber::fmt::fmt() + .with_span_events(FmtSpan::CLOSE) + .init(); + + let secret_key = + SecretKey::from_bech32("nsec1j4c6269y9w0q2er2xjw8sv2ehyrtfxq3jwgdlxj6qfn8z4gjsq5qfvfk99") + .unwrap(); + let keys_a = Keys::new(secret_key); + println!("Pubkey A: {}", keys_a.public_key()); + + let secret_key = + SecretKey::from_bech32("nsec1ufnus6pju578ste3v90xd5m2decpuzpql2295m3sknqcjzyys9ls0qlc85") + .unwrap(); + let keys_b = Keys::new(secret_key); + println!("Pubkey B: {}", keys_b.public_key()); + + let database = SQLiteDatabase::open("./db/sqlite.db").await.unwrap(); + + println!("Events stored: {}", database.count().await.unwrap()); + + /* for i in 0..100_000 { + let event = EventBuilder::new_text_note(format!("Event #{i}"), &[]) + .to_event(&keys_a) + .unwrap(); + database.save_event(&event).await.unwrap(); + + let event = EventBuilder::new_text_note( + format!("Reply to event #{i}"), + &[ + Tag::Event(event.id, None, None), + Tag::PubKey(event.pubkey, None), + ], + ) + .to_event(&keys_b) + 
.unwrap(); + database.save_event(&event).await.unwrap(); + } */ + + for i in 0..10 { + let metadata = Metadata::new().name(format!("Name #{i}")); + let event = EventBuilder::set_metadata(metadata) + .to_event(&keys_a) + .unwrap(); + database.save_event(&event).await.unwrap(); + } + + /* for i in 0..500_000 { + let event = EventBuilder::new( + Kind::Custom(123), + "Custom with d tag", + &[Tag::Identifier(format!("myid{i}"))], + ) + .to_event(&keys_a) + .unwrap(); + database.save_event(&event).await.unwrap(); + } */ + + let event_id = EventId::all_zeros(); + database + .event_id_seen(event_id, Url::parse("wss://relay.damus.io").unwrap()) + .await + .unwrap(); + database + .event_id_seen(event_id, Url::parse("wss://relay.nostr.info").unwrap()) + .await + .unwrap(); + database + .event_id_seen(event_id, Url::parse("wss://relay.damus.io").unwrap()) + .await + .unwrap(); + + let relays = database + .event_recently_seen_on_relays(event_id) + .await + .unwrap(); + println!("Seen on: {relays:?}"); + + let events = database + .query(vec![Filter::new() + .kinds(vec![Kind::Metadata, Kind::Custom(123), Kind::TextNote]) + .limit(20) + //.kind(Kind::Custom(123)) + //.identifier("myid5000") + .author(keys_a.public_key())]) + .await + .unwrap(); + println!("Got {} events", events.len()); + + loop { + tokio::time::sleep(Duration::from_secs(30)).await + } +} diff --git a/crates/nostr-sqlite/migrations/001_init.sql b/crates/nostr-sqlite/migrations/001_init.sql new file mode 100644 index 000000000..112c5aa40 --- /dev/null +++ b/crates/nostr-sqlite/migrations/001_init.sql @@ -0,0 +1,20 @@ +-- Database settings +PRAGMA encoding = "UTF-8"; +PRAGMA journal_mode=WAL; +PRAGMA main.synchronous=NORMAL; +PRAGMA foreign_keys = ON; +PRAGMA application_id = 1654008667; +PRAGMA user_version = 1; -- Schema version + +CREATE TABLE IF NOT EXISTS events ( + event_id BLOB PRIMARY KEY NOT NULL, + event BLOB NOT NULL +); + +CREATE TABLE IF NOT EXISTS event_seen_by_relays ( + id INTEGER PRIMARY KEY 
AUTOINCREMENT, + event_id BLOB NOT NULL, + relay_url TEXT NOT NULL +); + +CREATE UNIQUE INDEX IF NOT EXISTS event_seen_by_relays_index ON event_seen_by_relays(event_id,relay_url); \ No newline at end of file diff --git a/crates/nostr-sqlite/src/error.rs b/crates/nostr-sqlite/src/error.rs new file mode 100644 index 000000000..b72f1177d --- /dev/null +++ b/crates/nostr-sqlite/src/error.rs @@ -0,0 +1,52 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +use deadpool_sqlite::{CreatePoolError, InteractError, PoolError}; +use nostr_database::{flatbuffers, DatabaseError}; +use thiserror::Error; + +use crate::migration::MigrationError; + +/// Store error +#[derive(Debug, Error)] +pub enum Error { + /// Sqlite error + #[error(transparent)] + Sqlite(#[from] rusqlite::Error), + /// Pool error + #[error(transparent)] + CreateDeadPool(#[from] CreatePoolError), + /// Pool error + #[error(transparent)] + DeadPool(#[from] PoolError), + /// Pool error + #[error("{0}")] + DeadPoolInteract(String), + /// Migration error + #[error(transparent)] + Migration(#[from] MigrationError), + /// Database error + #[error(transparent)] + Database(#[from] DatabaseError), + /// Flatbuffers error + #[error(transparent)] + Flatbuffers(#[from] flatbuffers::Error), + /// Url error + #[error(transparent)] + Url(#[from] nostr::url::ParseError), + /// Not found + #[error("sqlite: {0} not found")] + NotFound(String), +} + +impl From for Error { + fn from(e: InteractError) -> Self { + Self::DeadPoolInteract(e.to_string()) + } +} + +impl From for DatabaseError { + fn from(e: Error) -> Self { + Self::backend(e) + } +} diff --git a/crates/nostr-sqlite/src/lib.rs b/crates/nostr-sqlite/src/lib.rs new file mode 100644 index 000000000..d0d698a48 --- /dev/null +++ b/crates/nostr-sqlite/src/lib.rs @@ -0,0 +1,317 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +//! 
SQLite Storage backend for Nostr SDK + +#![forbid(unsafe_code)] +#![warn(missing_docs)] +#![warn(rustdoc::bare_urls)] + +use std::collections::HashSet; +use std::path::Path; +use std::sync::Arc; + +pub extern crate nostr; +pub extern crate nostr_database as database; + +use async_trait::async_trait; +use deadpool_sqlite::{Config, Object, Pool, Runtime}; +use nostr::event::raw::RawEvent; +use nostr::{Event, EventId, Filter, Timestamp, Url}; +use nostr_database::{ + Backend, DatabaseIndexes, DatabaseOptions, EventIndexResult, FlatBufferBuilder, + FlatBufferDecode, FlatBufferEncode, NostrDatabase, +}; +use rusqlite::config::DbConfig; +use tokio::sync::RwLock; + +mod error; +mod migration; + +pub use self::error::Error; +use self::migration::STARTUP_SQL; + +/// SQLite Nostr Database +#[derive(Debug, Clone)] +pub struct SQLiteDatabase { + db: Pool, + indexes: DatabaseIndexes, + fbb: Arc>>, +} + +impl SQLiteDatabase { + /// Open SQLite store + pub async fn open

(path: P) -> Result + where + P: AsRef, + { + let cfg = Config::new(path.as_ref()); + let pool = cfg.create_pool(Runtime::Tokio1)?; + + // Execute migrations + let conn = pool.get().await?; + migration::run(&conn).await?; + + let this = Self { + db: pool, + indexes: DatabaseIndexes::new(), + fbb: Arc::new(RwLock::new(FlatBufferBuilder::with_capacity(70_000))), + }; + + // Build indexes + this.build_indexes(&conn).await?; + + Ok(this) + } + + async fn acquire(&self) -> Result { + Ok(self.db.get().await?) + } + + #[tracing::instrument(skip_all)] + async fn build_indexes(&self, conn: &Object) -> Result<(), Error> { + let events = conn + .interact(move |conn| { + let mut stmt = conn.prepare_cached("SELECT event FROM events;")?; + let mut rows = stmt.query([])?; + let mut events = HashSet::new(); + while let Ok(Some(row)) = rows.next() { + let buf: Vec = row.get(0)?; + let raw = RawEvent::decode(&buf)?; + events.insert(raw); + } + Ok::, Error>(events) + }) + .await??; + self.indexes.bulk_load(events).await; + Ok(()) + } +} + +#[async_trait] +impl NostrDatabase for SQLiteDatabase { + type Err = Error; + + fn backend(&self) -> Backend { + Backend::SQLite + } + + fn opts(&self) -> DatabaseOptions { + DatabaseOptions::default() + } + + async fn count(&self) -> Result { + let conn = self.acquire().await?; + conn.interact(move |conn| { + let mut stmt = conn.prepare_cached("SELECT COUNT(*) FROM events;")?; + let mut rows = stmt.query([])?; + let row = rows.next()?.ok_or(Error::NotFound("count result".into()))?; + let count: usize = row.get(0)?; + Ok(count) + }) + .await? 
+ } + + #[tracing::instrument(skip_all, level = "trace")] + async fn save_event(&self, event: &Event) -> Result { + // Index event + let EventIndexResult { + to_store, + to_discard, + } = self.indexes.index_event(event).await; + + if !to_discard.is_empty() { + let conn = self.acquire().await?; + conn.interact(move |conn| { + let delete_query = format!( + "DELETE FROM events WHERE {};", + to_discard + .iter() + .map(|id| format!("event_id = '{id}'")) + .collect::>() + .join(" AND ") + ); + conn.execute(&delete_query, []) + }) + .await??; + } + + if to_store { + // Acquire FlatBuffers Builder + let mut fbb = self.fbb.write().await; + + // Encode + let event_id: EventId = event.id; + let value: Vec = event.encode(&mut fbb).to_vec(); + + // Save event + let conn = self.acquire().await?; + conn.interact(move |conn| { + conn.execute( + "INSERT OR IGNORE INTO events (event_id, event) VALUES (?, ?);", + (event_id.to_hex(), value), + ) + }) + .await??; + + Ok(true) + } else { + Ok(false) + } + } + + async fn has_event_already_been_saved(&self, event_id: EventId) -> Result { + let conn = self.acquire().await?; + conn.interact(move |conn| { + let mut stmt = conn.prepare_cached( + "SELECT EXISTS(SELECT 1 FROM events WHERE event_id = ? LIMIT 1);", + )?; + let mut rows = stmt.query([event_id.to_hex()])?; + let exists: u8 = match rows.next()? { + Some(row) => row.get(0)?, + None => 0, + }; + Ok(exists == 1) + }) + .await? + } + + async fn has_event_already_been_seen(&self, event_id: EventId) -> Result { + let conn = self.acquire().await?; + conn.interact(move |conn| { + let mut stmt = conn.prepare_cached( + "SELECT EXISTS(SELECT 1 FROM event_seen_by_relays WHERE event_id = ? LIMIT 1);", + )?; + let mut rows = stmt.query([event_id.to_hex()])?; + let exists: u8 = match rows.next()? { + Some(row) => row.get(0)?, + None => 0, + }; + Ok(exists == 1) + }) + .await? 
+ } + + async fn event_id_seen(&self, event_id: EventId, relay_url: Url) -> Result<(), Self::Err> { + let conn = self.acquire().await?; + conn.interact(move |conn| { + conn.execute( + "INSERT OR IGNORE INTO event_seen_by_relays (event_id, relay_url) VALUES (?, ?);", + (event_id.to_hex(), relay_url.to_string()), + ) + }) + .await??; + Ok(()) + } + + async fn event_recently_seen_on_relays( + &self, + event_id: EventId, + ) -> Result>, Self::Err> { + let conn = self.acquire().await?; + conn.interact(move |conn| { + let mut stmt = conn + .prepare_cached("SELECT relay_url FROM event_seen_by_relays WHERE event_id = ?;")?; + let mut rows = stmt.query([event_id.to_hex()])?; + let mut relays = HashSet::new(); + while let Ok(Some(row)) = rows.next() { + let url: String = row.get(0)?; + relays.insert(Url::parse(&url)?); + } + Ok(Some(relays)) + }) + .await? + } + + #[tracing::instrument(skip_all)] + async fn event_by_id(&self, event_id: EventId) -> Result { + let conn = self.acquire().await?; + conn.interact(move |conn| { + let mut stmt = conn.prepare_cached("SELECT event FROM events WHERE event_id = ?;")?; + let mut rows = stmt.query([event_id.to_hex()])?; + let row = rows.next()?.ok_or(Error::NotFound("event".into()))?; + let buf: Vec = row.get(0)?; + Ok(Event::decode(&buf)?) + }) + .await? + } + + #[tracing::instrument(skip_all)] + async fn query(&self, filters: Vec) -> Result, Self::Err> { + let ids = self.indexes.query(filters.clone()).await; + let conn = self.acquire().await?; + conn.interact(move |conn| { + let query = format!( + "SELECT event FROM events WHERE {};", + ids.iter() + .map(|id| format!("event_id = '{id}'")) + .collect::>() + .join(" OR ") + ); + let mut stmt = conn.prepare_cached(&query)?; + let mut rows = stmt.query([])?; + let mut events = Vec::with_capacity(ids.len()); + while let Ok(Some(row)) = rows.next() { + let buf: Vec = row.get(0)?; + events.push(Event::decode(&buf)?); + } + Ok(events) + }) + .await? 
+ } + + async fn event_ids_by_filters( + &self, + filters: Vec, + ) -> Result, Self::Err> { + Ok(self.indexes.query(filters).await) + } + + async fn negentropy_items( + &self, + filter: Filter, + ) -> Result, Self::Err> { + let ids = self.indexes.query(vec![filter.clone()]).await; + let conn = self.acquire().await?; + conn.interact(move |conn| { + let query = format!( + "SELECT event FROM events WHERE {};", + ids.iter() + .map(|id| format!("event_id = '{id}'")) + .collect::>() + .join(" OR ") + ); + let mut stmt = conn.prepare_cached(&query)?; + let mut rows = stmt.query([])?; + let mut items = Vec::with_capacity(ids.len()); + while let Ok(Some(row)) = rows.next() { + let buf: Vec = row.get(0)?; + let event = Event::decode(&buf)?; // TODO: decode RawEvent? + items.push((event.id, event.created_at)); + } + Ok(items) + }) + .await? + } + + async fn wipe(&self) -> Result<(), Self::Err> { + let conn = self.acquire().await?; + + conn.interact(|conn| { + // Reset DB + conn.set_db_config(DbConfig::SQLITE_DBCONFIG_RESET_DATABASE, true)?; + conn.execute("VACUUM;", [])?; + conn.set_db_config(DbConfig::SQLITE_DBCONFIG_RESET_DATABASE, false)?; + + // Execute migrations + conn.execute_batch(STARTUP_SQL)?; + + Ok::<(), Error>(()) + }) + .await??; + + migration::run(&conn).await?; + + Ok(()) + } +} diff --git a/crates/nostr-sqlite/src/migration.rs b/crates/nostr-sqlite/src/migration.rs new file mode 100644 index 000000000..77e436c92 --- /dev/null +++ b/crates/nostr-sqlite/src/migration.rs @@ -0,0 +1,115 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +use std::cmp::Ordering; + +use deadpool_sqlite::Object; +use rusqlite::Connection; +use thiserror::Error; + +use super::Error; + +/// Latest database version +pub const DB_VERSION: usize = 1; + +/// Startup DB Pragmas +pub const STARTUP_SQL: &str = r##" +PRAGMA main.synchronous=NORMAL; +PRAGMA foreign_keys = ON; +PRAGMA journal_size_limit=32768; +pragma mmap_size = 17179869184; -- cap 
mmap at 16GB +"##; + +/// Schema error +#[derive(Debug, Error)] +pub enum MigrationError { + /// Database version newer than supported + #[error( + "Database version is newer than supported by this executable (v{current} > v{DB_VERSION})" + )] + NewerDbVersion { current: usize }, +} + +/// Determine the current application database schema version. +pub fn curr_db_version(conn: &mut Connection) -> Result { + let query = "PRAGMA user_version;"; + let curr_version = conn.query_row(query, [], |row| row.get(0))?; + Ok(curr_version) +} + +/// Upgrade DB to latest version, and execute pragma settings +pub(crate) async fn run(conn: &Object) -> Result<(), Error> { + conn.interact(|conn| { + // check the version. + let mut curr_version = curr_db_version(conn)?; + tracing::info!("DB version = {:?}", curr_version); + + match curr_version.cmp(&DB_VERSION) { + // Database is new or not current + Ordering::Less => { + // initialize from scratch + if curr_version == 0 { + curr_version = mig_init(conn)?; + } + + // for initialized but out-of-date schemas, proceed to + // upgrade sequentially until we are current. 
+ /* if curr_version == 1 { + curr_version = mig_1_to_2(conn)?; + } + + if curr_version == 2 { + curr_version = mig_2_to_3(conn)?; + } + + if curr_version == 3 { + curr_version = mig_3_to_4(conn)?; + } + + if curr_version == 4 { + curr_version = mig_4_to_5(conn)?; + } + + if curr_version == 5 { + curr_version = mig_5_to_6(conn)?; + } + + if curr_version == 6 { + curr_version = mig_6_to_7(conn)?; + } */ + + if curr_version == DB_VERSION { + tracing::info!("All migration scripts completed successfully (v{DB_VERSION})"); + } + } + // Database is current, all is good + Ordering::Equal => { + tracing::debug!("Database version was already current (v{DB_VERSION})"); + } + // Database is newer than what this code understands, abort + Ordering::Greater => { + return Err(Error::Migration(MigrationError::NewerDbVersion { + current: curr_version, + })); + } + } + + // Setup PRAGMA + conn.execute_batch(STARTUP_SQL)?; + tracing::debug!("SQLite PRAGMA startup completed"); + Ok(()) + }) + .await? +} + +fn mig_init(conn: &mut Connection) -> Result { + conn.execute_batch(include_str!("../migrations/001_init.sql"))?; + tracing::info!("database schema initialized to v1"); + Ok(1) +} + +/* fn mig_1_to_2(conn: &mut Connection) -> Result { + conn.execute_batch(include_str!("../../migrations/002_notifications.sql"))?; + tracing::info!("database schema upgraded v1 -> v2"); + Ok(2) +} */ From 85a989a52e5d8539800d0cbd23da411ae6b2ef28 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 13 Nov 2023 16:26:45 +0100 Subject: [PATCH 95/98] sqlite: fix `Expression tree is too large` --- crates/nostr-sqlite/src/lib.rs | 44 +++++++++++++--------------------- 1 file changed, 17 insertions(+), 27 deletions(-) diff --git a/crates/nostr-sqlite/src/lib.rs b/crates/nostr-sqlite/src/lib.rs index d0d698a48..7ba533c1a 100644 --- a/crates/nostr-sqlite/src/lib.rs +++ b/crates/nostr-sqlite/src/lib.rs @@ -241,19 +241,14 @@ impl NostrDatabase for SQLiteDatabase { let ids = 
self.indexes.query(filters.clone()).await; let conn = self.acquire().await?; conn.interact(move |conn| { - let query = format!( - "SELECT event FROM events WHERE {};", - ids.iter() - .map(|id| format!("event_id = '{id}'")) - .collect::>() - .join(" OR ") - ); - let mut stmt = conn.prepare_cached(&query)?; - let mut rows = stmt.query([])?; + let mut stmt = conn.prepare_cached("SELECT event FROM events WHERE event_id = ?;")?; let mut events = Vec::with_capacity(ids.len()); - while let Ok(Some(row)) = rows.next() { - let buf: Vec = row.get(0)?; - events.push(Event::decode(&buf)?); + for id in ids.into_iter() { + let mut rows = stmt.query([id.to_hex()])?; + while let Ok(Some(row)) = rows.next() { + let buf: Vec = row.get(0)?; + events.push(Event::decode(&buf)?); + } } Ok(events) }) @@ -274,22 +269,17 @@ impl NostrDatabase for SQLiteDatabase { let ids = self.indexes.query(vec![filter.clone()]).await; let conn = self.acquire().await?; conn.interact(move |conn| { - let query = format!( - "SELECT event FROM events WHERE {};", - ids.iter() - .map(|id| format!("event_id = '{id}'")) - .collect::>() - .join(" OR ") - ); - let mut stmt = conn.prepare_cached(&query)?; - let mut rows = stmt.query([])?; - let mut items = Vec::with_capacity(ids.len()); - while let Ok(Some(row)) = rows.next() { - let buf: Vec = row.get(0)?; - let event = Event::decode(&buf)?; // TODO: decode RawEvent? - items.push((event.id, event.created_at)); + let mut stmt = conn.prepare_cached("SELECT event FROM events WHERE event_id = ?;")?; + let mut events = Vec::with_capacity(ids.len()); + for id in ids.into_iter() { + let mut rows = stmt.query([id.to_hex()])?; + while let Ok(Some(row)) = rows.next() { + let buf: Vec = row.get(0)?; + let event = Event::decode(&buf)?; + events.push((event.id, event.created_at)); + } } - Ok(items) + Ok(events) }) .await? 
} From db913370d26167f90cf9fc980572bfad6607b53d Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 13 Nov 2023 16:28:03 +0100 Subject: [PATCH 96/98] sdk: add `sqlite` feature --- .githooks/pre-push | 1 + .github/workflows/ci.yml | 1 + Cargo.lock | 160 +++++++++++++++++++++++++++- README.md | 1 + crates/nostr-sdk/Cargo.toml | 6 ++ crates/nostr-sdk/examples/sqlite.rs | 39 +++++++ crates/nostr-sdk/src/lib.rs | 4 +- 7 files changed, 210 insertions(+), 2 deletions(-) create mode 100644 crates/nostr-sdk/examples/sqlite.rs diff --git a/.githooks/pre-push b/.githooks/pre-push index 45136d426..6589194c6 100755 --- a/.githooks/pre-push +++ b/.githooks/pre-push @@ -12,6 +12,7 @@ buildargs=( "-p nostr-sdk" "-p nostr-sdk --no-default-features" "-p nostr-sdk --features blocking" + "-p nostr-sdk --features sqlite" #"-p nostr-sdk --features rocksdb" "-p nostr-ffi" "-p nostr-sdk-ffi" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 81eb886db..4ca599d2e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -35,6 +35,7 @@ jobs: -p nostr-sdk, -p nostr-sdk --no-default-features, -p nostr-sdk --features blocking, + -p nostr-sdk --features sqlite, ] steps: - name: Checkout diff --git a/Cargo.lock b/Cargo.lock index c54e9951d..890ec21a3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -40,6 +40,18 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "ahash" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + [[package]] name = "aho-corasick" version = "1.1.2" @@ -49,6 +61,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "allocator-api2" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" + [[package]] name = "anstream" version = 
"0.6.4" @@ -592,6 +610,48 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" +[[package]] +name = "deadpool" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "421fe0f90f2ab22016f32a9881be5134fdd71c65298917084b0c7477cbc3856e" +dependencies = [ + "async-trait", + "deadpool-runtime", + "num_cpus", + "retain_mut", + "tokio", +] + +[[package]] +name = "deadpool-runtime" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63dfa964fe2a66f3fde91fc70b267fe193d822c7e603e2a675a49a7f46ad3f49" +dependencies = [ + "tokio", +] + +[[package]] +name = "deadpool-sqlite" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e026821eaacbce25ff0d54405e4421d71656fcae3e4a9323461280fcda6dbc7d" +dependencies = [ + "deadpool", + "deadpool-sync", + "rusqlite", +] + +[[package]] +name = "deadpool-sync" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8db70494c13cae4ce67b4b4dafdaf828cf0df7237ab5b9e2fcabee4965d0a0a" +dependencies = [ + "deadpool-runtime", +] + [[package]] name = "delegate-display" version = "2.1.1" @@ -652,6 +712,18 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "fallible-iterator" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" + +[[package]] +name = "fallible-streaming-iterator" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" + [[package]] name = "fancy_constructor" version = "1.2.2" @@ -890,6 +962,25 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +[[package]] +name = "hashbrown" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f93e7192158dbcda357bdec5fb5788eebf8bbac027f3f33e719d29135ae84156" +dependencies = [ + "ahash", + "allocator-api2", +] + +[[package]] +name = "hashlink" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" +dependencies = [ + "hashbrown 0.14.2", +] + [[package]] name = "heck" version = "0.4.1" @@ -1020,7 +1111,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", - "hashbrown", + "hashbrown 0.12.3", ] [[package]] @@ -1128,6 +1219,17 @@ dependencies = [ "libz-sys", ] +[[package]] +name = "libsqlite3-sys" +version = "0.25.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + [[package]] name = "libz-sys" version = "1.1.12" @@ -1395,6 +1497,7 @@ dependencies = [ "nostr-indexeddb", "nostr-rocksdb", "nostr-sdk-net", + "nostr-sqlite", "once_cell", "thiserror", "tokio", @@ -1441,6 +1544,21 @@ dependencies = [ "ws_stream_wasm", ] +[[package]] +name = "nostr-sqlite" +version = "0.1.0" +dependencies = [ + "async-trait", + "deadpool-sqlite", + "nostr", + "nostr-database", + "rusqlite", + "thiserror", + "tokio", + "tracing", + "tracing-subscriber", +] + [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -1703,6 +1821,12 @@ dependencies = [ "winreg", ] +[[package]] +name = "retain_mut" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4389f1d5789befaf6029ebd9f7dac4af7f7e3d61b69d4f30e2ac02b57e7712b0" + [[package]] name = "ring" version = "0.17.5" @@ 
-1727,6 +1851,20 @@ dependencies = [ "librocksdb-sys", ] +[[package]] +name = "rusqlite" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a" +dependencies = [ + "bitflags 1.3.2", + "fallible-iterator", + "fallible-streaming-iterator", + "hashlink", + "libsqlite3-sys", + "smallvec", +] + [[package]] name = "rustc-demangle" version = "0.1.23" @@ -2819,3 +2957,23 @@ dependencies = [ "wasm-bindgen-futures", "web-sys", ] + +[[package]] +name = "zerocopy" +version = "0.7.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cd369a67c0edfef15010f980c3cbe45d7f651deac2cd67ce097cd801de16557" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2f140bda219a26ccc0cdb03dba58af72590c53b22642577d88a927bc5c87d6b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] diff --git a/README.md b/README.md index 532591c21..d5932ecf7 100644 --- a/README.md +++ b/README.md @@ -7,6 +7,7 @@ The project is split up into several crates in the `crates/` directory: * [**nostr**](./crates/nostr/): Rust implementation of Nostr protocol. * [**nostr-database**](./crates/nostr-database/): Database for Nostr apps * [**nostr-rocksdb**](./crates/nostr-rocksdb/): RocksDB Storage backend for Nostr apps + * [**nostr-sqlite**](./crates/nostr-sqlite/): SQLite Storage backend for Nostr apps * [**nostr-indexeddb**](./crates/nostr-indexeddb/): IndexedDB Storage backend for Nostr apps * [**nostr-sdk**](./crates/nostr-sdk/): High level client library. 
* [**nostr-sdk-net**](./crates/nostr-sdk-net/): Network library for [**nostr-sdk**](./crates/nostr-sdk/) diff --git a/crates/nostr-sdk/Cargo.toml b/crates/nostr-sdk/Cargo.toml index eb4bf0927..93aa55ea5 100644 --- a/crates/nostr-sdk/Cargo.toml +++ b/crates/nostr-sdk/Cargo.toml @@ -19,6 +19,7 @@ rustdoc-args = ["--cfg", "docsrs"] default = ["all-nips"] blocking = ["async-utility/blocking", "nostr/blocking"] rocksdb = ["dep:nostr-rocksdb"] +sqlite = ["dep:nostr-sqlite"] indexeddb = ["dep:nostr-indexeddb"] all-nips = ["nip04", "nip05", "nip06", "nip11", "nip46", "nip47"] nip03 = ["nostr/nip03"] @@ -41,6 +42,7 @@ tracing = { workspace = true, features = ["std"] } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] nostr-rocksdb = { version = "0.1", path = "../nostr-rocksdb", optional = true } +nostr-sqlite = { version = "0.1", path = "../nostr-sqlite", optional = true } tokio = { workspace = true, features = ["rt-multi-thread", "time", "macros", "sync"] } [target.'cfg(target_arch = "wasm32")'.dependencies] @@ -85,6 +87,10 @@ required-features = ["all-nips", "rocksdb"] [[example]] name = "shutdown-on-drop" +[[example]] +name = "sqlite" +required-features = ["all-nips", "sqlite"] + [[example]] name = "subscriptions" required-features = ["all-nips"] diff --git a/crates/nostr-sdk/examples/sqlite.rs b/crates/nostr-sdk/examples/sqlite.rs new file mode 100644 index 000000000..36f28c86b --- /dev/null +++ b/crates/nostr-sdk/examples/sqlite.rs @@ -0,0 +1,39 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +use nostr_sdk::prelude::*; + +const BECH32_SK: &str = "nsec1ufnus6pju578ste3v90xd5m2decpuzpql2295m3sknqcjzyys9ls0qlc85"; + +#[tokio::main] +async fn main() -> Result<()> { + tracing_subscriber::fmt::init(); + + let secret_key = SecretKey::from_bech32(BECH32_SK)?; + let my_keys = Keys::new(secret_key); + + let database = SQLiteDatabase::open("./db/sqlite.db").await?; + let client: Client = 
ClientBuilder::new(&my_keys).database(database).build(); + + client.add_relay("wss://relay.damus.io", None).await?; + client.add_relay("wss://nostr.wine", None).await?; + client.add_relay("wss://atl.purplerelay.com", None).await?; + + client.connect().await; + + /* // Publish a text note + client.publish_text_note("Hello world", &[]).await?; */ + + // Negentropy reconcile + let filter = Filter::new().author(my_keys.public_key()); + client + .reconcile(filter, NegentropyOptions::default()) + .await?; + + // Query events from database + let filter = Filter::new().author(my_keys.public_key()).limit(10); + let events = client.database().query(vec![filter]).await?; + println!("Events: {events:?}"); + + Ok(()) +} diff --git a/crates/nostr-sdk/src/lib.rs b/crates/nostr-sdk/src/lib.rs index 335278828..eaf74a25a 100644 --- a/crates/nostr-sdk/src/lib.rs +++ b/crates/nostr-sdk/src/lib.rs @@ -20,11 +20,13 @@ compile_error!("`blocking` feature can't be enabled for WASM targets"); pub use nostr::{self, *}; pub use nostr_database as database; #[cfg(feature = "indexeddb")] -pub use nostr_indexeddb::WebDatabase; +pub use nostr_indexeddb::{IndexedDBError, WebDatabase}; #[cfg(feature = "rocksdb")] pub use nostr_rocksdb::RocksDatabase; #[cfg(feature = "blocking")] use nostr_sdk_net::futures_util::Future; +#[cfg(feature = "sqlite")] +pub use nostr_sqlite::{Error as SQLiteError, SQLiteDatabase}; #[cfg(feature = "blocking")] use once_cell::sync::Lazy; #[cfg(feature = "blocking")] From 3437369c2af84e411f1a64171dc44e2866282b76 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 13 Nov 2023 16:31:19 +0100 Subject: [PATCH 97/98] indexeddb: impl From for DatabaseError --- crates/nostr-indexeddb/src/error.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/crates/nostr-indexeddb/src/error.rs b/crates/nostr-indexeddb/src/error.rs index f390a66c6..66433ddeb 100644 --- a/crates/nostr-indexeddb/src/error.rs +++ b/crates/nostr-indexeddb/src/error.rs @@ -31,3 +31,9 @@ impl From for 
IndexedDBError { } } } + +impl From for DatabaseError { + fn from(e: IndexedDBError) -> Self { + Self::backend(e) + } +} From 776cc9822b8b46d5b42e798e29162f169096da95 Mon Sep 17 00:00:00 2001 From: Yuki Kishimoto Date: Mon, 13 Nov 2023 16:39:56 +0100 Subject: [PATCH 98/98] sqlite: fix clippy --- crates/nostr-sqlite/src/lib.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/crates/nostr-sqlite/src/lib.rs b/crates/nostr-sqlite/src/lib.rs index 7ba533c1a..643d9c22f 100644 --- a/crates/nostr-sqlite/src/lib.rs +++ b/crates/nostr-sqlite/src/lib.rs @@ -105,7 +105,9 @@ impl NostrDatabase for SQLiteDatabase { conn.interact(move |conn| { let mut stmt = conn.prepare_cached("SELECT COUNT(*) FROM events;")?; let mut rows = stmt.query([])?; - let row = rows.next()?.ok_or(Error::NotFound("count result".into()))?; + let row = rows + .next()? + .ok_or_else(|| Error::NotFound("count result".into()))?; let count: usize = row.get(0)?; Ok(count) }) @@ -229,7 +231,9 @@ impl NostrDatabase for SQLiteDatabase { conn.interact(move |conn| { let mut stmt = conn.prepare_cached("SELECT event FROM events WHERE event_id = ?;")?; let mut rows = stmt.query([event_id.to_hex()])?; - let row = rows.next()?.ok_or(Error::NotFound("event".into()))?; + let row = rows + .next()? + .ok_or_else(|| Error::NotFound("event".into()))?; let buf: Vec = row.get(0)?; Ok(Event::decode(&buf)?) })