diff --git a/.githooks/pre-push b/.githooks/pre-push index 3bb6cd118..6589194c6 100755 --- a/.githooks/pre-push +++ b/.githooks/pre-push @@ -7,10 +7,13 @@ buildargs=( "-p nostr --no-default-features --features alloc" "-p nostr --no-default-features --features alloc,all-nips" "-p nostr --features blocking" + "-p nostr-database" "-p nostr-sdk-net" "-p nostr-sdk" "-p nostr-sdk --no-default-features" "-p nostr-sdk --features blocking" + "-p nostr-sdk --features sqlite" + #"-p nostr-sdk --features rocksdb" "-p nostr-ffi" "-p nostr-sdk-ffi" ) @@ -23,6 +26,7 @@ for arg in "${buildargs[@]}"; do done buildargs=( + "-p nostr-sdk --features indexeddb --target wasm32-unknown-unknown" "-p nostr-js --target wasm32-unknown-unknown" "-p nostr-sdk-js --target wasm32-unknown-unknown" ) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 55002ceb8..4ca599d2e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -31,9 +31,11 @@ jobs: -p nostr --no-default-features --features alloc, -p nostr --no-default-features --features "alloc all-nips", -p nostr --features blocking, + -p nostr-database, -p nostr-sdk, -p nostr-sdk --no-default-features, -p nostr-sdk --features blocking, + -p nostr-sdk --features sqlite, ] steps: - name: Checkout @@ -57,6 +59,40 @@ jobs: - name: Clippy run: cargo clippy ${{ matrix.build-args }} -- -D warnings + build-msrv-1660: + name: Build + runs-on: ubuntu-latest + strategy: + matrix: + rust: + - version: stable # STABLE + - version: 1.66.0 # MSRV + build-args: + [ + -p nostr-sdk --features rocksdb, + ] + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Cache + uses: actions/cache@v3 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-msrv-1.66.0-${{ hashFiles('**/Cargo.toml','**/Cargo.lock') }} + - name: Set default toolchain + run: rustup default ${{ matrix.rust.version }} + - name: Set profile + run: rustup set profile minimal && rustup component add clippy + - name: 
Build + run: cargo build ${{ matrix.build-args }} + - name: Tests + run: cargo test ${{ matrix.build-args }} + - name: Clippy + run: cargo clippy ${{ matrix.build-args }} -- -D warnings + build-wasm: name: Build WASM runs-on: ubuntu-latest @@ -69,6 +105,7 @@ jobs: [ -p nostr, -p nostr-sdk, + -p nostr-sdk --features indexeddb, -p nostr-js, ] steps: diff --git a/.gitignore b/.gitignore index b27f2f59b..147cd5cd8 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ target/ +db/ .DS_Store *.db *.db-shm diff --git a/Cargo.lock b/Cargo.lock index e89e9dff8..890ec21a3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,18 @@ # It is not intended for manual editing. version = 3 +[[package]] +name = "accessory" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fb0baead5bf1d7f3429259f02076dba82d2f617af7128ca4b288051edd80e93" +dependencies = [ + "macroific", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "addr2line" version = "0.21.0" @@ -28,20 +40,38 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "ahash" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + [[package]] name = "aho-corasick" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea5d730647d4fadd988536d06fecce94b7b4f2a7efdae548f1cf4b63205518ab" +checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" dependencies = [ "memchr", ] +[[package]] +name = "allocator-api2" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" + [[package]] name = "anstream" -version = "0.5.0" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "b1f58811cfac344940f1a400b6e6231ce35171f614f26439e80f8c1465c5cc0c" +checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44" dependencies = [ "anstyle", "anstyle-parse", @@ -53,15 +83,15 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b84bf0a05bbb2a83e5eb6fa36bb6e87baa08193c35ff52bbf6b38d8af2890e46" +checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" [[package]] name = "anstyle-parse" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" +checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140" dependencies = [ "utf8parse", ] @@ -77,9 +107,9 @@ dependencies = [ [[package]] name = "anstyle-wincon" -version = "2.1.0" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58f54d10c6dfa51283a066ceab3ec1ab78d13fae00aa49243a45e4571fb79dfd" +checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628" dependencies = [ "anstyle", "windows-sys", @@ -93,9 +123,9 @@ checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" [[package]] name = "askama" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47cbc3cf73fa8d9833727bbee4835ba5c421a0d65b72daf9a7b5d0e0f9cfb57e" +checksum = "b79091df18a97caea757e28cd2d5fda49c6cd4bd01ddffd7ff01ace0c0ad2c28" dependencies = [ "askama_derive", "askama_escape", @@ -103,14 +133,14 @@ dependencies = [ [[package]] name = "askama_derive" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c22fbe0413545c098358e56966ff22cdd039e10215ae213cfbd65032b119fc94" +checksum = 
"9a0fc7dcf8bd4ead96b1d36b41df47c14beedf7b0301fc543d8f2384e66a2ec0" dependencies = [ + "askama_parser", "basic-toml", "mime", "mime_guess", - "nom", "proc-macro2", "quote", "serde", @@ -123,6 +153,26 @@ version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "619743e34b5ba4e9703bba34deac3427c72507c7159f5fd030aea8cac0cfe341" +[[package]] +name = "askama_parser" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c268a96e01a4c47c8c5c2472aaa570707e006a875ea63e819f75474ceedaf7b4" +dependencies = [ + "nom", +] + +[[package]] +name = "async-trait" +version = "0.1.74" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "async-utility" version = "0.1.1" @@ -169,15 +219,15 @@ dependencies = [ [[package]] name = "base64" -version = "0.21.4" +version = "0.21.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" +checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" [[package]] name = "basic-toml" -version = "0.1.4" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bfc506e7a2370ec239e1d072507b2a80c833083699d3c6fa176fbb4de8448c6" +checksum = "2f2139706359229bfa8f19142ac1155b4b80beafb7a60471ac5dd109d4a19778" dependencies = [ "serde", ] @@ -197,6 +247,27 @@ dependencies = [ "serde", ] +[[package]] +name = "bindgen" +version = "0.65.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5" +dependencies = [ + "bitflags 1.3.2", + "cexpr", + "clang-sys", + "lazy_static", + "lazycell", + "peeking_take_while", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + 
"shlex", + "syn", +] + [[package]] name = "bip39" version = "2.0.0" @@ -248,9 +319,15 @@ dependencies = [ [[package]] name = "bitflags" -version = "2.4.0" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" [[package]] name = "block-buffer" @@ -278,9 +355,9 @@ checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" [[package]] name = "byteorder" -version = "1.4.3" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" @@ -288,6 +365,17 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +[[package]] +name = "bzip2-sys" +version = "0.1.11+1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" +dependencies = [ + "cc", + "libc", + "pkg-config", +] + [[package]] name = "camino" version = "1.1.6" @@ -299,9 +387,9 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cfa25e60aea747ec7e1124f238816749faa93759c6ff5b31f1ccdda137f4479" +checksum = "12024c4645c97566567129c204f65d5815a8c9aecf30fcbe682b2fe034996d36" dependencies = [ "serde", ] @@ -335,9 +423,19 @@ version = "1.0.83" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" dependencies = [ + "jobserver", "libc", ] +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + [[package]] name = "cfg-if" version = "1.0.0" @@ -365,11 +463,22 @@ dependencies = [ "inout", ] +[[package]] +name = "clang-sys" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" +dependencies = [ + "glob", + "libc", + "libloading", +] + [[package]] name = "clap" -version = "4.4.5" +version = "4.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "824956d0dca8334758a5b7f7e50518d66ea319330cbceedcf76905c2f6ab30e3" +checksum = "ac495e00dcec98c83465d5ad66c5c4fabd652fd6686e7c6269b117e729a6f17b" dependencies = [ "clap_builder", "clap_derive", @@ -377,9 +486,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.5" +version = "4.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "122ec64120a49b4563ccaedcbea7818d069ed8e9aa6d829b82d8a4128936b2ab" +checksum = "c77ed9a32a62e6ca27175d00d29d05ca32e396ea1eb5fb01d8256b669cec7663" dependencies = [ "anstream", "anstyle", @@ -389,9 +498,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.4.2" +version = "4.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0862016ff20d69b84ef8247369fabf5c008a7417002411897d40ee1f4532b873" +checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" dependencies = [ "heck", "proc-macro2", @@ -401,9 +510,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.5.1" +version = "0.6.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961" +checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" [[package]] name = "colorchoice" @@ -421,6 +530,22 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "core-foundation" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" + [[package]] name = "core2" version = "0.3.3" @@ -432,9 +557,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.9" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" +checksum = "ce420fe07aecd3e67c5f910618fe65e94158f6dcc0adf44e00d69ce2bdfe0fd0" dependencies = [ "libc", ] @@ -460,9 +585,9 @@ dependencies = [ [[package]] name = "csv" -version = "1.2.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "626ae34994d3d8d668f4269922248239db4ae42d538b14c398b74a52208e8086" +checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" dependencies = [ "csv-core", "itoa", @@ -472,9 +597,9 @@ dependencies = [ [[package]] name = "csv-core" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" +checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" dependencies = [ "memchr", ] @@ -485,6 +610,60 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" +[[package]] +name = "deadpool" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "421fe0f90f2ab22016f32a9881be5134fdd71c65298917084b0c7477cbc3856e" +dependencies = [ + "async-trait", + "deadpool-runtime", + "num_cpus", + "retain_mut", + "tokio", +] + +[[package]] +name = "deadpool-runtime" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63dfa964fe2a66f3fde91fc70b267fe193d822c7e603e2a675a49a7f46ad3f49" +dependencies = [ + "tokio", +] + +[[package]] +name = "deadpool-sqlite" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e026821eaacbce25ff0d54405e4421d71656fcae3e4a9323461280fcda6dbc7d" +dependencies = [ + "deadpool", + "deadpool-sync", + "rusqlite", +] + +[[package]] +name = "deadpool-sync" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8db70494c13cae4ce67b4b4dafdaf828cf0df7237ab5b9e2fcabee4965d0a0a" +dependencies = [ + "deadpool-runtime", +] + +[[package]] +name = "delegate-display" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98a85201f233142ac819bbf6226e36d0b5e129a47bd325084674261c82d4cd66" +dependencies = [ + "macroific", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "digest" version = "0.10.7" @@ -525,30 +704,53 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.3" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "136526188508e25c6fef639d7927dfb3e0e3084488bf202267829cf7fc23dbdd" +checksum = "7c18ee0ed65a5f1f81cac6b1d213b69c35fa47d4252ad41f1486dbd8226fe36e" dependencies = [ - "errno-dragonfly", "libc", "windows-sys", ] [[package]] -name = "errno-dragonfly" -version = "0.1.2" +name = "fallible-iterator" +version = "0.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" + +[[package]] +name = "fallible-streaming-iterator" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" + +[[package]] +name = "fancy_constructor" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f71f317e4af73b2f8f608fac190c52eac4b1879d2145df1db2fe48881ca69435" dependencies = [ - "cc", - "libc", + "macroific", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "flatbuffers" +version = "23.5.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dac53e22462d78c16d64a1cd22371b54cc3fe94aa15e7886a2fa6e5d1ab8640" +dependencies = [ + "bitflags 1.3.2", + "rustc_version", ] [[package]] name = "flate2" -version = "1.0.27" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010" +checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" dependencies = [ "crc32fast", "miniz_oxide", @@ -577,9 +779,9 @@ checksum = "0845fa252299212f0389d64ba26f34fa32cfe41588355f21ed507c59a0f64541" [[package]] name = "futures" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" +checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335" dependencies = [ "futures-channel", "futures-core", @@ -592,9 +794,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" +checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" dependencies = [ "futures-core", "futures-sink", @@ -602,15 +804,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" +checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" [[package]] name = "futures-executor" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" +checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc" dependencies = [ "futures-core", "futures-task", @@ -619,15 +821,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" +checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" [[package]] name = "futures-macro" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" dependencies = [ "proc-macro2", "quote", @@ -636,21 +838,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" +checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" [[package]] name = "futures-task" -version = "0.3.28" +version = "0.3.29" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" [[package]] name = "futures-util" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" +checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" dependencies = [ "futures-channel", "futures-core", @@ -689,9 +891,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" +checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" dependencies = [ "cfg-if", "js-sys", @@ -760,6 +962,25 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +[[package]] +name = "hashbrown" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f93e7192158dbcda357bdec5fb5788eebf8bbac027f3f33e719d29135ae84156" +dependencies = [ + "ahash", + "allocator-api2", +] + +[[package]] +name = "hashlink" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" +dependencies = [ + "hashbrown 0.14.2", +] + [[package]] name = "heck" version = "0.4.1" @@ -835,7 +1056,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.9", + "socket2 0.4.10", "tokio", "tower-service", "tracing", @@ -844,9 +1065,9 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http", @@ -866,6 +1087,23 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "indexed_db_futures" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6cc2083760572ee02385ab8b7c02c20925d2dd1f97a1a25a8737a238608f1152" +dependencies = [ + "accessory", + "cfg-if", + "delegate-display", + "fancy_constructor", + "js-sys", + "uuid", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "indexmap" version = "1.9.3" @@ -873,7 +1111,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", - "hashbrown", + "hashbrown 0.12.3", ] [[package]] @@ -900,9 +1138,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" +checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" [[package]] name = "is-terminal" @@ -921,11 +1159,20 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +[[package]] +name = "jobserver" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" +dependencies = [ + "libc", +] + [[package]] name = "js-sys" -version = "0.3.64" +version = "0.3.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +checksum = "54c0c35952f67de54bb584e9fd912b3023117cbafc0a77d8f3dee1fb5f572fe8" 
dependencies = [ "wasm-bindgen", ] @@ -936,17 +1183,69 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + [[package]] name = "libc" -version = "0.2.148" +version = "0.2.150" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" + +[[package]] +name = "libloading" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" +dependencies = [ + "cfg-if", + "winapi", +] + +[[package]] +name = "librocksdb-sys" +version = "0.11.0+8.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3386f101bcb4bd252d8e9d2fb41ec3b0862a15a62b478c355b2982efa469e3e" +dependencies = [ + "bindgen", + "bzip2-sys", + "cc", + "glob", + "libc", + "libz-sys", +] + +[[package]] +name = "libsqlite3-sys" +version = "0.25.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "libz-sys" +version = "1.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" +checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] [[package]] name = "linux-raw-sys" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" +checksum 
= "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829" [[package]] name = "log" @@ -968,6 +1267,53 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "macroific" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a304d83e1ef544546dec28cafb9fcb72bfd281da112042d9279808b2ef6be600" +dependencies = [ + "macroific_attr_parse", + "macroific_core", + "macroific_macro", +] + +[[package]] +name = "macroific_attr_parse" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd34ba6db76d16ae96fbb873ea972524d6b83577040e2758582217aaa2527546" +dependencies = [ + "cfg-if", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "macroific_core" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5a4902dfaf37c1480d6573bbc009b41e50e859acd11f5dade360a0a17ecbefb" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "macroific_macro" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0c9853143cbed7f1e41dc39fee95f9b361bec65c8dc2a01bf609be01b61f5ae" +dependencies = [ + "macroific_attr_parse", + "macroific_core", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "matchers" version = "0.1.0" @@ -979,9 +1325,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.6.3" +version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" [[package]] name = "mime" @@ -1016,9 +1362,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" +checksum = 
"3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0" dependencies = [ "libc", "wasi", @@ -1066,6 +1412,19 @@ dependencies = [ "url-fork", ] +[[package]] +name = "nostr-database" +version = "0.1.0" +dependencies = [ + "async-trait", + "flatbuffers", + "nostr", + "thiserror", + "tokio", + "tracing", + "tracing-subscriber", +] + [[package]] name = "nostr-ffi" version = "0.1.0" @@ -1074,6 +1433,21 @@ dependencies = [ "uniffi", ] +[[package]] +name = "nostr-indexeddb" +version = "0.1.0" +dependencies = [ + "async-trait", + "indexed_db_futures", + "nostr", + "nostr-database", + "thiserror", + "tokio", + "tracing", + "wasm-bindgen", + "wasm-bindgen-test", +] + [[package]] name = "nostr-js" version = "0.1.0" @@ -1099,13 +1473,31 @@ dependencies = [ "ureq", ] +[[package]] +name = "nostr-rocksdb" +version = "0.1.0" +dependencies = [ + "async-trait", + "nostr", + "nostr-database", + "num_cpus", + "rocksdb", + "tokio", + "tracing", + "tracing-subscriber", +] + [[package]] name = "nostr-sdk" version = "0.25.0" dependencies = [ "async-utility", "nostr", + "nostr-database", + "nostr-indexeddb", + "nostr-rocksdb", "nostr-sdk-net", + "nostr-sqlite", "once_cell", "thiserror", "tokio", @@ -1148,10 +1540,25 @@ dependencies = [ "tokio-socks", "tokio-tungstenite", "url-fork", - "webpki-roots 0.25.2", + "webpki-roots", "ws_stream_wasm", ] +[[package]] +name = "nostr-sqlite" +version = "0.1.0" +dependencies = [ + "async-trait", + "deadpool-sqlite", + "nostr", + "nostr-database", + "rusqlite", + "thiserror", + "tokio", + "tracing", + "tracing-subscriber", +] + [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -1219,6 +1626,12 @@ version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +[[package]] +name = "peeking_take_while" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" + [[package]] name = "percent-encoding" version = "2.3.0" @@ -1247,6 +1660,12 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkg-config" +version = "0.3.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" + [[package]] name = "plain" version = "0.2.3" @@ -1259,11 +1678,21 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "prettyplease" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" +dependencies = [ + "proc-macro2", + "syn", +] + [[package]] name = "proc-macro2" -version = "1.0.67" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" +checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" dependencies = [ "unicode-ident", ] @@ -1309,13 +1738,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.5" +version = "1.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" +checksum = "ebee201405406dbf528b8b672104ae6d6d63e6d118cb10e4d51abbc7b58044ff" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.3.8", + "regex-automata 0.3.9", "regex-syntax 0.7.5", ] @@ -1330,9 +1759,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" +checksum = "59b23e92ee4318893fa3fe3e6fb365258efbfe6ac6ab30f090cdcbb7aa37efa9" dependencies = [ "aho-corasick", "memchr", @@ -1353,9 +1782,9 @@ checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" [[package]] name = "reqwest" -version = "0.11.20" +version = "0.11.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" +checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" dependencies = [ "base64", "bytes", @@ -1379,6 +1808,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", + "system-configuration", "tokio", "tokio-rustls", "tokio-socks", @@ -1387,23 +1817,52 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots 0.25.2", + "webpki-roots", "winreg", ] +[[package]] +name = "retain_mut" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4389f1d5789befaf6029ebd9f7dac4af7f7e3d61b69d4f30e2ac02b57e7712b0" + [[package]] name = "ring" -version = "0.16.20" +version = "0.17.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +checksum = "fb0205304757e5d899b9c2e448b867ffd03ae7f988002e47cd24954391394d0b" dependencies = [ "cc", + "getrandom", "libc", - "once_cell", "spin", "untrusted", - "web-sys", - "winapi", + "windows-sys", +] + +[[package]] +name = "rocksdb" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb6f170a4041d50a0ce04b0d2e14916d6ca863ea2e422689a5b694395d299ffe" +dependencies = [ + "libc", + "librocksdb-sys", +] + +[[package]] +name = "rusqlite" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a" +dependencies = [ + 
"bitflags 1.3.2", + "fallible-iterator", + "fallible-streaming-iterator", + "hashlink", + "libsqlite3-sys", + "smallvec", ] [[package]] @@ -1412,6 +1871,12 @@ version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + [[package]] name = "rustc_version" version = "0.4.0" @@ -1423,11 +1888,11 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.20" +version = "0.38.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67ce50cb2e16c2903e30d1cbccfd8387a74b9d4c938b6a4c5ec6cc7556f7a8a0" +checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" dependencies = [ - "bitflags", + "bitflags 2.4.1", "errno", "libc", "linux-raw-sys", @@ -1436,13 +1901,13 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.7" +version = "0.21.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" +checksum = "446e14c5cda4f3f30fe71863c34ec70f5ac79d6087097ad0bb433e1be5edf04c" dependencies = [ "log", "ring", - "rustls-webpki 0.101.6", + "rustls-webpki", "sct", ] @@ -1457,19 +1922,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.100.3" +version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6a5fc258f1c1276dfe3016516945546e2d5383911efc0fc4f1cdc5df3a4ae3" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "rustls-webpki" -version = "0.101.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c7d5dece342910d9ba34d259310cae3e0154b873b35408b787b59bce53d34fe" +checksum = 
"8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ "ring", "untrusted", @@ -1515,9 +1970,9 @@ dependencies = [ [[package]] name = "sct" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ "ring", "untrusted", @@ -1546,9 +2001,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad977052201c6de01a8ef2aa3378c4bd23217a056337d1d6da40468d267a4fb0" +checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" dependencies = [ "serde", ] @@ -1561,9 +2016,9 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "serde" -version = "1.0.188" +version = "1.0.192" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" +checksum = "bca2a08484b285dcb282d0f67b26cadc0df8b19f8c12502c13d966bf9482f001" dependencies = [ "serde_derive", ] @@ -1581,9 +2036,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.188" +version = "1.0.192" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" +checksum = "d6c7207fbec9faa48073f3e3074cbe553af6ea512d7c21ba46e434e70ea9fbc1" dependencies = [ "proc-macro2", "quote", @@ -1592,9 +2047,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.107" +version = "1.0.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" +checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" dependencies = [ "itoa", "ryu", 
@@ -1626,13 +2081,19 @@ dependencies = [ [[package]] name = "sharded-slab" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1b21f559e07218024e7e9f90f96f601825397de0e25420135f7f952453fed0b" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" dependencies = [ "lazy_static", ] +[[package]] +name = "shlex" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7cee0529a6d40f580e7a5e6c495c8fbfe21b7b52795ed4bb5e62cdf92bc6380" + [[package]] name = "siphasher" version = "0.3.11" @@ -1656,9 +2117,9 @@ checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a" [[package]] name = "socket2" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" dependencies = [ "libc", "winapi", @@ -1666,9 +2127,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" +checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" dependencies = [ "libc", "windows-sys", @@ -1676,9 +2137,9 @@ dependencies = [ [[package]] name = "spin" -version = "0.5.2" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" [[package]] name = "static_assertions" @@ -1694,15 +2155,36 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "syn" -version = "2.0.37" +version = "2.0.39" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7303ef2c05cd654186cb250d29049a24840ca25d2747c25c0381c8d9e2f582e8" +checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] +[[package]] +name = "system-configuration" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "termcolor" version = "1.3.0" @@ -1714,18 +2196,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.49" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1177e8c6d7ede7afde3585fd2513e611227efd6481bd78d2e82ba1ce16557ed4" +checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.49" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc" +checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ "proc-macro2", "quote", @@ -1759,9 +2241,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.32.0" +version = "1.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ed6077ed6cd6c74735e21f37eb16dc3935f96878b1fe961074089cc80893f9" +checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653" 
dependencies = [ "backtrace", "bytes", @@ -1769,7 +2251,7 @@ dependencies = [ "mio", "num_cpus", "pin-project-lite", - "socket2 0.5.4", + "socket2 0.5.5", "tokio-macros", "windows-sys", ] @@ -1819,14 +2301,14 @@ dependencies = [ "tokio", "tokio-rustls", "tungstenite", - "webpki-roots 0.25.2", + "webpki-roots", ] [[package]] name = "tokio-util" -version = "0.7.9" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d68074620f57a0b21594d9735eb2e98ab38b17f80d3fcb189fca266771ca60d" +checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" dependencies = [ "bytes", "futures-core", @@ -1853,11 +2335,10 @@ checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.37" +version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ - "cfg-if", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -1876,9 +2357,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.31" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", "valuable", @@ -1886,12 +2367,12 @@ dependencies = [ [[package]] name = "tracing-log" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" +checksum = "f751112709b4e791d8ce53e32c4ed2d353565a795ce84da2285393f41557bdf2" dependencies = [ - "lazy_static", "log", + "once_cell", "tracing-core", ] @@ -2116,24 +2597,24 @@ dependencies = [ [[package]] name = "untrusted" -version 
= "0.7.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "ureq" -version = "2.7.1" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b11c96ac7ee530603dcdf68ed1557050f374ce55a5a07193ebf8cbc9f8927e9" +checksum = "f5ccd538d4a604753ebc2f17cd9946e89b77bf87f6a8e2309667c6f2e87855e3" dependencies = [ "base64", "flate2", "log", "once_cell", "rustls", - "rustls-webpki 0.100.3", + "rustls-webpki", "url", - "webpki-roots 0.23.1", + "webpki-roots", ] [[package]] @@ -2172,12 +2653,28 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" +[[package]] +name = "uuid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88ad59a7560b41a70d191093a945f0b87bc1deeda46fb237479708a1d6b6cdfc" +dependencies = [ + "getrandom", + "wasm-bindgen", +] + [[package]] name = "valuable" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + [[package]] name = "version_check" version = "0.9.4" @@ -2201,9 +2698,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +checksum = "7daec296f25a1bae309c0cd5c29c4b260e510e6d813c286b19eaadf409d40fce" dependencies 
= [ "cfg-if", "wasm-bindgen-macro", @@ -2211,9 +2708,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +checksum = "e397f4664c0e4e428e8313a469aaa58310d302159845980fd23b0f22a847f217" dependencies = [ "bumpalo", "log", @@ -2226,9 +2723,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.37" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" +checksum = "9afec9963e3d0994cac82455b2b3502b81a7f40f9a0d32181f7528d9f4b43e02" dependencies = [ "cfg-if", "js-sys", @@ -2238,9 +2735,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +checksum = "5961017b3b08ad5f3fe39f1e79877f8ee7c23c5e5fd5eb80de95abc41f1f16b2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2248,9 +2745,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +checksum = "c5353b8dab669f5e10f5bd76df26a9360c748f054f862ff5f3f8aae0c7fb3907" dependencies = [ "proc-macro2", "quote", @@ -2261,9 +2758,34 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.87" +version = "0.2.88" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d046c5d029ba91a1ed14da14dca44b68bf2f124cfbaf741c54151fdb3e0750b" + +[[package]] +name = "wasm-bindgen-test" +version = "0.3.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c6433b7c56db97397842c46b67e11873eda263170afeb3a2dc74a7cb370fee0d" +dependencies = [ + "console_error_panic_hook", + "js-sys", + "scoped-tls", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-bindgen-test-macro", +] + +[[package]] +name = "wasm-bindgen-test-macro" +version = "0.3.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +checksum = "493fcbab756bb764fa37e6bee8cec2dd709eb4273d06d0c282a5e74275ded735" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] [[package]] name = "wasm-logger" @@ -2278,23 +2800,14 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.64" +version = "0.3.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" +checksum = "5db499c5f66323272151db0e666cd34f78617522fb0c1604d31a27c50c206a85" dependencies = [ "js-sys", "wasm-bindgen", ] -[[package]] -name = "webpki-roots" -version = "0.23.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338" -dependencies = [ - "rustls-webpki 0.100.3", -] - [[package]] name = "webpki-roots" version = "0.25.2" @@ -2444,3 +2957,23 @@ dependencies = [ "wasm-bindgen-futures", "web-sys", ] + +[[package]] +name = "zerocopy" +version = "0.7.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cd369a67c0edfef15010f980c3cbe45d7f651deac2cd67ce097cd801de16557" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2f140bda219a26ccc0cdb03dba58af72590c53b22642577d88a927bc5c87d6b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] diff --git a/Cargo.toml b/Cargo.toml index 0482f1475..71c051021 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,9 +5,7 @@ members = [ 
"bindings/nostr-sdk-ffi", "bindings/nostr-sdk-js", "bindings/uniffi-bindgen", - "crates/nostr", - "crates/nostr-sdk", - "crates/nostr-sdk-net", + "crates/*", ] default-members = ["crates/*"] resolver = "2" @@ -16,9 +14,11 @@ resolver = "2" homepage = "https://github.com/rust-nostr/nostr" repository = "https://github.com/rust-nostr/nostr.git" license = "MIT" -rust-version = "1.64.0" [workspace.dependencies] +async-trait = "0.1" +nostr = { version = "0.25", path = "./crates/nostr", default-features = false } +nostr-database = { version = "0.1", path = "./crates/nostr-database", default-features = false } once_cell = "1.18" thiserror = "1.0" tokio = { version = "1.32", default-features = false } diff --git a/Makefile b/Makefile index c21344608..dc6edf046 100644 --- a/Makefile +++ b/Makefile @@ -12,5 +12,8 @@ clean: book: cd book && make build +flatbuf: + cd crates/nostr-database && make flatbuf + loc: @echo "--- Counting lines of .rs files (LOC):" && find crates/ bindings/ -type f -name "*.rs" -exec cat {} \; | wc -l \ No newline at end of file diff --git a/README.md b/README.md index 710e95b62..d5932ecf7 100644 --- a/README.md +++ b/README.md @@ -5,6 +5,10 @@ The project is split up into several crates in the `crates/` directory: * [**nostr**](./crates/nostr/): Rust implementation of Nostr protocol. +* [**nostr-database**](./crates/nostr-database/): Database for Nostr apps + * [**nostr-rocksdb**](./crates/nostr-rocksdb/): RocksDB Storage backend for Nostr apps + * [**nostr-sqlite**](./crates/nostr-sqlite/): SQLite Storage backend for Nostr apps + * [**nostr-indexeddb**](./crates/nostr-indexeddb/): IndexedDB Storage backend for Nostr apps * [**nostr-sdk**](./crates/nostr-sdk/): High level client library. 
* [**nostr-sdk-net**](./crates/nostr-sdk-net/): Network library for [**nostr-sdk**](./crates/nostr-sdk/) @@ -33,10 +37,6 @@ Check the example in the [`embedded/`](./crates/nostr/examples/embedded/) direct * Swift: https://github.com/rust-nostr/nostr-sdk-swift * JavaScript: TODO -## Minimum Supported Rust Version (MSRV) - -These crates are built with the Rust language version 2021 and require a minimum compiler version of `1.64.0` - ## State **These libraries are in ALPHA state**, things that are implemented generally work but the API will change in breaking ways. diff --git a/bindings/nostr-js/Cargo.toml b/bindings/nostr-js/Cargo.toml index 115ad3c39..7e00a42f6 100644 --- a/bindings/nostr-js/Cargo.toml +++ b/bindings/nostr-js/Cargo.toml @@ -3,7 +3,7 @@ name = "nostr-js" version = "0.1.0" edition = "2021" description = "Nostr protocol implementation, for JavaScript" -authors = ["Yuki Kishimoto "] +authors = ["Yuki Kishimoto "] publish = false [lib] diff --git a/bindings/nostr-sdk-ffi/Cargo.toml b/bindings/nostr-sdk-ffi/Cargo.toml index 53a55f4c2..f9d41d218 100644 --- a/bindings/nostr-sdk-ffi/Cargo.toml +++ b/bindings/nostr-sdk-ffi/Cargo.toml @@ -2,7 +2,7 @@ name = "nostr-sdk-ffi" version = "0.1.0" edition = "2021" -authors = ["Yuki Kishimoto "] +authors = ["Yuki Kishimoto "] publish = false [lib] diff --git a/bindings/nostr-sdk-ffi/src/client/mod.rs b/bindings/nostr-sdk-ffi/src/client/mod.rs index 5b0793921..f22620e19 100644 --- a/bindings/nostr-sdk-ffi/src/client/mod.rs +++ b/bindings/nostr-sdk-ffi/src/client/mod.rs @@ -72,10 +72,6 @@ impl Client { Ok(self.inner.clone().shutdown()?) 
} - pub fn clear_already_seen_events(&self) { - self.inner.clear_already_seen_events() - } - pub fn relays(&self) -> HashMap> { self.inner .relays() diff --git a/bindings/nostr-sdk-ffi/src/nostr_sdk.udl b/bindings/nostr-sdk-ffi/src/nostr_sdk.udl index e95b1b12c..3938b0a71 100644 --- a/bindings/nostr-sdk-ffi/src/nostr_sdk.udl +++ b/bindings/nostr-sdk-ffi/src/nostr_sdk.udl @@ -627,7 +627,6 @@ interface Client { boolean is_running(); [Throws=NostrSdkError] void shutdown(); - void clear_already_seen_events(); record relays(); [Throws=NostrSdkError] diff --git a/bindings/nostr-sdk-js/Cargo.toml b/bindings/nostr-sdk-js/Cargo.toml index 0ae35b570..a4137ab4d 100644 --- a/bindings/nostr-sdk-js/Cargo.toml +++ b/bindings/nostr-sdk-js/Cargo.toml @@ -2,7 +2,7 @@ name = "nostr-sdk-js" version = "0.1.0" edition = "2021" -authors = ["Yuki Kishimoto "] +authors = ["Yuki Kishimoto "] publish = false [lib] diff --git a/crates/nostr-database/.gitignore b/crates/nostr-database/.gitignore new file mode 100644 index 000000000..73c384529 --- /dev/null +++ b/crates/nostr-database/.gitignore @@ -0,0 +1,3 @@ +*.svg +perf.data +perf.data.old \ No newline at end of file diff --git a/crates/nostr-database/Cargo.toml b/crates/nostr-database/Cargo.toml new file mode 100644 index 000000000..a0abb27ae --- /dev/null +++ b/crates/nostr-database/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "nostr-database" +version = "0.1.0" +edition = "2021" +description = "Database for Nostr apps" +authors = ["Yuki Kishimoto "] +homepage.workspace = true +repository.workspace = true +license.workspace = true +readme = "README.md" +rust-version = "1.64.0" +keywords = ["nostr", "database"] + +[features] +default = [] +flatbuf = ["dep:flatbuffers"] + +[dependencies] +async-trait = { workspace = true } +flatbuffers = { version = "23.5", optional = true } +nostr = { workspace = true, features = ["std"] } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["sync"] } +tracing = { workspace = true, 
features = ["std", "attributes"] } + +[dev-dependencies] +tokio = { workspace = true, features = ["macros", "rt-multi-thread", "time"] } +tracing-subscriber = { workspace = true, features = ["env-filter"] } diff --git a/crates/nostr-database/Makefile b/crates/nostr-database/Makefile new file mode 100644 index 000000000..1d7b2edb8 --- /dev/null +++ b/crates/nostr-database/Makefile @@ -0,0 +1,8 @@ +all: build + +flatbuf: + flatc --rust -o ./src/flatbuffers ./fbs/event.fbs + flatc --rust -o ./src/flatbuffers ./fbs/event_seen_by.fbs + +graph: + CARGO_PROFILE_RELEASE_DEBUG=true cargo flamegraph --release --example indexes -o flamegraph.svg \ No newline at end of file diff --git a/crates/nostr-database/README.md b/crates/nostr-database/README.md new file mode 100644 index 000000000..d5f09dc7f --- /dev/null +++ b/crates/nostr-database/README.md @@ -0,0 +1,36 @@ +# Nostr Database + +Database for Nostr apps + +## Nostr Database Trait + +This library cointains the `NostrDatabase` and `NostrDatabaseExt` traits. You can use the [default backends](#default-backends) or implement your one (like PostgreSQL, ...). + +## Default backends + +* Memory (RAM), available in this library +* SQLite (desktop, server and mobile devices), available at [`nostr-sqlite`](https://crates.io/crates/nostr-sqlite) +* RocksDB (desktop, server and mobile devices), available at [`nostr-rocksdb`](https://crates.io/crates/nostr-rocksdb) +* IndexedDB (web), available at [`nostr-indexeddb`](https://crates.io/crates/nostr-indexeddb) + +## Crate Feature Flags + +The following crate feature flags are available: + +| Feature | Default | Description | +| ------------------- | :-----: | ---------------------------------------------------------------------------------------- | +| `flatbuf` | No | Enable `flatbuffers` de/serialization for nostr events | + +## State + +**This library is in an ALPHA state**, things that are implemented generally work but the API will change in breaking ways. 
+ +## License + +This project is distributed under the MIT software license - see the [LICENSE](../../LICENSE) file for details + +## Donations + +⚡ Tips: + +⚡ Lightning Address: yuki@getalby.com \ No newline at end of file diff --git a/crates/nostr-database/examples/indexes.rs b/crates/nostr-database/examples/indexes.rs new file mode 100644 index 000000000..e22242cbc --- /dev/null +++ b/crates/nostr-database/examples/indexes.rs @@ -0,0 +1,74 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +use nostr::prelude::*; +use nostr_database::DatabaseIndexes; +use tracing_subscriber::fmt::format::FmtSpan; + +#[tokio::main] +async fn main() { + tracing_subscriber::fmt::fmt() + .with_span_events(FmtSpan::CLOSE) + .init(); + + let secret_key = + SecretKey::from_bech32("nsec1j4c6269y9w0q2er2xjw8sv2ehyrtfxq3jwgdlxj6qfn8z4gjsq5qfvfk99") + .unwrap(); + let keys_a = Keys::new(secret_key); + + let secret_key = + SecretKey::from_bech32("nsec1ufnus6pju578ste3v90xd5m2decpuzpql2295m3sknqcjzyys9ls0qlc85") + .unwrap(); + let keys_b = Keys::new(secret_key); + + let index = DatabaseIndexes::new(); + + for i in 0..100_000 { + let event = EventBuilder::new_text_note(format!("Event #{i}"), &[]) + .to_event(&keys_a) + .unwrap(); + index.index_event(&event).await; + + let event = EventBuilder::new_text_note( + format!("Reply to event #{i}"), + &[ + Tag::Event(event.id, None, None), + Tag::PubKey(event.pubkey, None), + ], + ) + .to_event(&keys_b) + .unwrap(); + index.index_event(&event).await; + } + + for i in 0..1000 { + let metadata = Metadata::new().name(format!("Name #{i}")); + let event = EventBuilder::set_metadata(metadata) + .to_event(&keys_a) + .unwrap(); + index.index_event(&event).await; + } + + for i in 0..500_000 { + let event = EventBuilder::new( + Kind::Custom(123), + "Custom with d tag", + &[Tag::Identifier(format!("myid{i}"))], + ) + .to_event(&keys_a) + .unwrap(); + index.index_event(&event).await; + } + + let ids = index + 
.query(vec![Filter::new() + .kinds(vec![Kind::Metadata, Kind::Custom(123), Kind::TextNote]) + .limit(20) + //.kind(Kind::Custom(123)) + //.identifier("myid5000") + .author(keys_a.public_key())]) + .await; + println!("Got {} ids", ids.len()); + + loop {} +} diff --git a/crates/nostr-database/examples/memory.rs b/crates/nostr-database/examples/memory.rs new file mode 100644 index 000000000..42ca6e51d --- /dev/null +++ b/crates/nostr-database/examples/memory.rs @@ -0,0 +1,79 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +use nostr::prelude::*; +use nostr::{EventBuilder, Filter, Keys, Kind, Metadata, Tag}; +use nostr_database::memory::MemoryDatabase; +use nostr_database::{DatabaseOptions, NostrDatabase}; +use tracing_subscriber::fmt::format::FmtSpan; + +#[tokio::main] +async fn main() { + tracing_subscriber::fmt::fmt() + .with_span_events(FmtSpan::CLOSE) + .init(); + + let secret_key = + SecretKey::from_bech32("nsec1j4c6269y9w0q2er2xjw8sv2ehyrtfxq3jwgdlxj6qfn8z4gjsq5qfvfk99") + .unwrap(); + let keys_a = Keys::new(secret_key); + + let secret_key = + SecretKey::from_bech32("nsec1ufnus6pju578ste3v90xd5m2decpuzpql2295m3sknqcjzyys9ls0qlc85") + .unwrap(); + let keys_b = Keys::new(secret_key); + + let mut opts = DatabaseOptions::default(); + opts.events = true; + let database = MemoryDatabase::new(opts); + + for i in 0..100_000 { + let event = EventBuilder::new_text_note(format!("Event #{i}"), &[]) + .to_event(&keys_a) + .unwrap(); + database.save_event(&event).await.unwrap(); + + let event = EventBuilder::new_text_note( + format!("Reply to event #{i}"), + &[ + Tag::Event(event.id, None, None), + Tag::PubKey(event.pubkey, None), + ], + ) + .to_event(&keys_b) + .unwrap(); + database.save_event(&event).await.unwrap(); + } + + for i in 0..10 { + let metadata = Metadata::new().name(format!("Name #{i}")); + let event = EventBuilder::set_metadata(metadata) + .to_event(&keys_a) + .unwrap(); + 
database.save_event(&event).await.unwrap(); + } + + for i in 0..500_000 { + let event = EventBuilder::new( + Kind::Custom(123), + "Custom with d tag", + &[Tag::Identifier(format!("myid{i}"))], + ) + .to_event(&keys_a) + .unwrap(); + database.save_event(&event).await.unwrap(); + } + + let events = database + .query(vec![Filter::new() + .kinds(vec![Kind::Metadata, Kind::Custom(123), Kind::TextNote]) + .limit(20) + //.kind(Kind::Custom(123)) + //.identifier("myid5000") + .author(keys_a.public_key())]) + .await + .unwrap(); + println!("Got {} events", events.len()); + + loop {} +} diff --git a/crates/nostr-database/fbs/event.fbs b/crates/nostr-database/fbs/event.fbs new file mode 100644 index 000000000..f705135b4 --- /dev/null +++ b/crates/nostr-database/fbs/event.fbs @@ -0,0 +1,25 @@ +namespace EventFbs; + +struct Fixed32Bytes { + val: [ubyte:32]; +} + +struct Fixed64Bytes { + val: [ubyte:64]; +} + +table StringVector { + data: [string]; +} + +table Event { + id: Fixed32Bytes; + pubkey: Fixed32Bytes; + created_at: ulong; + kind: ulong; + tags: [StringVector]; + content: string; + sig: Fixed64Bytes; +} + +root_type Event; \ No newline at end of file diff --git a/crates/nostr-database/fbs/event_seen_by.fbs b/crates/nostr-database/fbs/event_seen_by.fbs new file mode 100644 index 000000000..11ec0c306 --- /dev/null +++ b/crates/nostr-database/fbs/event_seen_by.fbs @@ -0,0 +1,7 @@ +namespace EventSeenByFbs; + +table EventSeenBy { + relay_urls: [string]; +} + +root_type EventSeenBy; \ No newline at end of file diff --git a/crates/nostr-database/src/error.rs b/crates/nostr-database/src/error.rs new file mode 100644 index 000000000..71945864a --- /dev/null +++ b/crates/nostr-database/src/error.rs @@ -0,0 +1,50 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +//! 
Nostr Database Error + +use thiserror::Error; + +/// Database Error +#[derive(Debug, Error)] +pub enum DatabaseError { + /// An error happened in the underlying database backend. + #[error("backend: {0}")] + Backend(Box), + /// Nostr error + #[error("nostr: {0}")] + Nostr(Box), + /// Not supported + #[error("method not supported by current backend")] + NotSupported, + /// Feature disabled + #[error("feature disabled for current backend")] + FeatureDisabled, + /// Not found + #[error("not found")] + NotFound, +} + +impl DatabaseError { + /// Create a new [`Backend`][Self::Backend] error. + /// + /// Shorthand for `Error::Backend(Box::new(error))`. + #[inline] + pub fn backend(error: E) -> Self + where + E: std::error::Error + Send + Sync + 'static, + { + Self::Backend(Box::new(error)) + } + + /// Create a new [`Nostr`][Self::Nostr] error. + /// + /// Shorthand for `Error::Nostr(Box::new(error))`. + #[inline] + pub fn nostr(error: E) -> Self + where + E: std::error::Error + Send + Sync + 'static, + { + Self::Nostr(Box::new(error)) + } +} diff --git a/crates/nostr-database/src/flatbuffers/event_generated.rs b/crates/nostr-database/src/flatbuffers/event_generated.rs new file mode 100644 index 000000000..240878f6f --- /dev/null +++ b/crates/nostr-database/src/flatbuffers/event_generated.rs @@ -0,0 +1,612 @@ +// automatically generated by the FlatBuffers compiler, do not modify + +// @generated + +use core::cmp::Ordering; +use core::mem; + +extern crate flatbuffers; +use self::flatbuffers::{EndianScalar, Follow}; + +#[allow(unused_imports, dead_code)] +pub mod event_fbs { + + use core::cmp::Ordering; + use core::mem; + + extern crate flatbuffers; + use self::flatbuffers::{EndianScalar, Follow}; + + // struct Fixed32Bytes, aligned to 1 + #[repr(transparent)] + #[derive(Clone, Copy, PartialEq)] + pub struct Fixed32Bytes(pub [u8; 32]); + impl Default for Fixed32Bytes { + fn default() -> Self { + Self([0; 32]) + } + } + impl core::fmt::Debug for Fixed32Bytes { + fn 
fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + f.debug_struct("Fixed32Bytes") + .field("val", &self.val()) + .finish() + } + } + + impl flatbuffers::SimpleToVerifyInSlice for Fixed32Bytes {} + impl<'a> flatbuffers::Follow<'a> for Fixed32Bytes { + type Inner = &'a Fixed32Bytes; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + <&'a Fixed32Bytes>::follow(buf, loc) + } + } + impl<'a> flatbuffers::Follow<'a> for &'a Fixed32Bytes { + type Inner = &'a Fixed32Bytes; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + flatbuffers::follow_cast_ref::(buf, loc) + } + } + impl<'b> flatbuffers::Push for Fixed32Bytes { + type Output = Fixed32Bytes; + #[inline] + unsafe fn push(&self, dst: &mut [u8], _written_len: usize) { + let src = ::core::slice::from_raw_parts( + self as *const Fixed32Bytes as *const u8, + Self::size(), + ); + dst.copy_from_slice(src); + } + } + + impl<'a> flatbuffers::Verifiable for Fixed32Bytes { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.in_buffer::(pos) + } + } + + impl<'a> Fixed32Bytes { + #[allow(clippy::too_many_arguments)] + pub fn new(val: &[u8; 32]) -> Self { + let mut s = Self([0; 32]); + s.set_val(val); + s + } + + pub fn val(&'a self) -> flatbuffers::Array<'a, u8, 32> { + // Safety: + // Created from a valid Table for this object + // Which contains a valid array in this slot + unsafe { flatbuffers::Array::follow(&self.0, 0) } + } + + pub fn set_val(&mut self, items: &[u8; 32]) { + // Safety: + // Created from a valid Table for this object + // Which contains a valid array in this slot + unsafe { flatbuffers::emplace_scalar_array(&mut self.0, 0, items) }; + } + } + + // struct Fixed64Bytes, aligned to 1 + #[repr(transparent)] + #[derive(Clone, Copy, PartialEq)] + pub struct Fixed64Bytes(pub [u8; 64]); + impl Default for Fixed64Bytes { + fn default() 
-> Self { + Self([0; 64]) + } + } + impl core::fmt::Debug for Fixed64Bytes { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + f.debug_struct("Fixed64Bytes") + .field("val", &self.val()) + .finish() + } + } + + impl flatbuffers::SimpleToVerifyInSlice for Fixed64Bytes {} + impl<'a> flatbuffers::Follow<'a> for Fixed64Bytes { + type Inner = &'a Fixed64Bytes; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + <&'a Fixed64Bytes>::follow(buf, loc) + } + } + impl<'a> flatbuffers::Follow<'a> for &'a Fixed64Bytes { + type Inner = &'a Fixed64Bytes; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + flatbuffers::follow_cast_ref::(buf, loc) + } + } + impl<'b> flatbuffers::Push for Fixed64Bytes { + type Output = Fixed64Bytes; + #[inline] + unsafe fn push(&self, dst: &mut [u8], _written_len: usize) { + let src = ::core::slice::from_raw_parts( + self as *const Fixed64Bytes as *const u8, + Self::size(), + ); + dst.copy_from_slice(src); + } + } + + impl<'a> flatbuffers::Verifiable for Fixed64Bytes { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.in_buffer::(pos) + } + } + + impl<'a> Fixed64Bytes { + #[allow(clippy::too_many_arguments)] + pub fn new(val: &[u8; 64]) -> Self { + let mut s = Self([0; 64]); + s.set_val(val); + s + } + + pub fn val(&'a self) -> flatbuffers::Array<'a, u8, 64> { + // Safety: + // Created from a valid Table for this object + // Which contains a valid array in this slot + unsafe { flatbuffers::Array::follow(&self.0, 0) } + } + + pub fn set_val(&mut self, items: &[u8; 64]) { + // Safety: + // Created from a valid Table for this object + // Which contains a valid array in this slot + unsafe { flatbuffers::emplace_scalar_array(&mut self.0, 0, items) }; + } + } + + pub enum StringVectorOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct StringVector<'a> { + 
pub _tab: flatbuffers::Table<'a>, + } + + impl<'a> flatbuffers::Follow<'a> for StringVector<'a> { + type Inner = StringVector<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: flatbuffers::Table::new(buf, loc), + } + } + } + + impl<'a> StringVector<'a> { + pub const VT_DATA: flatbuffers::VOffsetT = 4; + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + StringVector { _tab: table } + } + #[allow(unused_mut)] + pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, + args: &'args StringVectorArgs<'args>, + ) -> flatbuffers::WIPOffset> { + let mut builder = StringVectorBuilder::new(_fbb); + if let Some(x) = args.data { + builder.add_data(x); + } + builder.finish() + } + + #[inline] + pub fn data( + &self, + ) -> Option>> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::>, + >>(StringVector::VT_DATA, None) + } + } + } + + impl flatbuffers::Verifiable for StringVector<'_> { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.visit_table(pos)? + .visit_field::>, + >>("data", Self::VT_DATA, false)? 
+ .finish(); + Ok(()) + } + } + pub struct StringVectorArgs<'a> { + pub data: Option< + flatbuffers::WIPOffset>>, + >, + } + impl<'a> Default for StringVectorArgs<'a> { + #[inline] + fn default() -> Self { + StringVectorArgs { data: None } + } + } + + pub struct StringVectorBuilder<'a: 'b, 'b> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, + start_: flatbuffers::WIPOffset, + } + impl<'a: 'b, 'b> StringVectorBuilder<'a, 'b> { + #[inline] + pub fn add_data( + &mut self, + data: flatbuffers::WIPOffset< + flatbuffers::Vector<'b, flatbuffers::ForwardsUOffset<&'b str>>, + >, + ) { + self.fbb_ + .push_slot_always::>(StringVector::VT_DATA, data); + } + #[inline] + pub fn new( + _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>, + ) -> StringVectorBuilder<'a, 'b> { + let start = _fbb.start_table(); + StringVectorBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } + } + + impl core::fmt::Debug for StringVector<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("StringVector"); + ds.field("data", &self.data()); + ds.finish() + } + } + pub enum EventOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct Event<'a> { + pub _tab: flatbuffers::Table<'a>, + } + + impl<'a> flatbuffers::Follow<'a> for Event<'a> { + type Inner = Event<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: flatbuffers::Table::new(buf, loc), + } + } + } + + impl<'a> Event<'a> { + pub const VT_ID: flatbuffers::VOffsetT = 4; + pub const VT_PUBKEY: flatbuffers::VOffsetT = 6; + pub const VT_CREATED_AT: flatbuffers::VOffsetT = 8; + pub const VT_KIND: flatbuffers::VOffsetT = 10; + pub const VT_TAGS: flatbuffers::VOffsetT = 12; + pub const VT_CONTENT: flatbuffers::VOffsetT = 14; + pub const VT_SIG: flatbuffers::VOffsetT = 16; + + #[inline] + pub unsafe 
fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + Event { _tab: table } + } + #[allow(unused_mut)] + pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, + args: &'args EventArgs<'args>, + ) -> flatbuffers::WIPOffset> { + let mut builder = EventBuilder::new(_fbb); + builder.add_kind(args.kind); + builder.add_created_at(args.created_at); + if let Some(x) = args.sig { + builder.add_sig(x); + } + if let Some(x) = args.content { + builder.add_content(x); + } + if let Some(x) = args.tags { + builder.add_tags(x); + } + if let Some(x) = args.pubkey { + builder.add_pubkey(x); + } + if let Some(x) = args.id { + builder.add_id(x); + } + builder.finish() + } + + #[inline] + pub fn id(&self) -> Option<&'a Fixed32Bytes> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { self._tab.get::(Event::VT_ID, None) } + } + #[inline] + pub fn pubkey(&self) -> Option<&'a Fixed32Bytes> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { self._tab.get::(Event::VT_PUBKEY, None) } + } + #[inline] + pub fn created_at(&self) -> u64 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { self._tab.get::(Event::VT_CREATED_AT, Some(0)).unwrap() } + } + #[inline] + pub fn kind(&self) -> u64 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { self._tab.get::(Event::VT_KIND, Some(0)).unwrap() } + } + #[inline] + pub fn tags( + &self, + ) -> Option>>> + { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::>, + >>(Event::VT_TAGS, None) + } + } + #[inline] + pub fn content(&self) -> Option<&'a str> { + // Safety: + // Created from valid Table for this object + // which contains a valid 
value in this slot + unsafe { + self._tab + .get::>(Event::VT_CONTENT, None) + } + } + #[inline] + pub fn sig(&self) -> Option<&'a Fixed64Bytes> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { self._tab.get::(Event::VT_SIG, None) } + } + } + + impl flatbuffers::Verifiable for Event<'_> { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.visit_table(pos)? + .visit_field::("id", Self::VT_ID, false)? + .visit_field::("pubkey", Self::VT_PUBKEY, false)? + .visit_field::("created_at", Self::VT_CREATED_AT, false)? + .visit_field::("kind", Self::VT_KIND, false)? + .visit_field::>, + >>("tags", Self::VT_TAGS, false)? + .visit_field::>( + "content", + Self::VT_CONTENT, + false, + )? + .visit_field::("sig", Self::VT_SIG, false)? + .finish(); + Ok(()) + } + } + pub struct EventArgs<'a> { + pub id: Option<&'a Fixed32Bytes>, + pub pubkey: Option<&'a Fixed32Bytes>, + pub created_at: u64, + pub kind: u64, + pub tags: Option< + flatbuffers::WIPOffset< + flatbuffers::Vector<'a, flatbuffers::ForwardsUOffset>>, + >, + >, + pub content: Option>, + pub sig: Option<&'a Fixed64Bytes>, + } + impl<'a> Default for EventArgs<'a> { + #[inline] + fn default() -> Self { + EventArgs { + id: None, + pubkey: None, + created_at: 0, + kind: 0, + tags: None, + content: None, + sig: None, + } + } + } + + pub struct EventBuilder<'a: 'b, 'b> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, + start_: flatbuffers::WIPOffset, + } + impl<'a: 'b, 'b> EventBuilder<'a, 'b> { + #[inline] + pub fn add_id(&mut self, id: &Fixed32Bytes) { + self.fbb_ + .push_slot_always::<&Fixed32Bytes>(Event::VT_ID, id); + } + #[inline] + pub fn add_pubkey(&mut self, pubkey: &Fixed32Bytes) { + self.fbb_ + .push_slot_always::<&Fixed32Bytes>(Event::VT_PUBKEY, pubkey); + } + #[inline] + pub fn add_created_at(&mut self, created_at: u64) { + 
self.fbb_ + .push_slot::(Event::VT_CREATED_AT, created_at, 0); + } + #[inline] + pub fn add_kind(&mut self, kind: u64) { + self.fbb_.push_slot::(Event::VT_KIND, kind, 0); + } + #[inline] + pub fn add_tags( + &mut self, + tags: flatbuffers::WIPOffset< + flatbuffers::Vector<'b, flatbuffers::ForwardsUOffset>>, + >, + ) { + self.fbb_ + .push_slot_always::>(Event::VT_TAGS, tags); + } + #[inline] + pub fn add_content(&mut self, content: flatbuffers::WIPOffset<&'b str>) { + self.fbb_ + .push_slot_always::>(Event::VT_CONTENT, content); + } + #[inline] + pub fn add_sig(&mut self, sig: &Fixed64Bytes) { + self.fbb_ + .push_slot_always::<&Fixed64Bytes>(Event::VT_SIG, sig); + } + #[inline] + pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> EventBuilder<'a, 'b> { + let start = _fbb.start_table(); + EventBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } + } + + impl core::fmt::Debug for Event<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("Event"); + ds.field("id", &self.id()); + ds.field("pubkey", &self.pubkey()); + ds.field("created_at", &self.created_at()); + ds.field("kind", &self.kind()); + ds.field("tags", &self.tags()); + ds.field("content", &self.content()); + ds.field("sig", &self.sig()); + ds.finish() + } + } + #[inline] + /// Verifies that a buffer of bytes contains a `Event` + /// and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_event_unchecked`. + pub fn root_as_event(buf: &[u8]) -> Result { + flatbuffers::root::(buf) + } + #[inline] + /// Verifies that a buffer of bytes contains a size prefixed + /// `Event` and returns it. 
+ /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `size_prefixed_root_as_event_unchecked`. + pub fn size_prefixed_root_as_event( + buf: &[u8], + ) -> Result { + flatbuffers::size_prefixed_root::(buf) + } + #[inline] + /// Verifies, with the given options, that a buffer of bytes + /// contains a `Event` and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_event_unchecked`. + pub fn root_as_event_with_opts<'b, 'o>( + opts: &'o flatbuffers::VerifierOptions, + buf: &'b [u8], + ) -> Result, flatbuffers::InvalidFlatbuffer> { + flatbuffers::root_with_opts::>(opts, buf) + } + #[inline] + /// Verifies, with the given verifier options, that a buffer of + /// bytes contains a size prefixed `Event` and returns + /// it. Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_event_unchecked`. + pub fn size_prefixed_root_as_event_with_opts<'b, 'o>( + opts: &'o flatbuffers::VerifierOptions, + buf: &'b [u8], + ) -> Result, flatbuffers::InvalidFlatbuffer> { + flatbuffers::size_prefixed_root_with_opts::>(opts, buf) + } + #[inline] + /// Assumes, without verification, that a buffer of bytes contains a Event and returns it. + /// # Safety + /// Callers must trust the given bytes do indeed contain a valid `Event`. + pub unsafe fn root_as_event_unchecked(buf: &[u8]) -> Event { + flatbuffers::root_unchecked::(buf) + } + #[inline] + /// Assumes, without verification, that a buffer of bytes contains a size prefixed Event and returns it. + /// # Safety + /// Callers must trust the given bytes do indeed contain a valid size prefixed `Event`. 
+ pub unsafe fn size_prefixed_root_as_event_unchecked(buf: &[u8]) -> Event { + flatbuffers::size_prefixed_root_unchecked::(buf) + } + #[inline] + pub fn finish_event_buffer<'a, 'b>( + fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>, + root: flatbuffers::WIPOffset>, + ) { + fbb.finish(root, None); + } + + #[inline] + pub fn finish_size_prefixed_event_buffer<'a, 'b>( + fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>, + root: flatbuffers::WIPOffset>, + ) { + fbb.finish_size_prefixed(root, None); + } +} // pub mod EventFbs diff --git a/crates/nostr-database/src/flatbuffers/event_seen_by_generated.rs b/crates/nostr-database/src/flatbuffers/event_seen_by_generated.rs new file mode 100644 index 000000000..eda0ecf71 --- /dev/null +++ b/crates/nostr-database/src/flatbuffers/event_seen_by_generated.rs @@ -0,0 +1,216 @@ +// automatically generated by the FlatBuffers compiler, do not modify + +// @generated + +use core::cmp::Ordering; +use core::mem; + +extern crate flatbuffers; +use self::flatbuffers::{EndianScalar, Follow}; + +#[allow(unused_imports, dead_code)] +pub mod event_seen_by_fbs { + + use core::cmp::Ordering; + use core::mem; + + extern crate flatbuffers; + use self::flatbuffers::{EndianScalar, Follow}; + + pub enum EventSeenByOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct EventSeenBy<'a> { + pub _tab: flatbuffers::Table<'a>, + } + + impl<'a> flatbuffers::Follow<'a> for EventSeenBy<'a> { + type Inner = EventSeenBy<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: flatbuffers::Table::new(buf, loc), + } + } + } + + impl<'a> EventSeenBy<'a> { + pub const VT_RELAY_URLS: flatbuffers::VOffsetT = 4; + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + EventSeenBy { _tab: table } + } + #[allow(unused_mut)] + pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, + args: &'args EventSeenByArgs<'args>, + ) -> 
flatbuffers::WIPOffset> { + let mut builder = EventSeenByBuilder::new(_fbb); + if let Some(x) = args.relay_urls { + builder.add_relay_urls(x); + } + builder.finish() + } + + #[inline] + pub fn relay_urls( + &self, + ) -> Option>> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::>, + >>(EventSeenBy::VT_RELAY_URLS, None) + } + } + } + + impl flatbuffers::Verifiable for EventSeenBy<'_> { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.visit_table(pos)? + .visit_field::>, + >>("relay_urls", Self::VT_RELAY_URLS, false)? + .finish(); + Ok(()) + } + } + pub struct EventSeenByArgs<'a> { + pub relay_urls: Option< + flatbuffers::WIPOffset>>, + >, + } + impl<'a> Default for EventSeenByArgs<'a> { + #[inline] + fn default() -> Self { + EventSeenByArgs { relay_urls: None } + } + } + + pub struct EventSeenByBuilder<'a: 'b, 'b> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, + start_: flatbuffers::WIPOffset, + } + impl<'a: 'b, 'b> EventSeenByBuilder<'a, 'b> { + #[inline] + pub fn add_relay_urls( + &mut self, + relay_urls: flatbuffers::WIPOffset< + flatbuffers::Vector<'b, flatbuffers::ForwardsUOffset<&'b str>>, + >, + ) { + self.fbb_.push_slot_always::>( + EventSeenBy::VT_RELAY_URLS, + relay_urls, + ); + } + #[inline] + pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> EventSeenByBuilder<'a, 'b> { + let start = _fbb.start_table(); + EventSeenByBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } + } + + impl core::fmt::Debug for EventSeenBy<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("EventSeenBy"); + ds.field("relay_urls", &self.relay_urls()); + 
ds.finish() + } + } + #[inline] + /// Verifies that a buffer of bytes contains a `EventSeenBy` + /// and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_event_seen_by_unchecked`. + pub fn root_as_event_seen_by( + buf: &[u8], + ) -> Result { + flatbuffers::root::(buf) + } + #[inline] + /// Verifies that a buffer of bytes contains a size prefixed + /// `EventSeenBy` and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `size_prefixed_root_as_event_seen_by_unchecked`. + pub fn size_prefixed_root_as_event_seen_by( + buf: &[u8], + ) -> Result { + flatbuffers::size_prefixed_root::(buf) + } + #[inline] + /// Verifies, with the given options, that a buffer of bytes + /// contains a `EventSeenBy` and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_event_seen_by_unchecked`. + pub fn root_as_event_seen_by_with_opts<'b, 'o>( + opts: &'o flatbuffers::VerifierOptions, + buf: &'b [u8], + ) -> Result, flatbuffers::InvalidFlatbuffer> { + flatbuffers::root_with_opts::>(opts, buf) + } + #[inline] + /// Verifies, with the given verifier options, that a buffer of + /// bytes contains a size prefixed `EventSeenBy` and returns + /// it. Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_event_seen_by_unchecked`. 
+ pub fn size_prefixed_root_as_event_seen_by_with_opts<'b, 'o>( + opts: &'o flatbuffers::VerifierOptions, + buf: &'b [u8], + ) -> Result, flatbuffers::InvalidFlatbuffer> { + flatbuffers::size_prefixed_root_with_opts::>(opts, buf) + } + #[inline] + /// Assumes, without verification, that a buffer of bytes contains a EventSeenBy and returns it. + /// # Safety + /// Callers must trust the given bytes do indeed contain a valid `EventSeenBy`. + pub unsafe fn root_as_event_seen_by_unchecked(buf: &[u8]) -> EventSeenBy { + flatbuffers::root_unchecked::(buf) + } + #[inline] + /// Assumes, without verification, that a buffer of bytes contains a size prefixed EventSeenBy and returns it. + /// # Safety + /// Callers must trust the given bytes do indeed contain a valid size prefixed `EventSeenBy`. + pub unsafe fn size_prefixed_root_as_event_seen_by_unchecked(buf: &[u8]) -> EventSeenBy { + flatbuffers::size_prefixed_root_unchecked::(buf) + } + #[inline] + pub fn finish_event_seen_by_buffer<'a, 'b>( + fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>, + root: flatbuffers::WIPOffset>, + ) { + fbb.finish(root, None); + } + + #[inline] + pub fn finish_size_prefixed_event_seen_by_buffer<'a, 'b>( + fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>, + root: flatbuffers::WIPOffset>, + ) { + fbb.finish_size_prefixed(root, None); + } +} // pub mod EventSeenByFbs diff --git a/crates/nostr-database/src/flatbuffers/mod.rs b/crates/nostr-database/src/flatbuffers/mod.rs new file mode 100644 index 000000000..afcc24fa6 --- /dev/null +++ b/crates/nostr-database/src/flatbuffers/mod.rs @@ -0,0 +1,185 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +//! 
Nostr Database Flatbuffers + +use std::collections::HashSet; + +pub use flatbuffers::FlatBufferBuilder; +use flatbuffers::InvalidFlatbuffer; +use nostr::event::raw::RawEvent; +use nostr::secp256k1::schnorr::Signature; +use nostr::secp256k1::{self, XOnlyPublicKey}; +use nostr::{Event, EventId, Kind, Tag, Timestamp, Url}; +use thiserror::Error; + +#[allow(unused_imports, dead_code, clippy::all, unsafe_code, missing_docs)] +mod event_generated; +#[allow(unused_imports, dead_code, clippy::all, unsafe_code, missing_docs)] +mod event_seen_by_generated; + +use self::event_generated::event_fbs; +use self::event_seen_by_generated::event_seen_by_fbs; + +/// FlatBuffers Error +#[derive(Debug, Error)] +pub enum Error { + /// Invalud FlatBuffer + #[error(transparent)] + InvalidFlatbuffer(#[from] InvalidFlatbuffer), + #[error(transparent)] + /// Event ID error + EventId(#[from] nostr::event::id::Error), + /// Tag error + #[error(transparent)] + Tag(#[from] nostr::event::tag::Error), + /// Secp256k1 error + #[error(transparent)] + Secp256k1(#[from] secp256k1::Error), + /// Not found + #[error("not found")] + NotFound, +} + +/// FlatBuffer Encode trait +pub trait FlatBufferEncode { + /// FlatBuffer encode + fn encode<'a>(&self, fbb: &'a mut FlatBufferBuilder) -> &'a [u8]; +} + +/// FlatBuffer Decode trait +pub trait FlatBufferDecode: Sized { + /// FlatBuffer decode + fn decode(buf: &[u8]) -> Result; +} + +impl FlatBufferEncode for Event { + #[tracing::instrument(skip_all, level = "trace")] + fn encode<'a>(&self, fbb: &'a mut FlatBufferBuilder) -> &'a [u8] { + fbb.reset(); + + let id = event_fbs::Fixed32Bytes::new(&self.id.to_bytes()); + let pubkey = event_fbs::Fixed32Bytes::new(&self.pubkey.serialize()); + let sig = event_fbs::Fixed64Bytes::new(self.sig.as_ref()); + let tags = self + .tags + .iter() + .map(|t| { + let tags = t + .as_vec() + .iter() + .map(|t| fbb.create_string(t)) + .collect::>(); + let args = event_fbs::StringVectorArgs { + data: Some(fbb.create_vector(&tags)), + 
}; + event_fbs::StringVector::create(fbb, &args) + }) + .collect::>(); + let args = event_fbs::EventArgs { + id: Some(&id), + pubkey: Some(&pubkey), + created_at: self.created_at.as_u64(), + kind: self.kind.as_u64(), + tags: Some(fbb.create_vector(&tags)), + content: Some(fbb.create_string(&self.content)), + sig: Some(&sig), + }; + + let offset = event_fbs::Event::create(fbb, &args); + + event_fbs::finish_event_buffer(fbb, offset); + + fbb.finished_data() + } +} + +impl FlatBufferDecode for Event { + #[tracing::instrument(skip_all, level = "trace")] + fn decode(buf: &[u8]) -> Result { + let ev = event_fbs::root_as_event(buf)?; + let tags = ev + .tags() + .ok_or(Error::NotFound)? + .into_iter() + .filter_map(|tag| { + tag.data() + .map(|tag| Tag::parse(tag.into_iter().collect::>())) + }) + .collect::, _>>()?; + + Ok(Self { + id: EventId::from_slice(&ev.id().ok_or(Error::NotFound)?.0)?, + pubkey: XOnlyPublicKey::from_slice(&ev.pubkey().ok_or(Error::NotFound)?.0)?, + created_at: Timestamp::from(ev.created_at()), + kind: Kind::from(ev.kind()), + tags, + content: ev.content().ok_or(Error::NotFound)?.to_owned(), + sig: Signature::from_slice(&ev.sig().ok_or(Error::NotFound)?.0)?, + }) + } +} + +impl FlatBufferDecode for RawEvent { + #[tracing::instrument(skip_all, level = "trace")] + fn decode(buf: &[u8]) -> Result { + let ev = event_fbs::root_as_event(buf)?; + Ok(Self { + id: ev.id().ok_or(Error::NotFound)?.0, + pubkey: ev.pubkey().ok_or(Error::NotFound)?.0, + created_at: ev.created_at(), + kind: ev.kind(), + tags: ev + .tags() + .ok_or(Error::NotFound)? 
+ .into_iter() + .filter_map(|tag| match tag.data() { + Some(t) => { + if t.len() > 1 { + Some(t.into_iter().map(|s| s.to_owned()).collect::>()) + } else { + None + } + } + None => None, + }) + .collect(), + content: ev.content().ok_or(Error::NotFound)?.to_owned(), + sig: ev.sig().ok_or(Error::NotFound)?.0, + }) + } +} + +impl FlatBufferEncode for HashSet { + #[tracing::instrument(skip_all, level = "trace")] + fn encode<'a>(&self, fbb: &'a mut FlatBufferBuilder) -> &'a [u8] { + fbb.reset(); + + let urls: Vec<_> = self + .iter() + .map(|url| fbb.create_string(url.as_ref())) + .collect(); + let args = event_seen_by_fbs::EventSeenByArgs { + relay_urls: Some(fbb.create_vector(&urls)), + }; + + let offset = event_seen_by_fbs::EventSeenBy::create(fbb, &args); + + event_seen_by_fbs::finish_event_seen_by_buffer(fbb, offset); + + fbb.finished_data() + } +} + +impl FlatBufferDecode for HashSet { + #[tracing::instrument(skip_all, level = "trace")] + fn decode(buf: &[u8]) -> Result { + let ev = event_seen_by_fbs::root_as_event_seen_by(buf)?; + Ok(ev + .relay_urls() + .ok_or(Error::NotFound)? + .into_iter() + .filter_map(|url| Url::parse(url).ok()) + .collect::>()) + } +} diff --git a/crates/nostr-database/src/index.rs b/crates/nostr-database/src/index.rs new file mode 100644 index 000000000..9f70b0d70 --- /dev/null +++ b/crates/nostr-database/src/index.rs @@ -0,0 +1,256 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +//! 
Nostr Database Indexes + +use std::cmp::Ordering; +use std::collections::{BTreeSet, HashSet}; +use std::sync::Arc; + +use nostr::event::raw::RawEvent; +use nostr::secp256k1::XOnlyPublicKey; +use nostr::{Event, EventId, Filter, Kind, TagIndexValues, TagIndexes, Timestamp}; +use tokio::sync::RwLock; + +/// Public Key Prefix Size +const PUBLIC_KEY_PREFIX_SIZE: usize = 8; + +/// Event Index +#[derive(Debug, Clone, PartialEq, Eq)] +struct EventIndex { + /// Timestamp (seconds) + created_at: Timestamp, + /// Event ID + event_id: EventId, + /// Public key prefix + pubkey: PublicKeyPrefix, + /// Kind + kind: Kind, + /// Tag indexes + tags: TagIndexes, +} + +impl PartialOrd for EventIndex { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for EventIndex { + fn cmp(&self, other: &Self) -> Ordering { + if self.created_at != other.created_at { + self.created_at.cmp(&other.created_at).reverse() + } else { + self.event_id.cmp(&other.event_id) + } + } +} + +impl TryFrom for EventIndex { + type Error = nostr::event::id::Error; + fn try_from(raw: RawEvent) -> Result { + Ok(Self { + created_at: Timestamp::from(raw.created_at), + event_id: EventId::from_slice(&raw.id)?, + pubkey: PublicKeyPrefix::from(raw.pubkey), + kind: Kind::from(raw.kind), + tags: TagIndexes::from(raw.tags.into_iter()), + }) + } +} + +impl From<&Event> for EventIndex { + fn from(e: &Event) -> Self { + Self { + created_at: e.created_at, + event_id: e.id, + pubkey: PublicKeyPrefix::from(e.pubkey), + kind: e.kind, + tags: e.build_tags_index(), + } + } +} + +impl EventIndex { + fn filter_tags_match(&self, filter: &Filter) -> bool { + if filter.generic_tags.is_empty() || self.tags.is_empty() { + return true; + } + + filter.generic_tags.iter().all(|(tagname, set)| { + let set = TagIndexValues::from(set); + self.tags + .get(tagname) + .map(|valset| valset.intersection(&set).count() > 0) + .unwrap_or(false) + }) + } +} + +/// Public Key prefix +#[derive(Debug, Clone, Copy, 
PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct PublicKeyPrefix([u8; PUBLIC_KEY_PREFIX_SIZE]); + +impl From for PublicKeyPrefix { + fn from(pk: XOnlyPublicKey) -> Self { + let pk: [u8; 32] = pk.serialize(); + Self::from(pk) + } +} + +impl From<[u8; 32]> for PublicKeyPrefix { + fn from(pk: [u8; 32]) -> Self { + let mut pubkey = [0u8; PUBLIC_KEY_PREFIX_SIZE]; + pubkey.copy_from_slice(&pk[..PUBLIC_KEY_PREFIX_SIZE]); + Self(pubkey) + } +} + +/// Event Index Result +#[derive(Debug, Clone, Default, PartialEq, Eq)] +pub struct EventIndexResult { + /// Handled event should be stored into database? + pub to_store: bool, + /// List of events that should be removed from database + pub to_discard: HashSet, +} + +/// Database Indexes +#[derive(Debug, Clone, Default)] +pub struct DatabaseIndexes { + index: Arc>>, +} + +impl DatabaseIndexes { + /// New empty indexes + pub fn new() -> Self { + Self::default() + } + + /// Bulk load + #[tracing::instrument(skip_all)] + pub async fn bulk_load(&self, events: I) + where + I: IntoIterator, + { + let mut index = self.index.write().await; + let now = Timestamp::now(); + index.extend( + events + .into_iter() + .filter(|raw| !raw.is_expired(&now) && !raw.is_ephemeral()) + .filter_map(|raw| EventIndex::try_from(raw).ok()), + ); + } + + /// Index [`Event`] + #[tracing::instrument(skip_all, level = "trace")] + pub async fn index_event(&self, event: &Event) -> EventIndexResult { + // Check if it's expired or ephemeral + if event.is_expired() || event.is_ephemeral() { + return EventIndexResult::default(); + } + + let mut index = self.index.write().await; + + let mut should_insert: bool = true; + let mut to_discard: HashSet = HashSet::new(); + + if event.is_replaceable() { + let filter: Filter = Filter::new().author(event.pubkey).kind(event.kind); + for ev in self.internal_query(&index, &filter).await { + if ev.created_at > event.created_at { + should_insert = false; + } else if ev.created_at <= event.created_at { + 
to_discard.insert(ev.event_id); + } + } + } else if event.is_parameterized_replaceable() { + match event.identifier() { + Some(identifier) => { + let filter: Filter = Filter::new() + .author(event.pubkey) + .kind(event.kind) + .identifier(identifier); + for ev in self.internal_query(&index, &filter).await { + if ev.created_at >= event.created_at { + should_insert = false; + } else if ev.created_at < event.created_at { + to_discard.insert(ev.event_id); + } + } + } + None => should_insert = false, + } + } + + // Remove events + if !to_discard.is_empty() { + index.retain(|e| !to_discard.contains(&e.event_id)); + } + + // Insert event + if should_insert { + index.insert(EventIndex::from(event)); + } + + EventIndexResult { + to_store: should_insert, + to_discard, + } + } + + async fn internal_query<'a>( + &self, + index: &'a BTreeSet, + filter: &'a Filter, + ) -> impl Iterator { + let authors: HashSet = filter + .authors + .iter() + .map(|p| PublicKeyPrefix::from(*p)) + .collect(); + index.iter().filter(move |m| { + (filter.ids.is_empty() || filter.ids.contains(&m.event_id)) + && filter.since.map_or(true, |t| m.created_at >= t) + && filter.until.map_or(true, |t| m.created_at <= t) + && (filter.authors.is_empty() || authors.contains(&m.pubkey)) + && (filter.kinds.is_empty() || filter.kinds.contains(&m.kind)) + && m.filter_tags_match(filter) + }) + } + + /// Query + #[tracing::instrument(skip_all)] + pub async fn query(&self, filters: Vec) -> HashSet { + let index = self.index.read().await; + + let mut matching_ids: HashSet = HashSet::new(); + + for filter in filters.into_iter() { + if let (Some(since), Some(until)) = (filter.since, filter.until) { + if since > until { + continue; + } + } + + let iter = self + .internal_query(&index, &filter) + .await + .map(|m| m.event_id); + if let Some(limit) = filter.limit { + matching_ids.extend(iter.take(limit)) + } else { + matching_ids.extend(iter) + } + } + + matching_ids + } + + /// Clear indexes + pub async fn clear(&self) { + 
let mut index = self.index.write().await; + index.clear(); + } +} diff --git a/crates/nostr-database/src/lib.rs b/crates/nostr-database/src/lib.rs new file mode 100644 index 000000000..76f20cc90 --- /dev/null +++ b/crates/nostr-database/src/lib.rs @@ -0,0 +1,290 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +//! Nostr Database + +#![warn(missing_docs)] +#![warn(rustdoc::bare_urls)] + +use core::fmt; +use std::collections::HashSet; +use std::sync::Arc; + +pub use async_trait::async_trait; +pub use nostr; +use nostr::secp256k1::XOnlyPublicKey; +use nostr::{Event, EventId, Filter, JsonUtil, Kind, Metadata, Timestamp, Url}; + +mod error; +#[cfg(feature = "flatbuf")] +pub mod flatbuffers; +pub mod index; +pub mod memory; +mod options; + +pub use self::error::DatabaseError; +#[cfg(feature = "flatbuf")] +pub use self::flatbuffers::{FlatBufferBuilder, FlatBufferDecode, FlatBufferEncode}; +pub use self::index::{DatabaseIndexes, EventIndexResult}; +pub use self::memory::MemoryDatabase; +pub use self::options::DatabaseOptions; + +/// Backend +pub enum Backend { + /// Memory + Memory, + /// RocksDB + RocksDB, + /// Lightning Memory-Mapped Database + LMDB, + /// SQLite + SQLite, + /// IndexedDB + IndexedDB, + /// Custom + Custom(String), +} + +/// A type-erased [`NostrDatabase`]. +pub type DynNostrDatabase = dyn NostrDatabase; + +/// A type that can be type-erased into `Arc`. +/// +/// This trait is not meant to be implemented directly outside +/// `matrix-sdk-crypto`, but it is automatically implemented for everything that +/// implements `NostrDatabase`. +pub trait IntoNostrDatabase { + #[doc(hidden)] + fn into_nostr_database(self) -> Arc; +} + +impl IntoNostrDatabase for T +where + T: NostrDatabase + Sized + 'static, +{ + fn into_nostr_database(self) -> Arc { + Arc::new(EraseNostrDatabaseError(self)) + } +} + +// Turns a given `Arc` into `Arc` by attaching the +// NostrDatabase impl vtable of `EraseNostrDatabaseError`. 
+impl IntoNostrDatabase for Arc +where + T: NostrDatabase + 'static, +{ + fn into_nostr_database(self) -> Arc { + let ptr: *const T = Arc::into_raw(self); + let ptr_erased = ptr as *const EraseNostrDatabaseError; + // SAFETY: EraseNostrDatabaseError is repr(transparent) so T and + // EraseNostrDatabaseError have the same layout and ABI + unsafe { Arc::from_raw(ptr_erased) } + } +} + +/// Nostr Database +#[cfg_attr(target_arch = "wasm32", async_trait(?Send))] +#[cfg_attr(not(target_arch = "wasm32"), async_trait)] +pub trait NostrDatabase: AsyncTraitDeps { + /// Error + type Err: From + Into; + + /// Name of the backend database used (ex. rocksdb, lmdb, sqlite, indexeddb, ...) + fn backend(&self) -> Backend; + + /// Database options + fn opts(&self) -> DatabaseOptions; + + /// Count number of [`Event`] stored + async fn count(&self) -> Result; + + /// Save [`Event`] into store + /// + /// Return `true` if event was successfully saved into database. + async fn save_event(&self, event: &Event) -> Result; + + /// Check if [`Event`] has already been saved + async fn has_event_already_been_saved(&self, event_id: EventId) -> Result; + + /// Check if [`EventId`] has already been seen + async fn has_event_already_been_seen(&self, event_id: EventId) -> Result; + + /// Set [`EventId`] as seen by relay + /// + /// Useful for NIP65 (aka gossip) + async fn event_id_seen(&self, event_id: EventId, relay_url: Url) -> Result<(), Self::Err>; + + /// Get list of relays that have seen the [`EventId`] + async fn event_recently_seen_on_relays( + &self, + event_id: EventId, + ) -> Result>, Self::Err>; + + /// Get [`Event`] by [`EventId`] + async fn event_by_id(&self, event_id: EventId) -> Result; + + /// Query store with filters + async fn query(&self, filters: Vec) -> Result, Self::Err>; + + /// Get event IDs by filters + async fn event_ids_by_filters( + &self, + filters: Vec, + ) -> Result, Self::Err>; + + /// Get `negentropy` items + async fn negentropy_items( + &self, + filter: Filter, 
+ ) -> Result, Self::Err>; + + /// Wipe all data + async fn wipe(&self) -> Result<(), Self::Err>; +} + +/// Nostr Database Extension +#[cfg_attr(target_arch = "wasm32", async_trait(?Send))] +#[cfg_attr(not(target_arch = "wasm32"), async_trait)] +pub trait NostrDatabaseExt: NostrDatabase { + /// Get profile metadata + async fn profile(&self, public_key: XOnlyPublicKey) -> Result { + let filter = Filter::new() + .author(public_key) + .kind(Kind::Metadata) + .limit(1); + let events: Vec = self.query(vec![filter]).await?; + match events.first() { + Some(event) => Ok(Metadata::from_json(&event.content).map_err(DatabaseError::nostr)?), + None => Ok(Metadata::default()), // TODO: return an Option? + } + } +} + +#[cfg_attr(target_arch = "wasm32", async_trait(?Send))] +#[cfg_attr(not(target_arch = "wasm32"), async_trait)] +impl NostrDatabaseExt for T {} + +#[repr(transparent)] +struct EraseNostrDatabaseError(T); + +impl fmt::Debug for EraseNostrDatabaseError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +#[cfg_attr(target_arch = "wasm32", async_trait(?Send))] +#[cfg_attr(not(target_arch = "wasm32"), async_trait)] +impl NostrDatabase for EraseNostrDatabaseError { + type Err = DatabaseError; + + fn backend(&self) -> Backend { + self.0.backend() + } + + fn opts(&self) -> DatabaseOptions { + self.0.opts() + } + + async fn count(&self) -> Result { + self.0.count().await.map_err(Into::into) + } + + async fn save_event(&self, event: &Event) -> Result { + self.0.save_event(event).await.map_err(Into::into) + } + + async fn has_event_already_been_saved(&self, event_id: EventId) -> Result { + self.0 + .has_event_already_been_saved(event_id) + .await + .map_err(Into::into) + } + + async fn has_event_already_been_seen(&self, event_id: EventId) -> Result { + self.0 + .has_event_already_been_seen(event_id) + .await + .map_err(Into::into) + } + + async fn event_id_seen(&self, event_id: EventId, relay_url: Url) -> Result<(), Self::Err> { + self.0 + 
.event_id_seen(event_id, relay_url) + .await + .map_err(Into::into) + } + + async fn event_recently_seen_on_relays( + &self, + event_id: EventId, + ) -> Result>, Self::Err> { + self.0 + .event_recently_seen_on_relays(event_id) + .await + .map_err(Into::into) + } + + async fn event_by_id(&self, event_id: EventId) -> Result { + self.0.event_by_id(event_id).await.map_err(Into::into) + } + + async fn query(&self, filters: Vec) -> Result, Self::Err> { + self.0.query(filters).await.map_err(Into::into) + } + + async fn event_ids_by_filters( + &self, + filters: Vec, + ) -> Result, Self::Err> { + self.0 + .event_ids_by_filters(filters) + .await + .map_err(Into::into) + } + + async fn negentropy_items( + &self, + filter: Filter, + ) -> Result, Self::Err> { + self.0.negentropy_items(filter).await.map_err(Into::into) + } + + async fn wipe(&self) -> Result<(), Self::Err> { + self.0.wipe().await.map_err(Into::into) + } +} + +/// Alias for `Send` on non-wasm, empty trait (implemented by everything) on +/// wasm. +#[cfg(not(target_arch = "wasm32"))] +pub trait SendOutsideWasm: Send {} +#[cfg(not(target_arch = "wasm32"))] +impl SendOutsideWasm for T {} + +/// Alias for `Send` on non-wasm, empty trait (implemented by everything) on +/// wasm. +#[cfg(target_arch = "wasm32")] +pub trait SendOutsideWasm {} +#[cfg(target_arch = "wasm32")] +impl SendOutsideWasm for T {} + +/// Alias for `Sync` on non-wasm, empty trait (implemented by everything) on +/// wasm. +#[cfg(not(target_arch = "wasm32"))] +pub trait SyncOutsideWasm: Sync {} +#[cfg(not(target_arch = "wasm32"))] +impl SyncOutsideWasm for T {} + +/// Alias for `Sync` on non-wasm, empty trait (implemented by everything) on +/// wasm. +#[cfg(target_arch = "wasm32")] +pub trait SyncOutsideWasm {} +#[cfg(target_arch = "wasm32")] +impl SyncOutsideWasm for T {} + +/// Super trait that is used for our store traits, this trait will differ if +/// it's used on WASM. 
WASM targets will not require `Send` and `Sync` to have +/// implemented, while other targets will. +pub trait AsyncTraitDeps: std::fmt::Debug + SendOutsideWasm + SyncOutsideWasm {} +impl AsyncTraitDeps for T {} diff --git a/crates/nostr-database/src/memory.rs b/crates/nostr-database/src/memory.rs new file mode 100644 index 000000000..25996dfeb --- /dev/null +++ b/crates/nostr-database/src/memory.rs @@ -0,0 +1,193 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +//! Memory (RAM) Storage backend for Nostr apps + +use std::collections::{HashMap, HashSet}; +use std::sync::Arc; + +use async_trait::async_trait; +use nostr::{Event, EventId, Filter, FiltersMatchEvent, Timestamp, Url}; +use tokio::sync::RwLock; + +use crate::{ + Backend, DatabaseError, DatabaseIndexes, DatabaseOptions, EventIndexResult, NostrDatabase, +}; + +/// Memory Database (RAM) +#[derive(Debug)] +pub struct MemoryDatabase { + opts: DatabaseOptions, + seen_event_ids: Arc>>>, + events: Arc>>, + indexes: DatabaseIndexes, +} + +// TODO: add queue field? 
+ +impl Default for MemoryDatabase { + fn default() -> Self { + Self::new(DatabaseOptions { events: false }) + } +} + +impl MemoryDatabase { + /// New Memory database + pub fn new(opts: DatabaseOptions) -> Self { + Self { + opts, + seen_event_ids: Arc::new(RwLock::new(HashMap::new())), + events: Arc::new(RwLock::new(HashMap::new())), + indexes: DatabaseIndexes::new(), + } + } + + fn _event_id_seen( + &self, + seen_event_ids: &mut HashMap>, + event_id: EventId, + relay_url: Url, + ) { + seen_event_ids + .entry(event_id) + .and_modify(|set| { + set.insert(relay_url.clone()); + }) + .or_insert_with(|| { + let mut set = HashSet::with_capacity(1); + set.insert(relay_url); + set + }); + } +} + +#[cfg_attr(target_arch = "wasm32", async_trait(?Send))] +#[cfg_attr(not(target_arch = "wasm32"), async_trait)] +impl NostrDatabase for MemoryDatabase { + type Err = DatabaseError; + + fn backend(&self) -> Backend { + Backend::Memory + } + + fn opts(&self) -> DatabaseOptions { + self.opts + } + + async fn count(&self) -> Result { + let events = self.events.read().await; + Ok(events.len()) + } + + async fn save_event(&self, event: &Event) -> Result { + if self.opts.events { + let EventIndexResult { + to_store, + to_discard, + } = self.indexes.index_event(event).await; + + if to_store { + let mut events = self.events.write().await; + + events.insert(event.id, event.clone()); + + for event_id in to_discard.into_iter() { + events.remove(&event_id); + } + + Ok(true) + } else { + tracing::warn!("Event {} not saved: unknown", event.id); + Ok(false) + } + } else { + Ok(false) + } + } + + async fn has_event_already_been_saved(&self, event_id: EventId) -> Result { + if self.opts.events { + let events = self.events.read().await; + Ok(events.contains_key(&event_id)) + } else { + Ok(false) + } + } + + async fn has_event_already_been_seen(&self, event_id: EventId) -> Result { + let seen_event_ids = self.seen_event_ids.read().await; + Ok(seen_event_ids.contains_key(&event_id)) + } + + async fn 
event_id_seen(&self, event_id: EventId, relay_url: Url) -> Result<(), Self::Err> { + let mut seen_event_ids = self.seen_event_ids.write().await; + self._event_id_seen(&mut seen_event_ids, event_id, relay_url); + Ok(()) + } + + async fn event_recently_seen_on_relays( + &self, + event_id: EventId, + ) -> Result>, Self::Err> { + let seen_event_ids = self.seen_event_ids.read().await; + Ok(seen_event_ids.get(&event_id).cloned()) + } + + async fn event_by_id(&self, event_id: EventId) -> Result { + if self.opts.events { + let events = self.events.read().await; + events + .get(&event_id) + .cloned() + .ok_or(DatabaseError::NotFound) + } else { + Err(DatabaseError::FeatureDisabled) + } + } + + #[tracing::instrument(skip_all)] + async fn query(&self, filters: Vec) -> Result, Self::Err> { + if self.opts.events { + let ids = self.indexes.query(filters.clone()).await; + let events = self.events.read().await; + + let mut list: Vec = Vec::new(); + for event_id in ids.into_iter() { + if let Some(event) = events.get(&event_id) { + if filters.match_event(event) { + list.push(event.clone()); + } + } + } + Ok(list) + } else { + Err(DatabaseError::FeatureDisabled) + } + } + + async fn event_ids_by_filters( + &self, + filters: Vec, + ) -> Result, Self::Err> { + if self.opts.events { + Ok(self.indexes.query(filters).await) + } else { + Err(DatabaseError::FeatureDisabled) + } + } + + async fn negentropy_items( + &self, + _filter: Filter, + ) -> Result, Self::Err> { + Err(DatabaseError::NotSupported) + } + + async fn wipe(&self) -> Result<(), Self::Err> { + let mut seen_event_ids = self.seen_event_ids.write().await; + seen_event_ids.clear(); + let mut events = self.events.write().await; + events.clear(); + Ok(()) + } +} diff --git a/crates/nostr-database/src/options.rs b/crates/nostr-database/src/options.rs new file mode 100644 index 000000000..2ee9cf72e --- /dev/null +++ b/crates/nostr-database/src/options.rs @@ -0,0 +1,24 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed 
under the MIT software license + +//! Nostr Database options + +/// Database options +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct DatabaseOptions { + /// Store events (?) + pub events: bool, +} + +impl Default for DatabaseOptions { + fn default() -> Self { + Self { events: true } + } +} + +impl DatabaseOptions { + /// New default database options + pub fn new() -> Self { + Self::default() + } +} diff --git a/crates/nostr-indexeddb/Cargo.toml b/crates/nostr-indexeddb/Cargo.toml new file mode 100644 index 000000000..8e9bac43a --- /dev/null +++ b/crates/nostr-indexeddb/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "nostr-indexeddb" +version = "0.1.0" +edition = "2021" +description = "Web's IndexedDB Storage backend for Nostr apps" +authors = ["Yuki Kishimoto "] +homepage.workspace = true +repository.workspace = true +license.workspace = true +readme = "README.md" +rust-version = "1.64.0" +keywords = ["nostr", "database", "indexeddb"] + +[dependencies] +async-trait = { workspace = true } +indexed_db_futures = "0.4" +nostr = { workspace = true, features = ["std"] } +nostr-database = { workspace = true, features = ["flatbuf"] } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["sync"] } +tracing = { workspace = true, features = ["std", "attributes"] } +wasm-bindgen = "0.2" + +[dev-dependencies] +wasm-bindgen-test = "0.3" diff --git a/crates/nostr-indexeddb/README.md b/crates/nostr-indexeddb/README.md new file mode 100644 index 000000000..bb0a7ac7e --- /dev/null +++ b/crates/nostr-indexeddb/README.md @@ -0,0 +1,17 @@ +# Nostr IndexedDB + +This crate implements a storage backend on IndexedDB for web environments. + +## State + +**This library is in an ALPHA state**, things that are implemented generally work but the API will change in breaking ways. 
+ +## License + +This project is distributed under the MIT software license - see the [LICENSE](../../LICENSE) file for details + +## Donations + +⚡ Tips: + +⚡ Lightning Address: yuki@getalby.com \ No newline at end of file diff --git a/crates/nostr-indexeddb/examples/webapp/.cargo/config.toml b/crates/nostr-indexeddb/examples/webapp/.cargo/config.toml new file mode 100644 index 000000000..435ed755e --- /dev/null +++ b/crates/nostr-indexeddb/examples/webapp/.cargo/config.toml @@ -0,0 +1,2 @@ +[build] +target = "wasm32-unknown-unknown" \ No newline at end of file diff --git a/crates/nostr-indexeddb/examples/webapp/.gitignore b/crates/nostr-indexeddb/examples/webapp/.gitignore new file mode 100644 index 000000000..d5ae108a3 --- /dev/null +++ b/crates/nostr-indexeddb/examples/webapp/.gitignore @@ -0,0 +1,4 @@ +dist/ +target/ +Cargo.lock +.DS_Store diff --git a/crates/nostr-indexeddb/examples/webapp/Cargo.toml b/crates/nostr-indexeddb/examples/webapp/Cargo.toml new file mode 100644 index 000000000..a3c820d48 --- /dev/null +++ b/crates/nostr-indexeddb/examples/webapp/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "nostr-indexeddb-example" +version = "0.1.0" +edition = "2021" +publish = false + +# Prevent this from interfering with workspaces +[workspace] +members = ["."] + +[dependencies] +nostr-indexeddb = { path = "../../" } +tracing-wasm = "0.2" +wasm-bindgen-futures = "0.4" +web-sys = "0.3" +yew = { version = "0.21", features = ["csr"] } diff --git a/crates/nostr-indexeddb/examples/webapp/Makefile b/crates/nostr-indexeddb/examples/webapp/Makefile new file mode 100644 index 000000000..ac0a4858c --- /dev/null +++ b/crates/nostr-indexeddb/examples/webapp/Makefile @@ -0,0 +1,6 @@ +init: + rustup target add wasm32-unknown-unknown + cargo install --locked trunk + +serve: + trunk serve \ No newline at end of file diff --git a/crates/nostr-indexeddb/examples/webapp/README.md b/crates/nostr-indexeddb/examples/webapp/README.md new file mode 100644 index 000000000..9827add57 
--- /dev/null +++ b/crates/nostr-indexeddb/examples/webapp/README.md @@ -0,0 +1,22 @@ +# Yew Trunk Template + +### Installation + +If you don't already have it installed, it's time to install Rust: . +The rest of this guide assumes a typical Rust installation which contains both `rustup` and Cargo. + +### Initalization + +```bash +make init +``` + +That's it, we're done! + +### Running + +```bash +make serve +``` + +Rebuilds the app whenever a change is detected and runs a local server to host it. diff --git a/crates/nostr-indexeddb/examples/webapp/index.html b/crates/nostr-indexeddb/examples/webapp/index.html new file mode 100644 index 000000000..4d13cf1a8 --- /dev/null +++ b/crates/nostr-indexeddb/examples/webapp/index.html @@ -0,0 +1,8 @@ + + + + + Trunk Template + + + diff --git a/crates/nostr-indexeddb/examples/webapp/index.scss b/crates/nostr-indexeddb/examples/webapp/index.scss new file mode 100644 index 000000000..710545f3a --- /dev/null +++ b/crates/nostr-indexeddb/examples/webapp/index.scss @@ -0,0 +1,35 @@ +html, +body { + height: 100%; + margin: 0; +} + +body { + align-items: center; + display: flex; + justify-content: center; + + background: linear-gradient(to bottom right, #444444, #009a5b); + font-size: 1.5rem; +} + +main { + color: #fff6d5; + font-family: sans-serif; + text-align: center; +} + +.logo { + height: 20em; +} + +.heart:after { + content: "❤️"; + + font-size: 1.75em; +} + +h1 + .subtitle { + display: block; + margin-top: -1em; +} diff --git a/crates/nostr-indexeddb/examples/webapp/src/app.rs b/crates/nostr-indexeddb/examples/webapp/src/app.rs new file mode 100644 index 000000000..385301304 --- /dev/null +++ b/crates/nostr-indexeddb/examples/webapp/src/app.rs @@ -0,0 +1,47 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +use nostr_indexeddb::database::NostrDatabase; +use nostr_indexeddb::nostr::prelude::*; +use nostr_indexeddb::WebDatabase; +use wasm_bindgen_futures::spawn_local; +use 
web_sys::console; +use yew::prelude::*; + +#[function_component(App)] +pub fn app() -> Html { + spawn_local(async { + let secret_key = SecretKey::from_bech32( + "nsec1j4c6269y9w0q2er2xjw8sv2ehyrtfxq3jwgdlxj6qfn8z4gjsq5qfvfk99", + ) + .unwrap(); + let keys_a = Keys::new(secret_key); + console::log_1(&format!("Pubkey A: {}", keys_a.public_key()).into()); + + let database = WebDatabase::open("nostr-sdk-indexeddb-test").await.unwrap(); + + let metadata = Metadata::new().name("Name"); + let event = EventBuilder::set_metadata(metadata) + .to_event(&keys_a) + .unwrap(); + database.save_event(&event).await.unwrap(); + + let events = database + .query(vec![Filter::new() + .kinds(vec![Kind::Metadata, Kind::Custom(123), Kind::TextNote]) + .limit(20) + .author(keys_a.public_key())]) + .await + .unwrap(); + console::log_1(&format!("Events: {events:?}").into()); + console::log_1(&format!("Got {} events", events.len()).into()); + }); + + html! { +
+ +

{ "Hello World!" }

+ { "from Yew with " } +
+ } +} diff --git a/crates/nostr-indexeddb/examples/webapp/src/main.rs b/crates/nostr-indexeddb/examples/webapp/src/main.rs new file mode 100644 index 000000000..3fe8b9ea2 --- /dev/null +++ b/crates/nostr-indexeddb/examples/webapp/src/main.rs @@ -0,0 +1,15 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +mod app; + +use app::App; + +fn main() { + // Init logger + //wasm_logger::init(wasm_logger::Config::default()); + tracing_wasm::set_as_global_default(); + + // Start WASM app + yew::Renderer::::new().render(); +} diff --git a/crates/nostr-indexeddb/src/error.rs b/crates/nostr-indexeddb/src/error.rs new file mode 100644 index 000000000..66433ddeb --- /dev/null +++ b/crates/nostr-indexeddb/src/error.rs @@ -0,0 +1,39 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +use nostr_database::DatabaseError; +use thiserror::Error; + +/// IndexedDB error +#[derive(Debug, Error)] +pub enum IndexedDBError { + /// DOM error + #[error("DomException {name} ({code}): {message}")] + DomException { + /// DomException code + code: u16, + /// Specific name of the DomException + name: String, + /// Message given to the DomException + message: String, + }, + /// Database error + #[error(transparent)] + Database(#[from] DatabaseError), +} + +impl From for IndexedDBError { + fn from(frm: indexed_db_futures::web_sys::DomException) -> Self { + Self::DomException { + name: frm.name(), + message: frm.message(), + code: frm.code(), + } + } +} + +impl From for DatabaseError { + fn from(e: IndexedDBError) -> Self { + Self::backend(e) + } +} diff --git a/crates/nostr-indexeddb/src/hex.rs b/crates/nostr-indexeddb/src/hex.rs new file mode 100644 index 000000000..49e231cf7 --- /dev/null +++ b/crates/nostr-indexeddb/src/hex.rs @@ -0,0 +1,139 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +//! 
Hex + +use std::fmt; +use std::string::String; +use std::vec::Vec; + +/// Hex error +#[derive(Debug, PartialEq, Eq)] +pub enum Error { + /// An invalid character was found + InvalidHexCharacter { + /// Char + c: char, + /// Char index + index: usize, + }, + /// A hex string's length needs to be even, as two digits correspond to + /// one byte. + OddLength, +} + +impl std::error::Error for Error {} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::InvalidHexCharacter { c, index } => { + write!(f, "Invalid character {} at position {}", c, index) + } + Self::OddLength => write!(f, "Odd number of digits"), + } + } +} + +#[inline] +fn from_digit(num: u8) -> char { + if num < 10 { + (b'0' + num) as char + } else { + (b'a' + num - 10) as char + } +} + +/// Hex encode +pub fn encode(data: T) -> String +where + T: AsRef<[u8]>, +{ + let bytes: &[u8] = data.as_ref(); + let mut hex: String = String::with_capacity(2 * bytes.len()); + for byte in bytes.iter() { + hex.push(from_digit(byte >> 4)); + hex.push(from_digit(byte & 0xF)); + } + hex +} + +const fn val(c: u8, idx: usize) -> Result { + match c { + b'A'..=b'F' => Ok(c - b'A' + 10), + b'a'..=b'f' => Ok(c - b'a' + 10), + b'0'..=b'9' => Ok(c - b'0'), + _ => Err(Error::InvalidHexCharacter { + c: c as char, + index: idx, + }), + } +} + +/// Hex decode +pub fn decode(hex: T) -> Result, Error> +where + T: AsRef<[u8]>, +{ + let hex = hex.as_ref(); + let len = hex.len(); + + if len % 2 != 0 { + return Err(Error::OddLength); + } + + let mut bytes: Vec = Vec::with_capacity(len / 2); + + for i in (0..len).step_by(2) { + let high = val(hex[i], i)?; + let low = val(hex[i + 1], i + 1)?; + bytes.push(high << 4 | low); + } + + Ok(bytes) +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_encode() { + assert_eq!(encode("foobar"), "666f6f626172"); + } + + #[test] + fn test_decode() { + assert_eq!( + decode("666f6f626172"), + 
Ok(String::from("foobar").into_bytes()) + ); + } + + #[test] + pub fn test_invalid_length() { + assert_eq!(decode("1").unwrap_err(), Error::OddLength); + assert_eq!(decode("666f6f6261721").unwrap_err(), Error::OddLength); + } + + #[test] + pub fn test_invalid_char() { + assert_eq!( + decode("66ag").unwrap_err(), + Error::InvalidHexCharacter { c: 'g', index: 3 } + ); + } +} + +#[cfg(bench)] +mod benches { + use super::*; + use crate::test::{black_box, Bencher}; + + #[bench] + pub fn hex_encode(bh: &mut Bencher) { + bh.iter(|| { + black_box(encode("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")); + }); + } +} diff --git a/crates/nostr-indexeddb/src/lib.rs b/crates/nostr-indexeddb/src/lib.rs new file mode 100644 index 000000000..368676f3c --- /dev/null +++ b/crates/nostr-indexeddb/src/lib.rs @@ -0,0 +1,406 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +//! Web's IndexedDB Storage backend for Nostr SDK + +#![forbid(unsafe_code)] +#![warn(missing_docs)] +#![warn(rustdoc::bare_urls)] +#![allow(unknown_lints, clippy::arc_with_non_send_sync)] +#![cfg_attr(not(target_arch = "wasm32"), allow(unused))] + +use std::collections::{HashMap, HashSet}; +use std::fmt; +use std::future::IntoFuture; +use std::sync::Arc; + +pub extern crate nostr; +pub extern crate nostr_database as database; + +#[cfg(target_arch = "wasm32")] +use async_trait::async_trait; +use indexed_db_futures::request::{IdbOpenDbRequestLike, OpenDbRequest}; +use indexed_db_futures::web_sys::IdbTransactionMode; +use indexed_db_futures::{IdbDatabase, IdbQuerySource, IdbVersionChangeEvent}; +use nostr::event::raw::RawEvent; +use nostr::{Event, EventId, Filter, Timestamp, Url}; +#[cfg(target_arch = "wasm32")] +use nostr_database::NostrDatabase; +use nostr_database::{ + Backend, DatabaseError, DatabaseIndexes, DatabaseOptions, EventIndexResult, FlatBufferBuilder, + FlatBufferDecode, FlatBufferEncode, +}; +use tokio::sync::Mutex; +use wasm_bindgen::JsValue; + +mod error; +mod hex; + 
+pub use self::error::IndexedDBError; + +const CURRENT_DB_VERSION: u32 = 2; +const EVENTS_CF: &str = "events"; +const EVENTS_SEEN_BY_RELAYS_CF: &str = "event-seen-by-relays"; +const ALL_STORES: [&str; 2] = [EVENTS_CF, EVENTS_SEEN_BY_RELAYS_CF]; + +/// Helper struct for upgrading the inner DB. +#[derive(Debug, Clone, Default)] +pub struct OngoingMigration { + /// Names of stores to drop. + drop_stores: HashSet<&'static str>, + /// Names of stores to create. + create_stores: HashSet<&'static str>, + /// Store name => key-value data to add. + data: HashMap<&'static str, Vec<(JsValue, JsValue)>>, +} + +/// IndexedDB Nostr Database +#[derive(Clone)] +pub struct WebDatabase { + db: Arc, + indexes: DatabaseIndexes, + fbb: Arc>>, +} + +impl fmt::Debug for WebDatabase { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("WebDatabase") + .field("name", &self.db.name()) + .finish() + } +} + +impl WebDatabase { + /// Open IndexedDB store + pub async fn open(name: S) -> Result + where + S: AsRef, + { + let mut this = Self { + db: Arc::new(IdbDatabase::open(name.as_ref())?.into_future().await?), + indexes: DatabaseIndexes::new(), + fbb: Arc::new(Mutex::new(FlatBufferBuilder::with_capacity(70_000))), + }; + + this.migration().await?; + this.build_indexes().await?; + + Ok(this) + } + + async fn migration(&mut self) -> Result<(), IndexedDBError> { + let name: String = self.db.name(); + let mut old_version: u32 = self.db.version() as u32; + + if old_version < CURRENT_DB_VERSION { + // Inside the `onupgradeneeded` callback we would know whether it's a new DB + // because the old version would be set to 0, here it is already set to 1 so we + // check if the stores exist. 
+ if old_version == 1 && self.db.object_store_names().next().is_none() { + old_version = 0; + } + + if old_version == 0 { + tracing::info!("Initializing database schemas..."); + let migration = OngoingMigration { + create_stores: ALL_STORES.into_iter().collect(), + ..Default::default() + }; + self.apply_migration(CURRENT_DB_VERSION, migration).await?; + tracing::info!("Database schemas initialized."); + } else { + /* if old_version < 3 { + db = migrate_to_v3(db, store_cipher).await?; + } + if old_version < 4 { + db = migrate_to_v4(db, store_cipher).await?; + } */ + } + + self.db.close(); + + let mut db_req: OpenDbRequest = IdbDatabase::open_u32(&name, CURRENT_DB_VERSION)?; + db_req.set_on_upgrade_needed(Some( + move |evt: &IdbVersionChangeEvent| -> Result<(), JsValue> { + // Sanity check. + // There should be no upgrade needed since the database should have already been + // upgraded to the latest version. + panic!( + "Opening database that was not fully upgraded: \ + DB version: {}; latest version: {CURRENT_DB_VERSION}", + evt.old_version() + ) + }, + )); + + self.db = Arc::new(db_req.into_future().await?); + } + + Ok(()) + } + + async fn apply_migration( + &mut self, + version: u32, + migration: OngoingMigration, + ) -> Result<(), IndexedDBError> { + let name: String = self.db.name(); + self.db.close(); + + let mut db_req: OpenDbRequest = IdbDatabase::open_u32(&name, version)?; + db_req.set_on_upgrade_needed(Some( + move |evt: &IdbVersionChangeEvent| -> Result<(), JsValue> { + // Changing the format can only happen in the upgrade procedure + for store in &migration.drop_stores { + evt.db().delete_object_store(store)?; + } + for store in &migration.create_stores { + evt.db().create_object_store(store)?; + tracing::debug!("Created '{store}' object store"); + } + + Ok(()) + }, + )); + + self.db = Arc::new(db_req.into_future().await?); + + // Finally, we can add data to the newly created tables if needed. 
+ if !migration.data.is_empty() { + let stores: Vec<_> = migration.data.keys().copied().collect(); + let tx = self + .db + .transaction_on_multi_with_mode(&stores, IdbTransactionMode::Readwrite)?; + + for (name, data) in migration.data { + let store = tx.object_store(name)?; + for (key, value) in data { + store.put_key_val(&key, &value)?; + } + } + + tx.await.into_result()?; + } + + Ok(()) + } + + async fn build_indexes(&self) -> Result<(), IndexedDBError> { + tracing::debug!("Building database indexes..."); + let tx = self + .db + .transaction_on_one_with_mode(EVENTS_CF, IdbTransactionMode::Readonly)?; + let store = tx.object_store(EVENTS_CF)?; + let events = store + .get_all()? + .await? + .into_iter() + .filter_map(|v| v.as_string()) + .filter_map(|v| { + let bytes = hex::decode(v).ok()?; + RawEvent::decode(&bytes).ok() + }); + self.indexes.bulk_load(events).await; + tracing::info!("Database indexes loaded"); + Ok(()) + } +} + +// Small hack to have the following macro invocation act as the appropriate +// trait impl block on wasm, but still be compiled on non-wasm as a regular +// impl block otherwise. +// +// The trait impl doesn't compile on non-wasm due to unfulfilled trait bounds, +// this hack allows us to still have most of rust-analyzer's IDE functionality +// within the impl block without having to set it up to check things against +// the wasm target (which would disable many other parts of the codebase). +#[cfg(target_arch = "wasm32")] +macro_rules! impl_nostr_database { + ({ $($body:tt)* }) => { + #[async_trait(?Send)] + impl NostrDatabase for WebDatabase { + type Err = IndexedDBError; + + $($body)* + } + }; +} + +#[cfg(not(target_arch = "wasm32"))] +macro_rules! 
impl_nostr_database { + ({ $($body:tt)* }) => { + impl WebDatabase { + $($body)* + } + }; +} + +impl_nostr_database!({ + fn backend(&self) -> Backend { + Backend::IndexedDB + } + + fn opts(&self) -> DatabaseOptions { + DatabaseOptions::default() + } + + async fn count(&self) -> Result { + let tx = self + .db + .transaction_on_one_with_mode(EVENTS_CF, IdbTransactionMode::Readonly)?; + let store = tx.object_store(EVENTS_CF)?; + let count: u32 = store.count()?.await?; + Ok(count as usize) + } + + #[tracing::instrument(skip_all, level = "trace")] + async fn save_event(&self, event: &Event) -> Result { + // Index event + let EventIndexResult { + to_store, + to_discard, + } = self.indexes.index_event(event).await; + + if to_store { + // Acquire FlatBuffers Builder + let mut fbb = self.fbb.lock().await; + + let tx = self + .db + .transaction_on_one_with_mode(EVENTS_CF, IdbTransactionMode::Readwrite)?; + let store = tx.object_store(EVENTS_CF)?; + let key = JsValue::from(event.id.to_hex()); + let value = JsValue::from(hex::encode(event.encode(&mut fbb))); + store.put_key_val(&key, &value)?; + + // Discard events no longer needed + for event_id in to_discard.into_iter() { + let key = JsValue::from(event_id.to_hex()); + store.delete(&key)?; + } + + tx.await.into_result()?; + + Ok(true) + } else { + Ok(false) + } + } + + async fn has_event_already_been_saved( + &self, + event_id: EventId, + ) -> Result { + let tx = self + .db + .transaction_on_one_with_mode(EVENTS_CF, IdbTransactionMode::Readonly)?; + let store = tx.object_store(EVENTS_CF)?; + let key = JsValue::from(event_id.to_hex()); + Ok(store.get(&key)?.await?.is_some()) + } + + async fn has_event_already_been_seen(&self, event_id: EventId) -> Result { + let tx = self + .db + .transaction_on_one_with_mode(EVENTS_SEEN_BY_RELAYS_CF, IdbTransactionMode::Readonly)?; + let store = tx.object_store(EVENTS_SEEN_BY_RELAYS_CF)?; + let key = JsValue::from(event_id.to_hex()); + Ok(store.get(&key)?.await?.is_some()) + } + + async fn 
event_id_seen( + &self, + _event_id: EventId, + _relay_url: Url, + ) -> Result<(), IndexedDBError> { + todo!() + } + + async fn event_recently_seen_on_relays( + &self, + _event_id: EventId, + ) -> Result>, IndexedDBError> { + todo!() + } + + #[tracing::instrument(skip_all)] + async fn event_by_id(&self, event_id: EventId) -> Result { + let tx = self + .db + .transaction_on_one_with_mode(EVENTS_CF, IdbTransactionMode::Readonly)?; + let store = tx.object_store(EVENTS_CF)?; + let key = JsValue::from(event_id.to_hex()); + match store.get(&key)?.await? { + Some(jsvalue) => { + let event_hex = jsvalue + .as_string() + .ok_or(IndexedDBError::Database(DatabaseError::NotFound))?; + let bytes = hex::decode(event_hex).map_err(DatabaseError::backend)?; + Ok(Event::decode(&bytes).map_err(DatabaseError::backend)?) + } + None => Err(IndexedDBError::Database(DatabaseError::NotFound)), + } + } + + #[tracing::instrument(skip_all)] + async fn query(&self, filters: Vec) -> Result, IndexedDBError> { + let ids = self.indexes.query(filters.clone()).await; + + let tx = self + .db + .transaction_on_one_with_mode(EVENTS_CF, IdbTransactionMode::Readonly)?; + let store = tx.object_store(EVENTS_CF)?; + + let mut events: Vec = Vec::with_capacity(ids.len()); + + for event_id in ids.into_iter() { + let key = JsValue::from(event_id.to_hex()); + if let Some(jsvalue) = store.get(&key)?.await? 
{ + let event_hex = jsvalue.as_string().ok_or(DatabaseError::NotFound)?; + let bytes = hex::decode(event_hex).map_err(DatabaseError::backend)?; + let event = Event::decode(&bytes).map_err(DatabaseError::backend)?; + events.push(event); + } + } + + Ok(events) + } + + async fn event_ids_by_filters( + &self, + filters: Vec, + ) -> Result, IndexedDBError> { + Ok(self.indexes.query(filters).await) + } + + async fn negentropy_items( + &self, + filter: Filter, + ) -> Result, IndexedDBError> { + let ids = self.indexes.query(vec![filter]).await; + + let tx = self + .db + .transaction_on_one_with_mode(EVENTS_CF, IdbTransactionMode::Readonly)?; + let store = tx.object_store(EVENTS_CF)?; + + let mut events: Vec<(EventId, Timestamp)> = Vec::new(); + + for event_id in ids.into_iter() { + let key = JsValue::from(event_id.to_hex()); + if let Some(jsvalue) = store.get(&key)?.await? { + let event_hex = jsvalue.as_string().ok_or(DatabaseError::NotFound)?; + let bytes = hex::decode(event_hex).map_err(DatabaseError::backend)?; + let raw = RawEvent::decode(&bytes).map_err(DatabaseError::backend)?; + let event_id = EventId::from_slice(&raw.id).map_err(DatabaseError::nostr)?; + events.push((event_id, Timestamp::from(raw.created_at))); + } + } + + Ok(events) + } + + async fn wipe(&self) -> Result<(), IndexedDBError> { + Err(DatabaseError::NotSupported.into()) + } +}); diff --git a/crates/nostr-rocksdb/Cargo.toml b/crates/nostr-rocksdb/Cargo.toml new file mode 100644 index 000000000..bb6dfa26f --- /dev/null +++ b/crates/nostr-rocksdb/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "nostr-rocksdb" +version = "0.1.0" +edition = "2021" +description = "RocksDB Storage backend for Nostr apps" +authors = ["Yuki Kishimoto "] +homepage.workspace = true +repository.workspace = true +license.workspace = true +readme = "README.md" +rust-version = "1.66.0" +keywords = ["nostr", "database", "rocksdb"] + +[dependencies] +async-trait = { workspace = true } +nostr = { workspace = true, features = ["std"] } 
+nostr-database = { workspace = true, features = ["flatbuf"] } +num_cpus = "1.16" +rocksdb = { version = "0.21", default-features = false, features = ["multi-threaded-cf", "snappy"] } +tokio = { workspace = true, features = ["rt-multi-thread", "sync"] } +tracing = { workspace = true, features = ["std", "attributes"] } + +[dev-dependencies] +tokio = { workspace = true, features = ["macros", "rt-multi-thread", "time"] } +tracing-subscriber = { workspace = true, features = ["env-filter"] } diff --git a/crates/nostr-rocksdb/examples/rocksdb.rs b/crates/nostr-rocksdb/examples/rocksdb.rs new file mode 100644 index 000000000..fe835a3e4 --- /dev/null +++ b/crates/nostr-rocksdb/examples/rocksdb.rs @@ -0,0 +1,92 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +use std::time::Duration; + +use nostr::prelude::*; +use nostr_database::NostrDatabase; +use nostr_rocksdb::RocksDatabase; +use tracing_subscriber::fmt::format::FmtSpan; + +#[tokio::main] +async fn main() { + tracing_subscriber::fmt::fmt() + .with_span_events(FmtSpan::CLOSE) + .init(); + + let secret_key = + SecretKey::from_bech32("nsec1j4c6269y9w0q2er2xjw8sv2ehyrtfxq3jwgdlxj6qfn8z4gjsq5qfvfk99") + .unwrap(); + let keys_a = Keys::new(secret_key); + println!("Pubkey A: {}", keys_a.public_key()); + + let secret_key = + SecretKey::from_bech32("nsec1ufnus6pju578ste3v90xd5m2decpuzpql2295m3sknqcjzyys9ls0qlc85") + .unwrap(); + let keys_b = Keys::new(secret_key); + println!("Pubkey B: {}", keys_b.public_key()); + + let database = RocksDatabase::open("./db/rocksdb").await.unwrap(); + + println!("Events stored: {}", database.count().await.unwrap()); + + /* for i in 0..100_000 { + let event = EventBuilder::new_text_note(format!("Event #{i}"), &[]) + .to_event(&keys_a) + .unwrap(); + database.save_event(&event).await.unwrap(); + + let event = EventBuilder::new_text_note( + format!("Reply to event #{i}"), + &[ + Tag::Event(event.id, None, None), + Tag::PubKey(event.pubkey, None), + ], + 
) + .to_event(&keys_b) + .unwrap(); + database.save_event(&event).await.unwrap(); + } + + for i in 0..10 { + let metadata = Metadata::new().name(format!("Name #{i}")); + let event = EventBuilder::set_metadata(metadata) + .to_event(&keys_a) + .unwrap(); + database.save_event(&event).await.unwrap(); + } + + for i in 0..500_000 { + let event = EventBuilder::new( + Kind::Custom(123), + "Custom with d tag", + &[Tag::Identifier(format!("myid{i}"))], + ) + .to_event(&keys_a) + .unwrap(); + database.save_event(&event).await.unwrap(); + } */ + + /* let event_id = EventId::all_zeros(); + database.event_id_seen(event_id, Some(Url::parse("wss://relay.damus.io").unwrap())).await.unwrap(); + database.event_id_seen(event_id, Some(Url::parse("wss://relay.nostr.info").unwrap())).await.unwrap(); + database.event_id_seen(event_id, Some(Url::parse("wss://relay.damus.io").unwrap())).await.unwrap(); + + let relays = database.event_recently_seen_on_relays(event_id).await.unwrap(); + println!("Seen on: {relays:?}"); */ + + let events = database + .query(vec![Filter::new() + .kinds(vec![Kind::Metadata, Kind::Custom(123), Kind::TextNote]) + .limit(20) + //.kind(Kind::Custom(123)) + //.identifier("myid5000") + .author(keys_a.public_key())]) + .await + .unwrap(); + println!("Got {} events", events.len()); + + loop { + tokio::time::sleep(Duration::from_secs(30)).await + } +} diff --git a/crates/nostr-rocksdb/src/lib.rs b/crates/nostr-rocksdb/src/lib.rs new file mode 100644 index 000000000..6f5a4ec19 --- /dev/null +++ b/crates/nostr-rocksdb/src/lib.rs @@ -0,0 +1,319 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +//! 
RocksDB Storage backend for Nostr SDK + +#![forbid(unsafe_code)] +#![warn(missing_docs)] +#![warn(rustdoc::bare_urls)] + +use std::collections::HashSet; +use std::path::Path; +use std::sync::Arc; + +pub extern crate nostr; +pub extern crate nostr_database as database; + +use async_trait::async_trait; +use nostr::event::raw::RawEvent; +use nostr::{Event, EventId, Filter, FiltersMatchEvent, Timestamp, Url}; +use nostr_database::{ + Backend, DatabaseError, DatabaseIndexes, DatabaseOptions, EventIndexResult, FlatBufferBuilder, + FlatBufferDecode, FlatBufferEncode, NostrDatabase, +}; +use rocksdb::{ + BoundColumnFamily, ColumnFamilyDescriptor, DBCompactionStyle, DBCompressionType, IteratorMode, + OptimisticTransactionDB, Options, WriteBatchWithTransaction, +}; +use tokio::sync::RwLock; + +mod ops; + +const EVENTS_CF: &str = "events"; +const EVENTS_SEEN_BY_RELAYS_CF: &str = "event-seen-by-relays"; + +/// RocksDB Nostr Database +#[derive(Debug, Clone)] +pub struct RocksDatabase { + db: Arc, + indexes: DatabaseIndexes, + fbb: Arc>>, +} + +fn default_opts() -> rocksdb::Options { + let mut opts = Options::default(); + opts.set_keep_log_file_num(10); + opts.set_max_open_files(16); + opts.set_compaction_style(DBCompactionStyle::Level); + opts.set_compression_type(DBCompressionType::Snappy); + opts.set_target_file_size_base(64 * 1024 * 1024); // 64 MB + opts.set_write_buffer_size(64 * 1024 * 1024); // 64 MB + opts.set_enable_write_thread_adaptive_yield(true); + opts.set_disable_auto_compactions(false); + opts.increase_parallelism(num_cpus::get() as i32); + opts +} + +fn column_families() -> Vec { + let mut relay_urls_opts: Options = default_opts(); + relay_urls_opts.set_merge_operator_associative( + "relay_urls_merge_operator", + ops::relay_urls_merge_operator, + ); + + vec![ + ColumnFamilyDescriptor::new(EVENTS_CF, default_opts()), + ColumnFamilyDescriptor::new(EVENTS_SEEN_BY_RELAYS_CF, relay_urls_opts), + ] +} + +impl RocksDatabase { + /// Open RocksDB store + pub async fn 
open

(path: P) -> Result + where + P: AsRef, + { + let path: &Path = path.as_ref(); + + tracing::debug!("Opening {}", path.display()); + + let mut db_opts = default_opts(); + db_opts.create_if_missing(true); + db_opts.create_missing_column_families(true); + + let db = OptimisticTransactionDB::open_cf_descriptors(&db_opts, path, column_families()) + .map_err(DatabaseError::backend)?; + + match db.live_files() { + Ok(live_files) => tracing::info!( + "{}: {} SST files, {} GB, {} Grows", + path.display(), + live_files.len(), + live_files.iter().map(|f| f.size).sum::() as f64 / 1e9, + live_files.iter().map(|f| f.num_entries).sum::() as f64 / 1e9 + ), + Err(_) => tracing::warn!("Impossible to get live files"), + }; + + let this = Self { + db: Arc::new(db), + indexes: DatabaseIndexes::new(), + fbb: Arc::new(RwLock::new(FlatBufferBuilder::with_capacity(70_000))), + }; + + this.build_indexes().await?; + + Ok(this) + } + + fn cf_handle(&self, name: &str) -> Result, DatabaseError> { + self.db.cf_handle(name).ok_or(DatabaseError::NotFound) + } + + #[tracing::instrument(skip_all)] + async fn build_indexes(&self) -> Result<(), DatabaseError> { + let cf = self.cf_handle(EVENTS_CF)?; + let events = self + .db + .full_iterator_cf(&cf, IteratorMode::Start) + .flatten() + .filter_map(|(_, value)| RawEvent::decode(&value).ok()); + self.indexes.bulk_load(events).await; + Ok(()) + } +} + +#[async_trait] +impl NostrDatabase for RocksDatabase { + type Err = DatabaseError; + + fn backend(&self) -> Backend { + Backend::RocksDB + } + + fn opts(&self) -> DatabaseOptions { + DatabaseOptions::default() + } + + async fn count(&self) -> Result { + let this = self.clone(); + tokio::task::spawn_blocking(move || { + let cf = this.cf_handle(EVENTS_CF)?; + Ok(this + .db + .full_iterator_cf(&cf, IteratorMode::Start) + .flatten() + .count()) + }) + .await + .unwrap() + } + + #[tracing::instrument(skip_all, level = "trace")] + async fn save_event(&self, event: &Event) -> Result { + // Index event + let 
EventIndexResult { + to_store, + to_discard, + } = self.indexes.index_event(event).await; + + if to_store { + // Acquire FlatBuffers Builder + let mut fbb = self.fbb.write().await; + + tokio::task::block_in_place(|| { + // Get Column Families + let events_cf = self.cf_handle(EVENTS_CF)?; + + // Serialize key and value + let key: &[u8] = event.id.as_bytes(); + let value: &[u8] = event.encode(&mut fbb); + + // Prepare write batch + let mut batch = WriteBatchWithTransaction::default(); + + // Save event + batch.put_cf(&events_cf, key, value); + + // Discard events no longer needed + for event_id in to_discard.into_iter() { + batch.delete_cf(&events_cf, event_id); + } + + // Write batch changes + self.db.write(batch).map_err(DatabaseError::backend) + })?; + + Ok(true) + } else { + Ok(false) + } + } + + async fn has_event_already_been_saved(&self, event_id: EventId) -> Result { + let cf = self.cf_handle(EVENTS_CF)?; + Ok(self.db.key_may_exist_cf(&cf, event_id.as_bytes())) + } + + async fn has_event_already_been_seen(&self, event_id: EventId) -> Result { + let cf = self.cf_handle(EVENTS_SEEN_BY_RELAYS_CF)?; + Ok(self.db.key_may_exist_cf(&cf, event_id.as_bytes())) + } + + async fn event_id_seen(&self, event_id: EventId, relay_url: Url) -> Result<(), Self::Err> { + let mut fbb = self.fbb.write().await; + let cf = self.cf_handle(EVENTS_SEEN_BY_RELAYS_CF)?; + let value: HashSet = { + let mut set = HashSet::with_capacity(1); + set.insert(relay_url); + set + }; + self.db + .merge_cf(&cf, event_id, value.encode(&mut fbb)) + .map_err(DatabaseError::backend) + } + + async fn event_recently_seen_on_relays( + &self, + event_id: EventId, + ) -> Result>, Self::Err> { + let cf = self.cf_handle(EVENTS_SEEN_BY_RELAYS_CF)?; + match self + .db + .get_pinned_cf(&cf, event_id) + .map_err(DatabaseError::backend)? 
+ { + Some(val) => Ok(Some(HashSet::decode(&val).map_err(DatabaseError::backend)?)), + None => Ok(None), + } + } + + #[tracing::instrument(skip_all)] + async fn event_by_id(&self, event_id: EventId) -> Result { + let this = self.clone(); + tokio::task::spawn_blocking(move || { + let cf = this.cf_handle(EVENTS_CF)?; + match this + .db + .get_pinned_cf(&cf, event_id.as_bytes()) + .map_err(DatabaseError::backend)? + { + Some(event) => Event::decode(&event).map_err(DatabaseError::backend), + None => Err(DatabaseError::NotFound), + } + }) + .await + .map_err(DatabaseError::backend)? + } + + #[tracing::instrument(skip_all)] + async fn query(&self, filters: Vec) -> Result, Self::Err> { + let ids = self.indexes.query(filters.clone()).await; + + let this = self.clone(); + tokio::task::spawn_blocking(move || { + let cf = this.cf_handle(EVENTS_CF)?; + + let mut events: Vec = Vec::with_capacity(ids.len()); + + for v in this + .db + .batched_multi_get_cf(&cf, ids, false) + .into_iter() + .flatten() + .flatten() + { + let event: Event = Event::decode(&v).map_err(DatabaseError::backend)?; + if filters.match_event(&event) { + events.push(event); + } + } + + Ok(events) + }) + .await + .map_err(DatabaseError::backend)? 
+ } + + async fn event_ids_by_filters( + &self, + filters: Vec, + ) -> Result, Self::Err> { + Ok(self.indexes.query(filters).await) + } + + async fn negentropy_items( + &self, + filter: Filter, + ) -> Result, Self::Err> { + let ids = self.indexes.query(vec![filter.clone()]).await; + + let this = self.clone(); + tokio::task::spawn_blocking(move || { + let cf = this.cf_handle(EVENTS_CF)?; + + let mut event_ids: Vec<(EventId, Timestamp)> = Vec::new(); + + for v in this + .db + .batched_multi_get_cf(&cf, ids, false) + .into_iter() + .flatten() + .flatten() + { + let event: Event = Event::decode(&v).map_err(DatabaseError::backend)?; + if filter.match_event(&event) { + event_ids.push((event.id, event.created_at)); + } + } + + Ok(event_ids) + }) + .await + .map_err(DatabaseError::backend)? + } + + async fn wipe(&self) -> Result<(), Self::Err> { + Err(DatabaseError::NotSupported) + } +} diff --git a/crates/nostr-rocksdb/src/ops.rs b/crates/nostr-rocksdb/src/ops.rs new file mode 100644 index 000000000..50a1ff7ae --- /dev/null +++ b/crates/nostr-rocksdb/src/ops.rs @@ -0,0 +1,28 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +//! 
RocksDB Custom Operators + +use std::collections::HashSet; + +use nostr::Url; +use nostr_database::{FlatBufferBuilder, FlatBufferDecode, FlatBufferEncode}; +use rocksdb::MergeOperands; + +pub(crate) fn relay_urls_merge_operator( + _new_key: &[u8], + existing: Option<&[u8]>, + operands: &MergeOperands, +) -> Option> { + let mut existing: HashSet = match existing { + Some(val) => HashSet::decode(val).ok()?, + None => HashSet::with_capacity(operands.len()), + }; + + for operand in operands.into_iter() { + existing.extend(HashSet::decode(operand).ok()?); + } + + let mut fbb = FlatBufferBuilder::with_capacity(existing.len() * 32 * 2); // Check capacity size if correct + Some(existing.encode(&mut fbb).to_vec()) +} diff --git a/crates/nostr-sdk-net/Cargo.toml b/crates/nostr-sdk-net/Cargo.toml index 54ac975c5..b306f0478 100644 --- a/crates/nostr-sdk-net/Cargo.toml +++ b/crates/nostr-sdk-net/Cargo.toml @@ -3,12 +3,12 @@ name = "nostr-sdk-net" version = "0.25.0" edition = "2021" description = "Nostr SDK Network library." -authors = ["Yuki Kishimoto "] +authors = ["Yuki Kishimoto "] homepage.workspace = true repository.workspace = true license.workspace = true readme = "README.md" -rust-version.workspace = true +rust-version = "1.64.0" keywords = ["nostr", "sdk", "net"] [dependencies] diff --git a/crates/nostr-sdk/Cargo.toml b/crates/nostr-sdk/Cargo.toml index 34fd62fb2..93aa55ea5 100644 --- a/crates/nostr-sdk/Cargo.toml +++ b/crates/nostr-sdk/Cargo.toml @@ -3,12 +3,12 @@ name = "nostr-sdk" version = "0.25.0" edition = "2021" description = "High level Nostr client library." 
-authors = ["Yuki Kishimoto "] +authors = ["Yuki Kishimoto "] homepage.workspace = true repository.workspace = true license.workspace = true readme = "README.md" -rust-version.workspace = true +rust-version = "1.64.0" keywords = ["nostr", "sdk"] [package.metadata.docs.rs] @@ -18,6 +18,9 @@ rustdoc-args = ["--cfg", "docsrs"] [features] default = ["all-nips"] blocking = ["async-utility/blocking", "nostr/blocking"] +rocksdb = ["dep:nostr-rocksdb"] +sqlite = ["dep:nostr-sqlite"] +indexeddb = ["dep:nostr-indexeddb"] all-nips = ["nip04", "nip05", "nip06", "nip11", "nip46", "nip47"] nip03 = ["nostr/nip03"] nip04 = ["nostr/nip04"] @@ -30,16 +33,20 @@ nip47 = ["nostr/nip47"] [dependencies] async-utility = "0.1" -nostr = { version = "0.25", path = "../nostr", default-features = false, features = ["std"] } +nostr = { workspace = true, features = ["std"] } +nostr-database = { workspace = true } nostr-sdk-net = { version = "0.25", path = "../nostr-sdk-net" } once_cell = { workspace = true } thiserror = { workspace = true } tracing = { workspace = true, features = ["std"] } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] +nostr-rocksdb = { version = "0.1", path = "../nostr-rocksdb", optional = true } +nostr-sqlite = { version = "0.1", path = "../nostr-sqlite", optional = true } tokio = { workspace = true, features = ["rt-multi-thread", "time", "macros", "sync"] } [target.'cfg(target_arch = "wasm32")'.dependencies] +nostr-indexeddb = { version = "0.1", path = "../nostr-indexeddb", optional = true } tokio = { workspace = true, features = ["rt", "macros", "sync"] } [dev-dependencies] @@ -73,9 +80,17 @@ required-features = ["all-nips"] name = "client-stop" required-features = ["all-nips"] +[[example]] +name = "rocksdb" +required-features = ["all-nips", "rocksdb"] + [[example]] name = "shutdown-on-drop" +[[example]] +name = "sqlite" +required-features = ["all-nips", "sqlite"] + [[example]] name = "subscriptions" required-features = ["all-nips"] diff --git 
a/crates/nostr-sdk/README.md b/crates/nostr-sdk/README.md index 2b2b25d79..a08b1a1c5 100644 --- a/crates/nostr-sdk/README.md +++ b/crates/nostr-sdk/README.md @@ -17,8 +17,9 @@ If you're writing a typical Nostr client or bot, this is likely the crate you ne However, the crate is designed in a modular way and depends on several other lower-level crates. If you're attempting something more custom, you might be interested in these: -- [`nostr`](https://crates.io/crates/nostr): Rust implementation of Nostr protocol -- [`nostr-sdk-net`](https://crates.io/crates/nostr-sdk-net): Nostr SDK Network library +* [`nostr`](https://crates.io/crates/nostr): Rust implementation of Nostr protocol +* [`nostr-database`](https://crates.io/crates/nostr-database): Database for Nostr apps +* [`nostr-sdk-net`](https://crates.io/crates/nostr-sdk-net): Nostr SDK Network library ## Getting started @@ -123,6 +124,8 @@ The following crate feature flags are available: | Feature | Default | Description | | ------------------- | :-----: | ---------------------------------------------------------------------------------------- | | `blocking` | No | Needed to use `NIP-05` and `NIP-11` features in not async/await context | +| `rocksdb` | No | Enable RocksDB Storage backend | +| `indexeddb` | No | Enable Web's IndexedDb Storage backend | | `all-nips` | Yes | Enable all NIPs | | `nip03` | No | Enable NIP-03: OpenTimestamps Attestations for Events | | `nip04` | Yes | Enable NIP-04: Encrypted Direct Message | diff --git a/crates/nostr-sdk/examples/negentropy.rs b/crates/nostr-sdk/examples/negentropy.rs index 7cf8eca96..e81957844 100644 --- a/crates/nostr-sdk/examples/negentropy.rs +++ b/crates/nostr-sdk/examples/negentropy.rs @@ -1,8 +1,6 @@ // Copyright (c) 2022-2023 Yuki Kishimoto // Distributed under the MIT software license -use std::time::Duration; - use nostr_sdk::prelude::*; const BECH32_SK: &str = "nsec1ufnus6pju578ste3v90xd5m2decpuzpql2295m3sknqcjzyys9ls0qlc85"; @@ -15,16 +13,15 @@ async fn 
main() -> Result<()> { let my_keys = Keys::new(secret_key); let client = Client::new(&my_keys); - client.add_relay("wss://relay.damus.io", None).await?; + client.add_relay("wss://atl.purplerelay.com", None).await?; client.connect().await; let my_items = Vec::new(); let filter = Filter::new().author(my_keys.public_key()).limit(10); - let relay = client.relay("wss://relay.damus.io").await?; - relay - .reconcilie(filter, my_items, Duration::from_secs(30)) - .await?; + let relay = client.relay("wss://atl.purplerelay.com").await?; + let opts = NegentropyOptions::default().syncrounous(false); + relay.reconcile(filter, my_items, opts).await?; client .handle_notifications(|notification| async { diff --git a/crates/nostr-sdk/examples/rocksdb.rs b/crates/nostr-sdk/examples/rocksdb.rs new file mode 100644 index 000000000..fb936243f --- /dev/null +++ b/crates/nostr-sdk/examples/rocksdb.rs @@ -0,0 +1,39 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +use nostr_sdk::prelude::*; + +const BECH32_SK: &str = "nsec1ufnus6pju578ste3v90xd5m2decpuzpql2295m3sknqcjzyys9ls0qlc85"; + +#[tokio::main] +async fn main() -> Result<()> { + tracing_subscriber::fmt::init(); + + let secret_key = SecretKey::from_bech32(BECH32_SK)?; + let my_keys = Keys::new(secret_key); + + let database = RocksDatabase::open("./db/rocksdb").await?; + let client: Client = ClientBuilder::new(&my_keys).database(database).build(); + + client.add_relay("wss://relay.damus.io", None).await?; + client.add_relay("wss://nostr.wine", None).await?; + client.add_relay("wss://atl.purplerelay.com", None).await?; + + client.connect().await; + + /* // Publish a text note + client.publish_text_note("Hello world", &[]).await?; */ + + // Negentropy reconcile + let filter = Filter::new().author(my_keys.public_key()); + client + .reconcile(filter, NegentropyOptions::default()) + .await?; + + // Query events from database + let filter = Filter::new().author(my_keys.public_key()).limit(10); + 
let events = client.database().query(vec![filter]).await?; + println!("Events: {events:?}"); + + Ok(()) +} diff --git a/crates/nostr-sdk/examples/sqlite.rs b/crates/nostr-sdk/examples/sqlite.rs new file mode 100644 index 000000000..36f28c86b --- /dev/null +++ b/crates/nostr-sdk/examples/sqlite.rs @@ -0,0 +1,39 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +use nostr_sdk::prelude::*; + +const BECH32_SK: &str = "nsec1ufnus6pju578ste3v90xd5m2decpuzpql2295m3sknqcjzyys9ls0qlc85"; + +#[tokio::main] +async fn main() -> Result<()> { + tracing_subscriber::fmt::init(); + + let secret_key = SecretKey::from_bech32(BECH32_SK)?; + let my_keys = Keys::new(secret_key); + + let database = SQLiteDatabase::open("./db/sqlite.db").await?; + let client: Client = ClientBuilder::new(&my_keys).database(database).build(); + + client.add_relay("wss://relay.damus.io", None).await?; + client.add_relay("wss://nostr.wine", None).await?; + client.add_relay("wss://atl.purplerelay.com", None).await?; + + client.connect().await; + + /* // Publish a text note + client.publish_text_note("Hello world", &[]).await?; */ + + // Negentropy reconcile + let filter = Filter::new().author(my_keys.public_key()); + client + .reconcile(filter, NegentropyOptions::default()) + .await?; + + // Query events from database + let filter = Filter::new().author(my_keys.public_key()).limit(10); + let events = client.database().query(vec![filter]).await?; + println!("Events: {events:?}"); + + Ok(()) +} diff --git a/crates/nostr-sdk/src/client/blocking.rs b/crates/nostr-sdk/src/client/blocking.rs index 005b67207..25c729d55 100644 --- a/crates/nostr-sdk/src/client/blocking.rs +++ b/crates/nostr-sdk/src/client/blocking.rs @@ -99,11 +99,6 @@ impl Client { RUNTIME.block_on(async { self.client.shutdown().await }) } - /// Clear already seen events - pub fn clear_already_seen_events(&self) { - RUNTIME.block_on(async { self.client.clear_already_seen_events().await }) - } - pub fn 
notifications(&self) -> broadcast::Receiver { self.client.notifications() } diff --git a/crates/nostr-sdk/src/client/builder.rs b/crates/nostr-sdk/src/client/builder.rs new file mode 100644 index 000000000..fa224bdd5 --- /dev/null +++ b/crates/nostr-sdk/src/client/builder.rs @@ -0,0 +1,63 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +//! Client builder + +use std::sync::Arc; + +use nostr::Keys; +use nostr_database::memory::MemoryDatabase; +use nostr_database::{DynNostrDatabase, IntoNostrDatabase}; + +#[cfg(feature = "nip46")] +use super::RemoteSigner; +use crate::{Client, Options}; + +/// Client builder +pub struct ClientBuilder { + pub(super) keys: Keys, + pub(super) database: Arc, + pub(super) opts: Options, + #[cfg(feature = "nip46")] + pub(super) remote_signer: Option, +} + +impl ClientBuilder { + /// New client builder + pub fn new(keys: &Keys) -> Self { + Self { + keys: keys.clone(), + database: Arc::new(MemoryDatabase::default()), + opts: Options::default(), + #[cfg(feature = "nip46")] + remote_signer: None, + } + } + + /// Set database + pub fn database(mut self, database: D) -> Self + where + D: IntoNostrDatabase, + { + self.database = database.into_nostr_database(); + self + } + + /// Set opts + pub fn opts(mut self, opts: Options) -> Self { + self.opts = opts; + self + } + + /// Set remote signer + #[cfg(feature = "nip46")] + pub fn remote_signer(mut self, remote_signer: RemoteSigner) -> Self { + self.remote_signer = Some(remote_signer); + self + } + + /// Build [`Client`] + pub fn build(self) -> Client { + Client::from_builder(self) + } +} diff --git a/crates/nostr-sdk/src/client/mod.rs b/crates/nostr-sdk/src/client/mod.rs index c244724f5..48b607dc5 100644 --- a/crates/nostr-sdk/src/client/mod.rs +++ b/crates/nostr-sdk/src/client/mod.rs @@ -20,22 +20,27 @@ use nostr::types::metadata::Error as MetadataError; use nostr::url::Url; use nostr::{ ChannelId, ClientMessage, Contact, Event, EventBuilder, EventId, 
Filter, JsonUtil, Keys, Kind, - Metadata, Result, Tag, + Metadata, Result, Tag, Timestamp, }; +use nostr_database::DynNostrDatabase; use nostr_sdk_net::futures_util::Future; use tokio::sync::{broadcast, RwLock}; #[cfg(feature = "blocking")] pub mod blocking; +pub mod builder; pub mod options; #[cfg(feature = "nip46")] pub mod signer; +pub use self::builder::ClientBuilder; pub use self::options::Options; #[cfg(feature = "nip46")] pub use self::signer::remote::RemoteSigner; use crate::relay::pool::{self, Error as RelayPoolError, RelayPool}; -use crate::relay::{FilterOptions, Relay, RelayOptions, RelayPoolNotification, RelaySendOptions}; +use crate::relay::{ + FilterOptions, NegentropyOptions, Relay, RelayOptions, RelayPoolNotification, RelaySendOptions, +}; use crate::util::TryIntoUrl; /// [`Client`] error @@ -164,14 +169,7 @@ impl Client { /// let client = Client::with_opts(&my_keys, opts); /// ``` pub fn with_opts(keys: &Keys, opts: Options) -> Self { - Self { - pool: RelayPool::new(opts.pool), - keys: Arc::new(RwLock::new(keys.clone())), - opts, - dropped: Arc::new(AtomicBool::new(false)), - #[cfg(feature = "nip46")] - remote_signer: None, - } + ClientBuilder::new(keys).opts(opts).build() } /// Create a new NIP46 Client @@ -187,12 +185,21 @@ impl Client { remote_signer: RemoteSigner, opts: Options, ) -> Self { + ClientBuilder::new(app_keys) + .remote_signer(remote_signer) + .opts(opts) + .build() + } + + /// Compose [`Client`] from [`ClientBuilder`] + pub fn from_builder(builder: ClientBuilder) -> Self { Self { - pool: RelayPool::new(opts.pool), - keys: Arc::new(RwLock::new(app_keys.clone())), - opts, + pool: RelayPool::with_database(builder.opts.pool, builder.database), + keys: Arc::new(RwLock::new(builder.keys)), + opts: builder.opts, dropped: Arc::new(AtomicBool::new(false)), - remote_signer: Some(remote_signer), + #[cfg(feature = "nip46")] + remote_signer: builder.remote_signer, } } @@ -218,6 +225,11 @@ impl Client { self.pool.clone() } + /// Get database + 
pub fn database(&self) -> Arc { + self.pool.database() + } + /// Get NIP46 uri #[cfg(feature = "nip46")] pub async fn nostr_connect_uri( @@ -266,9 +278,8 @@ impl Client { } /// Clear already seen events - pub async fn clear_already_seen_events(&self) { - self.pool.clear_already_seen_events().await; - } + #[deprecated] + pub async fn clear_already_seen_events(&self) {} /// Get new notification listener pub fn notifications(&self) -> broadcast::Receiver { @@ -1305,6 +1316,21 @@ impl Client { self.send_event_builder(builder).await } + /// Negentropy reconciliation + pub async fn reconcile(&self, filter: Filter, opts: NegentropyOptions) -> Result<(), Error> { + Ok(self.pool.reconcile(filter, opts).await?) + } + + /// Negentropy reconciliation with items + pub async fn reconcile_with_items( + &self, + filter: Filter, + items: Vec<(EventId, Timestamp)>, + opts: NegentropyOptions, + ) -> Result<(), Error> { + Ok(self.pool.reconcile_with_items(filter, items, opts).await?) + } + /// Get a list of channels pub async fn get_channels(&self, timeout: Option) -> Result, Error> { self.get_events_of(vec![Filter::new().kind(Kind::ChannelCreation)], timeout) diff --git a/crates/nostr-sdk/src/lib.rs b/crates/nostr-sdk/src/lib.rs index 309f8884a..eaf74a25a 100644 --- a/crates/nostr-sdk/src/lib.rs +++ b/crates/nostr-sdk/src/lib.rs @@ -4,6 +4,8 @@ #![forbid(unsafe_code)] #![warn(missing_docs)] #![warn(rustdoc::bare_urls)] +#![allow(unknown_lints)] +#![allow(clippy::arc_with_non_send_sync)] //! High level Nostr client library. 
@@ -16,8 +18,15 @@ compile_error!("`blocking` feature can't be enabled for WASM targets"); pub use nostr::{self, *}; +pub use nostr_database as database; +#[cfg(feature = "indexeddb")] +pub use nostr_indexeddb::{IndexedDBError, WebDatabase}; +#[cfg(feature = "rocksdb")] +pub use nostr_rocksdb::RocksDatabase; #[cfg(feature = "blocking")] use nostr_sdk_net::futures_util::Future; +#[cfg(feature = "sqlite")] +pub use nostr_sqlite::{Error as SQLiteError, SQLiteDatabase}; #[cfg(feature = "blocking")] use once_cell::sync::Lazy; #[cfg(feature = "blocking")] @@ -30,10 +39,11 @@ pub mod util; #[cfg(feature = "blocking")] pub use self::client::blocking; -pub use self::client::{Client, Options}; +pub use self::client::{Client, ClientBuilder, Options}; pub use self::relay::{ - ActiveSubscription, FilterOptions, InternalSubscriptionId, Relay, RelayConnectionStats, - RelayOptions, RelayPoolNotification, RelayPoolOptions, RelaySendOptions, RelayStatus, + ActiveSubscription, FilterOptions, InternalSubscriptionId, NegentropyOptions, Relay, + RelayConnectionStats, RelayOptions, RelayPoolNotification, RelayPoolOptions, RelaySendOptions, + RelayStatus, }; #[cfg(feature = "blocking")] diff --git a/crates/nostr-sdk/src/relay/mod.rs b/crates/nostr-sdk/src/relay/mod.rs index 7032f59fc..b6e53f515 100644 --- a/crates/nostr-sdk/src/relay/mod.rs +++ b/crates/nostr-sdk/src/relay/mod.rs @@ -24,6 +24,7 @@ use nostr::{ ClientMessage, Event, EventId, Filter, JsonUtil, Keys, RawRelayMessage, RelayMessage, SubscriptionId, Timestamp, Url, }; +use nostr_database::{DatabaseError, DynNostrDatabase}; use nostr_sdk_net::futures_util::{Future, SinkExt, StreamExt}; use nostr_sdk_net::{self as net, WsMessage}; use thiserror::Error; @@ -36,7 +37,9 @@ pub mod pool; mod stats; pub use self::limits::Limits; -pub use self::options::{FilterOptions, RelayOptions, RelayPoolOptions, RelaySendOptions}; +pub use self::options::{ + FilterOptions, NegentropyOptions, RelayOptions, RelayPoolOptions, RelaySendOptions, +}; 
use self::options::{MAX_ADJ_RETRY_SEC, MIN_RETRY_SEC}; pub use self::pool::{RelayPoolMessage, RelayPoolNotification}; pub use self::stats::RelayConnectionStats; @@ -55,6 +58,9 @@ pub enum Error { /// Negentropy error #[error(transparent)] Negentropy(#[from] negentropy::Error), + /// Database error + #[error(transparent)] + Database(#[from] DatabaseError), /// Channel timeout #[error("channel timeout")] ChannelTimeout, @@ -256,7 +262,7 @@ pub struct Relay { document: Arc>, opts: RelayOptions, stats: RelayConnectionStats, - // auto_connect_loop_running: Arc, + database: Arc, scheduled_for_stop: Arc, scheduled_for_termination: Arc, pool_sender: Sender, @@ -278,6 +284,7 @@ impl Relay { #[cfg(not(target_arch = "wasm32"))] pub fn new( url: Url, + database: Arc, pool_sender: Sender, notification_sender: broadcast::Sender, proxy: Option, @@ -294,7 +301,7 @@ impl Relay { document: Arc::new(RwLock::new(RelayInformationDocument::new())), opts, stats: RelayConnectionStats::new(), - // auto_connect_loop_running: Arc::new(AtomicBool::new(false)), + database, scheduled_for_stop: Arc::new(AtomicBool::new(false)), scheduled_for_termination: Arc::new(AtomicBool::new(false)), pool_sender, @@ -310,6 +317,7 @@ impl Relay { #[cfg(target_arch = "wasm32")] pub fn new( url: Url, + database: Arc, pool_sender: Sender, notification_sender: broadcast::Sender, opts: RelayOptions, @@ -324,7 +332,7 @@ impl Relay { document: Arc::new(RwLock::new(RelayInformationDocument::new())), opts, stats: RelayConnectionStats::new(), - // auto_connect_loop_running: Arc::new(AtomicBool::new(false)), + database, scheduled_for_stop: Arc::new(AtomicBool::new(false)), scheduled_for_termination: Arc::new(AtomicBool::new(false)), pool_sender, @@ -430,16 +438,6 @@ impl Relay { self.relay_sender.max_capacity() - self.relay_sender.capacity() } - /* fn is_auto_connect_loop_running(&self) -> bool { - self.auto_connect_loop_running.load(Ordering::SeqCst) - } - - fn set_auto_connect_loop_running(&self, value: bool) { - let 
_ = - self.auto_connect_loop_running - .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| Some(value)); - } */ - fn is_scheduled_for_stop(&self) -> bool { self.scheduled_for_stop.load(Ordering::SeqCst) } @@ -784,7 +782,11 @@ impl Relay { if size <= max_size { match RawRelayMessage::from_json(&data) { Ok(msg) => { - tracing::trace!("Received message to {}: {:?}", relay.url, msg); + tracing::trace!( + "Received message from {}: {:?}", + relay.url, + msg + ); if let Err(err) = relay .pool_sender .send(RelayPoolMessage::ReceivedMsg { @@ -1347,7 +1349,7 @@ impl Relay { } /// Get events of filters with custom callback - pub async fn get_events_of_with_callback( + async fn get_events_of_with_callback( &self, filters: Vec, timeout: Duration, @@ -1376,13 +1378,20 @@ impl Relay { } /// Get events of filters + /// + /// Get events from local database and relay pub async fn get_events_of( &self, filters: Vec, timeout: Duration, opts: FilterOptions, ) -> Result, Error> { - let events: Mutex> = Mutex::new(Vec::new()); + let stored_events: Vec = self + .database + .query(filters.clone()) + .await + .unwrap_or_default(); + let events: Mutex> = Mutex::new(stored_events); self.get_events_of_with_callback(filters, timeout, opts, |event| async { let mut events = events.lock().await; events.push(event); @@ -1472,21 +1481,28 @@ impl Relay { } /// Negentropy reconciliation - pub async fn reconcilie( + pub async fn reconcile( &self, filter: Filter, - my_items: Vec<(EventId, Timestamp)>, - timeout: Duration, + items: Vec<(EventId, Timestamp)>, + opts: NegentropyOptions, ) -> Result<(), Error> { if !self.opts.get_read() { return Err(Error::ReadDisabled); } + if !self.is_connected().await + && self.stats.attempts() > 1 + && self.stats.uptime() < MIN_UPTIME + { + return Err(Error::NotConnected); + } + let id_size: usize = 32; - let mut negentropy = Negentropy::new(id_size, Some(2_500))?; + let mut negentropy = Negentropy::new(id_size, Some(4_096))?; - for (id, timestamp) in 
my_items.into_iter() { + for (id, timestamp) in items.into_iter() { let id = Bytes::from_slice(id.as_bytes()); negentropy.add_item(timestamp.as_u64(), id)?; } @@ -1499,8 +1515,9 @@ impl Relay { self.send_msg(open_msg, Some(Duration::from_secs(10))) .await?; + // TODO: improve timeouts let mut notifications = self.notification_sender.subscribe(); - time::timeout(Some(timeout), async { + time::timeout(Some(opts.timeout), async { while let Ok(notification) = notifications.recv().await { if let RelayPoolNotification::Message(url, msg) = notification { if url == self.url { @@ -1511,22 +1528,50 @@ impl Relay { } => { if subscription_id == sub_id { let query: Bytes = Bytes::from_hex(message)?; + let mut have_ids: Vec = Vec::new(); let mut need_ids: Vec = Vec::new(); let msg: Option = negentropy.reconcile_with_ids( &query, - &mut Vec::new(), + &mut have_ids, &mut need_ids, )?; + if opts.bidirectional { + let ids = have_ids.into_iter().filter_map(|id| EventId::from_slice(&id).ok()); + let filter = Filter::new().ids(ids); + let events: Vec = self.database.query(vec![filter]).await?; + if let Err(e) = self.batch_event(events, RelaySendOptions::default()).await { + tracing::error!("Impossible to batch events to {}: {e}", self.url); + } + } + + if need_ids.is_empty() { + tracing::info!("Reconciliation terminated"); + break; + } + let ids = need_ids .into_iter() .filter_map(|id| EventId::from_slice(&id).ok()); let filter = Filter::new().ids(ids); - self.req_events_of( - vec![filter], - Duration::from_secs(120), - FilterOptions::ExitOnEOSE, - ); + if !filter.ids.is_empty() { + if opts.syncrounous { + self.get_events_of( + vec![filter], + Duration::from_secs(30), + FilterOptions::ExitOnEOSE, + ) + .await?; + } else { + self.req_events_of( + vec![filter], + Duration::from_secs(30), + FilterOptions::ExitOnEOSE, + ); + } + } else { + tracing::warn!("negentropy reconciliation: tried to send empty filters to {}", self.url); + } match msg { Some(query) => { @@ -1587,7 +1632,13 @@ impl 
Relay { let pk = Keys::generate(); let filter = Filter::new().author(pk.public_key()); match self - .reconcilie(filter, Vec::new(), Duration::from_secs(5)) + .reconcile( + filter, + Vec::new(), + NegentropyOptions::new() + .timeout(Duration::from_secs(5)) + .syncrounous(false), + ) .await { Ok(_) => Ok(true), diff --git a/crates/nostr-sdk/src/relay/options.rs b/crates/nostr-sdk/src/relay/options.rs index a2357a921..439221325 100644 --- a/crates/nostr-sdk/src/relay/options.rs +++ b/crates/nostr-sdk/src/relay/options.rs @@ -215,11 +215,6 @@ pub struct RelayPoolOptions { pub notification_channel_size: usize, /// Task channel size (default: 1024) pub task_channel_size: usize, - /// Max seen events by Task thread (default: 1_000_000) - /// - /// A lower number can cause receiving in notification channel - /// the same event multiple times - pub task_max_seen_events: usize, /// Shutdown on [RelayPool](super::pool::RelayPool) drop pub shutdown_on_drop: bool, } @@ -229,7 +224,6 @@ impl Default for RelayPoolOptions { Self { notification_channel_size: 1024, task_channel_size: 1024, - task_max_seen_events: 1_000_000, shutdown_on_drop: false, } } @@ -249,3 +243,59 @@ impl RelayPoolOptions { } } } + +/// Negentropy reconciliation options +#[derive(Debug, Clone, Copy)] +pub struct NegentropyOptions { + /// Timeout for reconciliation (default: 30 secs) + pub timeout: Duration, + /// Synchronous (default: true) + /// + /// If `true`, request events and wait for the relay to send them. + /// If `false`, request events but continue the reconciliation + pub syncrounous: bool, + /// Bidirectional Sync (default: false) + /// + /// If `true`, perform the set reconciliation on each side. 
+ pub bidirectional: bool, +} + +impl Default for NegentropyOptions { + fn default() -> Self { + Self { + timeout: Duration::from_secs(30), + syncrounous: true, + bidirectional: false, + } + } +} + +impl NegentropyOptions { + /// New default [`NegentropyOptions`] + pub fn new() -> Self { + Self::default() + } + + /// Timeout for reconciliation (default: 30 secs) + pub fn timeout(mut self, timeout: Duration) -> Self { + self.timeout = timeout; + self + } + + /// Synchronous (default: true) + /// + /// If `true`, request events and wait for the relay to send them. + /// If `false`, request events but continue the reconciliation + pub fn syncrounous(mut self, syncrounous: bool) -> Self { + self.syncrounous = syncrounous; + self + } + + /// Bidirectional Sync (default: false) + /// + /// If `true`, perform the set reconciliation on each side. + pub fn bidirectional(mut self, bidirectional: bool) -> Self { + self.bidirectional = bidirectional; + self + } +} diff --git a/crates/nostr-sdk/src/relay/pool.rs b/crates/nostr-sdk/src/relay/pool.rs index ff520578d..4fe0b80e2 100644 --- a/crates/nostr-sdk/src/relay/pool.rs +++ b/crates/nostr-sdk/src/relay/pool.rs @@ -3,7 +3,7 @@ //! 
Relay Pool -use std::collections::{HashMap, VecDeque}; +use std::collections::HashMap; #[cfg(not(target_arch = "wasm32"))] use std::net::SocketAddr; use std::sync::atomic::{AtomicBool, Ordering}; @@ -16,14 +16,15 @@ use nostr::{ event, ClientMessage, Event, EventId, Filter, JsonUtil, MissingPartialEvent, PartialEvent, RawRelayMessage, RelayMessage, SubscriptionId, Timestamp, Url, }; +use nostr_database::{DatabaseError, DynNostrDatabase, MemoryDatabase}; use thiserror::Error; use tokio::sync::mpsc::{self, Receiver, Sender}; use tokio::sync::{broadcast, Mutex, RwLock}; use super::options::RelayPoolOptions; use super::{ - Error as RelayError, FilterOptions, InternalSubscriptionId, Limits, Relay, RelayOptions, - RelaySendOptions, RelayStatus, + Error as RelayError, FilterOptions, InternalSubscriptionId, Limits, NegentropyOptions, Relay, + RelayOptions, RelaySendOptions, RelayStatus, }; use crate::util::TryIntoUrl; @@ -45,6 +46,9 @@ pub enum Error { /// Message handler error #[error(transparent)] MessageHandler(#[from] MessageHandleError), + /// Database error + #[error(transparent)] + Database(#[from] DatabaseError), /// Thread error #[error(transparent)] Thread(#[from] thread::Error), @@ -81,8 +85,6 @@ pub enum RelayPoolMessage { /// Relay message msg: RawRelayMessage, }, - /// Events sent - BatchEvent(Vec), /// Relay status changed RelayStatus { /// Relay url @@ -118,25 +120,23 @@ pub enum RelayPoolNotification { #[derive(Debug, Clone)] struct RelayPoolTask { + database: Arc, receiver: Arc>>, notification_sender: broadcast::Sender, - events: Arc>>, running: Arc, - max_seen_events: usize, } impl RelayPoolTask { pub fn new( + database: Arc, pool_task_receiver: Receiver, notification_sender: broadcast::Sender, - max_seen_events: usize, ) -> Self { Self { + database, receiver: Arc::new(Mutex::new(pool_task_receiver)), - events: Arc::new(Mutex::new(VecDeque::new())), notification_sender, running: Arc::new(AtomicBool::new(false)), - max_seen_events, } } @@ -150,11 +150,6 
@@ impl RelayPoolTask { .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| Some(value)); } - pub async fn clear_already_seen_events(&self) { - let mut events = self.events.lock().await; - events.clear(); - } - pub fn run(&self) { if self.is_running() { tracing::warn!("Relay Pool Task is already running!") @@ -167,8 +162,8 @@ impl RelayPoolTask { while let Some(msg) = receiver.recv().await { match msg { RelayPoolMessage::ReceivedMsg { relay_url, msg } => { - match this.handle_relay_message(msg).await { - Ok(msg) => { + match this.handle_relay_message(relay_url.clone(), msg).await { + Ok(Some(msg)) => { let _ = this.notification_sender.send( RelayPoolNotification::Message( relay_url.clone(), @@ -179,28 +174,42 @@ impl RelayPoolTask { match msg { RelayMessage::Event { event, .. } => { // Check if event was already seen - if this.add_event(event.id).await { - let notification = RelayPoolNotification::Event( - relay_url, - event.as_ref().clone(), - ); - let _ = this.notification_sender.send(notification); + match this.database.has_event_already_been_seen(event.id).await + { + Ok(seen) => { + if !seen { + let _ = + this.notification_sender.send(RelayPoolNotification::Event( + relay_url, + *event.clone(), + )); + } + } + Err(e) => tracing::error!( + "Impossible to check if event {} was already seen: {e}", + event.id + ), } } RelayMessage::Notice { message } => { tracing::warn!("Notice from {relay_url}: {message}") } + RelayMessage::Ok { + event_id, + status, + message, + } => { + tracing::debug!("Received OK from {relay_url} for event {event_id}: status={status}, message={message}"); + } _ => (), } } + Ok(None) => (), Err(e) => tracing::error!( "Impossible to handle relay message from {relay_url}: {e}" ), } } - RelayPoolMessage::BatchEvent(ids) => { - this.add_events(ids).await; - } RelayPoolMessage::RelayStatus { url, status } => { let _ = this .notification_sender @@ -236,7 +245,11 @@ impl RelayPoolTask { } } - async fn handle_relay_message(&self, msg: 
RawRelayMessage) -> Result { + async fn handle_relay_message( + &self, + relay_url: Url, + msg: RawRelayMessage, + ) -> Result, Error> { match msg { RawRelayMessage::Event { subscription_id, @@ -245,6 +258,28 @@ impl RelayPoolTask { // Deserialize partial event (id, pubkey and sig) let partial_event: PartialEvent = PartialEvent::from_json(event.to_string())?; + // Set event as seen by relay + if let Err(e) = self + .database + .event_id_seen(partial_event.id, relay_url) + .await + { + tracing::error!( + "Impossible to set event {} as seen by relay: {e}", + partial_event.id + ); + } + + // Check if event was already saved + if self + .database + .has_event_already_been_saved(partial_event.id) + .await? + { + tracing::trace!("Event {} already saved into database", partial_event.id); + return Ok(None); + } + // Verify signature partial_event.verify_signature()?; @@ -263,40 +298,16 @@ impl RelayPoolTask { // Verify event ID event.verify_id()?; + // Save event + self.database.save_event(&event).await?; + // Compose RelayMessage - Ok(RelayMessage::Event { + Ok(Some(RelayMessage::Event { subscription_id: SubscriptionId::new(subscription_id), event: Box::new(event), - }) - } - m => Ok(RelayMessage::try_from(m)?), - } - } - - async fn add_event(&self, event_id: EventId) -> bool { - let mut events = self.events.lock().await; - if events.contains(&event_id) { - false - } else { - while events.len() >= self.max_seen_events { - events.pop_front(); - } - events.push_back(event_id); - true - } - } - - async fn add_events(&self, ids: Vec) { - if !ids.is_empty() { - let mut events = self.events.lock().await; - for event_id in ids.into_iter() { - if !events.contains(&event_id) { - while events.len() >= self.max_seen_events { - events.pop_front(); - } - events.push_back(event_id); - } + })) } + m => Ok(Some(RelayMessage::try_from(m)?)), } } } @@ -304,6 +315,7 @@ impl RelayPoolTask { /// Relay Pool #[derive(Debug, Clone)] pub struct RelayPool { + database: Arc, relays: Arc>>, 
pool_task_sender: Sender, notification_sender: broadcast::Sender, @@ -337,16 +349,22 @@ impl Drop for RelayPool { impl RelayPool { /// Create new `RelayPool` pub fn new(opts: RelayPoolOptions) -> Self { + Self::with_database(opts, Arc::new(MemoryDatabase::default())) + } + + /// New with database + pub fn with_database(opts: RelayPoolOptions, database: Arc) -> Self { let (notification_sender, _) = broadcast::channel(opts.notification_channel_size); let (pool_task_sender, pool_task_receiver) = mpsc::channel(opts.task_channel_size); let relay_pool_task = RelayPoolTask::new( + database.clone(), pool_task_receiver, notification_sender.clone(), - opts.task_max_seen_events, ); let pool = Self { + database, relays: Arc::new(RwLock::new(HashMap::new())), pool_task_sender, notification_sender, @@ -393,16 +411,16 @@ impl RelayPool { Ok(()) } - /// Clear already seen events - pub async fn clear_already_seen_events(&self) { - self.pool_task.clear_already_seen_events().await; - } - /// Get new notification listener pub fn notifications(&self) -> broadcast::Receiver { self.notification_sender.subscribe() } + /// Get database + pub fn database(&self) -> Arc { + self.database.clone() + } + /// Get relays pub async fn relays(&self) -> HashMap { let relays = self.relays.read().await; @@ -448,6 +466,7 @@ impl RelayPool { if !relays.contains_key(&url) { let relay = Relay::new( url, + self.database.clone(), self.pool_task_sender.clone(), self.notification_sender.clone(), proxy, @@ -473,6 +492,7 @@ impl RelayPool { if !relays.contains_key(&url) { let relay = Relay::new( url, + self.database.clone(), self.pool_task_sender.clone(), self.notification_sender.clone(), opts, @@ -499,16 +519,6 @@ impl RelayPool { Ok(()) } - async fn set_events_as_sent(&self, ids: Vec) { - if let Err(e) = self - .pool_task_sender - .send(RelayPoolMessage::BatchEvent(ids)) - .await - { - tracing::error!("{e}"); - }; - } - /// Send client message pub async fn send_msg(&self, msg: ClientMessage, wait: Option) -> 
Result<(), Error> { let relays = self.relays().await; @@ -518,7 +528,7 @@ impl RelayPool { } if let ClientMessage::Event(event) = &msg { - self.set_events_as_sent(vec![event.id]).await; + self.database.save_event(event).await?; } let sent_to_at_least_one_relay: Arc = Arc::new(AtomicBool::new(false)); @@ -562,17 +572,12 @@ impl RelayPool { return Err(Error::NoRelays); } - let ids: Vec = msgs - .iter() - .filter_map(|msg| { - if let ClientMessage::Event(event) = msg { - Some(event.id) - } else { - None - } - }) - .collect(); - self.set_events_as_sent(ids).await; + // Save events into database + for msg in msgs.iter() { + if let ClientMessage::Event(event) = msg { + self.database.save_event(event).await?; + } + } let sent_to_at_least_one_relay: Arc = Arc::new(AtomicBool::new(false)); let mut handles = Vec::new(); @@ -618,7 +623,7 @@ impl RelayPool { let url: Url = url.try_into_url()?; if let ClientMessage::Event(event) = &msg { - self.set_events_as_sent(vec![event.id]).await; + self.database.save_event(event).await?; } let relays = self.relays().await; @@ -638,7 +643,7 @@ impl RelayPool { return Err(Error::NoRelays); } - self.set_events_as_sent(vec![event.id]).await; + self.database.save_event(&event).await?; let sent_to_at_least_one_relay: Arc = Arc::new(AtomicBool::new(false)); let mut handles = Vec::new(); @@ -683,8 +688,10 @@ impl RelayPool { return Err(Error::NoRelays); } - let ids: Vec = events.iter().map(|e| e.id).collect(); - self.set_events_as_sent(ids).await; + // Save events into database + for event in events.iter() { + self.database.save_event(event).await?; + } let sent_to_at_least_one_relay: Arc = Arc::new(AtomicBool::new(false)); let mut handles = Vec::new(); @@ -728,7 +735,7 @@ impl RelayPool { Error: From<::Err>, { let url: Url = url.try_into_url()?; - self.set_events_as_sent(vec![event.id]).await; + self.database.save_event(&event).await?; let relays = self.relays().await; if let Some(relay) = relays.get(&url) { Ok(relay.send_event(event, 
opts).await?) @@ -765,13 +772,20 @@ impl RelayPool { } /// Get events of filters + /// + /// Get events from local database and relays pub async fn get_events_of( &self, filters: Vec, timeout: Duration, opts: FilterOptions, ) -> Result, Error> { - let events: Arc>> = Arc::new(Mutex::new(Vec::new())); + let stored_events: Vec = self + .database + .query(filters.clone()) + .await + .unwrap_or_default(); + let events: Arc>> = Arc::new(Mutex::new(stored_events)); let mut handles = Vec::new(); let relays = self.relays().await; for (url, relay) in relays.into_iter() { @@ -797,7 +811,9 @@ impl RelayPool { Ok(events.lock_owned().await.clone()) } - /// Request events of filter. All events will be sent to notification listener + /// Request events of filter. + /// + /// If the events aren't already stored in the database, will be sent to notification listener /// until the EOSE "end of stored events" message is received from the relay. pub async fn req_events_of( &self, @@ -861,20 +877,27 @@ impl RelayPool { } /// Negentropy reconciliation - pub async fn reconcilie( + pub async fn reconcile(&self, filter: Filter, opts: NegentropyOptions) -> Result<(), Error> { + let items: Vec<(EventId, Timestamp)> = + self.database.negentropy_items(filter.clone()).await?; + self.reconcile_with_items(filter, items, opts).await + } + + /// Negentropy reconciliation with custom items + pub async fn reconcile_with_items( &self, filter: Filter, - my_items: Vec<(EventId, Timestamp)>, - timeout: Duration, + items: Vec<(EventId, Timestamp)>, + opts: NegentropyOptions, ) -> Result<(), Error> { let mut handles = Vec::new(); let relays = self.relays().await; for (url, relay) in relays.into_iter() { let filter = filter.clone(); - let my_items = my_items.clone(); + let my_items = items.clone(); let handle = thread::spawn(async move { - if let Err(e) = relay.reconcilie(filter, my_items, timeout).await { - tracing::error!("Failed to get reconcilie with {url}: {e}"); + if let Err(e) = 
relay.reconcile(filter, my_items, opts).await { + tracing::error!("Failed to get reconcile with {url}: {e}"); } }); handles.push(handle); diff --git a/crates/nostr-sqlite/Cargo.toml b/crates/nostr-sqlite/Cargo.toml new file mode 100644 index 000000000..06964aa91 --- /dev/null +++ b/crates/nostr-sqlite/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "nostr-sqlite" +version = "0.1.0" +edition = "2021" +description = "SQLite Storage backend for Nostr apps" +authors = ["Yuki Kishimoto "] +homepage.workspace = true +repository.workspace = true +license.workspace = true +readme = "README.md" +rust-version = "1.64.0" +keywords = ["nostr", "database", "sqlite"] + +[dependencies] +async-trait = { workspace = true } +deadpool-sqlite = "0.5" +nostr = { workspace = true, features = ["std"] } +nostr-database = { workspace = true, features = ["flatbuf"] } +rusqlite = { version = "0.28", features = ["bundled"] } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["rt-multi-thread", "sync"] } +tracing = { workspace = true, features = ["std", "attributes"] } + +[dev-dependencies] +tokio = { workspace = true, features = ["macros", "rt-multi-thread", "time"] } +tracing-subscriber = { workspace = true, features = ["env-filter"] } diff --git a/crates/nostr-sqlite/README.md b/crates/nostr-sqlite/README.md new file mode 100644 index 000000000..0d0a64f11 --- /dev/null +++ b/crates/nostr-sqlite/README.md @@ -0,0 +1,15 @@ +# Nostr SQLite + +## State + +**This library is in an ALPHA state**, things that are implemented generally work but the API will change in breaking ways. 
+ +## License + +This project is distributed under the MIT software license - see the [LICENSE](../../LICENSE) file for details + +## Donations + +⚡ Tips: + +⚡ Lightning Address: yuki@getalby.com \ No newline at end of file diff --git a/crates/nostr-sqlite/examples/sqlite.rs b/crates/nostr-sqlite/examples/sqlite.rs new file mode 100644 index 000000000..f91d5dc11 --- /dev/null +++ b/crates/nostr-sqlite/examples/sqlite.rs @@ -0,0 +1,104 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +use std::time::Duration; + +use nostr::prelude::*; +use nostr_database::NostrDatabase; +use nostr_sqlite::SQLiteDatabase; +use tracing_subscriber::fmt::format::FmtSpan; + +#[tokio::main] +async fn main() { + tracing_subscriber::fmt::fmt() + .with_span_events(FmtSpan::CLOSE) + .init(); + + let secret_key = + SecretKey::from_bech32("nsec1j4c6269y9w0q2er2xjw8sv2ehyrtfxq3jwgdlxj6qfn8z4gjsq5qfvfk99") + .unwrap(); + let keys_a = Keys::new(secret_key); + println!("Pubkey A: {}", keys_a.public_key()); + + let secret_key = + SecretKey::from_bech32("nsec1ufnus6pju578ste3v90xd5m2decpuzpql2295m3sknqcjzyys9ls0qlc85") + .unwrap(); + let keys_b = Keys::new(secret_key); + println!("Pubkey B: {}", keys_b.public_key()); + + let database = SQLiteDatabase::open("./db/sqlite.db").await.unwrap(); + + println!("Events stored: {}", database.count().await.unwrap()); + + /* for i in 0..100_000 { + let event = EventBuilder::new_text_note(format!("Event #{i}"), &[]) + .to_event(&keys_a) + .unwrap(); + database.save_event(&event).await.unwrap(); + + let event = EventBuilder::new_text_note( + format!("Reply to event #{i}"), + &[ + Tag::Event(event.id, None, None), + Tag::PubKey(event.pubkey, None), + ], + ) + .to_event(&keys_b) + .unwrap(); + database.save_event(&event).await.unwrap(); + } */ + + for i in 0..10 { + let metadata = Metadata::new().name(format!("Name #{i}")); + let event = EventBuilder::set_metadata(metadata) + .to_event(&keys_a) + .unwrap(); + 
database.save_event(&event).await.unwrap(); + } + + /* for i in 0..500_000 { + let event = EventBuilder::new( + Kind::Custom(123), + "Custom with d tag", + &[Tag::Identifier(format!("myid{i}"))], + ) + .to_event(&keys_a) + .unwrap(); + database.save_event(&event).await.unwrap(); + } */ + + let event_id = EventId::all_zeros(); + database + .event_id_seen(event_id, Url::parse("wss://relay.damus.io").unwrap()) + .await + .unwrap(); + database + .event_id_seen(event_id, Url::parse("wss://relay.nostr.info").unwrap()) + .await + .unwrap(); + database + .event_id_seen(event_id, Url::parse("wss://relay.damus.io").unwrap()) + .await + .unwrap(); + + let relays = database + .event_recently_seen_on_relays(event_id) + .await + .unwrap(); + println!("Seen on: {relays:?}"); + + let events = database + .query(vec![Filter::new() + .kinds(vec![Kind::Metadata, Kind::Custom(123), Kind::TextNote]) + .limit(20) + //.kind(Kind::Custom(123)) + //.identifier("myid5000") + .author(keys_a.public_key())]) + .await + .unwrap(); + println!("Got {} events", events.len()); + + loop { + tokio::time::sleep(Duration::from_secs(30)).await + } +} diff --git a/crates/nostr-sqlite/migrations/001_init.sql b/crates/nostr-sqlite/migrations/001_init.sql new file mode 100644 index 000000000..112c5aa40 --- /dev/null +++ b/crates/nostr-sqlite/migrations/001_init.sql @@ -0,0 +1,20 @@ +-- Database settings +PRAGMA encoding = "UTF-8"; +PRAGMA journal_mode=WAL; +PRAGMA main.synchronous=NORMAL; +PRAGMA foreign_keys = ON; +PRAGMA application_id = 1654008667; +PRAGMA user_version = 1; -- Schema version + +CREATE TABLE IF NOT EXISTS events ( + event_id BLOB PRIMARY KEY NOT NULL, + event BLOB NOT NULL +); + +CREATE TABLE IF NOT EXISTS event_seen_by_relays ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + event_id BLOB NOT NULL, + relay_url TEXT NOT NULL +); + +CREATE UNIQUE INDEX IF NOT EXISTS event_seen_by_relays_index ON event_seen_by_relays(event_id,relay_url); \ No newline at end of file diff --git 
a/crates/nostr-sqlite/src/error.rs b/crates/nostr-sqlite/src/error.rs new file mode 100644 index 000000000..b72f1177d --- /dev/null +++ b/crates/nostr-sqlite/src/error.rs @@ -0,0 +1,52 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +use deadpool_sqlite::{CreatePoolError, InteractError, PoolError}; +use nostr_database::{flatbuffers, DatabaseError}; +use thiserror::Error; + +use crate::migration::MigrationError; + +/// Store error +#[derive(Debug, Error)] +pub enum Error { + /// Sqlite error + #[error(transparent)] + Sqlite(#[from] rusqlite::Error), + /// Pool error + #[error(transparent)] + CreateDeadPool(#[from] CreatePoolError), + /// Pool error + #[error(transparent)] + DeadPool(#[from] PoolError), + /// Pool error + #[error("{0}")] + DeadPoolInteract(String), + /// Migration error + #[error(transparent)] + Migration(#[from] MigrationError), + /// Database error + #[error(transparent)] + Database(#[from] DatabaseError), + /// Flatbuffers error + #[error(transparent)] + Flatbuffers(#[from] flatbuffers::Error), + /// Url error + #[error(transparent)] + Url(#[from] nostr::url::ParseError), + /// Not found + #[error("sqlite: {0} not found")] + NotFound(String), +} + +impl From for Error { + fn from(e: InteractError) -> Self { + Self::DeadPoolInteract(e.to_string()) + } +} + +impl From for DatabaseError { + fn from(e: Error) -> Self { + Self::backend(e) + } +} diff --git a/crates/nostr-sqlite/src/lib.rs b/crates/nostr-sqlite/src/lib.rs new file mode 100644 index 000000000..643d9c22f --- /dev/null +++ b/crates/nostr-sqlite/src/lib.rs @@ -0,0 +1,311 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +//! 
SQLite Storage backend for Nostr SDK + +#![forbid(unsafe_code)] +#![warn(missing_docs)] +#![warn(rustdoc::bare_urls)] + +use std::collections::HashSet; +use std::path::Path; +use std::sync::Arc; + +pub extern crate nostr; +pub extern crate nostr_database as database; + +use async_trait::async_trait; +use deadpool_sqlite::{Config, Object, Pool, Runtime}; +use nostr::event::raw::RawEvent; +use nostr::{Event, EventId, Filter, Timestamp, Url}; +use nostr_database::{ + Backend, DatabaseIndexes, DatabaseOptions, EventIndexResult, FlatBufferBuilder, + FlatBufferDecode, FlatBufferEncode, NostrDatabase, +}; +use rusqlite::config::DbConfig; +use tokio::sync::RwLock; + +mod error; +mod migration; + +pub use self::error::Error; +use self::migration::STARTUP_SQL; + +/// SQLite Nostr Database +#[derive(Debug, Clone)] +pub struct SQLiteDatabase { + db: Pool, + indexes: DatabaseIndexes, + fbb: Arc>>, +} + +impl SQLiteDatabase { + /// Open SQLite store + pub async fn open

(path: P) -> Result + where + P: AsRef, + { + let cfg = Config::new(path.as_ref()); + let pool = cfg.create_pool(Runtime::Tokio1)?; + + // Execute migrations + let conn = pool.get().await?; + migration::run(&conn).await?; + + let this = Self { + db: pool, + indexes: DatabaseIndexes::new(), + fbb: Arc::new(RwLock::new(FlatBufferBuilder::with_capacity(70_000))), + }; + + // Build indexes + this.build_indexes(&conn).await?; + + Ok(this) + } + + async fn acquire(&self) -> Result { + Ok(self.db.get().await?) + } + + #[tracing::instrument(skip_all)] + async fn build_indexes(&self, conn: &Object) -> Result<(), Error> { + let events = conn + .interact(move |conn| { + let mut stmt = conn.prepare_cached("SELECT event FROM events;")?; + let mut rows = stmt.query([])?; + let mut events = HashSet::new(); + while let Ok(Some(row)) = rows.next() { + let buf: Vec = row.get(0)?; + let raw = RawEvent::decode(&buf)?; + events.insert(raw); + } + Ok::, Error>(events) + }) + .await??; + self.indexes.bulk_load(events).await; + Ok(()) + } +} + +#[async_trait] +impl NostrDatabase for SQLiteDatabase { + type Err = Error; + + fn backend(&self) -> Backend { + Backend::SQLite + } + + fn opts(&self) -> DatabaseOptions { + DatabaseOptions::default() + } + + async fn count(&self) -> Result { + let conn = self.acquire().await?; + conn.interact(move |conn| { + let mut stmt = conn.prepare_cached("SELECT COUNT(*) FROM events;")?; + let mut rows = stmt.query([])?; + let row = rows + .next()? + .ok_or_else(|| Error::NotFound("count result".into()))?; + let count: usize = row.get(0)?; + Ok(count) + }) + .await? 
+ } + + #[tracing::instrument(skip_all, level = "trace")] + async fn save_event(&self, event: &Event) -> Result { + // Index event + let EventIndexResult { + to_store, + to_discard, + } = self.indexes.index_event(event).await; + + if !to_discard.is_empty() { + let conn = self.acquire().await?; + conn.interact(move |conn| { + let delete_query = format!( + "DELETE FROM events WHERE {};", + to_discard + .iter() + .map(|id| format!("event_id = '{id}'")) + .collect::>() + .join(" AND ") + ); + conn.execute(&delete_query, []) + }) + .await??; + } + + if to_store { + // Acquire FlatBuffers Builder + let mut fbb = self.fbb.write().await; + + // Encode + let event_id: EventId = event.id; + let value: Vec = event.encode(&mut fbb).to_vec(); + + // Save event + let conn = self.acquire().await?; + conn.interact(move |conn| { + conn.execute( + "INSERT OR IGNORE INTO events (event_id, event) VALUES (?, ?);", + (event_id.to_hex(), value), + ) + }) + .await??; + + Ok(true) + } else { + Ok(false) + } + } + + async fn has_event_already_been_saved(&self, event_id: EventId) -> Result { + let conn = self.acquire().await?; + conn.interact(move |conn| { + let mut stmt = conn.prepare_cached( + "SELECT EXISTS(SELECT 1 FROM events WHERE event_id = ? LIMIT 1);", + )?; + let mut rows = stmt.query([event_id.to_hex()])?; + let exists: u8 = match rows.next()? { + Some(row) => row.get(0)?, + None => 0, + }; + Ok(exists == 1) + }) + .await? + } + + async fn has_event_already_been_seen(&self, event_id: EventId) -> Result { + let conn = self.acquire().await?; + conn.interact(move |conn| { + let mut stmt = conn.prepare_cached( + "SELECT EXISTS(SELECT 1 FROM event_seen_by_relays WHERE event_id = ? LIMIT 1);", + )?; + let mut rows = stmt.query([event_id.to_hex()])?; + let exists: u8 = match rows.next()? { + Some(row) => row.get(0)?, + None => 0, + }; + Ok(exists == 1) + }) + .await? 
+ } + + async fn event_id_seen(&self, event_id: EventId, relay_url: Url) -> Result<(), Self::Err> { + let conn = self.acquire().await?; + conn.interact(move |conn| { + conn.execute( + "INSERT OR IGNORE INTO event_seen_by_relays (event_id, relay_url) VALUES (?, ?);", + (event_id.to_hex(), relay_url.to_string()), + ) + }) + .await??; + Ok(()) + } + + async fn event_recently_seen_on_relays( + &self, + event_id: EventId, + ) -> Result>, Self::Err> { + let conn = self.acquire().await?; + conn.interact(move |conn| { + let mut stmt = conn + .prepare_cached("SELECT relay_url FROM event_seen_by_relays WHERE event_id = ?;")?; + let mut rows = stmt.query([event_id.to_hex()])?; + let mut relays = HashSet::new(); + while let Ok(Some(row)) = rows.next() { + let url: String = row.get(0)?; + relays.insert(Url::parse(&url)?); + } + Ok(Some(relays)) + }) + .await? + } + + #[tracing::instrument(skip_all)] + async fn event_by_id(&self, event_id: EventId) -> Result { + let conn = self.acquire().await?; + conn.interact(move |conn| { + let mut stmt = conn.prepare_cached("SELECT event FROM events WHERE event_id = ?;")?; + let mut rows = stmt.query([event_id.to_hex()])?; + let row = rows + .next()? + .ok_or_else(|| Error::NotFound("event".into()))?; + let buf: Vec = row.get(0)?; + Ok(Event::decode(&buf)?) + }) + .await? + } + + #[tracing::instrument(skip_all)] + async fn query(&self, filters: Vec) -> Result, Self::Err> { + let ids = self.indexes.query(filters.clone()).await; + let conn = self.acquire().await?; + conn.interact(move |conn| { + let mut stmt = conn.prepare_cached("SELECT event FROM events WHERE event_id = ?;")?; + let mut events = Vec::with_capacity(ids.len()); + for id in ids.into_iter() { + let mut rows = stmt.query([id.to_hex()])?; + while let Ok(Some(row)) = rows.next() { + let buf: Vec = row.get(0)?; + events.push(Event::decode(&buf)?); + } + } + Ok(events) + }) + .await? 
+ } + + async fn event_ids_by_filters( + &self, + filters: Vec, + ) -> Result, Self::Err> { + Ok(self.indexes.query(filters).await) + } + + async fn negentropy_items( + &self, + filter: Filter, + ) -> Result, Self::Err> { + let ids = self.indexes.query(vec![filter.clone()]).await; + let conn = self.acquire().await?; + conn.interact(move |conn| { + let mut stmt = conn.prepare_cached("SELECT event FROM events WHERE event_id = ?;")?; + let mut events = Vec::with_capacity(ids.len()); + for id in ids.into_iter() { + let mut rows = stmt.query([id.to_hex()])?; + while let Ok(Some(row)) = rows.next() { + let buf: Vec = row.get(0)?; + let event = Event::decode(&buf)?; + events.push((event.id, event.created_at)); + } + } + Ok(events) + }) + .await? + } + + async fn wipe(&self) -> Result<(), Self::Err> { + let conn = self.acquire().await?; + + conn.interact(|conn| { + // Reset DB + conn.set_db_config(DbConfig::SQLITE_DBCONFIG_RESET_DATABASE, true)?; + conn.execute("VACUUM;", [])?; + conn.set_db_config(DbConfig::SQLITE_DBCONFIG_RESET_DATABASE, false)?; + + // Execute migrations + conn.execute_batch(STARTUP_SQL)?; + + Ok::<(), Error>(()) + }) + .await??; + + migration::run(&conn).await?; + + Ok(()) + } +} diff --git a/crates/nostr-sqlite/src/migration.rs b/crates/nostr-sqlite/src/migration.rs new file mode 100644 index 000000000..77e436c92 --- /dev/null +++ b/crates/nostr-sqlite/src/migration.rs @@ -0,0 +1,115 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +use std::cmp::Ordering; + +use deadpool_sqlite::Object; +use rusqlite::Connection; +use thiserror::Error; + +use super::Error; + +/// Latest database version +pub const DB_VERSION: usize = 1; + +/// Startup DB Pragmas +pub const STARTUP_SQL: &str = r##" +PRAGMA main.synchronous=NORMAL; +PRAGMA foreign_keys = ON; +PRAGMA journal_size_limit=32768; +pragma mmap_size = 17179869184; -- cap mmap at 16GB +"##; + +/// Schema error +#[derive(Debug, Error)] +pub enum MigrationError { + 
/// Database versione newer than supported + #[error( + "Database version is newer than supported by this executable (v{current} > v{DB_VERSION})" + )] + NewerDbVersion { current: usize }, +} + +/// Determine the current application database schema version. +pub fn curr_db_version(conn: &mut Connection) -> Result { + let query = "PRAGMA user_version;"; + let curr_version = conn.query_row(query, [], |row| row.get(0))?; + Ok(curr_version) +} + +/// Upgrade DB to latest version, and execute pragma settings +pub(crate) async fn run(conn: &Object) -> Result<(), Error> { + conn.interact(|conn| { + // check the version. + let mut curr_version = curr_db_version(conn)?; + tracing::info!("DB version = {:?}", curr_version); + + match curr_version.cmp(&DB_VERSION) { + // Database is new or not current + Ordering::Less => { + // initialize from scratch + if curr_version == 0 { + curr_version = mig_init(conn)?; + } + + // for initialized but out-of-date schemas, proceed to + // upgrade sequentially until we are current. 
+ /* if curr_version == 1 { + curr_version = mig_1_to_2(conn)?; + } + + if curr_version == 2 { + curr_version = mig_2_to_3(conn)?; + } + + if curr_version == 3 { + curr_version = mig_3_to_4(conn)?; + } + + if curr_version == 4 { + curr_version = mig_4_to_5(conn)?; + } + + if curr_version == 5 { + curr_version = mig_5_to_6(conn)?; + } + + if curr_version == 6 { + curr_version = mig_6_to_7(conn)?; + } */ + + if curr_version == DB_VERSION { + tracing::info!("All migration scripts completed successfully (v{DB_VERSION})"); + } + } + // Database is current, all is good + Ordering::Equal => { + tracing::debug!("Database version was already current (v{DB_VERSION})"); + } + // Database is newer than what this code understands, abort + Ordering::Greater => { + return Err(Error::Migration(MigrationError::NewerDbVersion { + current: curr_version, + })); + } + } + + // Setup PRAGMA + conn.execute_batch(STARTUP_SQL)?; + tracing::debug!("SQLite PRAGMA startup completed"); + Ok(()) + }) + .await? +} + +fn mig_init(conn: &mut Connection) -> Result { + conn.execute_batch(include_str!("../migrations/001_init.sql"))?; + tracing::info!("database schema initialized to v1"); + Ok(1) +} + +/* fn mig_1_to_2(conn: &mut Connection) -> Result { + conn.execute_batch(include_str!("../../migrations/002_notifications.sql"))?; + tracing::info!("database schema upgraded v1 -> v2"); + Ok(2) +} */ diff --git a/crates/nostr/Cargo.toml b/crates/nostr/Cargo.toml index 1041f287b..bef7ea5c2 100644 --- a/crates/nostr/Cargo.toml +++ b/crates/nostr/Cargo.toml @@ -3,12 +3,12 @@ name = "nostr" version = "0.25.0" edition = "2021" description = "Rust implementation of the Nostr protocol." 
-authors = ["Yuki Kishimoto "] +authors = ["Yuki Kishimoto "] homepage.workspace = true repository.workspace = true license.workspace = true readme = "README.md" -rust-version.workspace = true +rust-version = "1.64.0" keywords = ["nostr", "protocol", "no_std"] [package.metadata.docs.rs] diff --git a/crates/nostr/README.md b/crates/nostr/README.md index d3e69b348..5c9d55250 100644 --- a/crates/nostr/README.md +++ b/crates/nostr/README.md @@ -12,7 +12,9 @@ Rust implementation of Nostr protocol. -If you're writing a typical Nostr client or bot, you may be interested in [nostr-sdk](https://crates.io/crates/nostr-sdk). +You may be interested in: +* [`nostr-sdk`](https://crates.io/crates/nostr-sdk) if you want to write a typical Nostr client or bot +* [`nostr-database`](https://crates.io/crates/nostr-database) if you need a database for your Nostr app (native or web) ## Getting started diff --git a/crates/nostr/src/event/id.rs b/crates/nostr/src/event/id.rs index 0f12fe69b..0a268db68 100644 --- a/crates/nostr/src/event/id.rs +++ b/crates/nostr/src/event/id.rs @@ -99,6 +99,11 @@ impl EventId { self.as_ref() } + /// Consume and get bytes + pub fn to_bytes(self) -> [u8; 32] { + self.0.to_byte_array() + } + /// Get as hex string pub fn to_hex(&self) -> String { self.0.to_string() diff --git a/crates/nostr/src/event/mod.rs b/crates/nostr/src/event/mod.rs index 41960d458..e3c97527d 100644 --- a/crates/nostr/src/event/mod.rs +++ b/crates/nostr/src/event/mod.rs @@ -16,6 +16,7 @@ pub mod builder; pub mod id; pub mod kind; pub mod partial; +pub mod raw; pub mod tag; pub mod unsigned; @@ -23,7 +24,7 @@ pub use self::builder::EventBuilder; pub use self::id::EventId; pub use self::kind::Kind; pub use self::partial::{MissingPartialEvent, PartialEvent}; -pub use self::tag::{Marker, Tag, TagKind}; +pub use self::tag::{Marker, Tag, TagIndexValues, TagIndexes, TagKind}; pub use self::unsigned::UnsignedEvent; #[cfg(feature = "std")] use crate::types::time::Instant; @@ -173,6 +174,16 @@ 
impl Event { .map_err(|_| Error::InvalidSignature) } + /// Get [`Timestamp`] expiration if set + pub fn expiration(&self) -> Option<&Timestamp> { + for tag in self.tags.iter() { + if let Tag::Expiration(timestamp) = tag { + return Some(timestamp); + } + } + None + } + /// Returns `true` if the event has an expiration tag that is expired. /// If an event has no `Expiration` tag, then it will return `false`. /// @@ -191,11 +202,9 @@ impl Event { where T: TimeSupplier, { - let now: Timestamp = Timestamp::now_with_supplier(supplier); - for tag in self.tags.iter() { - if let Tag::Expiration(timestamp) = tag { - return timestamp < &now; - } + if let Some(timestamp) = self.expiration() { + let now: Timestamp = Timestamp::now_with_supplier(supplier); + return timestamp < &now; } false } @@ -282,6 +291,11 @@ impl Event { _ => None, }) } + + /// Build tags index + pub fn build_tags_index(&self) -> TagIndexes { + TagIndexes::from(self.tags.iter().map(|t| t.as_vec())) + } } impl JsonUtil for Event { @@ -359,6 +373,7 @@ mod tests { assert_eq!(Kind::Custom(123), e.kind); assert_eq!(Kind::Custom(123), deserialized.kind); } + #[test] #[cfg(feature = "std")] fn test_event_expired() { @@ -376,10 +391,8 @@ mod tests { #[test] #[cfg(feature = "std")] fn test_event_not_expired() { - let now = Timestamp::now().as_i64(); - - // To make sure it is never considered expired - let expiry_date: u64 = (now * 2).try_into().unwrap(); + let now = Timestamp::now(); + let expiry_date: u64 = now.as_u64() * 2; let my_keys = Keys::generate(); let event = EventBuilder::new_text_note( diff --git a/crates/nostr/src/event/raw.rs b/crates/nostr/src/event/raw.rs new file mode 100644 index 000000000..55085f50b --- /dev/null +++ b/crates/nostr/src/event/raw.rs @@ -0,0 +1,97 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +//! 
Raw Event + +use alloc::string::String; +use alloc::vec::Vec; +use core::str::FromStr; + +use crate::Timestamp; + +use super::kind::EPHEMERAL_RANGE; + +/// Raw Event +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct RawEvent { + /// Id + pub id: [u8; 32], + /// Author + pub pubkey: [u8; 32], + /// Timestamp (seconds) + pub created_at: u64, + /// Kind + pub kind: u64, + /// Vector of [`Tag`] + pub tags: Vec>, + /// Content + pub content: String, + /// Signature + pub sig: [u8; 64], +} + +impl RawEvent { + /// Returns `true` if the event has an expiration tag that is expired. + /// If an event has no `Expiration` tag, then it will return `false`. + /// + /// + pub fn is_expired(&self, now: &Timestamp) -> bool { + for tag in self.tags.iter() { + if tag.len() == 2 && tag[0] == "expiration" { + if let Ok(timestamp) = Timestamp::from_str(&tag[1]) { + return ×tamp < now; + } + break; + } + } + false + } + + /// Check if event [`Kind`] is `Ephemeral` + /// + /// + pub fn is_ephemeral(&self) -> bool { + EPHEMERAL_RANGE.contains(&self.kind) + } +} + +#[cfg(test)] +mod tests { + #[cfg(feature = "std")] + use super::*; + + #[test] + #[cfg(feature = "std")] + fn test_event_expired() { + let raw = RawEvent { + id: [0u8; 32], + pubkey: [0u8; 32], + created_at: 0, + kind: 1, + tags: vec![vec!["expiration".to_string(), "12345".to_string()]], + content: String::new(), + sig: [0u8; 64], + }; + let now = Timestamp::now(); + assert!(raw.is_expired(&now)); + } + + #[test] + #[cfg(feature = "std")] + fn test_event_not_expired() { + let now = Timestamp::now(); + let expiry_date: u64 = now.as_u64() * 2; + + let raw = RawEvent { + id: [0u8; 32], + pubkey: [0u8; 32], + created_at: 0, + kind: 1, + tags: vec![vec!["expiration".to_string(), expiry_date.to_string()]], + content: String::new(), + sig: [0u8; 64], + }; + + assert!(!raw.is_expired(&now)); + } +} diff --git a/crates/nostr/src/event/tag/indexes.rs b/crates/nostr/src/event/tag/indexes.rs new file mode 100644 
index 000000000..a744d23ff --- /dev/null +++ b/crates/nostr/src/event/tag/indexes.rs @@ -0,0 +1,102 @@ +// Copyright (c) 2022-2023 Yuki Kishimoto +// Distributed under the MIT software license + +//! Tag Indexes + +use alloc::string::String; +use alloc::vec::Vec; + +#[cfg(not(feature = "std"))] +use alloc::collections::{BTreeMap as AllocMap, BTreeSet as AllocSet}; +use core::ops::{Deref, DerefMut}; +#[cfg(feature = "std")] +use std::collections::{HashMap as AllocMap, HashSet as AllocSet}; + +use bitcoin::hashes::sha256::Hash as Sha256Hash; +use bitcoin::hashes::Hash; + +use crate::Alphabet; + +/// Tag Index Value Size +pub const TAG_INDEX_VALUE_SIZE: usize = 8; + +/// Tag Indexes +#[derive(Debug, Clone, Default, PartialEq, Eq)] +pub struct TagIndexes { + inner: AllocMap, +} + +impl Deref for TagIndexes { + type Target = AllocMap; + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl DerefMut for TagIndexes { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} + +impl From for TagIndexes +where + I: Iterator>, + S: AsRef, +{ + fn from(iter: I) -> Self { + let mut tag_index: TagIndexes = TagIndexes::default(); + for t in iter.filter(|t| t.len() > 1) { + if let Some(tagnamechar) = single_char_tagname(t[0].as_ref()) { + let mut inner: [u8; TAG_INDEX_VALUE_SIZE] = [0u8; TAG_INDEX_VALUE_SIZE]; + let hash = Sha256Hash::hash(t[1].as_ref().as_bytes()); + inner.copy_from_slice(&hash[..TAG_INDEX_VALUE_SIZE]); + tag_index.entry(tagnamechar).or_default().insert(inner); + } + } + tag_index + } +} + +#[inline] +fn single_char_tagname(tagname: &str) -> Option { + tagname + .chars() + .next() + .and_then(|first| Alphabet::try_from(first).ok()) +} + +/// Tag Index Values +#[derive(Debug, Clone, Default, PartialEq, Eq)] +pub struct TagIndexValues { + inner: AllocSet<[u8; TAG_INDEX_VALUE_SIZE]>, +} + +impl Deref for TagIndexValues { + type Target = AllocSet<[u8; TAG_INDEX_VALUE_SIZE]>; + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl 
DerefMut for TagIndexValues { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} + +impl From<&AllocSet> for TagIndexValues { + fn from(value: &AllocSet) -> Self { + Self { + inner: value + .iter() + .map(|s| { + let mut inner = [0u8; TAG_INDEX_VALUE_SIZE]; + let hash = Sha256Hash::hash(s.as_bytes()); + inner.copy_from_slice(&hash[..TAG_INDEX_VALUE_SIZE]); + inner + }) + .collect(), + } + } +} diff --git a/crates/nostr/src/event/tag.rs b/crates/nostr/src/event/tag/mod.rs similarity index 99% rename from crates/nostr/src/event/tag.rs rename to crates/nostr/src/event/tag/mod.rs index 747636741..74fbe4d1c 100644 --- a/crates/nostr/src/event/tag.rs +++ b/crates/nostr/src/event/tag/mod.rs @@ -18,6 +18,9 @@ use serde::ser::SerializeSeq; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use url_fork::{ParseError, Url}; +pub mod indexes; + +pub use self::indexes::{TagIndexValues, TagIndexes}; use super::id::{self, EventId}; use crate::nips::nip26::{Conditions, Error as Nip26Error}; use crate::nips::nip48::Protocol; diff --git a/crates/nostr/src/lib.rs b/crates/nostr/src/lib.rs index c144c961e..6261fc5c6 100644 --- a/crates/nostr/src/lib.rs +++ b/crates/nostr/src/lib.rs @@ -50,7 +50,7 @@ pub mod util; pub use self::event::tag::{ ExternalIdentity, HttpMethod, Identity, ImageDimensions, Marker, RelayMetadata, Report, Tag, - TagKind, + TagIndexValues, TagIndexes, TagKind, }; pub use self::event::{ Event, EventBuilder, EventId, Kind, MissingPartialEvent, PartialEvent, UnsignedEvent, diff --git a/crates/nostr/src/message/subscription.rs b/crates/nostr/src/message/subscription.rs index 68acc0d5c..5358f217a 100644 --- a/crates/nostr/src/message/subscription.rs +++ b/crates/nostr/src/message/subscription.rs @@ -24,6 +24,7 @@ use serde::ser::{SerializeMap, Serializer}; use serde::{Deserialize, Serialize}; use serde_json::Value; +use crate::event::{TagIndexValues, TagIndexes}; use crate::{Event, EventId, JsonUtil, Kind, Timestamp}; /// Alphabet 
Error @@ -109,38 +110,46 @@ impl fmt::Display for Alphabet { } } +impl TryFrom for Alphabet { + type Error = AlphabetError; + fn try_from(c: char) -> Result { + match c { + 'a' => Ok(Self::A), + 'b' => Ok(Self::B), + 'c' => Ok(Self::C), + 'd' => Ok(Self::D), + 'e' => Ok(Self::E), + 'f' => Ok(Self::F), + 'g' => Ok(Self::G), + 'h' => Ok(Self::H), + 'i' => Ok(Self::I), + 'j' => Ok(Self::J), + 'k' => Ok(Self::K), + 'l' => Ok(Self::L), + 'm' => Ok(Self::M), + 'n' => Ok(Self::N), + 'o' => Ok(Self::O), + 'p' => Ok(Self::P), + 'q' => Ok(Self::Q), + 'r' => Ok(Self::R), + 's' => Ok(Self::S), + 't' => Ok(Self::T), + 'u' => Ok(Self::U), + 'v' => Ok(Self::V), + 'w' => Ok(Self::W), + 'x' => Ok(Self::X), + 'y' => Ok(Self::Y), + 'z' => Ok(Self::Z), + _ => Err(AlphabetError::InvalidChar), + } + } +} + impl FromStr for Alphabet { type Err = AlphabetError; fn from_str(s: &str) -> Result { - match s { - "a" => Ok(Self::A), - "b" => Ok(Self::B), - "c" => Ok(Self::C), - "d" => Ok(Self::D), - "e" => Ok(Self::E), - "f" => Ok(Self::F), - "g" => Ok(Self::G), - "h" => Ok(Self::H), - "i" => Ok(Self::I), - "j" => Ok(Self::J), - "k" => Ok(Self::K), - "l" => Ok(Self::L), - "m" => Ok(Self::M), - "n" => Ok(Self::N), - "o" => Ok(Self::O), - "p" => Ok(Self::P), - "q" => Ok(Self::Q), - "r" => Ok(Self::R), - "s" => Ok(Self::S), - "t" => Ok(Self::T), - "u" => Ok(Self::U), - "v" => Ok(Self::V), - "w" => Ok(Self::W), - "x" => Ok(Self::X), - "y" => Ok(Self::Y), - "z" => Ok(Self::Z), - _ => Err(AlphabetError::InvalidChar), - } + let c: char = s.chars().next().ok_or(AlphabetError::InvalidChar)?; + Self::try_from(c) } } @@ -566,26 +575,6 @@ impl Filter { } } -fn single_char_tagname(tagname: &str) -> Option { - tagname - .chars() - .next() - .and_then(|first| Alphabet::from_str(&first.to_string()).ok()) -} - -fn tag_idx(event: &Event) -> AllocMap> { - event - .tags - .iter() - .map(|t| t.as_vec()) - .filter(|t| t.len() > 1) - .filter_map(|t| single_char_tagname(&t[0]).map(|tagnamechar| (tagnamechar, 
t[1].clone()))) - .fold(AllocMap::new(), |mut idx, (tagnamechar, tagval)| { - idx.entry(tagnamechar).or_default().insert(tagval); - idx - }) -} - impl Filter { fn ids_match(&self, event: &Event) -> bool { self.ids.is_empty() || self.ids.contains(&event.id) @@ -600,10 +589,11 @@ impl Filter { return true; } - let idx: AllocMap> = tag_idx(event); + let idx: TagIndexes = event.build_tags_index(); self.generic_tags.iter().all(|(tagname, set)| { + let set = TagIndexValues::from(set); idx.get(tagname) - .map(|valset| valset.intersection(set).count() > 0) + .map(|valset| valset.intersection(&set).count() > 0) .unwrap_or(false) }) }