From 8699ea010b2fcdcfcb2dd872eac8d332444f4433 Mon Sep 17 00:00:00 2001 From: sydhds Date: Tue, 29 Oct 2024 17:35:36 +0100 Subject: [PATCH 01/40] Add massa event cache crate --- Cargo.lock | 1031 ++++++++++++++++---------- Cargo.toml | 2 + massa-event-cache/Cargo.toml | 17 + massa-event-cache/src/config.rs | 14 + massa-event-cache/src/controller.rs | 30 + massa-event-cache/src/event_cache.rs | 247 ++++++ massa-event-cache/src/lib.rs | 6 + massa-event-cache/src/ser_deser.rs | 296 ++++++++ massa-models/src/output_event.rs | 2 +- massa-module-cache/Cargo.toml | 2 +- 10 files changed, 1259 insertions(+), 388 deletions(-) create mode 100644 massa-event-cache/Cargo.toml create mode 100644 massa-event-cache/src/config.rs create mode 100644 massa-event-cache/src/controller.rs create mode 100644 massa-event-cache/src/event_cache.rs create mode 100644 massa-event-cache/src/lib.rs create mode 100644 massa-event-cache/src/ser_deser.rs diff --git a/Cargo.lock b/Cargo.lock index ba35600ad67..07a3ce878a9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -108,9 +108,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.15" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" +checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" dependencies = [ "anstyle", "anstyle-parse", @@ -123,43 +123,43 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.8" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anstyle-parse" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" +checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.4" +version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" +checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125" dependencies = [ "anstyle", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anyhow" -version = "1.0.89" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" +checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" [[package]] name = "arrayref" @@ -194,7 +194,7 @@ dependencies = [ "nom", "num-traits", "rusticata-macros", - "thiserror", + "thiserror 1.0.69", "time", ] @@ -204,10 +204,10 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", "syn 1.0.109", - "synstructure", + "synstructure 0.12.6", ] [[package]] @@ -216,7 +216,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", "syn 1.0.109", ] @@ -244,9 +244,9 @@ 
version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -255,9 +255,9 @@ version = "0.1.83" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -274,15 +274,15 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "axum" -version = "0.7.7" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "504e3947307ac8326a5437504c517c4b56716c9d98fac0028c2acc7ca47d70ae" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" dependencies = [ "async-trait", "axum-core", "bytes", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", "http-body-util", "itoa", @@ -293,7 +293,7 @@ dependencies = [ "pin-project-lite", "rustversion", "serde", - "sync_wrapper 1.0.1", + "sync_wrapper 1.0.2", "tower 0.5.1", "tower-layer", "tower-service", @@ -308,13 +308,13 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", "http-body-util", "mime", "pin-project-lite", "rustversion", - "sync_wrapper 1.0.1", + "sync_wrapper 1.0.2", "tower-layer", "tower-service", ] @@ -371,12 +371,12 @@ dependencies = [ "lazycell", "peeking_take_while", "prettyplease", - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -406,9 +406,9 @@ dependencies = [ [[package]] name = "blake3" -version = "1.5.4" +version = "1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d82033247fd8e890df8f740e407ad4d038debb9eb1f40533fffb32e7d17dc6f7" +checksum = "b8ee0c1824c4dea5b5f81736aff91bae041d2c07ee1192bec91054e10e3e601e" dependencies = [ "arrayref", "arrayvec", @@ -468,7 +468,7 @@ version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3db406d29fbcd95542e92559bed4d8ad92636d1ca8b3b72ede10b4bcc010e659" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", "syn 1.0.109", ] @@ -481,9 +481,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.2" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" +checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" dependencies = [ "serde", ] @@ -522,9 +522,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.1.30" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b16803a61b81d9eabb7eae2588776c4c1e584b738ede45fdbb4c972cec1e9945" +checksum = "27f657647bcff5394bf56c7317665bbf790a137a50eaaa5c6bfbb9e27a518f2d" dependencies = [ "jobserver", "libc", @@ -560,9 +560,9 @@ checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "chrono" -version = "0.4.38" +version = "0.4.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" +checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825" dependencies = [ "android-tzdata", "iana-time-zone", @@ -621,9 +621,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.20" +version = "4.5.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" 
+checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84" dependencies = [ "clap_builder", "clap_derive", @@ -631,9 +631,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.20" +version = "4.5.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" +checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838" dependencies = [ "anstream", "anstyle", @@ -648,16 +648,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ "heck 0.5.0", - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] name = "clap_lex" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" +checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" [[package]] name = "clipboard-win" @@ -672,18 +672,18 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.51" +version = "0.1.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb1e43aa7fd152b1f968787f7dbcdeb306d1867ff373c69955211876c053f91a" +checksum = "c682c223677e0e5b6b7f63a64b9351844c3f1b1678a68b7ee617e30fb082620e" dependencies = [ "cc", ] [[package]] name = "colorchoice" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" [[package]] name = "combine" @@ -770,9 +770,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.14" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" +checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3" dependencies = [ "libc", ] @@ -1029,9 +1029,9 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -1062,7 +1062,7 @@ checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", "strsim 0.10.0", "syn 1.0.109", @@ -1076,10 +1076,10 @@ checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", "strsim 0.11.1", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -1101,7 +1101,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -1177,7 +1177,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", "syn 1.0.109", ] @@ -1198,7 +1198,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c11bdc11a0c47bc7d37d582b5285da6849c96681023680b906673c5707af7b0f" dependencies = [ "darling 0.14.4", - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", "syn 1.0.109", ] @@ -1222,7 +1222,7 @@ dependencies = [ "console", "shell-words", "tempfile", - "thiserror", + "thiserror 1.0.69", "zeroize", ] @@ -1279,9 +1279,9 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -1321,7 +1321,7 @@ dependencies = [ "byteorder", "lazy_static", "proc-macro-error", - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", "syn 1.0.109", ] @@ -1396,7 +1396,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c134c37760b27a871ba422106eedbb8247da973a09e82558bf26d619c882b159" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", "syn 1.0.109", ] @@ -1408,7 +1408,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8ea75f31022cba043afe037940d73684327e915f88f62478e778c3de914cd0a" dependencies = [ "enum_delegate_lib", - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", "syn 1.0.109", ] @@ -1419,7 +1419,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e1f6c3800b304a6be0012039e2a45a322a093539c45ab818d9e6895a39c90fe" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", "rand", "syn 1.0.109", @@ -1441,9 +1441,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59c3b24c345d8c314966bdc1832f6c2635bfcce8e7cf363bd115987bba2ee242" dependencies = [ "darling 0.20.10", - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -1463,12 +1463,12 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -1489,9 +1489,9 @@ checksum = 
"4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" [[package]] name = "fastrand" -version = "2.1.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "fd-lock" @@ -1530,9 +1530,9 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.34" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" +checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c" dependencies = [ "crc32fast", "miniz_oxide", @@ -1643,9 +1643,9 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -1763,12 +1763,12 @@ dependencies = [ "futures-core", "futures-sink", "gloo-utils", - "http 1.1.0", + "http 1.2.0", "js-sys", "pin-project", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -1811,7 +1811,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.6.0", + "indexmap 2.7.0", "slab", "tokio", "tokio-util", @@ -1820,17 +1820,17 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" +checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" dependencies = [ "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", - "http 1.1.0", - "indexmap 2.6.0", + "http 
1.2.0", + "indexmap 2.7.0", "slab", "tokio", "tokio-util", @@ -1870,9 +1870,9 @@ checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" [[package]] name = "hashbrown" -version = "0.15.0" +version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" [[package]] name = "hdrhistogram" @@ -1896,12 +1896,6 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" -[[package]] -name = "hermit-abi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" - [[package]] name = "hermit-abi" version = "0.4.0" @@ -1972,9 +1966,9 @@ dependencies = [ [[package]] name = "http" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" dependencies = [ "bytes", "fnv", @@ -1999,7 +1993,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http 1.1.0", + "http 1.2.0", ] [[package]] @@ -2010,7 +2004,7 @@ checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" dependencies = [ "bytes", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", "pin-project-lite", ] @@ -2058,15 +2052,15 @@ dependencies = [ [[package]] name = "hyper" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" +checksum = 
"97818827ef4f364230e16705d4706e2897df2bb60617d6ca15d598025a3c481f" dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.6", - "http 1.1.0", + "h2 0.4.7", + "http 1.2.0", "http-body 1.0.1", "httparse", "httpdate", @@ -2084,8 +2078,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ "futures-util", - "http 1.1.0", - "hyper 1.5.0", + "http 1.2.0", + "hyper 1.5.1", "hyper-util", "log", "rustls", @@ -2097,11 +2091,11 @@ dependencies = [ [[package]] name = "hyper-timeout" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ - "hyper 1.5.0", + "hyper 1.5.1", "hyper-util", "pin-project-lite", "tokio", @@ -2110,16 +2104,16 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" +checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-channel", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", - "hyper 1.5.0", + "hyper 1.5.1", "pin-project-lite", "socket2", "tokio", @@ -2150,6 +2144,124 @@ dependencies = [ "cc", ] +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" + +[[package]] +name = "icu_properties" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2 1.0.92", + "quote 1.0.37", + "syn 2.0.90", +] + [[package]] name = "ident_case" version = "1.0.1" @@ -2158,12 +2270,23 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "0.5.0" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "icu_normalizer", + "icu_properties", ] [[package]] @@ -2179,12 +2302,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" +checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" dependencies = [ "equivalent", - "hashbrown 0.15.0", + "hashbrown 0.15.2", "serde", ] @@ -2218,7 +2341,7 @@ version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b" dependencies = [ - "hermit-abi 
0.4.0", + "hermit-abi", "libc", "windows-sys 0.52.0", ] @@ -2258,9 +2381,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.11" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" [[package]] name = "jni" @@ -2272,7 +2395,7 @@ dependencies = [ "combine", "jni-sys", "log", - "thiserror", + "thiserror 1.0.69", "walkdir", ] @@ -2293,10 +2416,11 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.72" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" +checksum = "6717b6b5b077764fb5966237269cb3c64edddde4b14ce42647430a78ced9e7b7" dependencies = [ + "once_cell", "wasm-bindgen", ] @@ -2339,14 +2463,14 @@ dependencies = [ "futures-channel", "futures-util", "gloo-net", - "http 1.1.0", + "http 1.2.0", "jsonrpsee-core", "pin-project", "rustls", "rustls-pki-types", "rustls-platform-verifier", "soketto", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-rustls", "tokio-util", @@ -2364,17 +2488,17 @@ dependencies = [ "bytes", "futures-timer", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", "http-body-util", "jsonrpsee-types", "parking_lot", "pin-project", "rand", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", "tracing", @@ -2390,7 +2514,7 @@ dependencies = [ "async-trait", "base64 0.22.1", "http-body 1.0.1", - "hyper 1.5.0", + "hyper 1.5.1", "hyper-rustls", "hyper-util", "jsonrpsee-core", @@ -2399,7 +2523,7 @@ dependencies = [ "rustls-platform-verifier", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "tokio", "tower 0.4.13", "tracing", @@ -2414,9 +2538,9 @@ checksum = "c06c01ae0007548e73412c08e2285ffe5d723195bf268bce67b1b77c3bb2a14d" 
dependencies = [ "heck 0.5.0", "proc-macro-crate", - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -2426,10 +2550,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82ad8ddc14be1d4290cd68046e7d1d37acd408efed6d3ca08aefcc3ad6da069c" dependencies = [ "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", "http-body-util", - "hyper 1.5.0", + "hyper 1.5.1", "hyper-util", "jsonrpsee-core", "jsonrpsee-types", @@ -2438,7 +2562,7 @@ dependencies = [ "serde", "serde_json", "soketto", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", "tokio-util", @@ -2452,10 +2576,10 @@ version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a178c60086f24cc35bb82f57c651d0d25d99c4742b4d335de04e97fa1f08a8a1" dependencies = [ - "http 1.1.0", + "http 1.2.0", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -2475,7 +2599,7 @@ version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fe322e0896d0955a3ebdd5bf813571c53fea29edd713bc315b76620b327e86d" dependencies = [ - "http 1.1.0", + "http 1.2.0", "jsonrpsee-client-transport", "jsonrpsee-core", "jsonrpsee-types", @@ -2511,15 +2635,15 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.160" +version = "0.2.168" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0b21006cd1874ae9e650973c565615676dc4a274c965bb0a73796dac838ce4f" +checksum = "5aaeb2981e0606ca11d79718f8bb01164f1d6ed75080182d3abf017e6d244b6d" [[package]] name = "libloading" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" +checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", 
"windows-targets 0.52.6", @@ -2527,9 +2651,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.8" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" +checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" [[package]] name = "libredox" @@ -2629,6 +2753,12 @@ version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +[[package]] +name = "litemap" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" + [[package]] name = "litrs" version = "0.4.1" @@ -2821,7 +2951,7 @@ dependencies = [ "serial_test", "sha2 0.10.8", "sha3", - "thiserror", + "thiserror 1.0.69", "tracing", "wasmer", "wasmer-compiler-cranelift", @@ -2837,7 +2967,7 @@ version = "2.5.0" dependencies = [ "async-trait", "futures", - "http 1.1.0", + "http 1.2.0", "itertools 0.12.1", "jsonrpsee", "massa_api_exports", @@ -2865,7 +2995,7 @@ dependencies = [ "tokio", "tokio-stream", "tower 0.4.13", - "tower-http 0.6.1", + "tower-http 0.6.2", "tracing", ] @@ -2887,7 +3017,7 @@ dependencies = [ "serde", "serial_test", "strum", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -2956,7 +3086,7 @@ dependencies = [ "stream_limiter", "substruct", "tempfile", - "thiserror", + "thiserror 1.0.69", "tracing", ] @@ -2977,7 +3107,7 @@ dependencies = [ "displaydoc", "pbkdf2", "rand", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -3001,7 +3131,7 @@ dependencies = [ "nom", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "tokio", ] @@ -3045,7 +3175,7 @@ dependencies = [ "mockall", "mockall_wrap", "parking_lot", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -3080,6 +3210,21 @@ dependencies = [ "tempfile", ] +[[package]] +name = 
"massa_event_cache" +version = "0.1.0" +dependencies = [ + "massa_models", + "massa_serialization", + "more-asserts 0.3.1", + "nom", + "rand", + "rocksdb", + "serial_test", + "tempfile", + "tracing", +] + [[package]] name = "massa_executed_ops" version = "2.5.0" @@ -3114,7 +3259,7 @@ dependencies = [ "num", "serde", "tempfile", - "thiserror", + "thiserror 1.0.69", "tokio", ] @@ -3184,7 +3329,7 @@ dependencies = [ "massa_signature", "massa_storage", "massa_time", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -3241,7 +3386,7 @@ dependencies = [ "serde", "serde_json", "tempfile", - "thiserror", + "thiserror 1.0.69", "tracing", ] @@ -3252,7 +3397,7 @@ dependencies = [ "displaydoc", "futures-util", "h2 0.3.26", - "hyper 1.5.0", + "hyper 1.5.1", "itertools 0.12.1", "massa-proto-rs", "massa_bootstrap", @@ -3275,14 +3420,14 @@ dependencies = [ "parking_lot", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", "tonic", "tonic-health", "tonic-reflection", "tonic-web", - "tower-http 0.6.1", + "tower-http 0.6.2", "tower-service", "tracing", ] @@ -3299,7 +3444,7 @@ dependencies = [ "serde", "serde_json", "serial_test", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -3320,7 +3465,7 @@ dependencies = [ "serde_json", "serde_with", "tempfile", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -3381,7 +3526,7 @@ dependencies = [ "serde_json", "serde_with", "serial_test", - "thiserror", + "thiserror 1.0.69", "transition", "variant_count", ] @@ -3403,7 +3548,7 @@ dependencies = [ "schnellru", "serial_test", "tempfile", - "thiserror", + "thiserror 1.0.69", "tracing", ] @@ -3464,7 +3609,7 @@ dependencies = [ "serde", "serde_json", "tempfile", - "thiserror", + "thiserror 1.0.69", "tracing", ] @@ -3502,7 +3647,7 @@ dependencies = [ "serde", "serde_json", "tempfile", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -3547,7 +3692,7 @@ dependencies = [ name = "massa_sdk" version = "2.5.0" dependencies = [ - "http 1.1.0", + "http 
1.2.0", "jsonrpsee", "jsonrpsee-http-client", "jsonrpsee-ws-client", @@ -3556,7 +3701,7 @@ dependencies = [ "massa_models", "massa_time", "rcgen", - "thiserror", + "thiserror 1.0.69", "tonic", "tracing", ] @@ -3569,7 +3714,7 @@ dependencies = [ "nom", "num", "paste", - "thiserror", + "thiserror 1.0.69", "unsigned-varint 0.8.0", ] @@ -3587,7 +3732,7 @@ dependencies = [ "serde", "serde_json", "serial_test", - "thiserror", + "thiserror 1.0.69", "transition", ] @@ -3621,7 +3766,7 @@ dependencies = [ "massa_serialization", "nom", "serde", - "thiserror", + "thiserror 1.0.69", "time", ] @@ -3645,7 +3790,7 @@ dependencies = [ "num_enum", "parking_lot", "tempfile", - "thiserror", + "thiserror 1.0.69", "tracing", "variant_count", ] @@ -3663,7 +3808,7 @@ dependencies = [ "serde_qs", "serde_yaml", "tempfile", - "thiserror", + "thiserror 1.0.69", "zeroize", ] @@ -3772,11 +3917,10 @@ dependencies = [ [[package]] name = "mio" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ - "hermit-abi 0.3.9", "libc", "wasi", "windows-sys 0.52.0", @@ -3804,7 +3948,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" dependencies = [ "cfg-if", - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", "syn 1.0.109", ] @@ -3815,9 +3959,9 @@ version = "0.11.2" source = "git+https://github.com/AurelienFT/mockall-wrap?rev=18f88253a000df96cf407dfe4b9158c69c0aeb96#18f88253a000df96cf407dfe4b9158c69c0aeb96" dependencies = [ "cfg-if", - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -3995,9 +4139,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -4133,9 +4277,9 @@ checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "pathdiff" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d61c5ce1153ab5b689d0c074c4e7fc613e942dfb7dd9eea5ab202d2ad91fe361" +checksum = "df94ce210e5bc13cb6651479fa48d14f601d9858cfe0467f43ae157023b938d3" [[package]] name = "pbkdf2" @@ -4169,7 +4313,7 @@ dependencies = [ "rand", "serde", "stream_limiter", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -4190,20 +4334,20 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.14" +version = "2.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" +checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", - "thiserror", + "thiserror 2.0.6", "ucd-trie", ] [[package]] name = "pest_derive" -version = "2.7.14" +version = "2.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d214365f632b123a47fd913301e14c946c61d1c183ee245fa76eb752e59a02dd" +checksum = "816518421cfc6887a0d62bf441b6ffb4536fcc926395a69e1a85852d4363f57e" dependencies = [ "pest", "pest_generator", @@ -4211,22 +4355,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.14" +version = "2.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb55586734301717aea2ac313f50b2eb8f60d2fc3dc01d190eefa2e625f60c4e" +checksum = "7d1396fd3a870fc7838768d171b4616d5c91f6cc25e377b673d714567d99377b" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 
1.0.37", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] name = "pest_meta" -version = "2.7.14" +version = "2.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b75da2a70cf4d9cb76833c990ac9cd3923c9a8905a8929789ce347c84564d03d" +checksum = "e1e58089ea25d717bfd31fb534e4f3afcc2cc569c70de3e239778991ea3b7dea" dependencies = [ "once_cell", "pest", @@ -4240,34 +4384,34 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.6.0", + "indexmap 2.7.0", ] [[package]] name = "pin-project" -version = "1.1.6" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf123a161dde1e524adf36f90bc5d8d3462824a9c43553ad07a8183161189ec" +checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.6" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" +checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] name = "pin-project-lite" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" [[package]] name = "pin-utils" @@ -4378,12 +4522,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.22" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" +checksum = 
"64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ - "proc-macro2 1.0.88", - "syn 2.0.79", + "proc-macro2 1.0.92", + "syn 2.0.90", ] [[package]] @@ -4402,7 +4546,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", "syn 1.0.109", "version_check", @@ -4414,7 +4558,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", "version_check", ] @@ -4430,9 +4574,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.88" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c3a7fc5db1e57d5a779a352c8cdb57b29aa4c40cc69c3a68a7fedc815fbf2f9" +checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" dependencies = [ "unicode-ident", ] @@ -4474,14 +4618,14 @@ dependencies = [ "parking_lot", "procfs", "protobuf", - "thiserror", + "thiserror 1.0.69", ] [[package]] name = "prost" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0487d90e047de87f984913713b85c601c05609aad5b0df4b4573fbf69aa13f" +checksum = "2c0fef6c4230e4ccf618a35c59d7ede15dea37de8427500f50aff708806e42ec" dependencies = [ "bytes", "prost-derive", @@ -4489,11 +4633,10 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c1318b19085f08681016926435853bbf7858f9c082d0999b80550ff5d9abe15" +checksum = "d0f3e5beed80eb580c68e2c600937ac2c4eedabdfd5ef1e5b7ea4f3fba84497b" dependencies = [ - "bytes", "heck 0.5.0", "itertools 0.13.0", "log", @@ 
-4504,28 +4647,28 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.79", + "syn 2.0.90", "tempfile", ] [[package]] name = "prost-derive" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9552f850d5f0964a4e4d0bf306459ac29323ddfbae05e35a7c0d35cb0803cc5" +checksum = "157c5a9d7ea5c2ed2d9fb8f495b64759f7816c7eaea54ba3978f0d63000162e3" dependencies = [ "anyhow", "itertools 0.13.0", - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] name = "prost-types" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4759aa0d3a6232fb8dbdb97b61de2c20047c68aca932c7ed76da9d788508d670" +checksum = "cc2f1e56baa61e93533aebc21af4d2134b70f66275e0fcdf3cbe43d77ff7e8fc" dependencies = [ "prost", ] @@ -4551,7 +4694,7 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", "syn 1.0.109", ] @@ -4591,7 +4734,7 @@ version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", ] [[package]] @@ -4709,7 +4852,7 @@ checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ "getrandom", "libredox", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -4726,13 +4869,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - 
"regex-automata 0.4.8", + "regex-automata 0.4.9", "regex-syntax 0.8.5", ] @@ -4747,9 +4890,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", @@ -4844,7 +4987,7 @@ version = "0.7.45" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "503d1d27590a2b0a3a4ca4c94755aa2875657196ecbf401a42eff41d7de532c0" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", "syn 1.0.109", ] @@ -4910,9 +5053,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc-hash" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" +checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" [[package]] name = "rustc_version" @@ -4934,22 +5077,22 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.37" +version = "0.38.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" +checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" dependencies = [ "bitflags 2.6.0", "errno", "libc", "linux-raw-sys", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "rustls" -version = "0.23.15" +version = "0.23.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fbb44d7acc4e873d613422379f69f237a1b141928c02f6bc6ccfddddc2d7993" +checksum = "934b404430bb06b3fae2cba809eb45a1ab1aecd64491213d7c3301b88393f8d1" dependencies = [ "log", "once_cell", @@ -5061,9 +5204,9 @@ version = "0.9.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "5a32af5427251d2e4be14fc151eabe18abb4a7aad5efee7044da9f096c906a43" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -5083,9 +5226,9 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" dependencies = [ "windows-sys 0.59.0", ] @@ -5109,10 +5252,10 @@ version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1eee588578aff73f856ab961cd2f79e36bc45d7ded33a7562adba4667aecc0e" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", "serde_derive_internals", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -5154,9 +5297,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.12.0" +version = "2.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" +checksum = "fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2" dependencies = [ "core-foundation-sys", "libc", @@ -5164,9 +5307,9 @@ dependencies = [ [[package]] name = "self_cell" -version = "1.0.4" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d369a96f978623eb3dc28807c4852d6cc617fed53da5d3c400feff1ef34a714a" +checksum = "c2fdfc24bc566f839a2da4c4295b82db7d25a24253867d5c64355abb5799bdbe" [[package]] name = "semver" @@ -5185,9 +5328,9 @@ checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "serde" -version = "1.0.210" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" +checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" dependencies = [ "serde_derive", ] @@ -5205,13 +5348,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.210" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" +checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -5220,16 +5363,16 @@ version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] name = "serde_json" -version = "1.0.128" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" +checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" dependencies = [ "itoa", "memchr", @@ -5245,7 +5388,7 @@ checksum = "0431a35568651e363364210c91983c1da5eb29404d9f0928b67d4ebcfa7d330c" dependencies = [ "percent-encoding", "serde", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -5267,7 +5410,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.6.0", + "indexmap 2.7.0", "serde", "serde_derive", "serde_json", @@ -5282,9 +5425,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d846214a9854ef724f3da161b426242d8de7c1fc7de2f89bb1efcb154dca79d" dependencies = [ "darling 0.20.10", - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -5293,7 +5436,7 @@ version = "0.9.34+deprecated" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.0", "itoa", "ryu", "serde", @@ -5320,9 +5463,9 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -5451,9 +5594,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" +checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" dependencies = [ "libc", "windows-sys 0.52.0", @@ -5461,14 +5604,14 @@ dependencies = [ [[package]] name = "soketto" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37468c595637c10857701c990f93a40ce0e357cedb0953d1c26c8d8027f9bb53" +checksum = "2e859df029d160cb88608f5d7df7fb4753fd20fdfb4de5644f3d8b8440841721" dependencies = [ "base64 0.22.1", "bytes", "futures", - "http 1.1.0", + "http 1.2.0", "httparse", "log", "rand", @@ -5549,10 +5692,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", "rustversion", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -5587,18 +5730,18 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", "unicode-ident", ] [[package]] name = "syn" -version = 
"2.0.79" +version = "2.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" +checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", "unicode-ident", ] @@ -5611,9 +5754,9 @@ checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "sync_wrapper" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" [[package]] name = "synstructure" @@ -5621,12 +5764,23 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", "syn 1.0.109", "unicode-xid 0.2.6", ] +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2 1.0.92", + "quote 1.0.37", + "syn 2.0.90", +] + [[package]] name = "tap" version = "1.0.1" @@ -5635,9 +5789,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tar" -version = "0.4.42" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ff6c40d3aedb5e06b57c6f669ad17ab063dd1e63d977c6a88e7f4dfa4f04020" +checksum = "c65998313f8e17d0d553d28f91a0df93e4dbbbf770279c7bc21ca0f09ea1a1f6" dependencies = [ "filetime", "libc", @@ -5652,9 +5806,9 @@ checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" [[package]] name = "tempfile" -version = "3.13.0" +version = 
"3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" +checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" dependencies = [ "cfg-if", "fastrand", @@ -5671,22 +5825,42 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.64" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fec2a1820ebd077e2b90c4df007bebf344cd394098a13c563957d0afc83ea47" +dependencies = [ + "thiserror-impl 2.0.6", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ - "thiserror-impl", + "proc-macro2 1.0.92", + "quote 1.0.37", + "syn 2.0.90", ] [[package]] name = "thiserror-impl" -version = "1.0.64" +version = "2.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" +checksum = "d65750cab40f4ff1929fb1ba509e9914eb756131cef4210da8d5d700d26f6312" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -5711,9 +5885,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.36" +version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +checksum = 
"35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" dependencies = [ "deranged", "itoa", @@ -5732,14 +5906,24 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" dependencies = [ "num-conv", "time-core", ] +[[package]] +name = "tinystr" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + [[package]] name = "tinytemplate" version = "1.2.1" @@ -5767,14 +5951,14 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.40.0" +version = "1.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" +checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" dependencies = [ "backtrace", "bytes", "libc", - "mio 1.0.2", + "mio 1.0.3", "parking_lot", "pin-project-lite", "signal-hook-registry", @@ -5789,27 +5973,26 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] name = "tokio-rustls" -version = "0.26.0" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" +checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" dependencies = [ "rustls", - 
"rustls-pki-types", "tokio", ] [[package]] name = "tokio-stream" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" dependencies = [ "futures-core", "pin-project-lite", @@ -5819,9 +6002,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.12" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" +checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" dependencies = [ "bytes", "futures-core", @@ -5867,7 +6050,7 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.0", "toml_datetime", "winnow 0.5.40", ] @@ -5878,7 +6061,7 @@ version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.0", "serde", "serde_spanned", "toml_datetime", @@ -5897,11 +6080,11 @@ dependencies = [ "base64 0.22.1", "bytes", "flate2", - "h2 0.4.6", - "http 1.1.0", + "h2 0.4.7", + "http 1.2.0", "http-body 1.0.1", "http-body-util", - "hyper 1.5.0", + "hyper 1.5.1", "hyper-timeout", "hyper-util", "percent-encoding", @@ -5925,11 +6108,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9557ce109ea773b399c9b9e5dca39294110b74f1f342cb347a80d1fce8c26a11" dependencies = [ "prettyplease", - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "prost-build", "prost-types", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -5966,7 +6149,7 @@ checksum = 
"5299dd20801ad736dccb4a5ea0da7376e59cd98f213bf1c3d478cf53f4834b58" dependencies = [ "base64 0.22.1", "bytes", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", "http-body-util", "pin-project", @@ -6021,7 +6204,7 @@ checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ "bitflags 2.6.0", "bytes", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", "http-body-util", "pin-project-lite", @@ -6031,13 +6214,13 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8437150ab6bbc8c5f0f519e3d5ed4aa883a83dd4cdd3d1b21f9482936046cb97" +checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" dependencies = [ "bitflags 2.6.0", "bytes", - "http 1.1.0", + "http 1.2.0", "pin-project-lite", "tower-layer", "tower-service", @@ -6057,9 +6240,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.40" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ "log", "pin-project-lite", @@ -6069,20 +6252,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] name = "tracing-core" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" dependencies = [ "once_cell", "valuable", @@ -6101,9 +6284,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" dependencies = [ "matchers", "nu-ansi-term", @@ -6131,7 +6314,7 @@ version = "0.1.0" source = "git+https://github.com/massalabs/transition.git?rev=93fa3bf82f9f5ff421c78536879b7fd1b948ca75#93fa3bf82f9f5ff421c78536879b7fd1b948ca75" dependencies = [ "darling 0.14.4", - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", "syn 1.0.109", "unsigned-varint 0.7.1", @@ -6155,26 +6338,11 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" -[[package]] -name = "unicode-bidi" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" - [[package]] name = "unicode-ident" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" - -[[package]] -name = "unicode-normalization" -version = "0.1.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" -dependencies = [ - "tinyvec", -] +checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" [[package]] name = "unicode-segmentation" @@ -6247,9 +6415,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" 
-version = "2.5.2" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", "idna", @@ -6257,6 +6425,18 @@ dependencies = [ "serde", ] +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "utf8parse" version = "0.2.2" @@ -6324,9 +6504,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.95" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" +checksum = "a474f6281d1d70c17ae7aa6a613c87fce69a127e2624002df63dcb39d6cf6396" dependencies = [ "cfg-if", "once_cell", @@ -6335,36 +6515,36 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.95" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" +checksum = "5f89bb38646b4f81674e8f5c3fb81b562be1fd936d84320f3264486418519c79" dependencies = [ "bumpalo", "log", - "once_cell", - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.90", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.45" +version = "0.4.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc7ec4f8827a71586374db3e87abdb5a2bb3a15afed140221307c3ec06b1f63b" +checksum = 
"38176d9b44ea84e9184eff0bc34cc167ed044f816accfe5922e54d84cf48eca2" dependencies = [ "cfg-if", "js-sys", + "once_cell", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.95" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" +checksum = "2cc6181fd9a7492eef6fef1f33961e3695e4579b9872a6f7c83aee556666d4fe" dependencies = [ "quote 1.0.37", "wasm-bindgen-macro-support", @@ -6372,22 +6552,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.95" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" +checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.90", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.95" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" +checksum = "943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6" [[package]] name = "wasm-encoder" @@ -6415,7 +6595,7 @@ dependencies = [ "serde-wasm-bindgen", "shared-buffer", "target-lexicon", - "thiserror", + "thiserror 1.0.69", "tracing", "wasm-bindgen", "wasmer-compiler", @@ -6448,7 +6628,7 @@ dependencies = [ "self_cell", "shared-buffer", "smallvec", - "thiserror", + "thiserror 1.0.69", "wasmer-object", "wasmer-types", "wasmer-vm", @@ -6506,13 +6686,13 @@ dependencies = [ "ciborium", "derive_builder", "hex", - "indexmap 2.6.0", + "indexmap 2.7.0", "schemars", "semver", "serde", "serde_json", "serde_yaml", - "thiserror", + "thiserror 1.0.69", "toml 0.8.19", "url", ] @@ -6524,7 +6704,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "6f448efbe12d656ba96d997c9e338f15cd80934c81f2286c2730cb9224d4e41d" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", "syn 1.0.109", ] @@ -6547,7 +6727,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70dabd8c3abd7b72a89052f0dd1991aeeabed2a783d474f6c06bca42c8ce73c4" dependencies = [ "object 0.29.0", - "thiserror", + "thiserror 1.0.69", "wasmer-types", ] @@ -6567,7 +6747,7 @@ dependencies = [ "rkyv", "sha2 0.10.8", "target-lexicon", - "thiserror", + "thiserror 1.0.69", "webc", "xxhash-rust", ] @@ -6595,7 +6775,7 @@ dependencies = [ "more-asserts 0.2.2", "region", "scopeguard", - "thiserror", + "thiserror 1.0.69", "wasmer-types", "windows-sys 0.59.0", ] @@ -6607,7 +6787,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dbe55c8f9d0dbd25d9447a5a889ff90c0cc3feaa7395310d3d826b2c703eaab" dependencies = [ "bitflags 2.6.0", - "indexmap 2.6.0", + "indexmap 2.7.0", "semver", ] @@ -6634,9 +6814,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.72" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112" +checksum = "04dd7223427d52553d3702c004d3b2fe07c148165faa56313cb00211e31c12bc" dependencies = [ "js-sys", "wasm-bindgen", @@ -6665,7 +6845,7 @@ dependencies = [ "shared-buffer", "tar", "tempfile", - "thiserror", + "thiserror 1.0.69", "toml 0.8.19", "url", "wasmer-config", @@ -6673,9 +6853,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.6" +version = "0.26.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" +checksum = "5d642ff16b7e79272ae451b7322067cdc17cadf68c23264be9d94a32319efe7e" dependencies = [ "rustls-pki-types", ] @@ -6942,6 +7122,18 @@ dependencies = 
[ "memchr", ] +[[package]] +name = "write16" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" + [[package]] name = "wyz" version = "0.5.1" @@ -6965,7 +7157,7 @@ dependencies = [ "oid-registry", "ring 0.16.20", "rusticata-macros", - "thiserror", + "thiserror 1.0.69", "time", ] @@ -7004,6 +7196,30 @@ dependencies = [ "time", ] +[[package]] +name = "yoke" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +dependencies = [ + "proc-macro2 1.0.92", + "quote 1.0.37", + "syn 2.0.90", + "synstructure 0.13.1", +] + [[package]] name = "zerocopy" version = "0.7.35" @@ -7020,9 +7236,30 @@ version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.90", +] + +[[package]] +name = "zerofrom" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" +dependencies = [ + "proc-macro2 1.0.92", + "quote 1.0.37", + "syn 2.0.90", + "synstructure 0.13.1", ] [[package]] @@ -7040,9 +7277,31 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.88", + "proc-macro2 1.0.92", + "quote 1.0.37", + "syn 2.0.90", +] + +[[package]] +name = "zerovec" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +dependencies = [ + "proc-macro2 1.0.92", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 0390ccbe859..204c227712c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,6 +43,7 @@ members = [ "massa-versioning", "massa-grpc", "massa-xtask", + "massa-event-cache", ] resolver = "2" @@ -105,6 +106,7 @@ massa_test_framework = { path = "./massa-test-framework" } massa_time = { path = "./massa-time" } massa_versioning = { path = "./massa-versioning" } massa_wallet = { path = "./massa-wallet" } +massa_event_cache = { path = "./massa-event-cache" } # Massa projects dependencies # massa-proto-rs = { git = "https://github.com/massalabs/massa-proto-rs", branch = "deferred_calls" } diff --git a/massa-event-cache/Cargo.toml b/massa-event-cache/Cargo.toml new file mode 100644 index 00000000000..dc80de43ab5 --- /dev/null +++ b/massa-event-cache/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "massa_event_cache" +version = "0.1.0" +edition = "2021" + +[dependencies] +nom = {workspace = true} +rocksdb = {workspace 
= true} +tracing = {workspace = true} +massa_models = {workspace = true} +massa_serialization = {workspace = true} + +[dev-dependencies] +tempfile = {workspace = true} +serial_test = {workspace = true} +more-asserts = {workspace = true} +rand = {workspace = true} diff --git a/massa-event-cache/src/config.rs b/massa-event-cache/src/config.rs new file mode 100644 index 00000000000..5702e4a57f9 --- /dev/null +++ b/massa-event-cache/src/config.rs @@ -0,0 +1,14 @@ +use std::path::PathBuf; + +pub struct EventCacheConfig { + /// Path to the hard drive cache storage + pub event_cache_path: PathBuf, + /// Maximum number of entries we want to keep in the event cache + pub event_cache_size: usize, + /// Amount of entries removed when `event_cache_size` is reached + pub snip_amount: usize, + /// Maximum length of an event + pub max_event_length: u64, + /// Thread count + pub thread_count: u8, +} diff --git a/massa-event-cache/src/controller.rs b/massa-event-cache/src/controller.rs new file mode 100644 index 00000000000..08e655f304c --- /dev/null +++ b/massa-event-cache/src/controller.rs @@ -0,0 +1,30 @@ +use crate::{ + config::EventCacheConfig, +}; +use crate::event_cache::EventCache; + +/// Final event cache controller +pub struct EventCacheController { + /// Cache config. + /// See `EventCacheConfig` documentation for more information. + cfg: EventCacheConfig, + /// Event stored cache. + /// See the `EventCache` documentation for more information. 
+ event_cache: EventCache, +} + +impl EventCacheController { + /// Creates a new `EventCacheController` + pub fn new(cfg: EventCacheConfig) -> Self { + let event_cache= EventCache::new( + cfg.event_cache_path.clone(), + cfg.event_cache_size, + cfg.snip_amount, + cfg.thread_count, + ); + Self { + cfg, + event_cache + } + } +} diff --git a/massa-event-cache/src/event_cache.rs b/massa-event-cache/src/event_cache.rs new file mode 100644 index 00000000000..a2be90d170f --- /dev/null +++ b/massa-event-cache/src/event_cache.rs @@ -0,0 +1,247 @@ +// std +use std::path::PathBuf; +// third-party +use rocksdb::{ + DBIterator, IteratorMode, WriteBatch, DB}; +use tracing::debug; +use massa_models::output_event::SCOutputEvent; +use massa_serialization::Serializer; +use crate::ser_deser::{SCOutputEventDeserializer, SCOutputEventDeserializerArgs, SCOutputEventSerializer}; + +const OPEN_ERROR: &str = "critical: rocksdb open operation failed"; +const CRUD_ERROR: &str = "critical: rocksdb crud operation failed"; + +/// Module key formatting macro +#[macro_export] +macro_rules! event_key { + ($event_slot:expr) => { + [&$event_slot.to_bytes()[..], &[MODULE_IDENT]].concat() + }; +} + +pub(crate) struct EventCache { + /// RocksDB database + db: DB, + /// How many entries are in the db. Count is initialized at creation time by iterating + /// over all the entries in the db then it is maintained in memory + entry_count: usize, + /// Maximum number of entries we want to keep in the db. 
+ /// When this maximum is reached `snip_amount` entries are removed + max_entry_count: usize, + /// How many entries are removed when `entry_count` reaches `max_entry_count` + snip_amount: usize, + /// Event serializer + event_ser: SCOutputEventSerializer, + /// Event deserializer + event_deser: SCOutputEventDeserializer, +} + +impl EventCache { + /// Create a new EventCache + pub fn new(path: PathBuf, max_entry_count: usize, snip_amount: usize, thread_count: u8) -> Self { + let db = DB::open_default(path).expect(OPEN_ERROR); + let entry_count = db.iterator(IteratorMode::Start).count(); + + Self { + db, + entry_count, + max_entry_count, + snip_amount, + event_ser: SCOutputEventSerializer::new(), + event_deser: SCOutputEventDeserializer::new(SCOutputEventDeserializerArgs { thread_count }), + } + } + + /// Insert a new event in the cache + pub fn insert(&mut self, event: SCOutputEvent) { + + if self.entry_count >= self.max_entry_count { + self.snip(); + } + + let event_key = { + let mut event_key = event + .context + .slot + .to_bytes_key() + .to_vec(); + event_key.extend(event.context.index_in_slot.to_be_bytes()); + event_key + }; + let mut event_buffer = Vec::new(); + self.event_ser.serialize(&event, &mut event_buffer).unwrap(); + + let mut batch = WriteBatch::default(); + batch.put(event_key, event_buffer); + self.db.write(batch).expect(CRUD_ERROR); + self.entry_count = self.entry_count.saturating_add(1); + + debug!("(Event insert) entry_count is: {}", self.entry_count); + } + + fn db_iter(&self, mode: Option) -> DBIterator { + self.db.iterator(mode.unwrap_or(IteratorMode::Start)) + } + + /// Try to remove as much as `self.amount_to_snip` entries from the db + fn snip(&mut self) { + let mut iter = self.db.iterator(IteratorMode::Start); + let mut batch = WriteBatch::default(); + let mut snipped_count: usize = 0; + + while snipped_count < self.snip_amount { + let key_value = iter.next(); + if key_value.is_none() { + break; + } + // safe to unwrap as we just tested 
it + let kvb = key_value.unwrap().unwrap(); + batch.delete(kvb.0); + snipped_count += 1; + } + + // delete the key and reduce entry_count + self.db.write(batch).expect(CRUD_ERROR); + self.entry_count -= snipped_count; + } +} + +#[cfg(test)] +mod tests { + use super::*; + // third-party + use tempfile::TempDir; + use serial_test::serial; + use more_asserts::assert_gt; + use rand::thread_rng; + use rand::seq::SliceRandom; + // internal + use massa_models::output_event::EventExecutionContext; + use massa_models::config::THREAD_COUNT; + use massa_models::slot::Slot; + + fn setup() -> EventCache { + let tmp_path = TempDir::new().unwrap().path().to_path_buf(); + EventCache::new(tmp_path, 1000, 300, THREAD_COUNT) + } + + #[test] + #[serial] + fn test_db_insert_order() { + + // Test that the data will be correctly ordered (when iterated from start) in db + + let mut cache = setup(); + let slot_1 = Slot::new(1, 0); + let index_1_0 = 0; + let event = SCOutputEvent { + context: EventExecutionContext { + slot: slot_1, + block: None, + read_only: false, + index_in_slot: index_1_0, + call_stack: Default::default(), + origin_operation_id: None, + is_final: true, + is_error: false, + }, + data: "message foo bar".to_string(), + }; + + let mut events = (0..cache.max_entry_count - 5) + .into_iter() + .map(|i| { + let mut event = event.clone(); + event.context.index_in_slot = i as u64; + event + }) + .collect::>(); + + let slot_2 = Slot::new(2, 0); + let event_slot_2 = { + let mut event = event.clone(); + event.context.slot = slot_2; + event.context.index_in_slot = 0u64; + event + }; + let index_2_2 = 256u64; + let event_slot_2_2 = { + let mut event = event.clone(); + event.context.slot = slot_2; + event.context.index_in_slot = index_2_2; + event + }; + events.push(event_slot_2.clone()); + events.push(event_slot_2_2.clone()); + // Randomize the events so we insert in random orders in DB + events.shuffle(&mut thread_rng()); + + for event in events { + cache.insert(event); + } + + // 
Now check that we are going to iter in correct order + let db_it = cache.db_iter(Some(IteratorMode::Start)); + let mut prev_slot = None; + let mut prev_event_index = None; + for kvb in db_it { + if let Ok(kvb) = kvb { + let bytes = kvb.0.iter().as_slice(); + let slot = Slot::from_bytes_key(&bytes[0..=8].try_into().unwrap()); + let event_index = u64::from_be_bytes(bytes[9..].try_into().unwrap()); + if prev_slot.is_some() && prev_event_index.is_some() { + assert_gt!( + (slot, event_index), + (prev_slot.unwrap(), prev_event_index.unwrap()) + ); + } else { + assert_eq!(slot, slot_1); + assert_eq!(event_index, index_1_0); + } + prev_slot = Some(slot); + prev_event_index = Some(event_index); + } + } + + assert_eq!(prev_slot, Some(slot_2)); + assert_eq!(prev_event_index, Some(index_2_2)); + } + + #[test] + #[serial] + fn test_insert_more_than_max_entry() { + + // Test insert (and snip) so we do no store too much event in cache + + let mut cache = setup(); + let event = SCOutputEvent { + context: EventExecutionContext { + slot: Slot::new(1, 0), + block: None, + read_only: false, + index_in_slot: 0, + call_stack: Default::default(), + origin_operation_id: None, + is_final: true, + is_error: false, + }, + data: "message foo bar".to_string(), + }; + + // fill the db: add cache.max_entry_count entries + for count in 0..cache.max_entry_count { + let mut event = event.clone(); + event.context.index_in_slot = count as u64; + cache.insert(event.clone()); + } + assert_eq!(cache.entry_count, cache.max_entry_count); + + // insert one more entry + cache.insert(event.clone()); + assert_eq!( + cache.entry_count, + cache.max_entry_count - cache.snip_amount + 1 + ); + dbg!(cache.entry_count); + } +} \ No newline at end of file diff --git a/massa-event-cache/src/lib.rs b/massa-event-cache/src/lib.rs new file mode 100644 index 00000000000..aab918a1fa3 --- /dev/null +++ b/massa-event-cache/src/lib.rs @@ -0,0 +1,6 @@ +pub mod config; + +pub mod controller; +mod event_cache; +mod ser_deser; + 
diff --git a/massa-event-cache/src/ser_deser.rs b/massa-event-cache/src/ser_deser.rs new file mode 100644 index 00000000000..56b760e1746 --- /dev/null +++ b/massa-event-cache/src/ser_deser.rs @@ -0,0 +1,296 @@ +use std::collections::Bound::{Excluded, Included}; +use std::collections::VecDeque; +// third-party +use nom::{ + error::{context, ContextError, ParseError}, + IResult, Parser, + sequence::tuple, + multi::length_count, + bytes::complete::take, +}; +// internal +use massa_models::output_event::{EventExecutionContext, SCOutputEvent}; +use massa_models::serialization::{StringDeserializer, StringSerializer}; +use massa_models::slot::{SlotDeserializer, SlotSerializer}; +use massa_models::address::{AddressDeserializer, AddressSerializer}; +use massa_models::block_id::{BlockId, BlockIdDeserializer, BlockIdSerializer}; +use massa_models::operation::{OperationId, OperationIdDeserializer, OperationIdSerializer}; +use massa_serialization::{Deserializer, OptionDeserializer, OptionSerializer, SerializeError, Serializer, U32VarIntDeserializer, U32VarIntSerializer, U64VarIntDeserializer, U64VarIntSerializer}; + +/// Metadata serializer +pub struct SCOutputEventSerializer { + u64_ser: U64VarIntSerializer, + u32_ser: U32VarIntSerializer, + slot_ser: SlotSerializer, + addr_ser: AddressSerializer, + block_id_ser: OptionSerializer, + op_id_ser: OptionSerializer, + data_ser: StringSerializer, +} + +impl SCOutputEventSerializer { + pub fn new() -> Self { + Self { + u64_ser: U64VarIntSerializer::new(), + u32_ser: U32VarIntSerializer::new(), + slot_ser: SlotSerializer::new(), + addr_ser: AddressSerializer::new(), + block_id_ser: OptionSerializer::new(BlockIdSerializer::new()), + op_id_ser: OptionSerializer::new(OperationIdSerializer::new()), + data_ser: StringSerializer::new(U64VarIntSerializer::new()), + } + } +} + +impl Default for SCOutputEventSerializer { + fn default() -> Self { + Self::new() + } +} + +impl Serializer for SCOutputEventSerializer { + fn serialize( + &self, + 
value: &SCOutputEvent, + buffer: &mut Vec, + ) -> Result<(), SerializeError> { + // context + self.slot_ser.serialize(&value.context.slot, buffer)?; + self.block_id_ser.serialize(&value.context.block, buffer)?; + buffer.push(u8::from(value.context.read_only)); + self.u64_ser.serialize(&value.context.index_in_slot, buffer)?; + // Components + let call_stack_len_ = value.context.call_stack.len(); + let call_stack_len = u32::try_from(call_stack_len_).map_err(|_| { + SerializeError::GeneralError(format!( + "Cannot convert component_len ({}) to u32", + call_stack_len_ + )) + })?; + // ser vec len + self.u32_ser.serialize(&call_stack_len, buffer)?; + for address in value.context.call_stack.iter() { + self.addr_ser.serialize(address, buffer)?; + } + self.op_id_ser.serialize(&value.context.origin_operation_id, buffer)?; + buffer.push(u8::from(value.context.is_final)); + buffer.push(u8::from(value.context.is_error)); + + // data + self.data_ser.serialize(&value.data, buffer)?; + Ok(()) + } +} + +/// SCOutputEvent deserializer +pub struct SCOutputEventDeserializer { + u64_deser: U64VarIntDeserializer, + u32_deser: U32VarIntDeserializer, + slot_deser: SlotDeserializer, + addr_deser: AddressDeserializer, + block_id_deser: OptionDeserializer, + op_id_deser: OptionDeserializer, + data_deser: StringDeserializer, +} + +impl SCOutputEventDeserializer { + pub fn new(args: SCOutputEventDeserializerArgs) -> Self { + Self { + u64_deser: U64VarIntDeserializer::new(Included(0), Included(u64::MAX)), + // FIXME: is there some limit for call stack len? 
+ u32_deser: U32VarIntDeserializer::new(Included(0), Included(u32::MAX)), + slot_deser: SlotDeserializer::new( + (Included(0), Included(u64::MAX)), + (Included(0), Excluded(args.thread_count)), + ), + addr_deser: Default::default(), + block_id_deser: OptionDeserializer::new(BlockIdDeserializer::new()), + op_id_deser: OptionDeserializer::new(OperationIdDeserializer::new()), + data_deser: StringDeserializer::new(U64VarIntDeserializer::new( + Included(0), + Included(u64::MAX) + )), + } + } +} + +impl Deserializer for SCOutputEventDeserializer { + fn deserialize<'a, E: ParseError<&'a [u8]> + ContextError<&'a [u8]>>( + &self, + buffer: &'a [u8], + ) -> IResult<&'a [u8], SCOutputEvent, E> { + context( + "Failed ScOutputEvent deserialization", + tuple(( + context("Failed slot deserialization", |input| { + self.slot_deser.deserialize(input) + }), + context("Failed BlockId deserialization", |input| { + self.block_id_deser.deserialize(input) + }), + context("Failed read_only deserialization", |input: &'a [u8]| { + + let (rem, read_only) = take(1usize)(input)?; + let read_only = match read_only.first() { + None => { + return IResult::Err(nom::Err::Error(ParseError::from_error_kind( + input, + nom::error::ErrorKind::Fail, + ))); + }, + Some(0) => false, + _ => true, + }; + IResult::Ok((rem, read_only)) + }), + context("Failed index_in_slot deser", |input| { + self.u64_deser.deserialize(input) + }), + length_count( + context("Failed call stack entry count deser", |input| { + self.u32_deser.deserialize(input) + }), + context("Failed call stack items deser", |input| { + self.addr_deser.deserialize(input) + }), + ), + context("Failed OperationId deserialization", |input| { + self.op_id_deser.deserialize(input) + }), + context("Failed is_final deserialization", |input: &'a [u8]| { + + let (rem, read_only) = take(1usize)(input)?; + let read_only = match read_only.first() { + None => { + return IResult::Err(nom::Err::Error(ParseError::from_error_kind( + input, + 
nom::error::ErrorKind::Fail, + ))); + }, + Some(0) => false, + _ => true, + }; + IResult::Ok((rem, read_only)) + }), + context("Failed is_error deserialization", |input: &'a [u8]| { + + let (rem, read_only) = take(1usize)(input)?; + let read_only = match read_only.first() { + None => { + return IResult::Err(nom::Err::Error(ParseError::from_error_kind( + input, + nom::error::ErrorKind::Fail, + ))); + }, + Some(0) => false, + _ => true, + }; + IResult::Ok((rem, read_only)) + }), + context("Failed data deserialization", |input| { + self.data_deser.deserialize(input) + }), + )) + ) + .map(|(slot, bid, read_only, idx, call_stack, oid, is_final, is_error, data)| { + SCOutputEvent { + context: EventExecutionContext { + slot, + block: bid, + read_only, + index_in_slot: idx, + call_stack: VecDeque::from(call_stack), + origin_operation_id: oid, + is_final, + is_error, + }, + data, + } + }) + .parse(buffer) + } +} + + +/// SCOutputEvent deserializer args +#[allow(missing_docs)] +pub struct SCOutputEventDeserializerArgs { + pub thread_count: u8, +} + +#[cfg(test)] +mod test { + use super::*; + use serial_test::serial; + use massa_models::slot::Slot; + use massa_serialization::DeserializeError; + + #[test] + #[serial] + fn test_sc_output_event_ser_der() { + let slot_1 = Slot::new(1, 0); + let index_1_0 = 0; + let event = SCOutputEvent { + context: EventExecutionContext { + slot: slot_1, + block: None, + read_only: false, + index_in_slot: index_1_0, + call_stack: Default::default(), + origin_operation_id: None, + is_final: true, + is_error: false, + }, + data: "message foo bar".to_string(), + }; + + let event_ser = SCOutputEventSerializer::new(); + let event_deser = SCOutputEventDeserializer::new( + SCOutputEventDeserializerArgs { thread_count: 16 } + ); + + let mut buffer = Vec::new(); + event_ser.serialize(&event, &mut buffer).unwrap(); + + let (rem, event_new) = event_deser.deserialize::(&buffer).unwrap(); + + assert_eq!(event.context, event_new.context); + 
assert_eq!(event.data, event_new.data); + assert!(rem.is_empty()); + } + + #[test] + #[serial] + fn test_sc_output_event_ser_der_err() { + + // Test serialization / deserialization with a slot with thread too high + + let slot_1 = Slot::new(1, 99); + let index_1_0 = 0; + let event = SCOutputEvent { + context: EventExecutionContext { + slot: slot_1, + block: None, + read_only: false, + index_in_slot: index_1_0, + call_stack: Default::default(), + origin_operation_id: None, + is_final: true, + is_error: false, + }, + data: "message foo bar".to_string(), + }; + + let event_ser = SCOutputEventSerializer::new(); + let event_deser = SCOutputEventDeserializer::new( + SCOutputEventDeserializerArgs { thread_count: 16 } + ); + + let mut buffer = Vec::new(); + event_ser.serialize(&event, &mut buffer).unwrap(); + + let res = event_deser.deserialize::(&buffer); + // Expect deserialization to fail (slot thread too high) + assert!(res.is_err()); + } +} \ No newline at end of file diff --git a/massa-models/src/output_event.rs b/massa-models/src/output_event.rs index 924b3927bdb..70befd46814 100644 --- a/massa-models/src/output_event.rs +++ b/massa-models/src/output_event.rs @@ -19,7 +19,7 @@ impl Display for SCOutputEvent { } /// Context of the event (not generated by the user) -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct EventExecutionContext { /// when was it generated pub slot: Slot, diff --git a/massa-module-cache/Cargo.toml b/massa-module-cache/Cargo.toml index 23429e5e82a..83a02d52330 100644 --- a/massa-module-cache/Cargo.toml +++ b/massa-module-cache/Cargo.toml @@ -8,7 +8,6 @@ test-exports = ["massa-sc-runtime/testing"] [dependencies] schnellru = {workspace = true} -serial_test = {workspace = true} rand = {workspace = true} # BOM UPGRADE Revert to "0.8.5" if problem num_enum = {workspace = true} nom = {workspace = true} @@ -24,3 +23,4 @@ massa-sc-runtime = {workspace = true, "features" = ["testing"]} 
[dev-dependencies] tempfile = {workspace = true} # BOM UPGRADE Revert to "3.3" if problem +serial_test = {workspace = true} From dbc06e33ce12a99da1e9d447ed4644790072b8fb Mon Sep 17 00:00:00 2001 From: sydhds Date: Wed, 30 Oct 2024 11:43:52 +0100 Subject: [PATCH 02/40] Add event cache controller into massa execution --- Cargo.lock | 1 + massa-event-cache/src/controller.rs | 29 +- massa-event-cache/src/event_cache.rs | 293 +++++++++++++++--- massa-event-cache/src/lib.rs | 1 - massa-event-cache/src/ser_deser.rs | 72 +++-- massa-execution-exports/src/settings.rs | 8 + .../src/test_exports/config.rs | 7 + massa-execution-worker/Cargo.toml | 1 + massa-execution-worker/src/execution.rs | 29 +- massa-node/src/main.rs | 4 + 10 files changed, 350 insertions(+), 95 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 07a3ce878a9..842e51bf9b9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3282,6 +3282,7 @@ dependencies = [ "massa_db_exports", "massa_db_worker", "massa_deferred_calls", + "massa_event_cache", "massa_executed_ops", "massa_execution_exports", "massa_final_state", diff --git a/massa-event-cache/src/controller.rs b/massa-event-cache/src/controller.rs index 08e655f304c..42d32eb0ed3 100644 --- a/massa-event-cache/src/controller.rs +++ b/massa-event-cache/src/controller.rs @@ -1,8 +1,9 @@ -use crate::{ - config::EventCacheConfig, -}; +use crate::config::EventCacheConfig; use crate::event_cache::EventCache; +use massa_models::execution::EventFilter; +use massa_models::output_event::SCOutputEvent; +#[allow(dead_code)] /// Final event cache controller pub struct EventCacheController { /// Cache config. 
@@ -16,15 +17,27 @@ pub struct EventCacheController { impl EventCacheController { /// Creates a new `EventCacheController` pub fn new(cfg: EventCacheConfig) -> Self { - let event_cache= EventCache::new( + let event_cache = EventCache::new( cfg.event_cache_path.clone(), cfg.event_cache_size, cfg.snip_amount, cfg.thread_count, ); - Self { - cfg, - event_cache - } + Self { cfg, event_cache } + } + + pub fn save_events( + &mut self, + events: impl Iterator + Clone, + events_len: Option, + ) { + self.event_cache.insert_multi_it(events, events_len); + } + + pub fn get_filtered_sc_output_events<'b, 'a: 'b>( + &'a self, + filter: &'b EventFilter, + ) -> impl Iterator + 'b { + self.event_cache.get_filtered_sc_output_events(filter) } } diff --git a/massa-event-cache/src/event_cache.rs b/massa-event-cache/src/event_cache.rs index a2be90d170f..ffeda993870 100644 --- a/massa-event-cache/src/event_cache.rs +++ b/massa-event-cache/src/event_cache.rs @@ -1,15 +1,20 @@ +use std::cmp::max; // std +use nom::AsBytes; use std::path::PathBuf; // third-party -use rocksdb::{ - DBIterator, IteratorMode, WriteBatch, DB}; -use tracing::debug; +use crate::ser_deser::{ + SCOutputEventDeserializer, SCOutputEventDeserializerArgs, SCOutputEventSerializer, +}; +use massa_models::execution::EventFilter; use massa_models::output_event::SCOutputEvent; -use massa_serialization::Serializer; -use crate::ser_deser::{SCOutputEventDeserializer, SCOutputEventDeserializerArgs, SCOutputEventSerializer}; +use massa_serialization::{DeserializeError, Deserializer, Serializer}; +use rocksdb::{DBIterator, IteratorMode, WriteBatch, DB}; +use tracing::debug; const OPEN_ERROR: &str = "critical: rocksdb open operation failed"; const CRUD_ERROR: &str = "critical: rocksdb crud operation failed"; +const EVENT_DESER_ERROR: &str = "critical: event deserialization failed"; /// Module key formatting macro #[macro_export] @@ -38,7 +43,12 @@ pub(crate) struct EventCache { impl EventCache { /// Create a new EventCache - pub fn 
new(path: PathBuf, max_entry_count: usize, snip_amount: usize, thread_count: u8) -> Self { + pub fn new( + path: PathBuf, + max_entry_count: usize, + snip_amount: usize, + thread_count: u8, + ) -> Self { let db = DB::open_default(path).expect(OPEN_ERROR); let entry_count = db.iterator(IteratorMode::Start).count(); @@ -48,29 +58,27 @@ impl EventCache { max_entry_count, snip_amount, event_ser: SCOutputEventSerializer::new(), - event_deser: SCOutputEventDeserializer::new(SCOutputEventDeserializerArgs { thread_count }), + event_deser: SCOutputEventDeserializer::new(SCOutputEventDeserializerArgs { + thread_count, + }), } } - + + #[allow(dead_code)] /// Insert a new event in the cache pub fn insert(&mut self, event: SCOutputEvent) { - if self.entry_count >= self.max_entry_count { - self.snip(); + self.snip(None); } let event_key = { - let mut event_key = event - .context - .slot - .to_bytes_key() - .to_vec(); + let mut event_key = event.context.slot.to_bytes_key().to_vec(); event_key.extend(event.context.index_in_slot.to_be_bytes()); event_key }; let mut event_buffer = Vec::new(); self.event_ser.serialize(&event, &mut event_buffer).unwrap(); - + let mut batch = WriteBatch::default(); batch.put(event_key, event_buffer); self.db.write(batch).expect(CRUD_ERROR); @@ -79,27 +87,118 @@ impl EventCache { debug!("(Event insert) entry_count is: {}", self.entry_count); } + /// Insert new events in the cache + /// + /// For performance reason, pass events_len to avoid cloning the iterator + pub fn insert_multi_it( + &mut self, + events: impl Iterator + Clone, + events_len: Option, + ) { + let events_len = events_len.unwrap_or_else(|| events.clone().count()); + + if self.entry_count + events_len >= self.max_entry_count { + let snip_amount = max(self.snip_amount, events_len); + self.snip(Some(snip_amount)); + } + + let mut batch = WriteBatch::default(); + for event in events { + let event_key = { + let mut event_key = event.context.slot.to_bytes_key().to_vec(); + 
event_key.extend(event.context.index_in_slot.to_be_bytes()); + event_key + }; + let mut event_buffer = Vec::new(); + self.event_ser.serialize(&event, &mut event_buffer).unwrap(); + + batch.put(event_key, event_buffer); + } + self.db.write(batch).expect(CRUD_ERROR); + self.entry_count = self.entry_count.saturating_add(events_len); + + debug!("(Events insert) entry_count is: {}", self.entry_count); + } + fn db_iter(&self, mode: Option) -> DBIterator { self.db.iterator(mode.unwrap_or(IteratorMode::Start)) } + pub(crate) fn get_filtered_sc_output_events<'b, 'a: 'b>( + &'a self, + filter: &'b EventFilter, + ) -> impl Iterator + 'b { + self.db_iter(Some(IteratorMode::Start)).filter_map(|kvb| { + let kvb = kvb.unwrap(); + let (_rem, event) = self + .event_deser + .deserialize::(kvb.1.as_bytes()) + .expect(EVENT_DESER_ERROR); + + if let Some(start) = filter.start { + if event.context.slot < start { + return None; + } + } + if let Some(end) = filter.end { + if event.context.slot >= end { + return None; + } + } + if let Some(is_final) = filter.is_final { + if event.context.is_final != is_final { + return None; + } + } + if let Some(is_error) = filter.is_error { + if event.context.is_error != is_error { + return None; + } + } + match ( + filter.original_caller_address, + event.context.call_stack.front(), + ) { + (Some(addr1), Some(addr2)) if addr1 != *addr2 => return None, + (Some(_), None) => return None, + _ => (), + } + match (filter.emitter_address, event.context.call_stack.back()) { + (Some(addr1), Some(addr2)) if addr1 != *addr2 => return None, + (Some(_), None) => return None, + _ => (), + } + match ( + filter.original_operation_id, + event.context.origin_operation_id, + ) { + (Some(addr1), Some(addr2)) if addr1 != addr2 => return None, + (Some(_), None) => return None, + _ => (), + } + Some(event) + }) + } + /// Try to remove as much as `self.amount_to_snip` entries from the db - fn snip(&mut self) { + fn snip(&mut self, snip_amount: Option) { let mut iter = 
self.db.iterator(IteratorMode::Start); let mut batch = WriteBatch::default(); let mut snipped_count: usize = 0; - - while snipped_count < self.snip_amount { + let snip_amount = snip_amount.unwrap_or(self.snip_amount); + + while snipped_count < snip_amount { let key_value = iter.next(); if key_value.is_none() { break; } - // safe to unwrap as we just tested it - let kvb = key_value.unwrap().unwrap(); + let kvb = key_value + .unwrap() // safe to unwrap - just tested it + .expect(EVENT_DESER_ERROR); batch.delete(kvb.0); snipped_count += 1; } - + // delete the key and reduce entry_count self.db.write(batch).expect(CRUD_ERROR); self.entry_count -= snipped_count; @@ -110,14 +209,14 @@ impl EventCache { mod tests { use super::*; // third-party - use tempfile::TempDir; - use serial_test::serial; use more_asserts::assert_gt; - use rand::thread_rng; use rand::seq::SliceRandom; + use rand::thread_rng; + use serial_test::serial; + use tempfile::TempDir; // internal - use massa_models::output_event::EventExecutionContext; use massa_models::config::THREAD_COUNT; + use massa_models::output_event::EventExecutionContext; use massa_models::slot::Slot; fn setup() -> EventCache { @@ -128,9 +227,8 @@ mod tests { #[test] #[serial] fn test_db_insert_order() { - // Test that the data will be correctly ordered (when iterated from start) in db - + let mut cache = setup(); let slot_1 = Slot::new(1, 0); let index_1_0 = 0; @@ -156,7 +254,7 @@ mod tests { event }) .collect::>(); - + let slot_2 = Slot::new(2, 0); let event_slot_2 = { let mut event = event.clone(); @@ -164,7 +262,7 @@ mod tests { event.context.index_in_slot = 0u64; event }; - let index_2_2 = 256u64; + let index_2_2 = 256u64; let event_slot_2_2 = { let mut event = event.clone(); event.context.slot = slot_2; @@ -173,13 +271,13 @@ mod tests { }; events.push(event_slot_2.clone()); events.push(event_slot_2_2.clone()); - // Randomize the events so we insert in random orders in DB + // Randomize the events so we insert in random orders 
in DB events.shuffle(&mut thread_rng()); for event in events { cache.insert(event); } - + // Now check that we are going to iter in correct order let db_it = cache.db_iter(Some(IteratorMode::Start)); let mut prev_slot = None; @@ -191,7 +289,7 @@ mod tests { let event_index = u64::from_be_bytes(bytes[9..].try_into().unwrap()); if prev_slot.is_some() && prev_event_index.is_some() { assert_gt!( - (slot, event_index), + (slot, event_index), (prev_slot.unwrap(), prev_event_index.unwrap()) ); } else { @@ -202,17 +300,16 @@ mod tests { prev_event_index = Some(event_index); } } - + assert_eq!(prev_slot, Some(slot_2)); assert_eq!(prev_event_index, Some(index_2_2)); } - + #[test] #[serial] fn test_insert_more_than_max_entry() { + // Test insert (and snip) so we do no store too much event in cache - // Test insert (and snip) so we do no store too much event in cache - let mut cache = setup(); let event = SCOutputEvent { context: EventExecutionContext { @@ -227,7 +324,7 @@ mod tests { }, data: "message foo bar".to_string(), }; - + // fill the db: add cache.max_entry_count entries for count in 0..cache.max_entry_count { let mut event = event.clone(); @@ -237,11 +334,127 @@ mod tests { assert_eq!(cache.entry_count, cache.max_entry_count); // insert one more entry - cache.insert(event.clone()); + let mut event_last = event.clone(); + event_last.context.index_in_slot = u64::MAX; + cache.insert(event_last); assert_eq!( cache.entry_count, cache.max_entry_count - cache.snip_amount + 1 ); dbg!(cache.entry_count); } -} \ No newline at end of file + + #[test] + #[serial] + fn test_insert_more_than_max_entry_2() { + // Test insert_multi_it (and snip) so we do no store too much event in cache + + let mut cache = setup(); + let event = SCOutputEvent { + context: EventExecutionContext { + slot: Slot::new(1, 0), + block: None, + read_only: false, + index_in_slot: 0, + call_stack: Default::default(), + origin_operation_id: None, + is_final: true, + is_error: false, + }, + data: "message foo 
bar".to_string(), + }; + + let it = (0..cache.max_entry_count).map(|i| { + let mut event = event.clone(); + event.context.index_in_slot = i as u64; + event + }); + cache.insert_multi_it(it, Some(cache.max_entry_count)); + + assert_eq!(cache.entry_count, cache.max_entry_count); + + // insert one more entry + let mut event_last = event.clone(); + event_last.context.index_in_slot = u64::MAX; + cache.insert(event_last); + assert_eq!( + cache.entry_count, + cache.max_entry_count - cache.snip_amount + 1 + ); + dbg!(cache.entry_count); + } + + #[test] + #[serial] + fn test_event_filter() { + // Test that the data will be correctly ordered (when iterated from start) in db + + let mut cache = setup(); + let slot_1 = Slot::new(1, 0); + let index_1_0 = 0; + let event = SCOutputEvent { + context: EventExecutionContext { + slot: slot_1, + block: None, + read_only: false, + index_in_slot: index_1_0, + call_stack: Default::default(), + origin_operation_id: None, + is_final: true, + is_error: false, + }, + data: "message foo bar".to_string(), + }; + + let mut events = (0..cache.max_entry_count - 5) + .into_iter() + .map(|i| { + let mut event = event.clone(); + event.context.index_in_slot = i as u64; + event + }) + .collect::>(); + + let slot_2 = Slot::new(2, 0); + let index_2_1 = 0u64; + let event_slot_2 = { + let mut event = event.clone(); + event.context.slot = slot_2; + event.context.index_in_slot = index_2_1; + event + }; + let index_2_2 = 256u64; + let event_slot_2_2 = { + let mut event = event.clone(); + event.context.slot = slot_2; + event.context.index_in_slot = index_2_2; + event + }; + events.push(event_slot_2.clone()); + events.push(event_slot_2_2.clone()); + // Randomize the events so we insert in random orders in DB + events.shuffle(&mut thread_rng()); + + let events_len = events.len(); + cache.insert_multi_it(events.into_iter(), Some(events_len)); + + let filter_1 = EventFilter { + start: Some(Slot::new(2, 0)), + end: None, + emitter_address: None, + 
original_caller_address: None, + original_operation_id: None, + is_final: None, + is_error: None, + }; + let filtered_events_1 = cache + .get_filtered_sc_output_events(&filter_1) + .collect::>(); + + assert_eq!(filtered_events_1.len(), 2); + assert_eq!(filtered_events_1[0].context.slot, slot_2); + assert_eq!(filtered_events_1[0].context.index_in_slot, index_2_1); + assert_eq!(filtered_events_1[1].context.slot, slot_2); + assert_eq!(filtered_events_1[1].context.index_in_slot, index_2_2); + } +} diff --git a/massa-event-cache/src/lib.rs b/massa-event-cache/src/lib.rs index aab918a1fa3..13061d03c5d 100644 --- a/massa-event-cache/src/lib.rs +++ b/massa-event-cache/src/lib.rs @@ -3,4 +3,3 @@ pub mod config; pub mod controller; mod event_cache; mod ser_deser; - diff --git a/massa-event-cache/src/ser_deser.rs b/massa-event-cache/src/ser_deser.rs index 56b760e1746..38fc5dc29a8 100644 --- a/massa-event-cache/src/ser_deser.rs +++ b/massa-event-cache/src/ser_deser.rs @@ -2,20 +2,23 @@ use std::collections::Bound::{Excluded, Included}; use std::collections::VecDeque; // third-party use nom::{ + bytes::complete::take, error::{context, ContextError, ParseError}, - IResult, Parser, - sequence::tuple, multi::length_count, - bytes::complete::take, + sequence::tuple, + IResult, Parser, }; // internal -use massa_models::output_event::{EventExecutionContext, SCOutputEvent}; -use massa_models::serialization::{StringDeserializer, StringSerializer}; -use massa_models::slot::{SlotDeserializer, SlotSerializer}; use massa_models::address::{AddressDeserializer, AddressSerializer}; use massa_models::block_id::{BlockId, BlockIdDeserializer, BlockIdSerializer}; use massa_models::operation::{OperationId, OperationIdDeserializer, OperationIdSerializer}; -use massa_serialization::{Deserializer, OptionDeserializer, OptionSerializer, SerializeError, Serializer, U32VarIntDeserializer, U32VarIntSerializer, U64VarIntDeserializer, U64VarIntSerializer}; +use 
massa_models::output_event::{EventExecutionContext, SCOutputEvent}; +use massa_models::serialization::{StringDeserializer, StringSerializer}; +use massa_models::slot::{SlotDeserializer, SlotSerializer}; +use massa_serialization::{ + Deserializer, OptionDeserializer, OptionSerializer, SerializeError, Serializer, + U32VarIntDeserializer, U32VarIntSerializer, U64VarIntDeserializer, U64VarIntSerializer, +}; /// Metadata serializer pub struct SCOutputEventSerializer { @@ -49,16 +52,13 @@ impl Default for SCOutputEventSerializer { } impl Serializer for SCOutputEventSerializer { - fn serialize( - &self, - value: &SCOutputEvent, - buffer: &mut Vec, - ) -> Result<(), SerializeError> { + fn serialize(&self, value: &SCOutputEvent, buffer: &mut Vec) -> Result<(), SerializeError> { // context self.slot_ser.serialize(&value.context.slot, buffer)?; self.block_id_ser.serialize(&value.context.block, buffer)?; buffer.push(u8::from(value.context.read_only)); - self.u64_ser.serialize(&value.context.index_in_slot, buffer)?; + self.u64_ser + .serialize(&value.context.index_in_slot, buffer)?; // Components let call_stack_len_ = value.context.call_stack.len(); let call_stack_len = u32::try_from(call_stack_len_).map_err(|_| { @@ -72,7 +72,8 @@ impl Serializer for SCOutputEventSerializer { for address in value.context.call_stack.iter() { self.addr_ser.serialize(address, buffer)?; } - self.op_id_ser.serialize(&value.context.origin_operation_id, buffer)?; + self.op_id_ser + .serialize(&value.context.origin_operation_id, buffer)?; buffer.push(u8::from(value.context.is_final)); buffer.push(u8::from(value.context.is_error)); @@ -107,8 +108,8 @@ impl SCOutputEventDeserializer { block_id_deser: OptionDeserializer::new(BlockIdDeserializer::new()), op_id_deser: OptionDeserializer::new(OperationIdDeserializer::new()), data_deser: StringDeserializer::new(U64VarIntDeserializer::new( - Included(0), - Included(u64::MAX) + Included(0), + Included(u64::MAX), )), } } @@ -129,7 +130,6 @@ impl Deserializer 
for SCOutputEventDeserializer { self.block_id_deser.deserialize(input) }), context("Failed read_only deserialization", |input: &'a [u8]| { - let (rem, read_only) = take(1usize)(input)?; let read_only = match read_only.first() { None => { @@ -137,7 +137,7 @@ impl Deserializer for SCOutputEventDeserializer { input, nom::error::ErrorKind::Fail, ))); - }, + } Some(0) => false, _ => true, }; @@ -158,7 +158,6 @@ impl Deserializer for SCOutputEventDeserializer { self.op_id_deser.deserialize(input) }), context("Failed is_final deserialization", |input: &'a [u8]| { - let (rem, read_only) = take(1usize)(input)?; let read_only = match read_only.first() { None => { @@ -166,14 +165,13 @@ impl Deserializer for SCOutputEventDeserializer { input, nom::error::ErrorKind::Fail, ))); - }, + } Some(0) => false, _ => true, }; IResult::Ok((rem, read_only)) }), context("Failed is_error deserialization", |input: &'a [u8]| { - let (rem, read_only) = take(1usize)(input)?; let read_only = match read_only.first() { None => { @@ -181,7 +179,7 @@ impl Deserializer for SCOutputEventDeserializer { input, nom::error::ErrorKind::Fail, ))); - }, + } Some(0) => false, _ => true, }; @@ -190,9 +188,10 @@ impl Deserializer for SCOutputEventDeserializer { context("Failed data deserialization", |input| { self.data_deser.deserialize(input) }), - )) + )), ) - .map(|(slot, bid, read_only, idx, call_stack, oid, is_final, is_error, data)| { + .map( + |(slot, bid, read_only, idx, call_stack, oid, is_final, is_error, data)| { SCOutputEvent { context: EventExecutionContext { slot, @@ -206,12 +205,12 @@ impl Deserializer for SCOutputEventDeserializer { }, data, } - }) - .parse(buffer) + }, + ) + .parse(buffer) } } - /// SCOutputEvent deserializer args #[allow(missing_docs)] pub struct SCOutputEventDeserializerArgs { @@ -221,9 +220,9 @@ pub struct SCOutputEventDeserializerArgs { #[cfg(test)] mod test { use super::*; - use serial_test::serial; use massa_models::slot::Slot; use massa_serialization::DeserializeError; + 
use serial_test::serial; #[test] #[serial] @@ -245,14 +244,15 @@ mod test { }; let event_ser = SCOutputEventSerializer::new(); - let event_deser = SCOutputEventDeserializer::new( - SCOutputEventDeserializerArgs { thread_count: 16 } - ); + let event_deser = + SCOutputEventDeserializer::new(SCOutputEventDeserializerArgs { thread_count: 16 }); let mut buffer = Vec::new(); event_ser.serialize(&event, &mut buffer).unwrap(); - let (rem, event_new) = event_deser.deserialize::(&buffer).unwrap(); + let (rem, event_new) = event_deser + .deserialize::(&buffer) + .unwrap(); assert_eq!(event.context, event_new.context); assert_eq!(event.data, event_new.data); @@ -262,7 +262,6 @@ mod test { #[test] #[serial] fn test_sc_output_event_ser_der_err() { - // Test serialization / deserialization with a slot with thread too high let slot_1 = Slot::new(1, 99); @@ -282,9 +281,8 @@ mod test { }; let event_ser = SCOutputEventSerializer::new(); - let event_deser = SCOutputEventDeserializer::new( - SCOutputEventDeserializerArgs { thread_count: 16 } - ); + let event_deser = + SCOutputEventDeserializer::new(SCOutputEventDeserializerArgs { thread_count: 16 }); let mut buffer = Vec::new(); event_ser.serialize(&event, &mut buffer).unwrap(); @@ -293,4 +291,4 @@ mod test { // Expect deserialization to fail (slot thread too high) assert!(res.is_err()); } -} \ No newline at end of file +} diff --git a/massa-execution-exports/src/settings.rs b/massa-execution-exports/src/settings.rs index 867803fb041..fd2bae9859f 100644 --- a/massa-execution-exports/src/settings.rs +++ b/massa-execution-exports/src/settings.rs @@ -112,4 +112,12 @@ pub struct ExecutionConfig { pub deferred_calls_config: DeferredCallsConfig, /// Maximum number of event that an operation can emit pub max_event_per_operation: usize, + /// Path to the hard drive event cache storage + pub event_cache_path: PathBuf, + /// Maximum number of entries we want to keep in the Event cache + pub event_cache_size: usize, + /// Amount of entries 
removed when `event_cache_size` is reached + pub event_snip_amount: usize, + /// Max event data (msg) length for 1 event + pub event_max_len: u64, } diff --git a/massa-execution-exports/src/test_exports/config.rs b/massa-execution-exports/src/test_exports/config.rs index daac50eff03..efa4f0bfc42 100644 --- a/massa-execution-exports/src/test_exports/config.rs +++ b/massa-execution-exports/src/test_exports/config.rs @@ -25,6 +25,8 @@ impl Default for ExecutionConfig { // So we need to create it manually (not really safe but ok for unit testing) let hd_cache_path = TempDir::new().unwrap().path().to_path_buf(); std::fs::create_dir_all(hd_cache_path.clone()).unwrap(); + let event_cache_path = TempDir::new().unwrap().path().to_path_buf(); + std::fs::create_dir_all(event_cache_path.clone()).unwrap(); let block_dump_folder_path = TempDir::new().unwrap().path().to_path_buf(); std::fs::create_dir_all(block_dump_folder_path.clone()).unwrap(); @@ -103,6 +105,11 @@ impl Default for ExecutionConfig { max_recursive_calls_depth: 25, condom_limits: limits, deferred_calls_config: DeferredCallsConfig::default(), + + event_cache_path, + event_cache_size: 100, + event_snip_amount: 10, + event_max_len: 512, } } } diff --git a/massa-execution-worker/Cargo.toml b/massa-execution-worker/Cargo.toml index 81dec0483b7..73374aade0a 100644 --- a/massa-execution-worker/Cargo.toml +++ b/massa-execution-worker/Cargo.toml @@ -86,6 +86,7 @@ massa_final_state = { workspace = true } massa_versioning = { workspace = true } massa_db_exports = { workspace = true } massa_db_worker = { workspace = true, optional = true } +massa_event_cache = { workspace = true } tempfile = { workspace = true, optional = true } massa_wallet = { workspace = true } massa-proto-rs = { workspace = true } diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index 8fffb05d7c6..fda702e6ef6 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ 
-17,7 +17,7 @@ use crate::storage_backend::StorageBackend; use massa_async_pool::AsyncMessage; use massa_deferred_calls::DeferredCall; use massa_execution_exports::{ - EventStore, ExecutedBlockInfo, ExecutionBlockMetadata, ExecutionChannels, ExecutionConfig, + ExecutedBlockInfo, ExecutionBlockMetadata, ExecutionChannels, ExecutionConfig, ExecutionError, ExecutionOutput, ExecutionQueryCycleInfos, ExecutionQueryStakerInfo, ExecutionStackElement, ReadOnlyExecutionOutput, ReadOnlyExecutionRequest, ReadOnlyExecutionTarget, SlotExecutionOutput, @@ -28,6 +28,8 @@ use massa_models::address::ExecutionAddressCycleInfo; use massa_models::bytecode::Bytecode; use massa_models::types::{SetOrDelete, SetUpdateOrDelete}; +use massa_event_cache::config::EventCacheConfig; +use massa_event_cache::controller::EventCacheController; use massa_models::datastore::get_prefix_bounds; use massa_models::deferred_calls::DeferredCallId; use massa_models::denunciation::{Denunciation, DenunciationIndex}; @@ -113,7 +115,8 @@ pub(crate) struct ExecutionState { // a cursor pointing to the highest executed final slot pub final_cursor: Slot, // store containing execution events that became final - final_events: EventStore, + // final_events: EventStore, + final_events_cache: EventCacheController, // final state with atomic R/W access final_state: Arc>, // execution context (see documentation in context.rs) @@ -202,6 +205,14 @@ impl ExecutionState { execution_context.clone(), )); + let event_cache_controller = EventCacheController::new(EventCacheConfig { + event_cache_path: config.event_cache_path.clone(), + event_cache_size: config.event_cache_size, + snip_amount: config.event_snip_amount, + max_event_length: config.event_max_len, + thread_count: config.thread_count, + }); + // build the execution state ExecutionState { final_state, @@ -210,7 +221,8 @@ impl ExecutionState { // empty execution output history: it is not recovered through bootstrap active_history, // empty final event store: it is not 
recovered through bootstrap - final_events: Default::default(), + // final_events: Default::default(), + final_events_cache: event_cache_controller, // no active slots executed yet: set active_cursor to the last final block active_cursor: last_final_slot, final_cursor: last_final_slot, @@ -299,8 +311,9 @@ impl ExecutionState { // append generated events to the final event store exec_out.events.finalize(); - self.final_events.extend(exec_out.events); - self.final_events.prune(self.config.max_final_events); + let final_events_count = exec_out.events.0.len(); + self.final_events_cache + .save_events(exec_out.events.0.into_iter(), Some(final_events_count)); // update the prometheus metrics self.massa_metrics @@ -2271,9 +2284,8 @@ impl ExecutionState { pub fn get_filtered_sc_output_event(&self, filter: EventFilter) -> Vec { match filter.is_final { Some(true) => self - .final_events + .final_events_cache .get_filtered_sc_output_events(&filter) - .into_iter() .collect(), Some(false) => self .active_history @@ -2283,9 +2295,8 @@ impl ExecutionState { .flat_map(|item| item.events.get_filtered_sc_output_events(&filter)) .collect(), None => self - .final_events + .final_events_cache .get_filtered_sc_output_events(&filter) - .into_iter() .chain( self.active_history .read() diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index dec4a7745e2..3f3812f7e07 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -566,6 +566,10 @@ async fn launch( condom_limits, deferred_calls_config, max_event_per_operation: MAX_EVENT_PER_OPERATION, + event_cache_path: Default::default(), + event_cache_size: 0, + event_snip_amount: 0, + event_max_len: 0, }; let execution_channels = ExecutionChannels { From 07fdb9c0da2e8c8feca68158b6a1ecaa672ab202 Mon Sep 17 00:00:00 2001 From: sydhds Date: Wed, 30 Oct 2024 11:45:14 +0100 Subject: [PATCH 03/40] Cargo fmt --- massa-execution-worker/src/execution.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git 
a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index fda702e6ef6..df6db440489 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -17,10 +17,10 @@ use crate::storage_backend::StorageBackend; use massa_async_pool::AsyncMessage; use massa_deferred_calls::DeferredCall; use massa_execution_exports::{ - ExecutedBlockInfo, ExecutionBlockMetadata, ExecutionChannels, ExecutionConfig, - ExecutionError, ExecutionOutput, ExecutionQueryCycleInfos, ExecutionQueryStakerInfo, - ExecutionStackElement, ReadOnlyExecutionOutput, ReadOnlyExecutionRequest, - ReadOnlyExecutionTarget, SlotExecutionOutput, + ExecutedBlockInfo, ExecutionBlockMetadata, ExecutionChannels, ExecutionConfig, ExecutionError, + ExecutionOutput, ExecutionQueryCycleInfos, ExecutionQueryStakerInfo, ExecutionStackElement, + ReadOnlyExecutionOutput, ReadOnlyExecutionRequest, ReadOnlyExecutionTarget, + SlotExecutionOutput, }; use massa_final_state::FinalStateController; use massa_metrics::MassaMetrics; From 2285a3ab2493235a2529a445b72d8888e5211842 Mon Sep 17 00:00:00 2001 From: sydhds Date: Wed, 30 Oct 2024 14:30:25 +0100 Subject: [PATCH 04/40] Add event cache config in masa-node --- massa-execution-exports/src/settings.rs | 2 -- massa-execution-exports/src/test_exports/config.rs | 1 - massa-execution-worker/src/execution.rs | 2 +- massa-node/base_config/config.toml | 6 ++++++ massa-node/src/main.rs | 7 +++---- massa-node/src/settings.rs | 4 ++++ 6 files changed, 14 insertions(+), 8 deletions(-) diff --git a/massa-execution-exports/src/settings.rs b/massa-execution-exports/src/settings.rs index fd2bae9859f..7ddc042e19e 100644 --- a/massa-execution-exports/src/settings.rs +++ b/massa-execution-exports/src/settings.rs @@ -118,6 +118,4 @@ pub struct ExecutionConfig { pub event_cache_size: usize, /// Amount of entries removed when `event_cache_size` is reached pub event_snip_amount: usize, - /// Max event data (msg) length for 1 event - 
pub event_max_len: u64, } diff --git a/massa-execution-exports/src/test_exports/config.rs b/massa-execution-exports/src/test_exports/config.rs index efa4f0bfc42..06cb1cb3dc2 100644 --- a/massa-execution-exports/src/test_exports/config.rs +++ b/massa-execution-exports/src/test_exports/config.rs @@ -109,7 +109,6 @@ impl Default for ExecutionConfig { event_cache_path, event_cache_size: 100, event_snip_amount: 10, - event_max_len: 512, } } } diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index df6db440489..991b27413d7 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -209,7 +209,7 @@ impl ExecutionState { event_cache_path: config.event_cache_path.clone(), event_cache_size: config.event_cache_size, snip_amount: config.event_snip_amount, - max_event_length: config.event_max_len, + max_event_length: config.max_event_size as u64, thread_count: config.thread_count, }); diff --git a/massa-node/base_config/config.toml b/massa-node/base_config/config.toml index 92510a1f783..fe2a0656369 100644 --- a/massa-node/base_config/config.toml +++ b/massa-node/base_config/config.toml @@ -221,6 +221,12 @@ broadcast_slot_execution_traces_channel_capacity = 5000 # Max slots execution traces to keep in cache execution_traces_limit = 320 + # path to the event cache storage + event_cache_path = "storage/event_cache/rocks_db" + # maximum number of entries we want to keep in the Event cache + event_cache_size = 2000 + # amount of entries removed when `event_cache_size` is reached + event_snip_amount = 10 [ledger] # path to the initial ledger diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index 3f3812f7e07..863eaf1f7ce 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -566,10 +566,9 @@ async fn launch( condom_limits, deferred_calls_config, max_event_per_operation: MAX_EVENT_PER_OPERATION, - event_cache_path: Default::default(), - event_cache_size: 0, - 
event_snip_amount: 0, - event_max_len: 0, + event_cache_path: SETTINGS.execution.event_cache_path.clone(), + event_cache_size: SETTINGS.execution.event_cache_size, + event_snip_amount: SETTINGS.execution.event_snip_amount, }; let execution_channels = ExecutionChannels { diff --git a/massa-node/src/settings.rs b/massa-node/src/settings.rs index a6a112d568f..ebc43c01753 100644 --- a/massa-node/src/settings.rs +++ b/massa-node/src/settings.rs @@ -36,6 +36,10 @@ pub struct ExecutionSettings { /// slot execution traces channel capacity pub broadcast_slot_execution_traces_channel_capacity: usize, pub execution_traces_limit: usize, + + pub event_cache_path: PathBuf, + pub event_cache_size: usize, + pub event_snip_amount: usize, } #[derive(Clone, Debug, Deserialize)] From d294854caa2daf32bffb6e0fbab76e8d57735972 Mon Sep 17 00:00:00 2001 From: sydhds Date: Thu, 31 Oct 2024 10:37:58 +0100 Subject: [PATCH 05/40] Minor fixes --- massa-event-cache/src/controller.rs | 4 ++-- massa-event-cache/src/event_cache.rs | 15 +++++---------- 2 files changed, 7 insertions(+), 12 deletions(-) diff --git a/massa-event-cache/src/controller.rs b/massa-event-cache/src/controller.rs index 42d32eb0ed3..c89bb7bbc79 100644 --- a/massa-event-cache/src/controller.rs +++ b/massa-event-cache/src/controller.rs @@ -3,9 +3,9 @@ use crate::event_cache::EventCache; use massa_models::execution::EventFilter; use massa_models::output_event::SCOutputEvent; -#[allow(dead_code)] /// Final event cache controller pub struct EventCacheController { + #[allow(dead_code)] /// Cache config. /// See `EventCacheConfig` documentation for more information. 
cfg: EventCacheConfig, @@ -18,7 +18,7 @@ impl EventCacheController { /// Creates a new `EventCacheController` pub fn new(cfg: EventCacheConfig) -> Self { let event_cache = EventCache::new( - cfg.event_cache_path.clone(), + &cfg.event_cache_path, cfg.event_cache_size, cfg.snip_amount, cfg.thread_count, diff --git a/massa-event-cache/src/event_cache.rs b/massa-event-cache/src/event_cache.rs index ffeda993870..2eab644ca7e 100644 --- a/massa-event-cache/src/event_cache.rs +++ b/massa-event-cache/src/event_cache.rs @@ -1,7 +1,7 @@ use std::cmp::max; // std use nom::AsBytes; -use std::path::PathBuf; +use std::path::Path; // third-party use crate::ser_deser::{ SCOutputEventDeserializer, SCOutputEventDeserializerArgs, SCOutputEventSerializer, @@ -43,12 +43,7 @@ pub(crate) struct EventCache { impl EventCache { /// Create a new EventCache - pub fn new( - path: PathBuf, - max_entry_count: usize, - snip_amount: usize, - thread_count: u8, - ) -> Self { + pub fn new(path: &Path, max_entry_count: usize, snip_amount: usize, thread_count: u8) -> Self { let db = DB::open_default(path).expect(OPEN_ERROR); let entry_count = db.iterator(IteratorMode::Start).count(); @@ -180,7 +175,7 @@ impl EventCache { }) } - /// Try to remove as much as `self.amount_to_snip` entries from the db + /// Try to remove some entries from the db fn snip(&mut self, snip_amount: Option) { let mut iter = self.db.iterator(IteratorMode::Start); let mut batch = WriteBatch::default(); @@ -308,7 +303,7 @@ mod tests { #[test] #[serial] fn test_insert_more_than_max_entry() { - // Test insert (and snip) so we do no store too much event in cache + // Test insert (and snip) so we do not store too many event in the cache let mut cache = setup(); let event = SCOutputEvent { @@ -347,7 +342,7 @@ mod tests { #[test] #[serial] fn test_insert_more_than_max_entry_2() { - // Test insert_multi_it (and snip) so we do no store too much event in cache + // Test insert_multi_it (and snip) so we do not store too many event in the 
cache let mut cache = setup(); let event = SCOutputEvent { From c948f41bf63443065e1f60eb3d94b11d01279cbb Mon Sep 17 00:00:00 2001 From: sydhds Date: Thu, 31 Oct 2024 10:55:34 +0100 Subject: [PATCH 06/40] Cargo clippy fixes --- massa-event-cache/src/event_cache.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/massa-event-cache/src/event_cache.rs b/massa-event-cache/src/event_cache.rs index 2eab644ca7e..37e25cdf522 100644 --- a/massa-event-cache/src/event_cache.rs +++ b/massa-event-cache/src/event_cache.rs @@ -216,7 +216,7 @@ mod tests { fn setup() -> EventCache { let tmp_path = TempDir::new().unwrap().path().to_path_buf(); - EventCache::new(tmp_path, 1000, 300, THREAD_COUNT) + EventCache::new(&tmp_path, 1000, 300, THREAD_COUNT) } #[test] From 9648d2536faa51cb2d7578fbc47a64a1b42bc63e Mon Sep 17 00:00:00 2001 From: sydhds Date: Thu, 31 Oct 2024 14:40:17 +0100 Subject: [PATCH 07/40] Cargo clippy fixes --- massa-event-cache/src/event_cache.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/massa-event-cache/src/event_cache.rs b/massa-event-cache/src/event_cache.rs index 37e25cdf522..4f2cfdf0dc8 100644 --- a/massa-event-cache/src/event_cache.rs +++ b/massa-event-cache/src/event_cache.rs @@ -242,7 +242,6 @@ mod tests { }; let mut events = (0..cache.max_entry_count - 5) - .into_iter() .map(|i| { let mut event = event.clone(); event.context.index_in_slot = i as u64; @@ -277,6 +276,7 @@ mod tests { let db_it = cache.db_iter(Some(IteratorMode::Start)); let mut prev_slot = None; let mut prev_event_index = None; + #[allow(clippy::manual_flatten)] for kvb in db_it { if let Ok(kvb) = kvb { let bytes = kvb.0.iter().as_slice(); @@ -402,7 +402,6 @@ mod tests { }; let mut events = (0..cache.max_entry_count - 5) - .into_iter() .map(|i| { let mut event = event.clone(); event.context.index_in_slot = i as u64; From acbc1b49408dad436c69a961b7d57efcafd49286 Mon Sep 17 00:00:00 2001 From: sydhds Date: Mon, 4 Nov 2024 11:21:24 +0100 Subject: [PATCH 
08/40] Add limits & security checks --- massa-event-cache/Cargo.toml | 5 +++ massa-event-cache/src/config.rs | 8 ++-- massa-event-cache/src/controller.rs | 12 +++-- massa-event-cache/src/event_cache.rs | 60 ++++++++++++++++++++----- massa-event-cache/src/ser_deser.rs | 46 +++++++++++-------- massa-execution-worker/src/execution.rs | 8 ++-- massa-models/src/output_event.rs | 3 +- massa-node/base_config/config.toml | 4 +- 8 files changed, 99 insertions(+), 47 deletions(-) diff --git a/massa-event-cache/Cargo.toml b/massa-event-cache/Cargo.toml index dc80de43ab5..7966717d6c3 100644 --- a/massa-event-cache/Cargo.toml +++ b/massa-event-cache/Cargo.toml @@ -3,6 +3,11 @@ name = "massa_event_cache" version = "0.1.0" edition = "2021" +[features] +test-exports = [ + "massa_models/test-exports", +] + [dependencies] nom = {workspace = true} rocksdb = {workspace = true} diff --git a/massa-event-cache/src/config.rs b/massa-event-cache/src/config.rs index 5702e4a57f9..62fcc5d4963 100644 --- a/massa-event-cache/src/config.rs +++ b/massa-event-cache/src/config.rs @@ -4,11 +4,13 @@ pub struct EventCacheConfig { /// Path to the hard drive cache storage pub event_cache_path: PathBuf, /// Maximum number of entries we want to keep in the event cache - pub event_cache_size: usize, + pub max_event_cache_length: usize, /// Amount of entries removed when `event_cache_size` is reached pub snip_amount: usize, - /// Maximum length of an event - pub max_event_length: u64, + /// Maximum length of an event data (aka event message) + pub max_event_data_length: u64, /// Thread count pub thread_count: u8, + /// Call stack max length + pub max_recursive_call_depth: u16, } diff --git a/massa-event-cache/src/controller.rs b/massa-event-cache/src/controller.rs index c89bb7bbc79..ffafc8fa33c 100644 --- a/massa-event-cache/src/controller.rs +++ b/massa-event-cache/src/controller.rs @@ -19,19 +19,17 @@ impl EventCacheController { pub fn new(cfg: EventCacheConfig) -> Self { let event_cache = 
EventCache::new( &cfg.event_cache_path, - cfg.event_cache_size, + cfg.max_event_cache_length, cfg.snip_amount, cfg.thread_count, + cfg.max_recursive_call_depth, + cfg.max_event_data_length, ); Self { cfg, event_cache } } - pub fn save_events( - &mut self, - events: impl Iterator + Clone, - events_len: Option, - ) { - self.event_cache.insert_multi_it(events, events_len); + pub fn save_events(&mut self, events: impl ExactSizeIterator + Clone) { + self.event_cache.insert_multi_it(events); } pub fn get_filtered_sc_output_events<'b, 'a: 'b>( diff --git a/massa-event-cache/src/event_cache.rs b/massa-event-cache/src/event_cache.rs index 4f2cfdf0dc8..d8ff12cee62 100644 --- a/massa-event-cache/src/event_cache.rs +++ b/massa-event-cache/src/event_cache.rs @@ -16,6 +16,7 @@ const OPEN_ERROR: &str = "critical: rocksdb open operation failed"; const CRUD_ERROR: &str = "critical: rocksdb crud operation failed"; const EVENT_DESER_ERROR: &str = "critical: event deserialization failed"; +/* /// Module key formatting macro #[macro_export] macro_rules! event_key { @@ -23,6 +24,7 @@ macro_rules! 
event_key { [&$event_slot.to_bytes()[..], &[MODULE_IDENT]].concat() }; } +*/ pub(crate) struct EventCache { /// RocksDB database @@ -43,20 +45,44 @@ pub(crate) struct EventCache { impl EventCache { /// Create a new EventCache - pub fn new(path: &Path, max_entry_count: usize, snip_amount: usize, thread_count: u8) -> Self { + pub fn new( + path: &Path, + max_entry_count: usize, + snip_amount: usize, + thread_count: u8, + max_recursive_call_depth: u16, + max_event_data_length: u64, + ) -> Self { let db = DB::open_default(path).expect(OPEN_ERROR); - let entry_count = db.iterator(IteratorMode::Start).count(); - Self { + let mut event_cache = Self { db, - entry_count, + entry_count: 0, max_entry_count, snip_amount, event_ser: SCOutputEventSerializer::new(), event_deser: SCOutputEventDeserializer::new(SCOutputEventDeserializerArgs { thread_count, + max_recursive_call_depth, + max_event_data_length, }), + }; + + event_cache.clear(); + event_cache + } + + fn clear(&mut self) { + let iter = self.db.iterator(IteratorMode::Start); + let mut batch = WriteBatch::default(); + + for kvb in iter { + let kvb = kvb.expect(EVENT_DESER_ERROR); + batch.delete(kvb.0); } + + self.db.write(batch).expect(CRUD_ERROR); + self.entry_count = 0; } #[allow(dead_code)] @@ -77,6 +103,9 @@ impl EventCache { let mut batch = WriteBatch::default(); batch.put(event_key, event_buffer); self.db.write(batch).expect(CRUD_ERROR); + + // Note: + // This assumes that events are always added, never overwritten self.entry_count = self.entry_count.saturating_add(1); debug!("(Event insert) entry_count is: {}", self.entry_count); @@ -87,10 +116,9 @@ impl EventCache { /// For performance reason, pass events_len to avoid cloning the iterator pub fn insert_multi_it( &mut self, - events: impl Iterator + Clone, - events_len: Option, + events: impl ExactSizeIterator + Clone, ) { - let events_len = events_len.unwrap_or_else(|| events.clone().count()); + let events_len = events.len(); if self.entry_count + events_len >= 
self.max_entry_count { let snip_amount = max(self.snip_amount, events_len); @@ -110,6 +138,8 @@ impl EventCache { batch.put(event_key, event_buffer); } self.db.write(batch).expect(CRUD_ERROR); + // Note: + // This assumes that events are always added, never overwritten self.entry_count = self.entry_count.saturating_add(events_len); debug!("(Events insert) entry_count is: {}", self.entry_count); @@ -210,13 +240,20 @@ mod tests { use serial_test::serial; use tempfile::TempDir; // internal - use massa_models::config::THREAD_COUNT; + use massa_models::config::{MAX_EVENT_DATA_SIZE, MAX_RECURSIVE_CALLS_DEPTH, THREAD_COUNT}; use massa_models::output_event::EventExecutionContext; use massa_models::slot::Slot; fn setup() -> EventCache { let tmp_path = TempDir::new().unwrap().path().to_path_buf(); - EventCache::new(&tmp_path, 1000, 300, THREAD_COUNT) + EventCache::new( + &tmp_path, + 1000, + 300, + THREAD_COUNT, + MAX_RECURSIVE_CALLS_DEPTH, + MAX_EVENT_DATA_SIZE as u64, + ) } #[test] @@ -364,7 +401,7 @@ mod tests { event.context.index_in_slot = i as u64; event }); - cache.insert_multi_it(it, Some(cache.max_entry_count)); + cache.insert_multi_it(it); assert_eq!(cache.entry_count, cache.max_entry_count); @@ -429,8 +466,7 @@ mod tests { // Randomize the events so we insert in random orders in DB events.shuffle(&mut thread_rng()); - let events_len = events.len(); - cache.insert_multi_it(events.into_iter(), Some(events_len)); + cache.insert_multi_it(events.into_iter()); let filter_1 = EventFilter { start: Some(Slot::new(2, 0)), diff --git a/massa-event-cache/src/ser_deser.rs b/massa-event-cache/src/ser_deser.rs index 38fc5dc29a8..65f047277c8 100644 --- a/massa-event-cache/src/ser_deser.rs +++ b/massa-event-cache/src/ser_deser.rs @@ -22,8 +22,8 @@ use massa_serialization::{ /// Metadata serializer pub struct SCOutputEventSerializer { - u64_ser: U64VarIntSerializer, - u32_ser: U32VarIntSerializer, + index_in_slot_ser: U64VarIntSerializer, + addr_len_ser: U32VarIntSerializer, 
slot_ser: SlotSerializer, addr_ser: AddressSerializer, block_id_ser: OptionSerializer, @@ -34,8 +34,8 @@ pub struct SCOutputEventSerializer { impl SCOutputEventSerializer { pub fn new() -> Self { Self { - u64_ser: U64VarIntSerializer::new(), - u32_ser: U32VarIntSerializer::new(), + index_in_slot_ser: U64VarIntSerializer::new(), + addr_len_ser: U32VarIntSerializer::new(), slot_ser: SlotSerializer::new(), addr_ser: AddressSerializer::new(), block_id_ser: OptionSerializer::new(BlockIdSerializer::new()), @@ -57,7 +57,7 @@ impl Serializer for SCOutputEventSerializer { self.slot_ser.serialize(&value.context.slot, buffer)?; self.block_id_ser.serialize(&value.context.block, buffer)?; buffer.push(u8::from(value.context.read_only)); - self.u64_ser + self.index_in_slot_ser .serialize(&value.context.index_in_slot, buffer)?; // Components let call_stack_len_ = value.context.call_stack.len(); @@ -68,7 +68,7 @@ impl Serializer for SCOutputEventSerializer { )) })?; // ser vec len - self.u32_ser.serialize(&call_stack_len, buffer)?; + self.addr_len_ser.serialize(&call_stack_len, buffer)?; for address in value.context.call_stack.iter() { self.addr_ser.serialize(address, buffer)?; } @@ -85,8 +85,8 @@ impl Serializer for SCOutputEventSerializer { /// SCOutputEvent deserializer pub struct SCOutputEventDeserializer { - u64_deser: U64VarIntDeserializer, - u32_deser: U32VarIntDeserializer, + index_in_slot_deser: U64VarIntDeserializer, + addr_len_deser: U32VarIntDeserializer, slot_deser: SlotDeserializer, addr_deser: AddressDeserializer, block_id_deser: OptionDeserializer, @@ -97,9 +97,11 @@ pub struct SCOutputEventDeserializer { impl SCOutputEventDeserializer { pub fn new(args: SCOutputEventDeserializerArgs) -> Self { Self { - u64_deser: U64VarIntDeserializer::new(Included(0), Included(u64::MAX)), - // FIXME: is there some limit for call stack len? 
- u32_deser: U32VarIntDeserializer::new(Included(0), Included(u32::MAX)), + index_in_slot_deser: U64VarIntDeserializer::new(Included(0), Included(u64::MAX)), + addr_len_deser: U32VarIntDeserializer::new( + Included(0), + Included(u32::from(args.max_recursive_call_depth)), + ), slot_deser: SlotDeserializer::new( (Included(0), Included(u64::MAX)), (Included(0), Excluded(args.thread_count)), @@ -109,7 +111,7 @@ impl SCOutputEventDeserializer { op_id_deser: OptionDeserializer::new(OperationIdDeserializer::new()), data_deser: StringDeserializer::new(U64VarIntDeserializer::new( Included(0), - Included(u64::MAX), + Included(args.max_event_data_length), )), } } @@ -144,11 +146,11 @@ impl Deserializer for SCOutputEventDeserializer { IResult::Ok((rem, read_only)) }), context("Failed index_in_slot deser", |input| { - self.u64_deser.deserialize(input) + self.index_in_slot_deser.deserialize(input) }), length_count( context("Failed call stack entry count deser", |input| { - self.u32_deser.deserialize(input) + self.addr_len_deser.deserialize(input) }), context("Failed call stack items deser", |input| { self.addr_deser.deserialize(input) @@ -215,6 +217,8 @@ impl Deserializer for SCOutputEventDeserializer { #[allow(missing_docs)] pub struct SCOutputEventDeserializerArgs { pub thread_count: u8, + pub max_recursive_call_depth: u16, + pub max_event_data_length: u64, } #[cfg(test)] @@ -244,8 +248,11 @@ mod test { }; let event_ser = SCOutputEventSerializer::new(); - let event_deser = - SCOutputEventDeserializer::new(SCOutputEventDeserializerArgs { thread_count: 16 }); + let event_deser = SCOutputEventDeserializer::new(SCOutputEventDeserializerArgs { + thread_count: 16, + max_recursive_call_depth: 25, + max_event_data_length: 512, + }); let mut buffer = Vec::new(); event_ser.serialize(&event, &mut buffer).unwrap(); @@ -281,8 +288,11 @@ mod test { }; let event_ser = SCOutputEventSerializer::new(); - let event_deser = - SCOutputEventDeserializer::new(SCOutputEventDeserializerArgs { 
thread_count: 16 }); + let event_deser = SCOutputEventDeserializer::new(SCOutputEventDeserializerArgs { + thread_count: 16, + max_recursive_call_depth: 25, + max_event_data_length: 512, + }); let mut buffer = Vec::new(); event_ser.serialize(&event, &mut buffer).unwrap(); diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index 991b27413d7..c902aef1681 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -207,10 +207,11 @@ impl ExecutionState { let event_cache_controller = EventCacheController::new(EventCacheConfig { event_cache_path: config.event_cache_path.clone(), - event_cache_size: config.event_cache_size, + max_event_cache_length: config.event_cache_size, snip_amount: config.event_snip_amount, - max_event_length: config.max_event_size as u64, + max_event_data_length: config.max_event_size as u64, thread_count: config.thread_count, + max_recursive_call_depth: config.max_recursive_calls_depth, }); // build the execution state @@ -311,9 +312,8 @@ impl ExecutionState { // append generated events to the final event store exec_out.events.finalize(); - let final_events_count = exec_out.events.0.len(); self.final_events_cache - .save_events(exec_out.events.0.into_iter(), Some(final_events_count)); + .save_events(exec_out.events.0.into_iter()); // update the prometheus metrics self.massa_metrics diff --git a/massa-models/src/output_event.rs b/massa-models/src/output_event.rs index 70befd46814..7b7a70e6cd7 100644 --- a/massa-models/src/output_event.rs +++ b/massa-models/src/output_event.rs @@ -19,7 +19,8 @@ impl Display for SCOutputEvent { } /// Context of the event (not generated by the user) -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[cfg_attr(feature = "test-exports", derive(PartialEq))] pub struct EventExecutionContext { /// when was it generated pub slot: Slot, diff --git 
a/massa-node/base_config/config.toml b/massa-node/base_config/config.toml index fe2a0656369..20862958936 100644 --- a/massa-node/base_config/config.toml +++ b/massa-node/base_config/config.toml @@ -223,8 +223,8 @@ execution_traces_limit = 320 # path to the event cache storage event_cache_path = "storage/event_cache/rocks_db" - # maximum number of entries we want to keep in the Event cache - event_cache_size = 2000 + # maximum number of entries we want to keep in the Event cache (~ 10 Gb) + event_cache_size = 20071520 # amount of entries removed when `event_cache_size` is reached event_snip_amount = 10 From 23725f0edb0a23cafbca482145658d6c01227847 Mon Sep 17 00:00:00 2001 From: sydhds Date: Wed, 6 Nov 2024 11:37:12 +0100 Subject: [PATCH 09/40] Add controller + manager --- Cargo.lock | 2 + massa-event-cache/Cargo.toml | 1 + massa-event-cache/src/controller.rs | 90 +++++++++----- massa-event-cache/src/lib.rs | 2 + massa-event-cache/src/worker.rs | 156 ++++++++++++++++++++++++ massa-execution-worker/src/execution.rs | 22 ++-- massa-execution-worker/src/worker.rs | 3 + massa-node/Cargo.toml | 1 + massa-node/src/main.rs | 24 ++++ 9 files changed, 260 insertions(+), 41 deletions(-) create mode 100644 massa-event-cache/src/worker.rs diff --git a/Cargo.lock b/Cargo.lock index 842e51bf9b9..38855fc06b6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2879,6 +2879,7 @@ dependencies = [ "massa_db_exports", "massa_db_worker", "massa_deferred_calls", + "massa_event_cache", "massa_executed_ops", "massa_execution_exports", "massa_execution_worker", @@ -3218,6 +3219,7 @@ dependencies = [ "massa_serialization", "more-asserts 0.3.1", "nom", + "parking_lot", "rand", "rocksdb", "serial_test", diff --git a/massa-event-cache/Cargo.toml b/massa-event-cache/Cargo.toml index 7966717d6c3..48cb498b9ff 100644 --- a/massa-event-cache/Cargo.toml +++ b/massa-event-cache/Cargo.toml @@ -12,6 +12,7 @@ test-exports = [ nom = {workspace = true} rocksdb = {workspace = true} tracing = {workspace = true} 
+parking_lot = { workspace = true } massa_models = {workspace = true} massa_serialization = {workspace = true} diff --git a/massa-event-cache/src/controller.rs b/massa-event-cache/src/controller.rs index ffafc8fa33c..a12b1d659c8 100644 --- a/massa-event-cache/src/controller.rs +++ b/massa-event-cache/src/controller.rs @@ -1,41 +1,71 @@ -use crate::config::EventCacheConfig; -use crate::event_cache::EventCache; +// std +use std::sync::Arc; +use std::collections::VecDeque; +// third-party +use parking_lot::{Condvar, Mutex, RwLock}; +// internal use massa_models::execution::EventFilter; use massa_models::output_event::SCOutputEvent; +use crate::event_cache::EventCache; -/// Final event cache controller -pub struct EventCacheController { - #[allow(dead_code)] - /// Cache config. - /// See `EventCacheConfig` documentation for more information. - cfg: EventCacheConfig, - /// Event stored cache. - /// See the `EventCache` documentation for more information. - event_cache: EventCache, +/// structure used to communicate with controller +#[derive(Debug)] +pub(crate) struct EventCacheWriterInputData { + /// set stop to true to stop the thread + pub stop: bool, + pub(crate) events: VecDeque, } -impl EventCacheController { - /// Creates a new `EventCacheController` - pub fn new(cfg: EventCacheConfig) -> Self { - let event_cache = EventCache::new( - &cfg.event_cache_path, - cfg.max_event_cache_length, - cfg.snip_amount, - cfg.thread_count, - cfg.max_recursive_call_depth, - cfg.max_event_data_length, - ); - Self { cfg, event_cache } +impl EventCacheWriterInputData { + + pub fn new() -> Self { + Self { + stop: Default::default(), + events: Default::default(), + } } - pub fn save_events(&mut self, events: impl ExactSizeIterator + Clone) { - self.event_cache.insert_multi_it(events); + /// Takes the current input data into a clone that is returned, + /// and resets self. 
+ pub fn take(&mut self) -> Self { + Self { + stop: std::mem::take(&mut self.stop), + events: std::mem::take(&mut self.events), + } + } +} + +/// interface that communicates with the worker thread +pub trait EventCacheController: Send + Sync { + fn save_events(&self, events: VecDeque); + + fn get_filtered_sc_output_events( + &self, + filter: &EventFilter, + ) -> Vec; +} + +#[derive(Clone)] +/// implementation of the event cache controller +pub struct EventCacheControllerImpl { + /// input data to process in the VM loop + /// with a wake-up condition variable that needs to be triggered when the data changes + pub(crate) input_data: Arc<(Condvar, Mutex)>, + /// Event cache + pub(crate) cache: Arc>, +} + +impl EventCacheController for EventCacheControllerImpl { + fn save_events(&self, events: VecDeque) { + // lock input data + let mut input_data = self.input_data.1.lock(); + input_data.events = events; + // wake up VM loop + self.input_data.0.notify_one(); } - pub fn get_filtered_sc_output_events<'b, 'a: 'b>( - &'a self, - filter: &'b EventFilter, - ) -> impl Iterator + 'b { - self.event_cache.get_filtered_sc_output_events(filter) + fn get_filtered_sc_output_events(&self, filter: &EventFilter) -> Vec { + let lock = self.cache.read(); + lock.get_filtered_sc_output_events(filter).collect() } } diff --git a/massa-event-cache/src/lib.rs b/massa-event-cache/src/lib.rs index 13061d03c5d..e6237fcd57a 100644 --- a/massa-event-cache/src/lib.rs +++ b/massa-event-cache/src/lib.rs @@ -3,3 +3,5 @@ pub mod config; pub mod controller; mod event_cache; mod ser_deser; +pub mod worker; + diff --git a/massa-event-cache/src/worker.rs b/massa-event-cache/src/worker.rs new file mode 100644 index 00000000000..835f4dcaa9e --- /dev/null +++ b/massa-event-cache/src/worker.rs @@ -0,0 +1,156 @@ +// std +use std::sync::Arc; +use std::thread; +// third-party +use parking_lot::{Condvar, Mutex, RwLock}; +use tracing::{debug, info}; +// internal +use crate::config::EventCacheConfig; +use 
crate::controller::{EventCacheController, EventCacheControllerImpl, EventCacheWriterInputData}; +use crate::event_cache::EventCache; + +/// Structure gathering all elements needed by the event cache thread +pub(crate) struct EventCacheWriterThread { + // A copy of the input data allowing access to incoming requests + input_data: Arc<(Condvar, Mutex)>, + /// Event cache + cache: Arc> +} + +impl EventCacheWriterThread { + fn new(input_data: Arc<(Condvar, Mutex)>, + event_cache: Arc>, + ) -> Self { + Self { + input_data, + cache: event_cache + } + } + + /// Waits for an event to trigger a new iteration in the .... + /// + /// # Returns + /// `ExecutionInputData` representing the input requests, + /// and a boolean saying whether we should stop the loop. + fn wait_loop_event(&mut self) -> (EventCacheWriterInputData, bool) { + loop { + // lock input data + let mut input_data_lock = self.input_data.1.lock(); + + // take current input data, resetting it + let input_data: EventCacheWriterInputData = input_data_lock.take(); + + // Check if there is some input data + if !input_data.events.is_empty() { + return (input_data, false); + } + + // if we need to stop, return None + if input_data.stop { + return (input_data, true); + } + + // FIXME / TODO: should we sleep here? 
+ } + } + + /// Main loop of the worker + pub fn main_loop(&mut self) { + loop { + let (input_data, stop) = self.wait_loop_event(); + debug!("Event cache writer loop triggered, input_data = {:?}", input_data); + + if stop { + // we need to stop + break; + } + + let mut lock = self.cache.write(); + lock.insert_multi_it(input_data.events.into_iter()); + drop(lock); + } + } +} + +/// Event cache manager trait used to stop the event cache thread +pub trait EventCacheManager { + /// Stop the event cache thread + /// Note that we do not take self by value to consume it + /// because it is not allowed to move out of `Box` + /// This will improve if the `unsized_fn_params` feature stabilizes enough to be safely usable. + fn stop(&mut self); +} + +/// ... manager +/// Allows stopping the ... worker +pub struct EventCacheWriterManagerImpl { + /// input data to process in the VM loop + /// with a wake-up condition variable that needs to be triggered when the data changes + pub(crate) input_data: Arc<(Condvar, Mutex)>, + /// handle used to join the worker thread + pub(crate) thread_handle: Option>, +} + +impl EventCacheManager for EventCacheWriterManagerImpl { + + /// stops the worker + fn stop(&mut self) { + info!("Stopping Execution controller..."); + // notify the worker thread to stop + { + let mut input_wlock = self.input_data.1.lock(); + input_wlock.stop = true; + self.input_data.0.notify_one(); + } + // join the thread + if let Some(join_handle) = self.thread_handle.take() { + join_handle.join().expect("VM controller thread panicked"); + } + info!("Execution controller stopped"); + } +} + +pub fn start_event_cache_writer_worker(cfg: EventCacheConfig) -> (Box, Box) { + + let event_cache = Arc::new( + RwLock::new( + EventCache::new( + cfg.event_cache_path.as_path(), + cfg.max_event_cache_length, + cfg.snip_amount, + cfg.thread_count, + cfg.max_recursive_call_depth, + cfg.max_event_data_length, + ))); + + // define the input data interface + let input_data = Arc::new(( + 
Condvar::new(), + Mutex::new(EventCacheWriterInputData::new()), + )); + let input_data_clone = input_data.clone(); + + // create a controller + let controller = EventCacheControllerImpl { + input_data: input_data.clone(), + cache: event_cache.clone(), + }; + + let thread_builder = thread::Builder::new() + .name("event_cache".into()); + let thread_handle = thread_builder + .spawn(move || { + EventCacheWriterThread::new(input_data_clone, event_cache).main_loop(); + }) + .expect("failed to spawn thread : event_cache"); + + // create a manager + let manager = EventCacheWriterManagerImpl { + input_data, + thread_handle: Some(thread_handle), + }; + + // return the manager and controller pair + (Box::new(manager), Box::new(controller)) +} + diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index c902aef1681..927ef23c473 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -16,6 +16,8 @@ use crate::stats::ExecutionStatsCounter; use crate::storage_backend::StorageBackend; use massa_async_pool::AsyncMessage; use massa_deferred_calls::DeferredCall; +use massa_event_cache::config::EventCacheConfig; +use massa_event_cache::controller::EventCacheController; use massa_execution_exports::{ ExecutedBlockInfo, ExecutionBlockMetadata, ExecutionChannels, ExecutionConfig, ExecutionError, ExecutionOutput, ExecutionQueryCycleInfos, ExecutionQueryStakerInfo, ExecutionStackElement, @@ -26,10 +28,6 @@ use massa_final_state::FinalStateController; use massa_metrics::MassaMetrics; use massa_models::address::ExecutionAddressCycleInfo; use massa_models::bytecode::Bytecode; -use massa_models::types::{SetOrDelete, SetUpdateOrDelete}; - -use massa_event_cache::config::EventCacheConfig; -use massa_event_cache::controller::EventCacheController; use massa_models::datastore::get_prefix_bounds; use massa_models::deferred_calls::DeferredCallId; use massa_models::denunciation::{Denunciation, DenunciationIndex}; 
@@ -38,6 +36,7 @@ use massa_models::output_event::SCOutputEvent; use massa_models::prehash::PreHashSet; use massa_models::stats::ExecutionStats; use massa_models::timeslots::get_block_slot_timestamp; +use massa_models::types::{SetOrDelete, SetUpdateOrDelete}; use massa_models::{ address::Address, block_id::BlockId, @@ -115,8 +114,7 @@ pub(crate) struct ExecutionState { // a cursor pointing to the highest executed final slot pub final_cursor: Slot, // store containing execution events that became final - // final_events: EventStore, - final_events_cache: EventCacheController, + final_events_cache: Box, // final state with atomic R/W access final_state: Arc>, // execution context (see documentation in context.rs) @@ -163,6 +161,7 @@ impl ExecutionState { channels: ExecutionChannels, wallet: Arc>, massa_metrics: MassaMetrics, + event_cache: Box, #[cfg(feature = "dump-block")] block_storage_backend: Arc>, ) -> ExecutionState { // Get the slot at the output of which the final state is attached. 
@@ -205,6 +204,7 @@ impl ExecutionState { execution_context.clone(), )); + /* let event_cache_controller = EventCacheController::new(EventCacheConfig { event_cache_path: config.event_cache_path.clone(), max_event_cache_length: config.event_cache_size, @@ -213,6 +213,7 @@ impl ExecutionState { thread_count: config.thread_count, max_recursive_call_depth: config.max_recursive_calls_depth, }); + */ // build the execution state ExecutionState { @@ -223,7 +224,7 @@ impl ExecutionState { active_history, // empty final event store: it is not recovered through bootstrap // final_events: Default::default(), - final_events_cache: event_cache_controller, + final_events_cache: event_cache, // no active slots executed yet: set active_cursor to the last final block active_cursor: last_final_slot, final_cursor: last_final_slot, @@ -312,8 +313,7 @@ impl ExecutionState { // append generated events to the final event store exec_out.events.finalize(); - self.final_events_cache - .save_events(exec_out.events.0.into_iter()); + self.final_events_cache.save_events(exec_out.events.0); // update the prometheus metrics self.massa_metrics @@ -2285,8 +2285,7 @@ impl ExecutionState { match filter.is_final { Some(true) => self .final_events_cache - .get_filtered_sc_output_events(&filter) - .collect(), + .get_filtered_sc_output_events(&filter), Some(false) => self .active_history .read() @@ -2297,6 +2296,7 @@ impl ExecutionState { None => self .final_events_cache .get_filtered_sc_output_events(&filter) + .into_iter() .chain( self.active_history .read() diff --git a/massa-execution-worker/src/worker.rs b/massa-execution-worker/src/worker.rs index 5fee6e65eac..e3e1dc17368 100644 --- a/massa-execution-worker/src/worker.rs +++ b/massa-execution-worker/src/worker.rs @@ -29,6 +29,7 @@ use parking_lot::{Condvar, Mutex, RwLock}; use std::sync::Arc; use std::thread; use tracing::debug; +use massa_event_cache::controller::EventCacheController; /// Structure gathering all elements needed by the execution 
thread pub(crate) struct ExecutionThread { @@ -258,6 +259,7 @@ pub fn start_execution_worker( channels: ExecutionChannels, wallet: Arc>, massa_metrics: MassaMetrics, + event_cache: Box, #[cfg(feature = "dump-block")] block_storage_backend: Arc>, ) -> (Box, Box) { if config.hd_cache_size < config.snip_amount { @@ -273,6 +275,7 @@ pub fn start_execution_worker( channels, wallet, massa_metrics, + event_cache, #[cfg(feature = "dump-block")] block_storage_backend, ))); diff --git a/massa-node/Cargo.toml b/massa-node/Cargo.toml index aadb5cd6682..dc7083f961b 100644 --- a/massa-node/Cargo.toml +++ b/massa-node/Cargo.toml @@ -88,3 +88,4 @@ massa_versioning = { workspace = true } massa_signature = { workspace = true } massa_db_exports = { workspace = true } massa_db_worker = { workspace = true } +massa_event_cache = { workspace = true } diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index 863eaf1f7ce..c4e6f96c6aa 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -131,6 +131,8 @@ use survey::MassaSurveyStopper; use tokio::sync::broadcast; use tracing::{debug, error, info, warn}; use tracing_subscriber::filter::{filter_fn, LevelFilter}; +use massa_event_cache::config::EventCacheConfig; +use massa_event_cache::worker::{start_event_cache_writer_worker, EventCacheManager}; #[cfg(feature = "op_spammer")] mod operation_injector; @@ -150,6 +152,7 @@ async fn launch( Box, Box, Box, + Box, StopHandle, StopHandle, StopHandle, @@ -472,6 +475,19 @@ async fn launch( ); } } + + // Event cache thread + let event_cache_config = EventCacheConfig { + event_cache_path: SETTINGS.execution.event_cache_path.clone(), + max_event_cache_length: SETTINGS.execution.event_cache_size, + snip_amount: SETTINGS.execution.event_snip_amount, + max_event_data_length: MAX_EVENT_DATA_SIZE as u64, + thread_count: THREAD_COUNT, + max_recursive_call_depth: MAX_RECURSIVE_CALLS_DEPTH, + }; + let (event_cache_manager, event_cache_controller) = start_event_cache_writer_worker( + 
event_cache_config + ); // Storage costs constants let storage_costs_constants = StorageCostsConstants { @@ -607,6 +623,7 @@ async fn launch( execution_channels.clone(), node_wallet.clone(), massa_metrics.clone(), + event_cache_controller, #[cfg(feature = "dump-block")] block_storage_backend.clone(), ); @@ -1143,6 +1160,7 @@ async fn launch( pool_manager, protocol_manager, factory_manager, + event_cache_manager, api_private_handle, api_public_handle, api_handle, @@ -1238,6 +1256,7 @@ struct Managers { pool_manager: Box, protocol_manager: Box, factory_manager: Box, + event_cache_manager: Box, } #[allow(clippy::too_many_arguments)] @@ -1251,6 +1270,7 @@ async fn stop( mut pool_manager, mut protocol_manager, mut factory_manager, + mut event_cache_manager, }: Managers, api_private_handle: StopHandle, api_public_handle: StopHandle, @@ -1322,6 +1342,8 @@ async fn stop( //let protocol_pool_event_receiver = pool_manager.stop().await.expect("pool shutdown failed"); // note that FinalLedger gets destroyed as soon as its Arc count goes to zero + + event_cache_manager.stop(); } #[derive(Parser)] @@ -1471,6 +1493,7 @@ async fn run(args: Args) -> anyhow::Result<()> { pool_manager, protocol_manager, factory_manager, + event_cache_manager, api_private_handle, api_public_handle, api_handle, @@ -1537,6 +1560,7 @@ async fn run(args: Args) -> anyhow::Result<()> { pool_manager, protocol_manager, factory_manager, + event_cache_manager, }, api_private_handle, api_public_handle, From 30864954e1c548a3536edea2ae67eeae3a52950d Mon Sep 17 00:00:00 2001 From: sydhds Date: Wed, 6 Nov 2024 11:43:26 +0100 Subject: [PATCH 10/40] Cargo fmt pass --- massa-event-cache/src/controller.rs | 10 ++---- massa-event-cache/src/event_cache.rs | 2 +- massa-event-cache/src/lib.rs | 1 - massa-event-cache/src/worker.rs | 53 ++++++++++++++-------------- massa-execution-worker/src/worker.rs | 2 +- massa-node/src/main.rs | 13 ++++--- 6 files changed, 37 insertions(+), 44 deletions(-) diff --git 
a/massa-event-cache/src/controller.rs b/massa-event-cache/src/controller.rs index a12b1d659c8..d03aab36e82 100644 --- a/massa-event-cache/src/controller.rs +++ b/massa-event-cache/src/controller.rs @@ -1,12 +1,12 @@ // std -use std::sync::Arc; use std::collections::VecDeque; +use std::sync::Arc; // third-party use parking_lot::{Condvar, Mutex, RwLock}; // internal +use crate::event_cache::EventCache; use massa_models::execution::EventFilter; use massa_models::output_event::SCOutputEvent; -use crate::event_cache::EventCache; /// structure used to communicate with controller #[derive(Debug)] @@ -17,7 +17,6 @@ pub(crate) struct EventCacheWriterInputData { } impl EventCacheWriterInputData { - pub fn new() -> Self { Self { stop: Default::default(), @@ -39,10 +38,7 @@ impl EventCacheWriterInputData { pub trait EventCacheController: Send + Sync { fn save_events(&self, events: VecDeque); - fn get_filtered_sc_output_events( - &self, - filter: &EventFilter, - ) -> Vec; + fn get_filtered_sc_output_events(&self, filter: &EventFilter) -> Vec; } #[derive(Clone)] diff --git a/massa-event-cache/src/event_cache.rs b/massa-event-cache/src/event_cache.rs index d8ff12cee62..af91295b520 100644 --- a/massa-event-cache/src/event_cache.rs +++ b/massa-event-cache/src/event_cache.rs @@ -76,7 +76,7 @@ impl EventCache { let iter = self.db.iterator(IteratorMode::Start); let mut batch = WriteBatch::default(); - for kvb in iter { + for kvb in iter { let kvb = kvb.expect(EVENT_DESER_ERROR); batch.delete(kvb.0); } diff --git a/massa-event-cache/src/lib.rs b/massa-event-cache/src/lib.rs index e6237fcd57a..4e4ccea9a2a 100644 --- a/massa-event-cache/src/lib.rs +++ b/massa-event-cache/src/lib.rs @@ -4,4 +4,3 @@ pub mod controller; mod event_cache; mod ser_deser; pub mod worker; - diff --git a/massa-event-cache/src/worker.rs b/massa-event-cache/src/worker.rs index 835f4dcaa9e..a5bef6e16bc 100644 --- a/massa-event-cache/src/worker.rs +++ b/massa-event-cache/src/worker.rs @@ -6,7 +6,9 @@ use 
parking_lot::{Condvar, Mutex, RwLock}; use tracing::{debug, info}; // internal use crate::config::EventCacheConfig; -use crate::controller::{EventCacheController, EventCacheControllerImpl, EventCacheWriterInputData}; +use crate::controller::{ + EventCacheController, EventCacheControllerImpl, EventCacheWriterInputData, +}; use crate::event_cache::EventCache; /// Structure gathering all elements needed by the event cache thread @@ -14,16 +16,17 @@ pub(crate) struct EventCacheWriterThread { // A copy of the input data allowing access to incoming requests input_data: Arc<(Condvar, Mutex)>, /// Event cache - cache: Arc> + cache: Arc>, } impl EventCacheWriterThread { - fn new(input_data: Arc<(Condvar, Mutex)>, + fn new( + input_data: Arc<(Condvar, Mutex)>, event_cache: Arc>, ) -> Self { Self { input_data, - cache: event_cache + cache: event_cache, } } @@ -39,7 +42,7 @@ impl EventCacheWriterThread { // take current input data, resetting it let input_data: EventCacheWriterInputData = input_data_lock.take(); - + // Check if there is some input data if !input_data.events.is_empty() { return (input_data, false); @@ -49,7 +52,7 @@ impl EventCacheWriterThread { if input_data.stop { return (input_data, true); } - + // FIXME / TODO: should we sleep here? 
} } @@ -58,7 +61,10 @@ impl EventCacheWriterThread { pub fn main_loop(&mut self) { loop { let (input_data, stop) = self.wait_loop_event(); - debug!("Event cache writer loop triggered, input_data = {:?}", input_data); + debug!( + "Event cache writer loop triggered, input_data = {:?}", + input_data + ); if stop { // we need to stop @@ -92,7 +98,6 @@ pub struct EventCacheWriterManagerImpl { } impl EventCacheManager for EventCacheWriterManagerImpl { - /// stops the worker fn stop(&mut self) { info!("Stopping Execution controller..."); @@ -110,24 +115,20 @@ impl EventCacheManager for EventCacheWriterManagerImpl { } } -pub fn start_event_cache_writer_worker(cfg: EventCacheConfig) -> (Box, Box) { - - let event_cache = Arc::new( - RwLock::new( - EventCache::new( - cfg.event_cache_path.as_path(), - cfg.max_event_cache_length, - cfg.snip_amount, - cfg.thread_count, - cfg.max_recursive_call_depth, - cfg.max_event_data_length, - ))); +pub fn start_event_cache_writer_worker( + cfg: EventCacheConfig, +) -> (Box, Box) { + let event_cache = Arc::new(RwLock::new(EventCache::new( + cfg.event_cache_path.as_path(), + cfg.max_event_cache_length, + cfg.snip_amount, + cfg.thread_count, + cfg.max_recursive_call_depth, + cfg.max_event_data_length, + ))); // define the input data interface - let input_data = Arc::new(( - Condvar::new(), - Mutex::new(EventCacheWriterInputData::new()), - )); + let input_data = Arc::new((Condvar::new(), Mutex::new(EventCacheWriterInputData::new()))); let input_data_clone = input_data.clone(); // create a controller @@ -136,8 +137,7 @@ pub fn start_event_cache_writer_worker(cfg: EventCacheConfig) -> (Box (Box Date: Wed, 6 Nov 2024 15:58:21 +0100 Subject: [PATCH 11/40] Fix check/clippy with --all-targets --- Cargo.lock | 2 ++ massa-event-cache/Cargo.toml | 8 ++++++++ massa-event-cache/src/controller.rs | 1 + massa-event-cache/src/lib.rs | 3 +++ massa-execution-worker/Cargo.toml | 3 ++- massa-execution-worker/src/tests/universe.rs | 7 +++++-- 6 files changed, 21 
insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 38855fc06b6..e9ec362a16a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3217,6 +3217,8 @@ version = "0.1.0" dependencies = [ "massa_models", "massa_serialization", + "mockall", + "mockall_wrap", "more-asserts 0.3.1", "nom", "parking_lot", diff --git a/massa-event-cache/Cargo.toml b/massa-event-cache/Cargo.toml index 48cb498b9ff..ebbcfe69946 100644 --- a/massa-event-cache/Cargo.toml +++ b/massa-event-cache/Cargo.toml @@ -6,8 +6,11 @@ edition = "2021" [features] test-exports = [ "massa_models/test-exports", + "mockall", + "mockall_wrap" ] + [dependencies] nom = {workspace = true} rocksdb = {workspace = true} @@ -15,9 +18,14 @@ tracing = {workspace = true} parking_lot = { workspace = true } massa_models = {workspace = true} massa_serialization = {workspace = true} +mockall = {workspace = true, optional = true} +mockall_wrap = {workspace = true, optional = true} [dev-dependencies] tempfile = {workspace = true} serial_test = {workspace = true} more-asserts = {workspace = true} rand = {workspace = true} +mockall = {workspace = true} +mockall_wrap = {workspace = true} +massa_models = { workspace = true, features = ["test-exports"] } diff --git a/massa-event-cache/src/controller.rs b/massa-event-cache/src/controller.rs index d03aab36e82..72609c8aa40 100644 --- a/massa-event-cache/src/controller.rs +++ b/massa-event-cache/src/controller.rs @@ -35,6 +35,7 @@ impl EventCacheWriterInputData { } /// interface that communicates with the worker thread +#[cfg_attr(feature = "test-exports", mockall_wrap::wrap, mockall::automock)] pub trait EventCacheController: Send + Sync { fn save_events(&self, events: VecDeque); diff --git a/massa-event-cache/src/lib.rs b/massa-event-cache/src/lib.rs index 4e4ccea9a2a..685578a1442 100644 --- a/massa-event-cache/src/lib.rs +++ b/massa-event-cache/src/lib.rs @@ -4,3 +4,6 @@ pub mod controller; mod event_cache; mod ser_deser; pub mod worker; + +#[cfg(feature = "test-exports")] 
+pub use controller::{MockEventCacheController, MockEventCacheControllerWrapper}; diff --git a/massa-execution-worker/Cargo.toml b/massa-execution-worker/Cargo.toml index 73374aade0a..c4fc5cc7915 100644 --- a/massa-execution-worker/Cargo.toml +++ b/massa-execution-worker/Cargo.toml @@ -29,6 +29,7 @@ test-exports = [ "massa_metrics/test-exports", "massa_metrics/test-exports", "massa_db_worker", + "massa_event_cache/test-exports", "tempfile", ] benchmarking = [ @@ -112,4 +113,4 @@ tokio = { workspace = true, features = ["sync"] } hex-literal = { workspace = true } mockall = { workspace = true } assert_matches = { workspace = true } - +massa_event_cache = { workspace = true, features = ["test-exports"] } diff --git a/massa-execution-worker/src/tests/universe.rs b/massa-execution-worker/src/tests/universe.rs index 8c1ac11a918..48ba24da07a 100644 --- a/massa-execution-worker/src/tests/universe.rs +++ b/massa-execution-worker/src/tests/universe.rs @@ -4,6 +4,7 @@ use std::{ sync::Arc, }; +use crate::start_execution_worker; #[cfg(all(feature = "file_storage_backend", not(feature = "db_storage_backend")))] use crate::storage_backend::FileStorageBackend; #[cfg(feature = "db_storage_backend")] @@ -11,6 +12,7 @@ use crate::storage_backend::RocksDBStorageBackend; use cfg_if::cfg_if; use massa_db_exports::{MassaDBConfig, MassaDBController, ShareableMassaDBController}; use massa_db_worker::MassaDB; +use massa_event_cache::MockEventCacheControllerWrapper; use massa_execution_exports::{ ExecutionBlockMetadata, ExecutionChannels, ExecutionConfig, ExecutionController, ExecutionError, ExecutionManager, SlotExecutionOutput, @@ -43,8 +45,6 @@ use parking_lot::RwLock; use tempfile::TempDir; use tokio::sync::broadcast; -use crate::start_execution_worker; - #[cfg(feature = "execution-trace")] use massa_execution_exports::types_trace_info::SlotAbiCallStack; @@ -53,6 +53,7 @@ pub struct ExecutionForeignControllers { pub final_state: Arc>, pub ledger_controller: MockLedgerControllerWrapper, 
pub db: ShareableMassaDBController, + pub event_cache_controller: Box, } impl ExecutionForeignControllers { @@ -75,6 +76,7 @@ impl ExecutionForeignControllers { ledger_controller: MockLedgerControllerWrapper::new(), final_state: Arc::new(RwLock::new(MockFinalStateController::new())), db, + event_cache_controller: Box::new(MockEventCacheControllerWrapper::new()), } } } @@ -141,6 +143,7 @@ impl TestUniverse for ExecutionTestUniverse { std::time::Duration::from_secs(5), ) .0, + controllers.event_cache_controller, #[cfg(feature = "dump-block")] block_storage_backend.clone(), ); From e72423dfdd84db1aaa4fc3d7d4b0719487fdf18f Mon Sep 17 00:00:00 2001 From: sydhds Date: Thu, 7 Nov 2024 10:32:02 +0100 Subject: [PATCH 12/40] Better event cache clear at startup && better filtering --- massa-event-cache/src/controller.rs | 60 +++++++++++++++++++++++++++- massa-event-cache/src/event_cache.rs | 36 +++-------------- 2 files changed, 64 insertions(+), 32 deletions(-) diff --git a/massa-event-cache/src/controller.rs b/massa-event-cache/src/controller.rs index 72609c8aa40..b27612e27f6 100644 --- a/massa-event-cache/src/controller.rs +++ b/massa-event-cache/src/controller.rs @@ -56,13 +56,69 @@ impl EventCacheController for EventCacheControllerImpl { fn save_events(&self, events: VecDeque) { // lock input data let mut input_data = self.input_data.1.lock(); - input_data.events = events; + input_data.events.extend(events); // wake up VM loop self.input_data.0.notify_one(); } fn get_filtered_sc_output_events(&self, filter: &EventFilter) -> Vec { + + let lock_0 = self.input_data.1.lock(); + let it = lock_0 + .events + .iter() + .filter_map(|event| { + + if let Some(start) = filter.start { + if event.context.slot < start { + return None; + } + } + if let Some(end) = filter.end { + if event.context.slot >= end { + return None; + } + } + if let Some(is_final) = filter.is_final { + if event.context.is_final != is_final { + return None; + } + } + if let Some(is_error) = filter.is_error { + if 
event.context.is_error != is_error { + return None; + } + } + match ( + filter.original_caller_address, + event.context.call_stack.front(), + ) { + (Some(addr1), Some(addr2)) if addr1 != *addr2 => return None, + (Some(_), None) => return None, + _ => (), + } + match (filter.emitter_address, event.context.call_stack.back()) { + (Some(addr1), Some(addr2)) if addr1 != *addr2 => return None, + (Some(_), None) => return None, + _ => (), + } + match ( + filter.original_operation_id, + event.context.origin_operation_id, + ) { + (Some(addr1), Some(addr2)) if addr1 != addr2 => return None, + (Some(_), None) => return None, + _ => (), + } + Some(event) + }); + let lock = self.cache.read(); - lock.get_filtered_sc_output_events(filter).collect() + it + .cloned() + .chain(lock + .get_filtered_sc_output_events(filter) + ) + .collect() } } diff --git a/massa-event-cache/src/event_cache.rs b/massa-event-cache/src/event_cache.rs index af91295b520..671363bbd97 100644 --- a/massa-event-cache/src/event_cache.rs +++ b/massa-event-cache/src/event_cache.rs @@ -9,23 +9,14 @@ use crate::ser_deser::{ use massa_models::execution::EventFilter; use massa_models::output_event::SCOutputEvent; use massa_serialization::{DeserializeError, Deserializer, Serializer}; -use rocksdb::{DBIterator, IteratorMode, WriteBatch, DB}; +use rocksdb::{DBIterator, IteratorMode, Options, WriteBatch, DB}; use tracing::debug; const OPEN_ERROR: &str = "critical: rocksdb open operation failed"; +const DESTROY_ERROR: &str = "critical: rocksdb delete operation failed"; const CRUD_ERROR: &str = "critical: rocksdb crud operation failed"; const EVENT_DESER_ERROR: &str = "critical: event deserialization failed"; -/* -/// Module key formatting macro -#[macro_export] -macro_rules! 
event_key { - ($event_slot:expr) => { - [&$event_slot.to_bytes()[..], &[MODULE_IDENT]].concat() - }; -} -*/ - pub(crate) struct EventCache { /// RocksDB database db: DB, @@ -53,9 +44,12 @@ impl EventCache { max_recursive_call_depth: u16, max_event_data_length: u64, ) -> Self { + + // Clear the db + DB::destroy(&Options::default(), path).expect(DESTROY_ERROR); let db = DB::open_default(path).expect(OPEN_ERROR); - let mut event_cache = Self { + Self { db, entry_count: 0, max_entry_count, @@ -66,23 +60,7 @@ impl EventCache { max_recursive_call_depth, max_event_data_length, }), - }; - - event_cache.clear(); - event_cache - } - - fn clear(&mut self) { - let iter = self.db.iterator(IteratorMode::Start); - let mut batch = WriteBatch::default(); - - for kvb in iter { - let kvb = kvb.expect(EVENT_DESER_ERROR); - batch.delete(kvb.0); } - - self.db.write(batch).expect(CRUD_ERROR); - self.entry_count = 0; } #[allow(dead_code)] @@ -112,8 +90,6 @@ impl EventCache { } /// Insert new events in the cache - /// - /// For performance reason, pass events_len to avoid cloning the iterator pub fn insert_multi_it( &mut self, events: impl ExactSizeIterator + Clone, From ba34210ba2622fe4ddb41c2afd271a8d093dd0ca Mon Sep 17 00:00:00 2001 From: sydhds Date: Thu, 7 Nov 2024 11:56:05 +0100 Subject: [PATCH 13/40] Rename to config to max_call_stack_length --- massa-event-cache/src/config.rs | 2 +- massa-event-cache/src/event_cache.rs | 2 +- massa-event-cache/src/ser_deser.rs | 8 ++++---- massa-event-cache/src/worker.rs | 2 +- massa-node/src/main.rs | 5 ++++- 5 files changed, 11 insertions(+), 8 deletions(-) diff --git a/massa-event-cache/src/config.rs b/massa-event-cache/src/config.rs index 62fcc5d4963..6e0fb4ab3c4 100644 --- a/massa-event-cache/src/config.rs +++ b/massa-event-cache/src/config.rs @@ -12,5 +12,5 @@ pub struct EventCacheConfig { /// Thread count pub thread_count: u8, /// Call stack max length - pub max_recursive_call_depth: u16, + pub max_call_stack_length: u16, } diff --git 
a/massa-event-cache/src/event_cache.rs b/massa-event-cache/src/event_cache.rs index 671363bbd97..aaee8448e38 100644 --- a/massa-event-cache/src/event_cache.rs +++ b/massa-event-cache/src/event_cache.rs @@ -57,7 +57,7 @@ impl EventCache { event_ser: SCOutputEventSerializer::new(), event_deser: SCOutputEventDeserializer::new(SCOutputEventDeserializerArgs { thread_count, - max_recursive_call_depth, + max_call_stack_length: max_recursive_call_depth, max_event_data_length, }), } diff --git a/massa-event-cache/src/ser_deser.rs b/massa-event-cache/src/ser_deser.rs index 65f047277c8..09819793947 100644 --- a/massa-event-cache/src/ser_deser.rs +++ b/massa-event-cache/src/ser_deser.rs @@ -100,7 +100,7 @@ impl SCOutputEventDeserializer { index_in_slot_deser: U64VarIntDeserializer::new(Included(0), Included(u64::MAX)), addr_len_deser: U32VarIntDeserializer::new( Included(0), - Included(u32::from(args.max_recursive_call_depth)), + Included(u32::from(args.max_call_stack_length)), ), slot_deser: SlotDeserializer::new( (Included(0), Included(u64::MAX)), @@ -217,7 +217,7 @@ impl Deserializer for SCOutputEventDeserializer { #[allow(missing_docs)] pub struct SCOutputEventDeserializerArgs { pub thread_count: u8, - pub max_recursive_call_depth: u16, + pub max_call_stack_length: u16, pub max_event_data_length: u64, } @@ -250,7 +250,7 @@ mod test { let event_ser = SCOutputEventSerializer::new(); let event_deser = SCOutputEventDeserializer::new(SCOutputEventDeserializerArgs { thread_count: 16, - max_recursive_call_depth: 25, + max_call_stack_length: 25, max_event_data_length: 512, }); @@ -290,7 +290,7 @@ mod test { let event_ser = SCOutputEventSerializer::new(); let event_deser = SCOutputEventDeserializer::new(SCOutputEventDeserializerArgs { thread_count: 16, - max_recursive_call_depth: 25, + max_call_stack_length: 25, max_event_data_length: 512, }); diff --git a/massa-event-cache/src/worker.rs b/massa-event-cache/src/worker.rs index a5bef6e16bc..6d3a488eabe 100644 --- 
a/massa-event-cache/src/worker.rs +++ b/massa-event-cache/src/worker.rs @@ -123,7 +123,7 @@ pub fn start_event_cache_writer_worker( cfg.max_event_cache_length, cfg.snip_amount, cfg.thread_count, - cfg.max_recursive_call_depth, + cfg.max_call_stack_length, cfg.max_event_data_length, ))); diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index 516b7d81296..c379d439222 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -483,7 +483,10 @@ async fn launch( snip_amount: SETTINGS.execution.event_snip_amount, max_event_data_length: MAX_EVENT_DATA_SIZE as u64, thread_count: THREAD_COUNT, - max_recursive_call_depth: MAX_RECURSIVE_CALLS_DEPTH, + // Note: SCOutputEvent call stack comes from the execution module, and we assume + // this should return a limited call stack length + // The value remains for future use & limitations + max_call_stack_length: u16::MAX, }; let (event_cache_manager, event_cache_controller) = start_event_cache_writer_worker(event_cache_config); From b2021d598d467b34ccd278ffeae074043f641db7 Mon Sep 17 00:00:00 2001 From: sydhds Date: Tue, 19 Nov 2024 16:03:45 +0100 Subject: [PATCH 14/40] Improve event cache filtering --- Cargo.lock | 1 + massa-db-worker/src/lib.rs | 6 +- massa-event-cache/Cargo.toml | 1 + massa-event-cache/src/config.rs | 4 + massa-event-cache/src/controller.rs | 95 +- massa-event-cache/src/event_cache.rs | 1202 +++++++++++++++++++-- massa-event-cache/src/lib.rs | 1 + massa-event-cache/src/rocksdb_operator.rs | 105 ++ massa-event-cache/src/worker.rs | 2 + massa-node/src/main.rs | 5 +- 10 files changed, 1272 insertions(+), 150 deletions(-) create mode 100644 massa-event-cache/src/rocksdb_operator.rs diff --git a/Cargo.lock b/Cargo.lock index e9ec362a16a..74de35dac43 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3221,6 +3221,7 @@ dependencies = [ "mockall_wrap", "more-asserts 0.3.1", "nom", + "num_enum", "parking_lot", "rand", "rocksdb", diff --git a/massa-db-worker/src/lib.rs b/massa-db-worker/src/lib.rs index 
d2d0296968c..21a41903a6b 100644 --- a/massa-db-worker/src/lib.rs +++ b/massa-db-worker/src/lib.rs @@ -35,7 +35,7 @@ //! * if we want to delete item a: 1000 ^ 0011 == 1011 (== item b) //! * if we want to delete item b: 1000 ^ 1011 == 0011 (== item a) //! -//! Note that this does not provides "Proof of present" nor "Proof of Absence" +//! Note that this does not provide "Proof of present" nor "Proof of Absence" //! (operations avail with Merkle trees) //! //! For more details here: https://github.com/massalabs/massa/discussions/3852#discussioncomment-6188158 @@ -45,10 +45,10 @@ //! # Caches //! //! A cache of db changes is kept in memory allowing to easily stream it -//! (by streaming, we means: sending it to another massa node (aka bootstrap)) +//! (by streaming, we mean: sending it to another massa node (aka bootstrap)) //! There is 2 separate caches: one for 'state' and one for 'versioning' //! -//! These caches is stored as a key, value: slot -> insertion_data|deletion_data. +//! These caches are stored as a key, value: slot -> insertion_data|deletion_data. //! //! # Streaming steps //! 
diff --git a/massa-event-cache/Cargo.toml b/massa-event-cache/Cargo.toml index ebbcfe69946..6d74b11b9fe 100644 --- a/massa-event-cache/Cargo.toml +++ b/massa-event-cache/Cargo.toml @@ -16,6 +16,7 @@ nom = {workspace = true} rocksdb = {workspace = true} tracing = {workspace = true} parking_lot = { workspace = true } +num_enum = { workspace = true } massa_models = {workspace = true} massa_serialization = {workspace = true} mockall = {workspace = true, optional = true} diff --git a/massa-event-cache/src/config.rs b/massa-event-cache/src/config.rs index 6e0fb4ab3c4..257d03b4a21 100644 --- a/massa-event-cache/src/config.rs +++ b/massa-event-cache/src/config.rs @@ -13,4 +13,8 @@ pub struct EventCacheConfig { pub thread_count: u8, /// Call stack max length pub max_call_stack_length: u16, + /// Maximum number of events per operation + pub max_events_per_operation: u64, + /// Maximum number of operations per block + pub max_operations_per_block: u64, } diff --git a/massa-event-cache/src/controller.rs b/massa-event-cache/src/controller.rs index b27612e27f6..178fa6b424f 100644 --- a/massa-event-cache/src/controller.rs +++ b/massa-event-cache/src/controller.rs @@ -62,63 +62,56 @@ impl EventCacheController for EventCacheControllerImpl { } fn get_filtered_sc_output_events(&self, filter: &EventFilter) -> Vec { - let lock_0 = self.input_data.1.lock(); - let it = lock_0 - .events - .iter() - .filter_map(|event| { - - if let Some(start) = filter.start { - if event.context.slot < start { - return None; - } - } - if let Some(end) = filter.end { - if event.context.slot >= end { - return None; - } - } - if let Some(is_final) = filter.is_final { - if event.context.is_final != is_final { - return None; - } + #[allow(clippy::unnecessary_filter_map)] + let it = lock_0.events.iter().filter_map(|event| { + if let Some(start) = filter.start { + if event.context.slot < start { + return None; } - if let Some(is_error) = filter.is_error { - if event.context.is_error != is_error { - return None; - 
} + } + if let Some(end) = filter.end { + if event.context.slot >= end { + return None; } - match ( - filter.original_caller_address, - event.context.call_stack.front(), - ) { - (Some(addr1), Some(addr2)) if addr1 != *addr2 => return None, - (Some(_), None) => return None, - _ => (), + } + if let Some(is_final) = filter.is_final { + if event.context.is_final != is_final { + return None; } - match (filter.emitter_address, event.context.call_stack.back()) { - (Some(addr1), Some(addr2)) if addr1 != *addr2 => return None, - (Some(_), None) => return None, - _ => (), + } + if let Some(is_error) = filter.is_error { + if event.context.is_error != is_error { + return None; } - match ( - filter.original_operation_id, - event.context.origin_operation_id, - ) { - (Some(addr1), Some(addr2)) if addr1 != addr2 => return None, - (Some(_), None) => return None, - _ => (), - } - Some(event) - }); - + } + match ( + filter.original_caller_address, + event.context.call_stack.front(), + ) { + (Some(addr1), Some(addr2)) if addr1 != *addr2 => return None, + (Some(_), None) => return None, + _ => (), + } + match (filter.emitter_address, event.context.call_stack.back()) { + (Some(addr1), Some(addr2)) if addr1 != *addr2 => return None, + (Some(_), None) => return None, + _ => (), + } + match ( + filter.original_operation_id, + event.context.origin_operation_id, + ) { + (Some(addr1), Some(addr2)) if addr1 != addr2 => return None, + (Some(_), None) => return None, + _ => (), + } + Some(event) + }); + let lock = self.cache.read(); - it - .cloned() - .chain(lock - .get_filtered_sc_output_events(filter) - ) + it.cloned() + .chain(lock.get_filtered_sc_output_events(filter)) .collect() } } diff --git a/massa-event-cache/src/event_cache.rs b/massa-event-cache/src/event_cache.rs index aaee8448e38..76c2464d6e8 100644 --- a/massa-event-cache/src/event_cache.rs +++ b/massa-event-cache/src/event_cache.rs @@ -1,21 +1,173 @@ -use std::cmp::max; // std -use nom::AsBytes; +use std::cmp::max; +use 
std::collections::{BTreeMap, BTreeSet}; use std::path::Path; // third-party +use num_enum::IntoPrimitive; +use rocksdb::{IteratorMode, Options, WriteBatch, DB}; +use tracing::{debug, warn}; +// internal +use crate::rocksdb_operator::counter_merge; use crate::ser_deser::{ SCOutputEventDeserializer, SCOutputEventDeserializerArgs, SCOutputEventSerializer, }; +use massa_models::address::Address; +use massa_models::error::ModelsError; use massa_models::execution::EventFilter; +use massa_models::operation::{OperationId, OperationIdSerializer}; use massa_models::output_event::SCOutputEvent; +use massa_models::slot::Slot; use massa_serialization::{DeserializeError, Deserializer, Serializer}; -use rocksdb::{DBIterator, IteratorMode, Options, WriteBatch, DB}; -use tracing::debug; const OPEN_ERROR: &str = "critical: rocksdb open operation failed"; +const COUNTER_INIT_ERROR: &str = "critical: cannot init rocksdb counters"; const DESTROY_ERROR: &str = "critical: rocksdb delete operation failed"; const CRUD_ERROR: &str = "critical: rocksdb crud operation failed"; const EVENT_DESER_ERROR: &str = "critical: event deserialization failed"; +const OPERATION_ID_DESER_ERROR: &str = "critical: deserialization failed for op id in rocksdb"; +const COUNTER_ERROR: &str = "critical: cannot get counter"; + +#[allow(dead_code)] +/// Prefix u8 used to identify rocksdb keys +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, IntoPrimitive)] +#[repr(u8)] +enum KeyIndent { + Counter = 0, + Event, + EmitterAddress, + OriginalCallerAddress, + OriginalOperationId, + IsError, + IsFinal, +} + +/// A Rocksdb key builder to insert in EventCache +struct EventCacheKeyBuilder { + /// Operation Id Serializer + op_id_ser: OperationIdSerializer, +} + +impl EventCacheKeyBuilder { + fn new() -> Self { + Self { + op_id_ser: OperationIdSerializer::new(), + } + } + + /// A prefix key (for rocksdb prefix iteration) + fn get_prefix_event_key(&self, slot: &Slot) -> Vec { + let mut event_key = 
vec![KeyIndent::Event as u8]; + event_key.extend(slot.to_bytes_key()); + event_key + } + + fn get_event_key(&self, event: &SCOutputEvent) -> Vec { + let mut event_key = vec![KeyIndent::Event as u8]; + event_key.extend(event.context.slot.to_bytes_key()); + event_key.extend(event.context.index_in_slot.to_be_bytes()); + event_key + } + + fn get_prefix_emitter_address_address_key(&self, addr: &Address) -> Vec { + let mut key = vec![KeyIndent::EmitterAddress as u8]; + let addr_bytes = addr.to_prefixed_bytes(); + let addr_bytes_len = addr_bytes.len(); + key.extend(addr_bytes); + key.push(addr_bytes_len as u8); + key + } + + fn get_emitter_address_key(&self, event: &SCOutputEvent) -> Option> { + if let Some(addr) = event.context.call_stack.back() { + let mut key = vec![KeyIndent::EmitterAddress as u8]; + let addr_bytes = addr.to_prefixed_bytes(); + let addr_bytes_len = addr_bytes.len(); + key.extend(addr_bytes); + key.push(addr_bytes_len as u8); + key.extend(self.get_event_key(event)); + Some(key) + } else { + None + } + } + + fn get_prefix_original_caller_address_key(&self, addr: &Address) -> Vec { + let mut key = vec![KeyIndent::OriginalCallerAddress as u8]; + let addr_bytes = addr.to_prefixed_bytes(); + let addr_bytes_len = addr_bytes.len(); + key.extend(addr_bytes); + key.push(addr_bytes_len as u8); + key + } + + fn get_original_caller_address_key(&self, event: &SCOutputEvent) -> Option> { + if let Some(addr) = event.context.call_stack.front() { + let mut key = vec![KeyIndent::OriginalCallerAddress as u8]; + let addr_bytes = addr.to_prefixed_bytes(); + let addr_bytes_len = addr_bytes.len(); + key.extend(addr_bytes); + key.push(addr_bytes_len as u8); + key.extend(self.get_event_key(event)); + Some(key) + } else { + None + } + } + + fn get_prefix_original_operation_id_key(&self, op_id: &OperationId) -> Option> { + let mut key = vec![KeyIndent::OriginalOperationId as u8]; + let mut buffer = Vec::new(); + self.op_id_ser.serialize(op_id, &mut buffer).ok()?; + 
key.extend(&buffer); + key.extend(u32::to_be_bytes(buffer.len() as u32)); + Some(key) + } + + fn get_original_operation_id_key(&self, event: &SCOutputEvent) -> Option> { + if let Some(op_id) = event.context.origin_operation_id { + let mut key = vec![KeyIndent::OriginalOperationId as u8]; + let mut buffer = Vec::new(); + self.op_id_ser.serialize(&op_id, &mut buffer).ok()?; + key.extend(&buffer); + key.extend(u32::to_be_bytes(buffer.len() as u32)); + key.extend(self.get_event_key(event)); + Some(key) + } else { + None + } + } + + fn get_prefix_is_error_key(&self, is_error: bool) -> Vec { + vec![KeyIndent::IsError as u8, u8::from(is_error)] + } + + fn get_is_error_key(&self, event: &SCOutputEvent) -> Vec { + let mut key = vec![KeyIndent::IsError as u8]; + key.push(event.context.is_error as u8); + key.extend(self.get_event_key(event)); + key + } + + fn get_counter_key_from(&self, key: &[u8]) -> Vec { + vec![KeyIndent::Counter as u8, key[0]] + } + + fn get_counter_key_from_indent(&self, indent: &KeyIndent) -> Vec { + vec![KeyIndent::Counter as u8, *indent as u8] + } + + fn get_counter_key_bool_from(&self, key: &[u8], value: bool) -> Vec { + vec![u8::from(KeyIndent::Counter), key[0], u8::from(value)] + } + + fn get_counter_key_bool_from_indent(&self, indent: &KeyIndent, value: bool) -> Vec { + vec![ + u8::from(KeyIndent::Counter), + u8::from(*indent), + u8::from(value), + ] + } +} pub(crate) struct EventCache { /// RocksDB database @@ -32,9 +184,22 @@ pub(crate) struct EventCache { event_ser: SCOutputEventSerializer, /// Event deserializer event_deser: SCOutputEventDeserializer, + /// Key builder + key_builder: EventCacheKeyBuilder, + /// First event slot in db + first_slot: Slot, + /// Last event slot in db + last_slot: Slot, + /// Thread count + thread_count: u8, + /// Maximum number of events per operation + max_events_per_operation: u64, + /// Maximum number of operations per block + max_operations_per_block: u64, } impl EventCache { + 
#[allow(clippy::too_many_arguments)] /// Create a new EventCache pub fn new( path: &Path, @@ -43,11 +208,35 @@ impl EventCache { thread_count: u8, max_recursive_call_depth: u16, max_event_data_length: u64, + max_events_per_operation: u64, + max_operations_per_block: u64, ) -> Self { - - // Clear the db + // Clear the db DB::destroy(&Options::default(), path).expect(DESTROY_ERROR); - let db = DB::open_default(path).expect(OPEN_ERROR); + let options = { + let mut opts = Options::default(); + opts.create_if_missing(true); + opts.set_merge_operator_associative("counter merge operator", counter_merge); + opts + }; + let db = DB::open(&options, path).expect(OPEN_ERROR); + + let key_builder = EventCacheKeyBuilder::new(); + // init counters + let mut batch = WriteBatch::default(); + let value = 0u64.to_be_bytes(); + let key_counter = key_builder.get_counter_key_from_indent(&KeyIndent::EmitterAddress); + batch.put(key_counter, value); + let key_counter = + key_builder.get_counter_key_from_indent(&KeyIndent::OriginalCallerAddress); + batch.put(key_counter, value); + let key_counter = key_builder.get_counter_key_from_indent(&KeyIndent::OriginalOperationId); + batch.put(key_counter, value); + let key_counter = key_builder.get_counter_key_bool_from_indent(&KeyIndent::IsError, true); + batch.put(key_counter, value); + let key_counter = key_builder.get_counter_key_bool_from_indent(&KeyIndent::IsError, false); + batch.put(key_counter, value); + db.write(batch).expect(COUNTER_INIT_ERROR); Self { db, @@ -60,9 +249,49 @@ impl EventCache { max_call_stack_length: max_recursive_call_depth, max_event_data_length, }), + key_builder, + first_slot: Slot::new(0, 0), + last_slot: Slot::new(0, 0), + thread_count, + max_events_per_operation, + max_operations_per_block, } } + /// From an event add keys & values into a rocksdb batch + fn insert_into_batch(&mut self, event: SCOutputEvent, batch: &mut WriteBatch) { + let mut event_buffer = Vec::new(); + self.event_ser.serialize(&event, &mut 
event_buffer).unwrap(); + + batch.put(self.key_builder.get_event_key(&event), event_buffer); + if let Some(key) = self.key_builder.get_emitter_address_key(&event) { + let key_counter = self.key_builder.get_counter_key_from(key.as_slice()); + batch.put(key, vec![]); + batch.merge(key_counter, 1i64.to_be_bytes()); + } + if let Some(key) = self.key_builder.get_original_caller_address_key(&event) { + let key_counter = self.key_builder.get_counter_key_from(key.as_slice()); + batch.put(key, vec![]); + batch.merge(key_counter, 1i64.to_be_bytes()); + } + if let Some(key) = self.key_builder.get_original_operation_id_key(&event) { + let key_counter = self.key_builder.get_counter_key_from(key.as_slice()); + batch.put(key, vec![]); + batch.merge(key_counter, 1i64.to_be_bytes()); + } + + let key = self.key_builder.get_is_error_key(&event); + let key_counter = self + .key_builder + .get_counter_key_bool_from(key.as_slice(), event.context.is_error); + batch.put(key, vec![]); + batch.merge(key_counter, 1i64.to_be_bytes()); + + // Keep track of last slot (and start slot) of events in the DB + // Help for event filtering + self.last_slot = max(self.last_slot, event.context.slot); + } + #[allow(dead_code)] /// Insert a new event in the cache pub fn insert(&mut self, event: SCOutputEvent) { @@ -70,16 +299,8 @@ impl EventCache { self.snip(None); } - let event_key = { - let mut event_key = event.context.slot.to_bytes_key().to_vec(); - event_key.extend(event.context.index_in_slot.to_be_bytes()); - event_key - }; - let mut event_buffer = Vec::new(); - self.event_ser.serialize(&event, &mut event_buffer).unwrap(); - let mut batch = WriteBatch::default(); - batch.put(event_key, event_buffer); + self.insert_into_batch(event, &mut batch); self.db.write(batch).expect(CRUD_ERROR); // Note: @@ -103,15 +324,7 @@ impl EventCache { let mut batch = WriteBatch::default(); for event in events { - let event_key = { - let mut event_key = event.context.slot.to_bytes_key().to_vec(); - 
event_key.extend(event.context.index_in_slot.to_be_bytes()); - event_key - }; - let mut event_buffer = Vec::new(); - self.event_ser.serialize(&event, &mut event_buffer).unwrap(); - - batch.put(event_key, event_buffer); + self.insert_into_batch(event, &mut batch); } self.db.write(batch).expect(CRUD_ERROR); // Note: @@ -121,64 +334,253 @@ impl EventCache { debug!("(Events insert) entry_count is: {}", self.entry_count); } - fn db_iter(&self, mode: Option) -> DBIterator { - self.db.iterator(mode.unwrap_or(IteratorMode::Start)) + /// Get events filtered by the given argument + pub(crate) fn get_filtered_sc_output_events(&self, filter: &EventFilter) -> Vec { + // Step 1 + // Build a (sorted) map with key: (counter value, indent), value: filter + // Will be used to iterate from the lower count index to the highest count index + // e.g. if index for emitter address is 10 (index count), and origin operation id is 20 + // iter over emitter address index then origin operation id index + + let filter_items = from_event_filter(filter); + + if filter_items.is_empty() { + // Note: will return too many event - user should restrict the filter + warn!("Filter item only on is final field, please add more filter parameters"); + return vec![]; + } + + let it = filter_items.iter().map(|(key_indent, filter_item)| { + let count = self + .filter_item_estimate_count(key_indent, filter_item) + .unwrap_or_else(|e| { + warn!( + "Could not estimate count for key indent: {:?} - filter_item: {:?}: {}", + key_indent, filter_item, e + ); + self.max_entry_count as u64 + }); + ((count, key_indent), filter_item) + }); + + let map = BTreeMap::from_iter(it); + + // Step 2: apply filter from the lowest counter to the highest counter + + let mut filter_res_prev = None; + for ((_counter, indent), filter_item) in map.iter() { + let mut filter_res = BTreeSet::new(); + self.filter_for( + indent, + filter_item, + &mut filter_res, + filter_res_prev.as_ref(), + ); + filter_res_prev = Some(filter_res); + } + + // 
Step 3: get values & deserialize + + let multi_args = filter_res_prev + .unwrap() + .into_iter() + .collect::>>(); + let res = self.db.multi_get(multi_args); + + res.into_iter() + .map(|value| { + let value = value.unwrap().unwrap(); + let (_, event) = self + .event_deser + .deserialize::(&value) + .unwrap(); + event + }) + .collect::>() } - pub(crate) fn get_filtered_sc_output_events<'b, 'a: 'b>( - &'a self, - filter: &'b EventFilter, - ) -> impl Iterator + 'b { - self.db_iter(Some(IteratorMode::Start)).filter_map(|kvb| { - let kvb = kvb.unwrap(); - let (_rem, event) = self - .event_deser - .deserialize::(kvb.1.as_bytes()) - .expect(EVENT_DESER_ERROR); + fn filter_for( + &self, + indent: &KeyIndent, + filter_item: &FilterItem, + result: &mut BTreeSet>, + seen: Option<&BTreeSet>>, + ) { + if *indent == KeyIndent::Event { + let opts = match filter_item { + FilterItem::SlotStart(start) => { + let key_start = self.key_builder.get_prefix_event_key(start); + let mut options = rocksdb::ReadOptions::default(); + options.set_iterate_lower_bound(key_start); + options + } + FilterItem::SlotEnd(end) => { + let key_end = self.key_builder.get_prefix_event_key(end); + let mut options = rocksdb::ReadOptions::default(); + options.set_iterate_upper_bound(key_end); + options + } + FilterItem::SlotStartEnd(start, end) => { + let key_start = self.key_builder.get_prefix_event_key(start); + let key_end = self.key_builder.get_prefix_event_key(end); + let mut options = rocksdb::ReadOptions::default(); + options.set_iterate_range(key_start..key_end); + options + } + _ => unreachable!(), + }; + + #[allow(clippy::manual_flatten)] + for kvb in self.db.iterator_opt(IteratorMode::Start, opts) { + if let Ok(kvb) = kvb { + if !kvb.0.starts_with(&[*indent as u8]) { + break; + } + + // FIXME: should check for end bound? 
- if let Some(start) = filter.start { - if event.context.slot < start { - return None; + let found = kvb.0.to_vec(); + // println!("found: {:?}", found); + + if let Some(filter_set_seen) = seen { + if filter_set_seen.contains(&found) { + result.insert(found); + } + + // We have already found as many items as in a previous search + // As we search from the lowest count to the highest count, we will never add new items + // in our result, so we can break here + if filter_set_seen.len() == result.len() { + break; + } + } else { + result.insert(found); + } } } - if let Some(end) = filter.end { - if event.context.slot >= end { - return None; + } else { + let prefix_filter = match filter_item { + FilterItem::EmitterAddress(addr) => self + .key_builder + .get_prefix_emitter_address_address_key(addr), + FilterItem::OriginalCallerAddress(addr) => self + .key_builder + .get_prefix_original_caller_address_key(addr), + FilterItem::OriginalOperationId(op_id) => self + .key_builder + .get_prefix_original_operation_id_key(op_id) + .expect(OPERATION_ID_DESER_ERROR), + FilterItem::IsError(is_error) => { + self.key_builder.get_prefix_is_error_key(*is_error) } - } - if let Some(is_final) = filter.is_final { - if event.context.is_final != is_final { - return None; + _ => unreachable!(), + }; + + #[allow(clippy::manual_flatten)] + for kvb in self.db.prefix_iterator(prefix_filter.as_slice()) { + if let Ok(kvb) = kvb { + if !kvb.0.starts_with(&[*indent as u8]) { + break; + } + + // FIXME: is this always true? 
+ if !kvb.0.starts_with(prefix_filter.as_slice()) { + break; + } + + let found = kvb + .0 + .strip_prefix(prefix_filter.as_slice()) + .unwrap() + .to_vec(); + + if let Some(filter_set_seen) = seen { + if filter_set_seen.contains(&found) { + result.insert(found); + } + + // We have already found as many items as in a previous search + // As we search from the lowest count to the highest count, we will never add new items + // in our result, so we can break here + if filter_set_seen.len() == result.len() { + break; + } + } else { + result.insert(found); + } } } - if let Some(is_error) = filter.is_error { - if event.context.is_error != is_error { - return None; - } + } + } + + /// Estimate for a given KeyIndent & FilterItem the number of row to process + fn filter_item_estimate_count( + &self, + key_indent: &KeyIndent, + filter_item: &FilterItem, + ) -> Result { + match filter_item { + FilterItem::SlotStart(start) => { + let diff = self.last_slot.slots_since(start, self.thread_count)?; + // Note: Pessimistic estimation - should we keep an average count of events per slot + // and use that instead? 
+ Ok(diff + .saturating_mul(self.max_events_per_operation) + .saturating_mul(self.max_operations_per_block)) + } + FilterItem::SlotStartEnd(start, end) => { + let diff = end.slots_since(start, self.thread_count)?; + Ok(diff + .saturating_mul(self.max_events_per_operation) + .saturating_mul(self.max_operations_per_block)) + } + FilterItem::SlotEnd(end) => { + let diff = end.slots_since(&self.first_slot, self.thread_count)?; + Ok(diff + .saturating_mul(self.max_events_per_operation) + .saturating_mul(self.max_operations_per_block)) + } + FilterItem::EmitterAddress(_addr) => { + let counter_key = self.key_builder.get_counter_key_from_indent(key_indent); + let counter = u64::from_be_bytes( + self.db + .get(counter_key) + .expect(COUNTER_ERROR) + .unwrap() // safe to unwrap - counter init in new + .try_into() + .unwrap(), // safe to unwrap - counter is init with u64.to_be_bytes + ); + Ok(counter) } - match ( - filter.original_caller_address, - event.context.call_stack.front(), - ) { - (Some(addr1), Some(addr2)) if addr1 != *addr2 => return None, - (Some(_), None) => return None, - _ => (), + FilterItem::OriginalCallerAddress(_addr) => { + let counter_key = self.key_builder.get_counter_key_from_indent(key_indent); + let counter = u64::from_be_bytes( + self.db + .get(counter_key) + .expect(COUNTER_ERROR) + .unwrap() + .try_into() + .unwrap(), + ); + Ok(counter) } - match (filter.emitter_address, event.context.call_stack.back()) { - (Some(addr1), Some(addr2)) if addr1 != *addr2 => return None, - (Some(_), None) => return None, - _ => (), + FilterItem::OriginalOperationId(_op_id) => { + let counter_key = self.key_builder.get_counter_key_from_indent(key_indent); + let counter_ = self.db.get(counter_key); + let counter = + u64::from_be_bytes(counter_.expect(COUNTER_ERROR).unwrap().try_into().unwrap()); + Ok(counter) } - match ( - filter.original_operation_id, - event.context.origin_operation_id, - ) { - (Some(addr1), Some(addr2)) if addr1 != addr2 => return None, - (Some(_), 
None) => return None, - _ => (), + FilterItem::IsError(is_error) => { + let counter_key = self + .key_builder + .get_counter_key_bool_from(&[*key_indent as u8], *is_error); + let counter_ = self.db.get(counter_key); + let counter = + u64::from_be_bytes(counter_.expect(COUNTER_ERROR).unwrap().try_into().unwrap()); + Ok(counter) } - Some(event) - }) + } } /// Try to remove some entries from the db @@ -196,19 +598,143 @@ impl EventCache { let kvb = key_value .unwrap() // safe to unwrap - just tested it .expect(EVENT_DESER_ERROR); - batch.delete(kvb.0); + + let key = kvb.0; + if !key.starts_with(&[u8::from(KeyIndent::Event)]) { + continue; + } + + let (_, event) = self + .event_deser + .deserialize::(&kvb.1) + .unwrap(); + + // delete all associated key + if let Some(key) = self.key_builder.get_emitter_address_key(&event) { + let key_counter = self.key_builder.get_counter_key_from(key.as_slice()); + batch.delete(key); + batch.merge(key_counter, (-1i64).to_be_bytes()); + } + if let Some(key) = self.key_builder.get_original_caller_address_key(&event) { + let key_counter = self.key_builder.get_counter_key_from(key.as_slice()); + batch.delete(key); + batch.merge(key_counter, (-1i64).to_be_bytes()); + } + if let Some(key) = self.key_builder.get_original_operation_id_key(&event) { + let key_counter = self.key_builder.get_counter_key_from(key.as_slice()); + batch.delete(key); + batch.merge(key_counter, (-1i64).to_be_bytes()); + } + let key_is_error = self.key_builder.get_is_error_key(&event); + let key_counter = self + .key_builder + .get_counter_key_bool_from(key_is_error.as_slice(), event.context.is_error); + batch.delete(key_is_error); + batch.merge(key_counter, (-1i64).to_be_bytes()); + + batch.delete(key); + snipped_count += 1; } // delete the key and reduce entry_count self.db.write(batch).expect(CRUD_ERROR); - self.entry_count -= snipped_count; + self.entry_count = self.entry_count.saturating_sub(snipped_count); + + // Update first_slot / last_slot in the DB + if 
self.entry_count == 0 { + // Reset + self.first_slot = Slot::new(0, 0); + self.last_slot = Slot::new(0, 0); + } else { + // Get the first event in the db + // By using a prefix iterator this should be fast + + let mut it_slot = self.db.prefix_iterator([u8::from(KeyIndent::Event)]); + + let key_value = it_slot.next(); + let kvb = key_value.unwrap().expect(EVENT_DESER_ERROR); + + let (_, event) = self + .event_deser + .deserialize::(&kvb.1) + .unwrap(); + self.first_slot = event.context.slot; + } + } +} + +/// A filter parameter - used to decompose EventFilter in multiple filters +#[derive(Debug)] +enum FilterItem { + SlotStart(Slot), + SlotStartEnd(Slot, Slot), + SlotEnd(Slot), + EmitterAddress(Address), + OriginalCallerAddress(Address), + OriginalOperationId(OperationId), + IsError(bool), +} + +/// Convert a EventFilter into a list of (KeyIndent, FilterItem) +fn from_event_filter(event_filter: &EventFilter) -> Vec<(KeyIndent, FilterItem)> { + let mut filter_items = vec![]; + if event_filter.start.is_some() && event_filter.end.is_some() { + let start = event_filter.start.unwrap(); + let end = event_filter.end.unwrap(); + filter_items.push((KeyIndent::Event, FilterItem::SlotStartEnd(start, end))); + } else if event_filter.start.is_some() { + let start = event_filter.start.unwrap(); + filter_items.push((KeyIndent::Event, FilterItem::SlotStart(start))); + } else if event_filter.end.is_some() { + let end = event_filter.end.unwrap(); + filter_items.push((KeyIndent::Event, FilterItem::SlotEnd(end))); + } + + if let Some(addr) = event_filter.emitter_address { + filter_items.push((KeyIndent::EmitterAddress, FilterItem::EmitterAddress(addr))); + } + + if let Some(addr) = event_filter.original_caller_address { + filter_items.push(( + KeyIndent::OriginalCallerAddress, + FilterItem::OriginalCallerAddress(addr), + )); + } + + if let Some(op_id) = event_filter.original_operation_id { + filter_items.push(( + KeyIndent::OriginalOperationId, + FilterItem::OriginalOperationId(op_id), 
+ )); + } + + if let Some(is_error) = event_filter.is_error { + filter_items.push((KeyIndent::IsError, FilterItem::IsError(is_error))); + } + + filter_items +} + +#[cfg(test)] +impl EventCache { + /// Iterate over all keys & values in the db - test only + fn iter_all( + &self, + mode: Option, + ) -> impl Iterator, Box<[u8]>)> + '_ { + self.db + .iterator(mode.unwrap_or(IteratorMode::Start)) + .flatten() } } #[cfg(test)] mod tests { use super::*; + // std + use std::collections::VecDeque; + use std::str::FromStr; // third-party use more_asserts::assert_gt; use rand::seq::SliceRandom; @@ -216,7 +742,10 @@ mod tests { use serial_test::serial; use tempfile::TempDir; // internal - use massa_models::config::{MAX_EVENT_DATA_SIZE, MAX_RECURSIVE_CALLS_DEPTH, THREAD_COUNT}; + use massa_models::config::{ + MAX_EVENT_DATA_SIZE, MAX_OPERATIONS_PER_BLOCK, MAX_RECURSIVE_CALLS_DEPTH, THREAD_COUNT, + }; + use massa_models::operation::OperationId; use massa_models::output_event::EventExecutionContext; use massa_models::slot::Slot; @@ -229,6 +758,10 @@ mod tests { THREAD_COUNT, MAX_RECURSIVE_CALLS_DEPTH, MAX_EVENT_DATA_SIZE as u64, + // TODO: rebase + // MAX_EVENT_PER_OPERATION as u64, + 25u64, + MAX_OPERATIONS_PER_BLOCK as u64, ) } @@ -286,27 +819,30 @@ mod tests { } // Now check that we are going to iter in correct order - let db_it = cache.db_iter(Some(IteratorMode::Start)); + // let db_it = cache.db_iter(Some(IteratorMode::Start)); let mut prev_slot = None; let mut prev_event_index = None; #[allow(clippy::manual_flatten)] - for kvb in db_it { - if let Ok(kvb) = kvb { - let bytes = kvb.0.iter().as_slice(); - let slot = Slot::from_bytes_key(&bytes[0..=8].try_into().unwrap()); - let event_index = u64::from_be_bytes(bytes[9..].try_into().unwrap()); - if prev_slot.is_some() && prev_event_index.is_some() { - assert_gt!( - (slot, event_index), - (prev_slot.unwrap(), prev_event_index.unwrap()) - ); - } else { - assert_eq!(slot, slot_1); - assert_eq!(event_index, index_1_0); - } - 
prev_slot = Some(slot); - prev_event_index = Some(event_index); + for kvb in cache.iter_all(None) { + let bytes = kvb.0.iter().as_slice(); + + if bytes[0] != u8::from(KeyIndent::Event) { + continue; } + + let slot = Slot::from_bytes_key(&bytes[1..=9].try_into().unwrap()); + let event_index = u64::from_be_bytes(bytes[10..].try_into().unwrap()); + if prev_slot.is_some() && prev_event_index.is_some() { + assert_gt!( + (slot, event_index), + (prev_slot.unwrap(), prev_event_index.unwrap()) + ); + } else { + assert_eq!(slot, slot_1); + assert_eq!(event_index, index_1_0); + } + prev_slot = Some(slot); + prev_event_index = Some(event_index); } assert_eq!(prev_slot, Some(slot_2)); @@ -439,7 +975,7 @@ mod tests { }; events.push(event_slot_2.clone()); events.push(event_slot_2_2.clone()); - // Randomize the events so we insert in random orders in DB + // Randomize the events so we insert in random orders in the DB events.shuffle(&mut thread_rng()); cache.insert_multi_it(events.into_iter()); @@ -453,14 +989,490 @@ mod tests { is_final: None, is_error: None, }; - let filtered_events_1 = cache - .get_filtered_sc_output_events(&filter_1) - .collect::>(); + + let filtered_events_1 = cache.get_filtered_sc_output_events(&filter_1); assert_eq!(filtered_events_1.len(), 2); + println!("filtered_events_1[0]: {:?}", filtered_events_1[0]); assert_eq!(filtered_events_1[0].context.slot, slot_2); assert_eq!(filtered_events_1[0].context.index_in_slot, index_2_1); + println!("filtered_events_1[1]: {:?}", filtered_events_1[1]); assert_eq!(filtered_events_1[1].context.slot, slot_2); assert_eq!(filtered_events_1[1].context.index_in_slot, index_2_2); } + + #[test] + #[serial] + fn test_event_filter_2() { + // Test get_filtered_sc_output_events + op id + + let mut cache = setup(); + cache.max_entry_count = 10; + + let slot_1 = Slot::new(1, 0); + let index_1_0 = 0; + let op_id_1 = + OperationId::from_str("O12n1vt8uTLh3H65J4TVuztaWfBh3oumjjVtRCkke7Ba5qWdXdjD").unwrap(); + let op_id_2 = + 
OperationId::from_str("O1p5P691KF672fQ8tQougxzSERBwDKZF8FwtkifMSJbP14sEuGc").unwrap(); + + let event = SCOutputEvent { + context: EventExecutionContext { + slot: slot_1, + block: None, + read_only: false, + index_in_slot: index_1_0, + call_stack: Default::default(), + origin_operation_id: Some(op_id_1), + is_final: true, + is_error: false, + }, + data: "message foo bar".to_string(), + }; + + let mut events = (0..cache.max_entry_count - 5) + .map(|i| { + let mut event = event.clone(); + event.context.index_in_slot = i as u64; + event + }) + .collect::>(); + + let slot_2 = Slot::new(2, 0); + let index_2_1 = 0u64; + let event_slot_2 = { + let mut event = event.clone(); + event.context.slot = slot_2; + event.context.index_in_slot = index_2_1; + event.context.origin_operation_id = Some(op_id_2); + event + }; + let index_2_2 = 256u64; + let event_slot_2_2 = { + let mut event = event.clone(); + event.context.slot = slot_2; + event.context.index_in_slot = index_2_2; + event.context.origin_operation_id = Some(op_id_2); + event + }; + events.push(event_slot_2.clone()); + events.push(event_slot_2_2.clone()); + // Randomize the events so we insert in random orders in the DB + events.shuffle(&mut thread_rng()); + println!("inserting events:"); + for evt in events.iter() { + println!("{:?}", evt); + } + println!("{}", "#".repeat(32)); + + cache.insert_multi_it(events.into_iter()); + + for (k, v) in cache.iter_all(None) { + println!("k: {:?}, v: {:?}", k, v); + } + println!("{}", "#".repeat(32)); + + let mut filter_1 = EventFilter { + start: None, // Some(Slot::new(2, 0)), + end: None, + emitter_address: None, + original_caller_address: None, + original_operation_id: Some(op_id_1), + is_final: None, + is_error: None, + }; + + let filtered_events_1 = cache.get_filtered_sc_output_events(&filter_1); + + assert_eq!(filtered_events_1.len(), cache.max_entry_count - 5); + filtered_events_1.iter().enumerate().for_each(|(i, event)| { + println!("checking event #{}: {:?}", i, event); + 
assert_eq!(event.context.slot, slot_1); + assert_eq!(event.context.index_in_slot, i as u64); + }); + + // println!("filtered_events_1[0]: {:?}", filtered_events_1[0]); + // assert_eq!(filtered_events_1[0].context.slot, slot_2); + // assert_eq!(filtered_events_1[0].context.index_in_slot, index_2_1); + // println!("filtered_events_1[1]: {:?}", filtered_events_1[1]); + // assert_eq!(filtered_events_1[1].context.slot, slot_2); + // assert_eq!(filtered_events_1[1].context.index_in_slot, index_2_2); + + { + filter_1.original_operation_id = Some(op_id_2); + let filtered_events_2 = cache.get_filtered_sc_output_events(&filter_1); + assert_eq!(filtered_events_2.len(), 2); + filtered_events_2.iter().enumerate().for_each(|(i, event)| { + println!("checking event #{}: {:?}", i, event); + assert_eq!(event.context.slot, slot_2); + if i == 0 { + assert_eq!(event.context.index_in_slot, i as u64); + } else { + assert_eq!(event.context.index_in_slot, 256u64); + } + }); + } + } + + #[test] + #[serial] + fn test_event_filter_3() { + // Test get_filtered_sc_output_events + emitter address + + let mut cache = setup(); + cache.max_entry_count = 10; + + let slot_1 = Slot::new(1, 0); + let index_1_0 = 0; + + let dummy_addr = + Address::from_str("AU12qePoXhNbYWE1jZuafqJong7bbq1jw3k89RgbMawbrdZpaasoA").unwrap(); + let emit_addr_1 = + Address::from_str("AU122Em8qkqegdLb1eyH8rdkSCNEf7RZLeTJve4Q2inRPGiTJ2xNv").unwrap(); + let emit_addr_2 = + Address::from_str("AU12WuVR1Td74q9eAbtYZUnk5jnRbUuUacyhQFwm217bV5v1mNqTZ").unwrap(); + + let event = SCOutputEvent { + context: EventExecutionContext { + slot: slot_1, + block: None, + read_only: false, + index_in_slot: index_1_0, + call_stack: Default::default(), + origin_operation_id: None, + is_final: true, + is_error: false, + }, + data: "message foo bar".to_string(), + }; + + let to_insert_count = cache.max_entry_count - 5; + let threshold = to_insert_count / 2; + let mut events = (0..cache.max_entry_count - 5) + .map(|i| { + let mut event = 
event.clone(); + event.context.index_in_slot = i as u64; + if i < threshold { + event.context.call_stack = + VecDeque::from(vec![dummy_addr, emit_addr_1.clone()]); + } else { + event.context.call_stack = + VecDeque::from(vec![dummy_addr, emit_addr_2.clone()]); + } + event + }) + .collect::>(); + + let slot_2 = Slot::new(2, 0); + let index_2_1 = 0u64; + let event_slot_2 = { + let mut event = event.clone(); + event.context.slot = slot_2; + event.context.index_in_slot = index_2_1; + event.context.call_stack = VecDeque::from(vec![dummy_addr, emit_addr_2.clone()]); + event + }; + let index_2_2 = 256u64; + let event_slot_2_2 = { + let mut event = event.clone(); + event.context.slot = slot_2; + event.context.index_in_slot = index_2_2; + event.context.call_stack = VecDeque::from(vec![dummy_addr, emit_addr_2.clone()]); + event + }; + events.push(event_slot_2.clone()); + events.push(event_slot_2_2.clone()); + // Randomize the events so we insert in random orders in the DB + events.shuffle(&mut thread_rng()); + println!("inserting events:"); + for evt in events.iter() { + println!("{:?}", evt); + } + println!("{}", "#".repeat(32)); + + cache.insert_multi_it(events.into_iter()); + + println!("db iter all:"); + for (k, v) in cache.iter_all(None) { + println!("k: {:?}, v: {:?}", k, v); + } + println!("{}", "#".repeat(32)); + + let mut filter_1 = EventFilter { + start: None, // Some(Slot::new(2, 0)), + end: None, + emitter_address: Some(emit_addr_1), + original_caller_address: None, + original_operation_id: None, + is_final: None, + is_error: None, + }; + + let filtered_events_1 = cache.get_filtered_sc_output_events(&filter_1); + + assert_eq!(filtered_events_1.len(), threshold); + filtered_events_1 + .iter() + .enumerate() + .for_each(|(_i, event)| { + assert_eq!(event.context.slot, slot_1); + assert_eq!(*event.context.call_stack.back().unwrap(), emit_addr_1) + }); + + { + filter_1.emitter_address = Some(emit_addr_2); + let filtered_events_2 = 
cache.get_filtered_sc_output_events(&filter_1); + assert_eq!(filtered_events_2.len(), threshold + 1 + 2); + filtered_events_2 + .iter() + .enumerate() + .for_each(|(_i, event)| { + assert_eq!(*event.context.call_stack.back().unwrap(), emit_addr_2) + }); + } + } + + #[test] + #[serial] + fn test_event_filter_4() { + // Test get_filtered_sc_output_events + original caller addr + + let mut cache = setup(); + cache.max_entry_count = 10; + + let slot_1 = Slot::new(1, 0); + let index_1_0 = 0; + + let dummy_addr = + Address::from_str("AU12qePoXhNbYWE1jZuafqJong7bbq1jw3k89RgbMawbrdZpaasoA").unwrap(); + let emit_addr_1 = + Address::from_str("AU122Em8qkqegdLb1eyH8rdkSCNEf7RZLeTJve4Q2inRPGiTJ2xNv").unwrap(); + let emit_addr_2 = + Address::from_str("AU12WuVR1Td74q9eAbtYZUnk5jnRbUuUacyhQFwm217bV5v1mNqTZ").unwrap(); + + let event = SCOutputEvent { + context: EventExecutionContext { + slot: slot_1, + block: None, + read_only: false, + index_in_slot: index_1_0, + call_stack: Default::default(), + origin_operation_id: None, + is_final: true, + is_error: false, + }, + data: "message foo bar".to_string(), + }; + + let to_insert_count = cache.max_entry_count - 5; + let threshold = to_insert_count / 2; + let mut events = (0..cache.max_entry_count - 5) + .map(|i| { + let mut event = event.clone(); + event.context.index_in_slot = i as u64; + if i < threshold { + event.context.call_stack = + VecDeque::from(vec![emit_addr_1.clone(), dummy_addr.clone()]); + } else { + event.context.call_stack = + VecDeque::from(vec![emit_addr_2.clone(), dummy_addr]); + } + event + }) + .collect::>(); + + let slot_2 = Slot::new(2, 0); + let index_2_1 = 0u64; + let event_slot_2 = { + let mut event = event.clone(); + event.context.slot = slot_2; + event.context.index_in_slot = index_2_1; + event.context.call_stack = VecDeque::from(vec![emit_addr_2.clone(), dummy_addr]); + event + }; + let index_2_2 = 256u64; + let event_slot_2_2 = { + let mut event = event.clone(); + event.context.slot = slot_2; + 
event.context.index_in_slot = index_2_2; + event.context.call_stack = VecDeque::from(vec![emit_addr_2.clone(), dummy_addr]); + event + }; + events.push(event_slot_2.clone()); + events.push(event_slot_2_2.clone()); + // Randomize the events so we insert in random orders in the DB + events.shuffle(&mut thread_rng()); + println!("inserting events:"); + for evt in events.iter() { + println!("{:?}", evt); + } + println!("{}", "#".repeat(32)); + + cache.insert_multi_it(events.into_iter()); + + println!("db iter all:"); + for (k, v) in cache.iter_all(None) { + println!("k: {:?}, v: {:?}", k, v); + } + println!("{}", "#".repeat(32)); + + let mut filter_1 = EventFilter { + start: None, // Some(Slot::new(2, 0)), + end: None, + emitter_address: None, + original_caller_address: Some(emit_addr_1), + original_operation_id: None, + is_final: None, + is_error: None, + }; + + let filtered_events_1 = cache.get_filtered_sc_output_events(&filter_1); + + assert_eq!(filtered_events_1.len(), threshold); + filtered_events_1 + .iter() + .enumerate() + .for_each(|(_i, event)| { + assert_eq!(event.context.slot, slot_1); + assert_eq!(*event.context.call_stack.front().unwrap(), emit_addr_1); + }); + + { + filter_1.original_caller_address = Some(emit_addr_2); + let filtered_events_2 = cache.get_filtered_sc_output_events(&filter_1); + assert_eq!(filtered_events_2.len(), threshold + 1 + 2); + filtered_events_2 + .iter() + .enumerate() + .for_each(|(_i, event)| { + assert_eq!(*event.context.call_stack.front().unwrap(), emit_addr_2); + }); + } + } + + #[test] + #[serial] + fn test_event_filter_5() { + // Test get_filtered_sc_output_events + is error + + let mut cache = setup(); + cache.max_entry_count = 10; + + let slot_1 = Slot::new(1, 0); + let index_1_0 = 0; + + let dummy_addr = + Address::from_str("AU12qePoXhNbYWE1jZuafqJong7bbq1jw3k89RgbMawbrdZpaasoA").unwrap(); + let emit_addr_1 = + Address::from_str("AU122Em8qkqegdLb1eyH8rdkSCNEf7RZLeTJve4Q2inRPGiTJ2xNv").unwrap(); + let emit_addr_2 = + 
Address::from_str("AU12WuVR1Td74q9eAbtYZUnk5jnRbUuUacyhQFwm217bV5v1mNqTZ").unwrap(); + + let event = SCOutputEvent { + context: EventExecutionContext { + slot: slot_1, + block: None, + read_only: false, + index_in_slot: index_1_0, + call_stack: Default::default(), + origin_operation_id: None, + is_final: true, + is_error: false, + }, + data: "message foo bar".to_string(), + }; + + let to_insert_count = cache.max_entry_count - 5; + let threshold = to_insert_count / 2; + let mut events = (0..cache.max_entry_count - 5) + .map(|i| { + let mut event = event.clone(); + event.context.index_in_slot = i as u64; + if i < threshold { + event.context.call_stack = + VecDeque::from(vec![emit_addr_1.clone(), dummy_addr.clone()]); + } else { + event.context.call_stack = + VecDeque::from(vec![emit_addr_2.clone(), dummy_addr]); + } + event + }) + .collect::>(); + + let slot_2 = Slot::new(2, 0); + let index_2_1 = 0u64; + let event_slot_2 = { + let mut event = event.clone(); + event.context.slot = slot_2; + event.context.index_in_slot = index_2_1; + event.context.call_stack = VecDeque::from(vec![emit_addr_2.clone(), dummy_addr]); + event + }; + let index_2_2 = 256u64; + let event_slot_2_2 = { + let mut event = event.clone(); + event.context.slot = slot_2; + event.context.index_in_slot = index_2_2; + event.context.call_stack = VecDeque::from(vec![emit_addr_2.clone(), dummy_addr]); + event.context.is_error = true; + event + }; + events.push(event_slot_2.clone()); + events.push(event_slot_2_2.clone()); + // Randomize the events so we insert in random orders in the DB + events.shuffle(&mut thread_rng()); + println!("inserting events:"); + for evt in events.iter() { + println!("{:?}", evt); + } + println!("{}", "#".repeat(32)); + + cache.insert_multi_it(events.into_iter()); + + println!("db iter all:"); + for (k, v) in cache.iter_all(None) { + println!("k: {:?}, v: {:?}", k, v); + } + println!("{}", "#".repeat(32)); + + let filter_1 = EventFilter { + start: None, // Some(Slot::new(2, 0)), 
+ end: None, + emitter_address: None, + original_caller_address: None, + original_operation_id: None, + is_final: None, + is_error: Some(true), + }; + + let filtered_events_1 = cache.get_filtered_sc_output_events(&filter_1); + + assert_eq!(filtered_events_1.len(), 1); + assert_eq!(filtered_events_1[0].context.is_error, true); + assert_eq!(filtered_events_1[0].context.slot, slot_2); + assert_eq!(filtered_events_1[0].context.index_in_slot, index_2_2); + + // filtered_events_1 + // .iter() + // .enumerate() + // .for_each(|(_i, event)| { + // assert_eq!(event.context.slot, slot_1); + // assert_eq!(*event.context.call_stack.front().unwrap(), emit_addr_1); + // }); + + /* + { + filter_1.original_caller_address = Some(emit_addr_2); + let filtered_events_2 = cache + .get_filtered_sc_output_events(&filter_1); + assert_eq!(filtered_events_2.len(), threshold + 1 + 2); + filtered_events_2 + .iter() + .enumerate() + .for_each(|(_i, event)| { + assert_eq!(*event.context.call_stack.front().unwrap(), emit_addr_2); + }); + } + */ + } } diff --git a/massa-event-cache/src/lib.rs b/massa-event-cache/src/lib.rs index 685578a1442..d56da234a5c 100644 --- a/massa-event-cache/src/lib.rs +++ b/massa-event-cache/src/lib.rs @@ -2,6 +2,7 @@ pub mod config; pub mod controller; mod event_cache; +mod rocksdb_operator; mod ser_deser; pub mod worker; diff --git a/massa-event-cache/src/rocksdb_operator.rs b/massa-event-cache/src/rocksdb_operator.rs new file mode 100644 index 00000000000..65b9a9fd71b --- /dev/null +++ b/massa-event-cache/src/rocksdb_operator.rs @@ -0,0 +1,105 @@ +use rocksdb::MergeOperands; + +pub fn counter_merge( + _key: &[u8], + existing_val: Option<&[u8]>, + operands: &MergeOperands, +) -> Option> { + let counter_current_value = u64::from_be_bytes(existing_val?.try_into().unwrap()); + let counter_value = operands.iter().fold(counter_current_value, |mut acc, x| { + let incr_value = i64::from_be_bytes(x.try_into().unwrap()); + acc = acc.saturating_add_signed(incr_value); + acc + 
}); + + Some(counter_value.to_be_bytes().to_vec()) +} + +#[cfg(test)] +mod tests { + use super::*; + // std + // third-party + use rocksdb::{Options, DB}; + use serial_test::serial; + use tempfile::TempDir; + + #[test] + #[serial] + fn test_operator() { + let tmp_path = TempDir::new().unwrap().path().to_path_buf(); + let options = { + let mut opts = Options::default(); + opts.create_if_missing(true); + opts.set_merge_operator_associative("counter merge operator", counter_merge); + opts + }; + let db = DB::open(&options, tmp_path).unwrap(); + let key_1 = "foo1"; + let key_2 = "baz42"; + db.put(key_1, 0u64.to_be_bytes()).unwrap(); + db.put(key_2, 0u64.to_be_bytes()).unwrap(); + + let value = db.get(key_1).unwrap().unwrap(); + assert_eq!(u64::from_be_bytes(value.try_into().unwrap()), 0); + let value2 = db.get(key_2).unwrap().unwrap(); + assert_eq!(u64::from_be_bytes(value2.try_into().unwrap()), 0); + + // key_1 counter += 1 + db.merge(key_1, 1i64.to_be_bytes()).unwrap(); + + let value = db.get(key_1).unwrap().unwrap(); + assert_eq!(u64::from_be_bytes(value.try_into().unwrap()), 1); + let value2 = db.get(key_2).unwrap().unwrap(); + assert_eq!(u64::from_be_bytes(value2.try_into().unwrap()), 0); + + // key_2 counter += 9 + db.merge(key_2, 9i64.to_be_bytes()).unwrap(); + // key_2 counter += 1 + db.merge(key_2, 1i64.to_be_bytes()).unwrap(); + // key_2 counter += 32 + db.merge(key_2, 32i64.to_be_bytes()).unwrap(); + + let value = db.get(key_1).unwrap().unwrap(); + assert_eq!(u64::from_be_bytes(value.try_into().unwrap()), 1); + let value2 = db.get(key_2).unwrap().unwrap(); + assert_eq!(u64::from_be_bytes(value2.try_into().unwrap()), 42); + } + + #[test] + #[serial] + fn test_operator_2() { + let tmp_path = TempDir::new().unwrap().path().to_path_buf(); + let options = { + let mut opts = Options::default(); + opts.create_if_missing(true); + opts.set_merge_operator_associative("counter merge operator", counter_merge); + opts + }; + let db = DB::open(&options, 
tmp_path).unwrap(); + let key_1 = "foo1"; + let key_2 = "baz42"; + db.put(key_1, 0u64.to_be_bytes()).unwrap(); + db.put(key_2, 0u64.to_be_bytes()).unwrap(); + + let value = db.get(key_1).unwrap().unwrap(); + assert_eq!(u64::from_be_bytes(value.try_into().unwrap()), 0); + let value2 = db.get(key_2).unwrap().unwrap(); + assert_eq!(u64::from_be_bytes(value2.try_into().unwrap()), 0); + + // key_1 counter += 1 + db.merge(key_1, 1i64.to_be_bytes()).unwrap(); + + let value = db.get(key_1).unwrap().unwrap(); + assert_eq!(u64::from_be_bytes(value.try_into().unwrap()), 1); + let value2 = db.get(key_2).unwrap().unwrap(); + assert_eq!(u64::from_be_bytes(value2.try_into().unwrap()), 0); + + db.merge(key_1, (-3i64).to_be_bytes()).unwrap(); + + let value = db.get(key_1).unwrap().unwrap(); + assert_eq!(u64::from_be_bytes(value.try_into().unwrap()), 0); + let value2 = db.get(key_2).unwrap().unwrap(); + assert_eq!(u64::from_be_bytes(value2.try_into().unwrap()), 0); + } +} diff --git a/massa-event-cache/src/worker.rs b/massa-event-cache/src/worker.rs index 6d3a488eabe..a50c99aeef5 100644 --- a/massa-event-cache/src/worker.rs +++ b/massa-event-cache/src/worker.rs @@ -125,6 +125,8 @@ pub fn start_event_cache_writer_worker( cfg.thread_count, cfg.max_call_stack_length, cfg.max_event_data_length, + cfg.max_events_per_operation, + cfg.max_operations_per_block, ))); // define the input data interface diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index c379d439222..1e4ba75791c 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -483,10 +483,13 @@ async fn launch( snip_amount: SETTINGS.execution.event_snip_amount, max_event_data_length: MAX_EVENT_DATA_SIZE as u64, thread_count: THREAD_COUNT, - // Note: SCOutputEvent call stack comes from the execution module, and we assume + // Note: SCOutputEvent call stack comes from the execution module, and we assume // this should return a limited call stack length // The value remains for future use & limitations 
max_call_stack_length: u16::MAX, + + max_events_per_operation: 25u64, // MAX_EVENTS_PER_OPERATION - TODO: rebase + max_operations_per_block: MAX_OPERATIONS_PER_BLOCK as u64, }; let (event_cache_manager, event_cache_controller) = start_event_cache_writer_worker(event_cache_config); From 47819b17ace3a8829235df4be4c4aaa44e426d77 Mon Sep 17 00:00:00 2001 From: sydhds Date: Mon, 25 Nov 2024 11:11:03 +0100 Subject: [PATCH 15/40] Avoid lock contention in controller::get_filtered_sc_output_events --- massa-event-cache/src/controller.rs | 20 +++++++++++++++----- massa-event-cache/src/worker.rs | 2 +- massa-models/src/output_event.rs | 18 +++++++++++++++--- 3 files changed, 31 insertions(+), 9 deletions(-) diff --git a/massa-event-cache/src/controller.rs b/massa-event-cache/src/controller.rs index 178fa6b424f..a58e8a6b4bb 100644 --- a/massa-event-cache/src/controller.rs +++ b/massa-event-cache/src/controller.rs @@ -1,5 +1,5 @@ // std -use std::collections::VecDeque; +use std::collections::{BTreeSet, VecDeque}; use std::sync::Arc; // third-party use parking_lot::{Condvar, Mutex, RwLock}; @@ -23,7 +23,7 @@ impl EventCacheWriterInputData { events: Default::default(), } } - + /// Takes the current input data into a clone that is returned, /// and resets self. 
pub fn take(&mut self) -> Self { @@ -108,10 +108,20 @@ impl EventCacheController for EventCacheControllerImpl { } Some(event) }); + + let mut res_0: BTreeSet = it.cloned().collect(); + // Drop the lock on the queue as soon as possible to avoid deadlocks + drop(lock_0); let lock = self.cache.read(); - it.cloned() - .chain(lock.get_filtered_sc_output_events(filter)) - .collect() + + let res_1 = lock.get_filtered_sc_output_events(filter); + // Drop the lock on the event cache db asap + drop(lock); + + let res_1: BTreeSet = BTreeSet::from_iter(res_1); + + res_0.extend(res_1); + Vec::from_iter(res_0) } } diff --git a/massa-event-cache/src/worker.rs b/massa-event-cache/src/worker.rs index a50c99aeef5..416963dc592 100644 --- a/massa-event-cache/src/worker.rs +++ b/massa-event-cache/src/worker.rs @@ -42,7 +42,7 @@ impl EventCacheWriterThread { // take current input data, resetting it let input_data: EventCacheWriterInputData = input_data_lock.take(); - + // Check if there is some input data if !input_data.events.is_empty() { return (input_data, false); diff --git a/massa-models/src/output_event.rs b/massa-models/src/output_event.rs index 7b7a70e6cd7..f171aa6e359 100644 --- a/massa-models/src/output_event.rs +++ b/massa-models/src/output_event.rs @@ -1,8 +1,9 @@ use crate::{address::Address, block_id::BlockId, operation::OperationId, slot::Slot}; use serde::{Deserialize, Serialize}; use std::{collections::VecDeque, fmt::Display}; +use std::cmp::Ordering; -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] /// By product of a byte code execution pub struct SCOutputEvent { /// context generated by the execution context @@ -11,6 +12,18 @@ pub struct SCOutputEvent { pub data: String, } +impl PartialOrd for SCOutputEvent { + fn partial_cmp(&self, other: &Self) -> Option { + (self.context.slot, self.context.index_in_slot).partial_cmp(&(other.context.slot, other.context.index_in_slot)) + } +} + +impl Ord for 
SCOutputEvent { + fn cmp(&self, other: &Self) -> Ordering { + (self.context.slot, self.context.index_in_slot).cmp(&(other.context.slot, other.context.index_in_slot)) + } +} + impl Display for SCOutputEvent { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { writeln!(f, "Context: {}", self.context)?; @@ -19,8 +32,7 @@ impl Display for SCOutputEvent { } /// Context of the event (not generated by the user) -#[derive(Debug, Clone, Serialize, Deserialize)] -#[cfg_attr(feature = "test-exports", derive(PartialEq))] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct EventExecutionContext { /// when was it generated pub slot: Slot, From 5dfe45c9f9dda6eee8c9036704612f080afadd91 Mon Sep 17 00:00:00 2001 From: sydhds Date: Mon, 25 Nov 2024 11:18:37 +0100 Subject: [PATCH 16/40] Improve comment --- massa-event-cache/src/worker.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/massa-event-cache/src/worker.rs b/massa-event-cache/src/worker.rs index 416963dc592..de2edaaa143 100644 --- a/massa-event-cache/src/worker.rs +++ b/massa-event-cache/src/worker.rs @@ -30,7 +30,7 @@ impl EventCacheWriterThread { } } - /// Waits for an event to trigger a new iteration in the .... + /// Waits for an event to trigger a new iteration in the event cache main loop. 
/// /// # Returns /// `ExecutionInputData` representing the input requests, @@ -42,7 +42,7 @@ impl EventCacheWriterThread { // take current input data, resetting it let input_data: EventCacheWriterInputData = input_data_lock.take(); - + // Check if there is some input data if !input_data.events.is_empty() { return (input_data, false); From 46b1cf71d10bbabef4fb91559a5de1db40953c08 Mon Sep 17 00:00:00 2001 From: sydhds Date: Mon, 25 Nov 2024 11:30:57 +0100 Subject: [PATCH 17/40] Add query limit --- massa-event-cache/src/config.rs | 2 ++ massa-event-cache/src/event_cache.rs | 10 +++++++--- massa-event-cache/src/worker.rs | 1 + massa-models/src/config/constants.rs | 9 +++++++++ massa-node/src/main.rs | 9 +++++---- 5 files changed, 24 insertions(+), 7 deletions(-) diff --git a/massa-event-cache/src/config.rs b/massa-event-cache/src/config.rs index 257d03b4a21..9eab23e0d5b 100644 --- a/massa-event-cache/src/config.rs +++ b/massa-event-cache/src/config.rs @@ -17,4 +17,6 @@ pub struct EventCacheConfig { pub max_events_per_operation: u64, /// Maximum number of operations per block pub max_operations_per_block: u64, + /// Maximum events returned in a query + pub max_events_per_query: usize, } diff --git a/massa-event-cache/src/event_cache.rs b/massa-event-cache/src/event_cache.rs index 76c2464d6e8..7357c61d2b6 100644 --- a/massa-event-cache/src/event_cache.rs +++ b/massa-event-cache/src/event_cache.rs @@ -196,6 +196,8 @@ pub(crate) struct EventCache { max_events_per_operation: u64, /// Maximum number of operations per block max_operations_per_block: u64, + /// Max number of events returned by a query + max_events_per_query: usize, } impl EventCache { @@ -210,6 +212,7 @@ impl EventCache { max_event_data_length: u64, max_events_per_operation: u64, max_operations_per_block: u64, + max_events_per_query: usize, ) -> Self { // Clear the db DB::destroy(&Options::default(), path).expect(DESTROY_ERROR); @@ -255,6 +258,7 @@ impl EventCache { thread_count, max_events_per_operation, 
max_operations_per_block, + max_events_per_query } } @@ -384,6 +388,7 @@ impl EventCache { let multi_args = filter_res_prev .unwrap() .into_iter() + .take(self.max_events_per_query) .collect::>>(); let res = self.db.multi_get(multi_args); @@ -742,9 +747,7 @@ mod tests { use serial_test::serial; use tempfile::TempDir; // internal - use massa_models::config::{ - MAX_EVENT_DATA_SIZE, MAX_OPERATIONS_PER_BLOCK, MAX_RECURSIVE_CALLS_DEPTH, THREAD_COUNT, - }; + use massa_models::config::{MAX_EVENT_DATA_SIZE, MAX_EVENTS_PER_QUERY, MAX_OPERATIONS_PER_BLOCK, MAX_RECURSIVE_CALLS_DEPTH, THREAD_COUNT}; use massa_models::operation::OperationId; use massa_models::output_event::EventExecutionContext; use massa_models::slot::Slot; @@ -762,6 +765,7 @@ mod tests { // MAX_EVENT_PER_OPERATION as u64, 25u64, MAX_OPERATIONS_PER_BLOCK as u64, + MAX_EVENTS_PER_QUERY, ) } diff --git a/massa-event-cache/src/worker.rs b/massa-event-cache/src/worker.rs index de2edaaa143..95aef405dd5 100644 --- a/massa-event-cache/src/worker.rs +++ b/massa-event-cache/src/worker.rs @@ -127,6 +127,7 @@ pub fn start_event_cache_writer_worker( cfg.max_event_data_length, cfg.max_events_per_operation, cfg.max_operations_per_block, + cfg.max_events_per_query, ))); // define the input data interface diff --git a/massa-models/src/config/constants.rs b/massa-models/src/config/constants.rs index 97130583c0b..5f8c31ffbd7 100644 --- a/massa-models/src/config/constants.rs +++ b/massa-models/src/config/constants.rs @@ -315,6 +315,7 @@ pub const MAX_EVENT_PER_OPERATION: usize = 25; /// Maximum number of recursion for calls pub const MAX_RECURSIVE_CALLS_DEPTH: u16 = 25; + // // Constants used in network // @@ -412,6 +413,14 @@ pub const DEFERRED_CALL_SLOT_OVERBOOKING_PENALTY: Amount = Amount::from_raw(1_00 /// deferred call call gas cost pub const DEFERRED_CALL_CST_GAS_COST: u64 = 750_000; + +// +// Constants for event cache +// + +/// Maximum number of events that can be returned by a query +pub const MAX_EVENTS_PER_QUERY: 
usize = 10000; + // Some checks at compile time that should not be ignored! #[allow(clippy::assertions_on_constants)] const _: () = { diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index 1e4ba75791c..e8264c707be 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -92,10 +92,10 @@ use massa_models::config::{ DEFERRED_CALL_MAX_ASYNC_GAS, DEFERRED_CALL_MAX_POOL_CHANGES, DEFERRED_CALL_MIN_GAS_COST, DEFERRED_CALL_MIN_GAS_INCREMENT, DEFERRED_CALL_SLOT_OVERBOOKING_PENALTY, KEEP_EXECUTED_HISTORY_EXTRA_PERIODS, MAX_BOOTSTRAP_FINAL_STATE_PARTS_SIZE, - MAX_BOOTSTRAP_VERSIONING_ELEMENTS_SIZE, MAX_EVENT_DATA_SIZE, MAX_EVENT_PER_OPERATION, - MAX_MESSAGE_SIZE, MAX_RECURSIVE_CALLS_DEPTH, MAX_RUNTIME_MODULE_CUSTOM_SECTION_DATA_LEN, - MAX_RUNTIME_MODULE_CUSTOM_SECTION_LEN, MAX_RUNTIME_MODULE_EXPORTS, - MAX_RUNTIME_MODULE_FUNCTIONS, MAX_RUNTIME_MODULE_FUNCTION_NAME_LEN, + MAX_BOOTSTRAP_VERSIONING_ELEMENTS_SIZE, MAX_EVENTS_PER_QUERY, MAX_EVENT_DATA_SIZE, + MAX_EVENT_PER_OPERATION, MAX_MESSAGE_SIZE, MAX_RECURSIVE_CALLS_DEPTH, + MAX_RUNTIME_MODULE_CUSTOM_SECTION_DATA_LEN, MAX_RUNTIME_MODULE_CUSTOM_SECTION_LEN, + MAX_RUNTIME_MODULE_EXPORTS, MAX_RUNTIME_MODULE_FUNCTIONS, MAX_RUNTIME_MODULE_FUNCTION_NAME_LEN, MAX_RUNTIME_MODULE_GLOBAL_INITIALIZER, MAX_RUNTIME_MODULE_IMPORTS, MAX_RUNTIME_MODULE_MEMORIES, MAX_RUNTIME_MODULE_NAME_LEN, MAX_RUNTIME_MODULE_PASSIVE_DATA, MAX_RUNTIME_MODULE_PASSIVE_ELEMENT, MAX_RUNTIME_MODULE_SIGNATURE_LEN, MAX_RUNTIME_MODULE_TABLE, @@ -490,6 +490,7 @@ async fn launch( max_events_per_operation: 25u64, // MAX_EVENTS_PER_OPERATION - TODO: rebase max_operations_per_block: MAX_OPERATIONS_PER_BLOCK as u64, + max_events_per_query: MAX_EVENTS_PER_QUERY, }; let (event_cache_manager, event_cache_controller) = start_event_cache_writer_worker(event_cache_config); From 350fc7bca0d1b8fb9f53588dec6e9b6e59c7ce51 Mon Sep 17 00:00:00 2001 From: sydhds Date: Tue, 26 Nov 2024 11:14:43 +0100 Subject: [PATCH 18/40] Add tick delay in event cache writer 
thread --- Cargo.lock | 1 + massa-event-cache/Cargo.toml | 1 + massa-event-cache/src/config.rs | 3 +++ massa-event-cache/src/worker.rs | 18 ++++++++++++++++-- massa-models/src/config/constants.rs | 2 ++ massa-node/src/main.rs | 12 +++++++----- 6 files changed, 30 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 74de35dac43..71721d87366 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3217,6 +3217,7 @@ version = "0.1.0" dependencies = [ "massa_models", "massa_serialization", + "massa_time", "mockall", "mockall_wrap", "more-asserts 0.3.1", diff --git a/massa-event-cache/Cargo.toml b/massa-event-cache/Cargo.toml index 6d74b11b9fe..0a3bef49fc7 100644 --- a/massa-event-cache/Cargo.toml +++ b/massa-event-cache/Cargo.toml @@ -19,6 +19,7 @@ parking_lot = { workspace = true } num_enum = { workspace = true } massa_models = {workspace = true} massa_serialization = {workspace = true} +massa_time = {workspace = true} mockall = {workspace = true, optional = true} mockall_wrap = {workspace = true, optional = true} diff --git a/massa-event-cache/src/config.rs b/massa-event-cache/src/config.rs index 9eab23e0d5b..e25d4a920a9 100644 --- a/massa-event-cache/src/config.rs +++ b/massa-event-cache/src/config.rs @@ -1,4 +1,5 @@ use std::path::PathBuf; +use std::time::Duration; pub struct EventCacheConfig { /// Path to the hard drive cache storage @@ -19,4 +20,6 @@ pub struct EventCacheConfig { pub max_operations_per_block: u64, /// Maximum events returned in a query pub max_events_per_query: usize, + /// Delay to wait between 2 writes in event cache writer in milliseconds + pub tick_delay: Duration, } diff --git a/massa-event-cache/src/worker.rs b/massa-event-cache/src/worker.rs index 95aef405dd5..bf93441e106 100644 --- a/massa-event-cache/src/worker.rs +++ b/massa-event-cache/src/worker.rs @@ -1,9 +1,11 @@ // std use std::sync::Arc; use std::thread; +use std::time::Duration; // third-party use parking_lot::{Condvar, Mutex, RwLock}; use tracing::{debug, info}; +use 
massa_time::MassaTime; // internal use crate::config::EventCacheConfig; use crate::controller::{ @@ -17,16 +19,19 @@ pub(crate) struct EventCacheWriterThread { input_data: Arc<(Condvar, Mutex)>, /// Event cache cache: Arc>, + tick_delay: Duration, } impl EventCacheWriterThread { fn new( input_data: Arc<(Condvar, Mutex)>, event_cache: Arc>, + tick_delay: Duration, ) -> Self { Self { input_data, cache: event_cache, + tick_delay, } } @@ -53,7 +58,16 @@ impl EventCacheWriterThread { return (input_data, true); } - // FIXME / TODO: should we sleep here? + // Wait until deadline + let now = MassaTime::now(); + let wakeup_deadline = now.saturating_add( + MassaTime::from_millis(self.tick_delay.as_millis() as u64)); + let _ = self.input_data.0.wait_until( + &mut input_data_lock, + wakeup_deadline + .estimate_instant() + .expect("could not estimate instant"), + ); } } @@ -143,7 +157,7 @@ pub fn start_event_cache_writer_worker( let thread_builder = thread::Builder::new().name("event_cache".into()); let thread_handle = thread_builder .spawn(move || { - EventCacheWriterThread::new(input_data_clone, event_cache).main_loop(); + EventCacheWriterThread::new(input_data_clone, event_cache, cfg.tick_delay).main_loop(); }) .expect("failed to spawn thread : event_cache"); diff --git a/massa-models/src/config/constants.rs b/massa-models/src/config/constants.rs index 5f8c31ffbd7..add7681e3a8 100644 --- a/massa-models/src/config/constants.rs +++ b/massa-models/src/config/constants.rs @@ -420,6 +420,8 @@ pub const DEFERRED_CALL_CST_GAS_COST: u64 = 750_000; /// Maximum number of events that can be returned by a query pub const MAX_EVENTS_PER_QUERY: usize = 10000; +/// Delay between writes in event cache writer thread +pub const EVENT_CACHE_TICK_DELAY: u64 = 100; // Some checks at compile time that should not be ignored! 
#[allow(clippy::assertions_on_constants)] diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index e8264c707be..14ca16bbd77 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -91,11 +91,12 @@ use massa_models::config::{ DEFERRED_CALL_CST_GAS_COST, DEFERRED_CALL_GLOBAL_OVERBOOKING_PENALTY, DEFERRED_CALL_MAX_ASYNC_GAS, DEFERRED_CALL_MAX_POOL_CHANGES, DEFERRED_CALL_MIN_GAS_COST, DEFERRED_CALL_MIN_GAS_INCREMENT, DEFERRED_CALL_SLOT_OVERBOOKING_PENALTY, - KEEP_EXECUTED_HISTORY_EXTRA_PERIODS, MAX_BOOTSTRAP_FINAL_STATE_PARTS_SIZE, - MAX_BOOTSTRAP_VERSIONING_ELEMENTS_SIZE, MAX_EVENTS_PER_QUERY, MAX_EVENT_DATA_SIZE, - MAX_EVENT_PER_OPERATION, MAX_MESSAGE_SIZE, MAX_RECURSIVE_CALLS_DEPTH, - MAX_RUNTIME_MODULE_CUSTOM_SECTION_DATA_LEN, MAX_RUNTIME_MODULE_CUSTOM_SECTION_LEN, - MAX_RUNTIME_MODULE_EXPORTS, MAX_RUNTIME_MODULE_FUNCTIONS, MAX_RUNTIME_MODULE_FUNCTION_NAME_LEN, + EVENT_CACHE_TICK_DELAY, KEEP_EXECUTED_HISTORY_EXTRA_PERIODS, + MAX_BOOTSTRAP_FINAL_STATE_PARTS_SIZE, MAX_BOOTSTRAP_VERSIONING_ELEMENTS_SIZE, + MAX_EVENTS_PER_QUERY, MAX_EVENT_DATA_SIZE, MAX_EVENT_PER_OPERATION, MAX_MESSAGE_SIZE, + MAX_RECURSIVE_CALLS_DEPTH, MAX_RUNTIME_MODULE_CUSTOM_SECTION_DATA_LEN, + MAX_RUNTIME_MODULE_CUSTOM_SECTION_LEN, MAX_RUNTIME_MODULE_EXPORTS, + MAX_RUNTIME_MODULE_FUNCTIONS, MAX_RUNTIME_MODULE_FUNCTION_NAME_LEN, MAX_RUNTIME_MODULE_GLOBAL_INITIALIZER, MAX_RUNTIME_MODULE_IMPORTS, MAX_RUNTIME_MODULE_MEMORIES, MAX_RUNTIME_MODULE_NAME_LEN, MAX_RUNTIME_MODULE_PASSIVE_DATA, MAX_RUNTIME_MODULE_PASSIVE_ELEMENT, MAX_RUNTIME_MODULE_SIGNATURE_LEN, MAX_RUNTIME_MODULE_TABLE, @@ -491,6 +492,7 @@ async fn launch( max_events_per_operation: 25u64, // MAX_EVENTS_PER_OPERATION - TODO: rebase max_operations_per_block: MAX_OPERATIONS_PER_BLOCK as u64, max_events_per_query: MAX_EVENTS_PER_QUERY, + tick_delay: Duration::from_millis(EVENT_CACHE_TICK_DELAY), }; let (event_cache_manager, event_cache_controller) = start_event_cache_writer_worker(event_cache_config); From 
9531a0cbf14e57fab96a65a71e59c1210395330d Mon Sep 17 00:00:00 2001 From: sydhds Date: Thu, 28 Nov 2024 09:41:50 +0100 Subject: [PATCH 19/40] Use per address / operation id / is_error counters --- massa-event-cache/src/controller.rs | 14 +- massa-event-cache/src/event_cache.rs | 893 +++++++++++++++------- massa-event-cache/src/rocksdb_operator.rs | 7 +- massa-event-cache/src/worker.rs | 6 +- massa-models/src/output_event.rs | 2 +- 5 files changed, 639 insertions(+), 283 deletions(-) diff --git a/massa-event-cache/src/controller.rs b/massa-event-cache/src/controller.rs index a58e8a6b4bb..573537373b5 100644 --- a/massa-event-cache/src/controller.rs +++ b/massa-event-cache/src/controller.rs @@ -23,7 +23,7 @@ impl EventCacheWriterInputData { events: Default::default(), } } - + /// Takes the current input data into a clone that is returned, /// and resets self. pub fn take(&mut self) -> Self { @@ -108,19 +108,19 @@ impl EventCacheController for EventCacheControllerImpl { } Some(event) }); - + let mut res_0: BTreeSet = it.cloned().collect(); // Drop the lock on the queue as soon as possible to avoid deadlocks drop(lock_0); let lock = self.cache.read(); - - let res_1 = lock.get_filtered_sc_output_events(filter); - // Drop the lock on the event cache db asap + + let (_, res_1) = lock.get_filtered_sc_output_events(filter); + // Drop the lock on the event cache db asap drop(lock); - + let res_1: BTreeSet = BTreeSet::from_iter(res_1); - + res_0.extend(res_1); Vec::from_iter(res_0) } diff --git a/massa-event-cache/src/event_cache.rs b/massa-event-cache/src/event_cache.rs index 7357c61d2b6..93692861ae3 100644 --- a/massa-event-cache/src/event_cache.rs +++ b/massa-event-cache/src/event_cache.rs @@ -20,12 +20,13 @@ use massa_models::slot::Slot; use massa_serialization::{DeserializeError, Deserializer, Serializer}; const OPEN_ERROR: &str = "critical: rocksdb open operation failed"; -const COUNTER_INIT_ERROR: &str = "critical: cannot init rocksdb counters"; +// const 
COUNTER_INIT_ERROR: &str = "critical: cannot init rocksdb counters"; const DESTROY_ERROR: &str = "critical: rocksdb delete operation failed"; const CRUD_ERROR: &str = "critical: rocksdb crud operation failed"; const EVENT_DESER_ERROR: &str = "critical: event deserialization failed"; const OPERATION_ID_DESER_ERROR: &str = "critical: deserialization failed for op id in rocksdb"; const COUNTER_ERROR: &str = "critical: cannot get counter"; +const COUNTER_KEY_CREATION_ERROR: &str = "critical: cannot create counter key"; #[allow(dead_code)] /// Prefix u8 used to identify rocksdb keys @@ -41,7 +42,15 @@ enum KeyIndent { IsFinal, } -/// A Rocksdb key builder to insert in EventCache +enum KeyBuilderType<'a> { + Slot(&'a Slot), + Event(&'a Slot, u64), + Address(&'a Address), + OperationId(&'a OperationId), + Bool(bool), + None, +} + struct EventCacheKeyBuilder { /// Operation Id Serializer op_id_ser: OperationIdSerializer, @@ -54,118 +63,196 @@ impl EventCacheKeyBuilder { } } - /// A prefix key (for rocksdb prefix iteration) - fn get_prefix_event_key(&self, slot: &Slot) -> Vec { - let mut event_key = vec![KeyIndent::Event as u8]; - event_key.extend(slot.to_bytes_key()); - event_key - } - - fn get_event_key(&self, event: &SCOutputEvent) -> Vec { - let mut event_key = vec![KeyIndent::Event as u8]; - event_key.extend(event.context.slot.to_bytes_key()); - event_key.extend(event.context.index_in_slot.to_be_bytes()); - event_key - } - - fn get_prefix_emitter_address_address_key(&self, addr: &Address) -> Vec { - let mut key = vec![KeyIndent::EmitterAddress as u8]; - let addr_bytes = addr.to_prefixed_bytes(); - let addr_bytes_len = addr_bytes.len(); - key.extend(addr_bytes); - key.push(addr_bytes_len as u8); - key - } - - fn get_emitter_address_key(&self, event: &SCOutputEvent) -> Option> { - if let Some(addr) = event.context.call_stack.back() { - let mut key = vec![KeyIndent::EmitterAddress as u8]; - let addr_bytes = addr.to_prefixed_bytes(); - let addr_bytes_len = 
addr_bytes.len(); - key.extend(addr_bytes); - key.push(addr_bytes_len as u8); - key.extend(self.get_event_key(event)); - Some(key) + fn key( + &self, + indent: &KeyIndent, + key_type: KeyBuilderType, + _is_prefix: bool, + is_counter: bool, + ) -> Vec { + // Low level key builder function + // There is no guarantees that the key will be unique + // Use key_from_event OR key_from... unless you know what you're doing + + let mut key_base = if is_counter { + vec![u8::from(KeyIndent::Counter), u8::from(*indent)] } else { - None - } - } - - fn get_prefix_original_caller_address_key(&self, addr: &Address) -> Vec { - let mut key = vec![KeyIndent::OriginalCallerAddress as u8]; - let addr_bytes = addr.to_prefixed_bytes(); - let addr_bytes_len = addr_bytes.len(); - key.extend(addr_bytes); - key.push(addr_bytes_len as u8); - key - } + vec![u8::from(*indent)] + }; - fn get_original_caller_address_key(&self, event: &SCOutputEvent) -> Option> { - if let Some(addr) = event.context.call_stack.front() { - let mut key = vec![KeyIndent::OriginalCallerAddress as u8]; - let addr_bytes = addr.to_prefixed_bytes(); - let addr_bytes_len = addr_bytes.len(); - key.extend(addr_bytes); - key.push(addr_bytes_len as u8); - key.extend(self.get_event_key(event)); - Some(key) - } else { - None + match key_type { + KeyBuilderType::Slot(slot) => { + key_base.extend(slot.to_bytes_key()); + } + KeyBuilderType::Event(slot, index) => { + key_base.extend(slot.to_bytes_key()); + key_base.extend(index.to_be_bytes()); + } + KeyBuilderType::Address(addr) => { + let addr_bytes = addr.to_prefixed_bytes(); + let addr_bytes_len = addr_bytes.len(); + key_base.extend(addr_bytes); + key_base.push(addr_bytes_len as u8); + } + KeyBuilderType::OperationId(op_id) => { + let mut buffer = Vec::new(); + self.op_id_ser + .serialize(op_id, &mut buffer) + .expect(OPERATION_ID_DESER_ERROR); + key_base.extend(&buffer); + key_base.extend(u32::to_be_bytes(buffer.len() as u32)); + } + KeyBuilderType::Bool(value) => { + 
key_base.push(u8::from(value)); + } + KeyBuilderType::None => {} } - } - fn get_prefix_original_operation_id_key(&self, op_id: &OperationId) -> Option> { - let mut key = vec![KeyIndent::OriginalOperationId as u8]; - let mut buffer = Vec::new(); - self.op_id_ser.serialize(op_id, &mut buffer).ok()?; - key.extend(&buffer); - key.extend(u32::to_be_bytes(buffer.len() as u32)); - Some(key) + key_base } - fn get_original_operation_id_key(&self, event: &SCOutputEvent) -> Option> { - if let Some(op_id) = event.context.origin_operation_id { - let mut key = vec![KeyIndent::OriginalOperationId as u8]; - let mut buffer = Vec::new(); - self.op_id_ser.serialize(&op_id, &mut buffer).ok()?; - key.extend(&buffer); - key.extend(u32::to_be_bytes(buffer.len() as u32)); - key.extend(self.get_event_key(event)); - Some(key) - } else { - None - } - } - - fn get_prefix_is_error_key(&self, is_error: bool) -> Vec { - vec![KeyIndent::IsError as u8, u8::from(is_error)] - } + fn key_from_event( + &self, + event: &SCOutputEvent, + indent: &KeyIndent, + is_prefix: bool, + is_counter: bool, + ) -> Option> { + // High level key builder function + + let key = match indent { + KeyIndent::Event => { + let item = KeyBuilderType::Event(&event.context.slot, event.context.index_in_slot); + Some(self.key(indent, item, is_prefix, is_counter)) + } + KeyIndent::EmitterAddress => { + if let Some(addr) = event.context.call_stack.back() { + let item = KeyBuilderType::Address(addr); + let mut key = self.key(indent, item, is_prefix, is_counter); + let item = + KeyBuilderType::Event(&event.context.slot, event.context.index_in_slot); + if !is_prefix && !is_counter { + key.extend(self.key(&KeyIndent::Event, item, false, false)); + } + Some(key) + } else { + None + } + } + KeyIndent::OriginalCallerAddress => { + if let Some(addr) = event.context.call_stack.front() { + let item = KeyBuilderType::Address(addr); + let mut key = self.key(indent, item, is_prefix, is_counter); + let item = + 
KeyBuilderType::Event(&event.context.slot, event.context.index_in_slot); + if !is_prefix && !is_counter { + key.extend(self.key(&KeyIndent::Event, item, false, false)); + } + Some(key) + } else { + None + } + } + KeyIndent::OriginalOperationId => { + if let Some(op_id) = event.context.origin_operation_id.as_ref() { + let item = KeyBuilderType::OperationId(op_id); + let mut key = self.key(indent, item, is_prefix, is_counter); + let item = + KeyBuilderType::Event(&event.context.slot, event.context.index_in_slot); + if !is_prefix && !is_counter { + // key.extend(self.key_from_item(indent, item, false, false)); + key.extend(self.key(&KeyIndent::Event, item, false, false)); + } + Some(key) + } else { + None + } + } + KeyIndent::IsError => { + let item = KeyBuilderType::Bool(event.context.is_error); + let mut key = self.key(indent, item, is_prefix, is_counter); + let item = KeyBuilderType::Event(&event.context.slot, event.context.index_in_slot); + if !is_prefix && !is_counter { + key.extend(self.key(&KeyIndent::Event, item, false, false)); + } + Some(key) + } + _ => unreachable!(), + }; - fn get_is_error_key(&self, event: &SCOutputEvent) -> Vec { - let mut key = vec![KeyIndent::IsError as u8]; - key.push(event.context.is_error as u8); - key.extend(self.get_event_key(event)); key } - fn get_counter_key_from(&self, key: &[u8]) -> Vec { - vec![KeyIndent::Counter as u8, key[0]] + /// Prefix key to iterate over all events / emitter_address / ... + fn prefix_key_from_indent(&self, indent: &KeyIndent) -> Vec { + // High level key builder function + self.key(indent, KeyBuilderType::None, false, false) } - fn get_counter_key_from_indent(&self, indent: &KeyIndent) -> Vec { - vec![KeyIndent::Counter as u8, *indent as u8] - } - - fn get_counter_key_bool_from(&self, key: &[u8], value: bool) -> Vec { - vec![u8::from(KeyIndent::Counter), key[0], u8::from(value)] + /// Prefix key to iterate over specific emitter_address / operation_id / ... 
+ fn prefix_key_from_filter_item(&self, filter_item: &FilterItem, indent: &KeyIndent) -> Vec { + // High level key builder function + match (indent, filter_item) { + (KeyIndent::Event, FilterItem::SlotStartEnd(_start, _end)) => { + unimplemented!() + } + (KeyIndent::Event, FilterItem::SlotStart(start)) => { + self.key(indent, KeyBuilderType::Slot(start), true, false) + } + (KeyIndent::Event, FilterItem::SlotEnd(end)) => { + self.key(indent, KeyBuilderType::Slot(end), true, false) + } + (KeyIndent::EmitterAddress, FilterItem::EmitterAddress(addr)) => { + self.key(indent, KeyBuilderType::Address(addr), true, false) + } + (KeyIndent::OriginalCallerAddress, FilterItem::OriginalCallerAddress(addr)) => { + self.key(indent, KeyBuilderType::Address(addr), true, false) + } + (KeyIndent::OriginalOperationId, FilterItem::OriginalOperationId(op_id)) => { + self.key(indent, KeyBuilderType::OperationId(op_id), true, false) + } + (KeyIndent::IsError, FilterItem::IsError(v)) => { + self.key(indent, KeyBuilderType::Bool(*v), true, false) + } + _ => { + unreachable!() + } + } } - fn get_counter_key_bool_from_indent(&self, indent: &KeyIndent, value: bool) -> Vec { - vec![ - u8::from(KeyIndent::Counter), - u8::from(*indent), - u8::from(value), - ] + /// Counter key for specific emitter_address / operation_id / ... 
+ fn counter_key_from_filter_item( + &self, + filter_item: &FilterItem, + indent: &KeyIndent, + ) -> Vec { + // High level key builder function + match (indent, filter_item) { + (KeyIndent::Event, FilterItem::SlotStartEnd(_start, _end)) => { + unimplemented!() + } + (KeyIndent::Event, FilterItem::SlotStart(start)) => { + self.key(indent, KeyBuilderType::Slot(start), false, true) + } + (KeyIndent::Event, FilterItem::SlotEnd(end)) => { + self.key(indent, KeyBuilderType::Slot(end), false, true) + } + (KeyIndent::EmitterAddress, FilterItem::EmitterAddress(addr)) => { + self.key(indent, KeyBuilderType::Address(addr), false, true) + } + (KeyIndent::OriginalCallerAddress, FilterItem::OriginalCallerAddress(addr)) => { + self.key(indent, KeyBuilderType::Address(addr), false, true) + } + (KeyIndent::OriginalOperationId, FilterItem::OriginalOperationId(op_id)) => { + self.key(indent, KeyBuilderType::OperationId(op_id), false, true) + } + (KeyIndent::IsError, FilterItem::IsError(v)) => { + self.key(indent, KeyBuilderType::Bool(*v), false, true) + } + _ => { + unreachable!() + } + } } } @@ -224,22 +311,7 @@ impl EventCache { }; let db = DB::open(&options, path).expect(OPEN_ERROR); - let key_builder = EventCacheKeyBuilder::new(); - // init counters - let mut batch = WriteBatch::default(); - let value = 0u64.to_be_bytes(); - let key_counter = key_builder.get_counter_key_from_indent(&KeyIndent::EmitterAddress); - batch.put(key_counter, value); - let key_counter = - key_builder.get_counter_key_from_indent(&KeyIndent::OriginalCallerAddress); - batch.put(key_counter, value); - let key_counter = key_builder.get_counter_key_from_indent(&KeyIndent::OriginalOperationId); - batch.put(key_counter, value); - let key_counter = key_builder.get_counter_key_bool_from_indent(&KeyIndent::IsError, true); - batch.put(key_counter, value); - let key_counter = key_builder.get_counter_key_bool_from_indent(&KeyIndent::IsError, false); - batch.put(key_counter, value); - 
db.write(batch).expect(COUNTER_INIT_ERROR); + let key_builder_2 = EventCacheKeyBuilder::new(); Self { db, @@ -252,13 +324,13 @@ impl EventCache { max_call_stack_length: max_recursive_call_depth, max_event_data_length, }), - key_builder, + key_builder: key_builder_2, first_slot: Slot::new(0, 0), last_slot: Slot::new(0, 0), thread_count, max_events_per_operation, max_operations_per_block, - max_events_per_query + max_events_per_query, } } @@ -267,29 +339,68 @@ impl EventCache { let mut event_buffer = Vec::new(); self.event_ser.serialize(&event, &mut event_buffer).unwrap(); - batch.put(self.key_builder.get_event_key(&event), event_buffer); - if let Some(key) = self.key_builder.get_emitter_address_key(&event) { - let key_counter = self.key_builder.get_counter_key_from(key.as_slice()); + batch.put( + self.key_builder + .key_from_event(&event, &KeyIndent::Event, false, false) + .unwrap(), + event_buffer, + ); + + if let Some(key) = + self.key_builder + .key_from_event(&event, &KeyIndent::EmitterAddress, false, false) + { + let key_counter = + self.key_builder + .key_from_event(&event, &KeyIndent::EmitterAddress, false, true); batch.put(key, vec![]); + let key_counter = key_counter.expect(COUNTER_KEY_CREATION_ERROR); batch.merge(key_counter, 1i64.to_be_bytes()); } - if let Some(key) = self.key_builder.get_original_caller_address_key(&event) { - let key_counter = self.key_builder.get_counter_key_from(key.as_slice()); + + if let Some(key) = + self.key_builder + .key_from_event(&event, &KeyIndent::OriginalCallerAddress, false, false) + { + let key_counter = self.key_builder.key_from_event( + &event, + &KeyIndent::OriginalCallerAddress, + false, + true, + ); batch.put(key, vec![]); + let key_counter = key_counter.expect(COUNTER_KEY_CREATION_ERROR); batch.merge(key_counter, 1i64.to_be_bytes()); } - if let Some(key) = self.key_builder.get_original_operation_id_key(&event) { - let key_counter = self.key_builder.get_counter_key_from(key.as_slice()); + + if let Some(key) = + 
self.key_builder + .key_from_event(&event, &KeyIndent::OriginalOperationId, false, false) + { + let key_counter = self.key_builder.key_from_event( + &event, + &KeyIndent::OriginalOperationId, + false, + true, + ); batch.put(key, vec![]); + let key_counter = key_counter.expect(COUNTER_KEY_CREATION_ERROR); batch.merge(key_counter, 1i64.to_be_bytes()); } - let key = self.key_builder.get_is_error_key(&event); - let key_counter = self - .key_builder - .get_counter_key_bool_from(key.as_slice(), event.context.is_error); - batch.put(key, vec![]); - batch.merge(key_counter, 1i64.to_be_bytes()); + { + if let Some(key) = + self.key_builder + .key_from_event(&event, &KeyIndent::IsError, false, false) + { + let key_counter = + self.key_builder + .key_from_event(&event, &KeyIndent::IsError, false, true); + let key_counter = key_counter.expect(COUNTER_KEY_CREATION_ERROR); + batch.put(key, vec![]); + batch.merge(key_counter, 1i64.to_be_bytes()); + } + } // Keep track of last slot (and start slot) of events in the DB // Help for event filtering @@ -339,7 +450,10 @@ impl EventCache { } /// Get events filtered by the given argument - pub(crate) fn get_filtered_sc_output_events(&self, filter: &EventFilter) -> Vec { + pub(crate) fn get_filtered_sc_output_events( + &self, + filter: &EventFilter, + ) -> (Vec, Vec) { // Step 1 // Build a (sorted) map with key: (counter value, indent), value: filter // Will be used to iterate from the lower count index to the highest count index @@ -351,7 +465,7 @@ impl EventCache { if filter_items.is_empty() { // Note: will return too many event - user should restrict the filter warn!("Filter item only on is final field, please add more filter parameters"); - return vec![]; + return (vec![], vec![]); } let it = filter_items.iter().map(|(key_indent, filter_item)| { @@ -368,18 +482,21 @@ impl EventCache { }); let map = BTreeMap::from_iter(it); + // println!("map: {:?}", map); // Step 2: apply filter from the lowest counter to the highest counter + let mut 
query_counts = vec![]; let mut filter_res_prev = None; for ((_counter, indent), filter_item) in map.iter() { let mut filter_res = BTreeSet::new(); - self.filter_for( + let query_count = self.filter_for( indent, filter_item, &mut filter_res, filter_res_prev.as_ref(), ); + query_counts.push(query_count); filter_res_prev = Some(filter_res); } @@ -390,9 +507,13 @@ impl EventCache { .into_iter() .take(self.max_events_per_query) .collect::>>(); + + // println!("multi_args len: {:?}", multi_args.len()); + // println!("multi_args: {:?}", multi_args); let res = self.db.multi_get(multi_args); - res.into_iter() + let events = res + .into_iter() .map(|value| { let value = value.unwrap().unwrap(); let (_, event) = self @@ -401,7 +522,9 @@ impl EventCache { .unwrap(); event }) - .collect::>() + .collect::>(); + + (query_counts, events) } fn filter_for( @@ -410,24 +533,34 @@ impl EventCache { filter_item: &FilterItem, result: &mut BTreeSet>, seen: Option<&BTreeSet>>, - ) { + ) -> u64 { + let mut query_count: u64 = 0; + if *indent == KeyIndent::Event { let opts = match filter_item { - FilterItem::SlotStart(start) => { - let key_start = self.key_builder.get_prefix_event_key(start); + FilterItem::SlotStart(_start) => { + let key_start = self + .key_builder + .prefix_key_from_filter_item(filter_item, indent); let mut options = rocksdb::ReadOptions::default(); options.set_iterate_lower_bound(key_start); options } - FilterItem::SlotEnd(end) => { - let key_end = self.key_builder.get_prefix_event_key(end); + FilterItem::SlotEnd(_end) => { + let key_end = self + .key_builder + .prefix_key_from_filter_item(filter_item, indent); let mut options = rocksdb::ReadOptions::default(); options.set_iterate_upper_bound(key_end); options } FilterItem::SlotStartEnd(start, end) => { - let key_start = self.key_builder.get_prefix_event_key(start); - let key_end = self.key_builder.get_prefix_event_key(end); + let key_start = self + .key_builder + 
.prefix_key_from_filter_item(&FilterItem::SlotStart(*start), indent); + let key_end = self + .key_builder + .prefix_key_from_filter_item(&FilterItem::SlotEnd(*end), indent); let mut options = rocksdb::ReadOptions::default(); options.set_iterate_range(key_start..key_end); options @@ -442,10 +575,8 @@ impl EventCache { break; } - // FIXME: should check for end bound? - let found = kvb.0.to_vec(); - // println!("found: {:?}", found); + query_count = query_count.saturating_add(1); if let Some(filter_set_seen) = seen { if filter_set_seen.contains(&found) { @@ -465,19 +596,18 @@ impl EventCache { } } else { let prefix_filter = match filter_item { - FilterItem::EmitterAddress(addr) => self + FilterItem::EmitterAddress(_addr) => self .key_builder - .get_prefix_emitter_address_address_key(addr), - FilterItem::OriginalCallerAddress(addr) => self + .prefix_key_from_filter_item(filter_item, indent), + FilterItem::OriginalCallerAddress(_addr) => self .key_builder - .get_prefix_original_caller_address_key(addr), - FilterItem::OriginalOperationId(op_id) => self + .prefix_key_from_filter_item(filter_item, indent), + FilterItem::OriginalOperationId(_op_id) => self .key_builder - .get_prefix_original_operation_id_key(op_id) - .expect(OPERATION_ID_DESER_ERROR), - FilterItem::IsError(is_error) => { - self.key_builder.get_prefix_is_error_key(*is_error) - } + .prefix_key_from_filter_item(filter_item, indent), + FilterItem::IsError(_is_error) => self + .key_builder + .prefix_key_from_filter_item(filter_item, indent), _ => unreachable!(), }; @@ -488,7 +618,6 @@ impl EventCache { break; } - // FIXME: is this always true? 
if !kvb.0.starts_with(prefix_filter.as_slice()) { break; } @@ -496,9 +625,11 @@ impl EventCache { let found = kvb .0 .strip_prefix(prefix_filter.as_slice()) - .unwrap() + .unwrap() // safe to unwrap() - already tested .to_vec(); + query_count = query_count.saturating_add(1); + if let Some(filter_set_seen) = seen { if filter_set_seen.contains(&found) { result.insert(found); @@ -516,6 +647,8 @@ impl EventCache { } } } + + query_count } /// Estimate for a given KeyIndent & FilterItem the number of row to process @@ -546,44 +679,46 @@ impl EventCache { .saturating_mul(self.max_operations_per_block)) } FilterItem::EmitterAddress(_addr) => { - let counter_key = self.key_builder.get_counter_key_from_indent(key_indent); - let counter = u64::from_be_bytes( - self.db - .get(counter_key) - .expect(COUNTER_ERROR) - .unwrap() // safe to unwrap - counter init in new - .try_into() - .unwrap(), // safe to unwrap - counter is init with u64.to_be_bytes - ); - Ok(counter) + let counter_key = self + .key_builder + .counter_key_from_filter_item(filter_item, key_indent); + println!("counter_key: {:?}", counter_key); + let counter = self.db.get(counter_key).expect(COUNTER_ERROR); + println!("counter: {:?}", counter); + let counter_value = counter + .map(|b| u64::from_be_bytes(b.try_into().unwrap())) + .unwrap_or(0); + Ok(counter_value) } FilterItem::OriginalCallerAddress(_addr) => { - let counter_key = self.key_builder.get_counter_key_from_indent(key_indent); - let counter = u64::from_be_bytes( - self.db - .get(counter_key) - .expect(COUNTER_ERROR) - .unwrap() - .try_into() - .unwrap(), - ); - Ok(counter) + let counter_key = self + .key_builder + .counter_key_from_filter_item(filter_item, key_indent); + let counter = self.db.get(counter_key).expect(COUNTER_ERROR); + let counter_value = counter + .map(|b| u64::from_be_bytes(b.try_into().unwrap())) + .unwrap_or(0); + Ok(counter_value) } FilterItem::OriginalOperationId(_op_id) => { - let counter_key = 
self.key_builder.get_counter_key_from_indent(key_indent); - let counter_ = self.db.get(counter_key); - let counter = - u64::from_be_bytes(counter_.expect(COUNTER_ERROR).unwrap().try_into().unwrap()); - Ok(counter) + let counter_key = self + .key_builder + .counter_key_from_filter_item(filter_item, key_indent); + let counter = self.db.get(counter_key).expect(COUNTER_ERROR); + let counter_value = counter + .map(|b| u64::from_be_bytes(b.try_into().unwrap())) + .unwrap_or(0); + Ok(counter_value) } - FilterItem::IsError(is_error) => { + FilterItem::IsError(_is_error) => { let counter_key = self .key_builder - .get_counter_key_bool_from(&[*key_indent as u8], *is_error); - let counter_ = self.db.get(counter_key); - let counter = - u64::from_be_bytes(counter_.expect(COUNTER_ERROR).unwrap().try_into().unwrap()); - Ok(counter) + .counter_key_from_filter_item(filter_item, key_indent); + let counter = self.db.get(counter_key).expect(COUNTER_ERROR); + let counter_value = counter + .map(|b| u64::from_be_bytes(b.try_into().unwrap())) + .unwrap_or(0); + Ok(counter_value) } } } @@ -595,6 +730,8 @@ impl EventCache { let mut snipped_count: usize = 0; let snip_amount = snip_amount.unwrap_or(self.snip_amount); + let mut counter_keys = vec![]; + while snipped_count < snip_amount { let key_value = iter.next(); if key_value.is_none() { @@ -615,30 +752,61 @@ impl EventCache { .unwrap(); // delete all associated key - if let Some(key) = self.key_builder.get_emitter_address_key(&event) { - let key_counter = self.key_builder.get_counter_key_from(key.as_slice()); + if let Some(key) = + self.key_builder + .key_from_event(&event, &KeyIndent::EmitterAddress, false, false) + { + let key_counter = self + .key_builder + .key_from_event(&event, &KeyIndent::EmitterAddress, false, true) + .expect(COUNTER_ERROR); batch.delete(key); + counter_keys.push(key_counter.clone()); batch.merge(key_counter, (-1i64).to_be_bytes()); } - if let Some(key) = self.key_builder.get_original_caller_address_key(&event) { - 
let key_counter = self.key_builder.get_counter_key_from(key.as_slice()); + if let Some(key) = self.key_builder.key_from_event( + &event, + &KeyIndent::OriginalCallerAddress, + false, + false, + ) { + let key_counter = self + .key_builder + .key_from_event(&event, &KeyIndent::OriginalCallerAddress, false, true) + .expect(COUNTER_ERROR); batch.delete(key); + counter_keys.push(key_counter.clone()); batch.merge(key_counter, (-1i64).to_be_bytes()); } - if let Some(key) = self.key_builder.get_original_operation_id_key(&event) { - let key_counter = self.key_builder.get_counter_key_from(key.as_slice()); + + if let Some(key) = self.key_builder.key_from_event( + &event, + &KeyIndent::OriginalOperationId, + false, + false, + ) { + let key_counter = self + .key_builder + .key_from_event(&event, &KeyIndent::OriginalOperationId, false, true) + .expect(COUNTER_ERROR); batch.delete(key); + counter_keys.push(key_counter.clone()); + batch.merge(key_counter, (-1i64).to_be_bytes()); + } + if let Some(key) = + self.key_builder + .key_from_event(&event, &KeyIndent::IsError, false, false) + { + let key_counter = self + .key_builder + .key_from_event(&event, &KeyIndent::IsError, false, true) + .expect(COUNTER_ERROR); + batch.delete(key); + counter_keys.push(key_counter.clone()); batch.merge(key_counter, (-1i64).to_be_bytes()); } - let key_is_error = self.key_builder.get_is_error_key(&event); - let key_counter = self - .key_builder - .get_counter_key_bool_from(key_is_error.as_slice(), event.context.is_error); - batch.delete(key_is_error); - batch.merge(key_counter, (-1i64).to_be_bytes()); batch.delete(key); - snipped_count += 1; } @@ -646,6 +814,16 @@ impl EventCache { self.db.write(batch).expect(CRUD_ERROR); self.entry_count = self.entry_count.saturating_sub(snipped_count); + let mut batch_counters = WriteBatch::default(); + for (value, key) in self.db.multi_get(&counter_keys).iter().zip(counter_keys) { + if let Ok(Some(value)) = value { + if *value == 0u64.to_be_bytes().to_vec() { + 
batch_counters.delete(key); + } + } + } + self.db.write(batch_counters).expect(CRUD_ERROR); + // Update first_slot / last_slot in the DB if self.entry_count == 0 { // Reset @@ -655,7 +833,8 @@ impl EventCache { // Get the first event in the db // By using a prefix iterator this should be fast - let mut it_slot = self.db.prefix_iterator([u8::from(KeyIndent::Event)]); + let key_prefix = self.key_builder.prefix_key_from_indent(&KeyIndent::Event); + let mut it_slot = self.db.prefix_iterator(key_prefix); let key_value = it_slot.next(); let kvb = key_value.unwrap().expect(EVENT_DESER_ERROR); @@ -747,7 +926,10 @@ mod tests { use serial_test::serial; use tempfile::TempDir; // internal - use massa_models::config::{MAX_EVENT_DATA_SIZE, MAX_EVENTS_PER_QUERY, MAX_OPERATIONS_PER_BLOCK, MAX_RECURSIVE_CALLS_DEPTH, THREAD_COUNT}; + use massa_models::config::{ + MAX_EVENTS_PER_QUERY, MAX_EVENT_DATA_SIZE, MAX_OPERATIONS_PER_BLOCK, + MAX_RECURSIVE_CALLS_DEPTH, THREAD_COUNT, + }; use massa_models::operation::OperationId; use massa_models::output_event::EventExecutionContext; use massa_models::slot::Slot; @@ -791,7 +973,8 @@ mod tests { data: "message foo bar".to_string(), }; - let mut events = (0..cache.max_entry_count - 5) + let max_entry_count = cache.max_entry_count - 5; + let mut events = (0..max_entry_count) .map(|i| { let mut event = event.clone(); event.context.index_in_slot = i as u64; @@ -826,7 +1009,6 @@ mod tests { // let db_it = cache.db_iter(Some(IteratorMode::Start)); let mut prev_slot = None; let mut prev_event_index = None; - #[allow(clippy::manual_flatten)] for kvb in cache.iter_all(None) { let bytes = kvb.0.iter().as_slice(); @@ -932,6 +1114,43 @@ mod tests { dbg!(cache.entry_count); } + #[test] + #[serial] + fn test_snip() { + // Test snip so we enfore that all db keys are removed + + let mut cache = setup(); + cache.max_entry_count = 10; + + let event = SCOutputEvent { + context: EventExecutionContext { + slot: Slot::new(1, 0), + block: None, + read_only: false, + 
index_in_slot: 0, + call_stack: Default::default(), + origin_operation_id: None, + is_final: true, + is_error: false, + }, + data: "message foo bar".to_string(), + }; + + let it = (0..cache.max_entry_count).map(|i| { + let mut event = event.clone(); + event.context.index_in_slot = i as u64; + event + }); + cache.insert_multi_it(it); + + assert_eq!(cache.entry_count, cache.max_entry_count); + + cache.snip(Some(cache.entry_count)); + + assert_eq!(cache.entry_count, 0); + assert_eq!(cache.iter_all(None).count(), 0); + } + #[test] #[serial] fn test_event_filter() { @@ -994,13 +1213,13 @@ mod tests { is_error: None, }; - let filtered_events_1 = cache.get_filtered_sc_output_events(&filter_1); + let (_, filtered_events_1) = cache.get_filtered_sc_output_events(&filter_1); assert_eq!(filtered_events_1.len(), 2); - println!("filtered_events_1[0]: {:?}", filtered_events_1[0]); + // println!("filtered_events_1[0]: {:?}", filtered_events_1[0]); assert_eq!(filtered_events_1[0].context.slot, slot_2); assert_eq!(filtered_events_1[0].context.index_in_slot, index_2_1); - println!("filtered_events_1[1]: {:?}", filtered_events_1[1]); + // println!("filtered_events_1[1]: {:?}", filtered_events_1[1]); assert_eq!(filtered_events_1[1].context.slot, slot_2); assert_eq!(filtered_events_1[1].context.index_in_slot, index_2_2); } @@ -1019,6 +1238,8 @@ mod tests { OperationId::from_str("O12n1vt8uTLh3H65J4TVuztaWfBh3oumjjVtRCkke7Ba5qWdXdjD").unwrap(); let op_id_2 = OperationId::from_str("O1p5P691KF672fQ8tQougxzSERBwDKZF8FwtkifMSJbP14sEuGc").unwrap(); + let op_id_uknown = + OperationId::from_str("O1kvXTfsnVbQcmDERkC89vqAd2xRTLCb3q5b2E5WaVPHwFd7Qth").unwrap(); let event = SCOutputEvent { context: EventExecutionContext { @@ -1063,18 +1284,18 @@ mod tests { events.push(event_slot_2_2.clone()); // Randomize the events so we insert in random orders in the DB events.shuffle(&mut thread_rng()); - println!("inserting events:"); - for evt in events.iter() { - println!("{:?}", evt); - } - println!("{}", 
"#".repeat(32)); + // println!("inserting events:"); + // for evt in events.iter() { + // println!("{:?}", evt); + // } + // println!("{}", "#".repeat(32)); cache.insert_multi_it(events.into_iter()); - for (k, v) in cache.iter_all(None) { - println!("k: {:?}, v: {:?}", k, v); - } - println!("{}", "#".repeat(32)); + // for (k, v) in cache.iter_all(None) { + // println!("k: {:?}, v: {:?}", k, v); + // } + // println!("{}", "#".repeat(32)); let mut filter_1 = EventFilter { start: None, // Some(Slot::new(2, 0)), @@ -1086,11 +1307,11 @@ mod tests { is_error: None, }; - let filtered_events_1 = cache.get_filtered_sc_output_events(&filter_1); + let (_, filtered_events_1) = cache.get_filtered_sc_output_events(&filter_1); assert_eq!(filtered_events_1.len(), cache.max_entry_count - 5); filtered_events_1.iter().enumerate().for_each(|(i, event)| { - println!("checking event #{}: {:?}", i, event); + // println!("checking event #{}: {:?}", i, event); assert_eq!(event.context.slot, slot_1); assert_eq!(event.context.index_in_slot, i as u64); }); @@ -1104,10 +1325,10 @@ mod tests { { filter_1.original_operation_id = Some(op_id_2); - let filtered_events_2 = cache.get_filtered_sc_output_events(&filter_1); + let (_, filtered_events_2) = cache.get_filtered_sc_output_events(&filter_1); assert_eq!(filtered_events_2.len(), 2); filtered_events_2.iter().enumerate().for_each(|(i, event)| { - println!("checking event #{}: {:?}", i, event); + // println!("checking event #{}: {:?}", i, event); assert_eq!(event.context.slot, slot_2); if i == 0 { assert_eq!(event.context.index_in_slot, i as u64); @@ -1116,6 +1337,12 @@ mod tests { } }); } + + { + filter_1.original_operation_id = Some(op_id_uknown); + let (_, filtered_events_2) = cache.get_filtered_sc_output_events(&filter_1); + assert_eq!(filtered_events_2.len(), 0); + } } #[test] @@ -1135,6 +1362,8 @@ mod tests { Address::from_str("AU122Em8qkqegdLb1eyH8rdkSCNEf7RZLeTJve4Q2inRPGiTJ2xNv").unwrap(); let emit_addr_2 = 
Address::from_str("AU12WuVR1Td74q9eAbtYZUnk5jnRbUuUacyhQFwm217bV5v1mNqTZ").unwrap(); + let emit_addr_unknown = + Address::from_str("AU1zLC4TFUiaKDg7quQyusMPQcHT4ykWVs3FsFpuhdNSmowUG2As").unwrap(); let event = SCOutputEvent { context: EventExecutionContext { @@ -1188,19 +1417,19 @@ mod tests { events.push(event_slot_2_2.clone()); // Randomize the events so we insert in random orders in the DB events.shuffle(&mut thread_rng()); - println!("inserting events:"); - for evt in events.iter() { - println!("{:?}", evt); - } - println!("{}", "#".repeat(32)); + // println!("inserting events:"); + // for evt in events.iter() { + // println!("{:?}", evt); + // } + // println!("{}", "#".repeat(32)); cache.insert_multi_it(events.into_iter()); - println!("db iter all:"); - for (k, v) in cache.iter_all(None) { - println!("k: {:?}, v: {:?}", k, v); - } - println!("{}", "#".repeat(32)); + // println!("db iter all:"); + // for (k, v) in cache.iter_all(None) { + // println!("k: {:?}, v: {:?}", k, v); + // } + // println!("{}", "#".repeat(32)); let mut filter_1 = EventFilter { start: None, // Some(Slot::new(2, 0)), @@ -1212,7 +1441,7 @@ mod tests { is_error: None, }; - let filtered_events_1 = cache.get_filtered_sc_output_events(&filter_1); + let (_, filtered_events_1) = cache.get_filtered_sc_output_events(&filter_1); assert_eq!(filtered_events_1.len(), threshold); filtered_events_1 @@ -1225,7 +1454,7 @@ mod tests { { filter_1.emitter_address = Some(emit_addr_2); - let filtered_events_2 = cache.get_filtered_sc_output_events(&filter_1); + let (_, filtered_events_2) = cache.get_filtered_sc_output_events(&filter_1); assert_eq!(filtered_events_2.len(), threshold + 1 + 2); filtered_events_2 .iter() @@ -1234,6 +1463,16 @@ mod tests { assert_eq!(*event.context.call_stack.back().unwrap(), emit_addr_2) }); } + { + filter_1.emitter_address = Some(dummy_addr); + let (_, filtered_events_2) = cache.get_filtered_sc_output_events(&filter_1); + assert_eq!(filtered_events_2.len(), 0); + } + { + 
filter_1.emitter_address = Some(emit_addr_unknown); + let (_, filtered_events_2) = cache.get_filtered_sc_output_events(&filter_1); + assert_eq!(filtered_events_2.len(), 0); + } } #[test] @@ -1249,10 +1488,12 @@ mod tests { let dummy_addr = Address::from_str("AU12qePoXhNbYWE1jZuafqJong7bbq1jw3k89RgbMawbrdZpaasoA").unwrap(); - let emit_addr_1 = + let caller_addr_1 = Address::from_str("AU122Em8qkqegdLb1eyH8rdkSCNEf7RZLeTJve4Q2inRPGiTJ2xNv").unwrap(); - let emit_addr_2 = + let caller_addr_2 = Address::from_str("AU12WuVR1Td74q9eAbtYZUnk5jnRbUuUacyhQFwm217bV5v1mNqTZ").unwrap(); + let caller_addr_unknown = + Address::from_str("AU1zLC4TFUiaKDg7quQyusMPQcHT4ykWVs3FsFpuhdNSmowUG2As").unwrap(); let event = SCOutputEvent { context: EventExecutionContext { @@ -1276,10 +1517,10 @@ mod tests { event.context.index_in_slot = i as u64; if i < threshold { event.context.call_stack = - VecDeque::from(vec![emit_addr_1.clone(), dummy_addr.clone()]); + VecDeque::from(vec![caller_addr_1.clone(), dummy_addr.clone()]); } else { event.context.call_stack = - VecDeque::from(vec![emit_addr_2.clone(), dummy_addr]); + VecDeque::from(vec![caller_addr_2.clone(), dummy_addr]); } event }) @@ -1291,7 +1532,7 @@ mod tests { let mut event = event.clone(); event.context.slot = slot_2; event.context.index_in_slot = index_2_1; - event.context.call_stack = VecDeque::from(vec![emit_addr_2.clone(), dummy_addr]); + event.context.call_stack = VecDeque::from(vec![caller_addr_2.clone(), dummy_addr]); event }; let index_2_2 = 256u64; @@ -1299,38 +1540,38 @@ mod tests { let mut event = event.clone(); event.context.slot = slot_2; event.context.index_in_slot = index_2_2; - event.context.call_stack = VecDeque::from(vec![emit_addr_2.clone(), dummy_addr]); + event.context.call_stack = VecDeque::from(vec![caller_addr_2.clone(), dummy_addr]); event }; events.push(event_slot_2.clone()); events.push(event_slot_2_2.clone()); // Randomize the events so we insert in random orders in the DB events.shuffle(&mut thread_rng()); - 
println!("inserting events:"); - for evt in events.iter() { - println!("{:?}", evt); - } - println!("{}", "#".repeat(32)); + // println!("inserting events:"); + // for evt in events.iter() { + // println!("{:?}", evt); + // } + // println!("{}", "#".repeat(32)); cache.insert_multi_it(events.into_iter()); - println!("db iter all:"); - for (k, v) in cache.iter_all(None) { - println!("k: {:?}, v: {:?}", k, v); - } - println!("{}", "#".repeat(32)); + // println!("db iter all:"); + // for (k, v) in cache.iter_all(None) { + // println!("k: {:?}, v: {:?}", k, v); + // } + // println!("{}", "#".repeat(32)); let mut filter_1 = EventFilter { start: None, // Some(Slot::new(2, 0)), end: None, emitter_address: None, - original_caller_address: Some(emit_addr_1), + original_caller_address: Some(caller_addr_1), original_operation_id: None, is_final: None, is_error: None, }; - let filtered_events_1 = cache.get_filtered_sc_output_events(&filter_1); + let (_, filtered_events_1) = cache.get_filtered_sc_output_events(&filter_1); assert_eq!(filtered_events_1.len(), threshold); filtered_events_1 @@ -1338,20 +1579,30 @@ mod tests { .enumerate() .for_each(|(_i, event)| { assert_eq!(event.context.slot, slot_1); - assert_eq!(*event.context.call_stack.front().unwrap(), emit_addr_1); + assert_eq!(*event.context.call_stack.front().unwrap(), caller_addr_1); }); { - filter_1.original_caller_address = Some(emit_addr_2); - let filtered_events_2 = cache.get_filtered_sc_output_events(&filter_1); + filter_1.original_caller_address = Some(caller_addr_2); + let (_, filtered_events_2) = cache.get_filtered_sc_output_events(&filter_1); assert_eq!(filtered_events_2.len(), threshold + 1 + 2); filtered_events_2 .iter() .enumerate() .for_each(|(_i, event)| { - assert_eq!(*event.context.call_stack.front().unwrap(), emit_addr_2); + assert_eq!(*event.context.call_stack.front().unwrap(), caller_addr_2); }); } + { + filter_1.original_caller_address = Some(dummy_addr); + let (_, filtered_events_2) = 
cache.get_filtered_sc_output_events(&filter_1); + assert_eq!(filtered_events_2.len(), 0); + } + { + filter_1.original_caller_address = Some(caller_addr_unknown); + let (_, filtered_events_2) = cache.get_filtered_sc_output_events(&filter_1); + assert_eq!(filtered_events_2.len(), 0); + } } #[test] @@ -1394,10 +1645,10 @@ mod tests { event.context.index_in_slot = i as u64; if i < threshold { event.context.call_stack = - VecDeque::from(vec![emit_addr_1.clone(), dummy_addr.clone()]); + VecDeque::from(vec![dummy_addr, emit_addr_1.clone()]); } else { event.context.call_stack = - VecDeque::from(vec![emit_addr_2.clone(), dummy_addr]); + VecDeque::from(vec![dummy_addr, emit_addr_2.clone()]); } event }) @@ -1409,7 +1660,7 @@ mod tests { let mut event = event.clone(); event.context.slot = slot_2; event.context.index_in_slot = index_2_1; - event.context.call_stack = VecDeque::from(vec![emit_addr_2.clone(), dummy_addr]); + event.context.call_stack = VecDeque::from(vec![dummy_addr, emit_addr_2.clone()]); event }; let index_2_2 = 256u64; @@ -1417,7 +1668,7 @@ mod tests { let mut event = event.clone(); event.context.slot = slot_2; event.context.index_in_slot = index_2_2; - event.context.call_stack = VecDeque::from(vec![emit_addr_2.clone(), dummy_addr]); + event.context.call_stack = VecDeque::from(vec![dummy_addr, emit_addr_2.clone()]); event.context.is_error = true; event }; @@ -1425,19 +1676,19 @@ mod tests { events.push(event_slot_2_2.clone()); // Randomize the events so we insert in random orders in the DB events.shuffle(&mut thread_rng()); - println!("inserting events:"); - for evt in events.iter() { - println!("{:?}", evt); - } - println!("{}", "#".repeat(32)); + // println!("inserting events:"); + // for evt in events.iter() { + // println!("{:?}", evt); + // } + // println!("{}", "#".repeat(32)); cache.insert_multi_it(events.into_iter()); - println!("db iter all:"); - for (k, v) in cache.iter_all(None) { - println!("k: {:?}, v: {:?}", k, v); - } - println!("{}", 
"#".repeat(32)); + // println!("db iter all:"); + // for (k, v) in cache.iter_all(None) { + // println!("k: {:?}, v: {:?}", k, v); + // } + // println!("{}", "#".repeat(32)); let filter_1 = EventFilter { start: None, // Some(Slot::new(2, 0)), @@ -1449,7 +1700,7 @@ mod tests { is_error: Some(true), }; - let filtered_events_1 = cache.get_filtered_sc_output_events(&filter_1); + let (_, filtered_events_1) = cache.get_filtered_sc_output_events(&filter_1); assert_eq!(filtered_events_1.len(), 1); assert_eq!(filtered_events_1[0].context.is_error, true); @@ -1479,4 +1730,104 @@ mod tests { } */ } + #[test] + #[serial] + fn test_filter_optim() { + // Test we iterate over the right number of rows when filtering + + let mut cache = setup(); + cache.max_entry_count = 10; + + let slot_1 = Slot::new(1, 0); + let index_1_0 = 0; + + let dummy_addr = + Address::from_str("AU12qePoXhNbYWE1jZuafqJong7bbq1jw3k89RgbMawbrdZpaasoA").unwrap(); + let emit_addr_1 = + Address::from_str("AU122Em8qkqegdLb1eyH8rdkSCNEf7RZLeTJve4Q2inRPGiTJ2xNv").unwrap(); + let emit_addr_2 = + Address::from_str("AU12WuVR1Td74q9eAbtYZUnk5jnRbUuUacyhQFwm217bV5v1mNqTZ").unwrap(); + + let event = SCOutputEvent { + context: EventExecutionContext { + slot: slot_1, + block: None, + read_only: false, + index_in_slot: index_1_0, + call_stack: Default::default(), + origin_operation_id: None, + is_final: true, + is_error: false, + }, + data: "message foo bar".to_string(), + }; + + let to_insert_count = cache.max_entry_count - 5; + let threshold = to_insert_count / 2; + let mut events = (0..cache.max_entry_count - 5) + .map(|i| { + let mut event = event.clone(); + event.context.index_in_slot = i as u64; + if i < threshold { + event.context.call_stack = + VecDeque::from(vec![dummy_addr, emit_addr_1.clone()]); + } else { + event.context.call_stack = + VecDeque::from(vec![dummy_addr, emit_addr_2.clone()]); + } + event + }) + .collect::>(); + + let slot_2 = Slot::new(2, 0); + let index_2_1 = 0u64; + let event_slot_2 = { + let mut 
event = event.clone(); + event.context.slot = slot_2; + event.context.index_in_slot = index_2_1; + event.context.call_stack = VecDeque::from(vec![dummy_addr, emit_addr_2.clone()]); + event + }; + let index_2_2 = 256u64; + let event_slot_2_2 = { + let mut event = event.clone(); + event.context.slot = slot_2; + event.context.index_in_slot = index_2_2; + event.context.call_stack = VecDeque::from(vec![dummy_addr, emit_addr_2.clone()]); + // event.context.is_error = true; + event + }; + events.push(event_slot_2.clone()); + events.push(event_slot_2_2.clone()); + // Randomize the events so we insert in random orders in the DB + events.shuffle(&mut thread_rng()); + // println!("inserting events:"); + // for evt in events.iter() { + // println!("{:?}", evt); + // } + // println!("{}", "#".repeat(32)); + + cache.insert_multi_it(events.into_iter()); + + // println!("db iter all:"); + // for (k, v) in cache.iter_all(None) { + // println!("k: {:?}, v: {:?}", k, v); + // } + // println!("{}", "#".repeat(32)); + + let emit_addr_1_count = cache + .filter_item_estimate_count( + &KeyIndent::EmitterAddress, + &FilterItem::EmitterAddress(emit_addr_1), + ) + .unwrap(); + assert_eq!(emit_addr_1_count, (threshold) as u64); + let emit_addr_2_count = cache + .filter_item_estimate_count( + &KeyIndent::EmitterAddress, + &FilterItem::EmitterAddress(emit_addr_2), + ) + .unwrap(); + assert_eq!(emit_addr_2_count, (threshold + 1 + 2) as u64); + } } diff --git a/massa-event-cache/src/rocksdb_operator.rs b/massa-event-cache/src/rocksdb_operator.rs index 65b9a9fd71b..d3518852e0e 100644 --- a/massa-event-cache/src/rocksdb_operator.rs +++ b/massa-event-cache/src/rocksdb_operator.rs @@ -5,7 +5,12 @@ pub fn counter_merge( existing_val: Option<&[u8]>, operands: &MergeOperands, ) -> Option> { - let counter_current_value = u64::from_be_bytes(existing_val?.try_into().unwrap()); + let counter_current_value = if let Some(existing_val) = existing_val { + u64::from_be_bytes(existing_val.try_into().unwrap()) + } 
else { + 0 + }; + let counter_value = operands.iter().fold(counter_current_value, |mut acc, x| { let incr_value = i64::from_be_bytes(x.try_into().unwrap()); acc = acc.saturating_add_signed(incr_value); diff --git a/massa-event-cache/src/worker.rs b/massa-event-cache/src/worker.rs index bf93441e106..d8fe018f58e 100644 --- a/massa-event-cache/src/worker.rs +++ b/massa-event-cache/src/worker.rs @@ -3,9 +3,9 @@ use std::sync::Arc; use std::thread; use std::time::Duration; // third-party +use massa_time::MassaTime; use parking_lot::{Condvar, Mutex, RwLock}; use tracing::{debug, info}; -use massa_time::MassaTime; // internal use crate::config::EventCacheConfig; use crate::controller::{ @@ -60,8 +60,8 @@ impl EventCacheWriterThread { // Wait until deadline let now = MassaTime::now(); - let wakeup_deadline = now.saturating_add( - MassaTime::from_millis(self.tick_delay.as_millis() as u64)); + let wakeup_deadline = + now.saturating_add(MassaTime::from_millis(self.tick_delay.as_millis() as u64)); let _ = self.input_data.0.wait_until( &mut input_data_lock, wakeup_deadline diff --git a/massa-models/src/output_event.rs b/massa-models/src/output_event.rs index f171aa6e359..19388dceeab 100644 --- a/massa-models/src/output_event.rs +++ b/massa-models/src/output_event.rs @@ -14,7 +14,7 @@ pub struct SCOutputEvent { impl PartialOrd for SCOutputEvent { fn partial_cmp(&self, other: &Self) -> Option { - (self.context.slot, self.context.index_in_slot).partial_cmp(&(other.context.slot, other.context.index_in_slot)) + Some(self.cmp(other)) } } From 180743f219ae017508c871b13aa0ec538e836ce0 Mon Sep 17 00:00:00 2001 From: sydhds Date: Thu, 28 Nov 2024 10:07:12 +0100 Subject: [PATCH 20/40] Cargo fmt --- massa-models/src/config/constants.rs | 4 +--- massa-models/src/output_event.rs | 5 +++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/massa-models/src/config/constants.rs b/massa-models/src/config/constants.rs index add7681e3a8..ee47b5aef01 100644 --- 
a/massa-models/src/config/constants.rs +++ b/massa-models/src/config/constants.rs @@ -315,7 +315,6 @@ pub const MAX_EVENT_PER_OPERATION: usize = 25; /// Maximum number of recursion for calls pub const MAX_RECURSIVE_CALLS_DEPTH: u16 = 25; - // // Constants used in network // @@ -413,10 +412,9 @@ pub const DEFERRED_CALL_SLOT_OVERBOOKING_PENALTY: Amount = Amount::from_raw(1_00 /// deferred call call gas cost pub const DEFERRED_CALL_CST_GAS_COST: u64 = 750_000; - // // Constants for event cache -// +// /// Maximum number of events that can be returned by a query pub const MAX_EVENTS_PER_QUERY: usize = 10000; diff --git a/massa-models/src/output_event.rs b/massa-models/src/output_event.rs index 19388dceeab..50ea0eb54d3 100644 --- a/massa-models/src/output_event.rs +++ b/massa-models/src/output_event.rs @@ -1,7 +1,7 @@ use crate::{address::Address, block_id::BlockId, operation::OperationId, slot::Slot}; use serde::{Deserialize, Serialize}; -use std::{collections::VecDeque, fmt::Display}; use std::cmp::Ordering; +use std::{collections::VecDeque, fmt::Display}; #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] /// By product of a byte code execution @@ -20,7 +20,8 @@ impl PartialOrd for SCOutputEvent { impl Ord for SCOutputEvent { fn cmp(&self, other: &Self) -> Ordering { - (self.context.slot, self.context.index_in_slot).cmp(&(other.context.slot, other.context.index_in_slot)) + (self.context.slot, self.context.index_in_slot) + .cmp(&(other.context.slot, other.context.index_in_slot)) } } From 1989ee5dbbd22330ee125da5d92bf1981be806b3 Mon Sep 17 00:00:00 2001 From: sydhds Date: Thu, 28 Nov 2024 10:13:58 +0100 Subject: [PATCH 21/40] typos fixes --- _typos.toml | 3 ++- massa-event-cache/src/event_cache.rs | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/_typos.toml b/_typos.toml index c758abfbe1c..5959126f53d 100644 --- a/_typos.toml +++ b/_typos.toml @@ -13,7 +13,8 @@ extend-ignore-re = [ # Secret key (S): 18 - 62 characters # Public key 
(P): 18 - 62 characters # NodeId (N) - "(AU|AS|N|S|P)\\d\\w{18,62}", + # OperationId (O) + "(AU|AS|N|S|P|O)\\d\\w{18,62}", ] [default.extend-words] diff --git a/massa-event-cache/src/event_cache.rs b/massa-event-cache/src/event_cache.rs index 93692861ae3..be6f1a74bcf 100644 --- a/massa-event-cache/src/event_cache.rs +++ b/massa-event-cache/src/event_cache.rs @@ -1117,7 +1117,7 @@ mod tests { #[test] #[serial] fn test_snip() { - // Test snip so we enfore that all db keys are removed + // Test snip so we enforce that all db keys are removed let mut cache = setup(); cache.max_entry_count = 10; @@ -1238,7 +1238,7 @@ mod tests { OperationId::from_str("O12n1vt8uTLh3H65J4TVuztaWfBh3oumjjVtRCkke7Ba5qWdXdjD").unwrap(); let op_id_2 = OperationId::from_str("O1p5P691KF672fQ8tQougxzSERBwDKZF8FwtkifMSJbP14sEuGc").unwrap(); - let op_id_uknown = + let op_id_unknown = OperationId::from_str("O1kvXTfsnVbQcmDERkC89vqAd2xRTLCb3q5b2E5WaVPHwFd7Qth").unwrap(); let event = SCOutputEvent { @@ -1339,7 +1339,7 @@ mod tests { } { - filter_1.original_operation_id = Some(op_id_uknown); + filter_1.original_operation_id = Some(op_id_unknown); let (_, filtered_events_2) = cache.get_filtered_sc_output_events(&filter_1); assert_eq!(filtered_events_2.len(), 0); } From 31111e3e4942f4cfb5128b56fa677c85699998e7 Mon Sep 17 00:00:00 2001 From: sydhds Date: Thu, 28 Nov 2024 10:34:57 +0100 Subject: [PATCH 22/40] Cargo clippy fixes for tests --- massa-event-cache/src/event_cache.rs | 46 +++++++++++++--------------- 1 file changed, 21 insertions(+), 25 deletions(-) diff --git a/massa-event-cache/src/event_cache.rs b/massa-event-cache/src/event_cache.rs index be6f1a74bcf..59563236017 100644 --- a/massa-event-cache/src/event_cache.rs +++ b/massa-event-cache/src/event_cache.rs @@ -1387,10 +1387,10 @@ mod tests { event.context.index_in_slot = i as u64; if i < threshold { event.context.call_stack = - VecDeque::from(vec![dummy_addr, emit_addr_1.clone()]); + VecDeque::from(vec![dummy_addr, emit_addr_1]); } else { 
event.context.call_stack = - VecDeque::from(vec![dummy_addr, emit_addr_2.clone()]); + VecDeque::from(vec![dummy_addr, emit_addr_2]); } event }) @@ -1402,7 +1402,7 @@ mod tests { let mut event = event.clone(); event.context.slot = slot_2; event.context.index_in_slot = index_2_1; - event.context.call_stack = VecDeque::from(vec![dummy_addr, emit_addr_2.clone()]); + event.context.call_stack = VecDeque::from(vec![dummy_addr, emit_addr_2]); event }; let index_2_2 = 256u64; @@ -1410,7 +1410,7 @@ mod tests { let mut event = event.clone(); event.context.slot = slot_2; event.context.index_in_slot = index_2_2; - event.context.call_stack = VecDeque::from(vec![dummy_addr, emit_addr_2.clone()]); + event.context.call_stack = VecDeque::from(vec![dummy_addr, emit_addr_2]); event }; events.push(event_slot_2.clone()); @@ -1446,8 +1446,7 @@ mod tests { assert_eq!(filtered_events_1.len(), threshold); filtered_events_1 .iter() - .enumerate() - .for_each(|(_i, event)| { + .for_each(|event| { assert_eq!(event.context.slot, slot_1); assert_eq!(*event.context.call_stack.back().unwrap(), emit_addr_1) }); @@ -1458,8 +1457,7 @@ mod tests { assert_eq!(filtered_events_2.len(), threshold + 1 + 2); filtered_events_2 .iter() - .enumerate() - .for_each(|(_i, event)| { + .for_each(|event| { assert_eq!(*event.context.call_stack.back().unwrap(), emit_addr_2) }); } @@ -1517,10 +1515,10 @@ mod tests { event.context.index_in_slot = i as u64; if i < threshold { event.context.call_stack = - VecDeque::from(vec![caller_addr_1.clone(), dummy_addr.clone()]); + VecDeque::from(vec![caller_addr_1, dummy_addr]); } else { event.context.call_stack = - VecDeque::from(vec![caller_addr_2.clone(), dummy_addr]); + VecDeque::from(vec![caller_addr_2, dummy_addr]); } event }) @@ -1532,7 +1530,7 @@ mod tests { let mut event = event.clone(); event.context.slot = slot_2; event.context.index_in_slot = index_2_1; - event.context.call_stack = VecDeque::from(vec![caller_addr_2.clone(), dummy_addr]); + event.context.call_stack = 
VecDeque::from(vec![caller_addr_2, dummy_addr]); event }; let index_2_2 = 256u64; @@ -1540,7 +1538,7 @@ mod tests { let mut event = event.clone(); event.context.slot = slot_2; event.context.index_in_slot = index_2_2; - event.context.call_stack = VecDeque::from(vec![caller_addr_2.clone(), dummy_addr]); + event.context.call_stack = VecDeque::from(vec![caller_addr_2, dummy_addr]); event }; events.push(event_slot_2.clone()); @@ -1576,8 +1574,7 @@ mod tests { assert_eq!(filtered_events_1.len(), threshold); filtered_events_1 .iter() - .enumerate() - .for_each(|(_i, event)| { + .for_each(|event| { assert_eq!(event.context.slot, slot_1); assert_eq!(*event.context.call_stack.front().unwrap(), caller_addr_1); }); @@ -1588,8 +1585,7 @@ mod tests { assert_eq!(filtered_events_2.len(), threshold + 1 + 2); filtered_events_2 .iter() - .enumerate() - .for_each(|(_i, event)| { + .for_each(|event| { assert_eq!(*event.context.call_stack.front().unwrap(), caller_addr_2); }); } @@ -1645,10 +1641,10 @@ mod tests { event.context.index_in_slot = i as u64; if i < threshold { event.context.call_stack = - VecDeque::from(vec![dummy_addr, emit_addr_1.clone()]); + VecDeque::from(vec![dummy_addr, emit_addr_1]); } else { event.context.call_stack = - VecDeque::from(vec![dummy_addr, emit_addr_2.clone()]); + VecDeque::from(vec![dummy_addr, emit_addr_2]); } event }) @@ -1660,7 +1656,7 @@ mod tests { let mut event = event.clone(); event.context.slot = slot_2; event.context.index_in_slot = index_2_1; - event.context.call_stack = VecDeque::from(vec![dummy_addr, emit_addr_2.clone()]); + event.context.call_stack = VecDeque::from(vec![dummy_addr, emit_addr_2]); event }; let index_2_2 = 256u64; @@ -1668,7 +1664,7 @@ mod tests { let mut event = event.clone(); event.context.slot = slot_2; event.context.index_in_slot = index_2_2; - event.context.call_stack = VecDeque::from(vec![dummy_addr, emit_addr_2.clone()]); + event.context.call_stack = VecDeque::from(vec![dummy_addr, emit_addr_2]); event.context.is_error = 
true; event }; @@ -1703,7 +1699,7 @@ mod tests { let (_, filtered_events_1) = cache.get_filtered_sc_output_events(&filter_1); assert_eq!(filtered_events_1.len(), 1); - assert_eq!(filtered_events_1[0].context.is_error, true); + assert!(filtered_events_1[0].context.is_error); assert_eq!(filtered_events_1[0].context.slot, slot_2); assert_eq!(filtered_events_1[0].context.index_in_slot, index_2_2); @@ -1770,10 +1766,10 @@ mod tests { event.context.index_in_slot = i as u64; if i < threshold { event.context.call_stack = - VecDeque::from(vec![dummy_addr, emit_addr_1.clone()]); + VecDeque::from(vec![dummy_addr, emit_addr_1]); } else { event.context.call_stack = - VecDeque::from(vec![dummy_addr, emit_addr_2.clone()]); + VecDeque::from(vec![dummy_addr, emit_addr_2]); } event }) @@ -1785,7 +1781,7 @@ mod tests { let mut event = event.clone(); event.context.slot = slot_2; event.context.index_in_slot = index_2_1; - event.context.call_stack = VecDeque::from(vec![dummy_addr, emit_addr_2.clone()]); + event.context.call_stack = VecDeque::from(vec![dummy_addr, emit_addr_2]); event }; let index_2_2 = 256u64; @@ -1793,7 +1789,7 @@ mod tests { let mut event = event.clone(); event.context.slot = slot_2; event.context.index_in_slot = index_2_2; - event.context.call_stack = VecDeque::from(vec![dummy_addr, emit_addr_2.clone()]); + event.context.call_stack = VecDeque::from(vec![dummy_addr, emit_addr_2]); // event.context.is_error = true; event }; From 082e86e0761cc4a5f5b08654e4bf62af8e86f12d Mon Sep 17 00:00:00 2001 From: sydhds Date: Thu, 28 Nov 2024 10:36:39 +0100 Subject: [PATCH 23/40] Cargo fmt --- massa-event-cache/src/event_cache.rs | 60 ++++++++++------------------ 1 file changed, 22 insertions(+), 38 deletions(-) diff --git a/massa-event-cache/src/event_cache.rs b/massa-event-cache/src/event_cache.rs index 59563236017..e744fb9ee84 100644 --- a/massa-event-cache/src/event_cache.rs +++ b/massa-event-cache/src/event_cache.rs @@ -1386,11 +1386,9 @@ mod tests { let mut event = 
event.clone(); event.context.index_in_slot = i as u64; if i < threshold { - event.context.call_stack = - VecDeque::from(vec![dummy_addr, emit_addr_1]); + event.context.call_stack = VecDeque::from(vec![dummy_addr, emit_addr_1]); } else { - event.context.call_stack = - VecDeque::from(vec![dummy_addr, emit_addr_2]); + event.context.call_stack = VecDeque::from(vec![dummy_addr, emit_addr_2]); } event }) @@ -1444,22 +1442,18 @@ mod tests { let (_, filtered_events_1) = cache.get_filtered_sc_output_events(&filter_1); assert_eq!(filtered_events_1.len(), threshold); - filtered_events_1 - .iter() - .for_each(|event| { - assert_eq!(event.context.slot, slot_1); - assert_eq!(*event.context.call_stack.back().unwrap(), emit_addr_1) - }); + filtered_events_1.iter().for_each(|event| { + assert_eq!(event.context.slot, slot_1); + assert_eq!(*event.context.call_stack.back().unwrap(), emit_addr_1) + }); { filter_1.emitter_address = Some(emit_addr_2); let (_, filtered_events_2) = cache.get_filtered_sc_output_events(&filter_1); assert_eq!(filtered_events_2.len(), threshold + 1 + 2); - filtered_events_2 - .iter() - .for_each(|event| { - assert_eq!(*event.context.call_stack.back().unwrap(), emit_addr_2) - }); + filtered_events_2.iter().for_each(|event| { + assert_eq!(*event.context.call_stack.back().unwrap(), emit_addr_2) + }); } { filter_1.emitter_address = Some(dummy_addr); @@ -1514,11 +1508,9 @@ mod tests { let mut event = event.clone(); event.context.index_in_slot = i as u64; if i < threshold { - event.context.call_stack = - VecDeque::from(vec![caller_addr_1, dummy_addr]); + event.context.call_stack = VecDeque::from(vec![caller_addr_1, dummy_addr]); } else { - event.context.call_stack = - VecDeque::from(vec![caller_addr_2, dummy_addr]); + event.context.call_stack = VecDeque::from(vec![caller_addr_2, dummy_addr]); } event }) @@ -1572,22 +1564,18 @@ mod tests { let (_, filtered_events_1) = cache.get_filtered_sc_output_events(&filter_1); assert_eq!(filtered_events_1.len(), threshold); - 
filtered_events_1 - .iter() - .for_each(|event| { - assert_eq!(event.context.slot, slot_1); - assert_eq!(*event.context.call_stack.front().unwrap(), caller_addr_1); - }); + filtered_events_1.iter().for_each(|event| { + assert_eq!(event.context.slot, slot_1); + assert_eq!(*event.context.call_stack.front().unwrap(), caller_addr_1); + }); { filter_1.original_caller_address = Some(caller_addr_2); let (_, filtered_events_2) = cache.get_filtered_sc_output_events(&filter_1); assert_eq!(filtered_events_2.len(), threshold + 1 + 2); - filtered_events_2 - .iter() - .for_each(|event| { - assert_eq!(*event.context.call_stack.front().unwrap(), caller_addr_2); - }); + filtered_events_2.iter().for_each(|event| { + assert_eq!(*event.context.call_stack.front().unwrap(), caller_addr_2); + }); } { filter_1.original_caller_address = Some(dummy_addr); @@ -1640,11 +1628,9 @@ mod tests { let mut event = event.clone(); event.context.index_in_slot = i as u64; if i < threshold { - event.context.call_stack = - VecDeque::from(vec![dummy_addr, emit_addr_1]); + event.context.call_stack = VecDeque::from(vec![dummy_addr, emit_addr_1]); } else { - event.context.call_stack = - VecDeque::from(vec![dummy_addr, emit_addr_2]); + event.context.call_stack = VecDeque::from(vec![dummy_addr, emit_addr_2]); } event }) @@ -1765,11 +1751,9 @@ mod tests { let mut event = event.clone(); event.context.index_in_slot = i as u64; if i < threshold { - event.context.call_stack = - VecDeque::from(vec![dummy_addr, emit_addr_1]); + event.context.call_stack = VecDeque::from(vec![dummy_addr, emit_addr_1]); } else { - event.context.call_stack = - VecDeque::from(vec![dummy_addr, emit_addr_2]); + event.context.call_stack = VecDeque::from(vec![dummy_addr, emit_addr_2]); } event }) From f4b4f813aab57c6b46b8a736e65d4bdced768ee3 Mon Sep 17 00:00:00 2001 From: sydhds Date: Thu, 28 Nov 2024 16:11:45 +0100 Subject: [PATCH 24/40] Add mock expectations + impl --- massa-execution-worker/src/execution.rs | 11 --- 
massa-execution-worker/src/tests/universe.rs | 81 +++++++++++++++++++- 2 files changed, 77 insertions(+), 15 deletions(-) diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index 927ef23c473..4bf37a961e7 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -204,17 +204,6 @@ impl ExecutionState { execution_context.clone(), )); - /* - let event_cache_controller = EventCacheController::new(EventCacheConfig { - event_cache_path: config.event_cache_path.clone(), - max_event_cache_length: config.event_cache_size, - snip_amount: config.event_snip_amount, - max_event_data_length: config.max_event_size as u64, - thread_count: config.thread_count, - max_recursive_call_depth: config.max_recursive_calls_depth, - }); - */ - // build the execution state ExecutionState { final_state, diff --git a/massa-execution-worker/src/tests/universe.rs b/massa-execution-worker/src/tests/universe.rs index 48ba24da07a..8afc6f95ef2 100644 --- a/massa-execution-worker/src/tests/universe.rs +++ b/massa-execution-worker/src/tests/universe.rs @@ -13,6 +13,8 @@ use cfg_if::cfg_if; use massa_db_exports::{MassaDBConfig, MassaDBController, ShareableMassaDBController}; use massa_db_worker::MassaDB; use massa_event_cache::MockEventCacheControllerWrapper; +#[cfg(feature = "execution-trace")] +use massa_execution_exports::types_trace_info::SlotAbiCallStack; use massa_execution_exports::{ ExecutionBlockMetadata, ExecutionChannels, ExecutionConfig, ExecutionController, ExecutionError, ExecutionManager, SlotExecutionOutput, @@ -21,6 +23,7 @@ use massa_final_state::{FinalStateController, MockFinalStateController}; use massa_ledger_exports::MockLedgerControllerWrapper; use massa_metrics::MassaMetrics; use massa_models::config::{CHAINID, GENESIS_KEY}; +use massa_models::output_event::SCOutputEvent; use massa_models::{ address::Address, amount::Amount, @@ -45,9 +48,6 @@ use parking_lot::RwLock; use tempfile::TempDir; use 
tokio::sync::broadcast; -#[cfg(feature = "execution-trace")] -use massa_execution_exports::types_trace_info::SlotAbiCallStack; - pub struct ExecutionForeignControllers { pub selector_controller: Box, pub final_state: Arc>, @@ -71,12 +71,85 @@ impl ExecutionForeignControllers { let db = Arc::new(RwLock::new( Box::new(MassaDB::new(db_config)) as Box<(dyn MassaDBController + 'static)> )); + + let mut event_cache_controller = MockEventCacheControllerWrapper::new(); + let events = Arc::new(std::sync::Mutex::new(Vec::new())); + let events_clone_1 = events.clone(); + let events_clone_2 = events.clone(); + + event_cache_controller.set_expectations(|controller| { + controller + .expect_save_events() + .withf(move |new_events| { + // Save events in memory + events_clone_1 + .lock() + .unwrap() + .extend(new_events.iter().cloned()); + true + }) + .return_const(()); + controller + .expect_get_filtered_sc_output_events() + .returning(move |filter| { + let events_ = events_clone_2.lock().unwrap().clone(); + events_ + .into_iter() + .filter(|evt| { + if let Some(start) = filter.start { + if evt.context.slot < start { + return false; + } + } + if let Some(end) = filter.end { + if evt.context.slot >= end { + return false; + } + } + if let Some(is_final) = filter.is_final { + if evt.context.is_final != is_final { + return false; + } + } + if let Some(is_error) = filter.is_error { + if evt.context.is_error != is_error { + return false; + } + } + + match ( + filter.original_caller_address, + evt.context.call_stack.front(), + ) { + (Some(addr1), Some(addr2)) if addr1 != *addr2 => return false, + (Some(_), None) => return false, + _ => (), + } + match (filter.emitter_address, evt.context.call_stack.back()) { + (Some(addr1), Some(addr2)) if addr1 != *addr2 => return false, + (Some(_), None) => return false, + _ => (), + } + match ( + filter.original_operation_id, + evt.context.origin_operation_id, + ) { + (Some(addr1), Some(addr2)) if addr1 != addr2 => return false, + (Some(_), None) => 
return false, + _ => (), + } + + return true; + }) + .collect::>() + }); + }); Self { selector_controller: Box::new(MockSelectorControllerWrapper::new()), ledger_controller: MockLedgerControllerWrapper::new(), final_state: Arc::new(RwLock::new(MockFinalStateController::new())), db, - event_cache_controller: Box::new(MockEventCacheControllerWrapper::new()), + event_cache_controller: Box::new(event_cache_controller), } } } From 16bc18d6f0234c4e9b341cdc3d16875a73684dd2 Mon Sep 17 00:00:00 2001 From: sydhds Date: Thu, 28 Nov 2024 16:30:07 +0100 Subject: [PATCH 25/40] Cargo clippy for TU fixes --- massa-execution-worker/src/tests/universe.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/massa-execution-worker/src/tests/universe.rs b/massa-execution-worker/src/tests/universe.rs index 8afc6f95ef2..49f7ea2873f 100644 --- a/massa-execution-worker/src/tests/universe.rs +++ b/massa-execution-worker/src/tests/universe.rs @@ -139,7 +139,7 @@ impl ExecutionForeignControllers { _ => (), } - return true; + true }) .collect::>() }); From 23bfa065afb0b5006608555647dd205ce6db0d9a Mon Sep 17 00:00:00 2001 From: sydhds Date: Fri, 29 Nov 2024 10:13:51 +0100 Subject: [PATCH 26/40] Use MAX_EVENT_PER_OPERATION constant --- massa-node/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index 14ca16bbd77..e397e1c0633 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -489,7 +489,7 @@ async fn launch( // The value remains for future use & limitations max_call_stack_length: u16::MAX, - max_events_per_operation: 25u64, // MAX_EVENTS_PER_OPERATION - TODO: rebase + max_events_per_operation: MAX_EVENT_PER_OPERATION as u64, max_operations_per_block: MAX_OPERATIONS_PER_BLOCK as u64, max_events_per_query: MAX_EVENTS_PER_QUERY, tick_delay: Duration::from_millis(EVENT_CACHE_TICK_DELAY), From fe319c55887ea7a75eeef808ef7fc0bb4ad5509b Mon Sep 17 00:00:00 2001 From: sydhds Date: Fri, 29 Nov 2024 
10:32:55 +0100 Subject: [PATCH 27/40] Unit test the filter optimisations --- massa-event-cache/src/event_cache.rs | 55 +++++++++++++++++----------- 1 file changed, 33 insertions(+), 22 deletions(-) diff --git a/massa-event-cache/src/event_cache.rs b/massa-event-cache/src/event_cache.rs index e744fb9ee84..6eb6cea44ed 100644 --- a/massa-event-cache/src/event_cache.rs +++ b/massa-event-cache/src/event_cache.rs @@ -124,7 +124,7 @@ impl EventCacheKeyBuilder { KeyIndent::Event => { let item = KeyBuilderType::Event(&event.context.slot, event.context.index_in_slot); Some(self.key(indent, item, is_prefix, is_counter)) - } + }, KeyIndent::EmitterAddress => { if let Some(addr) = event.context.call_stack.back() { let item = KeyBuilderType::Address(addr); @@ -138,7 +138,7 @@ impl EventCacheKeyBuilder { } else { None } - } + }, KeyIndent::OriginalCallerAddress => { if let Some(addr) = event.context.call_stack.front() { let item = KeyBuilderType::Address(addr); @@ -152,7 +152,7 @@ impl EventCacheKeyBuilder { } else { None } - } + }, KeyIndent::OriginalOperationId => { if let Some(op_id) = event.context.origin_operation_id.as_ref() { let item = KeyBuilderType::OperationId(op_id); @@ -167,7 +167,7 @@ impl EventCacheKeyBuilder { } else { None } - } + }, KeyIndent::IsError => { let item = KeyBuilderType::Bool(event.context.is_error); let mut key = self.key(indent, item, is_prefix, is_counter); @@ -176,7 +176,7 @@ impl EventCacheKeyBuilder { key.extend(self.key(&KeyIndent::Event, item, false, false)); } Some(key) - } + }, _ => unreachable!(), }; @@ -486,7 +486,7 @@ impl EventCache { // Step 2: apply filter from the lowest counter to the highest counter - let mut query_counts = vec![]; + let mut query_counts = Vec::with_capacity(map.len()); let mut filter_res_prev = None; for ((_counter, indent), filter_item) in map.iter() { let mut filter_res = BTreeSet::new(); @@ -1739,9 +1739,9 @@ mod tests { call_stack: Default::default(), origin_operation_id: None, is_final: true, - is_error: 
false, + is_error: true, }, - data: "message foo bar".to_string(), + data: "error foo bar".to_string(), }; let to_insert_count = cache.max_entry_count - 5; @@ -1781,33 +1781,44 @@ mod tests { events.push(event_slot_2_2.clone()); // Randomize the events so we insert in random orders in the DB events.shuffle(&mut thread_rng()); - // println!("inserting events:"); - // for evt in events.iter() { - // println!("{:?}", evt); - // } - // println!("{}", "#".repeat(32)); - cache.insert_multi_it(events.into_iter()); - - // println!("db iter all:"); - // for (k, v) in cache.iter_all(None) { - // println!("k: {:?}, v: {:?}", k, v); - // } - // println!("{}", "#".repeat(32)); - + + // Check if we correctly count the number of events in the DB with emit_addr_1 & emit_addr_2 let emit_addr_1_count = cache .filter_item_estimate_count( &KeyIndent::EmitterAddress, &FilterItem::EmitterAddress(emit_addr_1), ) .unwrap(); - assert_eq!(emit_addr_1_count, (threshold) as u64); let emit_addr_2_count = cache .filter_item_estimate_count( &KeyIndent::EmitterAddress, &FilterItem::EmitterAddress(emit_addr_2), ) .unwrap(); + + + assert_eq!(emit_addr_1_count, (threshold) as u64); assert_eq!(emit_addr_2_count, (threshold + 1 + 2) as u64); + + // Check if we query first by emitter address then is_error + + let filter_1 = { + let mut filter = EventFilter::default(); + filter.emitter_address = Some(emit_addr_1); + filter.is_error = Some(true); + filter + }; + + let (query_counts, _filtered_events_1) = cache + .get_filtered_sc_output_events(&filter_1); + println!("threshold: {:?}", threshold); + println!("query_counts: {:?}", query_counts); + + // Check that we iter no more then needed (here: only 2 (== threshold) event with emit addr 1) + assert_eq!(query_counts[0], threshold as u64); + // For second filter (is_error) we could have iter more (all events have is_error = true) + // but as soon as we found 2 items we could return (as the previous filter already limit the final count) + 
assert_eq!(query_counts[1], threshold as u64); } } From d192d2a6b6211bedf555d062080f0a6c9d3b0554 Mon Sep 17 00:00:00 2001 From: sydhds Date: Fri, 29 Nov 2024 11:05:53 +0100 Subject: [PATCH 28/40] Add more doc --- massa-event-cache/src/event_cache.rs | 151 +++++++++------------------ 1 file changed, 48 insertions(+), 103 deletions(-) diff --git a/massa-event-cache/src/event_cache.rs b/massa-event-cache/src/event_cache.rs index 6eb6cea44ed..c994021d3eb 100644 --- a/massa-event-cache/src/event_cache.rs +++ b/massa-event-cache/src/event_cache.rs @@ -42,6 +42,7 @@ enum KeyIndent { IsFinal, } +/// Key type that we want to get enum KeyBuilderType<'a> { Slot(&'a Slot), Event(&'a Slot, u64), @@ -51,18 +52,23 @@ enum KeyBuilderType<'a> { None, } -struct EventCacheKeyBuilder { +struct DbKeyBuilder { /// Operation Id Serializer op_id_ser: OperationIdSerializer, } -impl EventCacheKeyBuilder { +impl DbKeyBuilder { fn new() -> Self { Self { op_id_ser: OperationIdSerializer::new(), } } + /// Low level key builder function + /// There is no guarantees that the key will be unique + /// Recommended to use high level function like: + /// `key_from_event`, `prefix_key_from_indent`, + /// `prefix_key_from_filter_item` or `counter_key_from_filter_item` fn key( &self, indent: &KeyIndent, @@ -70,9 +76,6 @@ impl EventCacheKeyBuilder { _is_prefix: bool, is_counter: bool, ) -> Vec { - // Low level key builder function - // There is no guarantees that the key will be unique - // Use key_from_event OR key_from... 
unless you know what you're doing let mut key_base = if is_counter { vec![u8::from(KeyIndent::Counter), u8::from(*indent)] @@ -111,6 +114,7 @@ impl EventCacheKeyBuilder { key_base } + /// Key usually used to populate the DB fn key_from_event( &self, event: &SCOutputEvent, @@ -118,8 +122,17 @@ impl EventCacheKeyBuilder { is_prefix: bool, is_counter: bool, ) -> Option> { + // High level key builder function - + // Db format: + // * Regular keys: + // * Event key: [Event Indent][Slot][Index] -> Event value: Event serialized + // * Emitter address key: [Emitter Address Indent][Addr][Addr len][Event key] -> [] + // * Prefix keys: + // * Emitter address prefix key: [Counter indent][Emitter Address Indent][Addr][Addr len] + // * Counter keys: + // * Emitter address counter key: [Counter indent][Emitter Address Indent][Addr][Addr len][Event key] -> u64 + let key = match indent { KeyIndent::Event => { let item = KeyBuilderType::Event(&event.context.slot, event.context.index_in_slot); @@ -256,6 +269,7 @@ impl EventCacheKeyBuilder { } } +/// Disk based event cache db (rocksdb based) pub(crate) struct EventCache { /// RocksDB database db: DB, @@ -272,7 +286,7 @@ pub(crate) struct EventCache { /// Event deserializer event_deser: SCOutputEventDeserializer, /// Key builder - key_builder: EventCacheKeyBuilder, + key_builder: DbKeyBuilder, /// First event slot in db first_slot: Slot, /// Last event slot in db @@ -311,7 +325,7 @@ impl EventCache { }; let db = DB::open(&options, path).expect(OPEN_ERROR); - let key_builder_2 = EventCacheKeyBuilder::new(); + let key_builder_2 = DbKeyBuilder::new(); Self { db, @@ -482,7 +496,7 @@ impl EventCache { }); let map = BTreeMap::from_iter(it); - // println!("map: {:?}", map); + debug!("Filter items map: {:?}", map); // Step 2: apply filter from the lowest counter to the highest counter @@ -508,10 +522,9 @@ impl EventCache { .take(self.max_events_per_query) .collect::>>(); - // println!("multi_args len: {:?}", multi_args.len()); - // 
println!("multi_args: {:?}", multi_args); let res = self.db.multi_get(multi_args); - + debug!("Filter will try to deserialize to SCOutputEvent {} values", res.len()); + let events = res .into_iter() .map(|value| { @@ -534,6 +547,7 @@ impl EventCache { result: &mut BTreeSet>, seen: Option<&BTreeSet>>, ) -> u64 { + let mut query_count: u64 = 0; if *indent == KeyIndent::Event { @@ -571,7 +585,9 @@ impl EventCache { #[allow(clippy::manual_flatten)] for kvb in self.db.iterator_opt(IteratorMode::Start, opts) { if let Ok(kvb) = kvb { + if !kvb.0.starts_with(&[*indent as u8]) { + // Stop as soon as our key does not start with the right indent break; } @@ -615,10 +631,12 @@ impl EventCache { for kvb in self.db.prefix_iterator(prefix_filter.as_slice()) { if let Ok(kvb) = kvb { if !kvb.0.starts_with(&[*indent as u8]) { + // Stop as soon as our key does not start with the right indent break; } if !kvb.0.starts_with(prefix_filter.as_slice()) { + // Stop as soon as our key does not start with our current prefix break; } @@ -651,7 +669,7 @@ impl EventCache { query_count } - /// Estimate for a given KeyIndent & FilterItem the number of row to process + /// Estimate for a given KeyIndent & FilterItem the number of row to iterate fn filter_item_estimate_count( &self, key_indent: &KeyIndent, @@ -682,9 +700,7 @@ impl EventCache { let counter_key = self .key_builder .counter_key_from_filter_item(filter_item, key_indent); - println!("counter_key: {:?}", counter_key); let counter = self.db.get(counter_key).expect(COUNTER_ERROR); - println!("counter: {:?}", counter); let counter_value = counter .map(|b| u64::from_be_bytes(b.try_into().unwrap())) .unwrap_or(0); @@ -725,6 +741,7 @@ impl EventCache { /// Try to remove some entries from the db fn snip(&mut self, snip_amount: Option) { + let mut iter = self.db.iterator(IteratorMode::Start); let mut batch = WriteBatch::default(); let mut snipped_count: usize = 0; @@ -813,11 +830,13 @@ impl EventCache { // delete the key and reduce entry_count 
self.db.write(batch).expect(CRUD_ERROR); self.entry_count = self.entry_count.saturating_sub(snipped_count); - + + // delete key counters where value == 0 let mut batch_counters = WriteBatch::default(); + const U64_ZERO_BYTES: [u8; 8] = 0u64.to_be_bytes(); for (value, key) in self.db.multi_get(&counter_keys).iter().zip(counter_keys) { if let Ok(Some(value)) = value { - if *value == 0u64.to_be_bytes().to_vec() { + if *value == U64_ZERO_BYTES.to_vec() { batch_counters.delete(key); } } @@ -848,7 +867,7 @@ impl EventCache { } } -/// A filter parameter - used to decompose EventFilter in multiple filters +/// A filter parameter - used to decompose an EventFilter in multiple filters #[derive(Debug)] enum FilterItem { SlotStart(Slot), @@ -1154,7 +1173,7 @@ mod tests { #[test] #[serial] fn test_event_filter() { - // Test that the data will be correctly ordered (when iterated from start) in db + // Test that the data will be correctly ordered (when filtered) in db let mut cache = setup(); let slot_1 = Slot::new(1, 0); @@ -1284,21 +1303,10 @@ mod tests { events.push(event_slot_2_2.clone()); // Randomize the events so we insert in random orders in the DB events.shuffle(&mut thread_rng()); - // println!("inserting events:"); - // for evt in events.iter() { - // println!("{:?}", evt); - // } - // println!("{}", "#".repeat(32)); - cache.insert_multi_it(events.into_iter()); - - // for (k, v) in cache.iter_all(None) { - // println!("k: {:?}, v: {:?}", k, v); - // } - // println!("{}", "#".repeat(32)); - + let mut filter_1 = EventFilter { - start: None, // Some(Slot::new(2, 0)), + start: None, end: None, emitter_address: None, original_caller_address: None, @@ -1311,24 +1319,15 @@ mod tests { assert_eq!(filtered_events_1.len(), cache.max_entry_count - 5); filtered_events_1.iter().enumerate().for_each(|(i, event)| { - // println!("checking event #{}: {:?}", i, event); assert_eq!(event.context.slot, slot_1); assert_eq!(event.context.index_in_slot, i as u64); }); - // 
println!("filtered_events_1[0]: {:?}", filtered_events_1[0]); - // assert_eq!(filtered_events_1[0].context.slot, slot_2); - // assert_eq!(filtered_events_1[0].context.index_in_slot, index_2_1); - // println!("filtered_events_1[1]: {:?}", filtered_events_1[1]); - // assert_eq!(filtered_events_1[1].context.slot, slot_2); - // assert_eq!(filtered_events_1[1].context.index_in_slot, index_2_2); - { filter_1.original_operation_id = Some(op_id_2); let (_, filtered_events_2) = cache.get_filtered_sc_output_events(&filter_1); assert_eq!(filtered_events_2.len(), 2); filtered_events_2.iter().enumerate().for_each(|(i, event)| { - // println!("checking event #{}: {:?}", i, event); assert_eq!(event.context.slot, slot_2); if i == 0 { assert_eq!(event.context.index_in_slot, i as u64); @@ -1415,22 +1414,11 @@ mod tests { events.push(event_slot_2_2.clone()); // Randomize the events so we insert in random orders in the DB events.shuffle(&mut thread_rng()); - // println!("inserting events:"); - // for evt in events.iter() { - // println!("{:?}", evt); - // } - // println!("{}", "#".repeat(32)); cache.insert_multi_it(events.into_iter()); - // println!("db iter all:"); - // for (k, v) in cache.iter_all(None) { - // println!("k: {:?}, v: {:?}", k, v); - // } - // println!("{}", "#".repeat(32)); - let mut filter_1 = EventFilter { - start: None, // Some(Slot::new(2, 0)), + start: None, end: None, emitter_address: Some(emit_addr_1), original_caller_address: None, @@ -1447,6 +1435,7 @@ mod tests { assert_eq!(*event.context.call_stack.back().unwrap(), emit_addr_1) }); + // filter with emit_addr_2 { filter_1.emitter_address = Some(emit_addr_2); let (_, filtered_events_2) = cache.get_filtered_sc_output_events(&filter_1); @@ -1455,11 +1444,13 @@ mod tests { assert_eq!(*event.context.call_stack.back().unwrap(), emit_addr_2) }); } + // filter with dummy_addr { filter_1.emitter_address = Some(dummy_addr); let (_, filtered_events_2) = cache.get_filtered_sc_output_events(&filter_1); 
assert_eq!(filtered_events_2.len(), 0); } + // filter with address that is not in the DB { filter_1.emitter_address = Some(emit_addr_unknown); let (_, filtered_events_2) = cache.get_filtered_sc_output_events(&filter_1); @@ -1537,22 +1528,10 @@ mod tests { events.push(event_slot_2_2.clone()); // Randomize the events so we insert in random orders in the DB events.shuffle(&mut thread_rng()); - // println!("inserting events:"); - // for evt in events.iter() { - // println!("{:?}", evt); - // } - // println!("{}", "#".repeat(32)); - cache.insert_multi_it(events.into_iter()); - // println!("db iter all:"); - // for (k, v) in cache.iter_all(None) { - // println!("k: {:?}, v: {:?}", k, v); - // } - // println!("{}", "#".repeat(32)); - let mut filter_1 = EventFilter { - start: None, // Some(Slot::new(2, 0)), + start: None, end: None, emitter_address: None, original_caller_address: Some(caller_addr_1), @@ -1658,20 +1637,8 @@ mod tests { events.push(event_slot_2_2.clone()); // Randomize the events so we insert in random orders in the DB events.shuffle(&mut thread_rng()); - // println!("inserting events:"); - // for evt in events.iter() { - // println!("{:?}", evt); - // } - // println!("{}", "#".repeat(32)); - cache.insert_multi_it(events.into_iter()); - // println!("db iter all:"); - // for (k, v) in cache.iter_all(None) { - // println!("k: {:?}, v: {:?}", k, v); - // } - // println!("{}", "#".repeat(32)); - let filter_1 = EventFilter { start: None, // Some(Slot::new(2, 0)), end: None, @@ -1688,33 +1655,11 @@ mod tests { assert!(filtered_events_1[0].context.is_error); assert_eq!(filtered_events_1[0].context.slot, slot_2); assert_eq!(filtered_events_1[0].context.index_in_slot, index_2_2); - - // filtered_events_1 - // .iter() - // .enumerate() - // .for_each(|(_i, event)| { - // assert_eq!(event.context.slot, slot_1); - // assert_eq!(*event.context.call_stack.front().unwrap(), emit_addr_1); - // }); - - /* - { - filter_1.original_caller_address = Some(emit_addr_2); - let 
filtered_events_2 = cache - .get_filtered_sc_output_events(&filter_1); - assert_eq!(filtered_events_2.len(), threshold + 1 + 2); - filtered_events_2 - .iter() - .enumerate() - .for_each(|(_i, event)| { - assert_eq!(*event.context.call_stack.front().unwrap(), emit_addr_2); - }); - } - */ } + #[test] #[serial] - fn test_filter_optim() { + fn test_filter_optimisations() { // Test we iterate over the right number of rows when filtering let mut cache = setup(); @@ -1815,7 +1760,7 @@ mod tests { println!("threshold: {:?}", threshold); println!("query_counts: {:?}", query_counts); - // Check that we iter no more then needed (here: only 2 (== threshold) event with emit addr 1) + // Check that we iter no more than needed (here: only 2 (== threshold) event with emit addr 1) assert_eq!(query_counts[0], threshold as u64); // For second filter (is_error) we could have iter more (all events have is_error = true) // but as soon as we found 2 items we could return (as the previous filter already limit the final count) From db4aa41883f0d1966bc945fc87ddc6ba5c5842d9 Mon Sep 17 00:00:00 2001 From: sydhds Date: Fri, 29 Nov 2024 11:08:19 +0100 Subject: [PATCH 29/40] Cargo clippy fixes --- massa-event-cache/src/event_cache.rs | 67 +++++++++++++--------------- massa-node/src/main.rs | 2 +- 2 files changed, 32 insertions(+), 37 deletions(-) diff --git a/massa-event-cache/src/event_cache.rs b/massa-event-cache/src/event_cache.rs index c994021d3eb..29c7e701ed3 100644 --- a/massa-event-cache/src/event_cache.rs +++ b/massa-event-cache/src/event_cache.rs @@ -66,9 +66,9 @@ impl DbKeyBuilder { /// Low level key builder function /// There is no guarantees that the key will be unique - /// Recommended to use high level function like: - /// `key_from_event`, `prefix_key_from_indent`, - /// `prefix_key_from_filter_item` or `counter_key_from_filter_item` + /// Recommended to use high level function like: + /// `key_from_event`, `prefix_key_from_indent`, + /// `prefix_key_from_filter_item` or 
`counter_key_from_filter_item` fn key( &self, indent: &KeyIndent, @@ -76,7 +76,6 @@ impl DbKeyBuilder { _is_prefix: bool, is_counter: bool, ) -> Vec { - let mut key_base = if is_counter { vec![u8::from(KeyIndent::Counter), u8::from(*indent)] } else { @@ -122,7 +121,6 @@ impl DbKeyBuilder { is_prefix: bool, is_counter: bool, ) -> Option> { - // High level key builder function // Db format: // * Regular keys: @@ -132,12 +130,12 @@ impl DbKeyBuilder { // * Emitter address prefix key: [Counter indent][Emitter Address Indent][Addr][Addr len] // * Counter keys: // * Emitter address counter key: [Counter indent][Emitter Address Indent][Addr][Addr len][Event key] -> u64 - + let key = match indent { KeyIndent::Event => { let item = KeyBuilderType::Event(&event.context.slot, event.context.index_in_slot); Some(self.key(indent, item, is_prefix, is_counter)) - }, + } KeyIndent::EmitterAddress => { if let Some(addr) = event.context.call_stack.back() { let item = KeyBuilderType::Address(addr); @@ -151,7 +149,7 @@ impl DbKeyBuilder { } else { None } - }, + } KeyIndent::OriginalCallerAddress => { if let Some(addr) = event.context.call_stack.front() { let item = KeyBuilderType::Address(addr); @@ -165,7 +163,7 @@ impl DbKeyBuilder { } else { None } - }, + } KeyIndent::OriginalOperationId => { if let Some(op_id) = event.context.origin_operation_id.as_ref() { let item = KeyBuilderType::OperationId(op_id); @@ -180,7 +178,7 @@ impl DbKeyBuilder { } else { None } - }, + } KeyIndent::IsError => { let item = KeyBuilderType::Bool(event.context.is_error); let mut key = self.key(indent, item, is_prefix, is_counter); @@ -189,7 +187,7 @@ impl DbKeyBuilder { key.extend(self.key(&KeyIndent::Event, item, false, false)); } Some(key) - }, + } _ => unreachable!(), }; @@ -523,8 +521,11 @@ impl EventCache { .collect::>>(); let res = self.db.multi_get(multi_args); - debug!("Filter will try to deserialize to SCOutputEvent {} values", res.len()); - + debug!( + "Filter will try to deserialize to 
SCOutputEvent {} values", + res.len() + ); + let events = res .into_iter() .map(|value| { @@ -547,7 +548,6 @@ impl EventCache { result: &mut BTreeSet>, seen: Option<&BTreeSet>>, ) -> u64 { - let mut query_count: u64 = 0; if *indent == KeyIndent::Event { @@ -585,7 +585,6 @@ impl EventCache { #[allow(clippy::manual_flatten)] for kvb in self.db.iterator_opt(IteratorMode::Start, opts) { if let Ok(kvb) = kvb { - if !kvb.0.starts_with(&[*indent as u8]) { // Stop as soon as our key does not start with the right indent break; @@ -741,7 +740,6 @@ impl EventCache { /// Try to remove some entries from the db fn snip(&mut self, snip_amount: Option) { - let mut iter = self.db.iterator(IteratorMode::Start); let mut batch = WriteBatch::default(); let mut snipped_count: usize = 0; @@ -830,7 +828,7 @@ impl EventCache { // delete the key and reduce entry_count self.db.write(batch).expect(CRUD_ERROR); self.entry_count = self.entry_count.saturating_sub(snipped_count); - + // delete key counters where value == 0 let mut batch_counters = WriteBatch::default(); const U64_ZERO_BYTES: [u8; 8] = 0u64.to_be_bytes(); @@ -1304,9 +1302,9 @@ mod tests { // Randomize the events so we insert in random orders in the DB events.shuffle(&mut thread_rng()); cache.insert_multi_it(events.into_iter()); - + let mut filter_1 = EventFilter { - start: None, + start: None, end: None, emitter_address: None, original_caller_address: None, @@ -1418,7 +1416,7 @@ mod tests { cache.insert_multi_it(events.into_iter()); let mut filter_1 = EventFilter { - start: None, + start: None, end: None, emitter_address: Some(emit_addr_1), original_caller_address: None, @@ -1656,7 +1654,7 @@ mod tests { assert_eq!(filtered_events_1[0].context.slot, slot_2); assert_eq!(filtered_events_1[0].context.index_in_slot, index_2_2); } - + #[test] #[serial] fn test_filter_optimisations() { @@ -1727,7 +1725,7 @@ mod tests { // Randomize the events so we insert in random orders in the DB events.shuffle(&mut thread_rng()); 
cache.insert_multi_it(events.into_iter()); - + // Check if we correctly count the number of events in the DB with emit_addr_1 & emit_addr_2 let emit_addr_1_count = cache .filter_item_estimate_count( @@ -1741,26 +1739,23 @@ mod tests { &FilterItem::EmitterAddress(emit_addr_2), ) .unwrap(); - - + assert_eq!(emit_addr_1_count, (threshold) as u64); assert_eq!(emit_addr_2_count, (threshold + 1 + 2) as u64); - + // Check if we query first by emitter address then is_error - - let filter_1 = { - let mut filter = EventFilter::default(); - filter.emitter_address = Some(emit_addr_1); - filter.is_error = Some(true); - filter + + let filter_1 = EventFilter { + emitter_address: Some(emit_addr_1), + is_error: Some(true), + ..Default::default() }; - - let (query_counts, _filtered_events_1) = cache - .get_filtered_sc_output_events(&filter_1); + + let (query_counts, _filtered_events_1) = cache.get_filtered_sc_output_events(&filter_1); println!("threshold: {:?}", threshold); println!("query_counts: {:?}", query_counts); - - // Check that we iter no more than needed (here: only 2 (== threshold) event with emit addr 1) + + // Check that we iter no more than needed (here: only 2 (== threshold) event with emit addr 1) assert_eq!(query_counts[0], threshold as u64); // For second filter (is_error) we could have iter more (all events have is_error = true) // but as soon as we found 2 items we could return (as the previous filter already limit the final count) diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index e397e1c0633..fec0cb4a7ed 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -489,7 +489,7 @@ async fn launch( // The value remains for future use & limitations max_call_stack_length: u16::MAX, - max_events_per_operation: MAX_EVENT_PER_OPERATION as u64, + max_events_per_operation: MAX_EVENT_PER_OPERATION as u64, max_operations_per_block: MAX_OPERATIONS_PER_BLOCK as u64, max_events_per_query: MAX_EVENTS_PER_QUERY, tick_delay: 
Duration::from_millis(EVENT_CACHE_TICK_DELAY), From 21e3e6241c56be4978fccbc72a3075479ebff8df Mon Sep 17 00:00:00 2001 From: sydhds Date: Fri, 29 Nov 2024 11:31:44 +0100 Subject: [PATCH 30/40] Use ..Default::default in TU --- massa-event-cache/src/event_cache.rs | 50 +++++----------------------- 1 file changed, 8 insertions(+), 42 deletions(-) diff --git a/massa-event-cache/src/event_cache.rs b/massa-event-cache/src/event_cache.rs index 29c7e701ed3..097a4ad34b3 100644 --- a/massa-event-cache/src/event_cache.rs +++ b/massa-event-cache/src/event_cache.rs @@ -121,7 +121,6 @@ impl DbKeyBuilder { is_prefix: bool, is_counter: bool, ) -> Option> { - // High level key builder function // Db format: // * Regular keys: // * Event key: [Event Indent][Slot][Index] -> Event value: Event serialized @@ -171,7 +170,6 @@ impl DbKeyBuilder { let item = KeyBuilderType::Event(&event.context.slot, event.context.index_in_slot); if !is_prefix && !is_counter { - // key.extend(self.key_from_item(indent, item, false, false)); key.extend(self.key(&KeyIndent::Event, item, false, false)); } Some(key) @@ -196,13 +194,11 @@ impl DbKeyBuilder { /// Prefix key to iterate over all events / emitter_address / ... fn prefix_key_from_indent(&self, indent: &KeyIndent) -> Vec { - // High level key builder function self.key(indent, KeyBuilderType::None, false, false) } /// Prefix key to iterate over specific emitter_address / operation_id / ... 
fn prefix_key_from_filter_item(&self, filter_item: &FilterItem, indent: &KeyIndent) -> Vec { - // High level key builder function match (indent, filter_item) { (KeyIndent::Event, FilterItem::SlotStartEnd(_start, _end)) => { unimplemented!() @@ -944,8 +940,8 @@ mod tests { use tempfile::TempDir; // internal use massa_models::config::{ - MAX_EVENTS_PER_QUERY, MAX_EVENT_DATA_SIZE, MAX_OPERATIONS_PER_BLOCK, - MAX_RECURSIVE_CALLS_DEPTH, THREAD_COUNT, + MAX_EVENTS_PER_QUERY, MAX_EVENT_DATA_SIZE, MAX_EVENT_PER_OPERATION, + MAX_OPERATIONS_PER_BLOCK, MAX_RECURSIVE_CALLS_DEPTH, THREAD_COUNT, }; use massa_models::operation::OperationId; use massa_models::output_event::EventExecutionContext; @@ -960,9 +956,7 @@ mod tests { THREAD_COUNT, MAX_RECURSIVE_CALLS_DEPTH, MAX_EVENT_DATA_SIZE as u64, - // TODO: rebase - // MAX_EVENT_PER_OPERATION as u64, - 25u64, + MAX_EVENT_PER_OPERATION as u64, MAX_OPERATIONS_PER_BLOCK as u64, MAX_EVENTS_PER_QUERY, ) @@ -1222,21 +1216,14 @@ mod tests { let filter_1 = EventFilter { start: Some(Slot::new(2, 0)), - end: None, - emitter_address: None, - original_caller_address: None, - original_operation_id: None, - is_final: None, - is_error: None, + ..Default::default() }; let (_, filtered_events_1) = cache.get_filtered_sc_output_events(&filter_1); assert_eq!(filtered_events_1.len(), 2); - // println!("filtered_events_1[0]: {:?}", filtered_events_1[0]); assert_eq!(filtered_events_1[0].context.slot, slot_2); assert_eq!(filtered_events_1[0].context.index_in_slot, index_2_1); - // println!("filtered_events_1[1]: {:?}", filtered_events_1[1]); assert_eq!(filtered_events_1[1].context.slot, slot_2); assert_eq!(filtered_events_1[1].context.index_in_slot, index_2_2); } @@ -1304,13 +1291,8 @@ mod tests { cache.insert_multi_it(events.into_iter()); let mut filter_1 = EventFilter { - start: None, - end: None, - emitter_address: None, - original_caller_address: None, original_operation_id: Some(op_id_1), - is_final: None, - is_error: None, + ..Default::default() }; 
let (_, filtered_events_1) = cache.get_filtered_sc_output_events(&filter_1); @@ -1416,13 +1398,8 @@ mod tests { cache.insert_multi_it(events.into_iter()); let mut filter_1 = EventFilter { - start: None, - end: None, emitter_address: Some(emit_addr_1), - original_caller_address: None, - original_operation_id: None, - is_final: None, - is_error: None, + ..Default::default() }; let (_, filtered_events_1) = cache.get_filtered_sc_output_events(&filter_1); @@ -1529,13 +1506,8 @@ mod tests { cache.insert_multi_it(events.into_iter()); let mut filter_1 = EventFilter { - start: None, - end: None, - emitter_address: None, original_caller_address: Some(caller_addr_1), - original_operation_id: None, - is_final: None, - is_error: None, + ..Default::default() }; let (_, filtered_events_1) = cache.get_filtered_sc_output_events(&filter_1); @@ -1638,17 +1610,11 @@ mod tests { cache.insert_multi_it(events.into_iter()); let filter_1 = EventFilter { - start: None, // Some(Slot::new(2, 0)), - end: None, - emitter_address: None, - original_caller_address: None, - original_operation_id: None, - is_final: None, is_error: Some(true), + ..Default::default() }; let (_, filtered_events_1) = cache.get_filtered_sc_output_events(&filter_1); - assert_eq!(filtered_events_1.len(), 1); assert!(filtered_events_1[0].context.is_error); assert_eq!(filtered_events_1[0].context.slot, slot_2); From dc817b7c2598179798802b08b17b57d12362e722 Mon Sep 17 00:00:00 2001 From: sydhds Date: Tue, 10 Dec 2024 14:55:50 +0100 Subject: [PATCH 31/40] Cargo clippy fix --- massa-execution-worker/src/execution.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index 4bf37a961e7..4256f25eb37 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -16,7 +16,6 @@ use crate::stats::ExecutionStatsCounter; use crate::storage_backend::StorageBackend; use massa_async_pool::AsyncMessage; use 
massa_deferred_calls::DeferredCall; -use massa_event_cache::config::EventCacheConfig; use massa_event_cache::controller::EventCacheController; use massa_execution_exports::{ ExecutedBlockInfo, ExecutionBlockMetadata, ExecutionChannels, ExecutionConfig, ExecutionError, From a972ba97c356e80418b23d273715cc6913b79ef4 Mon Sep 17 00:00:00 2001 From: sydhds Date: Tue, 10 Dec 2024 15:08:20 +0100 Subject: [PATCH 32/40] Use scope --- massa-event-cache/src/controller.rs | 109 +++++++++++++++------------- 1 file changed, 58 insertions(+), 51 deletions(-) diff --git a/massa-event-cache/src/controller.rs b/massa-event-cache/src/controller.rs index 573537373b5..ee7b1af9550 100644 --- a/massa-event-cache/src/controller.rs +++ b/massa-event-cache/src/controller.rs @@ -62,65 +62,72 @@ impl EventCacheController for EventCacheControllerImpl { } fn get_filtered_sc_output_events(&self, filter: &EventFilter) -> Vec { - let lock_0 = self.input_data.1.lock(); - #[allow(clippy::unnecessary_filter_map)] - let it = lock_0.events.iter().filter_map(|event| { - if let Some(start) = filter.start { - if event.context.slot < start { - return None; + let mut res_0 = { + // Read from new events first + let lock_0 = self.input_data.1.lock(); + #[allow(clippy::unnecessary_filter_map)] + let it = lock_0.events.iter().filter_map(|event| { + if let Some(start) = filter.start { + if event.context.slot < start { + return None; + } } - } - if let Some(end) = filter.end { - if event.context.slot >= end { - return None; + if let Some(end) = filter.end { + if event.context.slot >= end { + return None; + } } - } - if let Some(is_final) = filter.is_final { - if event.context.is_final != is_final { - return None; + if let Some(is_final) = filter.is_final { + if event.context.is_final != is_final { + return None; + } } - } - if let Some(is_error) = filter.is_error { - if event.context.is_error != is_error { - return None; + if let Some(is_error) = filter.is_error { + if event.context.is_error != is_error { + return 
None; + } } - } - match ( - filter.original_caller_address, - event.context.call_stack.front(), - ) { - (Some(addr1), Some(addr2)) if addr1 != *addr2 => return None, - (Some(_), None) => return None, - _ => (), - } - match (filter.emitter_address, event.context.call_stack.back()) { - (Some(addr1), Some(addr2)) if addr1 != *addr2 => return None, - (Some(_), None) => return None, - _ => (), - } - match ( - filter.original_operation_id, - event.context.origin_operation_id, - ) { - (Some(addr1), Some(addr2)) if addr1 != addr2 => return None, - (Some(_), None) => return None, - _ => (), - } - Some(event) - }); - - let mut res_0: BTreeSet = it.cloned().collect(); - // Drop the lock on the queue as soon as possible to avoid deadlocks - drop(lock_0); + match ( + filter.original_caller_address, + event.context.call_stack.front(), + ) { + (Some(addr1), Some(addr2)) if addr1 != *addr2 => return None, + (Some(_), None) => return None, + _ => (), + } + match (filter.emitter_address, event.context.call_stack.back()) { + (Some(addr1), Some(addr2)) if addr1 != *addr2 => return None, + (Some(_), None) => return None, + _ => (), + } + match ( + filter.original_operation_id, + event.context.origin_operation_id, + ) { + (Some(addr1), Some(addr2)) if addr1 != addr2 => return None, + (Some(_), None) => return None, + _ => (), + } + Some(event) + }); - let lock = self.cache.read(); + let res_0: BTreeSet = it.cloned().collect(); + // Drop the lock on the queue as soon as possible to avoid deadlocks + drop(lock_0); + res_0 + }; - let (_, res_1) = lock.get_filtered_sc_output_events(filter); - // Drop the lock on the event cache db asap - drop(lock); + let res_1 = { + // Read from db (on disk) events + let lock = self.cache.read(); + let (_, res_1) = lock.get_filtered_sc_output_events(filter); + // Drop the lock on the event cache db asap + drop(lock); + res_1 + }; + // Merge results let res_1: BTreeSet = BTreeSet::from_iter(res_1); - res_0.extend(res_1); Vec::from_iter(res_0) } From 
c812679383e26f14e977d4af49d688f370ec6561 Mon Sep 17 00:00:00 2001 From: sydhds Date: Tue, 10 Dec 2024 15:11:04 +0100 Subject: [PATCH 33/40] Use scope 2 --- massa-event-cache/src/worker.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/massa-event-cache/src/worker.rs b/massa-event-cache/src/worker.rs index d8fe018f58e..3a4e8f20faa 100644 --- a/massa-event-cache/src/worker.rs +++ b/massa-event-cache/src/worker.rs @@ -85,9 +85,12 @@ impl EventCacheWriterThread { break; } - let mut lock = self.cache.write(); - lock.insert_multi_it(input_data.events.into_iter()); - drop(lock); + { + let mut lock = self.cache.write(); + lock.insert_multi_it(input_data.events.into_iter()); + // drop the lock as early as possible + drop(lock); + } } } } From 20af46e221068d5241c5ee9c42a261e9dd16993d Mon Sep 17 00:00:00 2001 From: sydhds Date: Tue, 10 Dec 2024 16:00:12 +0100 Subject: [PATCH 34/40] Remove tick_delay + directly mem::take struct --- massa-event-cache/src/controller.rs | 4 +++- massa-event-cache/src/worker.rs | 11 +++++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/massa-event-cache/src/controller.rs b/massa-event-cache/src/controller.rs index ee7b1af9550..7ed514c6608 100644 --- a/massa-event-cache/src/controller.rs +++ b/massa-event-cache/src/controller.rs @@ -9,7 +9,7 @@ use massa_models::execution::EventFilter; use massa_models::output_event::SCOutputEvent; /// structure used to communicate with controller -#[derive(Debug)] +#[derive(Debug, Default)] pub(crate) struct EventCacheWriterInputData { /// set stop to true to stop the thread pub stop: bool, @@ -24,6 +24,7 @@ impl EventCacheWriterInputData { } } + /* /// Takes the current input data into a clone that is returned, /// and resets self. 
pub fn take(&mut self) -> Self { @@ -32,6 +33,7 @@ impl EventCacheWriterInputData { events: std::mem::take(&mut self.events), } } + */ } /// interface that communicates with the worker thread diff --git a/massa-event-cache/src/worker.rs b/massa-event-cache/src/worker.rs index 3a4e8f20faa..a499abaaeb1 100644 --- a/massa-event-cache/src/worker.rs +++ b/massa-event-cache/src/worker.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use std::thread; use std::time::Duration; // third-party -use massa_time::MassaTime; +// use massa_time::MassaTime; use parking_lot::{Condvar, Mutex, RwLock}; use tracing::{debug, info}; // internal @@ -19,7 +19,7 @@ pub(crate) struct EventCacheWriterThread { input_data: Arc<(Condvar, Mutex)>, /// Event cache cache: Arc>, - tick_delay: Duration, + _tick_delay: Duration, } impl EventCacheWriterThread { @@ -31,7 +31,7 @@ impl EventCacheWriterThread { Self { input_data, cache: event_cache, - tick_delay, + _tick_delay: tick_delay, } } @@ -46,7 +46,7 @@ impl EventCacheWriterThread { let mut input_data_lock = self.input_data.1.lock(); // take current input data, resetting it - let input_data: EventCacheWriterInputData = input_data_lock.take(); + let input_data: EventCacheWriterInputData = std::mem::take(&mut input_data_lock); // Check if there is some input data if !input_data.events.is_empty() { @@ -58,6 +58,8 @@ impl EventCacheWriterThread { return (input_data, true); } + // Should not be needed - will be removed after careful testing + /* // Wait until deadline let now = MassaTime::now(); let wakeup_deadline = @@ -68,6 +70,7 @@ impl EventCacheWriterThread { .estimate_instant() .expect("could not estimate instant"), ); + */ } } From 59f9300d3fb6ad6ea745a73c344deb5c75166896 Mon Sep 17 00:00:00 2001 From: sydhds Date: Tue, 10 Dec 2024 16:22:08 +0100 Subject: [PATCH 35/40] Add tu for counter removal --- massa-event-cache/src/event_cache.rs | 64 ++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) diff --git a/massa-event-cache/src/event_cache.rs 
b/massa-event-cache/src/event_cache.rs index 097a4ad34b3..136c658e1ea 100644 --- a/massa-event-cache/src/event_cache.rs +++ b/massa-event-cache/src/event_cache.rs @@ -1162,6 +1162,70 @@ mod tests { assert_eq!(cache.iter_all(None).count(), 0); } + #[test] + #[serial] + fn test_counter_0() { + // Test snip so we enforce that all db keys are removed + + let mut cache = setup(); + cache.max_entry_count = 10; + + let dummy_addr = + Address::from_str("AU12qePoXhNbYWE1jZuafqJong7bbq1jw3k89RgbMawbrdZpaasoA").unwrap(); + let emit_addr_1 = + Address::from_str("AU122Em8qkqegdLb1eyH8rdkSCNEf7RZLeTJve4Q2inRPGiTJ2xNv").unwrap(); + let emit_addr_2 = + Address::from_str("AU12WuVR1Td74q9eAbtYZUnk5jnRbUuUacyhQFwm217bV5v1mNqTZ").unwrap(); + + let event = SCOutputEvent { + context: EventExecutionContext { + slot: Slot::new(1, 0), + block: None, + read_only: false, + index_in_slot: 0, + call_stack: VecDeque::from(vec![dummy_addr, emit_addr_1]), + origin_operation_id: None, + is_final: true, + is_error: false, + }, + data: "message foo bar".to_string(), + }; + + let event_2 = { + let mut evt = event.clone(); + evt.context.slot = Slot::new(2, 0); + evt.context.call_stack = VecDeque::from(vec![dummy_addr, emit_addr_2]); + evt + }; + + cache.insert_multi_it([event, event_2].into_iter()); + + let key_counter_1 = cache.key_builder.counter_key_from_filter_item( + &FilterItem::EmitterAddress(emit_addr_1), + &KeyIndent::EmitterAddress, + ); + let key_counter_2 = cache.key_builder.counter_key_from_filter_item( + &FilterItem::EmitterAddress(emit_addr_2), + &KeyIndent::EmitterAddress, + ); + + let v1 = cache.db.get(key_counter_1.clone()); + let v2 = cache.db.get(key_counter_2.clone()); + + // println!("v1: {:?} - v2: {:?}", v1, v2); + assert_eq!(v1, Ok(Some(1u64.to_be_bytes().to_vec()))); + assert_eq!(v2, Ok(Some(1u64.to_be_bytes().to_vec()))); + + cache.snip(Some(1)); + + let v1 = cache.db.get(key_counter_1); + let v2 = cache.db.get(key_counter_2); + + // println!("v1: {:?} - v2: {:?}", v1, v2); 
+ assert_eq!(v1, Ok(None)); // counter has been removed + assert_eq!(v2, Ok(Some(1u64.to_be_bytes().to_vec()))); + } + #[test] #[serial] fn test_event_filter() { From f9d4cf9071b3b65aa0368551265aba8f6737bbbc Mon Sep 17 00:00:00 2001 From: sydhds Date: Wed, 11 Dec 2024 10:49:05 +0100 Subject: [PATCH 36/40] Add KeyKind in KeyBuilder --- massa-event-cache/src/event_cache.rs | 168 +++++++++++++++------------ 1 file changed, 96 insertions(+), 72 deletions(-) diff --git a/massa-event-cache/src/event_cache.rs b/massa-event-cache/src/event_cache.rs index 136c658e1ea..8f72283c656 100644 --- a/massa-event-cache/src/event_cache.rs +++ b/massa-event-cache/src/event_cache.rs @@ -42,7 +42,7 @@ enum KeyIndent { IsFinal, } -/// Key type that we want to get +/// Key for this type of data that we want to get enum KeyBuilderType<'a> { Slot(&'a Slot), Event(&'a Slot, u64), @@ -52,6 +52,12 @@ enum KeyBuilderType<'a> { None, } +enum KeyKind { + Regular, + Prefix, + Counter, +} + struct DbKeyBuilder { /// Operation Id Serializer op_id_ser: OperationIdSerializer, @@ -69,14 +75,8 @@ impl DbKeyBuilder { /// Recommended to use high level function like: /// `key_from_event`, `prefix_key_from_indent`, /// `prefix_key_from_filter_item` or `counter_key_from_filter_item` - fn key( - &self, - indent: &KeyIndent, - key_type: KeyBuilderType, - _is_prefix: bool, - is_counter: bool, - ) -> Vec { - let mut key_base = if is_counter { + fn key(&self, indent: &KeyIndent, key_type: KeyBuilderType, key_kind: &KeyKind) -> Vec { + let mut key_base = if matches!(key_kind, KeyKind::Counter) { vec![u8::from(KeyIndent::Counter), u8::from(*indent)] } else { vec![u8::from(*indent)] @@ -118,31 +118,30 @@ impl DbKeyBuilder { &self, event: &SCOutputEvent, indent: &KeyIndent, - is_prefix: bool, - is_counter: bool, + key_kind: &KeyKind, ) -> Option> { // Db format: // * Regular keys: // * Event key: [Event Indent][Slot][Index] -> Event value: Event serialized // * Emitter address key: [Emitter Address Indent][Addr][Addr 
len][Event key] -> [] // * Prefix keys: - // * Emitter address prefix key: [Counter indent][Emitter Address Indent][Addr][Addr len] + // * Emitter address prefix key: [Emitter Address Indent][Addr][Addr len] // * Counter keys: // * Emitter address counter key: [Counter indent][Emitter Address Indent][Addr][Addr len][Event key] -> u64 let key = match indent { KeyIndent::Event => { let item = KeyBuilderType::Event(&event.context.slot, event.context.index_in_slot); - Some(self.key(indent, item, is_prefix, is_counter)) + Some(self.key(indent, item, key_kind)) } KeyIndent::EmitterAddress => { if let Some(addr) = event.context.call_stack.back() { let item = KeyBuilderType::Address(addr); - let mut key = self.key(indent, item, is_prefix, is_counter); + let mut key = self.key(indent, item, key_kind); let item = KeyBuilderType::Event(&event.context.slot, event.context.index_in_slot); - if !is_prefix && !is_counter { - key.extend(self.key(&KeyIndent::Event, item, false, false)); + if matches!(key_kind, KeyKind::Regular) { + key.extend(self.key(&KeyIndent::Event, item, &KeyKind::Regular)); } Some(key) } else { @@ -152,11 +151,11 @@ impl DbKeyBuilder { KeyIndent::OriginalCallerAddress => { if let Some(addr) = event.context.call_stack.front() { let item = KeyBuilderType::Address(addr); - let mut key = self.key(indent, item, is_prefix, is_counter); + let mut key = self.key(indent, item, key_kind); let item = KeyBuilderType::Event(&event.context.slot, event.context.index_in_slot); - if !is_prefix && !is_counter { - key.extend(self.key(&KeyIndent::Event, item, false, false)); + if matches!(key_kind, KeyKind::Regular) { + key.extend(self.key(&KeyIndent::Event, item, &KeyKind::Regular)); } Some(key) } else { @@ -166,11 +165,11 @@ impl DbKeyBuilder { KeyIndent::OriginalOperationId => { if let Some(op_id) = event.context.origin_operation_id.as_ref() { let item = KeyBuilderType::OperationId(op_id); - let mut key = self.key(indent, item, is_prefix, is_counter); + let mut key = 
self.key(indent, item, key_kind); let item = KeyBuilderType::Event(&event.context.slot, event.context.index_in_slot); - if !is_prefix && !is_counter { - key.extend(self.key(&KeyIndent::Event, item, false, false)); + if matches!(key_kind, KeyKind::Regular) { + key.extend(self.key(&KeyIndent::Event, item, &KeyKind::Regular)); } Some(key) } else { @@ -179,10 +178,10 @@ impl DbKeyBuilder { } KeyIndent::IsError => { let item = KeyBuilderType::Bool(event.context.is_error); - let mut key = self.key(indent, item, is_prefix, is_counter); + let mut key = self.key(indent, item, key_kind); let item = KeyBuilderType::Event(&event.context.slot, event.context.index_in_slot); - if !is_prefix && !is_counter { - key.extend(self.key(&KeyIndent::Event, item, false, false)); + if matches!(key_kind, KeyKind::Regular) { + key.extend(self.key(&KeyIndent::Event, item, &KeyKind::Regular)); } Some(key) } @@ -194,7 +193,7 @@ impl DbKeyBuilder { /// Prefix key to iterate over all events / emitter_address / ... fn prefix_key_from_indent(&self, indent: &KeyIndent) -> Vec { - self.key(indent, KeyBuilderType::None, false, false) + self.key(indent, KeyBuilderType::None, &KeyKind::Regular) } /// Prefix key to iterate over specific emitter_address / operation_id / ... 
@@ -204,22 +203,22 @@ impl DbKeyBuilder { unimplemented!() } (KeyIndent::Event, FilterItem::SlotStart(start)) => { - self.key(indent, KeyBuilderType::Slot(start), true, false) + self.key(indent, KeyBuilderType::Slot(start), &KeyKind::Prefix) } (KeyIndent::Event, FilterItem::SlotEnd(end)) => { - self.key(indent, KeyBuilderType::Slot(end), true, false) + self.key(indent, KeyBuilderType::Slot(end), &KeyKind::Prefix) } (KeyIndent::EmitterAddress, FilterItem::EmitterAddress(addr)) => { - self.key(indent, KeyBuilderType::Address(addr), true, false) + self.key(indent, KeyBuilderType::Address(addr), &KeyKind::Prefix) } (KeyIndent::OriginalCallerAddress, FilterItem::OriginalCallerAddress(addr)) => { - self.key(indent, KeyBuilderType::Address(addr), true, false) + self.key(indent, KeyBuilderType::Address(addr), &KeyKind::Prefix) } (KeyIndent::OriginalOperationId, FilterItem::OriginalOperationId(op_id)) => { - self.key(indent, KeyBuilderType::OperationId(op_id), true, false) + self.key(indent, KeyBuilderType::OperationId(op_id), &KeyKind::Prefix) } (KeyIndent::IsError, FilterItem::IsError(v)) => { - self.key(indent, KeyBuilderType::Bool(*v), true, false) + self.key(indent, KeyBuilderType::Bool(*v), &KeyKind::Prefix) } _ => { unreachable!() @@ -239,22 +238,24 @@ impl DbKeyBuilder { unimplemented!() } (KeyIndent::Event, FilterItem::SlotStart(start)) => { - self.key(indent, KeyBuilderType::Slot(start), false, true) + self.key(indent, KeyBuilderType::Slot(start), &KeyKind::Counter) } (KeyIndent::Event, FilterItem::SlotEnd(end)) => { - self.key(indent, KeyBuilderType::Slot(end), false, true) + self.key(indent, KeyBuilderType::Slot(end), &KeyKind::Counter) } (KeyIndent::EmitterAddress, FilterItem::EmitterAddress(addr)) => { - self.key(indent, KeyBuilderType::Address(addr), false, true) + self.key(indent, KeyBuilderType::Address(addr), &KeyKind::Counter) } (KeyIndent::OriginalCallerAddress, FilterItem::OriginalCallerAddress(addr)) => { - self.key(indent, 
KeyBuilderType::Address(addr), false, true) - } - (KeyIndent::OriginalOperationId, FilterItem::OriginalOperationId(op_id)) => { - self.key(indent, KeyBuilderType::OperationId(op_id), false, true) + self.key(indent, KeyBuilderType::Address(addr), &KeyKind::Counter) } + (KeyIndent::OriginalOperationId, FilterItem::OriginalOperationId(op_id)) => self.key( + indent, + KeyBuilderType::OperationId(op_id), + &KeyKind::Counter, + ), (KeyIndent::IsError, FilterItem::IsError(v)) => { - self.key(indent, KeyBuilderType::Bool(*v), false, true) + self.key(indent, KeyBuilderType::Bool(*v), &KeyKind::Counter) } _ => { unreachable!() @@ -349,47 +350,49 @@ impl EventCache { batch.put( self.key_builder - .key_from_event(&event, &KeyIndent::Event, false, false) + .key_from_event(&event, &KeyIndent::Event, &KeyKind::Regular) .unwrap(), event_buffer, ); if let Some(key) = self.key_builder - .key_from_event(&event, &KeyIndent::EmitterAddress, false, false) + .key_from_event(&event, &KeyIndent::EmitterAddress, &KeyKind::Regular) { - let key_counter = - self.key_builder - .key_from_event(&event, &KeyIndent::EmitterAddress, false, true); + let key_counter = self.key_builder.key_from_event( + &event, + &KeyIndent::EmitterAddress, + &KeyKind::Counter, + ); batch.put(key, vec![]); let key_counter = key_counter.expect(COUNTER_KEY_CREATION_ERROR); batch.merge(key_counter, 1i64.to_be_bytes()); } - if let Some(key) = - self.key_builder - .key_from_event(&event, &KeyIndent::OriginalCallerAddress, false, false) - { + if let Some(key) = self.key_builder.key_from_event( + &event, + &KeyIndent::OriginalCallerAddress, + &KeyKind::Regular, + ) { let key_counter = self.key_builder.key_from_event( &event, &KeyIndent::OriginalCallerAddress, - false, - true, + &KeyKind::Counter, ); batch.put(key, vec![]); let key_counter = key_counter.expect(COUNTER_KEY_CREATION_ERROR); batch.merge(key_counter, 1i64.to_be_bytes()); } - if let Some(key) = - self.key_builder - .key_from_event(&event, 
&KeyIndent::OriginalOperationId, false, false) - { + if let Some(key) = self.key_builder.key_from_event( + &event, + &KeyIndent::OriginalOperationId, + &KeyKind::Regular, + ) { let key_counter = self.key_builder.key_from_event( &event, &KeyIndent::OriginalOperationId, - false, - true, + &KeyKind::Counter, ); batch.put(key, vec![]); let key_counter = key_counter.expect(COUNTER_KEY_CREATION_ERROR); @@ -399,11 +402,11 @@ impl EventCache { { if let Some(key) = self.key_builder - .key_from_event(&event, &KeyIndent::IsError, false, false) + .key_from_event(&event, &KeyIndent::IsError, &KeyKind::Regular) { let key_counter = self.key_builder - .key_from_event(&event, &KeyIndent::IsError, false, true); + .key_from_event(&event, &KeyIndent::IsError, &KeyKind::Counter); let key_counter = key_counter.expect(COUNTER_KEY_CREATION_ERROR); batch.put(key, vec![]); batch.merge(key_counter, 1i64.to_be_bytes()); @@ -763,13 +766,14 @@ impl EventCache { .unwrap(); // delete all associated key - if let Some(key) = - self.key_builder - .key_from_event(&event, &KeyIndent::EmitterAddress, false, false) - { + if let Some(key) = self.key_builder.key_from_event( + &event, + &KeyIndent::EmitterAddress, + &KeyKind::Regular, + ) { let key_counter = self .key_builder - .key_from_event(&event, &KeyIndent::EmitterAddress, false, true) + .key_from_event(&event, &KeyIndent::EmitterAddress, &KeyKind::Counter) .expect(COUNTER_ERROR); batch.delete(key); counter_keys.push(key_counter.clone()); @@ -778,12 +782,11 @@ impl EventCache { if let Some(key) = self.key_builder.key_from_event( &event, &KeyIndent::OriginalCallerAddress, - false, - false, + &KeyKind::Regular, ) { let key_counter = self .key_builder - .key_from_event(&event, &KeyIndent::OriginalCallerAddress, false, true) + .key_from_event(&event, &KeyIndent::OriginalCallerAddress, &KeyKind::Counter) .expect(COUNTER_ERROR); batch.delete(key); counter_keys.push(key_counter.clone()); @@ -793,12 +796,11 @@ impl EventCache { if let Some(key) = 
self.key_builder.key_from_event( &event, &KeyIndent::OriginalOperationId, - false, - false, + &KeyKind::Regular, ) { let key_counter = self .key_builder - .key_from_event(&event, &KeyIndent::OriginalOperationId, false, true) + .key_from_event(&event, &KeyIndent::OriginalOperationId, &KeyKind::Counter) .expect(COUNTER_ERROR); batch.delete(key); counter_keys.push(key_counter.clone()); @@ -806,11 +808,11 @@ impl EventCache { } if let Some(key) = self.key_builder - .key_from_event(&event, &KeyIndent::IsError, false, false) + .key_from_event(&event, &KeyIndent::IsError, &KeyKind::Regular) { let key_counter = self .key_builder - .key_from_event(&event, &KeyIndent::IsError, false, true) + .key_from_event(&event, &KeyIndent::IsError, &KeyKind::Counter) .expect(COUNTER_ERROR); batch.delete(key); counter_keys.push(key_counter.clone()); @@ -1200,6 +1202,28 @@ mod tests { cache.insert_multi_it([event, event_2].into_iter()); + // Check counters key length + let key_counters = cache + .key_builder + .prefix_key_from_indent(&KeyIndent::Counter); + let kvbs: Result, _> = cache + .db + .prefix_iterator(key_counters) + .take_while(|kvb| { + kvb.as_ref() + .unwrap() + .0 + .starts_with(&[u8::from(KeyIndent::Counter)]) + }) + .collect(); + // println!("kvbs: {:#?}", kvbs); + + // Expected 4 counters: + // 2 for emitter address (emit_addr_1 & emit_addr_2) + // 1 for original caller address (dummy_addr) + // 1 for is_error(false) + assert_eq!(kvbs.unwrap().len(), 4); + let key_counter_1 = cache.key_builder.counter_key_from_filter_item( &FilterItem::EmitterAddress(emit_addr_1), &KeyIndent::EmitterAddress, From e9932584ed09d28fca3063a6188d07ef9b6b1800 Mon Sep 17 00:00:00 2001 From: sydhds Date: Thu, 12 Dec 2024 09:57:41 +0100 Subject: [PATCH 37/40] Wait for condvar in wait_loop_event --- massa-event-cache/src/config.rs | 2 -- massa-event-cache/src/event_cache.rs | 6 +++--- massa-event-cache/src/worker.rs | 19 ++----------------- massa-models/src/config/constants.rs | 9 --------- 
massa-node/base_config/config.toml | 4 +++- massa-node/src/main.rs | 10 ++++------ massa-node/src/settings.rs | 2 +- 7 files changed, 13 insertions(+), 39 deletions(-) diff --git a/massa-event-cache/src/config.rs b/massa-event-cache/src/config.rs index e25d4a920a9..befad357df9 100644 --- a/massa-event-cache/src/config.rs +++ b/massa-event-cache/src/config.rs @@ -20,6 +20,4 @@ pub struct EventCacheConfig { pub max_operations_per_block: u64, /// Maximum events returned in a query pub max_events_per_query: usize, - /// Delay to wait between 2 writes in event cache writer in milliseconds - pub tick_delay: Duration, } diff --git a/massa-event-cache/src/event_cache.rs b/massa-event-cache/src/event_cache.rs index 8f72283c656..5db636c3d26 100644 --- a/massa-event-cache/src/event_cache.rs +++ b/massa-event-cache/src/event_cache.rs @@ -942,8 +942,8 @@ mod tests { use tempfile::TempDir; // internal use massa_models::config::{ - MAX_EVENTS_PER_QUERY, MAX_EVENT_DATA_SIZE, MAX_EVENT_PER_OPERATION, - MAX_OPERATIONS_PER_BLOCK, MAX_RECURSIVE_CALLS_DEPTH, THREAD_COUNT, + MAX_EVENT_DATA_SIZE, MAX_EVENT_PER_OPERATION, MAX_OPERATIONS_PER_BLOCK, + MAX_RECURSIVE_CALLS_DEPTH, THREAD_COUNT, }; use massa_models::operation::OperationId; use massa_models::output_event::EventExecutionContext; @@ -960,7 +960,7 @@ mod tests { MAX_EVENT_DATA_SIZE as u64, MAX_EVENT_PER_OPERATION as u64, MAX_OPERATIONS_PER_BLOCK as u64, - MAX_EVENTS_PER_QUERY, + 5000, // MAX_EVENTS_PER_QUERY, ) } diff --git a/massa-event-cache/src/worker.rs b/massa-event-cache/src/worker.rs index a499abaaeb1..d6eb3e76dcb 100644 --- a/massa-event-cache/src/worker.rs +++ b/massa-event-cache/src/worker.rs @@ -19,19 +19,16 @@ pub(crate) struct EventCacheWriterThread { input_data: Arc<(Condvar, Mutex)>, /// Event cache cache: Arc>, - _tick_delay: Duration, } impl EventCacheWriterThread { fn new( input_data: Arc<(Condvar, Mutex)>, event_cache: Arc>, - tick_delay: Duration, ) -> Self { Self { input_data, cache: event_cache, - _tick_delay: 
tick_delay, } } @@ -58,19 +55,7 @@ impl EventCacheWriterThread { return (input_data, true); } - // Should not be needed - will be removed after careful testing - /* - // Wait until deadline - let now = MassaTime::now(); - let wakeup_deadline = - now.saturating_add(MassaTime::from_millis(self.tick_delay.as_millis() as u64)); - let _ = self.input_data.0.wait_until( - &mut input_data_lock, - wakeup_deadline - .estimate_instant() - .expect("could not estimate instant"), - ); - */ + self.input_data.0.wait(&mut input_data_lock); } } @@ -163,7 +148,7 @@ pub fn start_event_cache_writer_worker( let thread_builder = thread::Builder::new().name("event_cache".into()); let thread_handle = thread_builder .spawn(move || { - EventCacheWriterThread::new(input_data_clone, event_cache, cfg.tick_delay).main_loop(); + EventCacheWriterThread::new(input_data_clone, event_cache).main_loop(); }) .expect("failed to spawn thread : event_cache"); diff --git a/massa-models/src/config/constants.rs b/massa-models/src/config/constants.rs index ee47b5aef01..97130583c0b 100644 --- a/massa-models/src/config/constants.rs +++ b/massa-models/src/config/constants.rs @@ -412,15 +412,6 @@ pub const DEFERRED_CALL_SLOT_OVERBOOKING_PENALTY: Amount = Amount::from_raw(1_00 /// deferred call call gas cost pub const DEFERRED_CALL_CST_GAS_COST: u64 = 750_000; -// -// Constants for event cache -// - -/// Maximum number of events that can be returned by a query -pub const MAX_EVENTS_PER_QUERY: usize = 10000; -/// Delay between writes in event cache writer thread -pub const EVENT_CACHE_TICK_DELAY: u64 = 100; - // Some checks at compile time that should not be ignored! 
#[allow(clippy::assertions_on_constants)] const _: () = { diff --git a/massa-node/base_config/config.toml b/massa-node/base_config/config.toml index 20862958936..d3cc6a726ea 100644 --- a/massa-node/base_config/config.toml +++ b/massa-node/base_config/config.toml @@ -227,6 +227,8 @@ event_cache_size = 20071520 # amount of entries removed when `event_cache_size` is reached event_snip_amount = 10 + # maximum number of events returned by a query + max_event_per_query = 7000 [ledger] # path to the initial ledger @@ -447,4 +449,4 @@ [block_dump] block_dump_folder_path = "dump/blocks" # max number of blocks to keep in the dump folder - max_blocks = 2048000 \ No newline at end of file + max_blocks = 2048000 diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index fec0cb4a7ed..7ed568df47f 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -91,10 +91,9 @@ use massa_models::config::{ DEFERRED_CALL_CST_GAS_COST, DEFERRED_CALL_GLOBAL_OVERBOOKING_PENALTY, DEFERRED_CALL_MAX_ASYNC_GAS, DEFERRED_CALL_MAX_POOL_CHANGES, DEFERRED_CALL_MIN_GAS_COST, DEFERRED_CALL_MIN_GAS_INCREMENT, DEFERRED_CALL_SLOT_OVERBOOKING_PENALTY, - EVENT_CACHE_TICK_DELAY, KEEP_EXECUTED_HISTORY_EXTRA_PERIODS, - MAX_BOOTSTRAP_FINAL_STATE_PARTS_SIZE, MAX_BOOTSTRAP_VERSIONING_ELEMENTS_SIZE, - MAX_EVENTS_PER_QUERY, MAX_EVENT_DATA_SIZE, MAX_EVENT_PER_OPERATION, MAX_MESSAGE_SIZE, - MAX_RECURSIVE_CALLS_DEPTH, MAX_RUNTIME_MODULE_CUSTOM_SECTION_DATA_LEN, + KEEP_EXECUTED_HISTORY_EXTRA_PERIODS, MAX_BOOTSTRAP_FINAL_STATE_PARTS_SIZE, + MAX_BOOTSTRAP_VERSIONING_ELEMENTS_SIZE, MAX_EVENT_DATA_SIZE, MAX_EVENT_PER_OPERATION, + MAX_MESSAGE_SIZE, MAX_RECURSIVE_CALLS_DEPTH, MAX_RUNTIME_MODULE_CUSTOM_SECTION_DATA_LEN, MAX_RUNTIME_MODULE_CUSTOM_SECTION_LEN, MAX_RUNTIME_MODULE_EXPORTS, MAX_RUNTIME_MODULE_FUNCTIONS, MAX_RUNTIME_MODULE_FUNCTION_NAME_LEN, MAX_RUNTIME_MODULE_GLOBAL_INITIALIZER, MAX_RUNTIME_MODULE_IMPORTS, MAX_RUNTIME_MODULE_MEMORIES, @@ -491,8 +490,7 @@ async fn launch( max_events_per_operation:
MAX_EVENT_PER_OPERATION as u64, max_operations_per_block: MAX_OPERATIONS_PER_BLOCK as u64, - max_events_per_query: MAX_EVENTS_PER_QUERY, - tick_delay: Duration::from_millis(EVENT_CACHE_TICK_DELAY), + max_events_per_query: SETTINGS.execution.max_event_per_query, }; let (event_cache_manager, event_cache_controller) = start_event_cache_writer_worker(event_cache_config); diff --git a/massa-node/src/settings.rs b/massa-node/src/settings.rs index ebc43c01753..6ebf309a34a 100644 --- a/massa-node/src/settings.rs +++ b/massa-node/src/settings.rs @@ -36,10 +36,10 @@ pub struct ExecutionSettings { /// slot execution traces channel capacity pub broadcast_slot_execution_traces_channel_capacity: usize, pub execution_traces_limit: usize, - pub event_cache_path: PathBuf, pub event_cache_size: usize, pub event_snip_amount: usize, + pub max_event_per_query: usize, } #[derive(Clone, Debug, Deserialize)] From 815bbe3ab8eeb5a8238ced66bc4cdf73d2a7db50 Mon Sep 17 00:00:00 2001 From: sydhds Date: Thu, 12 Dec 2024 09:59:32 +0100 Subject: [PATCH 38/40] Removed unused lib --- massa-event-cache/src/config.rs | 1 - massa-event-cache/src/worker.rs | 1 - 2 files changed, 2 deletions(-) diff --git a/massa-event-cache/src/config.rs b/massa-event-cache/src/config.rs index befad357df9..9eab23e0d5b 100644 --- a/massa-event-cache/src/config.rs +++ b/massa-event-cache/src/config.rs @@ -1,5 +1,4 @@ use std::path::PathBuf; -use std::time::Duration; pub struct EventCacheConfig { /// Path to the hard drive cache storage diff --git a/massa-event-cache/src/worker.rs b/massa-event-cache/src/worker.rs index d6eb3e76dcb..b020aa1569e 100644 --- a/massa-event-cache/src/worker.rs +++ b/massa-event-cache/src/worker.rs @@ -1,7 +1,6 @@ // std use std::sync::Arc; use std::thread; -use std::time::Duration; // third-party // use massa_time::MassaTime; use parking_lot::{Condvar, Mutex, RwLock}; From 955bb8befe945f4df5c5b0336e82cab321566315 Mon Sep 17 00:00:00 2001 From: sydhds Date: Mon, 16 Dec 2024 10:05:01 +0100 
Subject: [PATCH 39/40] Condvar wait fix --- massa-event-cache/src/controller.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/massa-event-cache/src/controller.rs b/massa-event-cache/src/controller.rs index 7ed514c6608..a4fe2148a58 100644 --- a/massa-event-cache/src/controller.rs +++ b/massa-event-cache/src/controller.rs @@ -59,8 +59,8 @@ impl EventCacheController for EventCacheControllerImpl { // lock input data let mut input_data = self.input_data.1.lock(); input_data.events.extend(events); - // wake up VM loop - self.input_data.0.notify_one(); + // Wake up the condvar in EventCacheWriterThread waiting for events + self.input_data.0.notify_all(); } fn get_filtered_sc_output_events(&self, filter: &EventFilter) -> Vec { From 0aeb191624cd5fea5a0d52fd7a08654a2199ea94 Mon Sep 17 00:00:00 2001 From: sydhds Date: Mon, 16 Dec 2024 10:31:50 +0100 Subject: [PATCH 40/40] Truncate event message in case of error --- massa-execution-worker/src/context.rs | 14 +++++++++++--- massa-execution-worker/src/execution.rs | 5 ++++- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/massa-execution-worker/src/context.rs b/massa-execution-worker/src/context.rs index bdb857b99bb..939ed73ea2f 100644 --- a/massa-execution-worker/src/context.rs +++ b/massa-execution-worker/src/context.rs @@ -305,10 +305,14 @@ impl ExecutionContext { pub fn reset_to_snapshot(&mut self, snapshot: ExecutionContextSnapshot, error: ExecutionError) { // Emit the error event. // Note that the context event counter is properly handled by event_emit (see doc). - self.event_emit(self.event_create( + let mut event = self.event_create( serde_json::json!({ "massa_execution_error": format!("{}", error) }).to_string(), true, - )); + ); + if event.data.len() > self.config.max_event_size { + event.data.truncate(self.config.max_event_size); + } + self.event_emit(event); // Reset context to snapshot. 
self.speculative_ledger @@ -1211,7 +1215,11 @@ impl ExecutionContext { ); } - let event = self.event_create(format!("DeferredCall execution fail call_id:{}", id), true); + let mut event = + self.event_create(format!("DeferredCall execution fail call_id:{}", id), true); + if event.data.len() > self.config.max_event_size { + event.data.truncate(self.config.max_event_size); + } self.event_emit(event); #[cfg(feature = "execution-info")] diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index 4256f25eb37..a11b8ddda75 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -473,7 +473,10 @@ impl ExecutionState { if let Err(err) = context.transfer_coins(Some(sender_addr), None, operation.content.fee, false) { - let error = format!("could not spend fees: {}", err); + let mut error = format!("could not spend fees: {}", err); + if error.len() > self.config.max_event_size { + error.truncate(self.config.max_event_size); + } let event = context.event_create(error.clone(), true); context.event_emit(event); return Err(ExecutionError::IncludeOperationError(error));