diff --git a/.gitignore b/.gitignore index aad79fa8b..a6bc66543 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,7 @@ data/**/*.db data/**/*.json data/**/*.txt data/**/*.rocksdb +data/rocksdb/ # E2E (Stratus) e2e/artifacts diff --git a/Cargo.lock b/Cargo.lock index 26cbaad04..942639225 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "addr2line" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" dependencies = [ "gimli", ] @@ -58,9 +58,9 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy-primitives" -version = "0.7.4" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db8aa973e647ec336810a9356af8aea787249c9d00b1525359f3db29a68d231b" +checksum = "f783611babedbbe90db3478c120fb5f5daacceffc210b39adc0af4fe0da70bad" dependencies = [ "alloy-rlp", "bytes", @@ -105,47 +105,48 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.13" +version = "0.6.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" +checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", + "is_terminal_polyfill", "utf8parse", ] [[package]] name = "anstyle" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" +checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" [[package]] name = "anstyle-parse" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" +checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.2" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" +checksum = "ad186efb764318d35165f1758e7dcef3b10628e26d41a44bc5550652e6804391" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" +checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" dependencies = [ "anstyle", "windows-sys 0.52.0", @@ -312,7 +313,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -323,7 +324,7 @@ checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -335,6 +336,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + [[package]] name = "aurora-engine-modexp" 
version = "1.1.0" @@ -353,14 +360,14 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] name = "autocfg" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "axum" @@ -375,7 +382,7 @@ dependencies = [ "futures-util", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.28", + "hyper 0.14.29", "itoa", "matchit", "memchr", @@ -420,9 +427,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.71" +version = "0.3.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" +checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" dependencies = [ "addr2line", "cc", @@ -447,9 +454,9 @@ checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64" -version = "0.22.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" @@ -524,7 +531,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -580,9 +587,9 @@ dependencies = [ [[package]] name = "blst" -version = "0.3.11" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c94087b935a822949d3291a9989ad2b2051ea141eda0fd4e478a75f6aa3e604b" +checksum = "62dc83a094a71d43eeadd254b1ec2d24cb6a0bb6cadce00df51f0db594711a32" dependencies = [ "cc", "glob", @@ -602,9 +609,9 @@ dependencies = [ [[package]] name = "borsh" -version = "1.4.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0901fc8eb0aca4c83be0106d6f2db17d86a08dfc2c25f0e84464bf381158add6" +checksum = "a6362ed55def622cddc70a4746a68554d7b687713770de539e59a739b249f8ed" dependencies = [ "borsh-derive", "cfg_aliases", @@ -612,15 +619,15 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.4.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51670c3aa053938b0ee3bd67c3817e471e626151131b934038e83c5bf8de48f5" +checksum = "c3ef8005764f53cd4dca619f5bf64cafd4664dada50ece25e4d81de54c80cc0b" dependencies = [ "once_cell", - "proc-macro-crate 3.1.0", + "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", "syn_derive", ] @@ -735,7 +742,7 @@ checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037" dependencies = [ "camino", "cargo-platform", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_json", "thiserror", @@ -743,9 +750,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.95" +version = "1.0.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32a725bc159af97c3e629873bb9f88fb8cf8a4867175f76dc987815ea07c83b" +checksum = "96c51067fd44124faa7f870b4b1c969379ad32b2ba805aa959430ceaa384f695" dependencies = [ "jobserver", "libc", @@ -775,9 +782,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "cfg_aliases" -version = "0.1.1" +version = "0.2.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "chrono" @@ -796,9 +803,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" dependencies = [ "glob", "libc", @@ -836,20 +843,20 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] name = "clap_lex" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" +checksum = "4b82cf0babdbd58558212896d1a4272303a57bdb245c2bf1147185fb45640e70" [[package]] name = "colorchoice" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" [[package]] name = "combine" @@ -1020,9 +1027,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" [[package]] name = "crunchy" @@ -1064,12 +1071,12 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.8" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54e36fcd13ed84ffdfda6f5be89b31287cbb80c439841fe69e04841435464391" +checksum = "83b2eb4d90d12bdda5ed17de686c2acb4c57914f8f921b8da7e112b5a36f3fe1" dependencies = [ - "darling_core 0.20.8", - "darling_macro 0.20.8", + "darling_core 0.20.9", + "darling_macro 0.20.9", ] [[package]] @@ -1088,16 +1095,16 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.8" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c2cf1c23a687a1feeb728783b993c4e1ad83d99f351801977dd809b48d0a70f" +checksum = "622687fe0bac72a04e5599029151f5796111b90f1baaa9b544d807a5e31cd120" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", - "strsim 0.10.0", - "syn 2.0.60", + "strsim 0.11.1", + "syn 2.0.66", ] [[package]] @@ -1113,13 +1120,13 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.20.8" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" +checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178" dependencies = [ - "darling_core 0.20.8", + "darling_core 0.20.9", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -1178,7 +1185,7 @@ checksum = "d150dea618e920167e5973d70ae6ece4385b7164e0d799fe7c122dd0a5d912ad" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -1196,9 +1203,9 @@ dependencies = [ [[package]] name = "deunicode" -version = "1.4.4" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322ef0094744e63628e6f0eb2295517f79276a5b342a4c2ff3042566ca181d4e" 
+checksum = "339544cc9e2c4dc3fc7149fd630c5f22263a4fdf18a98afd0075784968b5cf00" [[package]] name = "digest" @@ -1263,7 +1270,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -1284,10 +1291,10 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e57e12b69e57fad516e01e2b3960f122696fdb13420e1a88ed8e210316f2876" dependencies = [ - "darling 0.20.8", + "darling 0.20.9", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -1312,9 +1319,9 @@ dependencies = [ [[package]] name = "either" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" +checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" dependencies = [ "serde", ] @@ -1355,7 +1362,7 @@ checksum = "6fd000fd6988e73bbe993ea3db9b1aa64906ab88766d654973924340c8cddb42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -1366,9 +1373,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" dependencies = [ "libc", "windows-sys 0.52.0", @@ -1547,12 +1554,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "finl_unicode" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fcfdc7a0362c9f4444381a9e697c79d435fe65b52a37466fc2c1184cee9edc6" - [[package]] name = "fixed-hash" version = "0.8.0" @@ -1695,7 +1696,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -1760,9 +1761,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", "js-sys", @@ -1785,9 +1786,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.28.1" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" +checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" [[package]] name = "glob" @@ -1873,15 +1874,15 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "816ec7294445779408f36fe57bc5b7fc1cf59664059096c65f905c1c61f58069" +checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" dependencies = [ + "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", - "futures-util", "http 1.1.0", "indexmap 2.2.6", "slab", @@ -2062,12 +2063,12 @@ dependencies = [ [[package]] name = "http-body-util" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d" +checksum = 
"793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" dependencies = [ "bytes", - "futures-core", + "futures-util", "http 1.1.0", "http-body 1.0.0", "pin-project-lite", @@ -2075,9 +2076,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.8.0" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" +checksum = "d0e7a4dd27b9476dc40cb050d3632d3bba3a70ddbff012285f7f8559a1e7e545" [[package]] name = "httpdate" @@ -2093,9 +2094,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.28" +version = "0.14.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" +checksum = "f361cde2f109281a220d4307746cdfd5ee3f410da58a70377762396775634b33" dependencies = [ "bytes", "futures-channel", @@ -2124,7 +2125,7 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.4", + "h2 0.4.5", "http 1.1.0", "http-body 1.0.0", "httparse", @@ -2144,7 +2145,7 @@ checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http 0.2.12", - "hyper 0.14.28", + "hyper 0.14.29", "rustls 0.21.12", "tokio", "tokio-rustls 0.24.1", @@ -2152,16 +2153,16 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "908bb38696d7a037a01ebcc68a00634112ac2bbf8ca74e30a2c3d2f4f021302b" +checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" dependencies = [ "futures-util", "http 1.1.0", "hyper 1.3.1", "hyper-util", "log", - "rustls 0.23.9", + "rustls 0.23.10", "rustls-native-certs 0.7.0", "rustls-pki-types", "tokio", @@ -2175,7 +2176,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" dependencies = [ - "hyper 0.14.28", + "hyper 0.14.29", "pin-project-lite", "tokio", "tokio-io-timeout", @@ -2212,9 +2213,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.3" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa" +checksum = "7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56" dependencies = [ "bytes", "futures-channel", @@ -2368,7 +2369,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -2457,9 +2458,9 @@ dependencies = [ [[package]] name = "instant" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" dependencies = [ "cfg-if", ] @@ -2481,6 +2482,12 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "is_terminal_polyfill" +version = "1.70.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" + [[package]] name = "itertools" version = "0.10.5" @@ -2554,21 +2561,20 @@ dependencies = [ [[package]] name = "json-patch" -version = "1.2.0" +version = "1.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "55ff1e1486799e3f64129f8ccad108b38290df9cd7015cd31bed17239f0789d6" +checksum = "ec9ad60d674508f3ca8f380a928cfe7b096bc729c4e2dbfe3852bc45da3ab30b" dependencies = [ "serde", "serde_json", "thiserror", - "treediff", ] [[package]] name = "jsonpath-rust" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0268078319393f8430e850ee9d4706aeced256d34cf104d216bb496777137162" +checksum = "19d8fe85bd70ff715f31ce8c739194b423d79811a19602115d611a3ec85d6200" dependencies = [ "lazy_static", "once_cell", @@ -2601,14 +2607,14 @@ version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "039db9fe25cd63b7221c3f8788c1ef4ea07987d40ec25a1e7d7a3c3e3e3fd130" dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "futures-channel", "futures-util", "gloo-net", "http 1.1.0", "jsonrpsee-core", "pin-project", - "rustls 0.23.9", + "rustls 0.23.10", "rustls-pki-types", "rustls-platform-verifier", "soketto", @@ -2656,14 +2662,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb25cab482c8512c4f3323a5c90b95a3b8f7c90681a87bf7a68b942d52f08933" dependencies = [ "async-trait", - "base64 0.22.0", + "base64 0.22.1", "http-body 1.0.0", "hyper 1.3.1", - "hyper-rustls 0.27.1", + "hyper-rustls 0.27.2", "hyper-util", "jsonrpsee-core", "jsonrpsee-types", - "rustls 0.23.9", + "rustls 0.23.10", "rustls-platform-verifier", "serde", "serde_json", @@ -2814,7 +2820,7 @@ version = "0.90.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0708306b5c0085f249f5e3d2d56a9bbfe0cbbf4fd4eb9ed4bbba542ba7649a7" dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "bytes", "chrono", "either", @@ -2824,14 +2830,14 @@ dependencies = [ "http-body 1.0.0", "http-body-util", "hyper 1.3.1", - "hyper-rustls 0.27.1", + "hyper-rustls 0.27.2", "hyper-timeout 0.5.1", "hyper-util", "jsonpath-rust", "k8s-openapi", "kube-core", "pem", - "rustls 0.23.9", + "rustls 0.23.10", "rustls-pemfile 2.1.2", "secrecy", "serde", @@ -2868,11 +2874,11 @@ version = "0.90.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d0d2527a6ff7adf00b34d558c4c5de9404abe28808cb0a4c64b57e2c1b0716a" dependencies = [ - "darling 0.20.8", + "darling 0.20.9", "proc-macro2", "quote", "serde_json", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -2918,9 +2924,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.153" +version = "0.2.155" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" +checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" [[package]] name = "libloading" @@ -2977,9 +2983,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.16" +version = "1.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e143b5e666b2695d28f6bca6497720813f699c9602dd7f5cac91008b8ada7f9" +checksum = "c15da26e5af7e25c90b37a2d75cdbf940cf4a55316de9d84c679c9b8bfabf82e" dependencies = [ "cc", "pkg-config", @@ -2988,9 +2994,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" +checksum = 
"78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "litemap" @@ -3016,9 +3022,9 @@ checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" [[package]] name = "lz4-sys" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d27b317e207b10f69f5e75494119e391a96f48861ae870d1da6edac98ca900" +checksum = "e9764018d143cc854c9f17f0b907de70f14393b1f502da6375dce70f00514eb3" dependencies = [ "cc", "libc", @@ -3071,7 +3077,7 @@ version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26eb45aff37b45cff885538e1dcbd6c2b462c04fe84ce0155ea469f325672c98" dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "http-body-util", "hyper 1.3.1", "hyper-tls", @@ -3115,9 +3121,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" +checksum = "87dfd01fe195c66b572b37921ad8803d010623c0aca821bea2302239d155cdae" dependencies = [ "adler", ] @@ -3141,11 +3147,10 @@ checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" [[package]] name = "native-tls" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" dependencies = [ - "lazy_static", "libc", "log", "openssl", @@ -3188,9 +3193,9 @@ dependencies = [ [[package]] name = "num" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3135b08af27d103b0a51f2ae0f8632117b7b185ccf931445affa8df530576a41" +checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" dependencies = [ "num-bigint", "num-complex", @@ -3202,11 +3207,10 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" +checksum = "c165a9ab64cf766f73521c0dd2cfdff64f488b8f0b3e621face3462d3db536d7" dependencies = [ - "autocfg", "num-integer", "num-traits", ] @@ -3254,9 +3258,9 @@ dependencies = [ [[package]] name = "num-iter" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d869c01cc0c455284163fd0092f1f93835385ccab5a98a0dcc497b2f8bf055a9" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" dependencies = [ "autocfg", "num-integer", @@ -3276,9 +3280,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", "libm", @@ -3309,10 +3313,10 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" dependencies = [ - "proc-macro-crate 3.1.0", + "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -3326,9 +3330,9 @@ dependencies = [ 
[[package]] name = "object" -version = "0.32.2" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +checksum = "576dfe1fc8f9df304abb159d767a29d0476f7750fbf8aa7ad07816004a207434" dependencies = [ "memchr", ] @@ -3393,7 +3397,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -3532,9 +3536,9 @@ checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "parity-scale-codec" -version = "3.6.9" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "881331e34fa842a2fb61cc2db9643a8fedc615e47cfcc52597d1af0db9a7e8fe" +checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" dependencies = [ "arrayvec", "bitvec", @@ -3546,11 +3550,11 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.6.9" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" +checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ - "proc-macro-crate 2.0.0", + "proc-macro-crate", "proc-macro2", "quote", "syn 1.0.109", @@ -3558,9 +3562,9 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", "parking_lot_core", @@ -3591,7 +3595,7 @@ version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "serde", ] @@ -3612,9 +3616,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.9" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "311fb059dee1a7b802f036316d790138c613a4e8b180c822e3925a662e9f0c95" +checksum = "560131c633294438da9f7c4b08189194b20946c8274c6b9e38881a7874dc8ee8" dependencies = [ "memchr", "thiserror", @@ -3623,9 +3627,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.9" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73541b156d32197eecda1a4014d7f868fd2bcb3c550d5386087cfba442bf69c" +checksum = "26293c9193fbca7b1a3bf9b79dc1e388e927e6cacaa78b4a3ab705a1d3d41459" dependencies = [ "pest", "pest_generator", @@ -3633,22 +3637,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.9" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c35eeed0a3fab112f75165fdc026b3913f4183133f19b49be773ac9ea966e8bd" +checksum = "3ec22af7d3fb470a85dd2ca96b7c577a1eb4ef6f1683a9fe9a8c16e136c04687" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] name = "pest_meta" -version = "2.7.9" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2adbf29bb9776f28caece835398781ab24435585fe0d4dc1374a61db5accedca" +checksum = 
"d7a240022f37c361ec1878d646fc5b7d7c4d28d5946e1a80ad5a7a4f4ca0bdcd" dependencies = [ "once_cell", "pest", @@ -3720,7 +3724,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -3787,7 +3791,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" dependencies = [ "proc-macro2", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -3804,32 +3808,13 @@ dependencies = [ "uint", ] -[[package]] -name = "proc-macro-crate" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" -dependencies = [ - "once_cell", - "toml_edit 0.19.15", -] - -[[package]] -name = "proc-macro-crate" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" -dependencies = [ - "toml_edit 0.20.7", -] - [[package]] name = "proc-macro-crate" version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" dependencies = [ - "toml_edit 0.21.1", + "toml_edit", ] [[package]] @@ -3864,9 +3849,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.81" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba" +checksum = "22244ce15aa966053a896d1accb3a6e68469b97c7f33f284b99f0d576879fc23" dependencies = [ "unicode-ident", ] @@ -3885,7 +3870,7 @@ dependencies = [ "rand", "rand_chacha", "rand_xorshift", - "regex-syntax 0.8.3", + "regex-syntax 0.8.4", "rusty-fork", "tempfile", "unarray", @@ -3918,7 +3903,7 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.60", + "syn 2.0.66", "tempfile", ] @@ -3932,7 +3917,7 @@ dependencies = [ "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -4139,14 +4124,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.4" +version = "1.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" +checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.6", - "regex-syntax 0.8.3", + "regex-automata 0.4.7", + "regex-syntax 0.8.4", ] [[package]] @@ -4160,13 +4145,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" +checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.3", + "regex-syntax 0.8.4", ] [[package]] @@ -4177,9 +4162,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" +checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" [[package]] name = 
"rend" @@ -4204,7 +4189,7 @@ dependencies = [ "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.28", + "hyper 0.14.29", "hyper-rustls 0.24.2", "ipnet", "js-sys", @@ -4237,13 +4222,13 @@ version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "566cafdd92868e0939d3fb961bd0dc25fcfaaed179291093b3d43e6b3150ea10" dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "bytes", "encoding_rs", "futures-channel", "futures-core", "futures-util", - "h2 0.4.4", + "h2 0.4.5", "http 1.1.0", "http-body 1.0.0", "http-body-util", @@ -4461,9 +4446,9 @@ dependencies = [ [[package]] name = "ruint" -version = "1.12.1" +version = "1.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f308135fef9fc398342da5472ce7c484529df23743fb7c734e0f3d472971e62" +checksum = "2c3cc4c2511671f327125da14133d0c5c5d137f006a1017a16f557bc85b16286" dependencies = [ "alloy-rlp", "ark-ff 0.3.0", @@ -4485,9 +4470,9 @@ dependencies = [ [[package]] name = "ruint-macro" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f86854cf50259291520509879a5c294c3c9a4c334e9ff65071c51e42ef1e2343" +checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" [[package]] name = "rust_decimal" @@ -4507,9 +4492,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc-hash" @@ -4538,7 +4523,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.22", + "semver 1.0.23", ] [[package]] @@ -4582,9 +4567,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.9" +version = "0.23.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a218f0f6d05669de4eabfb24f31ce802035c952429d037507b4a4a39f0e60c5b" +checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" dependencies = [ "log", "once_cell", @@ -4635,7 +4620,7 @@ version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "rustls-pki-types", ] @@ -4656,7 +4641,7 @@ dependencies = [ "jni", "log", "once_cell", - "rustls 0.23.9", + "rustls 0.23.10", "rustls-native-certs 0.7.0", "rustls-platform-verifier-android", "rustls-webpki 0.102.4", @@ -4695,9 +4680,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.15" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47" +checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" [[package]] name = "rusty-fork" @@ -4713,9 +4698,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "same-file" @@ -4728,9 +4713,9 @@ dependencies = [ [[package]] name = "scale-info" 
-version = "2.11.2" +version = "2.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c453e59a955f81fb62ee5d596b450383d699f152d350e9d23a0db2adb78e4c0" +checksum = "eca070c12893629e2cc820a9761bedf6ce1dcddc9852984d1dc734b8bd9bd024" dependencies = [ "cfg-if", "derive_more", @@ -4740,11 +4725,11 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.11.2" +version = "2.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18cf6c6447f813ef19eb450e985bcce6705f9ce7660db221b59093d15c79c4b7" +checksum = "2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" dependencies = [ - "proc-macro-crate 1.3.1", + "proc-macro-crate", "proc-macro2", "quote", "syn 1.0.109", @@ -4761,9 +4746,9 @@ dependencies = [ [[package]] name = "schemars" -version = "0.8.17" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f55c82c700538496bdc329bb4918a81f87cc8888811bd123cf325a0f2f8d309" +checksum = "09c024468a378b7e36765cd36702b7a90cc3cba11654f6685c8f233408e89e92" dependencies = [ "dyn-clone", "schemars_derive", @@ -4773,14 +4758,14 @@ dependencies = [ [[package]] name = "schemars_derive" -version = "0.8.17" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83263746fe5e32097f06356968a077f96089739c927a61450efa069905eec108" +checksum = "b1eee588578aff73f856ab961cd2f79e36bc45d7ded33a7562adba4667aecc0e" dependencies = [ "proc-macro2", "quote", "serde_derive_internals", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -4850,11 +4835,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6" +checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", "core-foundation", "core-foundation-sys", "libc", @@ -4864,9 +4849,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef" +checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" dependencies = [ "core-foundation-sys", "libc", @@ -4883,9 +4868,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" dependencies = [ "serde", ] @@ -5040,18 +5025,18 @@ checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] name = "serde_derive_internals" -version = "0.29.0" +version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "330f01ce65a3a5fe59a60c82f3c9a024b573b8a6e875bd233fe5f934e71d54e3" +checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -5094,7 +5079,7 @@ version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ad483d2ab0149d5a5ebcd9972a3852711e0153d863bf5a5d0391d28883c4a20" 
dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", @@ -5124,10 +5109,10 @@ version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "65569b702f41443e8bc8bbb1c5779bd0450bbe723b56198980e80ec45780bce2" dependencies = [ - "darling 0.20.8", + "darling 0.20.9", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -5336,7 +5321,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37468c595637c10857701c990f93a40ce0e357cedb0953d1c26c8d8027f9bb53" dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "bytes", "futures", "http 1.1.0", @@ -5373,11 +5358,10 @@ dependencies = [ [[package]] name = "sqlformat" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce81b7bd7c4493975347ef60d8c7e8b742d4694f4c49f93e0a12ea263938176c" +checksum = "f895e3734318cc55f1fe66258926c9b910c124d47520339efecbb6c59cec7c1f" dependencies = [ - "itertools 0.12.1", "nom", "unicode_categories", ] @@ -5605,6 +5589,7 @@ dependencies = [ "binary_macros", "bincode", "byte-unit", + "cfg-if", "chrono", "clap", "console-subscriber", @@ -5633,6 +5618,7 @@ dependencies = [ "k8s-openapi", "keccak-hasher", "kube", + "lazy_static", "metrics", "metrics-exporter-prometheus", "nom", @@ -5665,6 +5651,7 @@ dependencies = [ "sqlx", "stringreader", "strum", + "sugars", "tempfile", "testcontainers", "testcontainers-modules", @@ -5687,13 +5674,13 @@ dependencies = [ [[package]] name = "stringprep" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb41d74e231a107a1b4ee36bd1214b11285b77768d2e3824aedafa988fd36ee6" +checksum = "7b4df3d392d81bd458a8a621b8bffbd2302a12ffe288a9d931670948749463b1" dependencies = [ - "finl_unicode", "unicode-bidi", "unicode-normalization", + "unicode-properties", ] [[package]] @@ -5725,15 +5712,15 @@ dependencies = [ [[package]] name = "strum_macros" -version = "0.26.2" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" dependencies = [ - "heck 0.4.1", + "heck 0.5.0", "proc-macro2", "quote", "rustversion", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -5755,6 +5742,12 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" +[[package]] +name = "sugars" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc0db74f9ee706e039d031a560bd7d110c7022f016051b3d33eeff9583e3e67a" + [[package]] name = "syn" version = "1.0.109" @@ -5768,9 +5761,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.60" +version = "2.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3" +checksum = "c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5" dependencies = [ "proc-macro2", "quote", @@ -5786,7 +5779,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -5803,7 +5796,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -5841,9 +5834,9 @@ checksum 
= "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.10.1" +version = "3.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" +checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" dependencies = [ "cfg-if", "fastrand", @@ -5905,7 +5898,7 @@ checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -6031,7 +6024,7 @@ checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -6071,7 +6064,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.9", + "rustls 0.23.10", "rustls-pki-types", "tokio", ] @@ -6105,31 +6098,9 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" - -[[package]] -name = "toml_edit" -version = "0.19.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" -dependencies = [ - "indexmap 2.2.6", - "toml_datetime", - "winnow", -] - -[[package]] -name = "toml_edit" -version = "0.20.7" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" -dependencies = [ - "indexmap 2.2.6", - "toml_datetime", - "winnow", -] +checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" [[package]] name = "toml_edit" @@ -6156,7 +6127,7 @@ dependencies = [ "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.28", + "hyper 0.14.29", "hyper-timeout 0.4.1", "percent-encoding", "pin-project", @@ -6183,7 +6154,7 @@ dependencies = [ "proc-macro2", "prost-build", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -6257,7 +6228,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -6330,15 +6301,6 @@ dependencies = [ "tracing-serde", ] -[[package]] -name = "treediff" -version = "4.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d127780145176e2b5d16611cc25a900150e86e9fd79d3bde6ff3a37359c9cb5" -dependencies = [ - "serde_json", -] - [[package]] name = "triehash" version = "0.8.4" @@ -6426,6 +6388,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-properties" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4259d9d4425d9f0661581b804cb85fe66a4c631cadd8f490d1c13a35d5d9291" + [[package]] name = "unicode-segmentation" version = "1.11.0" @@ -6462,7 +6430,7 @@ version = "2.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d11a831e3c0b56e438a28308e7c810799e3c118417f342d30ecec080105395cd" dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "log", "native-tls", "once_cell", @@ -6507,9 +6475,9 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "utf8parse" -version = "0.2.1" +version = 
"0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" @@ -6616,7 +6584,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", "wasm-bindgen-shared", ] @@ -6650,7 +6618,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -6683,9 +6651,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3de34ae270483955a94f4b21bdaaeb83d508bb84a01435f393818edb0012009" +checksum = "3c452ad30530b54a4d8e71952716a212b08efd0f3562baa66c29a618b07da7c3" dependencies = [ "rustls-pki-types", ] @@ -6959,28 +6927,28 @@ checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", "synstructure", ] [[package]] name = "zerocopy" -version = "0.7.32" +version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.32" +version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -7000,15 +6968,15 @@ checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", "synstructure", ] [[package]] name = "zeroize" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" dependencies = [ "zeroize_derive", ] @@ -7021,7 +6989,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -7043,7 +7011,7 @@ checksum = "97cf56601ee5052b4417d90c8755c6683473c926039908196cf35d99f893ebe7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 541e03ac4..992084047 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,6 +14,7 @@ default-run = "stratus" anyhow = "=1.0.86" async-trait = "=0.1.80" byte-unit = "=5.1.4" +cfg-if = "=1.0.0" chrono = { version = "=0.4.38", features = ["serde"] } const_format = "=0.2.32" const-hex = "=1.12.0" @@ -23,6 +24,7 @@ hex-literal = "=0.4.1" humantime = "=2.1.0" indexmap = { version = "=2.2.6", features = ["serde"] } itertools = "=0.13.0" +lazy_static = "=1.4.0" nonempty = { version = "=0.10.0", features = ["serialize"] } once_cell = "=1.19.0" oneshot = "=0.1.8" @@ -33,6 +35,7 @@ quote = "=1.0.36" rand = "=0.8.5" strum = "=0.26.2" thiserror = "=1.0.61" +sugars = "=3.0.1" ulid = "=1.1.2" url = "=2.5.1" uuid = { version = 
"=1.8.0", features = ["v4", "fast-rng" ] } @@ -104,7 +107,7 @@ testcontainers-modules = { version = "=0.3.5", features = ["postgres"] } # test fake = { version = "=2.9.2", features = ["derive"] } -#relayer +# relayer petgraph = "=0.6.5" # ------------------------------------------------------------------------------ @@ -131,7 +134,7 @@ console-subscriber = { git = "https://github.com/tokio-rs/console.git", rev = "8 binary_macros = "=1.0.0" fancy-duration = "=0.9.2" stringreader = "=0.1.1" -tempfile = "3.3.0" +tempfile = "=3.10.0" # ------------------------------------------------------------------------------ # Build dependencies diff --git a/e2e/test/automine/e2e-tx-parallel-contract.test.ts b/e2e/test/automine/e2e-tx-parallel-contract.test.ts index a56e50e5d..56961e2e5 100644 --- a/e2e/test/automine/e2e-tx-parallel-contract.test.ts +++ b/e2e/test/automine/e2e-tx-parallel-contract.test.ts @@ -9,6 +9,7 @@ import { sendGetNonce, sendRawTransactions, sendReset, + send } from "../helpers/rpc"; describe("Transaction: parallel TestContractBalances", async () => { @@ -16,6 +17,8 @@ describe("Transaction: parallel TestContractBalances", async () => { it("Resets blockchain", async () => { await sendReset(); + const blockNumber = await send("eth_blockNumber", []); + expect(blockNumber).to.be.oneOf(["0x0", "0x1"]); }); it("Deploy TestContractBalances", async () => { @@ -98,6 +101,8 @@ describe("Transaction: parallel TestContractCounter", async () => { it("Resets blockchain", async () => { await sendReset(); + const blockNumber = await send("eth_blockNumber", []); + expect(blockNumber).to.be.oneOf(["0x0", "0x1"]); }); it("Deploy TestContractCounter", async () => { diff --git a/e2e/test/automine/e2e-tx-parallel-transfer.test.ts b/e2e/test/automine/e2e-tx-parallel-transfer.test.ts index d61fc5e93..818387a5a 100644 --- a/e2e/test/automine/e2e-tx-parallel-transfer.test.ts +++ b/e2e/test/automine/e2e-tx-parallel-transfer.test.ts @@ -1,11 +1,13 @@ import { expect } from "chai"; import { TEST_ACCOUNTS, randomAccounts } from "../helpers/account"; -import { sendGetBalance, sendRawTransactions, sendReset } from "../helpers/rpc"; +import { sendGetBalance, sendRawTransactions, sendReset, send } from "../helpers/rpc"; describe("Transaction: parallel transfer", () => { it("Resets blockchain", async () => { await sendReset(); + const blockNumber = await send("eth_blockNumber", []); + expect(blockNumber).to.be.oneOf(["0x0", "0x1"]); }); it("Sends parallel requests", async () => { const counterParty = randomAccounts(1)[0]; diff --git a/e2e/test/automine/e2e-tx-serial-contract.test.ts b/e2e/test/automine/e2e-tx-serial-contract.test.ts index 80889d5ea..28515310d 100644 --- a/e2e/test/automine/e2e-tx-serial-contract.test.ts +++ b/e2e/test/automine/e2e-tx-serial-contract.test.ts @@ -28,6 +28,8 @@ describe("Transaction: serial TestContractBalances", () => { it("Resets blockchain", async () => { await sendReset(); + const blockNumber = await send("eth_blockNumber", []); + expect(blockNumber).to.be.oneOf(["0x0", "0x1"]); }); it("Contract is deployed", async () => { diff --git a/e2e/test/automine/e2e-tx-serial-transfer.test.ts b/e2e/test/automine/e2e-tx-serial-transfer.test.ts index 7d7dc9231..e4ba9cf4d 100644 --- a/e2e/test/automine/e2e-tx-serial-transfer.test.ts +++ b/e2e/test/automine/e2e-tx-serial-transfer.test.ts @@ -30,6 +30,8 @@ describe("Transaction: serial transfer", () => { it("Resets blockchain", async () => { await sendReset(); + const blockNumber = await send("eth_blockNumber", []); + 
expect(blockNumber).to.be.oneOf(["0x0", "0x1"]); }); it("Send transaction", async () => { let txSigned = await ALICE.signWeiTransfer(BOB.address, TEST_TRANSFER); diff --git a/e2e/test/external/e2e-multiple-tx.test.ts b/e2e/test/external/e2e-multiple-tx.test.ts index 5eef857a3..d6c450c64 100644 --- a/e2e/test/external/e2e-multiple-tx.test.ts +++ b/e2e/test/external/e2e-multiple-tx.test.ts @@ -6,6 +6,8 @@ import { send, sendEvmMine, sendGetBalance, sendRawTransactions, sendReset } fro describe("Multiple Transactions Per Block", () => { it("Resets blockchain", async () => { await sendReset(); + const blockNumber = await send("eth_blockNumber", []); + expect(blockNumber).to.be.oneOf(["0x0", "0x1"]); }); it("Send multiple transactions and mine block", async () => { const counterParty = randomAccounts(1)[0]; @@ -67,4 +69,4 @@ describe("Multiple Transactions Per Block", () => { // check counterParty balance expect(await sendGetBalance(counterParty.address)).eq(expectedCounterPartyBalance); }); -}); \ No newline at end of file +}); diff --git a/src/bin/importer_offline.rs b/src/bin/importer_offline.rs index 024095c90..0d46ff393 100644 --- a/src/bin/importer_offline.rs +++ b/src/bin/importer_offline.rs @@ -1,8 +1,20 @@ +//! Importer-Offline binary. +//! +//! It loads blocks (and receipts) from an external RPC server, or from a PostgreSQL DB +//! that was prepared with the `rpc-downloader` binary. +//! +//! On startup, this importer reads the `block_end` value from the external +//! storage and does not update it while running. In contrast, the +//! Importer-Online (a separate binary) keeps up with new blocks as they +//! arrive. + use std::cmp::min; use std::fs; use std::sync::Arc; use anyhow::anyhow; +use anyhow::Context; +use futures::join; use futures::try_join; use futures::StreamExt; use itertools::Itertools; @@ -17,18 +29,17 @@ use stratus::eth::storage::ExternalRpcStorage; use stratus::eth::storage::InMemoryPermanentStorage; use stratus::eth::BlockMiner; use stratus::eth::Executor; -use stratus::ext::spawn_thread; +use stratus::ext::spawn_named; use stratus::ext::ResultExt; use stratus::log_and_err; use stratus::utils::calculate_tps_and_bpm; use stratus::utils::DropTimer; use stratus::GlobalServices; use stratus::GlobalState; -use tokio::runtime::Handle; use tokio::sync::mpsc; use tokio::time::Instant; -/// Number of tasks in the backlog. Each task contains 10_000 blocks and all receipts for them. +/// Number of tasks in the backlog. Each task contains `--blocks-by-fetch` blocks and all receipts for them. const BACKLOG_SIZE: usize = 50; type BacklogTask = (Vec<ExternalBlock>, Vec<ExternalReceipt>); @@ -67,36 +78,13 @@ async fn run(config: ImporterOfflineConfig) -> anyhow::Result<()> { let initial_accounts = rpc_storage.read_initial_accounts().await?; storage.save_accounts(initial_accounts.clone())?; - // execute thread: external rpc storage loader - let storage_loader_thread = spawn_thread("storage-loader", move || { - let result = Handle::current().block_on(execute_external_rpc_storage_loader( - rpc_storage, - config.blocks_by_fetch, - config.paralellism, - block_start, - block_end, - backlog_tx, - )); - if let Err(e) = result { - tracing::error!(reason = ?e, "storage-loader failed"); - } - }); + let storage_loader = execute_external_rpc_storage_loader(rpc_storage, config.blocks_by_fetch, config.paralellism, block_start, block_end, backlog_tx); + let storage_loader = spawn_named("storage-loader", async move { storage_loader.await.context("'storage-loader' task failed") }); - // execute thread: block importer - let block_importer_thread = spawn_thread("block-importer", move || { - let result = Handle::current().block_on(execute_block_importer(executor, miner, backlog_rx, block_snapshots)); - if let Err(e) = result { - tracing::error!(reason = ?e, "block-importer failed"); - } - }); + let block_importer = execute_block_importer(executor, miner, backlog_rx, block_snapshots); + let block_importer = spawn_named("block-importer", async move { block_importer.await.context("'block-importer' task failed") }); - // await tasks - if let Err(e) = block_importer_thread.join() { - tracing::error!(reason = ?e, "block-importer thread failed"); - } - if let Err(e) = storage_loader_thread.join() { - tracing::error!(reason = ?e, "storage-loader thread failed"); - } + let (_, _) = join!(storage_loader, block_importer); Ok(()) }
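The hunk above replaces dedicated OS threads, each of which re-entered the Tokio runtime through `Handle::current().block_on`, with ordinary async tasks that are awaited together. A minimal, self-contained sketch of the resulting pattern, with `tokio::spawn` standing in for Stratus's `spawn_named` helper and the task bodies stubbed out:

```rust
use anyhow::Context;
use futures::join;

// Stub for the loader that fetches blocks/receipts and feeds the backlog channel.
async fn storage_loader() -> anyhow::Result<()> {
    Ok(())
}

// Stub for the importer that drains the backlog, re-executes and mines blocks.
async fn block_importer() -> anyhow::Result<()> {
    Ok(())
}

#[tokio::main]
async fn main() {
    // `spawn_named` wraps task spawning in Stratus; plain tokio::spawn is used here.
    let loader = tokio::spawn(async move { storage_loader().await.context("'storage-loader' task failed") });
    let importer = tokio::spawn(async move { block_importer().await.context("'block-importer' task failed") });

    // join! (unlike try_join!) drives both tasks to completion even if one fails.
    let (loader_result, importer_result) = join!(loader, importer);
    for result in [loader_result, importer_result] {
        match result {
            Ok(Ok(())) => {}
            Ok(Err(task_error)) => eprintln!("task failed: {task_error:?}"),
            Err(join_error) => eprintln!("task panicked: {join_error}"),
        }
    }
}
```

Compared with the old version, errors now surface through each task's return value (via `Context`) instead of being logged inside each thread, and no nested `block_on` is needed.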
diff --git a/src/config.rs b/src/config.rs index 882eef635..b1d75cc3e 100644 --- a/src/config.rs +++ b/src/config.rs @@ -38,6 +38,7 @@ use crate::eth::BlockMiner; use crate::eth::BlockMinerMode; use crate::eth::Executor; use crate::eth::TransactionRelayer; +use crate::ext::not; use crate::ext::parse_duration; use crate::infra::build_info; use crate::infra::tracing::TracingLogFormat; @@ -204,7 +205,7 @@ impl StratusStorageConfig { /// Initializes Stratus storage. pub async fn init(&self) -> anyhow::Result<Arc<StratusStorage>> { let temp_storage = self.temp_storage.init().await?; - let perm_storage = self.perm_storage.init().await?; + let perm_storage = self.perm_storage.init()?; let storage = StratusStorage::new(temp_storage, perm_storage); Ok(Arc::new(storage)) } @@ -850,8 +851,12 @@ pub struct PermanentStorageConfig { #[cfg(feature = "rocks")] /// RocksDB storage path prefix to execute multiple local Stratus instances. - #[arg(long = "rocks-path-prefix", env = "ROCKS_PATH_PREFIX", default_value = "")] + #[arg(long = "rocks-path-prefix", env = "ROCKS_PATH_PREFIX")] pub rocks_path_prefix: Option<String>, + + /// Disable RocksDB backups. + #[arg(long = "perm-storage-disable-backups", env = "PERM_STORAGE_DISABLE_BACKUPS")] + pub perm_storage_disable_backups: bool, } #[derive(DebugAsJson, Clone, serde::Serialize)] @@ -863,13 +868,17 @@ pub enum PermanentStorageKind { impl PermanentStorageConfig { /// Initializes permanent storage implementation. - pub async fn init(&self) -> anyhow::Result<Arc<dyn PermanentStorage>> { + pub fn init(&self) -> anyhow::Result<Arc<dyn PermanentStorage>> { tracing::info!(config = ?self, "creating permanent storage"); let perm: Arc<dyn PermanentStorage> = match self.perm_storage_kind { PermanentStorageKind::InMemory => Arc::new(InMemoryPermanentStorage::default()), #[cfg(feature = "rocks")] - PermanentStorageKind::Rocks => Arc::new(RocksPermanentStorage::new(self.rocks_path_prefix.clone())?), + PermanentStorageKind::Rocks => { + let enable_backups = not(self.perm_storage_disable_backups); + let prefix = self.rocks_path_prefix.clone(); + Arc::new(RocksPermanentStorage::new(enable_backups, prefix)?) + } }; Ok(perm) }
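The new flag follows the existing clap-plus-environment-variable pattern of `config.rs`; `not` is a small readability helper from `crate::ext` that turns the negative CLI flag into the positive setting the storage constructor expects. A hypothetical, standalone reproduction of that wiring (the struct and `main` below are illustrative, not the real config; clap's `derive` and `env` features are assumed):

```rust
use clap::Parser;

// Mirrors the helper imported from crate::ext in the hunk above.
fn not(value: bool) -> bool {
    !value
}

#[derive(Parser, Debug)]
struct PermStorageArgs {
    /// Disable RocksDB backups.
    #[arg(long = "perm-storage-disable-backups", env = "PERM_STORAGE_DISABLE_BACKUPS")]
    perm_storage_disable_backups: bool,
}

fn main() {
    let args = PermStorageArgs::parse();
    // The storage layer receives the positive form of the setting.
    let enable_backups = not(args.perm_storage_disable_backups);
    println!("RocksDB backups enabled: {enable_backups}");
}
```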
-    pub async fn init(&self) -> anyhow::Result<Arc<dyn PermanentStorage>> {
+    pub fn init(&self) -> anyhow::Result<Arc<dyn PermanentStorage>> {
         tracing::info!(config = ?self, "creating permanent storage");

         let perm: Arc<dyn PermanentStorage> = match self.perm_storage_kind {
             PermanentStorageKind::InMemory => Arc::new(InMemoryPermanentStorage::default()),

             #[cfg(feature = "rocks")]
-            PermanentStorageKind::Rocks => Arc::new(RocksPermanentStorage::new(self.rocks_path_prefix.clone())?),
+            PermanentStorageKind::Rocks => {
+                let enable_backups = not(self.perm_storage_disable_backups);
+                let prefix = self.rocks_path_prefix.clone();
+                Arc::new(RocksPermanentStorage::new(enable_backups, prefix)?)
+            }
         };
         Ok(perm)
     }
diff --git a/src/eth/primitives/log_filter.rs b/src/eth/primitives/log_filter.rs
index 27ccd1a83..926ed27b0 100644
--- a/src/eth/primitives/log_filter.rs
+++ b/src/eth/primitives/log_filter.rs
@@ -51,7 +51,7 @@ pub struct LogFilterTopicCombination(Vec<(usize, LogTopic)>);
 gen_newtype_from!(self = LogFilterTopicCombination, other = Vec<(usize, LogTopic)>);

 impl LogFilterTopicCombination {
-    fn matches(&self, log_topics: &[LogTopic]) -> bool {
+    pub fn matches(&self, log_topics: &[LogTopic]) -> bool {
         for (topic_index, topic) in &self.0 {
             if log_topics.get(*topic_index).is_some_and(|log_topic| log_topic != topic) {
                 return false;
diff --git a/src/eth/storage/rocks/mod.rs b/src/eth/storage/rocks/mod.rs
index fffd41eb0..8246a7b42 100644
--- a/src/eth/storage/rocks/mod.rs
+++ b/src/eth/storage/rocks/mod.rs
@@ -1,4 +1,11 @@
-pub mod rocks_db;
+/// Data manipulation for column families.
+mod rocks_cf;
+/// Settings and tweaks for the database and column families.
+mod rocks_config;
+/// Functionalities related to the whole database.
+mod rocks_db;
+/// Exposed API.
 pub mod rocks_permanent;
-pub mod rocks_state;
+/// State handler for DB and column families.
+mod rocks_state;
 mod types;
diff --git a/src/eth/storage/rocks/rocks_cf.rs b/src/eth/storage/rocks/rocks_cf.rs
new file mode 100644
index 000000000..99df96c88
--- /dev/null
+++ b/src/eth/storage/rocks/rocks_cf.rs
@@ -0,0 +1,240 @@
+//! RocksDB handling of column families.
+
+use std::iter;
+use std::marker::PhantomData;
+use std::sync::Arc;
+
+use anyhow::Result;
+use rocksdb::BoundColumnFamily;
+use rocksdb::DBIteratorWithThreadMode;
+use rocksdb::IteratorMode;
+use rocksdb::Options;
+use rocksdb::WriteBatch;
+use rocksdb::DB;
+use serde::Deserialize;
+use serde::Serialize;
+
+/// A Column Family in RocksDB.
+///
+/// Exposes an API for key-value pair storage.
+#[derive(Clone)]
+pub struct RocksCf<K, V> {
+    db: Arc<DB>,
+    // TODO: check if we can gather metrics from a Column Family, if not, remove this field
+    _opts: Options,
+    column_family: String,
+    _marker: PhantomData<(K, V)>,
+}
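// Illustrative usage sketch (not part of this diff): with the serde bounds below,
// a `RocksCf` behaves like a typed, persistent map over one column family. The
// database handle and CF name here are assumptions; the CF must already have been
// declared when the DB was opened (see `create_or_open_db` later in this diff).
fn cf_usage_sketch(db: std::sync::Arc<rocksdb::DB>) {
    let cf: RocksCf<u64, String> = RocksCf::new_cf(db, "example_cf", rocksdb::Options::default());
    cf.insert(1, "one".to_owned());
    assert_eq!(cf.get(&1), Some("one".to_owned()));
    // `last_key` yields the greatest key in the byte-wise order of the
    // bincode-serialized keys, which is not necessarily numeric order
    let _last = cf.last_key();
}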
+impl<K, V> RocksCf<K, V>
+where
+    K: Serialize + for<'de> Deserialize<'de> + std::hash::Hash + Eq,
+    V: Serialize + for<'de> Deserialize<'de> + Clone,
+{
+    /// Create Column Family for given DB if it doesn't exist.
+    pub fn new_cf(db: Arc<DB>, column_family: &str, opts: Options) -> Self {
+        Self {
+            db,
+            column_family: column_family.to_owned(),
+            _opts: opts,
+            _marker: PhantomData,
+        }
+    }
+
+    fn handle(&self) -> Arc<BoundColumnFamily> {
+        self.db.cf_handle(&self.column_family).unwrap()
+    }
+
+    // Clears the database
+    pub fn clear(&self) -> Result<()> {
+        let cf = self.handle();
+
+        // try clearing everything
+        let first = self.db.iterator_cf(&cf, IteratorMode::Start).next();
+        let last = self.db.iterator_cf(&cf, IteratorMode::End).next();
+        if let (Some(Ok((first_key, _))), Some(Ok((last_key, _)))) = (first, last) {
+            self.db.delete_range_cf(&cf, first_key, last_key)?;
+        }
+
+        // clear left-overs
+        let mut batch = WriteBatch::default();
+        for item in self.db.iterator_cf(&cf, IteratorMode::Start) {
+            let (key, _) = item?; // Handle or unwrap the Result
+            batch.delete_cf(&cf, key);
+        }
+        self.db.write(batch)?;
+        Ok(())
+    }
+
+    pub fn get(&self, key: &K) -> Option<V> {
+        let Ok(serialized_key) = bincode::serialize(key) else { return None };
+        let cf = self.handle();
+        let Ok(Some(value_bytes)) = self.db.get_cf(&cf, serialized_key) else {
+            return None;
+        };
+
+        bincode::deserialize(&value_bytes).ok()
+    }
+
+    #[allow(dead_code)]
+    pub fn multi_get<I>(&self, keys: I) -> anyhow::Result<Vec<(K, V)>>
+    where
+        I: IntoIterator<Item = K> + Clone,
+    {
+        let cf = self.handle();
+        let cf_repeated = iter::repeat(&cf);
+
+        let serialized_keys_with_cfs = keys
+            .clone()
+            .into_iter()
+            .zip(cf_repeated)
+            .map(|(k, cf)| bincode::serialize(&k).map(|k| (cf, k)))
+            .collect::<Result<Vec<_>, _>>()?;
+
+        Ok(self
+            .db
+            .multi_get_cf(serialized_keys_with_cfs)
+            .into_iter()
+            .zip(keys)
+            .filter_map(|(value, key)| {
+                if let Ok(Some(value)) = value {
+                    let Ok(value) = bincode::deserialize::<V>(&value) else { return None }; // XXX: Maybe we should fail on a failed conversion instead of ignoring;
+                    Some((key, value))
+                } else {
+                    None
+                }
+            })
+            .collect())
+    }
+
+    // Mimics the 'insert' functionality of a HashMap
+    pub fn insert(&self, key: K, value: V) {
+        let cf = self.handle();
+
+        let serialized_key = bincode::serialize(&key).unwrap();
+        let serialized_value = bincode::serialize(&value).unwrap();
+        self.db.put_cf(&cf, serialized_key, serialized_value).unwrap();
+    }
+
+    pub fn prepare_batch_insertion<I>(&self, changes: I, batch: &mut WriteBatch)
+    where
+        I: IntoIterator<Item = (K, V)>,
+    {
+        let cf = self.handle();
+
+        for (key, value) in changes {
+            let serialized_key = bincode::serialize(&key).unwrap();
+            let serialized_value = bincode::serialize(&value).unwrap();
+            // Add each serialized key-value pair to the batch
+            batch.put_cf(&cf, serialized_key, serialized_value);
+        }
+    }
+
+    // Deletes an entry from the database by key
+    pub fn delete(&self, key: &K) -> Result<()> {
+        let serialized_key = bincode::serialize(key)?;
+        let cf = self.handle();
+
+        self.db.delete_cf(&cf, serialized_key)?;
+        Ok(())
+    }
+
+    // Custom method that combines entry and or_insert_with from a HashMap
+    pub fn get_or_insert_with<F>(&self, key: K, default: F) -> V
+    where
+        F: FnOnce() -> V,
+    {
+        match self.get(&key) {
+            Some(value) => value,
+            None => {
+                let new_value = default();
+                self.insert(key, new_value.clone());
+                new_value
+            }
+        }
+    }
+
+    pub fn iter_start(&self) -> RocksDBIterator<K, V> {
+        let cf = self.handle();
+
+        let iter = self.db.iterator_cf(&cf, IteratorMode::Start);
+        RocksDBIterator::<K, V>::new(iter)
+    }
+
+    pub fn iter_end(&self) -> RocksDBIterator<K, V> {
+        let cf = self.handle();
+
+        let iter = self.db.iterator_cf(&cf, IteratorMode::End);
+        RocksDBIterator::<K, V>::new(iter)
+    }
+
+    pub fn iter_from<P: Serialize + for<'de> Deserialize<'de> + std::hash::Hash + Eq>(
+        &self,
+        key_prefix: P,
+        direction: rocksdb::Direction,
+    ) -> RocksDBIterator<K, V> {
+        let cf = self.handle();
+        let serialized_key = bincode::serialize(&key_prefix).unwrap();
+
+        let iter = self.db.iterator_cf(&cf, IteratorMode::From(&serialized_key, direction));
+        RocksDBIterator::<K, V>::new(iter)
+    }
+
+    #[allow(dead_code)]
+    pub fn last(&self) -> Option<(K, V)> {
+        let cf = self.handle();
+
+        let mut iter = self.db.iterator_cf(&cf, IteratorMode::End);
+        if let Some(Ok((k, v))) = iter.next() {
+            let key = bincode::deserialize(&k).unwrap();
+            let value = bincode::deserialize(&v).unwrap();
+            Some((key, value))
+        } else {
+            None
+        }
+    }
+
+    pub fn last_key(&self) -> Option<K> {
+        let cf = self.handle();
+
+        let mut iter = self.db.iterator_cf(&cf, IteratorMode::End);
+        if let Some(Ok((k, _v))) = iter.next() {
+            let key = bincode::deserialize(&k).unwrap();
+            Some(key)
+        } else {
+            None
+        }
+    }
+}
+
+pub struct RocksDBIterator<'a, K, V> {
+    iter: DBIteratorWithThreadMode<'a, DB>,
+    _marker: PhantomData<(K, V)>,
+}
+
+impl<'a, K: Serialize + for<'de> Deserialize<'de> + std::hash::Hash + Eq, V: Serialize + for<'de> Deserialize<'de> + Clone> RocksDBIterator<'a, K, V> {
+    pub fn new(iter: DBIteratorWithThreadMode<'a, DB>) -> Self {
+        Self { iter, _marker: PhantomData }
+    }
+}
+
+/// Custom iterator for navigating RocksDB entries.
+impl<'a, K: Serialize + for<'de> Deserialize<'de> + std::hash::Hash + Eq, V: Serialize + for<'de> Deserialize<'de> + Clone> Iterator
+    for RocksDBIterator<'a, K, V>
+{
+    type Item = (K, V);
+
+    /// Retrieves the next key-value pair from the database.
+    ///
+    /// Returns:
+    /// - `Some((K, V))` if a valid key-value pair is found.
+    /// - `None` if there are no more items to process, or if only special/control keys remain.
+    fn next(&mut self) -> Option<Self::Item> {
+        let next = self.iter.next()?;
+        let (key, value) = next.unwrap();
+
+        let deserialized_key = bincode::deserialize::<K>(&key).unwrap();
+        let deserialized_value = bincode::deserialize::<V>(&value).unwrap();
+        Some((deserialized_key, deserialized_value))
+    }
+}
diff --git a/src/eth/storage/rocks/rocks_config.rs b/src/eth/storage/rocks/rocks_config.rs
new file mode 100644
index 000000000..936030d86
--- /dev/null
+++ b/src/eth/storage/rocks/rocks_config.rs
@@ -0,0 +1,159 @@
+use rocksdb::BlockBasedOptions;
+use rocksdb::Cache;
+use rocksdb::Options;
+
+#[derive(Debug, Clone, Copy)]
+pub enum CacheSetting {
+    /// Enabled cache with the given size in bytes
+    Enabled(usize),
+    Disabled,
+}
+
+#[derive(Debug, Clone, Copy)]
+pub enum DbConfig {
+    LargeSSTFiles,
+    FastWriteSST,
+    Default,
+}
+
+impl Default for DbConfig {
+    fn default() -> Self {
+        Self::Default
+    }
+}
+
+impl DbConfig {
+    pub fn to_options(self, cache_setting: CacheSetting) -> Options {
+        let mut opts = Options::default();
+        let mut block_based_options = BlockBasedOptions::default();
+
+        opts.create_if_missing(true);
+        opts.create_missing_column_families(true);
+        opts.increase_parallelism(16);
+
+        // NOTE: As per the rocks db wiki: "The overhead of statistics is usually small but non-negligible. We usually observe an overhead of 5%-10%."
+ #[cfg(feature = "metrics")] + opts.enable_statistics(); + #[cfg(feature = "metrics")] + opts.set_statistics_level(rocksdb::statistics::StatsLevel::ExceptTimeForMutex); + + match self { + DbConfig::LargeSSTFiles => { + // Set the compaction style to Level Compaction + opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); + + // Configure the size of SST files at each level + opts.set_target_file_size_base(512 * 1024 * 1024); + + // Increase the file size multiplier to expand file size at upper levels + opts.set_target_file_size_multiplier(2); // Each level grows in file size quicker + + // Reduce the number of L0 files that trigger compaction, increasing frequency + opts.set_level_zero_file_num_compaction_trigger(2); + + // Reduce thresholds for slowing and stopping writes, which forces more frequent compaction + opts.set_level_zero_slowdown_writes_trigger(10); + opts.set_level_zero_stop_writes_trigger(20); + + // Increase the max bytes for L1 to allow more data before triggering compaction + opts.set_max_bytes_for_level_base(2048 * 1024 * 1024); + + // Increase the level multiplier to aggressively increase space at each level + opts.set_max_bytes_for_level_multiplier(8.0); // Exponential growth of levels is more pronounced + + // Configure block size to optimize for larger blocks, improving sequential read performance + block_based_options.set_block_size(128 * 1024); // 128KB blocks + + // Increase the number of write buffers to delay flushing, optimizing CPU usage for compaction + opts.set_max_write_buffer_number(5); + opts.set_write_buffer_size(128 * 1024 * 1024); // 128MB per write buffer + + // Keep a higher number of open files to accommodate more files being produced by aggressive compaction + opts.set_max_open_files(20000); + + // Apply more aggressive compression settings, if I/O and CPU permit + opts.set_compression_per_level(&[ + rocksdb::DBCompressionType::Lz4, + rocksdb::DBCompressionType::Zstd, // Use Zstd for higher compression from L1 onwards + ]); + } + DbConfig::FastWriteSST => { + // Continue using Level Compaction due to its effective use of I/O and CPU for writes + opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); + + // Increase initial SST file sizes to reduce the frequency of writes to disk + opts.set_target_file_size_base(512 * 1024 * 1024); // Starting at 512MB for L1 + + // Minimize the file size multiplier to control the growth of file sizes at upper levels + opts.set_target_file_size_multiplier(1); // Minimal increase in file size at upper levels + + // Increase triggers for write slowdown and stop to maximize buffer before I/O actions + opts.set_level_zero_file_num_compaction_trigger(100); // Slow down writes at 100 L0 files + opts.set_level_zero_stop_writes_trigger(200); // Stop writes at 200 L0 files + + // Expand the maximum bytes for base level to further delay the need for compaction-related I/O + opts.set_max_bytes_for_level_base(2048 * 1024 * 1024); + + // Use a higher level multiplier to increase space exponentially at higher levels + opts.set_max_bytes_for_level_multiplier(10.0); + + // Opt for larger block sizes to decrease the number of read and write operations to disk + block_based_options.set_block_size(512 * 1024); // 512KB blocks + + // Maximize the use of write buffers to extend the time data stays in memory before flushing + opts.set_max_write_buffer_number(16); + opts.set_write_buffer_size(1024 * 1024 * 1024); // 1GB per write buffer + + // Allow a very high number of open files to minimize the overhead of opening and 
closing files + opts.set_max_open_files(20000); + + // Choose compression that balances CPU use and effective storage reduction + opts.set_compression_per_level(&[rocksdb::DBCompressionType::Lz4, rocksdb::DBCompressionType::Zstd]); + + // Enable settings that make full use of CPU to handle more data in memory and process compaction + opts.set_allow_concurrent_memtable_write(true); + opts.set_enable_write_thread_adaptive_yield(true); + } + DbConfig::Default => { + block_based_options.set_ribbon_filter(15.5); // https://github.com/facebook/rocksdb/wiki/RocksDB-Bloom-Filter + + opts.set_allow_concurrent_memtable_write(true); + opts.set_enable_write_thread_adaptive_yield(true); + + let transform = rocksdb::SliceTransform::create_fixed_prefix(10); + opts.set_prefix_extractor(transform); + opts.set_memtable_prefix_bloom_ratio(0.2); + + // Enable a size-tiered compaction style, which is good for workloads with a high rate of updates and overwrites + opts.set_compaction_style(rocksdb::DBCompactionStyle::Universal); + + let mut universal_compact_options = rocksdb::UniversalCompactOptions::default(); + universal_compact_options.set_size_ratio(10); + universal_compact_options.set_min_merge_width(2); + universal_compact_options.set_max_merge_width(6); + universal_compact_options.set_max_size_amplification_percent(50); + universal_compact_options.set_compression_size_percent(-1); + universal_compact_options.set_stop_style(rocksdb::UniversalCompactionStopStyle::Total); + opts.set_universal_compaction_options(&universal_compact_options); + + let pt_opts = rocksdb::PlainTableFactoryOptions { + user_key_length: 0, + bloom_bits_per_key: 10, + hash_table_ratio: 0.75, + index_sparseness: 8, + encoding_type: rocksdb::KeyEncodingType::Plain, // Default encoding + full_scan_mode: false, // Optimized for point lookups rather than full scans + huge_page_tlb_size: 0, // Not using huge pages + store_index_in_file: false, // Store index in memory for faster access + }; + opts.set_plain_table_factory(&pt_opts); + } + } + if let CacheSetting::Enabled(cache_size) = cache_setting { + let cache = Cache::new_lru_cache(cache_size); + block_based_options.set_block_cache(&cache); + } + opts.set_block_based_table_factory(&block_based_options); + opts + } +} diff --git a/src/eth/storage/rocks/rocks_db.rs b/src/eth/storage/rocks/rocks_db.rs index 4e233a2e3..4ef1e0fb0 100644 --- a/src/eth/storage/rocks/rocks_db.rs +++ b/src/eth/storage/rocks/rocks_db.rs @@ -1,567 +1,58 @@ -#[cfg(feature = "metrics")] use std::collections::HashMap; -use std::marker::PhantomData; +use std::path::Path; use std::sync::Arc; -#[cfg(feature = "metrics")] -use std::sync::Mutex; use anyhow::anyhow; -use anyhow::Result; use rocksdb::backup::BackupEngine; use rocksdb::backup::BackupEngineOptions; -use rocksdb::backup::RestoreOptions; -#[cfg(feature = "metrics")] -use rocksdb::statistics::Histogram; -#[cfg(feature = "metrics")] -use rocksdb::statistics::Ticker; -use rocksdb::BlockBasedOptions; -use rocksdb::DBIteratorWithThreadMode; use rocksdb::Env; -use rocksdb::IteratorMode; use rocksdb::Options; -use rocksdb::WriteBatch; use rocksdb::DB; -use serde::Deserialize; -use serde::Serialize; -#[cfg(feature = "metrics")] -type HistogramInt = u32; -#[cfg(feature = "metrics")] -type Sum = u64; -#[cfg(feature = "metrics")] -type Count = u64; +use crate::eth::storage::rocks::rocks_config::CacheSetting; +use crate::eth::storage::rocks::rocks_config::DbConfig; -#[cfg(feature = "metrics")] -use crate::infra::metrics; - -pub enum DbConfig { - LargeSSTFiles, - 
FastWriteSST, - Default, -} - -// A generic struct that abstracts over key-value pairs stored in RocksDB. -pub struct RocksDb { - pub db: DB, - pub opts: Options, - _marker: PhantomData<(K, V)>, - // Last collected stats for a histogram - #[cfg(feature = "metrics")] - pub prev_stats: Mutex>, -} - -impl Deserialize<'de> + std::hash::Hash + Eq, V: Serialize + for<'de> Deserialize<'de> + Clone> RocksDb { - pub fn new(db_path: &str, config: DbConfig) -> anyhow::Result> { - let mut opts = Options::default(); - let mut block_based_options = BlockBasedOptions::default(); - - opts.create_if_missing(true); - opts.increase_parallelism(16); - - // NOTE: As per the rocks db wiki: "The overhead of statistics is usually small but non-negligible. We usually observe an overhead of 5%-10%." - #[cfg(feature = "metrics")] - opts.enable_statistics(); - #[cfg(feature = "metrics")] - opts.set_statistics_level(rocksdb::statistics::StatsLevel::ExceptTimeForMutex); - - match config { - DbConfig::LargeSSTFiles => { - // Set the compaction style to Level Compaction - opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); - - // Configure the size of SST files at each level - opts.set_target_file_size_base(512 * 1024 * 1024); - - // Increase the file size multiplier to expand file size at upper levels - opts.set_target_file_size_multiplier(2); // Each level grows in file size quicker - - // Reduce the number of L0 files that trigger compaction, increasing frequency - opts.set_level_zero_file_num_compaction_trigger(2); - - // Reduce thresholds for slowing and stopping writes, which forces more frequent compaction - opts.set_level_zero_slowdown_writes_trigger(10); - opts.set_level_zero_stop_writes_trigger(20); - - // Increase the max bytes for L1 to allow more data before triggering compaction - opts.set_max_bytes_for_level_base(2048 * 1024 * 1024); - - // Increase the level multiplier to aggressively increase space at each level - opts.set_max_bytes_for_level_multiplier(8.0); // Exponential growth of levels is more pronounced - - // Configure block size to optimize for larger blocks, improving sequential read performance - block_based_options.set_block_size(128 * 1024); // 128KB blocks - - // Increase the number of write buffers to delay flushing, optimizing CPU usage for compaction - opts.set_max_write_buffer_number(5); - opts.set_write_buffer_size(128 * 1024 * 1024); // 128MB per write buffer - - // Keep a higher number of open files to accommodate more files being produced by aggressive compaction - opts.set_max_open_files(20000); - - // Apply more aggressive compression settings, if I/O and CPU permit - opts.set_compression_per_level(&[ - rocksdb::DBCompressionType::Lz4, - rocksdb::DBCompressionType::Zstd, // Use Zstd for higher compression from L1 onwards - ]); - } - DbConfig::FastWriteSST => { - // Continue using Level Compaction due to its effective use of I/O and CPU for writes - opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); - - // Increase initial SST file sizes to reduce the frequency of writes to disk - opts.set_target_file_size_base(512 * 1024 * 1024); // Starting at 512MB for L1 - - // Minimize the file size multiplier to control the growth of file sizes at upper levels - opts.set_target_file_size_multiplier(1); // Minimal increase in file size at upper levels - - // Increase triggers for write slowdown and stop to maximize buffer before I/O actions - opts.set_level_zero_file_num_compaction_trigger(100); // Slow down writes at 100 L0 files - 
opts.set_level_zero_stop_writes_trigger(200); // Stop writes at 200 L0 files - - // Expand the maximum bytes for base level to further delay the need for compaction-related I/O - opts.set_max_bytes_for_level_base(2048 * 1024 * 1024); - - // Use a higher level multiplier to increase space exponentially at higher levels - opts.set_max_bytes_for_level_multiplier(10.0); - - // Opt for larger block sizes to decrease the number of read and write operations to disk - block_based_options.set_block_size(512 * 1024); // 512KB blocks - - // Maximize the use of write buffers to extend the time data stays in memory before flushing - opts.set_max_write_buffer_number(16); - opts.set_write_buffer_size(1024 * 1024 * 1024); // 1GB per write buffer - - // Allow a very high number of open files to minimize the overhead of opening and closing files - opts.set_max_open_files(20000); - - // Choose compression that balances CPU use and effective storage reduction - opts.set_compression_per_level(&[rocksdb::DBCompressionType::Lz4, rocksdb::DBCompressionType::Zstd]); - - // Enable settings that make full use of CPU to handle more data in memory and process compaction - opts.set_allow_concurrent_memtable_write(true); - opts.set_enable_write_thread_adaptive_yield(true); - } - DbConfig::Default => { - block_based_options.set_ribbon_filter(15.5); // https://github.com/facebook/rocksdb/wiki/RocksDB-Bloom-Filter - - opts.set_allow_concurrent_memtable_write(true); - opts.set_enable_write_thread_adaptive_yield(true); - - let transform = rocksdb::SliceTransform::create_fixed_prefix(10); - opts.set_prefix_extractor(transform); - opts.set_memtable_prefix_bloom_ratio(0.2); - - // Enable a size-tiered compaction style, which is good for workloads with a high rate of updates and overwrites - opts.set_compaction_style(rocksdb::DBCompactionStyle::Universal); - - let mut universal_compact_options = rocksdb::UniversalCompactOptions::default(); - universal_compact_options.set_size_ratio(10); - universal_compact_options.set_min_merge_width(2); - universal_compact_options.set_max_merge_width(6); - universal_compact_options.set_max_size_amplification_percent(50); - universal_compact_options.set_compression_size_percent(-1); - universal_compact_options.set_stop_style(rocksdb::UniversalCompactionStopStyle::Total); - opts.set_universal_compaction_options(&universal_compact_options); - - let pt_opts = rocksdb::PlainTableFactoryOptions { - user_key_length: 0, - bloom_bits_per_key: 10, - hash_table_ratio: 0.75, - index_sparseness: 8, - encoding_type: rocksdb::KeyEncodingType::Plain, // Default encoding - full_scan_mode: false, // Optimized for point lookups rather than full scans - huge_page_tlb_size: 0, // Not using huge pages - store_index_in_file: false, // Store index in memory for faster access - }; - opts.set_plain_table_factory(&pt_opts); - } - } - opts.set_block_based_table_factory(&block_based_options); - let db = match DB::open(&opts, db_path) { - Ok(db) => db, - Err(e) => { - tracing::error!("Failed to open RocksDB: {}", e); - DB::repair(&opts, db_path)?; - DB::open(&opts, db_path)? 
- } - }; //XXX in case of corruption, use DB - - Ok(Arc::new(RocksDb { - db, - opts, - _marker: PhantomData, - #[cfg(feature = "metrics")] - prev_stats: Mutex::new(HashMap::new()), - })) - } - - pub fn backup_path(&self) -> anyhow::Result { - Ok(format!("{}backup", self.db.path().to_str().ok_or(anyhow!("Invalid path"))?)) - } - - fn backup_engine(&self) -> anyhow::Result { - let backup_opts = BackupEngineOptions::new(self.backup_path()?)?; - let backup_env = Env::new()?; - Ok(BackupEngine::open(&backup_opts, &backup_env)?) - } - - pub fn backup(&self) -> anyhow::Result<()> { - let mut backup_engine = self.backup_engine()?; - backup_engine.create_new_backup(&self.db)?; - backup_engine.purge_old_backups(2)?; - Ok(()) - } - - pub fn restore(&self) -> anyhow::Result<()> { - let mut backup_engine = self.backup_engine()?; - let restore_options = RestoreOptions::default(); - backup_engine.restore_from_latest_backup(self.db.path(), self.backup_path()?, &restore_options)?; - Ok(()) - } - - // Clears the database - pub fn clear(&self) -> Result<()> { - let mut batch = WriteBatch::default(); - for item in self.db.iterator(IteratorMode::Start) { - let (key, _) = item?; // Handle or unwrap the Result - batch.delete(key); - } - self.db.write(batch)?; - Ok(()) - } - - pub fn get(&self, key: &K) -> Option { - let Ok(serialized_key) = bincode::serialize(key) else { return None }; - let Ok(Some(value_bytes)) = self.db.get(serialized_key) else { return None }; - - bincode::deserialize(&value_bytes).ok() - } - - pub fn multi_get(&self, keys: I) -> anyhow::Result> - where - I: IntoIterator + Clone, - { - let serialized_keys = keys.clone().into_iter().map(|k| bincode::serialize(&k)).collect::, _>>()?; - Ok(self - .db - .multi_get(serialized_keys) - .into_iter() - .zip(keys) - .filter_map(|(value, key)| { - if let Ok(Some(value)) = value { - let Ok(value) = bincode::deserialize::(&value) else { return None }; // XXX: Maybe we should fail on a failed conversion instead of ignoring; - Some((key, value)) - } else { - None - } - }) - .collect()) - } - - pub fn get_current_block_number(&self) -> u64 { - let Ok(serialized_key) = bincode::serialize(&"current_block") else { - return 0; - }; - let Ok(Some(value_bytes)) = self.db.get(serialized_key) else { return 0 }; - - bincode::deserialize(&value_bytes).ok().unwrap_or(0) - } - - pub fn get_index_block_number(&self) -> u64 { - self.last_index().map(|(block_number, _)| block_number).unwrap_or(0) - } - - // Mimics the 'insert' functionality of a HashMap - pub fn insert(&self, key: K, value: V) { - let serialized_key = bincode::serialize(&key).unwrap(); - let serialized_value = bincode::serialize(&value).unwrap(); - self.db.put(serialized_key, serialized_value).unwrap(); - } - - pub fn insert_batch(&self, changes: Vec<(K, V)>, current_block: Option) { - let mut batch = WriteBatch::default(); - - for (key, value) in changes { - let serialized_key = bincode::serialize(&key).unwrap(); - let serialized_value = bincode::serialize(&value).unwrap(); - // Add each serialized key-value pair to the batch - batch.put(serialized_key, serialized_value); - } - - if let Some(current_block) = current_block { - let serialized_block_key = bincode::serialize(&"current_block").unwrap(); - let serialized_block_value = bincode::serialize(¤t_block).unwrap(); - batch.put(serialized_block_key, serialized_block_value); - } - - // Execute the batch operation atomically - self.db.write(batch).unwrap(); - } - - /// inserts data but keep a block as key pointing to the keys inserted in a given block - 
/// this makes for faster search based on block_number, ergo index - pub fn insert_batch_indexed(&self, changes: Vec<(K, V)>, current_block: u64) { - let mut batch = WriteBatch::default(); - - let mut keys = vec![]; - - for (key, value) in changes { - let serialized_key = bincode::serialize(&key).unwrap(); - let serialized_value = bincode::serialize(&value).unwrap(); - - keys.push(key); - - // Add each serialized key-value pair to the batch - batch.put(serialized_key, serialized_value); - } - - let serialized_block_value = bincode::serialize(¤t_block).unwrap(); - let serialized_keys = bincode::serialize(&keys).unwrap(); - batch.put(serialized_block_value, serialized_keys); - - // Execute the batch operation atomically - self.db.write(batch).unwrap(); - } - - // Deletes an entry from the database by key - pub fn delete(&self, key: &K) -> Result<()> { - let serialized_key = bincode::serialize(key)?; - self.db.delete(serialized_key)?; - Ok(()) - } - - // Deletes an entry from the database by key - pub fn delete_index(&self, key: u64) -> Result<()> { - let serialized_key = bincode::serialize(&key)?; - //XXX check if value is a vec that can be deserialized as a safety measure - self.db.delete(serialized_key)?; - Ok(()) - } - - // Custom method that combines entry and or_insert_with from a HashMap - pub fn entry_or_insert_with(&self, key: K, default: F) -> V - where - F: FnOnce() -> V, - { - match self.get(&key) { - Some(value) => value, - None => { - let new_value = default(); - self.insert(key, new_value.clone()); - new_value - } - } - } - - pub fn iter_start(&self) -> RocksDBIterator { - let iter = self.db.iterator(IteratorMode::Start); - RocksDBIterator::::new(iter) - } - - pub fn iter_end(&self) -> RocksDBIterator { - let iter = self.db.iterator(IteratorMode::End); - RocksDBIterator::::new(iter) - } - - pub fn indexed_iter_end(&self) -> IndexedRocksDBIterator { - let iter = self.db.iterator(IteratorMode::End); - IndexedRocksDBIterator::::new(iter) - } - - pub fn iter_from Deserialize<'de> + std::hash::Hash + Eq>( - &self, - key_prefix: P, - direction: rocksdb::Direction, - ) -> RocksDBIterator { - let serialized_key = bincode::serialize(&key_prefix).unwrap(); - let iter = self.db.iterator(IteratorMode::From(&serialized_key, direction)); - RocksDBIterator::::new(iter) - } - - pub fn last_index(&self) -> Option<(u64, Vec)> { - let iter = self.db.iterator(IteratorMode::End); - IndexedRocksDBIterator::::new(iter).next() - } - - pub fn last(&self) -> Option<(K, V)> { - let mut iter = self.db.iterator(IteratorMode::End); - if let Some(Ok((k, v))) = iter.next() { - let key = bincode::deserialize(&k).unwrap(); - let value = bincode::deserialize(&v).unwrap(); - Some((key, value)) - } else { - None - } - } - - #[cfg(feature = "metrics")] - pub fn get_histogram_average_in_interval(&self, hist: Histogram) -> u64 { - // The stats are cumulative since opening the db - // we can get the average in the time interval with: avg = (new_sum - sum)/(new_count - count) - - let mut prev_values = self.prev_stats.lock().unwrap(); - let (prev_sum, prev_count): (Sum, Count) = *prev_values.get(&(hist as u32)).unwrap_or(&(0, 0)); - let data = self.opts.get_histogram_data(hist); - let data_count = data.count(); - let data_sum = data.sum(); - - let Some(avg) = (data_sum - prev_sum).checked_div(data_count - prev_count) else { - return 0; - }; - - prev_values.insert(hist as u32, (data_sum, data_count)); - avg - } - - #[cfg(feature = "metrics")] - pub fn export_metrics(&self) { - let db_get = 
self.opts.get_histogram_data(Histogram::DbGet);
-        let db_write = self.opts.get_histogram_data(Histogram::DbWrite);
-
-        let block_cache_miss = self.opts.get_ticker_count(Ticker::BlockCacheMiss);
-        let block_cache_hit = self.opts.get_ticker_count(Ticker::BlockCacheHit);
-        let bytes_written = self.opts.get_ticker_count(Ticker::BytesWritten);
-        let bytes_read = self.opts.get_ticker_count(Ticker::BytesRead);
-
-        let db_name = self.db.path().file_name().unwrap().to_str();
-
-        metrics::set_rocks_db_get(db_get.count(), db_name);
-        metrics::set_rocks_db_write(db_write.count(), db_name);
-        metrics::set_rocks_block_cache_miss(block_cache_miss, db_name);
-        metrics::set_rocks_block_cache_hit(block_cache_hit, db_name);
-        metrics::set_rocks_bytes_written(bytes_written, db_name);
-        metrics::set_rocks_bytes_read(bytes_read, db_name);
-
-        metrics::set_rocks_compaction_time(self.get_histogram_average_in_interval(Histogram::CompactionTime), db_name);
-        metrics::set_rocks_compaction_cpu_time(self.get_histogram_average_in_interval(Histogram::CompactionCpuTime), db_name);
-        metrics::set_rocks_flush_time(self.get_histogram_average_in_interval(Histogram::FlushTime), db_name);
-    }
-}
-
-pub struct RocksDBIterator<'a, K, V> {
-    iter: DBIteratorWithThreadMode<'a, DB>,
-    _marker: PhantomData<(K, V)>,
-}
-
-impl<'a, K: Serialize + for<'de> Deserialize<'de> + std::hash::Hash + Eq, V: Serialize + for<'de> Deserialize<'de> + Clone> RocksDBIterator<'a, K, V> {
-    pub fn new(iter: DBIteratorWithThreadMode<'a, DB>) -> Self {
-        Self { iter, _marker: PhantomData }
-    }
-}
-
-/// Custom iterator for navigating RocksDB entries.
-///
-/// This iterator is designed to skip over specific keys used for internal control purposes, such as:
-/// - `"current_block"`: Used to indicate the current block number in the database.
-/// - Keys representing index keys (if deserialized as `u64`): Used for various indexing purposes.
+/// Create or open the Database with the configs applied to all column families
 ///
-/// The iterator will:
-/// - Ignore any entries where the key is `"current_block"`.
-/// - Attempt to deserialize all other keys to the generic type `K`. If deserialization fails, it assumes
-///   the key might be an index key or improperly formatted, and skips it.
-impl<'a, K: Serialize + for<'de> Deserialize<'de> + std::hash::Hash + Eq, V: Serialize + for<'de> Deserialize<'de> + Clone> Iterator
-    for RocksDBIterator<'a, K, V>
-{
-    type Item = (K, V);
+/// The returned `Options` need to be stored to refer to the DB metrics
+#[tracing::instrument(skip_all)]
+pub fn create_or_open_db(path: impl AsRef<Path>, cf_configs: &HashMap<&'static str, Options>) -> anyhow::Result<(Arc<DB>, Options)> {
+    let path = path.as_ref();

-    /// Retrieves the next key-value pair from the database, skipping over special control keys and
-    /// potentially misformatted keys.
-    ///
-    /// Returns:
-    /// - `Some((K, V))` if a valid key-value pair is found.
-    /// - `None` if there are no more items to process, or if only special/control keys remain.
- fn next(&mut self) -> Option { - for key_value_result in self.iter.by_ref() { - let Ok((key, value)) = key_value_result else { continue }; + // settings for each Column Family to be created + let cf_config_iter = cf_configs.iter().map(|(name, opts)| (*name, opts.clone())); - // Check if the key is a special 'current_block' key and skip it - if key == bincode::serialize(&"current_block").unwrap().into_boxed_slice() { - continue; // Move to the next key-value pair - } + // options for the "default" column family (used only to refer to the DB metrics) + let db_opts = DbConfig::Default.to_options(CacheSetting::Disabled); - // Attempt to deserialize the key to type `K` - if let Ok(deserialized_key) = bincode::deserialize::(&key) { - // Attempt to deserialize the value to type `V` - if let Ok(deserialized_value) = bincode::deserialize::(&value) { - // Return the deserialized key-value pair if both are successful - return Some((deserialized_key, deserialized_value)); - } - } - // If deserialization fails, continue to the next item - } - // Return None if all items are processed or if all remaining items fail conditions - None - } -} + let open_db = || DB::open_cf_with_opts(&db_opts, path, cf_config_iter.clone()); -impl<'a, K: Serialize + for<'de> Deserialize<'de> + std::hash::Hash + Eq> IndexedRocksDBIterator<'a, K> { - pub fn new(iter: DBIteratorWithThreadMode<'a, DB>) -> Self { - Self { iter, _marker: PhantomData } - } -} + let db = match open_db() { + Ok(db) => db, + Err(e) => { + tracing::error!("Failed to open RocksDB: {}", e); + DB::repair(&db_opts, path)?; + open_db()? + } + }; // XXX in case of corruption, use DB -pub struct IndexedRocksDBIterator<'a, K> { - iter: DBIteratorWithThreadMode<'a, DB>, - _marker: PhantomData>, + Ok((Arc::new(db), db_opts)) } -impl<'a, K: Serialize + for<'de> Deserialize<'de> + std::hash::Hash + Eq> Iterator for IndexedRocksDBIterator<'a, K> { - type Item = (u64, Vec); - - fn next(&mut self) -> Option { - for key_value_result in self.iter.by_ref() { - let Ok((key, value)) = key_value_result else { continue }; - - if let Ok(index_key) = bincode::deserialize::(&key) { - if let Ok(index_values) = bincode::deserialize::>(&value) { - return Some((index_key, index_values)); - } - } - } - None - } +#[tracing::instrument(skip_all)] +pub fn create_new_backup(db: &DB) -> anyhow::Result<()> { + tracing::info!("Creating new DB backup"); + let mut backup_engine = backup_engine(db)?; + backup_engine.create_new_backup(db)?; + backup_engine.purge_old_backups(2)?; + Ok(()) } -#[cfg(test)] -mod tests { - use std::collections::HashMap; - use std::collections::HashSet; - use std::fs; - use std::sync::Arc; - - use fake::Fake; - use fake::Faker; - - use super::RocksDb; - use crate::eth::primitives::SlotIndex; - use crate::eth::primitives::SlotValue; - - #[test] - fn test_multi_get() { - let db: Arc> = RocksDb::new("./data/slots_test.rocksdb", super::DbConfig::Default).unwrap(); - - let slots: HashMap = (0..1000).map(|_| (Faker.fake(), Faker.fake())).collect(); - let extra_slots: HashMap = (0..1000) - .map(|_| (Faker.fake(), Faker.fake())) - .filter(|(key, _)| !slots.contains_key(key)) - .collect(); - - db.insert_batch(slots.clone().into_iter().collect(), None); - db.insert_batch(extra_slots.clone().into_iter().collect(), None); - - let extra_keys: HashSet = (0..1000) - .map(|_| Faker.fake()) - .filter(|key| !extra_slots.contains_key(key) && !slots.contains_key(key)) - .collect(); - - let keys: Vec = slots.keys().cloned().chain(extra_keys).collect(); - let result = 
db.multi_get(keys).expect("this should not fail");
-
-        assert_eq!(result.len(), slots.keys().len());
-        for (idx, value) in result {
-            assert_eq!(value, *slots.get(&idx).expect("should not be None"));
-        }
+fn backup_engine(db: &DB) -> anyhow::Result<BackupEngine> {
+    let db_path = db.path().to_str().ok_or(anyhow!("Invalid path"))?;
+    let backup_path = format!("{db_path}backup");
+    let backup_opts = BackupEngineOptions::new(backup_path)?;

-        fs::remove_dir_all("./data/slots_test.rocksdb").unwrap();
-    }
+    let backup_env = Env::new()?;
+    Ok(BackupEngine::open(&backup_opts, &backup_env)?)
 }
diff --git a/src/eth/storage/rocks/rocks_permanent.rs b/src/eth/storage/rocks/rocks_permanent.rs
index f39453b2f..d35de95ba 100644
--- a/src/eth/storage/rocks/rocks_permanent.rs
+++ b/src/eth/storage/rocks/rocks_permanent.rs
@@ -1,14 +1,8 @@
+use std::path::Path;
 use std::sync::atomic::AtomicU64;
-use std::sync::atomic::AtomicUsize;
 use std::sync::atomic::Ordering;
-use std::sync::Arc;
-use std::thread;
-
-use anyhow::Context;

 use super::rocks_state::RocksStorageState;
-use super::types::AddressRocksdb;
-use super::types::SlotIndexRocksdb;
 use crate::eth::primitives::Account;
 use crate::eth::primitives::Address;
 use crate::eth::primitives::Block;
@@ -24,25 +18,38 @@ use crate::eth::primitives::StoragePointInTime;
 use crate::eth::primitives::TransactionMined;
 use crate::eth::storage::PermanentStorage;

-/// used for multiple purposes, such as TPS counting and backup management
-const TRANSACTION_LOOP_THRESHOLD: usize = 120_000;
-
-static TRANSACTIONS_COUNT: AtomicUsize = AtomicUsize::new(0);
-
 #[derive(Debug)]
 pub struct RocksPermanentStorage {
     pub state: RocksStorageState,
-    pub block_number: AtomicU64,
+    block_number: AtomicU64,
+    enable_backups: bool,
 }

 impl RocksPermanentStorage {
-    pub fn new(rocks_path_prefix: Option<String>) -> anyhow::Result<Self> {
-        tracing::info!("creating rocksdb storage");
+    pub fn new(enable_backups: bool, rocks_path_prefix: Option<String>) -> anyhow::Result<Self> {
+        tracing::info!("setting up rocksdb storage");
+        let path = if let Some(prefix) = rocks_path_prefix {
+            // run some checks on the given prefix
+            assert!(!prefix.is_empty(), "given prefix for RocksDB is empty, try not providing the flag");
+            if Path::new(&prefix).is_dir() || Path::new(&prefix).iter().count() > 1 {
+                tracing::warn!(?prefix, "given prefix for RocksDB might put it in another folder");
+            }
+
+            let path = format!("data/{prefix}-rocksdb");
+            tracing::info!("starting rocksdb storage - at custom path: '{:?}'", path);
+            path
+        } else {
+            tracing::info!("starting rocksdb storage - at default path: 'data/rocksdb'");
+            "data/rocksdb".to_string()
+        };

-        let state = RocksStorageState::new(rocks_path_prefix);
-        state.sync_data()?;
+        let state = RocksStorageState::new(path);
         let block_number = state.preload_block_number()?;

-        Ok(Self { state, block_number })
+        Ok(Self {
+            state,
+            block_number,
+            enable_backups,
+        })
     }

     // -------------------------------------------------------------------------
@@ -101,95 +108,29 @@ impl PermanentStorage for RocksPermanentStorage {
         {
             self.state.export_metrics();
         }
-        let account_changes = block.compact_account_changes();
-
-        //TODO move those loops inside the spawn and check if speed improves
-        let mut txs_batch = vec![];
-        let mut logs_batch = vec![];
-        for transaction in block.transactions.clone() {
-            txs_batch.push((transaction.input.hash.into(), transaction.block_number.into()));
-            for log in transaction.logs {
-                logs_batch.push(((transaction.input.hash.into(), log.log_index.into()), transaction.block_number.into()));
- } - - // save block - let mut threads = Vec::with_capacity(9); - let block_number = block.number(); - let txs_rocks = Arc::clone(&self.state.transactions); - let logs_rocks = Arc::clone(&self.state.logs); - threads.push(thread::spawn(move || { - txs_rocks.insert_batch_indexed(txs_batch, block_number.as_u64()); - })); - threads.push(thread::spawn(move || { - logs_rocks.insert_batch_indexed(logs_batch, block_number.as_u64()); - })); - - let block_hash = block.hash(); - - let blocks_by_number = Arc::clone(&self.state.blocks_by_number); - let blocks_by_hash = Arc::clone(&self.state.blocks_by_hash); - let mut block_without_changes = block.clone(); - for transaction in &mut block_without_changes.transactions { - // checks if it has a contract address to keep, later this will be used to gather deployed_contract_address - transaction.execution.changes.retain(|_, change| change.bytecode.clone().is_modified()); - } - let block_hash_clone = block_hash; - threads.push(thread::spawn(move || { - blocks_by_number.insert(block_number.into(), block_without_changes.into()); - })); - threads.push(thread::spawn(move || { - blocks_by_hash.insert_batch_indexed(vec![(block_hash_clone.into(), block_number.into())], block_number.as_u64()); - })); - - threads.append( - &mut self - .state - .update_state_with_execution_changes(&account_changes, block_number) - .context("failed to update state with execution changes")?, - ); - - let previous_count = TRANSACTIONS_COUNT.load(Ordering::Relaxed); - let _ = TRANSACTIONS_COUNT.fetch_add(block.transactions.len(), Ordering::Relaxed); - let current_count = TRANSACTIONS_COUNT.load(Ordering::Relaxed); - - // for every multiple of TRANSACTION_LOOP_THRESHOLD transactions, send a Backup signal - if previous_count % TRANSACTION_LOOP_THRESHOLD > current_count % TRANSACTION_LOOP_THRESHOLD { - self.state.backup_trigger.send(()).unwrap(); - TRANSACTIONS_COUNT.store(0, Ordering::Relaxed); - } - - for thread in threads { - let _ = thread.join(); - } - - Ok(()) + self.state.save_block(block, self.enable_backups) } fn save_accounts(&self, accounts: Vec) -> anyhow::Result<()> { tracing::debug!(?accounts, "saving initial accounts"); - - for account in accounts { - let (key, value) = account.into(); - self.state.accounts.insert(key, value.clone()); - self.state.accounts_history.insert((key, 0.into()), value); - } - + self.state.save_accounts(accounts); Ok(()) } fn reset_at(&self, block_number: BlockNumber) -> anyhow::Result<()> { + let block_number_u64 = block_number.as_u64(); + tracing::info!(?block_number, "resetting Rocks DB to given block number"); + // reset block number - let block_number_u64: u64 = block_number.into(); - let _ = self.block_number.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |current| { - if block_number_u64 <= current { + let _ = self.block_number.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |current| { + if block_number_u64 < current { Some(block_number_u64) } else { None } }); - self.state.reset_at(block_number) + self.state.reset_at(block_number.into()) } fn read_slots_sample(&self, _start: BlockNumber, _end: BlockNumber, _max_samples: u64, _seed: u64) -> anyhow::Result> { @@ -197,16 +138,7 @@ impl PermanentStorage for RocksPermanentStorage { } fn read_all_slots(&self, address: &Address) -> anyhow::Result> { - let address: AddressRocksdb = (*address).into(); - Ok(self - .state - .account_slots - .iter_from((address, SlotIndexRocksdb::from(0)), rocksdb::Direction::Forward) - .take_while(|((addr, _), _)| &address == addr) - .map(|((_, idx), value)| Slot { 
-                index: idx.into(),
-                value: value.into(),
-            })
-            .collect())
+        tracing::info!(?address, "reading all slots");
+        self.state.read_all_slots(address)
     }
 }
diff --git a/src/eth/storage/rocks/rocks_state.rs b/src/eth/storage/rocks/rocks_state.rs
index 05c8c308a..2e8e7cad2 100644
--- a/src/eth/storage/rocks/rocks_state.rs
+++ b/src/eth/storage/rocks/rocks_state.rs
@@ -1,17 +1,29 @@
 use core::fmt;
 use std::collections::HashMap;
+use std::collections::HashSet;
+use std::path::Path;
+use std::path::PathBuf;
 use std::sync::atomic::AtomicU64;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering;
 use std::sync::mpsc;
 use std::sync::Arc;
-use std::thread;
-use std::thread::JoinHandle;

 use anyhow::anyhow;
 use anyhow::Context;
-use itertools::Itertools;
-use tracing::info;
-use tracing::warn;
-
+use lazy_static::lazy_static;
+use rocksdb::Options;
+use rocksdb::WriteBatch;
+use rocksdb::DB;
+use serde::Deserialize;
+use serde::Serialize;
+use sugars::hmap;
+
+use super::rocks_cf::RocksCf;
+use super::rocks_config::CacheSetting;
+use super::rocks_config::DbConfig;
+use super::rocks_db::create_new_backup;
+use super::rocks_db::create_or_open_db;
 use crate::eth::primitives::Account;
 use crate::eth::primitives::Address;
 use crate::eth::primitives::Block;
@@ -25,8 +37,6 @@ use crate::eth::primitives::Slot;
 use crate::eth::primitives::SlotIndex;
 use crate::eth::primitives::StoragePointInTime;
 use crate::eth::primitives::TransactionMined;
-use crate::eth::storage::rocks::rocks_db::DbConfig;
-use crate::eth::storage::rocks::rocks_db::RocksDb;
 use crate::eth::storage::rocks::types::AccountRocksdb;
 use crate::eth::storage::rocks::types::AddressRocksdb;
 use crate::eth::storage::rocks::types::BlockNumberRocksdb;
@@ -36,95 +46,107 @@ use crate::eth::storage::rocks::types::IndexRocksdb;
 use crate::eth::storage::rocks::types::SlotIndexRocksdb;
 use crate::eth::storage::rocks::types::SlotValueRocksdb;
 use crate::ext::spawn_blocking_named;
-use crate::ext::spawn_named;
 use crate::ext::OptionExt;
 use crate::log_and_err;
+use crate::utils::GIGABYTE;
+
+cfg_if::cfg_if! {
+    if #[cfg(feature = "metrics")] {
+        use std::sync::Mutex;
+
+        use rocksdb::statistics::Histogram;
+        use rocksdb::statistics::Ticker;
+        use crate::infra::metrics::{self, Count, HistogramInt, Sum};
+    }
+}
+
+const BACKUP_TRANSACTION_COUNT_THRESHOLD: usize = 120_000;
+
+lazy_static! {
+    /// Map setting presets for each Column Family
+    static ref CF_OPTIONS_MAP: HashMap<&'static str, Options> = hmap! {
+        "accounts" => DbConfig::Default.to_options(CacheSetting::Enabled(15 * GIGABYTE)),
+        "accounts_history" => DbConfig::FastWriteSST.to_options(CacheSetting::Disabled),
+        "account_slots" => DbConfig::Default.to_options(CacheSetting::Enabled(45 * GIGABYTE)),
+        "account_slots_history" => DbConfig::FastWriteSST.to_options(CacheSetting::Disabled),
+        "transactions" => DbConfig::LargeSSTFiles.to_options(CacheSetting::Disabled),
+        "blocks_by_number" => DbConfig::LargeSSTFiles.to_options(CacheSetting::Disabled),
+        "blocks_by_hash" => DbConfig::LargeSSTFiles.to_options(CacheSetting::Disabled),
+        "logs" => DbConfig::LargeSSTFiles.to_options(CacheSetting::Disabled),
+    };
+}
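// Illustrative sketch (not part of this diff): how a preset map like CF_OPTIONS_MAP
// is consumed when opening the database via `create_or_open_db` from rocks_db.rs.
// The path and column-family names below are examples, not the real configuration.
fn open_db_sketch() -> anyhow::Result<()> {
    let cf_configs: std::collections::HashMap<&'static str, rocksdb::Options> = [
        ("accounts", DbConfig::Default.to_options(CacheSetting::Enabled(GIGABYTE))),
        ("logs", DbConfig::LargeSSTFiles.to_options(CacheSetting::Disabled)),
    ]
    .into_iter()
    .collect();
    // every column family must be declared at open time; the typed `RocksCf`
    // handles created afterwards just look the families up by name
    let (_db, _db_opts) = create_or_open_db("data/example-rocksdb", &cf_configs)?;
    Ok(())
}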
+/// State handler for our RocksDB storage, separating "tables" by column families.
+///
+/// With data separated by column families, writing and reading should be done via the `RocksCf` fields,
+/// while operations that include the whole database (e.g. backup) should refer to the inner `DB` directly.
 pub struct RocksStorageState {
-    pub accounts: Arc<RocksDb<AddressRocksdb, AccountRocksdb>>,
-    pub accounts_history: Arc<RocksDb<(AddressRocksdb, BlockNumberRocksdb), AccountRocksdb>>,
-    pub account_slots: Arc<RocksDb<(AddressRocksdb, SlotIndexRocksdb), SlotValueRocksdb>>,
-    pub account_slots_history: Arc<RocksDb<(AddressRocksdb, SlotIndexRocksdb, BlockNumberRocksdb), SlotValueRocksdb>>,
-    pub transactions: Arc<RocksDb<HashRocksdb, BlockNumberRocksdb>>,
-    pub blocks_by_number: Arc<RocksDb<BlockNumberRocksdb, BlockRocksdb>>,
-    pub blocks_by_hash: Arc<RocksDb<HashRocksdb, BlockNumberRocksdb>>,
-    pub logs: Arc<RocksDb<(HashRocksdb, IndexRocksdb), BlockNumberRocksdb>>,
-    pub backup_trigger: Arc<mpsc::Sender<()>>,
+    db: Arc<DB>,
+    db_path: PathBuf,
+    accounts: RocksCf<AddressRocksdb, AccountRocksdb>,
+    accounts_history: RocksCf<(AddressRocksdb, BlockNumberRocksdb), AccountRocksdb>,
+    account_slots: RocksCf<(AddressRocksdb, SlotIndexRocksdb), SlotValueRocksdb>,
+    account_slots_history: RocksCf<(AddressRocksdb, SlotIndexRocksdb, BlockNumberRocksdb), SlotValueRocksdb>,
+    transactions: RocksCf<HashRocksdb, BlockNumberRocksdb>,
+    blocks_by_number: RocksCf<BlockNumberRocksdb, BlockRocksdb>,
+    blocks_by_hash: RocksCf<HashRocksdb, BlockNumberRocksdb>,
+    logs: RocksCf<(HashRocksdb, IndexRocksdb), BlockNumberRocksdb>,
+    backup_trigger: mpsc::SyncSender<()>,
+    /// Used to trigger backup after threshold is hit
+    transactions_processed: AtomicUsize,
+    /// Last collected stats for a histogram
+    #[cfg(feature = "metrics")]
+    prev_stats: Mutex<HashMap<HistogramInt, (Sum, Count)>>,
+    /// Options passed at DB creation, stored for metrics.
+    ///
+    /// A newly created `rocksdb::Options` object is unique, with an underlying pointer identifier
+    /// inside it, and is used to access the DB metrics. `Options` can be cloned, but two equal
+    /// `Options` might not retrieve the same metrics.
+    #[cfg(feature = "metrics")]
+    db_options: Options,
 }

 impl RocksStorageState {
-    pub fn new(rocks_path_prefix: Option<String>) -> Self {
-        let (tx, rx) = mpsc::channel();
-
-        //XXX TODO while repair/restore from backup, make sure to sync online and only when its in sync with other nodes, receive requests
+    pub fn new(path: impl AsRef<Path>) -> Self {
+        let db_path = path.as_ref().to_path_buf();
+        let (backup_trigger_tx, backup_trigger_rx) = mpsc::sync_channel::<()>(1);

-        let path_prefix = rocks_path_prefix.unwrap_or_default();
+        #[cfg_attr(not(feature = "metrics"), allow(unused_variables))]
+        let (db, db_options) = create_or_open_db(&db_path, &CF_OPTIONS_MAP).unwrap();

+        //XXX TODO while repair/restore from backup, make sure to sync online and only when its in sync with other nodes, receive requests
         let state = Self {
-            accounts: RocksDb::new(&format!("{}./data/accounts.rocksdb", path_prefix), DbConfig::Default).unwrap(),
-            accounts_history: RocksDb::new(&format!("{}./data/accounts_history.rocksdb", path_prefix), DbConfig::FastWriteSST).unwrap(),
-            account_slots: RocksDb::new(&format!("{}./data/account_slots.rocksdb", path_prefix), DbConfig::Default).unwrap(),
-            account_slots_history: RocksDb::new(&format!("{}./data/account_slots_history.rocksdb", path_prefix), DbConfig::FastWriteSST).unwrap(),
-            transactions: RocksDb::new(&format!("{}./data/transactions.rocksdb", path_prefix), DbConfig::LargeSSTFiles).unwrap(),
-            blocks_by_number: RocksDb::new(&format!("{}./data/blocks_by_number.rocksdb", path_prefix), DbConfig::LargeSSTFiles).unwrap(),
-            blocks_by_hash: RocksDb::new(&format!("{}./data/blocks_by_hash.rocksdb", path_prefix), DbConfig::LargeSSTFiles).unwrap(), //XXX this is not needed we can afford to have blocks_by_hash pointing into blocks_by_number
-            logs: RocksDb::new(&format!("{}./data/logs.rocksdb", path_prefix), DbConfig::LargeSSTFiles).unwrap(),
-            backup_trigger: Arc::new(tx),
+            db_path,
+            accounts: new_cf(&db, "accounts"),
+            accounts_history: new_cf(&db, "accounts_history"),
+            account_slots: new_cf(&db, "account_slots"),
+            account_slots_history: new_cf(&db, "account_slots_history"),
+            transactions: new_cf(&db, "transactions"),
+            blocks_by_number: new_cf(&db, "blocks_by_number"),
+            blocks_by_hash: new_cf(&db, "blocks_by_hash"), //XXX this is not
needed we can afford to have blocks_by_hash pointing into blocks_by_number + logs: new_cf(&db, "logs"), + backup_trigger: backup_trigger_tx, + transactions_processed: AtomicUsize::new(0), + #[cfg(feature = "metrics")] + prev_stats: Default::default(), + #[cfg(feature = "metrics")] + db_options, + db, }; - state.listen_for_backup_trigger(rx).unwrap(); - + state.listen_for_backup_trigger(backup_trigger_rx).unwrap(); state } - pub fn listen_for_backup_trigger(&self, rx: mpsc::Receiver<()>) -> anyhow::Result<()> { - tracing::info!("creating backup trigger listener"); - let accounts = Arc::>::clone(&self.accounts); - let accounts_history = Arc::>::clone(&self.accounts_history); - let account_slots = Arc::>::clone(&self.account_slots); - let account_slots_history = - Arc::>::clone(&self.account_slots_history); - let blocks_by_hash = Arc::>::clone(&self.blocks_by_hash); - let blocks_by_number = Arc::>::clone(&self.blocks_by_number); - let transactions = Arc::>::clone(&self.transactions); - let logs = Arc::>::clone(&self.logs); - - spawn_named("storage::backup_trigger", async move { - while rx.recv().is_ok() { - let accounts_clone = Arc::clone(&accounts); - let accounts_history_clone = Arc::clone(&accounts_history); - let account_slots_clone = Arc::clone(&account_slots); - let account_slots_history_clone = Arc::clone(&account_slots_history); - let transactions_clone = Arc::clone(&transactions); - let blocks_by_number_clone = Arc::clone(&blocks_by_number); - let blocks_by_hash_clone = Arc::clone(&blocks_by_hash); - let logs_clone = Arc::clone(&logs); - - spawn_blocking_named("storage::backup_execution", move || { - tracing::info!("rocksdb backuping accounts"); - accounts_clone.backup().unwrap(); - - tracing::info!("rocksdb backuping accounts history"); - accounts_history_clone.backup().unwrap(); + fn listen_for_backup_trigger(&self, rx: mpsc::Receiver<()>) -> anyhow::Result<()> { + tracing::info!("starting rocksdb backup trigger listener"); - tracing::info!("rocksdb backuping account slots"); - account_slots_clone.backup().unwrap(); - - tracing::info!("rocksdb backuping account slots history"); - account_slots_history_clone.backup().unwrap(); - - tracing::info!("rocksdb backuping transactions"); - transactions_clone.backup().unwrap(); - - tracing::info!("rocksdb backuping blocks by number"); - blocks_by_number_clone.backup().unwrap(); - - tracing::info!("rocksdb backuping blocks by hash"); - blocks_by_hash_clone.backup().unwrap(); - - tracing::info!("rocksdb backuping logs"); - logs_clone.backup().unwrap(); - }); + let db = Arc::clone(&self.db); + spawn_blocking_named("storage::listen_backup_trigger", move || { + while rx.recv().is_ok() { + if let Err(err) = create_new_backup(&db) { + tracing::error!(?err, "failed to backup DB"); + } } }); @@ -132,338 +154,126 @@ impl RocksStorageState { } pub fn preload_block_number(&self) -> anyhow::Result { - let block_number = self.blocks_by_number.last().map(|(num, _)| num).unwrap_or_default(); + let block_number = self.blocks_by_number.last_key().unwrap_or_default(); tracing::info!(number = %block_number, "preloaded block_number"); Ok((u64::from(block_number)).into()) } - pub fn sync_data(&self) -> anyhow::Result<()> { - tracing::info!("starting sync_data"); - tracing::info!("account_block_number {:?}", self.accounts.get_current_block_number()); - tracing::info!("slots_block_number {:?}", self.account_slots.get_current_block_number()); - tracing::info!("slots_history_block_number {:?}", self.account_slots_history.get_index_block_number()); - 
tracing::info!("accounts_history_block_number {:?}", self.accounts_history.get_index_block_number()); - tracing::info!("logs_block_number {:?}", self.logs.get_index_block_number()); - tracing::info!("transactions_block_number {:?}", self.transactions.get_index_block_number()); - - if let Some((last_block_number, _)) = self.blocks_by_number.last() { - tracing::info!("last_block_number {:?}", last_block_number); - if self.accounts.get_current_block_number() != self.account_slots.get_current_block_number() { - warn!( - "block numbers are not in sync {:?} {:?} {:?} {:?} {:?} {:?}", - self.accounts.get_current_block_number(), - self.account_slots.get_current_block_number(), - self.account_slots_history.get_index_block_number(), - self.accounts_history.get_index_block_number(), - self.logs.get_index_block_number(), - self.transactions.get_index_block_number(), - ); - let mut min_block_number = std::cmp::min( - std::cmp::min( - std::cmp::min(self.accounts.get_current_block_number(), self.account_slots.get_current_block_number()), - std::cmp::min( - self.account_slots_history.get_index_block_number(), - self.accounts_history.get_index_block_number(), - ), - ), - std::cmp::min(self.logs.get_index_block_number(), self.transactions.get_index_block_number()), - ); - - let last_secure_block_number = last_block_number.inner_value().as_u64() - 5000; - if last_secure_block_number > min_block_number { - self.accounts.restore().unwrap(); - tracing::warn!("accounts restored"); - self.accounts_history.restore().unwrap(); - tracing::warn!("accounts_history restored"); - self.account_slots.restore().unwrap(); - tracing::warn!("account_slots restored"); - self.account_slots_history.restore().unwrap(); - tracing::warn!("account_slots_history restored"); - self.transactions.restore().unwrap(); - tracing::warn!("transactions restored"); - self.blocks_by_number.restore().unwrap(); - tracing::warn!("blocks_by_number restored"); - self.blocks_by_hash.restore().unwrap(); - tracing::warn!("blocks_by_hash restored"); - self.logs.restore().unwrap(); - tracing::warn!("logs restored"); - - min_block_number = std::cmp::min( - std::cmp::min( - std::cmp::min(self.accounts.get_current_block_number(), self.account_slots.get_current_block_number()), - std::cmp::min( - self.account_slots_history.get_index_block_number(), - self.accounts_history.get_index_block_number(), - ), - ), - std::cmp::min(self.logs.get_index_block_number(), self.transactions.get_index_block_number()), - ); + pub fn reset_at(&self, block_number: BlockNumberRocksdb) -> anyhow::Result<()> { + // Clear current state + self.account_slots.clear().unwrap(); + self.accounts.clear().unwrap(); + + // Get current state back from historical + let mut latest_slots: HashMap<(AddressRocksdb, SlotIndexRocksdb), (BlockNumberRocksdb, SlotValueRocksdb)> = HashMap::new(); + let mut latest_accounts: HashMap = HashMap::new(); + for ((address, idx, block), value) in self.account_slots_history.iter_start() { + if block > block_number { + self.account_slots_history.delete(&(address, idx, block)).unwrap(); + } else if let Some((bnum, _)) = latest_slots.get(&(address, idx)) { + if bnum < &block { + latest_slots.insert((address, idx), (block, value.clone())); } - self.reset_at(BlockNumber::from(min_block_number))?; + } else { + latest_slots.insert((address, idx), (block, value.clone())); } } - - tracing::info!("data is in sync"); - - Ok(()) - } - - pub fn reset_at(&self, block_number: BlockNumber) -> anyhow::Result<()> { - let threads = vec![ - { - let self_blocks_by_hash_clone = 
-                let self_blocks_by_hash_clone = Arc::clone(&self.blocks_by_hash);
-                let block_number_clone = block_number;
-                thread::spawn(move || {
-                    for (block_num, block_hash_vec) in self_blocks_by_hash_clone.indexed_iter_end() {
-                        if block_num <= block_number_clone.as_u64() {
-                            break;
-                        }
-                        for block_hash in block_hash_vec {
-                            self_blocks_by_hash_clone.delete(&block_hash).unwrap();
-                        }
-                        self_blocks_by_hash_clone.delete_index(block_num).unwrap();
-                    }
-
-                    info!(
-                        "Deleted blocks by hash above block number {}. This ensures synchronization with the lowest block height across nodes.",
-                        block_number_clone
-                    );
-                })
-            },
-            {
-                let self_blocks_by_number_clone = Arc::clone(&self.blocks_by_number);
-                let block_number_clone = block_number;
-                thread::spawn(move || {
-                    let blocks_by_number = self_blocks_by_number_clone.iter_end();
-                    for (num, _) in blocks_by_number {
-                        if num <= block_number_clone.into() {
-                            break;
-                        }
-                        self_blocks_by_number_clone.delete(&num).unwrap();
-                    }
-                    info!(
-                        "Deleted blocks by number above block number {}. Helps in reverting to a common state prior to a network fork or error.",
-                        block_number_clone
-                    );
-                })
-            },
-            {
-                let self_transactions_clone = Arc::clone(&self.transactions);
-                let block_number_clone = block_number;
-                thread::spawn(move || {
-                    let transactions = self_transactions_clone.indexed_iter_end();
-                    for (index_block_number, hash_vec) in transactions {
-                        if index_block_number <= block_number_clone.as_u64() {
-                            break;
-                        }
-                        for hash in hash_vec {
-                            self_transactions_clone.delete(&hash).unwrap();
-                        }
-                        self_transactions_clone.delete_index(index_block_number).unwrap();
-                    }
-                    info!(
-                        "Cleared transactions above block number {}. Necessary to remove transactions not confirmed in the finalized blockchain state.",
-                        block_number_clone
-                    );
-                })
-            },
-            {
-                let self_logs_clone = Arc::clone(&self.logs);
-                let block_number_clone = block_number;
-                thread::spawn(move || {
-                    let logs = self_logs_clone.indexed_iter_end();
-                    for (index_block_number, logs_vec) in logs {
-                        if index_block_number <= block_number_clone.as_u64() {
-                            break;
-                        }
-                        for (hash, index) in logs_vec {
-                            self_logs_clone.delete(&(hash, index)).unwrap();
-                        }
-                        self_logs_clone.delete_index(index_block_number).unwrap();
-                    }
-                    info!(
-                        "Removed logs above block number {}. Ensures log consistency with the blockchain's current confirmed state.",
-                        block_number_clone
-                    );
-                })
-            },
-            {
-                let self_accounts_history_clone = Arc::clone(&self.accounts_history);
-                let block_number_clone = block_number;
-                thread::spawn(move || {
-                    let accounts_history = self_accounts_history_clone.indexed_iter_end();
-                    for (index_block_number, accounts_history_vec) in accounts_history {
-                        if index_block_number <= block_number_clone.as_u64() {
-                            break;
-                        }
-                        for (address, historic_block_number) in accounts_history_vec {
-                            self_accounts_history_clone.delete(&(address, historic_block_number)).unwrap();
-                        }
-                        self_accounts_history_clone.delete_index(index_block_number).unwrap();
-                    }
-                    info!(
-                        "Deleted account history records above block number {}. Important for maintaining historical accuracy in account state across nodes.",
-                        block_number_clone
-                    );
-                })
-            },
-            {
-                let self_account_slots_history_clone = Arc::clone(&self.account_slots_history);
-                let block_number_clone = block_number;
-                thread::spawn(move || {
-                    let account_slots_history = self_account_slots_history_clone.indexed_iter_end();
-                    for (index_block_number, account_slots_history_vec) in account_slots_history {
-                        if index_block_number <= block_number_clone.as_u64() {
-                            break;
-                        }
-                        for (address, slot_index, historic_block_number) in account_slots_history_vec {
-                            self_account_slots_history_clone.delete(&(address, slot_index, historic_block_number)).unwrap();
-                        }
-                        self_account_slots_history_clone.delete_index(index_block_number).unwrap();
-                    }
-                    info!(
-                        "Cleared account slot history above block number {}. Vital for synchronizing account slot states after discrepancies.",
-                        block_number_clone
-                    );
-                })
-            },
-        ];
-
-        // Wait for all tasks
-        for thread in threads {
-            let _ = thread.join();
-        }
-
-        // Clear current states
-        let _ = self.accounts.clear();
-        let _ = self.account_slots.clear();
-
-        // Spawn task for handling accounts
-        let accounts_task = thread::spawn({
-            let self_accounts_history_clone = Arc::clone(&self.accounts_history);
-            let self_accounts_clone = Arc::clone(&self.accounts);
-            let block_number_clone = block_number;
-            move || {
-                let mut latest_accounts: HashMap<AddressRocksdb, (BlockNumberRocksdb, AccountRocksdb)> = std::collections::HashMap::new();
-                let account_histories = self_accounts_history_clone.iter_start();
-                for ((address, historic_block_number), account_info) in account_histories {
-                    if let Some((existing_block_number, _)) = latest_accounts.get(&address) {
-                        if existing_block_number < &historic_block_number {
-                            latest_accounts.insert(address, (historic_block_number, account_info));
-                        }
-                    } else {
-                        latest_accounts.insert(address, (historic_block_number, account_info));
-                    }
+        for ((address, block), account) in self.accounts_history.iter_start() {
+            if block > block_number {
+                self.accounts_history.delete(&(address, block)).unwrap();
+            } else if let Some((bnum, _)) = latest_accounts.get(&address) {
+                if bnum < &block {
+                    latest_accounts.insert(address, (block, account));
                 }
-
-                let accounts_temp_vec = latest_accounts
-                    .into_iter()
-                    .map(|(address, (_, account_info))| (address, account_info))
-                    .collect::<Vec<_>>();
-                self_accounts_clone.insert_batch(accounts_temp_vec, Some(block_number_clone.into()));
-                info!("Accounts updated up to block number {}", block_number_clone);
+            } else {
+                latest_accounts.insert(address, (block, account));
             }
-        });
+        }
 
-        // Spawn task for handling slots
-        let slots_task = thread::spawn({
-            let self_account_slots_history_clone = Arc::clone(&self.account_slots_history);
-            let self_account_slots_clone = Arc::clone(&self.account_slots);
-            let block_number_clone = block_number;
-            move || {
-                let mut latest_slots: HashMap<(AddressRocksdb, SlotIndexRocksdb), (BlockNumberRocksdb, SlotValueRocksdb)> = std::collections::HashMap::new();
-                let slot_histories = self_account_slots_history_clone.iter_start();
-                for ((address, slot_index, historic_block_number), slot_value) in slot_histories {
-                    let slot_key = (address, slot_index);
-                    if let Some((existing_block_number, _)) = latest_slots.get(&slot_key) {
-                        if existing_block_number < &historic_block_number {
-                            latest_slots.insert(slot_key, (historic_block_number, slot_value));
-                        }
-                    } else {
-                        latest_slots.insert(slot_key, (historic_block_number, slot_value));
-                    }
-                }
+        // write new current state
+        let mut batch = WriteBatch::default();
+        let accounts_iter = latest_accounts.into_iter().map(|(address, (_, account))| (address, account));
+        self.accounts.prepare_batch_insertion(accounts_iter, &mut batch);
+        let slots_iter = latest_slots.into_iter().map(|((address, idx), (_, value))| ((address, idx), value));
+        self.account_slots.prepare_batch_insertion(slots_iter, &mut batch);
+        self.write_batch(batch).unwrap();
+
+        // Truncate everything else stored above the reset point
+        for (hash, block) in self.transactions.iter_start() {
+            if block > block_number {
+                self.transactions.delete(&hash).unwrap();
+            }
+        }
 
-                let slots_temp_vec = latest_slots
-                    .into_iter()
-                    .map(|((address, slot_index), (_, slot_value))| ((address, slot_index), slot_value))
-                    .collect::<Vec<_>>();
-                self_account_slots_clone.insert_batch(slots_temp_vec, Some(block_number_clone.into()));
-                info!("Slots updated up to block number {}", block_number_clone);
+        for (key, block) in self.logs.iter_start() {
+            if block > block_number {
+                self.logs.delete(&key).unwrap();
             }
-        });
+        }
 
-        let _ = accounts_task.join();
-        let _ = slots_task.join();
+        for (hash, block) in self.blocks_by_hash.iter_start() {
+            if block > block_number {
+                self.blocks_by_hash.delete(&hash).unwrap();
+            }
+        }
 
-        info!(
-            "All reset tasks have been completed or encountered errors. The system is now aligned to block number {}.",
-            block_number
-        );
+        for (block, _) in self.blocks_by_number.iter_end() {
+            if block > block_number {
+                self.blocks_by_number.delete(&block).unwrap();
+            } else {
+                break;
+            }
+        }
 
         Ok(())
     }
 
     /// Updates the in-memory state with changes from transaction execution
-    pub fn update_state_with_execution_changes(
-        &self,
-        changes: &[ExecutionAccountChanges],
-        block_number: BlockNumber,
-    ) -> Result<Vec<JoinHandle<()>>, sqlx::Error> {
-        // Directly capture the fields needed by each future from `self`
-        let accounts = Arc::clone(&self.accounts);
-        let accounts_history = Arc::clone(&self.accounts_history);
-        let account_slots = Arc::clone(&self.account_slots);
-        let account_slots_history = Arc::clone(&self.account_slots_history);
-
-        let changes_clone_for_accounts = changes.to_vec(); // Clone changes for accounts future
-        let changes_clone_for_slots = changes.to_vec(); // Clone changes for slots future
+    fn prepare_batch_state_update_with_execution_changes(&self, changes: &[ExecutionAccountChanges], block_number: BlockNumber, batch: &mut WriteBatch) {
+        let accounts = self.accounts.clone();
+        let accounts_history = self.accounts_history.clone();
+        let account_slots = self.account_slots.clone();
+        let account_slots_history = self.account_slots_history.clone();
 
         let mut account_changes = Vec::new();
         let mut account_history_changes = Vec::new();
 
-        let account_changes_future = thread::spawn(move || {
-            for change in changes_clone_for_accounts {
-                if change.is_changed() {
-                    let address: AddressRocksdb = change.address.into();
-                    let mut account_info_entry = accounts.entry_or_insert_with(address, AccountRocksdb::default);
-                    if let Some(nonce) = change.nonce.clone().take_modified() {
-                        account_info_entry.nonce = nonce.into();
-                    }
-                    if let Some(balance) = change.balance.clone().take_modified() {
-                        account_info_entry.balance = balance.into();
-                    }
-                    if let Some(bytecode) = change.bytecode.clone().take_modified() {
-                        account_info_entry.bytecode = bytecode.map_into();
-                    }
+        for change in changes {
+            let address: AddressRocksdb = change.address.into();
+            let mut account_info_entry = accounts.get_or_insert_with(address, AccountRocksdb::default);
 
-                    account_changes.push((address, account_info_entry.clone()));
-                    account_history_changes.push(((address, block_number.into()), account_info_entry));
-                }
+            if let Some(nonce) = change.nonce.clone().take_modified() {
+                account_info_entry.nonce = nonce.into();
+            }
+            if let Some(balance) = change.balance.clone().take_modified() {
+                account_info_entry.balance = balance.into();
+            }
+            if let Some(bytecode) = change.bytecode.clone().take_modified() {
+                account_info_entry.bytecode = bytecode.map_into();
             }
-            accounts.insert_batch(account_changes, Some(block_number.into()));
-            accounts_history.insert_batch_indexed(account_history_changes, block_number.into());
-        });
+
+            account_changes.push((address, account_info_entry.clone()));
+            account_history_changes.push(((address, block_number.into()), account_info_entry));
+        }
+
+        accounts.prepare_batch_insertion(account_changes, batch);
+        accounts_history.prepare_batch_insertion(account_history_changes, batch);
 
         let mut slot_changes = Vec::new();
         let mut slot_history_changes = Vec::new();
 
-        let slot_changes_future = thread::spawn(move || {
-            for change in changes_clone_for_slots {
-                let address: AddressRocksdb = change.address.into();
-                for (slot_index, slot_change) in change.slots.clone() {
-                    if let Some(slot) = slot_change.take_modified() {
-                        slot_changes.push(((address, slot_index.into()), slot.value.into()));
-                        slot_history_changes.push(((address, slot_index.into(), block_number.into()), slot.value.into()));
-                    }
+        for change in changes {
+            for (slot_index, slot_change) in &change.slots {
+                if let Some(slot) = slot_change.take_modified_ref() {
+                    let address: AddressRocksdb = change.address.into();
+                    let slot_index = *slot_index;
+                    slot_changes.push(((address, slot_index.into()), slot.value.into()));
+                    slot_history_changes.push(((address, slot_index.into(), block_number.into()), slot.value.into()));
                 }
             }
-            account_slots.insert_batch(slot_changes, Some(block_number.into()));
-            account_slots_history.insert_batch_indexed(slot_history_changes, block_number.into());
-        });
-
-        Ok(vec![account_changes_future, slot_changes_future])
+        }
+        account_slots.prepare_batch_insertion(slot_changes, batch);
+        account_slots_history.prepare_batch_insertion(slot_history_changes, batch);
     }
 
     pub fn read_transaction(&self, tx_hash: &Hash) -> anyhow::Result<Option<TransactionMined>> {
@@ -485,29 +295,21 @@
     }
 
     pub fn read_logs(&self, filter: &LogFilter) -> anyhow::Result<Vec<LogMined>> {
-        self.logs
-            .iter_start()
-            .skip_while(|(_, log_block_number)| log_block_number < &filter.from_block.into())
-            .take_while(|(_, log_block_number)| match filter.to_block {
-                Some(to_block) => log_block_number <= &to_block.into(),
-                None => true,
+        let addresses: HashSet<AddressRocksdb> = filter.addresses.iter().map(|&address| AddressRocksdb::from(address)).collect();
+
+        Ok(self
+            .blocks_by_number
+            .iter_from(BlockNumberRocksdb::from(filter.from_block), rocksdb::Direction::Forward)
+            .take_while(|(number, _)| filter.to_block.as_ref().map_or(true, |last_block| BlockNumber::from(*number) <= *last_block))
+            .flat_map(|(_, block)| block.transactions)
+            .filter(|transaction| transaction.input.to.is_some_and(|to| addresses.contains(&to)))
+            .flat_map(|transaction| transaction.logs)
+            .filter(|log_mined| {
+                let topics = log_mined.log.to_topics_vec();
+                filter.topics_combinations.is_empty() || filter.topics_combinations.iter().any(|topic_filter| topic_filter.matches(&topics))
             })
-            .map(|((tx_hash, _), _)| match self.read_transaction(&tx_hash.into()) {
-                Ok(Some(tx)) => Ok(tx.logs),
-                Ok(None) => Err(anyhow!("the transaction the log was supposed to be in was not found")).with_context(|| format!("tx_hash = {:?}", tx_hash)),
-                Err(err) => Err(err),
-            })
-            .flatten_ok()
-            .filter_map(|log_res| match log_res {
-                Ok(log) =>
-                    if filter.matches(&log) {
-                        Some(Ok(log))
-                    } else {
-                        None
-                    },
-                err => Some(err),
-            })
-            .collect()
+            .map(LogMined::from)
+            .collect())
     }
 
     pub fn read_slot(&self, address: &Address, index: &SlotIndex, point_in_time: &StoragePointInTime) -> Option<Slot> {
@@ -539,6 +341,19 @@
         }
     }
 
+    pub fn read_all_slots(&self, address: &Address) -> anyhow::Result<Vec<Slot>> {
+        let address: AddressRocksdb = (*address).into();
+        Ok(self
+            .account_slots
+            .iter_from((address, SlotIndexRocksdb::from(0)), rocksdb::Direction::Forward)
+            .take_while(|((addr, _), _)| &address == addr)
+            .map(|((_, idx), value)| Slot {
+                index: idx.into(),
+                value: value.into(),
+            })
+            .collect())
+    }
+
     pub fn read_account(&self, address: &Address, point_in_time: &StoragePointInTime) -> Option<Account> {
         if address.is_coinbase() || address.is_zero() {
             //XXX temporary, we will reload the database later without it
@@ -597,24 +412,109 @@
         }
     }
 
-    /// Writes accounts to state (does not write to account history)
-    pub fn write_accounts(&self, accounts: Vec<Account>, block_number: BlockNumber) {
-        let mut account_batch = vec![];
+    pub fn save_accounts(&self, accounts: Vec<Account>) {
         for account in accounts {
-            account_batch.push(account.into());
+            let (key, value) = account.into();
+            self.accounts.insert(key, value.clone());
+            self.accounts_history.insert((key, 0.into()), value);
+        }
+    }
+
+    pub fn save_block(&self, block: Block, enable_backups: bool) -> anyhow::Result<()> {
+        let account_changes = block.compact_account_changes();
+
+        let mut txs_batch = vec![];
+        let mut logs_batch = vec![];
+        for transaction in block.transactions.clone() {
+            txs_batch.push((transaction.input.hash.into(), transaction.block_number.into()));
+            for log in transaction.logs {
+                logs_batch.push(((transaction.input.hash.into(), log.log_index.into()), transaction.block_number.into()));
+            }
+        }
+
+        let mut batch = WriteBatch::default();
+
+        self.transactions.prepare_batch_insertion(txs_batch, &mut batch);
+        self.logs.prepare_batch_insertion(logs_batch, &mut batch);
+
+        let number = block.number();
+        let block_hash = block.hash();
+        let txs_len = block.transactions.len();
+
+        // as an optimization, strip all discardable account changes instead of saving the entire block into the database
+        let block_without_changes = {
+            let mut block_mut = block;
+            block_mut.transactions.iter_mut().for_each(|transaction| {
+                // keep only changes that modify the bytecode; these are later used to gather deployed_contract_address
+                transaction.execution.changes.retain(|_, change| change.bytecode.is_modified());
+            });
+            block_mut
+        };
+
+        let block_by_number = (number.into(), block_without_changes.into());
+        self.blocks_by_number.prepare_batch_insertion([block_by_number], &mut batch);
+
+        let block_by_hash = (block_hash.into(), number.into());
+        self.blocks_by_hash.prepare_batch_insertion([block_by_hash], &mut batch);
+
+        self.prepare_batch_state_update_with_execution_changes(&account_changes, number, &mut batch);
+
+        if enable_backups {
+            self.check_backup_threshold_trigger(txs_len);
+        }
+
+        self.write_batch(batch).unwrap();
+        Ok(())
+    }
+
+    fn check_backup_threshold_trigger(&self, transactions_just_processed: usize) {
+        let previous = self.transactions_processed.fetch_add(transactions_just_processed, Ordering::Relaxed);
+        let current = previous + transactions_just_processed;
+
+        // threshold hit, trigger backup and reset value
+        if current > BACKUP_TRANSACTION_COUNT_THRESHOLD {
+            if let Err(err) = self.backup_trigger.try_send(()) {
+                tracing::error!(
+                    reason = ?err,
+                    "failed to send backup trigger signal: either the listener panicked or another backup was already in progress"
+                );
+            }
+            self.transactions_processed.store(0, Ordering::Relaxed);
+        }
+    }
+
+    /// Writes accounts to state (does not write to account history)
+    #[allow(dead_code)]
+    fn write_accounts(&self, accounts: Vec<Account>) {
+        let accounts = accounts.into_iter().map(Into::into);
 
-        self.accounts.insert_batch(account_batch, Some(block_number.into()));
+        let mut batch = WriteBatch::default();
+        self.accounts.prepare_batch_insertion(accounts, &mut batch);
+        self.db.write(batch).unwrap();
     }
 
     /// Writes slots to state (does not write to slot history)
-    pub fn write_slots(&self, slots: Vec<(Address, Slot)>, block_number: BlockNumber) {
-        let mut slot_batch = vec![];
+    #[cfg_attr(not(test), allow(dead_code))]
+    pub fn write_slots(&self, slots: Vec<(Address, Slot)>) {
+        let slots = slots
+            .into_iter()
+            .map(|(address, slot)| ((address.into(), slot.index.into()), slot.value.into()));
+
+        let mut batch = WriteBatch::default();
+        self.account_slots.prepare_batch_insertion(slots, &mut batch);
+        self.db.write(batch).unwrap();
+    }
+
+    /// Write to all DBs in a batch
+    fn write_batch(&self, batch: WriteBatch) -> anyhow::Result<()> {
+        let batch_len = batch.len();
+        let result = self.db.write(batch);
 
-        for (address, slot) in slots {
-            slot_batch.push(((address.into(), slot.index.into()), slot.value.into()));
+        if let Err(err) = &result {
+            tracing::error!(?err, batch_len, "failed to write batch to DB");
         }
-        self.account_slots.insert_batch(slot_batch, Some(block_number.into()));
+        result.map_err(Into::into)
     }
 
     /// Clears in-memory state.
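The `write_batch` helper above, together with `save_block`, is the core of this refactor: every column-family update for a block (transactions, logs, the block itself, and the account and slot state) is staged into one `WriteBatch` and committed with a single atomic `db.write`, so a crash can no longer leave `transactions` updated while `blocks_by_number` lags behind. Below is a minimal, self-contained sketch of that pattern using the `rocksdb` crate directly; the database path and column-family names are placeholders invented for illustration, not the ones from this diff.

    use rocksdb::{ColumnFamilyDescriptor, Options, WriteBatch, DB};

    fn main() -> Result<(), rocksdb::Error> {
        let mut opts = Options::default();
        opts.create_if_missing(true);
        opts.create_missing_column_families(true);

        // Two column families standing in for e.g. `transactions` and `blocks_by_number`.
        let cfs = vec![
            ColumnFamilyDescriptor::new("cf_a", Options::default()),
            ColumnFamilyDescriptor::new("cf_b", Options::default()),
        ];
        let db = DB::open_cf_descriptors(&opts, "./data/batch_demo.rocksdb", cfs)?;

        let cf_a = db.cf_handle("cf_a").expect("cf_a was opened above");
        let cf_b = db.cf_handle("cf_b").expect("cf_b was opened above");

        // Stage writes to both column families in one batch...
        let mut batch = WriteBatch::default();
        batch.put_cf(&cf_a, b"tx_hash", b"block_number");
        batch.put_cf(&cf_b, b"block_number", b"block_payload");

        // ...then commit with a single atomic write: either both
        // key-value pairs land on disk or neither does.
        db.write(batch)?;
        Ok(())
    }

This is also why `save_block` threads one `batch` through every `prepare_batch_insertion` call and only touches the database once, at the final `write_batch`.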
@@ -629,22 +529,109 @@ impl RocksStorageState {
         self.logs.clear()?;
         Ok(())
     }
+}
 
-    #[cfg(feature = "metrics")]
+#[cfg(feature = "metrics")]
+impl RocksStorageState {
     pub fn export_metrics(&self) {
-        self.account_slots.export_metrics();
-        self.account_slots_history.export_metrics();
+        let db_get = self.db_options.get_histogram_data(Histogram::DbGet);
+        let db_write = self.db_options.get_histogram_data(Histogram::DbWrite);
+
+        let block_cache_miss = self.db_options.get_ticker_count(Ticker::BlockCacheMiss);
+        let block_cache_hit = self.db_options.get_ticker_count(Ticker::BlockCacheHit);
+        let bytes_written = self.db_options.get_ticker_count(Ticker::BytesWritten);
+        let bytes_read = self.db_options.get_ticker_count(Ticker::BytesRead);
+
+        let db_name = self.db.path().file_name().unwrap().to_str();
+
+        metrics::set_rocks_db_get(db_get.count(), db_name);
+        metrics::set_rocks_db_write(db_write.count(), db_name);
+        metrics::set_rocks_block_cache_miss(block_cache_miss, db_name);
+        metrics::set_rocks_block_cache_hit(block_cache_hit, db_name);
+        metrics::set_rocks_bytes_written(bytes_written, db_name);
+        metrics::set_rocks_bytes_read(bytes_read, db_name);
+
+        metrics::set_rocks_compaction_time(self.get_histogram_average_in_interval(Histogram::CompactionTime), db_name);
+        metrics::set_rocks_compaction_cpu_time(self.get_histogram_average_in_interval(Histogram::CompactionCpuTime), db_name);
+        metrics::set_rocks_flush_time(self.get_histogram_average_in_interval(Histogram::FlushTime), db_name);
+    }
+
+    fn get_histogram_average_in_interval(&self, hist: Histogram) -> u64 {
+        // The stats are cumulative since opening the db;
+        // we can get the average in the time interval with: avg = (new_sum - sum) / (new_count - count)
+        let mut prev_values = self.prev_stats.lock().unwrap();
+        let (prev_sum, prev_count): (Sum, Count) = *prev_values.get(&(hist as u32)).unwrap_or(&(0, 0));
+        let data = self.db_options.get_histogram_data(hist);
+        let data_count = data.count();
+        let data_sum = data.sum();
 
-        self.accounts.export_metrics();
-        self.accounts_history.export_metrics();
+        let Some(avg) = (data_sum - prev_sum).checked_div(data_count - prev_count) else {
+            return 0;
+        };
 
-        self.blocks_by_hash.export_metrics();
-        self.blocks_by_number.export_metrics();
+        prev_values.insert(hist as u32, (data_sum, data_count));
+        avg
     }
 }
 
 impl fmt::Debug for RocksStorageState {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_struct("RocksDb").field("db", &"Arc<DB>").finish()
+        f.debug_struct("RocksStorageState").field("db_path", &self.db_path).finish()
+    }
+}
+
+fn new_cf<K, V>(db: &Arc<DB>, column_family: &str) -> RocksCf<K, V>
+where
+    K: Serialize + for<'de> Deserialize<'de> + std::hash::Hash + Eq,
+    V: Serialize + for<'de> Deserialize<'de> + Clone,
+{
+    let Some(options) = CF_OPTIONS_MAP.get(column_family) else {
+        panic!("column_family `{column_family}` given to `new_cf` not found in options map");
+    };
+    RocksCf::new_cf(Arc::clone(db), column_family, options.clone())
+}
+
+#[cfg(test)]
+mod tests {
+    use std::collections::HashSet;
+    use std::fs;
+
+    use fake::Fake;
+    use fake::Faker;
+
+    use super::*;
+    use crate::eth::primitives::SlotValue;
+
+    #[test]
+    fn test_rocks_multi_get() {
+        let (db, _db_options) = create_or_open_db("./data/slots_test.rocksdb", &CF_OPTIONS_MAP).unwrap();
+        let account_slots: RocksCf<SlotIndexRocksdb, SlotValue> = new_cf(&db, "account_slots");
+
+        let slots: HashMap<SlotIndexRocksdb, SlotValue> = (0..1000).map(|_| (Faker.fake(), Faker.fake())).collect();
+        let extra_slots: HashMap<SlotIndexRocksdb, SlotValue> = (0..1000)
+            .map(|_| (Faker.fake(), Faker.fake()))
+            .filter(|(key, _)| !slots.contains_key(key))
+            .collect();
+
+        let mut batch = WriteBatch::default();
+        account_slots.prepare_batch_insertion(slots.clone(), &mut batch);
+        account_slots.prepare_batch_insertion(extra_slots.clone(), &mut batch);
+        db.write(batch).unwrap();
+
+        let extra_keys: HashSet<SlotIndexRocksdb> = (0..1000)
+            .map(|_| Faker.fake())
+            .filter(|key| !extra_slots.contains_key(key) && !slots.contains_key(key))
+            .collect();
+
+        let keys: Vec<SlotIndexRocksdb> = slots.keys().cloned().chain(extra_keys).collect();
+        let result = account_slots.multi_get(keys).expect("this should not fail");
+
+        assert_eq!(result.len(), slots.keys().len());
+        for (idx, value) in result {
+            assert_eq!(value, *slots.get(&idx).expect("should not be None"));
+        }
+
+        fs::remove_dir_all("./data/slots_test.rocksdb").unwrap();
+    }
+}
diff --git a/src/eth/storage/rocks/types.rs b/src/eth/storage/rocks/types.rs
index 58df60199..606255880 100644
--- a/src/eth/storage/rocks/types.rs
+++ b/src/eth/storage/rocks/types.rs
@@ -9,6 +9,7 @@ use ethereum_types::H256;
 use ethereum_types::H64;
 use ethereum_types::U256;
 use ethereum_types::U64;
+use itertools::Itertools;
 use revm::primitives::KECCAK_EMPTY;
 
 use crate::eth::primitives::logs_bloom::LogsBloom;
@@ -27,6 +28,7 @@ use crate::eth::primitives::Hash;
 use crate::eth::primitives::Index;
 use crate::eth::primitives::Log;
 use crate::eth::primitives::LogMined;
+use crate::eth::primitives::LogTopic;
 use crate::eth::primitives::MinerNonce;
 use crate::eth::primitives::Nonce;
 use crate::eth::primitives::Size;
@@ -205,7 +207,7 @@ impl From<AddressRocksdb> for Address {
     }
 }
 
-#[derive(Debug, derive_more::Display, Clone, Default, Eq, PartialEq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize)]
+#[derive(Debug, derive_more::Display, Clone, Copy, Default, Eq, PartialEq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize)]
 pub struct BlockNumberRocksdb(U64);
 
 gen_newtype_from!(self = BlockNumberRocksdb, other = u8, u16, u32, u64, U64, usize, i32, i64);
@@ -233,7 +235,7 @@ impl From<BlockNumberRocksdb> for u64 {
     }
 }
 
-#[derive(Clone, Default, Hash, Eq, PartialEq, PartialOrd, Ord, serde::Serialize, serde::Deserialize)]
+#[derive(Clone, Copy, Default, Hash, Eq, PartialEq, PartialOrd, Ord, serde::Serialize, serde::Deserialize)]
 pub struct SlotIndexRocksdb(U256);
 
 gen_newtype_from!(self = SlotIndexRocksdb, other = u64);
@@ -526,6 +528,13 @@ pub struct LogRocksdb {
     pub data: BytesRocksdb,
 }
 
+impl LogRocksdb {
+    pub fn to_topics_vec(&self) -> Vec<LogTopic> {
+        let topics = &self.topics;
+        [topics.0, topics.1, topics.2, topics.3].into_iter().while_some().map(LogTopic::from).collect()
+    }
+}
+
 impl From<Log> for LogRocksdb {
     fn from(item: Log) -> Self {
         Self {
diff --git a/src/infra/metrics/metrics_types.rs b/src/infra/metrics/metrics_types.rs
index a8c0422ec..884ec8707 100644
--- a/src/infra/metrics/metrics_types.rs
+++ b/src/infra/metrics/metrics_types.rs
@@ -5,6 +5,10 @@ use metrics::describe_gauge;
 use metrics::describe_histogram;
 use metrics::Label;
 
+pub type HistogramInt = u32;
+pub type Sum = u64;
+pub type Count = u64;
+
 // -----------------------------------------------------------------------------
 // Metric
 // -----------------------------------------------------------------------------
diff --git a/src/utils.rs b/src/utils.rs
index 51146bdc6..1498adda0 100644
--- a/src/utils.rs
+++ b/src/utils.rs
@@ -1,6 +1,14 @@
 use std::time::Duration;
 
 use tokio::time::Instant;
+use uuid::Uuid;
+
+/// Amount of bytes in one GB
+pub const GIGABYTE: usize = 1024 * 1024 * 1024;
+
+pub fn new_context_id() -> String {
+    Uuid::new_v4().to_string()
+}
 pub fn calculate_tps_and_bpm(duration: Duration, transaction_count: usize, block_count: usize) -> (f64, f64) {
     let seconds_elapsed = duration.as_secs_f64() + f64::EPSILON;
diff --git a/tests/test_import_external_snapshot_rocksdb.rs b/tests/test_import_external_snapshot_rocksdb.rs
index 8ccd9144b..767aabd45 100644
--- a/tests/test_import_external_snapshot_rocksdb.rs
+++ b/tests/test_import_external_snapshot_rocksdb.rs
@@ -2,7 +2,6 @@ mod test_import_external_snapshot_common;
 
 #[cfg(feature = "rocks")]
 pub mod rocks_test {
-    use stratus::eth::primitives::BlockNumber;
     use stratus::eth::storage::PermanentStorage;
     use stratus::eth::storage::RocksPermanentStorage;
     use stratus::infra::docker::Docker;
@@ -18,10 +17,9 @@ pub mod rocks_test {
 
         let (accounts, slots) = common::filter_accounts_and_slots(snapshot);
 
-        let rocks_path_prefix: Option<String> = Some(String::new());
-        let rocks = RocksPermanentStorage::new(rocks_path_prefix).unwrap();
+        let rocks = RocksPermanentStorage::new(false, Some("test_import_external_snapshot_with_rocksdb".to_string())).unwrap();
         rocks.save_accounts(accounts).unwrap();
-        rocks.state.write_slots(slots, BlockNumber::ZERO);
+        rocks.state.write_slots(slots);
 
         common::execute_test("RocksDB", &global_services.config, &docker, rocks, block, receipts).await;
     });
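One last note on the backup flow introduced in this diff (`check_backup_threshold_trigger` feeding `listen_for_backup_trigger`): the use of `try_send` and the "another backup was in progress" error message suggest the trigger is a bounded channel, where sending either enqueues a backup request or fails immediately when one is already pending, so block processing never waits on a backup. Below is a self-contained sketch of that pattern with `std::sync::mpsc`; the threshold value and batch sizes are invented for illustration, and the real code runs the listener via `spawn_blocking_named` rather than a bare thread.

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::{sync_channel, TrySendError};
    use std::thread;

    // Hypothetical threshold; the real constant lives elsewhere in the codebase.
    const BACKUP_TRANSACTION_COUNT_THRESHOLD: usize = 4;

    fn main() {
        // Capacity 1: at most one backup request can be queued at a time;
        // triggers that arrive while one is pending are rejected by try_send.
        let (backup_trigger_tx, backup_trigger_rx) = sync_channel::<()>(1);

        let listener = thread::spawn(move || {
            while backup_trigger_rx.recv().is_ok() {
                println!("running backup...");
            }
        });

        let transactions_processed = AtomicUsize::new(0);
        for batch_size in [2_usize, 3, 1, 5] {
            let previous = transactions_processed.fetch_add(batch_size, Ordering::Relaxed);
            if previous + batch_size > BACKUP_TRANSACTION_COUNT_THRESHOLD {
                // Mirrors the diff: log and continue when the channel is full,
                // then reset the counter either way.
                if let Err(TrySendError::Full(())) = backup_trigger_tx.try_send(()) {
                    println!("backup already pending, skipping trigger");
                }
                transactions_processed.store(0, Ordering::Relaxed);
            }
        }

        drop(backup_trigger_tx); // closing the channel lets the listener exit
        listener.join().unwrap();
    }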