diff --git a/.config/zepter.yaml b/.config/zepter.yaml
index 8c6425f4ff0d..f5d320b4af97 100644
--- a/.config/zepter.yaml
+++ b/.config/zepter.yaml
@@ -16,6 +16,7 @@ workflows:
       # Do not try to add a new section into `[features]` of `A` only because `B` expose that feature. There are edge-cases where this is still needed, but we can add them manually.
       "--left-side-feature-missing=ignore",
       # Ignore the case that `A` it outside of the workspace. Otherwise it will report errors in external dependencies that we have no influence on.
+      "--left-side-outside-workspace=ignore",
       # Auxillary flags:
       "--offline",
diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh
index 3b2f2d6b7e6c..c34f82d2e315 100755
--- a/.github/assets/check_wasm.sh
+++ b/.github/assets/check_wasm.sh
@@ -11,7 +11,6 @@ exclude_crates=(
   # The following are not working yet, but known to be fixable
   reth-exex-types # https://github.com/paradigmxyz/reth/issues/9946
   # The following require investigation if they can be fixed
-  reth-auto-seal-consensus
   reth-basic-payload-builder
   reth-beacon-consensus
   reth-bench
diff --git a/.github/assets/kurtosis_op_network_params.yaml b/.github/assets/kurtosis_op_network_params.yaml
new file mode 100644
index 000000000000..0e1516cc8890
--- /dev/null
+++ b/.github/assets/kurtosis_op_network_params.yaml
@@ -0,0 +1,15 @@
+ethereum_package:
+  participants:
+    - el_type: reth
+      cl_type: lighthouse
+optimism_package:
+  chains:
+    - participants:
+        - el_type: op-geth
+          cl_type: op-node
+        - el_type: op-reth
+          el_image: "ghcr.io/paradigmxyz/op-reth:kurtosis-ci"
+          cl_type: op-node
+      batcher_params:
+        extra_params:
+          - "--throttle-interval=0"
diff --git a/.github/workflows/compact.yml b/.github/workflows/compact.yml
index 63f6f282fa26..484b27c820d0 100644
--- a/.github/workflows/compact.yml
+++ b/.github/workflows/compact.yml
@@ -36,7 +36,8 @@ jobs:
           ref: ${{ github.base_ref || 'main' }}
       # On `main` branch, generates test vectors and serializes them to disk using `Compact`.
       - name: Generate compact vectors
-        run: ${{ matrix.bin }} -- test-vectors compact --write
+        run: |
+          ${{ matrix.bin }} -- test-vectors compact --write
       - name: Checkout PR
         uses: actions/checkout@v4
         with:
diff --git a/.github/workflows/kurtosis-op.yml b/.github/workflows/kurtosis-op.yml
new file mode 100644
index 000000000000..2652992fca92
--- /dev/null
+++ b/.github/workflows/kurtosis-op.yml
@@ -0,0 +1,119 @@
+# Runs simple OP stack setup in Kurtosis
+
+name: kurtosis-op
+
+on:
+  workflow_dispatch:
+  schedule:
+    # every day
+    - cron: "0 1 * * *"
+
+env:
+  CARGO_TERM_COLOR: always
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
+jobs:
+  prepare-reth:
+    if: github.repository == 'paradigmxyz/reth'
+    timeout-minutes: 45
+    runs-on:
+      group: Reth
+    steps:
+      - uses: actions/checkout@v4
+      - run: mkdir artifacts
+      - uses: dtolnay/rust-toolchain@stable
+      - uses: Swatinem/rust-cache@v2
+        with:
+          cache-on-failure: true
+      - name: Build reth
+        run: |
+          cargo build --features optimism,asm-keccak --profile hivetests --bin op-reth --manifest-path crates/optimism/bin/Cargo.toml --locked
+          mkdir dist && cp ./target/hivetests/op-reth ./dist/reth
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+      - name: Build and export reth image
+        uses: docker/build-push-action@v6
+        with:
+          context: .
+          file: .github/assets/hive/Dockerfile
+          tags: ghcr.io/paradigmxyz/op-reth:kurtosis-ci
+          outputs: type=docker,dest=./artifacts/reth_image.tar
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
+
+      - name: Upload reth image
+        uses: actions/upload-artifact@v4
+        with:
+          name: artifacts
+          path: ./artifacts
+
+  test:
+    timeout-minutes: 60
+    strategy:
+      fail-fast: false
+    name: run kurtosis
+    runs-on:
+      group: Reth
+    needs:
+      - prepare-reth
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - name: Download reth image
+        uses: actions/download-artifact@v4
+        with:
+          name: artifacts
+          path: /tmp
+
+      - name: Load Docker image
+        run: |
+          docker load -i /tmp/reth_image.tar &
+          wait
+          docker image ls -a
+
+      - name: Install Foundry
+        uses: foundry-rs/foundry-toolchain@v1
+
+      - name: Run kurtosis
+        run: |
+          echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list
+          sudo apt update
+          sudo apt install kurtosis-cli
+          kurtosis engine start
+          kurtosis run --enclave op-devnet github.com/ethpandaops/optimism-package --args-file .github/assets/kurtosis_op_network_params.yaml
+          ENCLAVE_ID=$(curl http://127.0.0.1:9779/api/enclaves | jq --raw-output 'keys[0]')
+          GETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-1-op-geth-op-node-op-kurtosis".public_ports.rpc.number')
+          RETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-2-op-reth-op-node-op-kurtosis".public_ports.rpc.number')
+          echo "GETH_RPC=http://127.0.0.1:$GETH_PORT" >> $GITHUB_ENV
+          echo "RETH_RPC=http://127.0.0.1:$RETH_PORT" >> $GITHUB_ENV
+
+      - name: Assert that clients advance
+        run: |
+          for i in {1..100}; do
+            sleep 5
+            BLOCK_GETH=$(cast bn --rpc-url $GETH_RPC)
+            BLOCK_RETH=$(cast bn --rpc-url $RETH_RPC)
+
+            if [ $BLOCK_GETH -ge 100 ] && [ $BLOCK_RETH -ge 100 ] ; then exit 0; fi
+            echo "Waiting for clients to advance..., Reth: $BLOCK_RETH Geth: $BLOCK_GETH"
+          done
+          exit 1
+
+
+  notify-on-error:
+    needs: test
+    if: failure()
+    runs-on:
+      group: Reth
+    steps:
+      - name: Slack Webhook Action
+        uses: rtCamp/action-slack-notify@v2
+        env:
+          SLACK_COLOR: ${{ job.status }}
+          SLACK_MESSAGE: "Failed run: https://github.com/paradigmxyz/reth/actions/runs/${{ github.run_id }}"
+          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }}
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 4723d8a4d57f..fa7b4f9f45c2 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -18,10 +18,10 @@ jobs:
       matrix:
         include:
           - type: ethereum
-            args: --bin reth --workspace --locked
+            args: --bin reth --workspace --lib --examples --tests --benches --locked
             features: "ethereum asm-keccak jemalloc jemalloc-prof min-error-logs min-warn-logs min-info-logs min-debug-logs min-trace-logs"
           - type: optimism
-            args: --bin op-reth --workspace --locked
+            args: --bin op-reth --workspace --lib --examples --tests --benches --locked
             features: "optimism asm-keccak jemalloc jemalloc-prof min-error-logs min-warn-logs min-info-logs min-debug-logs min-trace-logs"
           - type: book
             args: --manifest-path book/sources/Cargo.toml --workspace --bins
diff --git a/Cargo.lock b/Cargo.lock
index bf73d7eef39f..ca5465d37bc9 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -91,15 +91,15 @@ dependencies = [
 
 [[package]]
 name = "allocator-api2"
-version = "0.2.18"
+version = "0.2.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f"
+checksum = "45862d1c77f2228b9e10bc609d5bc203d86ebc9b87ad8d5d5167a6c9abf739d9"
 
 [[package]]
 name = "alloy-chains"
-version = "0.1.40"
+version = "0.1.47"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d4932d790c723181807738cf1ac68198ab581cd699545b155601332541ee47bd"
+checksum = "18c5c520273946ecf715c0010b4e3503d7eba9893cd9ce6b7fff5654c4a3c470"
 dependencies = [
  "alloy-primitives",
  "alloy-rlp",
@@ -112,9 +112,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-consensus"
-version = "0.5.4"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "41ed961a48297c732a5d97ee321aa8bb5009ecadbcb077d8bec90cb54e651629"
+checksum = "ae09ffd7c29062431dd86061deefe4e3c6f07fa0d674930095f8dcedb0baf02c"
 dependencies = [
  "alloy-eips",
  "alloy-primitives",
@@ -124,15 +124,16 @@ dependencies = [
  "auto_impl",
  "c-kzg",
  "derive_more 1.0.0",
+ "rand 0.8.5",
  "serde",
  "serde_with",
 ]
 
 [[package]]
 name = "alloy-contract"
-version = "0.5.4"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "460ab80ce4bda1c80bcf96fe7460520476f2c7b734581c6567fac2708e2a60ef"
+checksum = "66430a72d5bf5edead101c8c2f0a24bada5ec9f3cf9909b3e08b6d6899b4803e"
 dependencies = [
  "alloy-dyn-abi",
  "alloy-json-abi",
@@ -150,9 +151,9 @@
 
 [[package]]
 name = "alloy-dyn-abi"
-version = "0.8.8"
+version = "0.8.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e6228abfc751a29cde117b0879b805a3e0b3b641358f063272c83ca459a56886"
+checksum = "85132f2698b520fab3f54beed55a44389f7006a7b557a0261e1e69439dcc1572"
 dependencies = [
  "alloy-json-abi",
  "alloy-primitives",
@@ -181,9 +182,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-eip7702"
-version = "0.3.2"
+version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64ffc577390ce50234e02d841214b3dc0bea6aaaae8e04bbf3cb82e9a45da9eb"
+checksum = "69fb9fd842fdf10a524bbf2c4de6942ad869c1c8c3d128a1b09e67ed5f7cedbd"
 dependencies = [
  "alloy-primitives",
  "alloy-rlp",
@@ -197,9 +198,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-eips"
-version = "0.5.4"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b69e06cf9c37be824b9d26d6d101114fdde6af0c87de2828b414c05c4b3daa71"
+checksum = "5b6aa3961694b30ba53d41006131a2fca3bdab22e4c344e46db2c639e7c2dfdd"
 dependencies = [
  "alloy-eip2930",
  "alloy-eip7702",
@@ -218,9 +219,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-genesis"
-version = "0.5.4"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dde15e14944a88bd6a57d325e9a49b75558746fe16aaccc79713ae50a6a9574c"
+checksum = "e53f7877ded3921d18a0a9556d55bedf84535567198c9edab2aa23106da91855"
 dependencies = [
  "alloy-primitives",
  "alloy-serde",
@@ -229,9 +230,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-json-abi"
-version = "0.8.8"
+version = "0.8.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d46eb5871592c216d39192499c95a99f7175cb94104f88c307e6dc960676d9f1"
+checksum = "b84c506bf264110fa7e90d9924f742f40ef53c6572ea56a0b0bd714a567ed389"
 dependencies = [
  "alloy-primitives",
  "alloy-sol-type-parser",
@@ -241,9 +242,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-json-rpc"
-version = "0.5.4"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af5979e0d5a7bf9c7eb79749121e8256e59021af611322aee56e77e20776b4b3"
+checksum = "3694b7e480728c0b3e228384f223937f14c10caef5a4c766021190fc8f283d35"
 dependencies = [
  "alloy-primitives",
  "alloy-sol-types",
@@ -255,9 +256,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-network"
-version = "0.5.4"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "204237129086ce5dc17a58025e93739b01b45313841f98fa339eb1d780511e57"
+checksum = "ea94b8ceb5c75d7df0a93ba0acc53b55a22b47b532b600a800a87ef04eb5b0b4"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -271,14 +272,16 @@ dependencies = [
  "async-trait",
  "auto_impl",
  "futures-utils-wasm",
+ "serde",
+ "serde_json",
  "thiserror",
 ]
 
 [[package]]
 name = "alloy-network-primitives"
-version = "0.5.3"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9a968c063fcfcb937736665c865a71fc2242b68916156f5ffa41fee7b44bb695"
+checksum = "df9f3e281005943944d15ee8491534a1c7b3cbf7a7de26f8c433b842b93eb5f9"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -289,9 +292,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-node-bindings"
-version = "0.5.4"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "27444ea67d360508753022807cdd0b49a95c878924c9c5f8f32668b7d7768245"
+checksum = "c9805d126f24be459b958973c0569c73e1aadd27d4535eee82b2b6764aa03616"
 dependencies = [
  "alloy-genesis",
  "alloy-primitives",
@@ -306,9 +309,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-primitives"
-version = "0.8.9"
+version = "0.8.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c71738eb20c42c5fb149571e76536a0f309d142f3957c28791662b96baf77a3d"
+checksum = "9fce5dbd6a4f118eecc4719eaa9c7ffc31c315e6c5ccde3642db927802312425"
 dependencies = [
  "alloy-rlp",
  "arbitrary",
@@ -319,7 +322,7 @@ dependencies = [
  "derive_more 1.0.0",
  "foldhash",
  "getrandom 0.2.15",
- "hashbrown 0.15.0",
+ "hashbrown 0.15.1",
  "hex-literal",
  "indexmap 2.6.0",
  "itoa",
@@ -338,9 +341,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-provider"
-version = "0.5.4"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4814d141ede360bb6cd1b4b064f1aab9de391e7c4d0d4d50ac89ea4bc1e25fbd"
+checksum = "40c1f9eede27bf4c13c099e8e64d54efd7ce80ef6ea47478aa75d5d74e2dba3b"
 dependencies = [
  "alloy-chains",
  "alloy-consensus",
@@ -379,9 +382,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-pubsub"
-version = "0.5.4"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "96ba46eb69ddf7a9925b81f15229cb74658e6eebe5dd30a5b74e2cd040380573"
+checksum = "90f1f34232f77341076541c405482e4ae12f0ee7153d8f9969fc1691201b2247"
 dependencies = [
  "alloy-json-rpc",
  "alloy-primitives",
@@ -398,9 +401,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-rlp"
-version = "0.3.8"
+version = "0.3.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "26154390b1d205a4a7ac7352aa2eb4f81f391399d4e2f546fb81a2f8bb383f62"
+checksum = "da0822426598f95e45dd1ea32a738dac057529a709ee645fcc516ffa4cbde08f"
 dependencies = [
  "alloy-rlp-derive",
  "arrayvec",
@@ -409,20 +412,20 @@ dependencies = [
 
 [[package]]
 name = "alloy-rlp-derive"
-version = "0.3.8"
+version = "0.3.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4d0f2d905ebd295e7effec65e5f6868d153936130ae718352771de3e7d03c75c"
+checksum = "2b09cae092c27b6f1bde952653a22708691802e57bfef4a2973b80bea21efd3f"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
 name = "alloy-rpc-client"
-version = "0.5.4"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7fc2bd1e7403463a5f2c61e955bcc9d3072b63aa177442b0f9aa6a6d22a941e3"
+checksum = "374dbe0dc3abdc2c964f36b3d3edf9cdb3db29d16bda34aa123f03d810bec1dd"
 dependencies = [
  "alloy-json-rpc",
  "alloy-primitives",
@@ -445,9 +448,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-rpc-types"
-version = "0.5.4"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eea9bf1abdd506f985a53533f5ac01296bcd6102c5e139bbc5d40bc468d2c916"
+checksum = "c74832aa474b670309c20fffc2a869fa141edab7c79ff7963fad0a08de60bae1"
 dependencies = [
  "alloy-primitives",
  "alloy-rpc-types-engine",
@@ -458,9 +461,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-rpc-types-admin"
-version = "0.5.4"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ea02c25541fb19eaac4278aa5c41d2d7e0245898887e54a74bfc0f3103e99415"
+checksum = "6bfd9b2cc3a1985f1f6da5afc41120256f9f9316fcd89e054cea99dbb10172f6"
 dependencies = [
  "alloy-genesis",
  "alloy-primitives",
@@ -470,20 +473,21 @@ dependencies = [
 
 [[package]]
 name = "alloy-rpc-types-anvil"
-version = "0.5.4"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2382fc63fb0cf3e02818d547b80cb66cc49a31f8803d0c328402b2008bc13650"
+checksum = "5ca97963132f78ddfc60e43a017348e6d52eea983925c23652f5b330e8e02291"
 dependencies = [
  "alloy-primitives",
+ "alloy-rpc-types-eth",
  "alloy-serde",
  "serde",
 ]
 
 [[package]]
 name = "alloy-rpc-types-beacon"
-version = "0.5.4"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "45357a642081c8ce235c0ad990c4e9279f5f18a723545076b38cfcc05cc25234"
+checksum = "922fa76678d2f9f07ea1b19309b5cfbf244c6029dcba3515227b515fdd6ed4a7"
 dependencies = [
  "alloy-eips",
  "alloy-primitives",
@@ -495,9 +499,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-rpc-types-debug"
-version = "0.5.4"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a5afe3ab1038f90faf56304aa0adf1e6a8c9844615d8f83967f932f3a70390b1"
+checksum = "ba2253bee958658ebd614c07a61c40580e09dd1fad3f017684314442332ab753"
 dependencies = [
  "alloy-primitives",
  "serde",
@@ -505,9 +509,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-rpc-types-engine"
-version = "0.5.4"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "886d22d41992287a235af2f3af4299b5ced2bcafb81eb835572ad35747476946"
+checksum = "3f56294dce86af23ad6ee8df46cf8b0d292eb5d1ff67dc88a0886051e32b1faf"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -526,9 +530,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-rpc-types-eth"
-version = "0.5.4"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "00b034779a4850b4b03f5be5ea674a1cf7d746b2da762b34d1860ab45e48ca27"
+checksum = "a8a477281940d82d29315846c7216db45b15e90bcd52309da9f54bcf7ad94a11"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -547,9 +551,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-rpc-types-mev"
-version = "0.5.4"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3246948dfa5f5060a9abe04233d741ea656ef076b12958f3242416ce9f375058"
+checksum = "8647f8135ee3d5de1cf196706c905c05728a4e38bb4a5b61a7214bd1ba8f60a6"
 dependencies = [
  "alloy-eips",
  "alloy-primitives",
@@ -560,9 +564,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-rpc-types-trace"
-version = "0.5.4"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4e5fb6c5c401321f802f69dcdb95b932f30f8158f6798793f914baac5995628e"
+checksum = "ecd8b4877ef520c138af702097477cdd19504a8e1e4675ba37e92ba40f2d3c6f"
 dependencies = [
  "alloy-primitives",
  "alloy-rpc-types-eth",
@@ -574,9 +578,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-rpc-types-txpool"
-version = "0.5.4"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9ad066b49c3b1b5f64cdd2399177a19926a6a15db2dbf11e2098de621f9e7480"
+checksum = "1d4ab49acf90a71f7fb894dc5fd485f1f07a1e348966c714c4d1e0b7478850a8"
 dependencies = [
  "alloy-primitives",
  "alloy-rpc-types-eth",
@@ -586,9 +590,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-serde"
-version = "0.5.4"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "028e72eaa9703e4882344983cfe7636ce06d8cce104a78ea62fd19b46659efc4"
+checksum = "4dfa4a7ccf15b2492bb68088692481fd6b2604ccbee1d0d6c44c21427ae4df83"
 dependencies = [
  "alloy-primitives",
  "arbitrary",
@@ -598,9 +602,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-signer"
-version = "0.5.4"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "592c185d7100258c041afac51877660c7bf6213447999787197db4842f0e938e"
+checksum = "2e10aec39d60dc27edcac447302c7803d2371946fb737245320a05b78eb2fafd"
 dependencies = [
  "alloy-primitives",
  "async-trait",
@@ -612,9 +616,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-signer-local"
-version = "0.5.4"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6614f02fc1d5b079b2a4a5320018317b506fd0a6d67c1fd5542a71201724986c"
+checksum = "d8396f6dff60700bc1d215ee03d86ff56de268af96e2bf833a14d0bafcab9882"
 dependencies = [
  "alloy-consensus",
  "alloy-network",
@@ -630,23 +634,23 @@ dependencies = [
 
 [[package]]
 name = "alloy-sol-macro"
-version = "0.8.9"
+version = "0.8.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0900b83f4ee1f45c640ceee596afbc118051921b9438fdb5a3175c1a7e05f8b"
+checksum = "9343289b4a7461ed8bab8618504c995c049c082b70c7332efd7b32125633dc05"
 dependencies = [
  "alloy-sol-macro-expander",
  "alloy-sol-macro-input",
  "proc-macro-error2",
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
 name = "alloy-sol-macro-expander"
-version = "0.8.9"
+version = "0.8.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a41b1e78dde06b5e12e6702fa8c1d30621bf07728ba75b801fb801c9c6a0ba10"
+checksum = "4222d70bec485ceccc5d8fd4f2909edd65b5d5e43d4aca0b5dcee65d519ae98f"
 dependencies = [
  "alloy-sol-macro-input",
  "const-hex",
@@ -655,31 +659,31 @@ dependencies = [
  "proc-macro-error2",
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
  "syn-solidity",
  "tiny-keccak",
 ]
 
 [[package]]
 name = "alloy-sol-macro-input"
-version = "0.8.9"
+version = "0.8.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "91dc311a561a306664393407b88d3e53ae58581624128afd8a15faa5de3627dc"
+checksum = "2e17f2677369571b976e51ea1430eb41c3690d344fef567b840bfc0b01b6f83a"
 dependencies = [
  "const-hex",
  "dunce",
  "heck",
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
  "syn-solidity",
 ]
 
 [[package]]
 name = "alloy-sol-type-parser"
-version = "0.8.8"
+version = "0.8.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f631f0bd9a9d79619b27c91b6b1ab2c4ef4e606a65192369a1ee05d40dcf81cc"
+checksum = "aa64d80ae58ffaafdff9d5d84f58d03775f66c84433916dc9a64ed16af5755da"
 dependencies = [
  "serde",
  "winnow",
@@ -687,9 +691,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-sol-types"
-version = "0.8.8"
+version = "0.8.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c2841af22d99e2c0f82a78fe107b6481be3dd20b89bfb067290092794734343a"
+checksum = "6520d427d4a8eb7aa803d852d7a52ceb0c519e784c292f64bb339e636918cf27"
 dependencies = [
  "alloy-json-abi",
  "alloy-primitives",
@@ -700,9 +704,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-transport"
-version = "0.5.4"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "be77579633ebbc1266ae6fd7694f75c408beb1aeb6865d0b18f22893c265a061"
+checksum = "f99acddb34000d104961897dbb0240298e8b775a7efffb9fda2a1a3efedd65b3"
 dependencies = [
  "alloy-json-rpc",
  "base64 0.22.1",
@@ -720,9 +724,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-transport-http"
-version = "0.5.4"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "91fd1a5d0827939847983b46f2f79510361f901dc82f8e3c38ac7397af142c6e"
+checksum = "5dc013132e34eeadaa0add7e74164c1503988bfba8bae885b32e0918ba85a8a6"
 dependencies = [
  "alloy-json-rpc",
  "alloy-transport",
@@ -735,9 +739,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-transport-ipc"
-version = "0.5.4"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8073d1186bfeeb8fbdd1292b6f1a0731f3aed8e21e1463905abfae0b96a887a6"
+checksum = "063edc0660e81260653cc6a95777c29d54c2543a668aa5da2359fb450d25a1ba"
 dependencies = [
  "alloy-json-rpc",
  "alloy-pubsub",
@@ -754,9 +758,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-transport-ws"
-version = "0.5.4"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "61f27837bb4a1d6c83a28231c94493e814882f0e9058648a97e908a5f3fc9fcf"
+checksum = "abd170e600801116d5efe64f74a4fc073dbbb35c807013a7d0a388742aeebba0"
 dependencies = [
  "alloy-pubsub",
  "alloy-transport",
@@ -772,9 +776,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-trie"
-version = "0.7.2"
+version = "0.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cdd7f8b3a7c65ca09b3c7bdd7c7d72d7423d026f5247eda96af53d24e58315c1"
+checksum = "40d8e28db02c006f7abb20f345ffb3cc99c465e36f676ba262534e654ae76042"
 dependencies = [
  "alloy-primitives",
  "alloy-rlp",
@@ -813,9 +817,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
 
 [[package]]
 name = "anstream"
-version = "0.6.15"
+version = "0.6.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526"
+checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b"
 dependencies = [
  "anstyle",
  "anstyle-parse",
@@ -828,43 +832,43 @@ dependencies = [
 
 [[package]]
 name = "anstyle"
-version = "1.0.8"
+version = "1.0.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1"
+checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9"
 
 [[package]]
 name = "anstyle-parse"
-version = "0.2.5"
+version = "0.2.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb"
+checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9"
 dependencies = [
  "utf8parse",
 ]
 
 [[package]]
 name = "anstyle-query"
-version = "1.1.1"
+version = "1.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a"
+checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c"
 dependencies = [
- "windows-sys 0.52.0",
+ "windows-sys 0.59.0",
 ]
 
 [[package]]
 name = "anstyle-wincon"
-version = "3.0.4"
+version = "3.0.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8"
+checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125"
 dependencies = [
  "anstyle",
- "windows-sys 0.52.0",
+ "windows-sys 0.59.0",
 ]
 
 [[package]]
 name = "anyhow"
-version = "1.0.90"
+version = "1.0.93"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "37bf3594c4c988a53154954629820791dde498571819ae4ca50ca811e060cc95"
+checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775"
 
 [[package]]
 name = "aquamarine"
@@ -877,14 +881,14 @@ dependencies = [
  "proc-macro-error2",
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
 name = "arbitrary"
-version = "1.3.2"
+version = "1.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110"
+checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223"
 dependencies = [
  "derive_arbitrary",
 ]
@@ -1053,9 +1057,9 @@ dependencies = [
 
 [[package]]
 name = "async-compression"
-version = "0.4.16"
+version = "0.4.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "103db485efc3e41214fe4fda9f3dbeae2eb9082f48fd236e6095627a9422066e"
+checksum = "0cb8f1d480b0ea3783ab015936d2a55c87e219676f0c0b7dec61494043f21857"
 dependencies = [
  "brotli",
  "flate2",
@@ -1100,7 +1104,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
@@ -1111,7 +1115,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
@@ -1149,7 +1153,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
@@ -1164,7 +1168,7 @@ version = "1.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e4fa97bb310c33c811334143cf64c5bb2b7b3c06e453db6b095d7061eff8f113"
 dependencies = [
- "fastrand 2.1.1",
+ "fastrand 2.2.0",
  "tokio",
 ]
 
@@ -1255,7 +1259,7 @@ dependencies = [
  "regex",
  "rustc-hash 1.1.0",
  "shlex",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
@@ -1437,7 +1441,7 @@ checksum = "240f4126219a83519bad05c9a40bfc0303921eeb571fc2d7e44c17ffac99d3f1"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
  "synstructure",
 ]
 
@@ -1526,7 +1530,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "40723b8fb387abc38f4f4a37c09073622e41dd12327033091ef8950659e6dc0c"
 dependencies = [
  "memchr",
- "regex-automata 0.4.8",
+ "regex-automata 0.4.9",
  "serde",
 ]
 
@@ -1559,7 +1563,7 @@ checksum = "bcfcc3cd946cb52f0bbfdbbcfa2f4e24f75ebb6c0e1002f7c25904fada18b9ec"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
@@ -1570,9 +1574,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
 
 [[package]]
 name = "bytes"
-version = "1.7.2"
+version = "1.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3"
+checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da"
 dependencies = [
  "serde",
 ]
@@ -1647,9 +1651,9 @@ dependencies = [
 
 [[package]]
 name = "cc"
-version = "1.1.31"
+version = "1.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c2e7962b54006dcfcc61cb72735f4d89bb97061dd6a7ed882ec6b8ee53714c6f"
+checksum = "1aeb932158bd710538c73702db6945cb68a8fb08c519e6e12706b94263b36db8"
 dependencies = [
  "jobserver",
  "libc",
@@ -1677,6 +1681,12 @@ version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
 
+[[package]]
+name = "cfg_aliases"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"
+
 [[package]]
 name = "chrono"
 version = "0.4.38"
@@ -1771,7 +1781,7 @@ dependencies = [
  "heck",
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
@@ -1833,9 +1843,9 @@ dependencies = [
 
 [[package]]
 name = "colorchoice"
-version = "1.0.2"
+version = "1.0.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0"
+checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990"
 
 [[package]]
 name = "combine"
@@ -1988,9 +1998,9 @@ dependencies = [
 
 [[package]]
 name = "cpufeatures"
-version = "0.2.14"
+version = "0.2.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0"
+checksum = "0ca741a962e1b0bff6d724a1a0958b686406e853bb14061f218562e1896f95e6"
 dependencies = [
  "libc",
 ]
@@ -2176,9 +2186,9 @@ dependencies = [
 
 [[package]]
 name = "csv"
-version = "1.3.0"
+version = "1.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe"
+checksum = "acdc4883a9c96732e4733212c01447ebd805833b7275a73ca3ee080fd77afdaf"
 dependencies = [
  "csv-core",
  "itoa",
@@ -2228,7 +2238,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
@@ -2252,7 +2262,7 @@ dependencies = [
  "proc-macro2",
  "quote",
  "strsim",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
@@ -2263,7 +2273,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806"
 dependencies = [
  "darling_core",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
@@ -2379,13 +2389,13 @@ dependencies = [
 
 [[package]]
 name = "derive_arbitrary"
-version = "1.3.2"
+version = "1.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611"
+checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
@@ -2396,7 +2406,7 @@ checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
@@ -2417,7 +2427,7 @@ dependencies = [
  "convert_case",
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
  "unicode-xid",
 ]
 
@@ -2531,7 +2541,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
@@ -2599,8 +2609,10 @@ dependencies = [
 
 [[package]]
 name = "ef-tests"
-version = "1.1.0"
+version = "1.1.1"
 dependencies = [
+ "alloy-consensus",
+ "alloy-eips",
  "alloy-primitives",
  "alloy-rlp",
  "rayon",
@@ -2679,7 +2691,7 @@ dependencies = [
  "heck",
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
@@ -2690,7 +2702,7 @@ checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
@@ -2747,7 +2759,7 @@ dependencies = [
  "darling",
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
@@ -2842,6 +2854,7 @@
 name = "example-custom-engine-types"
 version = "0.0.0"
 dependencies = [
+ "alloy-eips",
 "alloy-genesis",
 "alloy-primitives",
 "alloy-rpc-types",
@@ -2856,6 +2869,7 @@
 "reth-payload-builder",
 "reth-primitives",
 "reth-tracing",
+ "reth-trie-db",
 "serde",
 "thiserror",
 "tokio",
@@ -2865,6 +2879,7 @@
 name = "example-custom-evm"
 version = "0.0.0"
 dependencies = [
+ "alloy-consensus",
 "alloy-genesis",
 "alloy-primitives",
 "eyre",
@@ -2883,8 +2898,9 @@
 name = "example-custom-inspector"
 version = "0.0.0"
 dependencies = [
+ "alloy-eips",
 "alloy-primitives",
- "alloy-rpc-types",
+ "alloy-rpc-types-eth",
 "clap",
 "futures-util",
 "reth",
@@ -2906,6 +2922,7 @@
 name = "example-custom-payload-builder"
 version = "0.0.0"
 dependencies = [
+ "alloy-eips",
 "alloy-primitives",
 "eyre",
 "futures-util",
@@ -2943,7 +2960,7 @@ name = "example-db-access"
 version = "0.0.0"
 dependencies = [
 "alloy-primitives",
- "alloy-rpc-types",
+ "alloy-rpc-types-eth",
 "eyre",
 "reth-chainspec",
 "reth-db",
@@ -3048,6 +3065,7 @@
 name = "example-stateful-precompile"
 version = "0.0.0"
 dependencies = [
+ "alloy-consensus",
 "alloy-genesis",
 "alloy-primitives",
 "eyre",
@@ -3102,9 +3120,9 @@ dependencies = [
 
 [[package]]
 name = "fastrand"
-version = "2.1.1"
+version = "2.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6"
+checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4"
 
 [[package]]
 name = "fastrlp"
@@ -3302,7 +3320,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
@@ -3538,9 +3556,9 @@ dependencies = [
 
 [[package]]
 name = "hashbrown"
-version = "0.15.0"
+version = "0.15.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb"
+checksum = "3a9bfc1af68b1726ea47d3d5109de126281def866b33970e10fbab11b5dafab3"
 dependencies = [
  "allocator-api2",
  "equivalent",
@@ -3787,9 +3805,9 @@ dependencies = [
 
 [[package]]
 name = "hyper-util"
-version = "0.1.9"
+version = "0.1.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b"
+checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4"
 dependencies = [
  "bytes",
  "futures-channel",
@@ -3828,7 +3846,7 @@ dependencies = [
  "quote",
  "serde",
  "serde_json",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
@@ -3978,7 +3996,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
@@ -3999,12 +4017,23 @@ dependencies = [
 
 [[package]]
 name = "idna"
-version = "0.5.0"
+version = "1.0.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6"
+checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e"
 dependencies = [
- "unicode-bidi",
- "unicode-normalization",
+ "idna_adapter",
+ "smallvec",
+ "utf8_iter",
+]
+
+[[package]]
+name = "idna_adapter"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71"
+dependencies = [
+ "icu_normalizer",
+ "icu_properties",
 ]
 
 [[package]]
@@ -4081,10 +4110,16 @@ checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da"
 dependencies = [
  "arbitrary",
  "equivalent",
- "hashbrown 0.15.0",
+ "hashbrown 0.15.1",
  "serde",
 ]
 
+[[package]]
+name = "indoc"
+version = "2.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b248f5224d1d606005e02c97f5aa4e88eeb230488bcc03bc9ca4d7991399f2b5"
+
 [[package]]
 name = "infer"
 version = "0.2.3"
@@ -4141,12 +4176,16 @@ dependencies = [
 
 [[package]]
 name = "instability"
-version = "0.3.2"
+version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b23a0c8dfe501baac4adf6ebbfa6eddf8f0c07f56b058cc1288017e32397846c"
+checksum = "b829f37dead9dc39df40c2d3376c179fdfd2ac771f53f55d3c30dc096a3c0c6e"
 dependencies = [
+ "darling",
+ "indoc",
+ "pretty_assertions",
+ "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
@@ -4394,7 +4433,7 @@ dependencies = [
  "proc-macro-crate",
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
@@ -4554,9 +4593,9 @@ dependencies = [
 
 [[package]]
 name = "libc"
-version = "0.2.161"
+version = "0.2.162"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1"
+checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398"
 
 [[package]]
 name = "libloading"
@@ -4570,9 +4609,9 @@ dependencies = [
 
 [[package]]
 name = "libm"
-version = "0.2.8"
+version = "0.2.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058"
+checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa"
 
 [[package]]
 name = "libp2p-identity"
@@ -4727,7 +4766,7 @@ version = "0.12.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38"
 dependencies = [
- "hashbrown 0.15.0",
+ "hashbrown 0.15.1",
 ]
 
 [[package]]
@@ -4795,9 +4834,9 @@ dependencies = [
 
 [[package]]
 name = "metrics"
-version = "0.23.0"
+version = "0.24.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "884adb57038347dfbaf2d5065887b6cf4312330dc8e94bc30a1a839bd79d3261"
+checksum = "8ae428771d17306715c5091d446327d1cfdedc82185c65ba8423ab404e45bf10"
 dependencies = [
  "ahash",
  "portable-atomic",
@@ -4812,14 +4851,14 @@ dependencies = [
  "proc-macro2",
  "quote",
  "regex",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
 name = "metrics-exporter-prometheus"
-version = "0.15.3"
+version = "0.16.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b4f0c8427b39666bf970460908b213ec09b3b350f20c0c2eabcbba51704a08e6"
+checksum = "85b6f8152da6d7892ff1b7a1c0fa3f435e92b5918ad67035c3bb432111d9a29b"
 dependencies = [
  "base64 0.22.1",
  "indexmap 2.6.0",
@@ -4831,9 +4870,9 @@ dependencies = [
 
 [[package]]
 name = "metrics-process"
-version = "2.3.0"
+version = "2.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e69e6ced169644e186e060ddc15f3923fdf06862c811a867bb1e5e7c7824f4d0"
+checksum = "57ca8ecd85575fbb143b2678cb123bb818779391ec0f745b1c4a9dbabadde407"
 dependencies = [
  "libc",
  "libproc",
@@ -4847,15 +4886,16 @@ dependencies = [
 
 [[package]]
 name = "metrics-util"
-version = "0.17.0"
+version = "0.18.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4259040465c955f9f2f1a4a8a16dc46726169bca0f88e8fb2dbeced487c3e828"
+checksum = "15b482df36c13dd1869d73d14d28cd4855fbd6cfc32294bee109908a9f4a4ed7"
 dependencies = [
  "crossbeam-epoch",
  "crossbeam-utils",
- "hashbrown 0.14.5",
+ "hashbrown 0.15.1",
+ "indexmap 2.6.0",
  "metrics",
- "num_cpus",
+ "ordered-float",
  "quanta",
  "sketches-ddsketch",
 ]
@@ -4959,7 +4999,7 @@ dependencies = [
  "cfg-if",
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
@@ -5004,7 +5044,7 @@ dependencies = [
  "percent-encoding",
  "serde",
  "static_assertions",
- "unsigned-varint 0.8.0",
+ "unsigned-varint",
  "url",
 ]
 
@@ -5021,12 +5061,12 @@ dependencies = [
 
 [[package]]
 name = "multihash"
-version = "0.19.1"
+version = "0.19.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "076d548d76a0e2a0d4ab471d0b1c36c577786dfc4471242035d97a12a735c492"
+checksum = "cc41f430805af9d1cf4adae4ed2149c759b877b01d909a1f40256188d09345d2"
 dependencies = [
  "core2",
- "unsigned-varint 0.7.2",
+ "unsigned-varint",
 ]
 
 [[package]]
@@ -5207,7 +5247,7 @@ dependencies = [
  "proc-macro-crate",
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
@@ -5260,9 +5300,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9"
 
 [[package]]
 name = "op-alloy-consensus"
-version = "0.5.0"
+version = "0.6.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "99d49163f952491820088dd0e66f3a35d63337c3066eceff0a931bf83a8e2101"
+checksum = "bff54d1d790eca1f3aedbd666162e9c42eceff90b9f9d24b352ed9c2df1e901a"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -5278,9 +5318,9 @@ dependencies = [
 
 [[package]]
 name = "op-alloy-genesis"
-version = "0.5.0"
+version = "0.6.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e46c2ab105f679f0cbfbc3fb762f3456d4b8556c841e667fc8f3c2226eb6c1e"
+checksum = "ae84fd64fbc53b3e958ea5a96d7f5633e4a111092e41c51672c2d91835c09efb"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -5292,40 +5332,44 @@ dependencies = [
 
 [[package]]
 name = "op-alloy-network"
-version = "0.5.0"
+version = "0.6.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "75ff1ea317441b9eb6317b24d13f9088e3b14ef48b15bfb6a125ca404df036d8"
+checksum = "d71e777450ee3e9c5177e00865e9b4496472b623c50f146fc907b667c6b4ab37"
 dependencies = [
  "alloy-consensus",
  "alloy-network",
  "alloy-primitives",
  "alloy-rpc-types-eth",
+ "alloy-signer",
  "op-alloy-consensus",
  "op-alloy-rpc-types",
 ]
 
 [[package]]
 name = "op-alloy-protocol"
-version = "0.5.0"
+version = "0.6.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6c439457b2a1791325603fc18a94cc175e0b4b1127f11ff8a45071f05d044dcb"
+checksum = "1e854d2d4958d0a213731560172e8455536329ee9574473ff79fa953da91eb6a"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
  "alloy-primitives",
  "alloy-rlp",
  "alloy-serde",
+ "async-trait",
  "derive_more 1.0.0",
  "op-alloy-consensus",
  "op-alloy-genesis",
  "serde",
+ "tracing",
+ "unsigned-varint",
 ]
 
 [[package]]
 name = "op-alloy-rpc-types"
-version = "0.5.0"
+version = "0.6.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c9556293835232b019ec9c6fd84e4265a3151111af60ea09b5b513e3dbed41c"
+checksum = "981b7f8ab11fe85ba3c1723702f000429b8d0c16b5883c93d577895f262cbac6"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -5334,6 +5378,7 @@ dependencies = [
  "alloy-rpc-types-eth",
  "alloy-serde",
  "arbitrary",
+ "derive_more 1.0.0",
  "op-alloy-consensus",
  "serde",
  "serde_json",
@@ -5341,15 +5386,17 @@ dependencies = [
 
 [[package]]
 name = "op-alloy-rpc-types-engine"
-version = "0.5.0"
+version = "0.6.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a42a5ac4e07ed226b6a2aeefaad9b2cc7ec160e372ba626a4214d681a355fc2"
+checksum = "a227b16c9c5df68b112c8db9d268ebf46b3e26c744b4d59d4949575cd603a292"
 dependencies = [
+ "alloy-eips",
  "alloy-primitives",
  "alloy-rpc-types-engine",
  "alloy-serde",
  "derive_more 1.0.0",
  "ethereum_ssz",
+ "op-alloy-consensus",
  "op-alloy-protocol",
  "serde",
  "snap",
@@ -5357,7 +5404,7 @@ dependencies = [
 
 [[package]]
 name = "op-reth"
-version = "1.1.0"
+version = "1.1.1"
 dependencies = [
  "clap",
  "reth-cli-util",
@@ -5393,6 +5440,15 @@ version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d"
 
+[[package]]
+name = "ordered-float"
+version = "4.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c65ee1f9701bf938026630b455d5315f490640234259037edb259798b3bcf85e"
+dependencies = [
+ "num-traits",
+]
+
 [[package]]
 name = "overload"
 version = "0.1.1"
@@ -5561,7 +5617,7 @@ dependencies = [
  "phf_shared",
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
@@ -5575,29 +5631,29 @@ dependencies = [
 
 [[package]]
 name = "pin-project"
-version = "1.1.6"
+version = "1.1.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "baf123a161dde1e524adf36f90bc5d8d3462824a9c43553ad07a8183161189ec"
+checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95"
 dependencies = [
  "pin-project-internal",
 ]
 
 [[package]]
 name = "pin-project-internal"
-version = "1.1.6"
+version = "1.1.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8"
+checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
 name = "pin-project-lite"
-version = "0.2.14"
+version = "0.2.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02"
+checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff"
 
 [[package]]
 name = "pin-utils"
@@ -5757,12 +5813,12 @@ dependencies = [
 
 [[package]]
 name = "prettyplease"
-version = "0.2.23"
+version = "0.2.25"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "904afd36257cdb6ce0bee88b7981847bd7b955e5e216bb32f466b302923ad446"
+checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033"
 dependencies = [
  "proc-macro2",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
@@ -5813,14 +5869,14 @@ dependencies = [
  "proc-macro-error-attr2",
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.88"
+version = "1.0.89"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7c3a7fc5db1e57d5a779a352c8cdb57b29aa4c40cc69c3a68a7fedc815fbf2f9"
+checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e"
 dependencies = [
  "unicode-ident",
 ]
@@ -5911,7 +5967,7 @@ checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.80",
+ "syn 2.0.87",
 ]
 
 [[package]]
@@ -5990,10 +6046,11 @@ dependencies = [
 
 [[package]]
 name = "quinn-udp"
-version = "0.5.5"
+version = "0.5.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b"
+checksum = "7d5a626c6807713b15cac82a6acaccd6043c9a5408c24baae07611fec3f243da"
 dependencies = [
+ "cfg_aliases",
  "libc",
  "once_cell",
  "socket2",
@@ -6175,13 +6232,13 @@ dependencies = [
 
 [[package]]
 name = "regex"
-version = "1.11.0"
+version = "1.11.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8"
+checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
 dependencies = [
  "aho-corasick",
  "memchr",
- "regex-automata 0.4.8",
+ "regex-automata 0.4.9",
  "regex-syntax 0.8.5",
 ]
 
@@ -6196,9 +6253,9 @@ dependencies = [
 
 [[package]]
 name = "regex-automata"
-version = "0.4.8"
+version = "0.4.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3"
+checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -6227,11 +6284,17 @@ dependencies = [
  "memchr",
 ]
 
+[[package]]
+name = "relative-path"
+version = "1.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2"
+
 [[package]]
 name = "reqwest"
-version = "0.12.8"
+version = "0.12.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b"
+checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f"
 dependencies = [
  "base64 0.22.1",
  "bytes",
@@ -6284,7 +6347,7 @@ dependencies = [
 
 [[package]]
 name = "reth"
-version = "1.1.0"
+version = "1.1.1"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -6331,6 +6394,7 @@ dependencies = [
  "reth-payload-primitives",
  "reth-payload-validator",
  "reth-primitives",
+ "reth-primitives-traits",
  "reth-provider",
  "reth-prune",
  "reth-revm",
@@ -6354,40 +6418,9 @@ dependencies = [
  "tracing",
 ]
 
-[[package]]
-name = "reth-auto-seal-consensus"
-version = "1.1.0"
-dependencies = [
- "alloy-eips",
- "alloy-primitives",
- "alloy-rpc-types-engine",
- "futures-util",
- "reth-beacon-consensus",
- "reth-chainspec",
- "reth-consensus",
- "reth-engine-primitives",
- "reth-evm",
- "reth-execution-errors",
- "reth-execution-types",
- "reth-network-p2p",
- "reth-network-peers",
- "reth-optimism-consensus",
- "reth-primitives",
- "reth-provider",
- "reth-revm",
- "reth-stages-api",
- "reth-tokio-util",
- "reth-transaction-pool",
- "reth-trie",
- "revm-primitives",
- "tokio",
- "tokio-stream",
- "tracing",
-]
-
 [[package]]
 name = "reth-basic-payload-builder"
-version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6403,6 +6436,7 @@ dependencies = [ "reth-payload-primitives", "reth-primitives", "reth-provider", + "reth-revm", "reth-tasks", "reth-transaction-pool", "revm", @@ -6412,8 +6446,9 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "1.1.0" +version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-genesis", "alloy-primitives", @@ -6464,7 +6499,7 @@ dependencies = [ [[package]] name = "reth-bench" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -6499,7 +6534,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6537,8 +6572,9 @@ dependencies = [ [[package]] name = "reth-blockchain-tree-api" -version = "1.1.0" +version = "1.1.1" dependencies = [ + "alloy-eips", "alloy-primitives", "reth-consensus", "reth-execution-errors", @@ -6549,7 +6585,7 @@ dependencies = [ [[package]] name = "reth-chain-state" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6578,7 +6614,7 @@ dependencies = [ [[package]] name = "reth-chainspec" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-chains", "alloy-consensus", @@ -6599,7 +6635,7 @@ dependencies = [ [[package]] name = "reth-cli" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-genesis", "clap", @@ -6612,11 +6648,13 @@ dependencies = [ [[package]] name = "reth-cli-commands" -version = "1.1.0" +version = "1.1.1" dependencies = [ "ahash", + "alloy-consensus", "alloy-eips", "alloy-primitives", + "alloy-rlp", "arbitrary", "backon", "clap", @@ -6677,7 +6715,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.1.0" +version = "1.1.1" dependencies = [ "reth-tasks", "tokio", @@ -6686,7 +6724,7 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -6696,6 +6734,7 @@ dependencies = [ "rand 0.8.5", "reth-fs-util", "secp256k1", + "serde", "thiserror", "tikv-jemallocator", "tracy-client", @@ -6703,7 +6742,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6725,18 +6764,18 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.1.0" +version = "1.1.1" dependencies = [ "convert_case", "proc-macro2", "quote", "similar-asserts", - "syn 2.0.80", + "syn 2.0.87", ] [[package]] name = "reth-config" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "eyre", @@ -6752,8 +6791,9 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "1.1.0" +version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "auto_impl", @@ -6763,7 +6803,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6779,14 +6819,14 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-provider", - "alloy-rpc-types", "alloy-rpc-types-engine", + "alloy-rpc-types-eth", "auto_impl", "eyre", "futures", @@ -6802,8 +6842,9 @@ dependencies = [ [[package]] name = "reth-db" -version = "1.1.0" +version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-primitives", 
"arbitrary", "assert_matches", @@ -6842,8 +6883,9 @@ dependencies = [ [[package]] name = "reth-db-api" -version = "1.1.0" +version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-genesis", "alloy-primitives", "arbitrary", @@ -6869,7 +6911,7 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -6898,8 +6940,9 @@ dependencies = [ [[package]] name = "reth-db-models" -version = "1.1.0" +version = "1.1.1" dependencies = [ + "alloy-eips", "alloy-primitives", "arbitrary", "bytes", @@ -6907,14 +6950,14 @@ dependencies = [ "proptest", "proptest-arbitrary-interop", "reth-codecs", - "reth-primitives", + "reth-primitives-traits", "serde", "test-fuzz", ] [[package]] name = "reth-discv4" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -6922,6 +6965,7 @@ dependencies = [ "discv5", "enr", "generic-array", + "itertools 0.13.0", "parking_lot", "rand 0.8.5", "reth-ethereum-forks", @@ -6940,7 +6984,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -6964,7 +7008,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-chains", "alloy-primitives", @@ -6992,8 +7036,9 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.1.0" +version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", @@ -7014,6 +7059,7 @@ dependencies = [ "reth-network-p2p", "reth-network-peers", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-storage-api", "reth-tasks", @@ -7029,13 +7075,14 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-network", "alloy-primitives", - "alloy-rpc-types", + "alloy-rpc-types-engine", + "alloy-rpc-types-eth", "alloy-signer", "alloy-signer-local", "derive_more 1.0.0", @@ -7065,7 +7112,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.1.0" +version = "1.1.1" dependencies = [ "aes", "alloy-primitives", @@ -7095,7 +7142,7 @@ dependencies = [ [[package]] name = "reth-engine-local" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -7125,7 +7172,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "reth-execution-types", @@ -7137,7 +7184,7 @@ dependencies = [ [[package]] name = "reth-engine-service" -version = "1.1.0" +version = "1.1.1" dependencies = [ "futures", "pin-project", @@ -7165,8 +7212,9 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.1.0" +version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", @@ -7174,6 +7222,7 @@ dependencies = [ "assert_matches", "futures", "metrics", + "pin-project", "reth-beacon-consensus", "reth-blockchain-tree", "reth-blockchain-tree-api", @@ -7204,14 +7253,16 @@ dependencies = [ "reth-tracing", "reth-trie", "reth-trie-parallel", + "revm-primitives", "thiserror", "tokio", + "tokio-stream", "tracing", ] [[package]] name = "reth-engine-util" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7243,7 +7294,7 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.1.0" +version = "1.1.1" dependencies = [ 
"reth-blockchain-tree-api", "reth-consensus", @@ -7255,7 +7306,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7290,7 +7341,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7307,13 +7358,14 @@ dependencies = [ "reth-chainspec", "reth-codecs-derive", "reth-primitives", + "reth-primitives-traits", "serde", "thiserror", ] [[package]] name = "reth-ethereum-cli" -version = "1.1.0" +version = "1.1.1" dependencies = [ "clap", "eyre", @@ -7324,7 +7376,7 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7338,7 +7390,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7357,7 +7409,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7377,7 +7429,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7403,7 +7455,7 @@ dependencies = [ [[package]] name = "reth-etl" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "rayon", @@ -7413,7 +7465,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7421,6 +7473,7 @@ dependencies = [ "auto_impl", "futures-util", "metrics", + "metrics-util", "parking_lot", "reth-chainspec", "reth-consensus", @@ -7440,7 +7493,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7463,7 +7516,7 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7478,7 +7531,7 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7487,6 +7540,7 @@ dependencies = [ "rand 0.8.5", "reth-execution-errors", "reth-primitives", + "reth-primitives-traits", "reth-trie", "revm", "serde", @@ -7495,7 +7549,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7519,7 +7573,6 @@ dependencies = [ "reth-metrics", "reth-node-api", "reth-node-core", - "reth-payload-builder", "reth-primitives", "reth-primitives-traits", "reth-provider", @@ -7539,8 +7592,9 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.1.0" +version = "1.1.1" dependencies = [ + "alloy-eips", "eyre", "futures-util", "rand 0.8.5", @@ -7563,6 +7617,7 @@ dependencies = [ "reth-provider", "reth-tasks", "reth-transaction-pool", + "reth-trie-db", "tempfile", "thiserror", "tokio", @@ -7570,7 +7625,7 @@ dependencies = [ [[package]] name = "reth-exex-types" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7586,7 +7641,7 @@ dependencies = [ [[package]] name = "reth-fs-util" -version = "1.1.0" +version = "1.1.1" dependencies = [ "serde", "serde_json", @@ -7595,8 +7650,9 @@ dependencies = [ [[package]] name = "reth-invalid-block-hooks" -version = "1.1.0" 
+version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rlp", "alloy-rpc-types-debug", @@ -7619,7 +7675,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.1.0" +version = "1.1.1" dependencies = [ "async-trait", "bytes", @@ -7641,7 +7697,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.1.0" +version = "1.1.1" dependencies = [ "bitflags 2.6.0", "byteorder", @@ -7662,7 +7718,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "1.1.0" +version = "1.1.1" dependencies = [ "bindgen", "cc", @@ -7670,7 +7726,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.1.0" +version = "1.1.1" dependencies = [ "futures", "metrics", @@ -7681,14 +7737,14 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", ] [[package]] name = "reth-net-nat" -version = "1.1.0" +version = "1.1.1" dependencies = [ "futures-util", "if-addrs", @@ -7702,7 +7758,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7739,6 +7795,7 @@ dependencies = [ "reth-network-peers", "reth-network-types", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-storage-api", "reth-tasks", @@ -7762,7 +7819,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "alloy-rpc-types-admin", @@ -7784,8 +7841,9 @@ dependencies = [ [[package]] name = "reth-network-p2p" -version = "1.1.0" +version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "auto_impl", @@ -7797,6 +7855,7 @@ dependencies = [ "reth-network-peers", "reth-network-types", "reth-primitives", + "reth-primitives-traits", "reth-storage-errors", "tokio", "tracing", @@ -7804,7 +7863,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7820,7 +7879,7 @@ dependencies = [ [[package]] name = "reth-network-types" -version = "1.1.0" +version = "1.1.1" dependencies = [ "humantime-serde", "reth-ethereum-forks", @@ -7833,7 +7892,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.1.0" +version = "1.1.1" dependencies = [ "anyhow", "bincode", @@ -7851,8 +7910,9 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "1.1.0" +version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-rpc-types-engine", "eyre", "reth-beacon-consensus", @@ -7862,19 +7922,17 @@ dependencies = [ "reth-network-api", "reth-node-core", "reth-node-types", - "reth-payload-builder", "reth-payload-primitives", - "reth-primitives", "reth-provider", - "reth-rpc-eth-api", "reth-tasks", "reth-transaction-pool", ] [[package]] name = "reth-node-builder" -version = "1.1.0" +version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rpc-types", "aquamarine", @@ -7883,7 +7941,6 @@ dependencies = [ "futures", "jsonrpsee", "rayon", - "reth-auto-seal-consensus", "reth-beacon-consensus", "reth-blockchain-tree", "reth-chain-state", @@ -7938,7 +7995,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7956,7 +8013,7 @@ dependencies = [ "reth-chainspec", "reth-cli-util", "reth-config", - "reth-consensus-common", + "reth-consensus", "reth-db", "reth-discv4", "reth-discv5", @@ -7965,9 +8022,8 @@ dependencies = 
[ "reth-network-p2p", "reth-network-peers", "reth-primitives", + "reth-primitives-traits", "reth-prune-types", - "reth-rpc-api", - "reth-rpc-eth-api", "reth-rpc-eth-types", "reth-rpc-server-types", "reth-rpc-types-compat", @@ -7989,7 +8045,7 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-contract", @@ -7997,13 +8053,13 @@ dependencies = [ "alloy-genesis", "alloy-primitives", "alloy-provider", + "alloy-rpc-types-beacon", "alloy-signer", "alloy-sol-types", "eyre", "futures", "rand 0.8.5", "reth", - "reth-auto-seal-consensus", "reth-basic-payload-builder", "reth-beacon-consensus", "reth-chainspec", @@ -8026,6 +8082,7 @@ dependencies = [ "reth-tasks", "reth-tracing", "reth-transaction-pool", + "reth-trie-db", "revm", "serde_json", "tokio", @@ -8033,30 +8090,29 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", "futures", "humantime", "pin-project", "reth-beacon-consensus", - "reth-network", "reth-network-api", - "reth-primitives", "reth-primitives-traits", - "reth-provider", "reth-prune", "reth-stages", - "reth-static-file", + "reth-static-file-types", + "reth-storage-api", "tokio", "tracing", ] [[package]] name = "reth-node-metrics" -version = "1.1.0" +version = "1.1.1" dependencies = [ "eyre", "http", @@ -8081,18 +8137,18 @@ dependencies = [ [[package]] name = "reth-node-types" -version = "1.1.0" +version = "1.1.1" dependencies = [ "reth-chainspec", "reth-db-api", "reth-engine-primitives", - "reth-primitives", "reth-primitives-traits", + "reth-trie-db", ] [[package]] name = "reth-optimism-chainspec" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8112,11 +8168,14 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.1.0" +version = "1.1.1" dependencies = [ + "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rlp", "clap", + "derive_more 1.0.0", "eyre", "futures-util", "op-alloy-consensus", @@ -8150,6 +8209,7 @@ dependencies = [ "reth-static-file", "reth-static-file-types", "reth-tracing", + "serde", "tempfile", "tokio", "tokio-util", @@ -8158,10 +8218,11 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-primitives", + "alloy-trie", "reth-chainspec", "reth-consensus", "reth-consensus-common", @@ -8174,7 +8235,7 @@ dependencies = [ [[package]] name = "reth-optimism-evm" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8201,7 +8262,7 @@ dependencies = [ [[package]] name = "reth-optimism-forks" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-chains", "alloy-primitives", @@ -8212,19 +8273,22 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.1.0" +version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-genesis", + "alloy-network", "alloy-primitives", "alloy-rpc-types-engine", + "alloy-signer-local", "clap", "eyre", + "futures", "op-alloy-consensus", "op-alloy-rpc-types-engine", "parking_lot", "reth", - "reth-auto-seal-consensus", "reth-basic-payload-builder", "reth-beacon-consensus", "reth-chainspec", @@ -8240,6 +8304,7 @@ dependencies = [ "reth-optimism-consensus", "reth-optimism-evm", "reth-optimism-forks", + "reth-optimism-node", "reth-optimism-payload-builder", "reth-optimism-rpc", 
"reth-payload-builder", @@ -8248,6 +8313,7 @@ dependencies = [ "reth-revm", "reth-tracing", "reth-transaction-pool", + "reth-trie-db", "revm", "serde", "serde_json", @@ -8256,13 +8322,14 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", "alloy-rpc-types-engine", + "op-alloy-consensus", "op-alloy-rpc-types-engine", "reth-basic-payload-builder", "reth-chain-state", @@ -8282,7 +8349,6 @@ dependencies = [ "reth-transaction-pool", "reth-trie", "revm", - "revm-primitives", "sha2 0.10.8", "thiserror", "tracing", @@ -8290,21 +8356,24 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", - "reth-primitives", + "alloy-rlp", + "bytes", + "derive_more 1.0.0", + "op-alloy-consensus", ] [[package]] name = "reth-optimism-rpc" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", - "alloy-rpc-types", "alloy-rpc-types-eth", "derive_more 1.0.0", "jsonrpsee-types", @@ -8339,7 +8408,7 @@ dependencies = [ [[package]] name = "reth-optimism-storage" -version = "1.1.0" +version = "1.1.1" dependencies = [ "reth-codecs", "reth-db-api", @@ -8350,8 +8419,9 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "1.1.0" +version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rpc-types", "async-trait", @@ -8362,7 +8432,6 @@ dependencies = [ "reth-metrics", "reth-payload-primitives", "reth-primitives", - "reth-provider", "revm", "tokio", "tokio-stream", @@ -8371,11 +8440,11 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", - "alloy-rpc-types", + "alloy-rpc-types-engine", "async-trait", "op-alloy-rpc-types-engine", "pin-project", @@ -8393,7 +8462,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-rpc-types", "reth-chainspec", @@ -8403,15 +8472,17 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-genesis", + "alloy-network", "alloy-primitives", "alloy-rlp", "alloy-rpc-types", "alloy-serde", + "alloy-trie", "arbitrary", "assert_matches", "bincode", @@ -8437,6 +8508,7 @@ dependencies = [ "reth-testing-utils", "reth-trie-common", "revm-primitives", + "rstest", "secp256k1", "serde", "serde_json", @@ -8447,7 +8519,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8475,7 +8547,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8524,7 +8596,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "assert_matches", @@ -8538,6 +8610,7 @@ dependencies = [ "reth-errors", "reth-exex-types", "reth-metrics", + "reth-primitives-traits", "reth-provider", "reth-prune-types", "reth-stages", @@ -8553,7 +8626,7 @@ dependencies = [ [[package]] name = "reth-prune-types" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "arbitrary", @@ -8573,7 +8646,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = 
"1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8590,7 +8663,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -8601,7 +8674,9 @@ dependencies = [ "alloy-rlp", "alloy-rpc-types", "alloy-rpc-types-admin", + "alloy-rpc-types-beacon", "alloy-rpc-types-debug", + "alloy-rpc-types-engine", "alloy-rpc-types-eth", "alloy-rpc-types-mev", "alloy-rpc-types-trace", @@ -8622,14 +8697,16 @@ dependencies = [ "pin-project", "rand 0.8.5", "reth-chainspec", + "reth-consensus", "reth-consensus-common", "reth-errors", + "reth-ethereum-consensus", "reth-evm", "reth-evm-ethereum", "reth-network-api", "reth-network-peers", "reth-network-types", - "reth-node-api", + "reth-payload-validator", "reth-primitives", "reth-provider", "reth-revm", @@ -8658,7 +8735,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -8677,16 +8754,15 @@ dependencies = [ "jsonrpsee", "reth-engine-primitives", "reth-network-peers", - "reth-primitives", "reth-rpc-eth-api", ] [[package]] name = "reth-rpc-api-testing-util" -version = "1.1.0" +version = "1.1.1" dependencies = [ + "alloy-eips", "alloy-primitives", - "alloy-rpc-types", "alloy-rpc-types-eth", "alloy-rpc-types-trace", "futures", @@ -8702,15 +8778,14 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.1.0" +version = "1.1.1" dependencies = [ - "alloy-network", + "alloy-consensus", + "alloy-eips", "alloy-primitives", - "alloy-rpc-types", "alloy-rpc-types-engine", "alloy-rpc-types-eth", "alloy-rpc-types-trace", - "alloy-serde", "clap", "http", "jsonrpsee", @@ -8718,6 +8793,7 @@ dependencies = [ "pin-project", "reth-beacon-consensus", "reth-chainspec", + "reth-consensus", "reth-engine-primitives", "reth-ethereum-engine-primitives", "reth-evm", @@ -8753,7 +8829,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8764,6 +8840,7 @@ dependencies = [ "jsonrpsee-core", "jsonrpsee-types", "metrics", + "parking_lot", "reth-beacon-consensus", "reth-chainspec", "reth-engine-primitives", @@ -8789,7 +8866,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-api" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -8797,9 +8874,9 @@ dependencies = [ "alloy-json-rpc", "alloy-network", "alloy-primitives", - "alloy-rpc-types", "alloy-rpc-types-eth", "alloy-rpc-types-mev", + "alloy-serde", "async-trait", "auto_impl", "dyn-clone", @@ -8812,6 +8889,7 @@ dependencies = [ "reth-evm", "reth-execution-types", "reth-network-api", + "reth-node-api", "reth-primitives", "reth-provider", "reth-revm", @@ -8830,14 +8908,12 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", - "alloy-rpc-types", "alloy-rpc-types-eth", - "alloy-serde", "alloy-sol-types", "derive_more 1.0.0", "futures", @@ -8874,46 +8950,48 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-rpc-types-engine", "http", + "http-body-util", "jsonrpsee", "jsonrpsee-http-client", "pin-project", "reqwest", "tokio", "tower 0.4.13", + "tower-http", "tracing", ] [[package]] name = "reth-rpc-server-types" -version = "1.1.0" +version = "1.1.1" dependencies = [ + "alloy-eips", "alloy-primitives", 
"alloy-rpc-types-engine", "jsonrpsee-core", "jsonrpsee-types", "reth-errors", "reth-network-api", - "reth-primitives", "serde", "strum", ] [[package]] name = "reth-rpc-types-compat" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types", "alloy-rpc-types-engine", "alloy-rpc-types-eth", "alloy-serde", + "jsonrpsee-types", "reth-primitives", "reth-trie-common", "serde", @@ -8922,8 +9000,9 @@ dependencies = [ [[package]] name = "reth-stages" -version = "1.1.0" +version = "1.1.1" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rlp", "assert_matches", @@ -8971,7 +9050,7 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "aquamarine", @@ -8999,7 +9078,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "arbitrary", @@ -9016,7 +9095,7 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "assert_matches", @@ -9038,7 +9117,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "clap", @@ -9049,13 +9128,15 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", + "alloy-rpc-types-engine", "auto_impl", "reth-chainspec", + "reth-db", "reth-db-api", "reth-db-models", "reth-execution-types", @@ -9068,7 +9149,7 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9080,7 +9161,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.1.0" +version = "1.1.1" dependencies = [ "auto_impl", "dyn-clone", @@ -9097,7 +9178,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9110,7 +9191,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "1.1.0" +version = "1.1.1" dependencies = [ "tokio", "tokio-stream", @@ -9119,7 +9200,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.1.0" +version = "1.1.1" dependencies = [ "clap", "eyre", @@ -9133,7 +9214,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9159,6 +9240,7 @@ dependencies = [ "reth-fs-util", "reth-metrics", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-storage-api", "reth-tasks", @@ -9178,11 +9260,12 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-primitives", "alloy-rlp", + "alloy-trie", "auto_impl", "bincode", "criterion", @@ -9207,7 +9290,7 @@ dependencies = [ [[package]] name = "reth-trie-common" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -9231,7 +9314,7 @@ dependencies = [ [[package]] name = "reth-trie-db" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9260,7 +9343,7 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -9278,6 +9361,7 @@ dependencies = [ 
"reth-primitives", "reth-provider", "reth-trie", + "reth-trie-common", "reth-trie-db", "thiserror", "tokio", @@ -9286,7 +9370,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse" -version = "1.1.0" +version = "1.1.1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -9306,9 +9390,9 @@ dependencies = [ [[package]] name = "revm" -version = "17.0.0" +version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eab16cb0a8cd5ac88b11230b20df588b7e8aae7dfab4b3f830e98aebeb4b365" +checksum = "15689a3c6a8d14b647b4666f2e236ef47b5a5133cdfd423f545947986fff7013" dependencies = [ "auto_impl", "cfg-if", @@ -9321,9 +9405,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e29c662f7887f3b659d4b0fd234673419a8fcbeaa1ecc29bf7034c0a75cc8ea" +checksum = "747291a18ad6726a08dd73f8b6a6b3a844db582ecae2063ccf0a04880c44f482" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -9340,9 +9424,9 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "13.0.0" +version = "14.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fac2034454f8bc69dc7d3c94cdb1b57559e27f5ef0518771f1787de543d7d6a1" +checksum = "74e3f11d0fed049a4a10f79820c59113a79b38aed4ebec786a79d5c667bfeb51" dependencies = [ "revm-primitives", "serde", @@ -9350,9 +9434,9 @@ dependencies = [ [[package]] name = "revm-precompile" -version = "14.0.0" +version = "15.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a88c8c7c5f9b988a9e65fc0990c6ce859cdb74114db705bd118a96d22d08027" +checksum = "e381060af24b750069a2b2d2c54bba273d84e8f5f9e8026fc9262298e26cc336" dependencies = [ "aurora-engine-modexp", "blst", @@ -9370,9 +9454,9 @@ dependencies = [ [[package]] name = "revm-primitives" -version = "13.0.0" +version = "14.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d11fa1e195b0bebaf3fb18596f314a13ba3a4cb1fdd16d3465934d812fd921e" +checksum = "3702f132bb484f4f0d0ca4f6fbde3c82cfd745041abbedd6eda67730e1868ef0" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -9503,6 +9587,36 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afab94fb28594581f62d981211a9a4d53cc8130bbcbbb89a0440d9b8e81a7746" +[[package]] +name = "rstest" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a2c585be59b6b5dd66a9d2084aa1d8bd52fbdb806eafdeffb52791147862035" +dependencies = [ + "futures", + "futures-timer", + "rstest_macros", + "rustc_version 0.4.1", +] + +[[package]] +name = "rstest_macros" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "825ea780781b15345a146be27eaefb05085e337e869bff01b4306a4fd4a9ad5a" +dependencies = [ + "cfg-if", + "glob", + "proc-macro-crate", + "proc-macro2", + "quote", + "regex", + "relative-path", + "rustc_version 0.4.1", + "syn 2.0.87", + "unicode-ident", +] + [[package]] name = "ruint" version = "1.12.3" @@ -9581,9 +9695,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.37" +version = "0.38.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" +checksum = "99e4ea3e1cdc4b559b8e5650f9c8e5998e3e5c1343b4eaf034565f32318d63c0" dependencies = [ "bitflags 2.6.0", "errno", @@ -9594,9 +9708,9 @@ dependencies = [ [[package]] name = 
"rustls" -version = "0.23.15" +version = "0.23.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fbb44d7acc4e873d613422379f69f237a1b141928c02f6bc6ccfddddc2d7993" +checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" dependencies = [ "log", "once_cell", @@ -9727,9 +9841,9 @@ dependencies = [ [[package]] name = "scc" -version = "2.2.2" +version = "2.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2c1f7fc6deb21665a9060dfc7d271be784669295a31babdcd4dd2c79ae8cbfb" +checksum = "d8d25269dd3a12467afe2e510f69fb0b46b698e5afb296b59f2145259deaf8e8" dependencies = [ "sdd", ] @@ -9822,9 +9936,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.12.0" +version = "2.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" +checksum = "fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2" dependencies = [ "core-foundation-sys", "libc", @@ -9871,22 +9985,22 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "serde" -version = "1.0.210" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" +checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.210" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" +checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.87", ] [[package]] @@ -9921,7 +10035,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.87", ] [[package]] @@ -9972,14 +10086,14 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.87", ] [[package]] name = "serial_test" -version = "3.1.1" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b4b487fe2acf240a021cf57c6b2b4903b1e78ca0ecd862a71b71d2a51fed77d" +checksum = "1b258109f244e1d6891bf1053a55d63a5cd4f8f4c30cf9a1280989f80e7a1fa9" dependencies = [ "once_cell", "parking_lot", @@ -9989,13 +10103,13 @@ dependencies = [ [[package]] name = "serial_test_derive" -version = "3.1.1" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" +checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.87", ] [[package]] @@ -10158,9 +10272,9 @@ checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" [[package]] name = "sketches-ddsketch" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85636c14b73d81f541e525f585c0a2109e6744e1565b5c1668e31c70c10ed65c" +checksum = "c1e9a774a6c28142ac54bb25d25562e6bcf957493a184f15ad4eebccb23e410a" [[package]] name = "slab" @@ -10281,7 +10395,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.80", + "syn 2.0.87", ] [[package]] @@ -10305,9 +10419,9 @@ checksum = 
"13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "symbolic-common" -version = "12.12.0" +version = "12.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "366f1b4c6baf6cfefc234bbd4899535fca0b06c74443039a73f6dfb2fad88d77" +checksum = "3d4d73159efebfb389d819fd479afb2dbd57dcb3e3f4b7fcfa0e675f5a46c1cb" dependencies = [ "debugid", "memmap2", @@ -10317,9 +10431,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version = "12.12.0" +version = "12.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aba05ba5b9962ea5617baf556293720a8b2d0a282aa14ee4bf10e22efc7da8c8" +checksum = "a767859f6549c665011970874c3f541838b4835d5aaaa493d3ee383918be9f10" dependencies = [ "cpp_demangle", "rustc-demangle", @@ -10339,9 +10453,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.80" +version = "2.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6e185e337f816bc8da115b8afcb3324006ccc82eeaddf35113888d3bd8e44ac" +checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d" dependencies = [ "proc-macro2", "quote", @@ -10350,14 +10464,14 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.8.9" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5e0c2ea8db64b2898b62ea2fbd60204ca95e0b2c6bdf53ff768bbe916fbe4d" +checksum = "f76fe0a3e1476bdaa0775b9aec5b869ed9520c2b2fedfe9c6df3618f8ea6290b" dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.87", ] [[package]] @@ -10383,7 +10497,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.87", ] [[package]] @@ -10407,12 +10521,12 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" +checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" dependencies = [ "cfg-if", - "fastrand 2.1.1", + "fastrand 2.2.0", "once_cell", "rustix", "windows-sys 0.59.0", @@ -10460,7 +10574,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.87", ] [[package]] @@ -10484,22 +10598,22 @@ checksum = "a38c90d48152c236a3ab59271da4f4ae63d678c5d7ad6b7714d7cb9760be5e4b" [[package]] name = "thiserror" -version = "1.0.64" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.64" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.87", ] [[package]] @@ -10652,9 +10766,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.40.0" +version = "1.41.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" +checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" dependencies = [ "backtrace", "bytes", @@ -10676,7 +10790,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.87", ] [[package]] @@ -10804,12 +10918,12 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.5.2" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" +checksum = "8437150ab6bbc8c5f0f519e3d5ed4aa883a83dd4cdd3d1b21f9482936046cb97" dependencies = [ "async-compression", - "base64 0.21.7", + "base64 0.22.1", "bitflags 2.6.0", "bytes", "futures-core", @@ -10826,7 +10940,7 @@ dependencies = [ "pin-project-lite", "tokio", "tokio-util", - "tower 0.4.13", + "tower 0.5.1", "tower-layer", "tower-service", "tracing", @@ -10877,7 +10991,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.87", ] [[package]] @@ -10979,11 +11093,12 @@ dependencies = [ [[package]] name = "tracy-client-sys" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68613466112302fdbeabc5fa55f7d57462a0b247d5a6b7d7e09401fb471a144d" +checksum = "3637e734239e12ab152cd269302500bd063f37624ee210cd04b4936ed671f3b1" dependencies = [ "cc", + "windows-targets 0.52.6", ] [[package]] @@ -11177,12 +11292,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "unsigned-varint" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" - [[package]] name = "unsigned-varint" version = "0.8.0" @@ -11197,12 +11306,12 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.2" +version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +checksum = "8d157f1b96d14500ffdc1f10ba712e780825526c03d9a49b4d0324b0d9113ada" dependencies = [ "form_urlencoded", - "idna 0.5.0", + "idna 1.0.3", "percent-encoding", "serde", ] @@ -11274,7 +11383,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.87", ] [[package]] @@ -11345,7 +11454,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.87", "wasm-bindgen-shared", ] @@ -11379,7 +11488,7 @@ checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.87", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -11392,9 +11501,9 @@ checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" [[package]] name = "wasm-streams" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e072d4e72f700fb3443d8fe94a39315df013eef1104903cdb0a2abd322bbecd" +checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" dependencies = [ "futures-util", "js-sys", @@ -11405,9 +11514,9 @@ dependencies = [ [[package]] name = "wasmtimer" -version = "0.2.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "c7ed9d8b15c7fb594d72bfb4b5a276f3d2029333cd93a932f376f5937f6f80ee" +checksum = "bb4f099acbc1043cc752b91615b24b02d7f6fcd975bd781fed9f50b3c3e15bf7" dependencies = [ "futures", "js-sys", @@ -11535,7 +11644,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.87", ] [[package]] @@ -11546,7 +11655,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.87", ] [[package]] @@ -11557,7 +11666,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.87", ] [[package]] @@ -11568,7 +11677,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.87", ] [[package]] @@ -11843,7 +11952,7 @@ checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.87", "synstructure", ] @@ -11865,7 +11974,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.87", ] [[package]] @@ -11885,7 +11994,7 @@ checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.87", "synstructure", ] @@ -11906,7 +12015,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.87", ] [[package]] @@ -11928,7 +12037,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.80", + "syn 2.0.87", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index d01ee01ce5cf..3f65bceb4bf8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.1.0" +version = "1.1.1" edition = "2021" rust-version = "1.82" license = "MIT OR Apache-2.0" @@ -20,7 +20,6 @@ members = [ "crates/cli/runner/", "crates/cli/util/", "crates/config/", - "crates/consensus/auto-seal/", "crates/consensus/beacon/", "crates/consensus/common/", "crates/consensus/consensus/", @@ -176,6 +175,7 @@ branches_sharing_code = "warn" clear_with_drain = "warn" cloned_instead_of_copied = "warn" collection_is_never_read = "warn" +dbg_macro = "warn" derive_partial_eq_without_eq = "warn" doc_markdown = "warn" empty_line_after_doc_comments = "warn" @@ -299,7 +299,6 @@ overflow-checks = true # reth op-reth = { path = "crates/optimism/bin" } reth = { path = "bin/reth" } -reth-auto-seal-consensus = { path = "crates/consensus/auto-seal" } reth-basic-payload-builder = { path = "crates/payload/basic" } reth-beacon-consensus = { path = "crates/consensus/beacon" } reth-bench = { path = "bin/reth-bench" } @@ -418,60 +417,60 @@ reth-trie-db = { path = "crates/trie/db" } reth-trie-parallel = { path = "crates/trie/parallel" } # revm -revm = { version = "17.0.0", features = ["std"], default-features = false } -revm-inspectors = "0.10.0" -revm-primitives = { version = "13.0.0", features = [ +revm = { version = "18.0.0", features = ["std"], default-features = false } +revm-inspectors = "0.11.0" +revm-primitives = { version = "14.0.0", features = [ "std", ], default-features = false } # eth alloy-chains = "0.1.32" -alloy-dyn-abi = "0.8.0" -alloy-primitives = { version 
= "0.8.9", default-features = false } +alloy-dyn-abi = "0.8.11" +alloy-primitives = { version = "0.8.11", default-features = false } alloy-rlp = "0.3.4" -alloy-sol-types = "0.8.0" +alloy-sol-types = "0.8.11" alloy-trie = { version = "0.7", default-features = false } -alloy-consensus = { version = "0.5.4", default-features = false } -alloy-contract = { version = "0.5.4", default-features = false } -alloy-eips = { version = "0.5.4", default-features = false } -alloy-genesis = { version = "0.5.4", default-features = false } -alloy-json-rpc = { version = "0.5.4", default-features = false } -alloy-network = { version = "0.5.4", default-features = false } -alloy-network-primitives = { version = "0.5.4", default-features = false } -alloy-node-bindings = { version = "0.5.4", default-features = false } -alloy-provider = { version = "0.5.4", features = [ +alloy-consensus = { version = "0.6.4", default-features = false } +alloy-contract = { version = "0.6.4", default-features = false } +alloy-eips = { version = "0.6.4", default-features = false } +alloy-genesis = { version = "0.6.4", default-features = false } +alloy-json-rpc = { version = "0.6.4", default-features = false } +alloy-network = { version = "0.6.4", default-features = false } +alloy-network-primitives = { version = "0.6.4", default-features = false } +alloy-node-bindings = { version = "0.6.4", default-features = false } +alloy-provider = { version = "0.6.4", features = [ "reqwest", ], default-features = false } -alloy-pubsub = { version = "0.5.4", default-features = false } -alloy-rpc-client = { version = "0.5.4", default-features = false } -alloy-rpc-types = { version = "0.5.4", features = [ +alloy-pubsub = { version = "0.6.4", default-features = false } +alloy-rpc-client = { version = "0.6.4", default-features = false } +alloy-rpc-types = { version = "0.6.4", features = [ "eth", ], default-features = false } -alloy-rpc-types-admin = { version = "0.5.4", default-features = false } -alloy-rpc-types-anvil = { version = "0.5.4", default-features = false } -alloy-rpc-types-beacon = { version = "0.5.4", default-features = false } -alloy-rpc-types-debug = { version = "0.5.4", default-features = false } -alloy-rpc-types-engine = { version = "0.5.4", default-features = false } -alloy-rpc-types-eth = { version = "0.5.4", default-features = false } -alloy-rpc-types-mev = { version = "0.5.4", default-features = false } -alloy-rpc-types-trace = { version = "0.5.4", default-features = false } -alloy-rpc-types-txpool = { version = "0.5.4", default-features = false } -alloy-serde = { version = "0.5.4", default-features = false } -alloy-signer = { version = "0.5.4", default-features = false } -alloy-signer-local = { version = "0.5.4", default-features = false } -alloy-transport = { version = "0.5.4" } -alloy-transport-http = { version = "0.5.4", features = [ +alloy-rpc-types-admin = { version = "0.6.4", default-features = false } +alloy-rpc-types-anvil = { version = "0.6.4", default-features = false } +alloy-rpc-types-beacon = { version = "0.6.4", default-features = false } +alloy-rpc-types-debug = { version = "0.6.4", default-features = false } +alloy-rpc-types-engine = { version = "0.6.4", default-features = false } +alloy-rpc-types-eth = { version = "0.6.4", default-features = false } +alloy-rpc-types-mev = { version = "0.6.4", default-features = false } +alloy-rpc-types-trace = { version = "0.6.4", default-features = false } +alloy-rpc-types-txpool = { version = "0.6.4", default-features = false } +alloy-serde = { version = "0.6.4", 
default-features = false } +alloy-signer = { version = "0.6.4", default-features = false } +alloy-signer-local = { version = "0.6.4", default-features = false } +alloy-transport = { version = "0.6.4" } +alloy-transport-http = { version = "0.6.4", features = [ "reqwest-rustls-tls", ], default-features = false } -alloy-transport-ipc = { version = "0.5.4", default-features = false } -alloy-transport-ws = { version = "0.5.4", default-features = false } +alloy-transport-ipc = { version = "0.6.4", default-features = false } +alloy-transport-ws = { version = "0.6.4", default-features = false } # op -op-alloy-rpc-types = "0.5" -op-alloy-rpc-types-engine = "0.5" -op-alloy-network = "0.5" -op-alloy-consensus = "0.5" +op-alloy-rpc-types = "0.6.5" +op-alloy-rpc-types-engine = "0.6.5" +op-alloy-network = "0.6.5" +op-alloy-consensus = "0.6.5" # misc aquamarine = "0.6" @@ -527,11 +526,11 @@ url = "2.3" zstd = "0.13" # metrics -metrics = "0.23.0" +metrics = "0.24.0" metrics-derive = "0.1" -metrics-exporter-prometheus = { version = "0.15.0", default-features = false } +metrics-exporter-prometheus = { version = "0.16.0", default-features = false } metrics-process = "2.1.0" -metrics-util = { default-features = false, version = "0.17.0" } +metrics-util = { default-features = false, version = "0.18.0" } # proc-macros proc-macro2 = "1.0" @@ -553,7 +552,8 @@ hyper-util = "0.1.5" pin-project = "1.0.12" reqwest = { version = "0.12", default-features = false } tower = "0.4" -tower-http = "0.5" +tower-http = "0.6" + # p2p discv5 = "0.8.0" @@ -568,6 +568,7 @@ jsonrpsee-types = "0.24" # http http = "1.0" http-body = "1.0" +http-body-util = "0.1.2" jsonwebtoken = "9" proptest-arbitrary-interop = "0.1.0" @@ -597,6 +598,7 @@ serial_test = { default-features = false, version = "3" } similar-asserts = { version = "1.5.0", features = ["serde"] } tempfile = "3.8" test-fuzz = "6" +rstest = "0.23.0" tikv-jemalloc-ctl = "0.6" tikv-jemallocator = "0.6" diff --git a/Makefile b/Makefile index ac4ad1038582..b1908d7b109d 100644 --- a/Makefile +++ b/Makefile @@ -62,7 +62,7 @@ install-op: ## Build and install the op-reth binary under `~/.cargo/bin`. build: ## Build the reth binary into `target` directory. cargo build --bin reth --features "$(FEATURES)" --profile "$(PROFILE)" -SOURCE_DATE_EPOCH := $(shell git log -1 --pretty=%ct) +SOURCE_DATE_EPOCH ?= $(shell git log -1 --pretty=%ct) .PHONY: reproducible reproducible: ## Build the reth binary into `target` directory with reproducible builds. Only works for x86_64-unknown-linux-gnu currently SOURCE_DATE_EPOCH=$(SOURCE_DATE_EPOCH) \ diff --git a/SECURITY.md b/SECURITY.md index 5260d529f5a4..7521d62e9593 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -1,5 +1,5 @@ # Security Policy -## Reporting a Vulnerability +## Report a Vulnerability -Contact georgios at paradigm.xyz. +Contact [security@ithaca.xyz](mailto:security@ithaca.xyz). diff --git a/bin/reth-bench/src/bench/context.rs b/bin/reth-bench/src/bench/context.rs index 5f8936934c69..59533bc6e979 100644 --- a/bin/reth-bench/src/bench/context.rs +++ b/bin/reth-bench/src/bench/context.rs @@ -74,14 +74,17 @@ impl BenchContext { let first_block = match benchmark_mode { BenchMode::Continuous => { // fetch Latest block - block_provider.get_block_by_number(BlockNumberOrTag::Latest, true).await?.unwrap() + block_provider + .get_block_by_number(BlockNumberOrTag::Latest, true.into()) + .await? 
+ .unwrap() } BenchMode::Range(ref mut range) => { match range.next() { Some(block_number) => { // fetch first block in range block_provider - .get_block_by_number(block_number.into(), true) + .get_block_by_number(block_number.into(), true.into()) .await? .unwrap() } diff --git a/bin/reth-bench/src/bench/new_payload_fcu.rs b/bin/reth-bench/src/bench/new_payload_fcu.rs index ca5359fb8c26..dd2f863e2c94 100644 --- a/bin/reth-bench/src/bench/new_payload_fcu.rs +++ b/bin/reth-bench/src/bench/new_payload_fcu.rs @@ -43,16 +43,17 @@ impl Command { let (sender, mut receiver) = tokio::sync::mpsc::channel(1000); tokio::task::spawn(async move { while benchmark_mode.contains(next_block) { - let block_res = block_provider.get_block_by_number(next_block.into(), true).await; + let block_res = + block_provider.get_block_by_number(next_block.into(), true.into()).await; let block = block_res.unwrap().unwrap(); let block_hash = block.header.hash; - let block = Block::try_from(block.inner).unwrap().seal(block_hash); + let block = Block::try_from(block).unwrap().seal(block_hash); let head_block_hash = block.hash(); let safe_block_hash = block_provider - .get_block_by_number(block.number.saturating_sub(32).into(), false); + .get_block_by_number(block.number.saturating_sub(32).into(), false.into()); let finalized_block_hash = block_provider - .get_block_by_number(block.number.saturating_sub(64).into(), false); + .get_block_by_number(block.number.saturating_sub(64).into(), false.into()); let (safe, finalized) = tokio::join!(safe_block_hash, finalized_block_hash,); diff --git a/bin/reth-bench/src/bench/new_payload_only.rs b/bin/reth-bench/src/bench/new_payload_only.rs index 85342d1af76c..68b2f76527df 100644 --- a/bin/reth-bench/src/bench/new_payload_only.rs +++ b/bin/reth-bench/src/bench/new_payload_only.rs @@ -43,10 +43,11 @@ impl Command { let (sender, mut receiver) = tokio::sync::mpsc::channel(1000); tokio::task::spawn(async move { while benchmark_mode.contains(next_block) { - let block_res = block_provider.get_block_by_number(next_block.into(), true).await; + let block_res = + block_provider.get_block_by_number(next_block.into(), true.into()).await; let block = block_res.unwrap().unwrap(); let block_hash = block.header.hash; - let block = Block::try_from(block.inner).unwrap().seal(block_hash); + let block = Block::try_from(block).unwrap().seal(block_hash); next_block += 1; sender.send(block).await.unwrap(); diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index ffd1998b24ea..a152bea2681e 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -19,6 +19,7 @@ reth-ethereum-cli.workspace = true reth-chainspec.workspace = true reth-config.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-fs-util.workspace = true reth-db = { workspace = true, features = ["mdbx"] } reth-db-api.workspace = true diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 455d8356aff9..30af4c61c536 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -1,6 +1,6 @@ //! Command for debugging block building. 
use alloy_consensus::TxEip4844; -use alloy_eips::eip2718::Encodable2718; +use alloy_eips::{eip2718::Encodable2718, eip4844::BlobTransactionSidecar}; use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rlp::Decodable; use alloy_rpc_types::engine::{BlobsBundleV1, PayloadAttributes}; @@ -22,18 +22,19 @@ use reth_errors::RethResult; use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_execution_types::ExecutionOutcome; use reth_fs_util as fs; -use reth_node_api::{NodeTypesWithDB, NodeTypesWithEngine, PayloadBuilderAttributes}; +use reth_node_api::{ + EngineApiMessageVersion, NodeTypesWithDB, NodeTypesWithEngine, PayloadBuilderAttributes, +}; use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider}; -use reth_payload_builder::database::CachedReads; use reth_primitives::{ - revm_primitives::KzgSettings, BlobTransaction, BlobTransactionSidecar, - PooledTransactionsElement, SealedBlock, SealedBlockWithSenders, Transaction, TransactionSigned, + revm_primitives::KzgSettings, BlobTransaction, PooledTransactionsElement, SealedBlock, + SealedBlockWithSenders, SealedHeader, Transaction, TransactionSigned, }; use reth_provider::{ providers::BlockchainProvider, BlockHashReader, BlockReader, BlockWriter, ChainSpecProvider, ProviderFactory, StageCheckpointReader, StateProviderFactory, }; -use reth_revm::{database::StateProviderDatabase, primitives::EnvKzgSettings}; +use reth_revm::{cached::CachedReads, database::StateProviderDatabase, primitives::EnvKzgSettings}; use reth_stages::StageId; use reth_transaction_pool::{ blobstore::InMemoryBlobStore, BlobStore, EthPooledTransaction, PoolConfig, TransactionOrigin, @@ -222,11 +223,12 @@ impl> Command { withdrawals: None, }; let payload_config = PayloadConfig::new( - Arc::clone(&best_block), + Arc::new(SealedHeader::new(best_block.header().clone(), best_block.hash())), Bytes::default(), reth_payload_builder::EthPayloadBuilderAttributes::try_new( best_block.hash(), payload_attrs, + EngineApiMessageVersion::default() as u8, )?, ); diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index 215afacb583c..9056d3424c78 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -1,6 +1,7 @@ //! Command for debugging execution. 
use crate::{args::NetworkArgs, utils::get_single_header}; +use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockNumber, B256}; use clap::Parser; use futures::{stream::select as stream_select, StreamExt}; @@ -20,10 +21,9 @@ use reth_downloaders::{ use reth_exex::ExExManagerHandle; use reth_network::{BlockDownloaderProvider, NetworkEventListenerProvider, NetworkHandle}; use reth_network_api::NetworkInfo; -use reth_network_p2p::{headers::client::HeadersClient, BlockClient}; +use reth_network_p2p::{headers::client::HeadersClient, EthBlockClient}; use reth_node_api::{NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine}; use reth_node_ethereum::EthExecutorProvider; -use reth_primitives::BlockHashOrNumber; use reth_provider::{ BlockExecutionWriter, ChainSpecProvider, ProviderFactory, StageCheckpointReader, }; @@ -68,7 +68,7 @@ impl> Command { static_file_producer: StaticFileProducer>, ) -> eyre::Result> where - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { // building network downloaders using the fetch client let header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) @@ -137,11 +137,14 @@ impl> Command { Ok(network) } - async fn fetch_block_hash( + async fn fetch_block_hash( &self, client: Client, block: BlockNumber, - ) -> eyre::Result { + ) -> eyre::Result + where + Client: HeadersClient, + { info!(target: "reth::cli", ?block, "Fetching block from the network."); loop { match get_single_header(&client, BlockHashOrNumber::Number(block)).await { diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index 51851c0b0ad2..bc36578a3276 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -4,8 +4,10 @@ use crate::{ args::NetworkArgs, utils::{get_single_body, get_single_header}, }; +use alloy_eips::BlockHashOrNumber; use backon::{ConstantBuilder, Retryable}; use clap::Parser; +use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; @@ -19,7 +21,6 @@ use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; use reth_node_api::{NodeTypesWithDB, NodeTypesWithEngine}; use reth_node_ethereum::EthExecutorProvider; -use reth_primitives::BlockHashOrNumber; use reth_provider::{ writer::UnifiedStorageWriter, AccountExtReader, ChainSpecProvider, HashingWriter, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderFactory, @@ -124,7 +125,8 @@ impl> Command { let client = fetch_client.clone(); let chain = provider_factory.chain_spec(); - let block = (move || get_single_body(client.clone(), Arc::clone(&chain), header.clone())) + let consensus = Arc::new(EthBeaconConsensus::new(chain.clone())); + let block = (move || get_single_body(client.clone(), header.clone(), consensus.clone())) .retry(backoff) .notify( |err, _| warn!(target: "reth::cli", "Error requesting body: {err}. Retrying..."), diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 8e02a52eaf07..3c6e38512c9e 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -1,5 +1,6 @@ //! Command for debugging merkle trie calculation. 
use crate::{args::NetworkArgs, utils::get_single_header}; +use alloy_eips::BlockHashOrNumber; use backon::{ConstantBuilder, Retryable}; use clap::Parser; use reth_beacon_consensus::EthBeaconConsensus; @@ -18,7 +19,6 @@ use reth_network_api::NetworkInfo; use reth_network_p2p::full_block::FullBlockClient; use reth_node_api::{NodeTypesWithDB, NodeTypesWithEngine}; use reth_node_ethereum::EthExecutorProvider; -use reth_primitives::BlockHashOrNumber; use reth_provider::{ writer::UnifiedStorageWriter, BlockNumReader, BlockWriter, ChainSpecProvider, DatabaseProviderFactory, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index e7b3de6b6c13..9314a439265d 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -18,7 +18,9 @@ use reth_engine_util::engine_store::{EngineMessageStore, StoredEngineApiMessage} use reth_fs_util as fs; use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; -use reth_node_api::{NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine}; +use reth_node_api::{ + EngineApiMessageVersion, NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine, +}; use reth_node_ethereum::{EthEngineTypes, EthEvmConfig, EthExecutorProvider}; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_provider::{ @@ -166,8 +168,13 @@ impl> Command { debug!(target: "reth::cli", filepath = %filepath.display(), ?message, "Forwarding Engine API message"); match message { StoredEngineApiMessage::ForkchoiceUpdated { state, payload_attrs } => { - let response = - beacon_engine_handle.fork_choice_updated(state, payload_attrs).await?; + let response = beacon_engine_handle + .fork_choice_updated( + state, + payload_attrs, + EngineApiMessageVersion::default(), + ) + .await?; debug!(target: "reth::cli", ?response, "Received for forkchoice updated"); } StoredEngineApiMessage::NewPayload { payload, sidecar } => { diff --git a/book/cli/reth/db.md b/book/cli/reth/db.md index f9a8a158adc8..17a6de4e607e 100644 --- a/book/cli/reth/db.md +++ b/book/cli/reth/db.md @@ -81,6 +81,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/db/diff.md b/book/cli/reth/db/diff.md index f57c6ac364fe..efb9e7d32e30 100644 --- a/book/cli/reth/db/diff.md +++ b/book/cli/reth/db/diff.md @@ -45,6 +45,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/debug/build-block.md b/book/cli/reth/debug/build-block.md index 2e6d637d52c2..7bceb62b940a 100644 --- a/book/cli/reth/debug/build-block.md +++ b/book/cli/reth/debug/build-block.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/debug/execution.md b/book/cli/reth/debug/execution.md index 9ca74897c5ea..b8e1ce05d171 100644 --- 
a/book/cli/reth/debug/execution.md +++ b/book/cli/reth/debug/execution.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/debug/in-memory-merkle.md b/book/cli/reth/debug/in-memory-merkle.md index 3e322a6913dd..a183db997e9d 100644 --- a/book/cli/reth/debug/in-memory-merkle.md +++ b/book/cli/reth/debug/in-memory-merkle.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/debug/merkle.md b/book/cli/reth/debug/merkle.md index d701803b81ca..d9a72794ef2c 100644 --- a/book/cli/reth/debug/merkle.md +++ b/book/cli/reth/debug/merkle.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/debug/replay-engine.md b/book/cli/reth/debug/replay-engine.md index dd587620a868..b7a1266d3993 100644 --- a/book/cli/reth/debug/replay-engine.md +++ b/book/cli/reth/debug/replay-engine.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/import.md b/book/cli/reth/import.md index 28e085bda718..82a521ac0ab2 100644 --- a/book/cli/reth/import.md +++ b/book/cli/reth/import.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/init-state.md b/book/cli/reth/init-state.md index ddcd3cece378..533c0f8f8889 100644 --- a/book/cli/reth/init-state.md +++ b/book/cli/reth/init-state.md @@ -69,9 +69,31 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout + --without-evm + Specifies whether to initialize the state without relying on EVM historical data. + + When enabled, and before inserting the state, it creates a dummy chain up to the last EVM block specified. It then appends the first provided block. + + - **Note**: **Do not** import receipts and blocks beforehand, or this will fail or be ignored. + + --header + Header file containing the header in an RLP encoded format. + + --total-difficulty + Total difficulty of the header. + + --header-hash + Hash of the header. + JSONL file with state dump.
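The new `--without-evm` flags above are meant to be used together with the positional state-dump argument. A minimal invocation sketch follows; every value is an illustrative placeholder (none come from this changeset), and only the flags documented above are assumed to exist:

```sh
# Hypothetical sketch: initialize state from a JSONL dump while skipping EVM history.
# header.rlp, the difficulty, the hash, and state.jsonl are placeholders.
reth init-state \
  --without-evm \
  --header ./header.rlp \
  --total-difficulty "$TOTAL_DIFFICULTY" \
  --header-hash "$HEADER_HASH" \
  ./state.jsonl
```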
diff --git a/book/cli/reth/init.md b/book/cli/reth/init.md index cd01accc0471..ebe2a8386cf7 100644 --- a/book/cli/reth/init.md +++ b/book/cli/reth/init.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index 4cd55db1fe08..d1bca0a43b22 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -245,7 +245,7 @@ RPC: --http.api Rpc Modules to be configured for the HTTP server - [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots] + [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, flashbots] --http.corsdomain Http Corsdomain to allow request from @@ -269,7 +269,7 @@ RPC: --ws.api Rpc Modules to be configured for the WS server - [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots] + [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, flashbots] --ipcdisable Disable the IPC-RPC server @@ -367,6 +367,9 @@ RPC: [default: 25] + --builder.disallow + Path to file containing disallowed addresses, json-encoded list of strings. The block validation API will reject blocks containing transactions from these addresses. + RPC State Cache: --rpc-cache.max-blocks Max number of blocks in cache @@ -499,6 +502,11 @@ TxPool: [default: 1024] + --txpool.max-new-pending-txs-notifications + How many new pending transactions to buffer and send to in-progress pending transaction iterators + + [default: 200] + Builder: --builder.extradata Block extra data set by the payload builder @@ -590,6 +598,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/p2p.md b/book/cli/reth/p2p.md index 603b451d9405..33639042a1de 100644 --- a/book/cli/reth/p2p.md +++ b/book/cli/reth/p2p.md @@ -247,6 +247,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/prune.md b/book/cli/reth/prune.md index ed16197a76c3..41684ecd9e0e 100644 --- a/book/cli/reth/prune.md +++ b/book/cli/reth/prune.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/recover/storage-tries.md b/book/cli/reth/recover/storage-tries.md index ecdaabe77817..1afe94f55dbc 100644 --- a/book/cli/reth/recover/storage-tries.md +++ b/book/cli/reth/recover/storage-tries.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/stage/drop.md b/book/cli/reth/stage/drop.md index 399b3818c28f..c22d6be66805 100644 --- a/book/cli/reth/stage/drop.md +++ b/book/cli/reth/stage/drop.md @@ -69,6
+69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/stage/dump.md b/book/cli/reth/stage/dump.md index 4b3de3fb1cb5..e3df5bf2df7c 100644 --- a/book/cli/reth/stage/dump.md +++ b/book/cli/reth/stage/dump.md @@ -76,6 +76,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/stage/run.md b/book/cli/reth/stage/run.md index 9da3ce0deb6d..204efc9685bc 100644 --- a/book/cli/reth/stage/run.md +++ b/book/cli/reth/stage/run.md @@ -69,6 +69,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/cli/reth/stage/unwind.md b/book/cli/reth/stage/unwind.md index 700ab3d7e7ce..cb72b9313c0f 100644 --- a/book/cli/reth/stage/unwind.md +++ b/book/cli/reth/stage/unwind.md @@ -74,6 +74,12 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + --db.read-transaction-timeout Read transaction timeout in seconds, 0 means no timeout diff --git a/book/run/mainnet.md b/book/run/mainnet.md index 4412f51c7bf0..c4908971f69f 100644 --- a/book/run/mainnet.md +++ b/book/run/mainnet.md @@ -83,4 +83,14 @@ In the meantime, consider setting up [observability](./observability.md) to moni ## Running without a Consensus Layer -We provide a method for running Reth without a Consensus Layer via the `--debug.tip <block hash>` parameter. If you provide that to your node, it will simulate sending a `engine_forkChoiceUpdated` message _once_ and will trigger syncing to the provided block hash. This is useful for testing and debugging purposes, but in order to have a node that can keep up with the tip you'll need to run a CL alongside it. At the moment we have no plans of including a Consensus Layer implementation in Reth, and we are open to including light clients other methods of syncing like importing Lighthouse as a library. +We provide a method for running Reth without a Consensus Layer via the `--debug.tip <block hash>` parameter. If you provide that to your node, it will simulate sending an `engine_forkchoiceUpdated` message _once_ and will trigger syncing to the provided block hash. This is useful for testing and debugging purposes, but in order to have a node that can keep up with the tip you'll need to run a CL alongside it. At the moment we have no plans of including a Consensus Layer implementation in Reth, and we are open to including light clients and other methods of syncing, like importing Lighthouse as a library. + +## Running with Etherscan as Block Source + +You can use `--debug.etherscan` to run Reth with a fake consensus client that advances the chain using recent blocks on Etherscan. This requires an Etherscan API key (set via the `ETHERSCAN_API_KEY` environment variable). Optionally, specify a custom API URL with `--debug.etherscan <url>`.
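The custom-URL form, as a sketch before the stock example below (the Sepolia endpoint here is purely illustrative; any Etherscan-compatible API URL is passed the same way):

```bash
# Hypothetical: point the fake consensus client at a non-default Etherscan API URL.
export ETHERSCAN_API_KEY=your_api_key_here
reth node --debug.etherscan https://api-sepolia.etherscan.io/api
```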
+ +Example: +```bash +export ETHERSCAN_API_KEY=your_api_key_here +reth node --debug.etherscan +``` \ No newline at end of file diff --git a/book/run/pruning.md b/book/run/pruning.md index da3bb07e2cdf..25d11b4e46e3 100644 --- a/book/run/pruning.md +++ b/book/run/pruning.md @@ -18,7 +18,7 @@ the steps for running Reth as a full node, what caveats to expect and how to con - Full Node – Reth node that has the latest state and historical data for only the last 10064 blocks available for querying in the same way as an archive node. -The node type that was chosen when first [running a node](./run-a-node.md) **can not** be changed after +The node type that was chosen when first [running a node](./run-a-node.md) **cannot** be changed after the initial sync. Turning Archive into Pruned, or Pruned into Full is not supported. ## Modes diff --git a/clippy.toml b/clippy.toml index 862c568634e5..ab08b1132c1a 100644 --- a/clippy.toml +++ b/clippy.toml @@ -15,3 +15,4 @@ doc-valid-idents = [ "WAL", "MessagePack", ] +allow-dbg-in-tests = true diff --git a/crates/blockchain-tree-api/Cargo.toml b/crates/blockchain-tree-api/Cargo.toml index 552b72767177..b1c01f85938f 100644 --- a/crates/blockchain-tree-api/Cargo.toml +++ b/crates/blockchain-tree-api/Cargo.toml @@ -18,6 +18,7 @@ reth-storage-errors.workspace = true # alloy alloy-primitives.workspace = true +alloy-eips.workspace = true # misc thiserror.workspace = true diff --git a/crates/blockchain-tree-api/src/lib.rs b/crates/blockchain-tree-api/src/lib.rs index 0a1bf6164e0e..7e1d0d714c14 100644 --- a/crates/blockchain-tree-api/src/lib.rs +++ b/crates/blockchain-tree-api/src/lib.rs @@ -9,8 +9,9 @@ use self::error::CanonicalError; use crate::error::InsertBlockError; +use alloy_eips::BlockNumHash; use alloy_primitives::{BlockHash, BlockNumber}; -use reth_primitives::{BlockNumHash, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::collections::BTreeMap; diff --git a/crates/blockchain-tree/src/block_indices.rs b/crates/blockchain-tree/src/block_indices.rs index 0c48b3b9ce85..7778fb9262c5 100644 --- a/crates/blockchain-tree/src/block_indices.rs +++ b/crates/blockchain-tree/src/block_indices.rs @@ -377,8 +377,9 @@ impl BlockIndices { #[cfg(test)] mod tests { use super::*; + use alloy_consensus::Header; use alloy_primitives::B256; - use reth_primitives::{Header, SealedBlock, SealedHeader}; + use reth_primitives::{SealedBlock, SealedHeader}; #[test] fn pending_block_num_hash_returns_none_if_no_fork() { diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 4468d82052c8..c48e15484343 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -23,8 +23,8 @@ use reth_primitives::{ use reth_provider::{ providers::ProviderNodeTypes, BlockExecutionWriter, BlockNumReader, BlockWriter, CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, - ChainSpecProvider, ChainSplit, ChainSplitTarget, DisplayBlocksChain, HeaderProvider, - ProviderError, StaticFileProviderFactory, + ChainSpecProvider, ChainSplit, ChainSplitTarget, DBProvider, DisplayBlocksChain, + HeaderProvider, ProviderError, StaticFileProviderFactory, }; use reth_stages_api::{MetricEvent, MetricEventsSender}; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; @@ -1374,10 +1374,10 @@ where 
#[cfg(test)] mod tests { use super::*; - use alloy_consensus::{TxEip1559, EMPTY_ROOT_HASH}; - use alloy_eips::eip1559::INITIAL_BASE_FEE; + use alloy_consensus::{Header, TxEip1559, EMPTY_ROOT_HASH}; + use alloy_eips::{eip1559::INITIAL_BASE_FEE, eip4895::Withdrawals}; use alloy_genesis::{Genesis, GenesisAccount}; - use alloy_primitives::{keccak256, Address, Sealable, B256}; + use alloy_primitives::{keccak256, Address, PrimitiveSignature as Signature, B256}; use assert_matches::assert_matches; use linked_hash_set::LinkedHashSet; use reth_chainspec::{ChainSpecBuilder, MAINNET, MIN_TRANSACTION_GAS}; @@ -1389,8 +1389,7 @@ mod tests { use reth_primitives::{ proofs::{calculate_receipt_root, calculate_transaction_root}, revm_primitives::AccountInfo, - Account, BlockBody, Header, Signature, Transaction, TransactionSigned, - TransactionSignedEcRecovered, Withdrawals, + Account, BlockBody, Transaction, TransactionSigned, TransactionSignedEcRecovered, }; use reth_provider::{ test_utils::{ @@ -1599,7 +1598,7 @@ mod tests { // receipts root computation is different for OP let receipts_root = calculate_receipt_root(&receipts); - let sealed = Header { + let header = Header { number, parent_hash: parent.unwrap_or_default(), gas_used: body.len() as u64 * MIN_TRANSACTION_GAS, @@ -1621,13 +1620,11 @@ mod tests { ), )])), ..Default::default() - } - .seal_slow(); - let (header, seal) = sealed.into_parts(); + }; SealedBlockWithSenders::new( SealedBlock { - header: SealedHeader::new(header, seal), + header: SealedHeader::seal(header), body: BlockBody { transactions: body.clone().into_iter().map(|tx| tx.into_signed()).collect(), ommers: Vec::new(), diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index 393e525d5ae2..6ac39c316702 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -18,11 +18,12 @@ use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{GotExpected, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ providers::{BundleStateProvider, ConsistentDbView, ProviderNodeTypes}, - FullExecutionDataProvider, ProviderError, StateRootProvider, TryIntoHistoricalStateProvider, + DBProvider, FullExecutionDataProvider, ProviderError, StateRootProvider, + TryIntoHistoricalStateProvider, }; use reth_revm::database::StateProviderDatabase; use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput}; -use reth_trie_parallel::parallel_root::ParallelStateRoot; +use reth_trie_parallel::root::ParallelStateRoot; use std::{ collections::BTreeMap, ops::{Deref, DerefMut}, diff --git a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs index 925b8f03add7..862b02e76070 100644 --- a/crates/blockchain-tree/src/noop.rs +++ b/crates/blockchain-tree/src/noop.rs @@ -60,6 +60,12 @@ impl BlockchainTreeEngine for NoopBlockchainTree { Ok(()) } + fn update_block_hashes_and_clear_buffered( + &self, + ) -> Result, CanonicalError> { + Ok(BTreeMap::new()) + } + fn connect_buffered_blocks_to_canonical_hashes(&self) -> Result<(), CanonicalError> { Ok(()) } @@ -67,12 +73,6 @@ impl BlockchainTreeEngine for NoopBlockchainTree { fn make_canonical(&self, block_hash: BlockHash) -> Result { Err(BlockchainTreeError::BlockHashNotFoundInChain { block_hash }.into()) } - - fn update_block_hashes_and_clear_buffered( - &self, - ) -> Result, CanonicalError> { - Ok(BTreeMap::new()) - } } impl BlockchainTreeViewer for NoopBlockchainTree { diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml index 
9a88a3c54bc6..0a2f53715ffb 100644 --- a/crates/chain-state/Cargo.toml +++ b/crates/chain-state/Cargo.toml @@ -24,6 +24,7 @@ reth-trie.workspace = true # ethereum alloy-eips.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true # async tokio = { workspace = true, features = ["sync", "macros", "rt-multi-thread"] } @@ -42,7 +43,6 @@ pin-project.workspace = true # optional deps for test-utils alloy-signer = { workspace = true, optional = true } alloy-signer-local = { workspace = true, optional = true } -alloy-consensus = { workspace = true, optional = true } rand = { workspace = true, optional = true } revm = { workspace = true, optional = true } @@ -56,13 +56,12 @@ revm.workspace = true [features] test-utils = [ - "alloy-signer", - "alloy-signer-local", - "alloy-consensus", - "rand", - "revm", - "reth-chainspec/test-utils", - "reth-primitives/test-utils", - "reth-trie/test-utils", - "revm?/test-utils" + "alloy-signer", + "alloy-signer-local", + "rand", + "revm", + "reth-chainspec/test-utils", + "reth-primitives/test-utils", + "reth-trie/test-utils", + "revm?/test-utils", ] diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index a850e66521a6..47443b36c67b 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -4,6 +4,7 @@ use crate::{ CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, ChainInfoTracker, MemoryOverlayStateProvider, }; +use alloy_consensus::Header; use alloy_eips::{BlockHashOrNumber, BlockNumHash}; use alloy_primitives::{map::HashMap, Address, TxHash, B256}; use parking_lot::RwLock; @@ -11,7 +12,7 @@ use reth_chainspec::ChainInfo; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_metrics::{metrics::Gauge, Metrics}; use reth_primitives::{ - BlockWithSenders, Header, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, SealedHeader, + BlockWithSenders, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, }; use reth_storage_api::StateProviderBox; @@ -269,7 +270,7 @@ impl CanonicalInMemoryState { // insert the new blocks for block in new_blocks { let parent = blocks.get(&block.block().parent_hash).cloned(); - let block_state = BlockState::with_parent(block.clone(), parent); + let block_state = BlockState::with_parent(block, parent); let hash = block_state.hash(); let number = block_state.number(); @@ -338,7 +339,7 @@ impl CanonicalInMemoryState { // re-insert the blocks in natural order and connect them to their parent blocks for block in old_blocks { let parent = blocks.get(&block.block().parent_hash).cloned(); - let block_state = BlockState::with_parent(block.clone(), parent); + let block_state = BlockState::with_parent(block, parent); let hash = block_state.hash(); let number = block_state.number(); @@ -777,7 +778,7 @@ pub struct ExecutedBlock { pub senders: Arc>, /// Block's execution outcome. pub execution_output: Arc, - /// Block's hashedst state. + /// Block's hashed state. pub hashed_state: Arc, /// Trie updates that result of applying the block. 
pub trie: Arc, diff --git a/crates/chain-state/src/notifications.rs b/crates/chain-state/src/notifications.rs index fc717314b3fc..582e1d2a05d4 100644 --- a/crates/chain-state/src/notifications.rs +++ b/crates/chain-state/src/notifications.rs @@ -190,3 +190,232 @@ impl Stream for ForkChoiceStream { } } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::B256; + use reth_execution_types::ExecutionOutcome; + use reth_primitives::{Receipt, Receipts, TransactionSigned, TxType}; + + #[test] + fn test_commit_notification() { + let block = SealedBlockWithSenders::default(); + let block1_hash = B256::new([0x01; 32]); + let block2_hash = B256::new([0x02; 32]); + + let mut block1 = block.clone(); + block1.set_block_number(1); + block1.set_hash(block1_hash); + + let mut block2 = block; + block2.set_block_number(2); + block2.set_hash(block2_hash); + + let chain = Arc::new(Chain::new( + vec![block1.clone(), block2.clone()], + ExecutionOutcome::default(), + None, + )); + + // Create a commit notification + let notification = CanonStateNotification::Commit { new: chain.clone() }; + + // Test that `committed` returns the correct chain + assert_eq!(notification.committed(), chain); + + // Test that `reverted` returns None for `Commit` + assert!(notification.reverted().is_none()); + + // Test that `tip` returns the correct block + assert_eq!(*notification.tip(), block2); + } + + #[test] + fn test_reorg_notification() { + let block = SealedBlockWithSenders::default(); + let block1_hash = B256::new([0x01; 32]); + let block2_hash = B256::new([0x02; 32]); + let block3_hash = B256::new([0x03; 32]); + + let mut block1 = block.clone(); + block1.set_block_number(1); + block1.set_hash(block1_hash); + + let mut block2 = block.clone(); + block2.set_block_number(2); + block2.set_hash(block2_hash); + + let mut block3 = block; + block3.set_block_number(3); + block3.set_hash(block3_hash); + + let old_chain = + Arc::new(Chain::new(vec![block1.clone()], ExecutionOutcome::default(), None)); + let new_chain = Arc::new(Chain::new( + vec![block2.clone(), block3.clone()], + ExecutionOutcome::default(), + None, + )); + + // Create a reorg notification + let notification = + CanonStateNotification::Reorg { old: old_chain.clone(), new: new_chain.clone() }; + + // Test that `reverted` returns the old chain + assert_eq!(notification.reverted(), Some(old_chain)); + + // Test that `committed` returns the new chain + assert_eq!(notification.committed(), new_chain); + + // Test that `tip` returns the tip of the new chain (last block in the new chain) + assert_eq!(*notification.tip(), block3); + } + + #[test] + fn test_block_receipts_commit() { + // Create a default block instance for use in block definitions. + let block = SealedBlockWithSenders::default(); + + // Define unique hashes for two blocks to differentiate them in the chain. + let block1_hash = B256::new([0x01; 32]); + let block2_hash = B256::new([0x02; 32]); + + // Create a default transaction to include in block1's transactions. + let tx = TransactionSigned::default(); + + // Create a clone of the default block and customize it to act as block1. + let mut block1 = block.clone(); + block1.set_block_number(1); + block1.set_hash(block1_hash); + // Add the transaction to block1's transactions. + block1.block.body.transactions.push(tx); + + // Clone the default block and customize it to act as block2. + let mut block2 = block; + block2.set_block_number(2); + block2.set_hash(block2_hash); + + // Create a receipt for the transaction in block1. 
+ #[allow(clippy::needless_update)] + let receipt1 = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 12345, + logs: vec![], + success: true, + ..Default::default() + }; + + // Wrap the receipt in a `Receipts` structure, as expected in the `ExecutionOutcome`. + let receipts = Receipts { receipt_vec: vec![vec![Some(receipt1.clone())]] }; + + // Define an `ExecutionOutcome` with the created receipts. + let execution_outcome = ExecutionOutcome { receipts, ..Default::default() }; + + // Create a new chain segment with `block1` and `block2` and the execution outcome. + let new_chain = + Arc::new(Chain::new(vec![block1.clone(), block2.clone()], execution_outcome, None)); + + // Create a commit notification containing the new chain segment. + let notification = CanonStateNotification::Commit { new: new_chain }; + + // Call `block_receipts` on the commit notification to retrieve block receipts. + let block_receipts = notification.block_receipts(); + + // Assert that only one receipt entry exists in the `block_receipts` list. + assert_eq!(block_receipts.len(), 1); + + // Verify that the first entry matches block1's hash and transaction receipt. + assert_eq!( + block_receipts[0].0, + BlockReceipts { + block: block1.num_hash(), + tx_receipts: vec![(B256::default(), receipt1)] + } + ); + + // Assert that the receipt is from the committed segment (not reverted). + assert!(!block_receipts[0].1); + } + + #[test] + fn test_block_receipts_reorg() { + // Define block1 for the old chain segment, which will be reverted. + let mut old_block1 = SealedBlockWithSenders::default(); + old_block1.set_block_number(1); + old_block1.set_hash(B256::new([0x01; 32])); + old_block1.block.body.transactions.push(TransactionSigned::default()); + + // Create a receipt for a transaction in the reverted block. + #[allow(clippy::needless_update)] + let old_receipt = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 54321, + logs: vec![], + success: false, + ..Default::default() + }; + let old_receipts = Receipts { receipt_vec: vec![vec![Some(old_receipt.clone())]] }; + + let old_execution_outcome = + ExecutionOutcome { receipts: old_receipts, ..Default::default() }; + + // Create an old chain segment to be reverted, containing `old_block1`. + let old_chain = Arc::new(Chain::new(vec![old_block1.clone()], old_execution_outcome, None)); + + // Define block2 for the new chain segment, which will be committed. + let mut new_block1 = SealedBlockWithSenders::default(); + new_block1.set_block_number(2); + new_block1.set_hash(B256::new([0x02; 32])); + new_block1.block.body.transactions.push(TransactionSigned::default()); + + // Create a receipt for a transaction in the new committed block. + #[allow(clippy::needless_update)] + let new_receipt = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 12345, + logs: vec![], + success: true, + ..Default::default() + }; + let new_receipts = Receipts { receipt_vec: vec![vec![Some(new_receipt.clone())]] }; + + let new_execution_outcome = + ExecutionOutcome { receipts: new_receipts, ..Default::default() }; + + // Create a new chain segment to be committed, containing `new_block1`. + let new_chain = Arc::new(Chain::new(vec![new_block1.clone()], new_execution_outcome, None)); + + // Create a reorg notification with both reverted (old) and committed (new) chain segments. + let notification = CanonStateNotification::Reorg { old: old_chain, new: new_chain }; + + // Retrieve receipts from both old (reverted) and new (committed) segments. 
+ let block_receipts = notification.block_receipts(); + + // Assert there are two receipt entries, one from each chain segment. + assert_eq!(block_receipts.len(), 2); + + // Verify that the first entry matches old_block1 and its receipt from the reverted segment. + assert_eq!( + block_receipts[0].0, + BlockReceipts { + block: old_block1.num_hash(), + tx_receipts: vec![(B256::default(), old_receipt)] + } + ); + // Confirm this is from the reverted segment. + assert!(block_receipts[0].1); + + // Verify that the second entry matches new_block1 and its receipt from the committed + // segment. + assert_eq!( + block_receipts[1].0, + BlockReceipts { + block: new_block1.num_hash(), + tx_receipts: vec![(B256::default(), new_receipt)] + } + ); + // Confirm this is from the committed segment. + assert!(!block_receipts[1].1); + } +} diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index 564df9fe341a..60a90e43fee0 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -2,9 +2,9 @@ use crate::{ in_memory::ExecutedBlock, CanonStateNotification, CanonStateNotifications, CanonStateSubscriptions, }; -use alloy_consensus::{Transaction as _, TxEip1559, EMPTY_ROOT_HASH}; +use alloy_consensus::{Header, Transaction as _, TxEip1559, EMPTY_ROOT_HASH}; use alloy_eips::{eip1559::INITIAL_BASE_FEE, eip7685::Requests}; -use alloy_primitives::{Address, BlockNumber, Sealable, B256, U256}; +use alloy_primitives::{Address, BlockNumber, B256, U256}; use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; use rand::{thread_rng, Rng}; @@ -12,8 +12,8 @@ use reth_chainspec::{ChainSpec, EthereumHardfork, MIN_TRANSACTION_GAS}; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{ proofs::{calculate_receipt_root, calculate_transaction_root, calculate_withdrawals_root}, - BlockBody, Header, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, SealedHeader, - Transaction, TransactionSigned, TransactionSignedEcRecovered, + BlockBody, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, SealedHeader, Transaction, + TransactionSigned, TransactionSignedEcRecovered, }; use reth_trie::{root::state_root_unhashed, updates::TrieUpdates, HashedPostState}; use revm::{db::BundleState, primitives::AccountInfo}; @@ -160,11 +160,8 @@ impl TestBlockBuilder { ..Default::default() }; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - let block = SealedBlock { - header: SealedHeader::new(header, seal), + header: SealedHeader::seal(header), body: BlockBody { transactions: transactions.into_iter().map(|tx| tx.into_signed()).collect(), ommers: Vec::new(), diff --git a/crates/chainspec/src/api.rs b/crates/chainspec/src/api.rs index 3751789cac8c..f0cc31bb44dc 100644 --- a/crates/chainspec/src/api.rs +++ b/crates/chainspec/src/api.rs @@ -1,13 +1,12 @@ use crate::{ChainSpec, DepositContract}; -use alloc::vec::Vec; +use alloc::{boxed::Box, vec::Vec}; use alloy_chains::Chain; +use alloy_consensus::Header; use alloy_eips::eip1559::BaseFeeParams; use alloy_genesis::Genesis; use alloy_primitives::B256; -use core::fmt::Debug; -use reth_ethereum_forks::DisplayHardforks; +use core::fmt::{Debug, Display}; use reth_network_peers::NodeRecord; -use reth_primitives_traits::Header; /// Trait representing type configuring a chain spec. 
#[auto_impl::auto_impl(&, Arc)] @@ -39,7 +38,7 @@ pub trait EthChainSpec: Send + Sync + Unpin + Debug { fn prune_delete_limit(&self) -> usize; /// Returns a string representation of the hardforks. - fn display_hardforks(&self) -> DisplayHardforks; + fn display_hardforks(&self) -> Box<dyn Display>; /// The genesis header. fn genesis_header(&self) -> &Header; @@ -57,6 +56,11 @@ fn is_optimism(&self) -> bool { self.chain().is_optimism() } + + /// Returns `true` if this chain contains Ethereum configuration. + fn is_ethereum(&self) -> bool { + self.chain().is_ethereum() + } } impl EthChainSpec for ChainSpec { @@ -84,8 +88,8 @@ self.prune_delete_limit } - fn display_hardforks(&self) -> DisplayHardforks { - self.display_hardforks() + fn display_hardforks(&self) -> Box<dyn Display> { + Box::new(Self::display_hardforks(self)) } fn genesis_header(&self) -> &Header { diff --git a/crates/chainspec/src/constants.rs b/crates/chainspec/src/constants.rs index 2e22b2299a4b..3f46fb6b746a 100644 --- a/crates/chainspec/src/constants.rs +++ b/crates/chainspec/src/constants.rs @@ -1,11 +1,12 @@ use crate::spec::DepositContract; -use alloy_primitives::{address, b256}; +use alloy_eips::eip6110::MAINNET_DEPOSIT_CONTRACT_ADDRESS; +use alloy_primitives::b256; /// Gas per transaction not creating a contract. pub const MIN_TRANSACTION_GAS: u64 = 21_000u64; /// Deposit contract address: `0x00000000219ab540356cbb839cbe05303d7705fa` pub(crate) const MAINNET_DEPOSIT_CONTRACT: DepositContract = DepositContract::new( - address!("00000000219ab540356cbb839cbe05303d7705fa"), + MAINNET_DEPOSIT_CONTRACT_ADDRESS, 11052984, b256!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"), ); diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index 02f4b5ca983e..fdaad948f26b 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -3,12 +3,18 @@ pub use alloy_eips::eip1559::BaseFeeParams; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_chains::{Chain, NamedChain}; use alloy_consensus::constants::EMPTY_WITHDRAWALS; -use alloy_eips::{eip1559::INITIAL_BASE_FEE, eip7685::EMPTY_REQUESTS_HASH}; +use alloy_eips::{ + eip1559::INITIAL_BASE_FEE, eip6110::MAINNET_DEPOSIT_CONTRACT_ADDRESS, + eip7685::EMPTY_REQUESTS_HASH, +}; use alloy_genesis::Genesis; use alloy_primitives::{address, b256, Address, BlockNumber, B256, U256}; use derive_more::From; -use alloy_consensus::constants::{DEV_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH}; +use alloy_consensus::{ + constants::{DEV_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH}, + Header, +}; use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use reth_ethereum_forks::{ ChainHardforks, DisplayHardforks, EthereumHardfork, EthereumHardforks, ForkCondition, @@ -18,7 +24,7 @@ use reth_network_peers::{ base_nodes, base_testnet_nodes, holesky_nodes, mainnet_nodes, op_nodes, op_testnet_nodes, sepolia_nodes, NodeRecord, }; -use reth_primitives_traits::{constants::HOLESKY_GENESIS_HASH, Header, SealedHeader}; +use reth_primitives_traits::{constants::HOLESKY_GENESIS_HASH, SealedHeader}; use reth_trie_common::root::state_root_ref_unhashed; use crate::{constants::MAINNET_DEPOSIT_CONTRACT, once_cell_set, EthChainSpec, LazyLock, OnceLock}; @@ -39,7 +45,7 @@ pub static MAINNET: LazyLock<Arc<ChainSpec>> = LazyLock::new(|| { hardforks: EthereumHardfork::mainnet().into(), // https://etherscan.io/tx/0xe75fb554e433e03763a1560646ee22dcb74e5274b34c5ad644e7c0f619a7e1d0 deposit_contract:
Some(DepositContract::new( - address!("00000000219ab540356cbb839cbe05303d7705fa"), + MAINNET_DEPOSIT_CONTRACT_ADDRESS, 11052984, b256!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"), )), @@ -509,34 +515,36 @@ impl ChainSpec { } } - /// An internal helper function that returns the block number of the last block-based - /// fork that occurs before any existing TTD (merge)/timestamp based forks. + /// This internal helper function retrieves the block number of the last block-based fork + /// that occurs before: + /// - Any existing Terminal Total Difficulty (TTD) fork, or + /// - Timestamp-based forks in the current [`ChainSpec`]. /// - /// Note: this returns None if the `ChainSpec` is not configured with a TTD/Timestamp fork. + /// The function operates by examining the configured hard forks in the chain. It iterates + /// through the fork conditions and identifies the most recent block-based fork that + /// precedes any TTD or timestamp-based conditions. + /// + /// If there are no block-based forks found before these conditions, or if the [`ChainSpec`] + /// is not configured with a TTD or timestamp fork, this function will return `None`. pub(crate) fn last_block_fork_before_merge_or_timestamp(&self) -> Option<u64> { let mut hardforks_iter = self.hardforks.forks_iter().peekable(); while let Some((_, curr_cond)) = hardforks_iter.next() { if let Some((_, next_cond)) = hardforks_iter.peek() { - // peek and find the first occurrence of ForkCondition::TTD (merge) , or in - // custom ChainSpecs, the first occurrence of - // ForkCondition::Timestamp. If curr_cond is ForkCondition::Block at - // this point, which it should be in most "normal" ChainSpecs, - // return its block_num + // Match against the `next_cond` to see if it represents: + // - A TTD (merge) + // - A timestamp-based fork match next_cond { - ForkCondition::TTD { fork_block, .. } => { - // handle Sepolia merge netsplit case - if fork_block.is_some() { - return *fork_block - } - // ensure curr_cond is indeed ForkCondition::Block and return block_num - if let ForkCondition::Block(block_num) = curr_cond { - return Some(block_num) - } - } - ForkCondition::Timestamp(_) => { - // ensure curr_cond is indeed ForkCondition::Block and return block_num + // If the next fork is TTD and specifies a specific block, return that block + // number + ForkCondition::TTD { fork_block: Some(block), .. } => return Some(*block), + + // If the next fork is TTD without a specific block or is timestamp-based, + // return the block number of the current condition if it is block-based. + ForkCondition::TTD { .. } | ForkCondition::Timestamp(_) => { + // Check if `curr_cond` is a block-based fork and return its block number if + // true.
if let ForkCondition::Block(block_num) = curr_cond { - return Some(block_num) + return Some(block_num); } } ForkCondition::Block(_) | ForkCondition::Never => continue, diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index ef66a99410f0..7e27d9b4e2ed 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -51,6 +51,8 @@ reth-trie-common = { workspace = true, optional = true } # ethereum alloy-eips.workspace = true alloy-primitives.workspace = true +alloy-rlp.workspace = true +alloy-consensus.workspace = true itertools.workspace = true futures.workspace = true @@ -93,22 +95,23 @@ reth-discv4.workspace = true [features] default = [] arbitrary = [ - "dep:proptest", - "dep:arbitrary", - "dep:proptest-arbitrary-interop", - "reth-primitives/arbitrary", - "reth-db-api/arbitrary", - "reth-eth-wire/arbitrary", - "reth-db/arbitrary", - "reth-chainspec/arbitrary", - "alloy-eips/arbitrary", - "alloy-primitives/arbitrary", - "reth-codecs/test-utils", - "reth-prune-types/test-utils", - "reth-stages-types/test-utils", - "reth-trie-common/test-utils", - "reth-codecs?/arbitrary", - "reth-prune-types?/arbitrary", - "reth-stages-types?/arbitrary", - "reth-trie-common?/arbitrary" + "dep:proptest", + "dep:arbitrary", + "dep:proptest-arbitrary-interop", + "reth-primitives/arbitrary", + "reth-db-api/arbitrary", + "reth-eth-wire/arbitrary", + "reth-db/arbitrary", + "reth-chainspec/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary", + "reth-codecs/test-utils", + "reth-prune-types/test-utils", + "reth-stages-types/test-utils", + "reth-trie-common/test-utils", + "reth-codecs?/arbitrary", + "reth-prune-types?/arbitrary", + "reth-stages-types?/arbitrary", + "reth-trie-common?/arbitrary", + "alloy-consensus/arbitrary", ] diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 956a63a5aa0e..49fee347ed45 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -126,7 +126,7 @@ impl> Environmen .static_file_provider() .check_consistency(&factory.provider()?, has_receipt_pruning)? { - if factory.db_ref().is_read_only() { + if factory.db_ref().is_read_only()? { warn!(target: "reth::cli", ?unwind_target, "Inconsistent storage. 
Restart node to heal."); return Ok(factory) } diff --git a/crates/cli/commands/src/db/checksum.rs b/crates/cli/commands/src/db/checksum.rs index 60ec09c9606e..9aa48e0e865d 100644 --- a/crates/cli/commands/src/db/checksum.rs +++ b/crates/cli/commands/src/db/checksum.rs @@ -6,7 +6,7 @@ use reth_db::{DatabaseEnv, RawKey, RawTable, RawValue, TableViewer, Tables}; use reth_db_api::{cursor::DbCursorRO, table::Table, transaction::DbTx}; use reth_db_common::DbTool; use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine}; -use reth_provider::providers::ProviderNodeTypes; +use reth_provider::{providers::ProviderNodeTypes, DBProvider}; use std::{ hash::{BuildHasher, Hasher}, sync::Arc, diff --git a/crates/cli/commands/src/db/get.rs b/crates/cli/commands/src/db/get.rs index 4006d1660aa3..94c0f63dd301 100644 --- a/crates/cli/commands/src/db/get.rs +++ b/crates/cli/commands/src/db/get.rs @@ -1,3 +1,4 @@ +use alloy_consensus::Header; use alloy_primitives::{hex, BlockHash}; use clap::Parser; use reth_db::{ @@ -7,7 +8,6 @@ use reth_db::{ use reth_db_api::table::{Decompress, DupSort, Table}; use reth_db_common::DbTool; use reth_node_builder::NodeTypesWithDB; -use reth_primitives::Header; use reth_provider::{providers::ProviderNodeTypes, StaticFileProviderFactory}; use reth_static_file_types::StaticFileSegment; use tracing::error; diff --git a/crates/cli/commands/src/init_state.rs b/crates/cli/commands/src/init_state.rs deleted file mode 100644 index 16e99f8fe976..000000000000 --- a/crates/cli/commands/src/init_state.rs +++ /dev/null @@ -1,79 +0,0 @@ -//! Command that initializes the node from a genesis file. - -use crate::common::{AccessRights, Environment, EnvironmentArgs}; -use alloy_primitives::B256; -use clap::Parser; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; -use reth_cli::chainspec::ChainSpecParser; -use reth_config::config::EtlConfig; -use reth_db_common::init::init_from_state_dump; -use reth_node_builder::NodeTypesWithEngine; -use reth_provider::{providers::ProviderNodeTypes, ProviderFactory}; - -use std::{fs::File, io::BufReader, path::PathBuf}; -use tracing::info; - -/// Initializes the database with the genesis block. -#[derive(Debug, Parser)] -pub struct InitStateCommand { - #[command(flatten)] - pub env: EnvironmentArgs, - - /// JSONL file with state dump. - /// - /// Must contain accounts in following format, additional account fields are ignored. Must - /// also contain { "root": \ } as first line. - /// { - /// "balance": "\", - /// "nonce": \, - /// "code": "\", - /// "storage": { - /// "\": "\", - /// .. - /// }, - /// "address": "\", - /// } - /// - /// Allows init at a non-genesis block. Caution! Blocks must be manually imported up until - /// and including the non-genesis block to init chain at. See 'import' command. - #[arg(value_name = "STATE_DUMP_FILE", verbatim_doc_comment)] - pub state: PathBuf, -} - -impl> InitStateCommand { - /// Execute the `init` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { - info!(target: "reth::cli", "Reth init-state starting"); - - let Environment { config, provider_factory, .. } = self.env.init::(AccessRights::RW)?; - - info!(target: "reth::cli", "Initiating state dump"); - - let hash = init_at_state(self.state, provider_factory, config.stages.etl)?; - - info!(target: "reth::cli", hash = ?hash, "Genesis block written"); - Ok(()) - } -} - -/// Initialize chain with state at specific block, from a file with state dump. 
-pub fn init_at_state( - state_dump_path: PathBuf, - factory: ProviderFactory, - etl_config: EtlConfig, -) -> eyre::Result { - info!(target: "reth::cli", - path=?state_dump_path, - "Opening state dump"); - - let file = File::open(state_dump_path)?; - let reader = BufReader::new(file); - - let provider_rw = factory.provider_rw()?; - let hash = init_from_state_dump(reader, &provider_rw.0, etl_config)?; - provider_rw.commit()?; - - Ok(hash) -} diff --git a/crates/cli/commands/src/init_state/mod.rs b/crates/cli/commands/src/init_state/mod.rs new file mode 100644 index 000000000000..adaec3e8be37 --- /dev/null +++ b/crates/cli/commands/src/init_state/mod.rs @@ -0,0 +1,132 @@ +//! Command that initializes the node from a genesis file. + +use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use alloy_primitives::{B256, U256}; +use clap::Parser; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_cli::chainspec::ChainSpecParser; +use reth_db_common::init::init_from_state_dump; +use reth_node_builder::NodeTypesWithEngine; +use reth_primitives::SealedHeader; +use reth_provider::{ + BlockNumReader, DatabaseProviderFactory, StaticFileProviderFactory, StaticFileWriter, +}; + +use std::{fs::File, io::BufReader, path::PathBuf, str::FromStr}; +use tracing::info; + +pub mod without_evm; + +/// Initializes the database with the genesis block. +#[derive(Debug, Parser)] +pub struct InitStateCommand<C: ChainSpecParser> { + #[command(flatten)] + pub env: EnvironmentArgs<C>, + + /// JSONL file with state dump. + /// + /// Must contain accounts in the following format; additional account fields are ignored. Must + /// also contain { "root": \ } as first line. + /// { + /// "balance": "\", + /// "nonce": \, + /// "code": "\", + /// "storage": { + /// "\": "\", + /// .. + /// }, + /// "address": "\", + /// } + /// + /// Allows init at a non-genesis block. Caution! Blocks must be manually imported up to + /// and including the non-genesis block at which to init the chain. See 'import' command. + #[arg(value_name = "STATE_DUMP_FILE", verbatim_doc_comment)] + pub state: PathBuf, + + /// Specifies whether to initialize the state without relying on EVM historical data. + /// + /// When enabled, and before inserting the state, it creates a dummy chain up to the last EVM + /// block specified. It then appends the first provided block. + /// + /// - **Note**: **Do not** import receipts and blocks beforehand, or this will fail or be + /// ignored. + #[arg(long, default_value = "false")] + pub without_evm: bool, + + /// Header file containing the header in RLP-encoded format. + #[arg(long, value_name = "HEADER_FILE", verbatim_doc_comment)] + pub header: Option<PathBuf>, + + /// Total difficulty of the header. + #[arg(long, value_name = "TOTAL_DIFFICULTY", verbatim_doc_comment)] + pub total_difficulty: Option<String>, + + /// Hash of the header. + #[arg(long, value_name = "HEADER_HASH", verbatim_doc_comment)] + pub header_hash: Option<String>, +} + +impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> InitStateCommand<C> { + /// Execute the `init` command + pub async fn execute<N: NodeTypesWithEngine<ChainSpec = C::ChainSpec>>( + self, + ) -> eyre::Result<()> { + info!(target: "reth::cli", "Reth init-state starting"); + + let Environment { config, provider_factory, ..
} = self.env.init::<N>(AccessRights::RW)?; + + let static_file_provider = provider_factory.static_file_provider(); + let provider_rw = provider_factory.database_provider_rw()?; + + if self.without_evm { + // ensure header, total difficulty and header hash are provided + let header = self.header.ok_or_else(|| eyre::eyre!("Header file must be provided"))?; + let header = without_evm::read_header_from_file(header)?; + + let header_hash = + self.header_hash.ok_or_else(|| eyre::eyre!("Header hash must be provided"))?; + let header_hash = B256::from_str(&header_hash)?; + + let total_difficulty = self + .total_difficulty + .ok_or_else(|| eyre::eyre!("Total difficulty must be provided"))?; + let total_difficulty = U256::from_str(&total_difficulty)?; + + let last_block_number = provider_rw.last_block_number()?; + + if last_block_number == 0 { + without_evm::setup_without_evm( + &provider_rw, + &static_file_provider, + SealedHeader::new(header, header_hash), + total_difficulty, + )?; + + // SAFETY: it's safe to commit static files, since in the event of a crash, they + // will be unwound according to database checkpoints. + // + // Necessary to commit, so the header is accessible to provider_rw and + // init_state_dump + static_file_provider.commit()?; + } else if last_block_number > 0 && last_block_number < header.number { + return Err(eyre::eyre!( + "Data directory should be empty when calling init-state with --without-evm." + )); + } + } + + info!(target: "reth::cli", "Initiating state dump"); + + let file = File::open(self.state)?; + let reader = BufReader::new(file); + + let hash = init_from_state_dump(reader, &provider_rw, config.stages.etl)?; + + provider_rw.commit()?; + + info!(target: "reth::cli", hash = ?hash, "Genesis block written"); + Ok(()) + } +} diff --git a/crates/optimism/cli/src/commands/init_state/bedrock.rs b/crates/cli/commands/src/init_state/without_evm.rs similarity index 68% rename from crates/optimism/cli/src/commands/init_state/bedrock.rs rename to crates/cli/commands/src/init_state/without_evm.rs index efff065e5051..29fc2aec60e0 100644 --- a/crates/optimism/cli/src/commands/init_state/bedrock.rs +++ b/crates/cli/commands/src/init_state/without_evm.rs @@ -1,34 +1,50 @@ use alloy_primitives::{BlockNumber, B256, U256}; -use reth_optimism_primitives::bedrock::{BEDROCK_HEADER, BEDROCK_HEADER_HASH, BEDROCK_HEADER_TTD}; +use alloy_rlp::Decodable; + +use alloy_consensus::Header; use reth_primitives::{ - BlockBody, Header, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, + BlockBody, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, }; use reth_provider::{ providers::StaticFileProvider, BlockWriter, StageCheckpointWriter, StaticFileWriter, }; use reth_stages::{StageCheckpoint, StageId}; + +use std::{fs::File, io::Read, path::PathBuf}; use tracing::info; -/// Creates a dummy chain (with no transactions) up to the last OVM block and appends the -/// first valid Bedrock block. -pub(crate) fn setup_op_mainnet_without_ovm( +/// Reads the header RLP from a file and returns the Header. +pub(crate) fn read_header_from_file(path: PathBuf) -> Result<Header, eyre::Error> { + let mut file = File::open(path)?; + let mut buf = Vec::new(); + file.read_to_end(&mut buf)?; + + let header = Header::decode(&mut &buf[..])?; + Ok(header) +} + +/// Creates a dummy chain (with no transactions) up to the last EVM block and appends the +/// first valid block.
+pub fn setup_without_evm( provider_rw: &Provider, static_file_provider: &StaticFileProvider, + header: SealedHeader, + total_difficulty: U256, ) -> Result<(), eyre::Error> where Provider: StageCheckpointWriter + BlockWriter, { - info!(target: "reth::cli", "Setting up dummy OVM chain before importing state."); + info!(target: "reth::cli", "Setting up dummy EVM chain before importing state."); - // Write OVM dummy data up to `BEDROCK_HEADER - 1` block - append_dummy_chain(static_file_provider, BEDROCK_HEADER.number - 1)?; + // Write EVM dummy data up to `header - 1` block + append_dummy_chain(static_file_provider, header.number - 1)?; - info!(target: "reth::cli", "Appending Bedrock block."); + info!(target: "reth::cli", "Appending first valid block."); - append_bedrock_block(provider_rw, static_file_provider)?; + append_first_block(provider_rw, static_file_provider, &header, total_difficulty)?; for stage in StageId::ALL { - provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(BEDROCK_HEADER.number))?; + provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(header.number))?; } info!(target: "reth::cli", "Set up finished."); @@ -36,38 +52,30 @@ where Ok(()) } -/// Appends the first bedrock block. +/// Appends the first block. /// /// By appending it, static file writer also verifies that all segments are at the same /// height. -fn append_bedrock_block( +fn append_first_block( provider_rw: impl BlockWriter, sf_provider: &StaticFileProvider, + header: &SealedHeader, + total_difficulty: U256, ) -> Result<(), eyre::Error> { provider_rw.insert_block( - SealedBlockWithSenders::new( - SealedBlock::new( - SealedHeader::new(BEDROCK_HEADER, BEDROCK_HEADER_HASH), - BlockBody::default(), - ), - vec![], - ) - .expect("no senders or txes"), + SealedBlockWithSenders::new(SealedBlock::new(header.clone(), BlockBody::default()), vec![]) + .expect("no senders or txes"), )?; sf_provider.latest_writer(StaticFileSegment::Headers)?.append_header( - &BEDROCK_HEADER, - BEDROCK_HEADER_TTD, - &BEDROCK_HEADER_HASH, + header, + total_difficulty, + &header.hash(), )?; - sf_provider - .latest_writer(StaticFileSegment::Receipts)? - .increment_block(BEDROCK_HEADER.number)?; + sf_provider.latest_writer(StaticFileSegment::Receipts)?.increment_block(header.number)?; - sf_provider - .latest_writer(StaticFileSegment::Transactions)? 
- .increment_block(BEDROCK_HEADER.number)?; + sf_provider.latest_writer(StaticFileSegment::Transactions)?.increment_block(header.number)?; Ok(()) } diff --git a/crates/cli/commands/src/test_vectors/compact.rs b/crates/cli/commands/src/test_vectors/compact.rs index 162ee1ceaa49..c498718e9fcb 100644 --- a/crates/cli/commands/src/test_vectors/compact.rs +++ b/crates/cli/commands/src/test_vectors/compact.rs @@ -1,4 +1,5 @@ -use alloy_primitives::{hex, private::getrandom::getrandom, TxKind}; +use alloy_eips::eip4895::Withdrawals; +use alloy_primitives::{hex, private::getrandom::getrandom, PrimitiveSignature, TxKind}; use arbitrary::Arbitrary; use eyre::{Context, Result}; use proptest::{ @@ -22,7 +23,7 @@ use reth_db::{ use reth_fs_util as fs; use reth_primitives::{ Account, Log, LogData, Receipt, ReceiptWithBloom, StorageEntry, Transaction, - TransactionSignedNoHash, TxType, Withdrawals, + TransactionSignedNoHash, TxType, }; use reth_prune_types::{PruneCheckpoint, PruneMode}; use reth_stages_types::{ @@ -75,7 +76,6 @@ compact_types!( // reth-primitives Account, Receipt, - Withdrawals, ReceiptWithBloom, // reth_codecs::alloy Authorization, @@ -83,6 +83,7 @@ compact_types!( Header, HeaderExt, Withdrawal, + Withdrawals, TxEip2930, TxEip1559, TxEip4844, @@ -126,7 +127,7 @@ compact_types!( ], // These types require an extra identifier which is usually stored elsewhere (eg. parent type). identifier: [ - // Signature todo we for v we only store parity(true || false), while v can take more values + PrimitiveSignature, Transaction, TxType, TxKind @@ -167,9 +168,22 @@ pub fn generate_vectors_with(gen: &[fn(&mut TestRunner) -> eyre::Result<()>]) -> /// re-encoding. pub fn read_vectors_with(read: &[fn() -> eyre::Result<()>]) -> Result<()> { fs::create_dir_all(VECTORS_FOLDER)?; + let mut errors = None; for read_fn in read { - read_fn()?; + if let Err(err) = read_fn() { + errors.get_or_insert_with(Vec::new).push(err); + } + } + + if let Some(err_list) = errors { + for error in err_list { + eprintln!("{:?}", error); + } + return Err(eyre::eyre!( + "If there are missing types, make sure to run `reth test-vectors compact --write` first.\n + If it happened during CI, ignore IF it's a new proposed type that `main` branch does not have." + )); } Ok(()) @@ -238,9 +252,8 @@ where // Read the file where the vectors are stored let file_path = format!("{VECTORS_FOLDER}/{}.json", &type_name); - let file = File::open(&file_path).wrap_err_with(|| { - "Failed to open vector. Make sure to run `reth test-vectors compact --write` first." 
- })?; + let file = + File::open(&file_path).wrap_err_with(|| format!("Failed to open vector {type_name}."))?; let reader = BufReader::new(file); let stored_values: Vec = serde_json::from_reader(reader)?; @@ -267,5 +280,5 @@ where } pub fn type_name() -> String { - std::any::type_name::().replace("::", "__") + std::any::type_name::().split("::").last().unwrap_or(std::any::type_name::()).to_string() } diff --git a/crates/cli/commands/src/test_vectors/tables.rs b/crates/cli/commands/src/test_vectors/tables.rs index 29ba50c8d83e..fd7d3b3799d8 100644 --- a/crates/cli/commands/src/test_vectors/tables.rs +++ b/crates/cli/commands/src/test_vectors/tables.rs @@ -1,3 +1,4 @@ +use alloy_consensus::Header; use alloy_primitives::{hex, private::getrandom::getrandom}; use arbitrary::Arbitrary; use eyre::Result; @@ -10,6 +11,7 @@ use proptest_arbitrary_interop::arb; use reth_db::tables; use reth_db_api::table::{DupSort, Table, TableRow}; use reth_fs_util as fs; +use reth_primitives::TransactionSignedNoHash; use std::collections::HashSet; use tracing::error; @@ -31,16 +33,16 @@ pub fn generate_vectors(mut tables: Vec) -> Result<()> { fs::create_dir_all(VECTORS_FOLDER)?; macro_rules! generate_vector { - ($table_type:ident, $per_table:expr, TABLE) => { - generate_table_vector::(&mut runner, $per_table)?; + ($table_type:ident$(<$($generic:ident),+>)?, $per_table:expr, TABLE) => { + generate_table_vector::)?>(&mut runner, $per_table)?; }; - ($table_type:ident, $per_table:expr, DUPSORT) => { - generate_dupsort_vector::(&mut runner, $per_table)?; + ($table_type:ident$(<$($generic:ident),+>)?, $per_table:expr, DUPSORT) => { + generate_dupsort_vector::)?>(&mut runner, $per_table)?; }; } macro_rules! generate { - ([$(($table_type:ident, $per_table:expr, $table_or_dup:tt)),*]) => { + ([$(($table_type:ident$(<$($generic:ident),+>)?, $per_table:expr, $table_or_dup:tt)),*]) => { let all_tables = vec![$(stringify!($table_type).to_string(),)*]; if tables.is_empty() { @@ -51,9 +53,9 @@ pub fn generate_vectors(mut tables: Vec) -> Result<()> { match table.as_str() { $( stringify!($table_type) => { - println!("Generating test vectors for {} <{}>.", stringify!($table_or_dup), tables::$table_type::NAME); + println!("Generating test vectors for {} <{}>.", stringify!($table_or_dup), tables::$table_type$(::<$($generic),+>)?::NAME); - generate_vector!($table_type, $per_table, $table_or_dup); + generate_vector!($table_type$(<$($generic),+>)?, $per_table, $table_or_dup); }, )* _ => { @@ -68,11 +70,11 @@ pub fn generate_vectors(mut tables: Vec) -> Result<()> { (CanonicalHeaders, PER_TABLE, TABLE), (HeaderTerminalDifficulties, PER_TABLE, TABLE), (HeaderNumbers, PER_TABLE, TABLE), - (Headers, PER_TABLE, TABLE), + (Headers
, PER_TABLE, TABLE), (BlockBodyIndices, PER_TABLE, TABLE), (BlockOmmers, 100, TABLE), (TransactionHashNumbers, PER_TABLE, TABLE), - (Transactions, 100, TABLE), + (Transactions, 100, TABLE), (PlainStorageState, PER_TABLE, DUPSORT), (PlainAccountState, PER_TABLE, TABLE) ]); diff --git a/crates/cli/util/Cargo.toml b/crates/cli/util/Cargo.toml index d96a882a672a..70515f83b4b7 100644 --- a/crates/cli/util/Cargo.toml +++ b/crates/cli/util/Cargo.toml @@ -24,6 +24,7 @@ eyre.workspace = true rand.workspace = true secp256k1 = { workspace = true, features = ["rand"] } thiserror.workspace = true +serde.workspace = true tracy-client = { workspace = true, optional = true, features = ["demangle"] } diff --git a/crates/cli/util/src/parsers.rs b/crates/cli/util/src/parsers.rs index 202744a4bb79..9bb803bcca89 100644 --- a/crates/cli/util/src/parsers.rs +++ b/crates/cli/util/src/parsers.rs @@ -1,7 +1,9 @@ use alloy_eips::BlockHashOrNumber; use alloy_primitives::B256; +use reth_fs_util::FsPathError; use std::{ net::{IpAddr, Ipv4Addr, SocketAddr, ToSocketAddrs}, + path::Path, str::FromStr, time::Duration, }; @@ -82,6 +84,11 @@ pub fn parse_socket_address(value: &str) -> eyre::Result(path: &str) -> Result { + reth_fs_util::read_json_file(Path::new(path)) +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/consensus/auto-seal/Cargo.toml b/crates/consensus/auto-seal/Cargo.toml deleted file mode 100644 index f2bfb43bcced..000000000000 --- a/crates/consensus/auto-seal/Cargo.toml +++ /dev/null @@ -1,57 +0,0 @@ -[package] -name = "reth-auto-seal-consensus" -version.workspace = true -edition.workspace = true -rust-version.workspace = true -license.workspace = true -homepage.workspace = true -repository.workspace = true -description = "A consensus impl for local testing purposes" - -[lints] -workspace = true - -[dependencies] -# reth -reth-chainspec.workspace = true -reth-beacon-consensus.workspace = true -reth-primitives.workspace = true -reth-execution-errors.workspace = true -reth-execution-types.workspace = true -reth-network-p2p.workspace = true -reth-provider.workspace = true -reth-stages-api.workspace = true -reth-revm.workspace = true -reth-transaction-pool.workspace = true -reth-evm.workspace = true -reth-engine-primitives.workspace = true -reth-consensus.workspace = true -reth-network-peers.workspace = true -reth-tokio-util.workspace = true -reth-trie.workspace = true - -# ethereum -alloy-eips.workspace = true -alloy-primitives.workspace = true -revm-primitives.workspace = true -alloy-rpc-types-engine.workspace = true - -# optimism -reth-optimism-consensus = { workspace = true, optional = true } - -# async -futures-util.workspace = true -tokio = { workspace = true, features = ["sync", "time"] } -tokio-stream.workspace = true -tracing.workspace = true - -[features] -optimism = [ - "reth-provider/optimism", - "reth-optimism-consensus", - "reth-beacon-consensus/optimism", - "reth-execution-types/optimism", - "reth-optimism-consensus?/optimism", - "reth-primitives/optimism", - "revm-primitives/optimism" -] diff --git a/crates/consensus/auto-seal/src/client.rs b/crates/consensus/auto-seal/src/client.rs deleted file mode 100644 index f9b80f10bb5c..000000000000 --- a/crates/consensus/auto-seal/src/client.rs +++ /dev/null @@ -1,131 +0,0 @@ -//! This includes download client implementations for auto sealing miners. 
- -use crate::Storage; -use alloy_primitives::B256; -use reth_network_p2p::{ - bodies::client::{BodiesClient, BodiesFut}, - download::DownloadClient, - headers::client::{HeadersClient, HeadersDirection, HeadersFut, HeadersRequest}, - priority::Priority, -}; -use reth_network_peers::{PeerId, WithPeerId}; -use reth_primitives::{BlockBody, BlockHashOrNumber, Header}; -use std::fmt::Debug; -use tracing::{trace, warn}; - -/// A download client that polls the miner for transactions and assembles blocks to be returned in -/// the download process. -/// -/// When polled, the miner will assemble blocks when miners produce ready transactions and store the -/// blocks in memory. -#[derive(Debug, Clone)] -pub struct AutoSealClient { - storage: Storage, -} - -impl AutoSealClient { - pub(crate) const fn new(storage: Storage) -> Self { - Self { storage } - } - - async fn fetch_headers(&self, request: HeadersRequest) -> Vec
{ - trace!(target: "consensus::auto", ?request, "received headers request"); - - let storage = self.storage.read().await; - let HeadersRequest { start, limit, direction } = request; - let mut headers = Vec::new(); - - let mut block: BlockHashOrNumber = match start { - BlockHashOrNumber::Hash(start) => start.into(), - BlockHashOrNumber::Number(num) => { - if let Some(hash) = storage.block_hash(num) { - hash.into() - } else { - warn!(target: "consensus::auto", num, "no matching block found"); - return headers - } - } - }; - - for _ in 0..limit { - // fetch from storage - if let Some(header) = storage.header_by_hash_or_number(block) { - match direction { - HeadersDirection::Falling => block = header.parent_hash.into(), - HeadersDirection::Rising => { - let next = header.number + 1; - block = next.into() - } - } - headers.push(header); - } else { - break - } - } - - trace!(target: "consensus::auto", ?headers, "returning headers"); - - headers - } - - async fn fetch_bodies(&self, hashes: Vec) -> Vec { - trace!(target: "consensus::auto", ?hashes, "received bodies request"); - let storage = self.storage.read().await; - let mut bodies = Vec::new(); - for hash in hashes { - if let Some(body) = storage.bodies.get(&hash).cloned() { - bodies.push(body); - } else { - break - } - } - - trace!(target: "consensus::auto", ?bodies, "returning bodies"); - - bodies - } -} - -impl HeadersClient for AutoSealClient { - type Output = HeadersFut; - - fn get_headers_with_priority( - &self, - request: HeadersRequest, - _priority: Priority, - ) -> Self::Output { - let this = self.clone(); - Box::pin(async move { - let headers = this.fetch_headers(request).await; - Ok(WithPeerId::new(PeerId::random(), headers)) - }) - } -} - -impl BodiesClient for AutoSealClient { - type Output = BodiesFut; - - fn get_block_bodies_with_priority( - &self, - hashes: Vec, - _priority: Priority, - ) -> Self::Output { - let this = self.clone(); - Box::pin(async move { - let bodies = this.fetch_bodies(hashes).await; - Ok(WithPeerId::new(PeerId::random(), bodies)) - }) - } -} - -impl DownloadClient for AutoSealClient { - fn report_bad_message(&self, _peer_id: PeerId) { - warn!("Reported a bad message on a miner, we should never produce bad blocks"); - // noop - } - - fn num_connected_peers(&self) -> usize { - // no such thing as connected peers when we are mining ourselves - 1 - } -} diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs deleted file mode 100644 index 16299e19ba45..000000000000 --- a/crates/consensus/auto-seal/src/lib.rs +++ /dev/null @@ -1,691 +0,0 @@ -//! A [Consensus] implementation for local testing purposes -//! that automatically seals blocks. -//! -//! The Mining task polls a [`MiningMode`], and will return a list of transactions that are ready to -//! be mined. -//! -//! These downloaders poll the miner, assemble the block, and return transactions that are ready to -//! be mined. 
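The removed `fetch_headers` above pages through storage parent-ward (`Falling`) or child-ward (`Rising`). A condensed, self-contained sketch of that walk, keyed by block number for brevity (illustrative names, not the deleted API):

use alloy_consensus::Header;
use std::collections::HashMap;

/// Collect up to `limit` headers starting at `start`, stepping to the parent
/// (falling) or the child (rising), and stopping at the first gap.
fn walk_headers(
    by_number: &HashMap<u64, Header>,
    start: u64,
    limit: u64,
    rising: bool,
) -> Vec<Header> {
    let mut out = Vec::new();
    let mut num = start;
    for _ in 0..limit {
        let Some(header) = by_number.get(&num) else { break };
        out.push(header.clone());
        // the original resolves the parent via `parent_hash`; with a
        // number-keyed map the falling step is simply `number - 1`
        num = if rising {
            num + 1
        } else {
            match num.checked_sub(1) {
                Some(n) => n,
                None => break,
            }
        };
    }
    out
}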
- -#![doc( - html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", - html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", - issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" -)] -#![cfg_attr(not(test), warn(unused_crate_dependencies))] -#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] - -use alloy_eips::eip7685::Requests; -use alloy_primitives::{BlockHash, BlockNumber, Bloom, B256, U256}; -use reth_beacon_consensus::BeaconEngineMessage; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; -use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; -use reth_engine_primitives::EngineTypes; -use reth_execution_errors::{ - BlockExecutionError, BlockValidationError, InternalBlockExecutionError, -}; -use reth_execution_types::ExecutionOutcome; -use reth_primitives::{ - proofs, Block, BlockBody, BlockHashOrNumber, BlockWithSenders, Header, SealedBlock, - SealedHeader, TransactionSigned, Withdrawals, -}; -use reth_provider::{BlockReaderIdExt, StateProviderFactory, StateRootProvider}; -use reth_revm::database::StateProviderDatabase; -use reth_transaction_pool::TransactionPool; -use reth_trie::HashedPostState; -use revm_primitives::calc_excess_blob_gas; -use std::{ - collections::HashMap, - fmt::Debug, - sync::Arc, - time::{SystemTime, UNIX_EPOCH}, -}; -use tokio::sync::{mpsc::UnboundedSender, RwLock, RwLockReadGuard, RwLockWriteGuard}; -use tracing::trace; - -mod client; -mod mode; -mod task; - -pub use crate::client::AutoSealClient; -pub use mode::{FixedBlockTimeMiner, MiningMode, ReadyTransactionMiner}; -use reth_evm::execute::{BlockExecutorProvider, Executor}; -pub use task::MiningTask; - -/// A consensus implementation intended for local development and testing purposes. -#[derive(Debug, Clone)] -#[allow(dead_code)] -pub struct AutoSealConsensus { - /// Configuration - chain_spec: Arc, -} - -impl AutoSealConsensus { - /// Create a new instance of [`AutoSealConsensus`] - pub const fn new(chain_spec: Arc) -> Self { - Self { chain_spec } - } -} - -impl Consensus for AutoSealConsensus { - fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { - Ok(()) - } - - fn validate_header_against_parent( - &self, - _header: &SealedHeader, - _parent: &SealedHeader, - ) -> Result<(), ConsensusError> { - Ok(()) - } - - fn validate_header_with_total_difficulty( - &self, - _header: &Header, - _total_difficulty: U256, - ) -> Result<(), ConsensusError> { - Ok(()) - } - - fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), ConsensusError> { - Ok(()) - } - - fn validate_block_post_execution( - &self, - _block: &BlockWithSenders, - _input: PostExecutionInput<'_>, - ) -> Result<(), ConsensusError> { - Ok(()) - } -} - -/// Builder type for configuring the setup -#[derive(Debug)] -pub struct AutoSealBuilder { - client: Client, - consensus: AutoSealConsensus, - pool: Pool, - mode: MiningMode, - storage: Storage, - to_engine: UnboundedSender>, - evm_config: EvmConfig, -} - -// === impl AutoSealBuilder === - -impl - AutoSealBuilder -where - Client: BlockReaderIdExt, - Pool: TransactionPool, - Engine: EngineTypes, - ChainSpec: EthChainSpec, -{ - /// Creates a new builder instance to configure all parts. 
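    // Usage sketch (comment-only, hypothetical wiring; the arguments would come
    // from the node builder):
    //
    //     let (consensus, client, task) = AutoSealBuilder::new(
    //         chain_spec, provider, pool, to_engine, mode, evm_config,
    //     ).build();
    //
    // `consensus` plugs into validation, `client` serves the download stage, and
    // `task` is spawned to produce blocks.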
- pub fn new( - chain_spec: Arc, - client: Client, - pool: Pool, - to_engine: UnboundedSender>, - mode: MiningMode, - evm_config: EvmConfig, - ) -> Self { - let latest_header = client.latest_header().ok().flatten().unwrap_or_else(|| { - SealedHeader::new(chain_spec.genesis_header().clone(), chain_spec.genesis_hash()) - }); - - Self { - storage: Storage::new(latest_header), - client, - consensus: AutoSealConsensus::new(chain_spec), - pool, - mode, - to_engine, - evm_config, - } - } - - /// Sets the [`MiningMode`] it operates in, default is [`MiningMode::Auto`] - pub fn mode(mut self, mode: MiningMode) -> Self { - self.mode = mode; - self - } - - /// Consumes the type and returns all components - #[track_caller] - pub fn build( - self, - ) -> ( - AutoSealConsensus, - AutoSealClient, - MiningTask, - ) { - let Self { client, consensus, pool, mode, storage, to_engine, evm_config } = self; - let auto_client = AutoSealClient::new(storage.clone()); - let task = MiningTask::new( - Arc::clone(&consensus.chain_spec), - mode, - to_engine, - storage, - client, - pool, - evm_config, - ); - (consensus, auto_client, task) - } -} - -/// In memory storage -#[derive(Debug, Clone, Default)] -pub(crate) struct Storage { - inner: Arc>, -} - -// == impl Storage === - -impl Storage { - /// Initializes the [Storage] with the given best block. This should be initialized with the - /// highest block in the chain, if there is a chain already stored on-disk. - fn new(best_block: SealedHeader) -> Self { - let (header, best_hash) = best_block.split(); - let mut storage = StorageInner { - best_hash, - total_difficulty: header.difficulty, - best_block: header.number, - ..Default::default() - }; - storage.headers.insert(header.number, header); - storage.bodies.insert(best_hash, BlockBody::default()); - Self { inner: Arc::new(RwLock::new(storage)) } - } - - /// Returns the write lock of the storage - pub(crate) async fn write(&self) -> RwLockWriteGuard<'_, StorageInner> { - self.inner.write().await - } - - /// Returns the read lock of the storage - pub(crate) async fn read(&self) -> RwLockReadGuard<'_, StorageInner> { - self.inner.read().await - } -} - -/// In-memory storage for the chain the auto seal engine is building. -#[derive(Default, Debug)] -pub(crate) struct StorageInner { - /// Headers buffered for download. - pub(crate) headers: HashMap, - /// A mapping between block hash and number. - pub(crate) hash_to_number: HashMap, - /// Bodies buffered for download. - pub(crate) bodies: HashMap, - /// Tracks best block - pub(crate) best_block: u64, - /// Tracks hash of best block - pub(crate) best_hash: B256, - /// The total difficulty of the chain until this block - pub(crate) total_difficulty: U256, -} - -// === impl StorageInner === - -impl StorageInner { - /// Returns the block hash for the given block number if it exists. - pub(crate) fn block_hash(&self, num: u64) -> Option { - self.hash_to_number.iter().find_map(|(k, v)| num.eq(v).then_some(*k)) - } - - /// Returns the matching header if it exists. - pub(crate) fn header_by_hash_or_number( - &self, - hash_or_num: BlockHashOrNumber, - ) -> Option
{ - let num = match hash_or_num { - BlockHashOrNumber::Hash(hash) => self.hash_to_number.get(&hash).copied()?, - BlockHashOrNumber::Number(num) => num, - }; - self.headers.get(&num).cloned() - } - - /// Inserts a new header+body pair - pub(crate) fn insert_new_block(&mut self, mut header: Header, body: BlockBody) { - header.number = self.best_block + 1; - header.parent_hash = self.best_hash; - - self.best_hash = header.hash_slow(); - self.best_block = header.number; - self.total_difficulty += header.difficulty; - - trace!(target: "consensus::auto", num=self.best_block, hash=?self.best_hash, "inserting new block"); - self.headers.insert(header.number, header); - self.bodies.insert(self.best_hash, body); - self.hash_to_number.insert(self.best_hash, self.best_block); - } - - /// Fills in pre-execution header fields based on the current best block and given - /// transactions. - pub(crate) fn build_header_template( - &self, - timestamp: u64, - transactions: &[TransactionSigned], - ommers: &[Header], - withdrawals: Option<&Withdrawals>, - requests: Option<&Requests>, - chain_spec: &ChainSpec, - ) -> Header - where - ChainSpec: EthChainSpec + EthereumHardforks, - { - // check previous block for base fee - let base_fee_per_gas = self.headers.get(&self.best_block).and_then(|parent| { - parent.next_block_base_fee(chain_spec.base_fee_params_at_timestamp(timestamp)) - }); - - let blob_gas_used = chain_spec.is_cancun_active_at_timestamp(timestamp).then(|| { - transactions - .iter() - .filter_map(|tx| tx.transaction.as_eip4844()) - .map(|blob_tx| blob_tx.blob_gas()) - .sum::() - }); - - let mut header = Header { - parent_hash: self.best_hash, - ommers_hash: proofs::calculate_ommers_root(ommers), - transactions_root: proofs::calculate_transaction_root(transactions), - withdrawals_root: withdrawals.map(|w| proofs::calculate_withdrawals_root(w)), - difficulty: U256::from(2), - number: self.best_block + 1, - gas_limit: chain_spec.max_gas_limit(), - timestamp, - base_fee_per_gas, - blob_gas_used, - requests_hash: requests.map(|r| r.requests_hash()), - ..Default::default() - }; - - if chain_spec.is_cancun_active_at_timestamp(timestamp) { - let parent = self.headers.get(&self.best_block); - header.parent_beacon_block_root = - parent.and_then(|parent| parent.parent_beacon_block_root); - header.blob_gas_used = Some(0); - - let (parent_excess_blob_gas, parent_blob_gas_used) = match parent { - Some(parent) if chain_spec.is_cancun_active_at_timestamp(parent.timestamp) => ( - parent.excess_blob_gas.unwrap_or_default(), - parent.blob_gas_used.unwrap_or_default(), - ), - _ => (0, 0), - }; - header.excess_blob_gas = - Some(calc_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used)) - } - - header - } - - /// Builds and executes a new block with the given transactions, on the provided executor. - /// - /// This returns the header of the executed block, as well as the poststate from execution. - #[allow(clippy::too_many_arguments)] - pub(crate) fn build_and_execute( - &mut self, - transactions: Vec, - ommers: Vec
, - provider: &Provider, - chain_spec: Arc, - executor: &Executor, - ) -> Result<(SealedHeader, ExecutionOutcome), BlockExecutionError> - where - Executor: BlockExecutorProvider, - Provider: StateProviderFactory, - ChainSpec: EthChainSpec + EthereumHardforks, - { - let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(); - - // if shanghai is active, include empty withdrawals - let withdrawals = - chain_spec.is_shanghai_active_at_timestamp(timestamp).then_some(Withdrawals::default()); - // if prague is active, include empty requests - let requests = - chain_spec.is_prague_active_at_timestamp(timestamp).then_some(Requests::default()); - - let header = self.build_header_template( - timestamp, - &transactions, - &ommers, - withdrawals.as_ref(), - requests.as_ref(), - &chain_spec, - ); - - let block = Block { - header, - body: BlockBody { - transactions, - ommers: ommers.clone(), - withdrawals: withdrawals.clone(), - }, - } - .with_recovered_senders() - .ok_or(BlockExecutionError::Validation(BlockValidationError::SenderRecoveryError))?; - - trace!(target: "consensus::auto", transactions=?&block.body, "executing transactions"); - - let mut db = StateProviderDatabase::new( - provider.latest().map_err(InternalBlockExecutionError::LatestBlock)?, - ); - - // execute the block - let block_execution_output = - executor.executor(&mut db).execute((&block, U256::ZERO).into())?; - let gas_used = block_execution_output.gas_used; - let execution_outcome = ExecutionOutcome::from((block_execution_output, block.number)); - let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state); - - // todo(onbjerg): we should not pass requests around as this is building a block, which - // means we need to extract the requests from the execution output and compute the requests - // root here - - let Block { mut header, body, .. 
} = block.block; - let body = BlockBody { transactions: body.transactions, ommers, withdrawals }; - - trace!(target: "consensus::auto", ?execution_outcome, ?header, ?body, "executed block, calculating state root and completing header"); - - // now we need to update certain header fields with the results of the execution - header.state_root = db.state_root(hashed_state)?; - header.gas_used = gas_used; - - let receipts = execution_outcome.receipts_by_block(header.number); - - // update logs bloom - let receipts_with_bloom = - receipts.iter().map(|r| r.as_ref().unwrap().bloom_slow()).collect::>(); - header.logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | *r); - - // update receipts root - header.receipts_root = { - #[cfg(feature = "optimism")] - let receipts_root = execution_outcome - .generic_receipts_root_slow(header.number, |receipts| { - reth_optimism_consensus::calculate_receipt_root_no_memo_optimism( - receipts, - &chain_spec, - header.timestamp, - ) - }) - .expect("Receipts is present"); - - #[cfg(not(feature = "optimism"))] - let receipts_root = - execution_outcome.receipts_root_slow(header.number).expect("Receipts is present"); - - receipts_root - }; - trace!(target: "consensus::auto", root=?header.state_root, ?body, "calculated root"); - - // finally insert into storage - self.insert_new_block(header.clone(), body); - - // set new header with hash that should have been updated by insert_new_block - let new_header = SealedHeader::new(header, self.best_hash); - - Ok((new_header, execution_outcome)) - } -} - -#[cfg(test)] -mod tests { - use reth_chainspec::{ChainHardforks, ChainSpec, EthereumHardfork, ForkCondition}; - use reth_primitives::Transaction; - - use super::*; - - #[test] - fn test_block_hash() { - let mut storage = StorageInner::default(); - - // Define two block hashes and their corresponding block numbers. - let block_hash_1: BlockHash = B256::random(); - let block_number_1: BlockNumber = 1; - let block_hash_2: BlockHash = B256::random(); - let block_number_2: BlockNumber = 2; - - // Insert the block number and hash pairs into the `hash_to_number` map. - storage.hash_to_number.insert(block_hash_1, block_number_1); - storage.hash_to_number.insert(block_hash_2, block_number_2); - - // Verify that `block_hash` returns the correct block hash for the given block number. - assert_eq!(storage.block_hash(block_number_1), Some(block_hash_1)); - assert_eq!(storage.block_hash(block_number_2), Some(block_hash_2)); - - // Test that `block_hash` returns `None` for a non-existent block number. - let block_number_3: BlockNumber = 3; - assert_eq!(storage.block_hash(block_number_3), None); - } - - #[test] - fn test_header_by_hash_or_number() { - let mut storage = StorageInner::default(); - - // Define block numbers, headers, and hashes. - let block_number_1: u64 = 1; - let block_number_2: u64 = 2; - let header_1 = Header { number: block_number_1, ..Default::default() }; - let header_2 = Header { number: block_number_2, ..Default::default() }; - let block_hash_1: BlockHash = B256::random(); - let block_hash_2: BlockHash = B256::random(); - - // Insert headers and hash-to-number mappings. - storage.headers.insert(block_number_1, header_1.clone()); - storage.headers.insert(block_number_2, header_2.clone()); - storage.hash_to_number.insert(block_hash_1, block_number_1); - storage.hash_to_number.insert(block_hash_2, block_number_2); - - // Test header retrieval by block number. 
- assert_eq!( - storage.header_by_hash_or_number(BlockHashOrNumber::Number(block_number_1)), - Some(header_1.clone()) - ); - assert_eq!( - storage.header_by_hash_or_number(BlockHashOrNumber::Number(block_number_2)), - Some(header_2.clone()) - ); - - // Test header retrieval by block hash. - assert_eq!( - storage.header_by_hash_or_number(BlockHashOrNumber::Hash(block_hash_1)), - Some(header_1) - ); - assert_eq!( - storage.header_by_hash_or_number(BlockHashOrNumber::Hash(block_hash_2)), - Some(header_2) - ); - - // Test non-existent block number and hash. - assert_eq!(storage.header_by_hash_or_number(BlockHashOrNumber::Number(999)), None); - let non_existent_hash: BlockHash = B256::random(); - assert_eq!( - storage.header_by_hash_or_number(BlockHashOrNumber::Hash(non_existent_hash)), - None - ); - } - - #[test] - fn test_insert_new_block() { - let mut storage = StorageInner::default(); - - // Define headers and block bodies. - let header_1 = Header { difficulty: U256::from(100), ..Default::default() }; - let body_1 = BlockBody::default(); - let header_2 = Header { difficulty: U256::from(200), ..Default::default() }; - let body_2 = BlockBody::default(); - - // Insert the first block. - storage.insert_new_block(header_1.clone(), body_1.clone()); - let best_block_1 = storage.best_block; - let best_hash_1 = storage.best_hash; - - // Verify the block was inserted correctly. - assert_eq!( - storage.headers.get(&best_block_1), - Some(&Header { number: 1, ..header_1.clone() }) - ); - assert_eq!(storage.bodies.get(&best_hash_1), Some(&body_1)); - assert_eq!(storage.hash_to_number.get(&best_hash_1), Some(&best_block_1)); - - // Insert the second block. - storage.insert_new_block(header_2.clone(), body_2.clone()); - let best_block_2 = storage.best_block; - let best_hash_2 = storage.best_hash; - - // Verify the second block was inserted correctly. - assert_eq!( - storage.headers.get(&best_block_2), - Some(&Header { - number: 2, - parent_hash: Header { number: 1, ..header_1 }.hash_slow(), - ..header_2 - }) - ); - assert_eq!(storage.bodies.get(&best_hash_2), Some(&body_2)); - assert_eq!(storage.hash_to_number.get(&best_hash_2), Some(&best_block_2)); - - // Check that the total difficulty was updated. 
- assert_eq!(storage.total_difficulty, header_1.difficulty + header_2.difficulty); - } - - #[test] - fn test_build_basic_header_template() { - let mut storage = StorageInner::default(); - let chain_spec = ChainSpec::default(); - - let best_block_number = 1; - let best_block_hash = B256::random(); - let timestamp = 1_600_000_000; - - // Set up best block information - storage.best_block = best_block_number; - storage.best_hash = best_block_hash; - - // Build header template - let header = storage.build_header_template( - timestamp, - &[], // no transactions - &[], // no ommers - None, // no withdrawals - None, // no requests - &chain_spec, - ); - - // Verify basic fields - assert_eq!(header.parent_hash, best_block_hash); - assert_eq!(header.number, best_block_number + 1); - assert_eq!(header.timestamp, timestamp); - assert_eq!(header.gas_limit, chain_spec.max_gas_limit); - } - - #[test] - fn test_ommers_and_transactions_roots() { - let storage = StorageInner::default(); - let chain_spec = ChainSpec::default(); - let timestamp = 1_600_000_000; - - // Setup ommers and transactions - let ommers = vec![Header::default()]; - let transactions = vec![TransactionSigned::default()]; - - // Build header template - let header = storage.build_header_template( - timestamp, - &transactions, - &ommers, - None, // no withdrawals - None, // no requests - &chain_spec, - ); - - // Verify ommers and transactions roots - assert_eq!(header.ommers_hash, proofs::calculate_ommers_root(&ommers)); - assert_eq!(header.transactions_root, proofs::calculate_transaction_root(&transactions)); - } - - // Test base fee calculation from the parent block - #[test] - fn test_base_fee_calculation() { - let mut storage = StorageInner::default(); - let chain_spec = ChainSpec::default(); - let timestamp = 1_600_000_000; - - // Set up the parent header with base fee - let base_fee = Some(100); - let parent_header = Header { base_fee_per_gas: base_fee, ..Default::default() }; - storage.headers.insert(storage.best_block, parent_header); - - // Build header template - let header = storage.build_header_template( - timestamp, - &[], // no transactions - &[], // no ommers - None, // no withdrawals - None, // no requests - &chain_spec, - ); - - // Verify base fee is correctly propagated - assert_eq!(header.base_fee_per_gas, base_fee); - } - - // Test blob gas and excess blob gas calculation when Cancun is active - #[test] - fn test_blob_gas_calculation_cancun() { - let storage = StorageInner::default(); - let chain_spec = ChainSpec { - hardforks: ChainHardforks::new(vec![( - EthereumHardfork::Cancun.boxed(), - ForkCondition::Timestamp(25), - )]), - ..Default::default() - }; - let timestamp = 26; - - // Set up a transaction with blob gas - let blob_tx = TransactionSigned { - transaction: Transaction::Eip4844(Default::default()), - ..Default::default() - }; - let transactions = vec![blob_tx]; - - // Build header template - let header = storage.build_header_template( - timestamp, - &transactions, - &[], // no ommers - None, // no withdrawals - None, // no requests - &chain_spec, - ); - - // Verify that the header has the correct fields including blob gas - assert_eq!( - header, - Header { - parent_hash: B256::ZERO, - ommers_hash: proofs::calculate_ommers_root(&[]), - transactions_root: proofs::calculate_transaction_root(&transactions), - withdrawals_root: None, - difficulty: U256::from(2), - number: 1, - gas_limit: chain_spec.max_gas_limit, - timestamp, - base_fee_per_gas: None, - blob_gas_used: Some(0), - requests_hash: None, - 
excess_blob_gas: Some(0), - ..Default::default() - } - ); - } -} diff --git a/crates/consensus/auto-seal/src/mode.rs b/crates/consensus/auto-seal/src/mode.rs deleted file mode 100644 index 82750c8e47b6..000000000000 --- a/crates/consensus/auto-seal/src/mode.rs +++ /dev/null @@ -1,166 +0,0 @@ -//! The mode the auto seal miner is operating in. - -use alloy_primitives::TxHash; -use futures_util::{stream::Fuse, StreamExt}; -use reth_transaction_pool::{TransactionPool, ValidPoolTransaction}; -use std::{ - fmt, - pin::Pin, - sync::Arc, - task::{Context, Poll}, - time::Duration, -}; -use tokio::{sync::mpsc::Receiver, time::Interval}; -use tokio_stream::{wrappers::ReceiverStream, Stream}; - -/// Mode of operations for the `Miner` -#[derive(Debug)] -pub enum MiningMode { - /// A miner that does nothing - None, - /// A miner that listens for new transactions that are ready. - /// - /// Either one transaction will be mined per block, or any number of transactions will be - /// allowed - Auto(ReadyTransactionMiner), - /// A miner that constructs a new block every `interval` tick - FixedBlockTime(FixedBlockTimeMiner), -} - -// === impl MiningMode === - -impl MiningMode { - /// Creates a new instant mining mode that listens for new transactions and tries to build - /// non-empty blocks as soon as transactions arrive. - pub fn instant(max_transactions: usize, listener: Receiver) -> Self { - Self::Auto(ReadyTransactionMiner { - max_transactions, - has_pending_txs: None, - rx: ReceiverStream::new(listener).fuse(), - }) - } - - /// Creates a new interval miner that builds a block ever `duration`. - pub fn interval(duration: Duration) -> Self { - Self::FixedBlockTime(FixedBlockTimeMiner::new(duration)) - } - - /// polls the Pool and returns those transactions that should be put in a block, if any. - pub(crate) fn poll( - &mut self, - pool: &Pool, - cx: &mut Context<'_>, - ) -> Poll::Transaction>>>> - where - Pool: TransactionPool, - { - match self { - Self::None => Poll::Pending, - Self::Auto(miner) => miner.poll(pool, cx), - Self::FixedBlockTime(miner) => miner.poll(pool, cx), - } - } -} - -impl fmt::Display for MiningMode { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let kind = match self { - Self::None => "None", - Self::Auto(_) => "Auto", - Self::FixedBlockTime(_) => "FixedBlockTime", - }; - write!(f, "{kind}") - } -} - -/// A miner that's supposed to create a new block every `interval`, mining all transactions that are -/// ready at that time. 
-/// -/// The default blocktime is set to 6 seconds -#[derive(Debug)] -pub struct FixedBlockTimeMiner { - /// The interval this fixed block time miner operates with - interval: Interval, -} - -// === impl FixedBlockTimeMiner === - -impl FixedBlockTimeMiner { - /// Creates a new instance with an interval of `duration` - pub(crate) fn new(duration: Duration) -> Self { - let start = tokio::time::Instant::now() + duration; - Self { interval: tokio::time::interval_at(start, duration) } - } - - fn poll( - &mut self, - pool: &Pool, - cx: &mut Context<'_>, - ) -> Poll::Transaction>>>> - where - Pool: TransactionPool, - { - if self.interval.poll_tick(cx).is_ready() { - // drain the pool - return Poll::Ready(pool.best_transactions().collect()) - } - Poll::Pending - } -} - -impl Default for FixedBlockTimeMiner { - fn default() -> Self { - Self::new(Duration::from_secs(6)) - } -} - -/// A miner that Listens for new ready transactions -pub struct ReadyTransactionMiner { - /// how many transactions to mine per block - max_transactions: usize, - /// stores whether there are pending transactions (if known) - has_pending_txs: Option, - /// Receives hashes of transactions that are ready - rx: Fuse>, -} - -// === impl ReadyTransactionMiner === - -impl ReadyTransactionMiner { - fn poll( - &mut self, - pool: &Pool, - cx: &mut Context<'_>, - ) -> Poll::Transaction>>>> - where - Pool: TransactionPool, - { - // drain the notification stream - while let Poll::Ready(Some(_hash)) = Pin::new(&mut self.rx).poll_next(cx) { - self.has_pending_txs = Some(true); - } - - if self.has_pending_txs == Some(false) { - return Poll::Pending - } - - let transactions = pool.best_transactions().take(self.max_transactions).collect::>(); - - // there are pending transactions if we didn't drain the pool - self.has_pending_txs = Some(transactions.len() >= self.max_transactions); - - if transactions.is_empty() { - return Poll::Pending - } - - Poll::Ready(transactions) - } -} - -impl fmt::Debug for ReadyTransactionMiner { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ReadyTransactionMiner") - .field("max_transactions", &self.max_transactions) - .finish_non_exhaustive() - } -} diff --git a/crates/consensus/auto-seal/src/task.rs b/crates/consensus/auto-seal/src/task.rs deleted file mode 100644 index cb0586d44407..000000000000 --- a/crates/consensus/auto-seal/src/task.rs +++ /dev/null @@ -1,220 +0,0 @@ -use crate::{mode::MiningMode, Storage}; -use alloy_rpc_types_engine::ForkchoiceState; -use futures_util::{future::BoxFuture, FutureExt}; -use reth_beacon_consensus::{BeaconEngineMessage, ForkchoiceStatus}; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; -use reth_engine_primitives::EngineTypes; -use reth_evm::execute::BlockExecutorProvider; -use reth_provider::{CanonChainTracker, StateProviderFactory}; -use reth_stages_api::PipelineEvent; -use reth_tokio_util::EventStream; -use reth_transaction_pool::{TransactionPool, ValidPoolTransaction}; -use std::{ - collections::VecDeque, - future::Future, - pin::Pin, - sync::Arc, - task::{Context, Poll}, -}; -use tokio::sync::{mpsc::UnboundedSender, oneshot}; -use tracing::{debug, error, warn}; - -/// A Future that listens for new ready transactions and puts new blocks into storage -pub struct MiningTask { - /// The configured chain spec - chain_spec: Arc, - /// The client used to interact with the state - client: Client, - /// The active miner - miner: MiningMode, - /// Single active future that inserts a new block into `storage` - insert_task: Option>>>, - /// 
Shared storage to insert new blocks - storage: Storage, - /// Pool where transactions are stored - pool: Pool, - /// backlog of sets of transactions ready to be mined - queued: VecDeque::Transaction>>>>, - // TODO: ideally this would just be a sender of hashes - to_engine: UnboundedSender>, - /// The pipeline events to listen on - pipe_line_events: Option>, - /// The type used for block execution - block_executor: Executor, -} - -// === impl MiningTask === - -impl - MiningTask -{ - /// Creates a new instance of the task - #[allow(clippy::too_many_arguments)] - pub(crate) fn new( - chain_spec: Arc, - miner: MiningMode, - to_engine: UnboundedSender>, - storage: Storage, - client: Client, - pool: Pool, - block_executor: Executor, - ) -> Self { - Self { - chain_spec, - client, - miner, - insert_task: None, - storage, - pool, - to_engine, - queued: Default::default(), - pipe_line_events: None, - block_executor, - } - } - - /// Sets the pipeline events to listen on. - pub fn set_pipeline_events(&mut self, events: EventStream) { - self.pipe_line_events = Some(events); - } -} - -impl Future - for MiningTask -where - Client: StateProviderFactory + CanonChainTracker + Clone + Unpin + 'static, - Pool: TransactionPool + Unpin + 'static, - Engine: EngineTypes, - Executor: BlockExecutorProvider, - ChainSpec: EthChainSpec + EthereumHardforks + 'static, -{ - type Output = (); - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.get_mut(); - - // this drives block production and - loop { - if let Poll::Ready(transactions) = this.miner.poll(&this.pool, cx) { - // miner returned a set of transaction that we feed to the producer - this.queued.push_back(transactions); - } - - if this.insert_task.is_none() { - if this.queued.is_empty() { - // nothing to insert - break - } - - // ready to queue in new insert task - let storage = this.storage.clone(); - let transactions = this.queued.pop_front().expect("not empty"); - - let to_engine = this.to_engine.clone(); - let client = this.client.clone(); - let chain_spec = Arc::clone(&this.chain_spec); - let events = this.pipe_line_events.take(); - let executor = this.block_executor.clone(); - - // Create the mining future that creates a block, notifies the engine that drives - // the pipeline - this.insert_task = Some(Box::pin(async move { - let mut storage = storage.write().await; - - let transactions: Vec<_> = transactions - .into_iter() - .map(|tx| { - let recovered = tx.to_recovered_transaction(); - recovered.into_signed() - }) - .collect(); - let ommers = vec![]; - - match storage.build_and_execute( - transactions.clone(), - ommers.clone(), - &client, - chain_spec, - &executor, - ) { - Ok((new_header, _bundle_state)) => { - let state = ForkchoiceState { - head_block_hash: new_header.hash(), - finalized_block_hash: new_header.hash(), - safe_block_hash: new_header.hash(), - }; - drop(storage); - - // TODO: make this a future - // await the fcu call rx for SYNCING, then wait for a VALID response - loop { - // send the new update to the engine, this will trigger the engine - // to download and execute the block we just inserted - let (tx, rx) = oneshot::channel(); - let _ = to_engine.send(BeaconEngineMessage::ForkchoiceUpdated { - state, - payload_attrs: None, - tx, - }); - debug!(target: "consensus::auto", ?state, "Sent fork choice update"); - - match rx.await.unwrap() { - Ok(fcu_response) => { - match fcu_response.forkchoice_status() { - ForkchoiceStatus::Valid => break, - ForkchoiceStatus::Invalid => { - error!(target: "consensus::auto", 
?fcu_response, "Forkchoice update returned invalid response"); - return None - } - ForkchoiceStatus::Syncing => { - debug!(target: "consensus::auto", ?fcu_response, "Forkchoice update returned SYNCING, waiting for VALID"); - // wait for the next fork choice update - continue - } - } - } - Err(err) => { - error!(target: "consensus::auto", %err, "Autoseal fork choice update failed"); - return None - } - } - } - - // update canon chain for rpc - client.set_canonical_head(new_header.clone()); - client.set_safe(new_header.clone()); - client.set_finalized(new_header.clone()); - } - Err(err) => { - warn!(target: "consensus::auto", %err, "failed to execute block") - } - } - - events - })); - } - - if let Some(mut fut) = this.insert_task.take() { - match fut.poll_unpin(cx) { - Poll::Ready(events) => { - this.pipe_line_events = events; - } - Poll::Pending => { - this.insert_task = Some(fut); - break - } - } - } - } - - Poll::Pending - } -} - -impl - std::fmt::Debug for MiningTask -{ - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("MiningTask").finish_non_exhaustive() - } -} diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 1abc09b2a44b..d926fc09c356 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -32,8 +32,9 @@ reth-chainspec = { workspace = true, optional = true } # ethereum alloy-primitives.workspace = true -alloy-rpc-types-engine.workspace = true +alloy-rpc-types-engine = { workspace = true, features = ["std"] } alloy-eips.workspace = true +alloy-consensus.workspace = true # async tokio = { workspace = true, features = ["sync"] } @@ -73,15 +74,16 @@ reth-exex-types.workspace = true reth-prune-types.workspace = true reth-chainspec.workspace = true alloy-genesis.workspace = true - assert_matches.workspace = true [features] optimism = [ + "reth-blockchain-tree/optimism", "reth-chainspec", + "reth-db-api/optimism", + "reth-db/optimism", + "reth-downloaders/optimism", "reth-primitives/optimism", "reth-provider/optimism", - "reth-blockchain-tree/optimism", - "reth-db/optimism", - "reth-db-api/optimism", + "reth-downloaders/optimism", ] diff --git a/crates/consensus/beacon/src/engine/forkchoice.rs b/crates/consensus/beacon/src/engine/forkchoice.rs index 7e49714ba375..a9d9301738f7 100644 --- a/crates/consensus/beacon/src/engine/forkchoice.rs +++ b/crates/consensus/beacon/src/engine/forkchoice.rs @@ -8,7 +8,6 @@ pub struct ForkchoiceStateTracker { /// /// Caution: this can be invalid. latest: Option, - /// Tracks the latest forkchoice state that we received to which we need to sync. last_syncing: Option, /// The latest valid forkchoice state that we received and processed as valid. 
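The tracker keeps three views of the forkchoice stream: every update lands in `latest`, while `last_valid` and `last_syncing` only advance for their respective status. A sketch of that update rule, inferred from the tests added further down (`ForkchoiceStateTracker` and `ReceivedForkchoiceState` are crate-private; field names follow those tests):

fn set_latest(tracker: &mut ForkchoiceStateTracker, state: ForkchoiceState, status: ForkchoiceStatus) {
    match status {
        // valid and syncing states are remembered separately...
        ForkchoiceStatus::Valid => tracker.last_valid = Some(state),
        ForkchoiceStatus::Syncing => tracker.last_syncing = Some(state),
        // ...while an invalid update only lands in `latest`
        ForkchoiceStatus::Invalid => {}
    }
    tracker.latest = Some(ReceivedForkchoiceState { state, status });
}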
@@ -48,19 +47,19 @@ impl ForkchoiceStateTracker {
     /// Returns whether the latest received FCU is valid: [`ForkchoiceStatus::Valid`]
     #[allow(dead_code)]
     pub(crate) fn is_latest_valid(&self) -> bool {
-        self.latest_status().map(|s| s.is_valid()).unwrap_or(false)
+        self.latest_status().map_or(false, |s| s.is_valid())
     }
 
     /// Returns whether the latest received FCU is syncing: [`ForkchoiceStatus::Syncing`]
     #[allow(dead_code)]
     pub(crate) fn is_latest_syncing(&self) -> bool {
-        self.latest_status().map(|s| s.is_syncing()).unwrap_or(false)
+        self.latest_status().map_or(false, |s| s.is_syncing())
     }
 
     /// Returns whether the latest received FCU is invalid: [`ForkchoiceStatus::Invalid`]
     #[allow(dead_code)]
     pub(crate) fn is_latest_invalid(&self) -> bool {
-        self.latest_status().map(|s| s.is_invalid()).unwrap_or(false)
+        self.latest_status().map_or(false, |s| s.is_invalid())
     }
 
     /// Returns the last valid head hash.
@@ -75,32 +74,28 @@ impl ForkchoiceStateTracker {
         self.last_syncing.as_ref().map(|s| s.head_block_hash)
     }
 
-    /// Returns the latest received `ForkchoiceState`.
+    /// Returns the latest received [`ForkchoiceState`].
     ///
     /// Caution: this can be invalid.
     pub const fn latest_state(&self) -> Option<ForkchoiceState> {
         self.last_valid
     }
 
-    /// Returns the last valid `ForkchoiceState`.
+    /// Returns the last valid [`ForkchoiceState`].
     pub const fn last_valid_state(&self) -> Option<ForkchoiceState> {
         self.last_valid
     }
 
     /// Returns the last valid finalized hash.
     ///
-    /// This will return [`None`], if either there is no valid finalized forkchoice state, or the
-    /// finalized hash for the latest valid forkchoice state is zero.
+    /// This will return [`None`] if:
+    /// - there is no valid finalized forkchoice state, or
+    /// - the finalized hash for the latest valid forkchoice state is zero.
     #[inline]
     pub fn last_valid_finalized(&self) -> Option<B256> {
-        self.last_valid.and_then(|state| {
-            // if the hash is zero then we should act like there is no finalized hash
-            if state.finalized_block_hash.is_zero() {
-                None
-            } else {
-                Some(state.finalized_block_hash)
-            }
-        })
+        self.last_valid
+            .filter(|state| !state.finalized_block_hash.is_zero())
+            .map(|state| state.finalized_block_hash)
     }
 
     /// Returns the last received `ForkchoiceState` to which we need to sync.
@@ -110,18 +105,14 @@ impl ForkchoiceStateTracker {
 
     /// Returns the sync target finalized hash.
     ///
-    /// This will return [`None`], if either there is no sync target forkchoice state, or the
-    /// finalized hash for the sync target forkchoice state is zero.
+    /// This will return [`None`] if:
+    /// - there is no sync target forkchoice state, or
+    /// - the finalized hash for the sync target forkchoice state is zero.
     #[inline]
     pub fn sync_target_finalized(&self) -> Option<B256> {
-        self.last_syncing.and_then(|state| {
-            // if the hash is zero then we should act like there is no finalized hash
-            if state.finalized_block_hash.is_zero() {
-                None
-            } else {
-                Some(state.finalized_block_hash)
-            }
-        })
+        self.last_syncing
+            .filter(|state| !state.finalized_block_hash.is_zero())
+            .map(|state| state.finalized_block_hash)
     }
 
     /// Returns true if no forkchoice state has been received yet.
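The combinator rewrites in the hunks above are behavior-preserving; a standalone check of both identities (plain `std`, runnable as-is):

fn main() {
    let state: Option<u64> = Some(7);

    // `map(..).unwrap_or(false)` is exactly `map_or(false, ..)`
    assert_eq!(state.map(|s| s > 3).unwrap_or(false), state.map_or(false, |s| s > 3));

    // `and_then` with an inner zero-guard collapses to `filter(..)`
    let via_and_then = state.and_then(|h| if h == 0 { None } else { Some(h) });
    let via_filter = state.filter(|h| *h != 0);
    assert_eq!(via_and_then, via_filter);
}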
@@ -222,3 +213,229 @@ impl AsRef for ForkchoiceStateHash { } } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_forkchoice_state_tracker_set_latest_valid() { + let mut tracker = ForkchoiceStateTracker::default(); + + // Latest state is None + assert!(tracker.latest_status().is_none()); + + // Create a valid ForkchoiceState + let state = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::from_slice(&[3; 32]), + }; + let status = ForkchoiceStatus::Valid; + + tracker.set_latest(state, status); + + // Assert that the latest state is set + assert!(tracker.latest.is_some()); + assert_eq!(tracker.latest.as_ref().unwrap().state, state); + + // Assert that last valid state is updated + assert!(tracker.last_valid.is_some()); + assert_eq!(tracker.last_valid.as_ref().unwrap(), &state); + + // Assert that last syncing state is None + assert!(tracker.last_syncing.is_none()); + + // Test when there is a latest status and it is valid + assert_eq!(tracker.latest_status(), Some(ForkchoiceStatus::Valid)); + } + + #[test] + fn test_forkchoice_state_tracker_set_latest_syncing() { + let mut tracker = ForkchoiceStateTracker::default(); + + // Create a syncing ForkchoiceState + let state = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::from_slice(&[0; 32]), // Zero to simulate not finalized + }; + let status = ForkchoiceStatus::Syncing; + + tracker.set_latest(state, status); + + // Assert that the latest state is set + assert!(tracker.latest.is_some()); + assert_eq!(tracker.latest.as_ref().unwrap().state, state); + + // Assert that last valid state is None since the status is syncing + assert!(tracker.last_valid.is_none()); + + // Assert that last syncing state is updated + assert!(tracker.last_syncing.is_some()); + assert_eq!(tracker.last_syncing.as_ref().unwrap(), &state); + + // Test when there is a latest status and it is syncing + assert_eq!(tracker.latest_status(), Some(ForkchoiceStatus::Syncing)); + } + + #[test] + fn test_forkchoice_state_tracker_set_latest_invalid() { + let mut tracker = ForkchoiceStateTracker::default(); + + // Create an invalid ForkchoiceState + let state = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::from_slice(&[3; 32]), + }; + let status = ForkchoiceStatus::Invalid; + + tracker.set_latest(state, status); + + // Assert that the latest state is set + assert!(tracker.latest.is_some()); + assert_eq!(tracker.latest.as_ref().unwrap().state, state); + + // Assert that last valid state is None since the status is invalid + assert!(tracker.last_valid.is_none()); + + // Assert that last syncing state is None since the status is invalid + assert!(tracker.last_syncing.is_none()); + + // Test when there is a latest status and it is invalid + assert_eq!(tracker.latest_status(), Some(ForkchoiceStatus::Invalid)); + } + + #[test] + fn test_forkchoice_state_tracker_sync_target() { + let mut tracker = ForkchoiceStateTracker::default(); + + // Test when there is no last syncing state (should return None) + assert!(tracker.sync_target().is_none()); + + // Set a last syncing forkchoice state + let state = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::from_slice(&[3; 32]), + }; + tracker.last_syncing = 
Some(state); + + // Test when the last syncing state is set (should return the head block hash) + assert_eq!(tracker.sync_target(), Some(B256::from_slice(&[1; 32]))); + } + + #[test] + fn test_forkchoice_state_tracker_last_valid_finalized() { + let mut tracker = ForkchoiceStateTracker::default(); + + // No valid finalized state (should return None) + assert!(tracker.last_valid_finalized().is_none()); + + // Valid finalized state, but finalized hash is zero (should return None) + let zero_finalized_state = ForkchoiceState { + head_block_hash: B256::ZERO, + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, // Zero finalized hash + }; + tracker.last_valid = Some(zero_finalized_state); + assert!(tracker.last_valid_finalized().is_none()); + + // Valid finalized state with non-zero finalized hash (should return finalized hash) + let valid_finalized_state = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::from_slice(&[123; 32]), // Non-zero finalized hash + }; + tracker.last_valid = Some(valid_finalized_state); + assert_eq!(tracker.last_valid_finalized(), Some(B256::from_slice(&[123; 32]))); + + // Reset the last valid state to None + tracker.last_valid = None; + assert!(tracker.last_valid_finalized().is_none()); + } + + #[test] + fn test_forkchoice_state_tracker_sync_target_finalized() { + let mut tracker = ForkchoiceStateTracker::default(); + + // No sync target state (should return None) + assert!(tracker.sync_target_finalized().is_none()); + + // Sync target state with finalized hash as zero (should return None) + let zero_finalized_sync_target = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::ZERO, // Zero finalized hash + }; + tracker.last_syncing = Some(zero_finalized_sync_target); + assert!(tracker.sync_target_finalized().is_none()); + + // Sync target state with non-zero finalized hash (should return the hash) + let valid_sync_target = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::from_slice(&[22; 32]), // Non-zero finalized hash + }; + tracker.last_syncing = Some(valid_sync_target); + assert_eq!(tracker.sync_target_finalized(), Some(B256::from_slice(&[22; 32]))); + + // Reset the last sync target state to None + tracker.last_syncing = None; + assert!(tracker.sync_target_finalized().is_none()); + } + + #[test] + fn test_forkchoice_state_tracker_is_empty() { + let mut forkchoice = ForkchoiceStateTracker::default(); + + // Initially, no forkchoice state has been received, so it should be empty. + assert!(forkchoice.is_empty()); + + // After setting a forkchoice state, it should no longer be empty. + forkchoice.set_latest(ForkchoiceState::default(), ForkchoiceStatus::Valid); + assert!(!forkchoice.is_empty()); + + // Reset the forkchoice latest, it should be empty again. 
+ forkchoice.latest = None; + assert!(forkchoice.is_empty()); + } + + #[test] + fn test_forkchoice_state_hash_find() { + // Define example hashes + let head_hash = B256::random(); + let safe_hash = B256::random(); + let finalized_hash = B256::random(); + let non_matching_hash = B256::random(); + + // Create a ForkchoiceState with specific hashes + let state = ForkchoiceState { + head_block_hash: head_hash, + safe_block_hash: safe_hash, + finalized_block_hash: finalized_hash, + }; + + // Test finding the head hash + assert_eq!( + ForkchoiceStateHash::find(&state, head_hash), + Some(ForkchoiceStateHash::Head(head_hash)) + ); + + // Test finding the safe hash + assert_eq!( + ForkchoiceStateHash::find(&state, safe_hash), + Some(ForkchoiceStateHash::Safe(safe_hash)) + ); + + // Test finding the finalized hash + assert_eq!( + ForkchoiceStateHash::find(&state, finalized_hash), + Some(ForkchoiceStateHash::Finalized(finalized_hash)) + ); + + // Test with a hash that doesn't match any of the hashes in ForkchoiceState + assert_eq!(ForkchoiceStateHash::find(&state, non_matching_hash), None); + } +} diff --git a/crates/consensus/beacon/src/engine/handle.rs b/crates/consensus/beacon/src/engine/handle.rs index 4aafc6e07c1c..f8840cf78abb 100644 --- a/crates/consensus/beacon/src/engine/handle.rs +++ b/crates/consensus/beacon/src/engine/handle.rs @@ -8,7 +8,7 @@ use alloy_rpc_types_engine::{ ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, }; use futures::TryFutureExt; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes}; use reth_errors::RethResult; use reth_tokio_util::{EventSender, EventStream}; use tokio::sync::{mpsc::UnboundedSender, oneshot}; @@ -60,9 +60,10 @@ where &self, state: ForkchoiceState, payload_attrs: Option, + version: EngineApiMessageVersion, ) -> Result { Ok(self - .send_fork_choice_updated(state, payload_attrs) + .send_fork_choice_updated(state, payload_attrs, version) .map_err(|_| BeaconForkChoiceUpdateError::EngineUnavailable) .await?? .await?) 
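With the added parameter, callers of the handle now choose the Engine API version explicitly. A minimal call-site sketch (`update_forkchoice` is a hypothetical helper; `EngineApiMessageVersion::default()` mirrors what the test utilities later in this diff pass):

use alloy_rpc_types_engine::{ForkchoiceState, ForkchoiceUpdated};
use reth_beacon_consensus::{BeaconConsensusEngineHandle, BeaconForkChoiceUpdateError};
use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes};

/// Move the head without building a payload; the version now travels with the message.
async fn update_forkchoice<Engine: EngineTypes>(
    handle: &BeaconConsensusEngineHandle<Engine>,
    state: ForkchoiceState,
) -> Result<ForkchoiceUpdated, BeaconForkChoiceUpdateError> {
    handle.fork_choice_updated(state, None, EngineApiMessageVersion::default()).await
}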
@@ -74,12 +75,14 @@ where
         &self,
         state: ForkchoiceState,
         payload_attrs: Option<Engine::PayloadAttributes>,
+        version: EngineApiMessageVersion,
     ) -> oneshot::Receiver<RethResult<OnForkChoiceUpdated>> {
         let (tx, rx) = oneshot::channel();
         let _ = self.to_engine.send(BeaconEngineMessage::ForkchoiceUpdated {
             state,
             payload_attrs,
             tx,
+            version,
         });
         rx
     }
diff --git a/crates/consensus/beacon/src/engine/invalid_headers.rs b/crates/consensus/beacon/src/engine/invalid_headers.rs
index 8a1c95d73ceb..b8d80b0ceeae 100644
--- a/crates/consensus/beacon/src/engine/invalid_headers.rs
+++ b/crates/consensus/beacon/src/engine/invalid_headers.rs
@@ -1,9 +1,10 @@
+use alloy_consensus::Header;
 use alloy_primitives::B256;
 use reth_metrics::{
     metrics::{Counter, Gauge},
     Metrics,
 };
-use reth_primitives::{Header, SealedHeader};
+use reth_primitives::SealedHeader;
 use schnellru::{ByLength, LruMap};
 use std::sync::Arc;
 use tracing::warn;
@@ -106,14 +107,12 @@ struct InvalidHeaderCacheMetrics {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use alloy_primitives::Sealable;
 
     #[test]
     fn test_hit_eviction() {
         let mut cache = InvalidHeaderCache::new(10);
-        let sealed = Header::default().seal_slow();
-        let (header, seal) = sealed.into_parts();
-        let header = SealedHeader::new(header, seal);
+        let header = Header::default();
+        let header = SealedHeader::seal(header);
 
         cache.insert(header.clone());
         assert_eq!(cache.headers.get(&header.hash()).unwrap().hit_count, 0);
diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/consensus/beacon/src/engine/message.rs
index e33decbd848f..fa7457c1225d 100644
--- a/crates/consensus/beacon/src/engine/message.rs
+++ b/crates/consensus/beacon/src/engine/message.rs
@@ -4,7 +4,7 @@ use alloy_rpc_types_engine::{
     ForkchoiceUpdateError, ForkchoiceUpdated, PayloadId, PayloadStatus, PayloadStatusEnum,
 };
 use futures::{future::Either, FutureExt};
-use reth_engine_primitives::EngineTypes;
+use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes};
 use reth_errors::RethResult;
 use reth_payload_primitives::PayloadBuilderError;
 use std::{
@@ -156,6 +158,8 @@ pub enum BeaconEngineMessage<Engine: EngineTypes> {
         state: ForkchoiceState,
         /// The payload attributes for block building.
         payload_attrs: Option<Engine::PayloadAttributes>,
+        /// The Engine API version.
+        version: EngineApiMessageVersion,
         /// The sender for returning forkchoice updated result.
        tx: oneshot::Sender<RethResult<OnForkChoiceUpdated>>,
    },
diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs
index 2363b9078408..03f7bf08b1ee 100644
--- a/crates/consensus/beacon/src/engine/mod.rs
+++ b/crates/consensus/beacon/src/engine/mod.rs
@@ -1,4 +1,5 @@
-use alloy_eips::merge::EPOCH_SLOTS;
+use alloy_consensus::Header;
+use alloy_eips::{merge::EPOCH_SLOTS, BlockNumHash};
 use alloy_primitives::{BlockNumber, B256};
 use alloy_rpc_types_engine::{
     ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, PayloadStatus, PayloadStatusEnum,
@@ -10,17 +11,17 @@ use reth_blockchain_tree_api::{
     error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind},
     BlockStatus, BlockValidationKind, BlockchainTreeEngine, CanonicalOutcome, InsertPayloadOk,
 };
-use reth_engine_primitives::{EngineTypes, PayloadTypes};
+use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes, PayloadTypes};
 use reth_errors::{BlockValidationError, ProviderResult, RethError, RethResult};
 use reth_network_p2p::{
     sync::{NetworkSyncUpdater, SyncState},
-    BlockClient,
+    EthBlockClient,
 };
 use reth_node_types::NodeTypesWithEngine;
 use reth_payload_builder::PayloadBuilderHandle;
 use reth_payload_primitives::{PayloadAttributes, PayloadBuilder, PayloadBuilderAttributes};
 use reth_payload_validator::ExecutionPayloadValidator;
-use reth_primitives::{BlockNumHash, Head, Header, SealedBlock, SealedHeader};
+use reth_primitives::{Head, SealedBlock, SealedHeader};
 use reth_provider::{
     providers::ProviderNodeTypes, BlockIdReader, BlockReader, BlockSource, CanonChainTracker,
     ChainSpecProvider, ProviderError, StageCheckpointReader,
@@ -174,7 +175,7 @@ type PendingForkchoiceUpdate<PayloadAttributes> =
 pub struct BeaconConsensusEngine<N, BT, Client>
 where
     N: EngineNodeTypes,
-    Client: BlockClient,
+    Client: EthBlockClient,
     BT: BlockchainTreeEngine
         + BlockReader
         + BlockIdReader
@@ -237,7 +238,7 @@ where
         + StageCheckpointReader
         + ChainSpecProvider<ChainSpec = N::ChainSpec>
         + 'static,
-    Client: BlockClient + 'static,
+    Client: EthBlockClient + 'static,
 {
     /// Create a new instance of the [`BeaconConsensusEngine`].
     #[allow(clippy::too_many_arguments)]
@@ -428,7 +429,12 @@ where
         } else if let Some(attrs) = attrs {
             // the CL requested to build a new payload on top of this new VALID head
             let head = outcome.into_header().unseal();
-            self.process_payload_attributes(attrs, head, state)
+            self.process_payload_attributes(
+                attrs,
+                head,
+                state,
+                EngineApiMessageVersion::default(),
+            )
         } else {
             OnForkChoiceUpdated::valid(PayloadStatus::new(
                 PayloadStatusEnum::Valid,
@@ -1160,6 +1166,7 @@ where
         attrs: <N::Engine as PayloadTypes>::PayloadAttributes,
         head: Header,
         state: ForkchoiceState,
+        version: EngineApiMessageVersion,
     ) -> OnForkChoiceUpdated {
         // 7. Client software MUST ensure that payloadAttributes.timestamp is greater than timestamp
         //    of a block referenced by forkchoiceState.headBlockHash. If this condition isn't held
@@ -1177,6 +1184,7 @@
         match <<N::Engine as PayloadTypes>::PayloadBuilderAttributes as PayloadBuilderAttributes>::try_new(
             state.head_block_hash,
             attrs,
+            version as u8
         ) {
             Ok(attributes) => {
                 // send the payload to the builder and return the receiver for the pending payload
@@ -1792,7 +1800,7 @@ where
 impl<N, BT, Client> Future for BeaconConsensusEngine<N, BT, Client>
 where
     N: EngineNodeTypes,
-    Client: BlockClient + 'static,
+    Client: EthBlockClient + 'static,
     BT: BlockchainTreeEngine
         + BlockReader
         + BlockIdReader
@@ -1855,7 +1863,12 @@
             // sensitive, hence they are polled first.
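            // Note: this legacy engine service still ignores the version carried on
            // the message (bound as `_version` in the arm below); the payload
            // attributes path above substitutes `EngineApiMessageVersion::default()`.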
if let Poll::Ready(Some(msg)) = this.engine_message_stream.poll_next_unpin(cx) { match msg { - BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => { + BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx, + version: _version, + } => { this.on_forkchoice_updated(state, payload_attrs, tx); } BeaconEngineMessage::NewPayload { payload, sidecar, tx } => { diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index 9426ca19712f..b6e75f802e30 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -8,7 +8,7 @@ use alloy_primitives::{BlockNumber, B256}; use futures::FutureExt; use reth_network_p2p::{ full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, - BlockClient, + EthBlockClient, }; use reth_primitives::SealedBlock; use reth_provider::providers::ProviderNodeTypes; @@ -34,7 +34,7 @@ use tracing::trace; pub(crate) struct EngineSyncController where N: ProviderNodeTypes, - Client: BlockClient, + Client: EthBlockClient, { /// A downloader that can download full blocks from the network. full_block_client: FullBlockClient, @@ -64,7 +64,7 @@ where impl EngineSyncController where N: ProviderNodeTypes, - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { /// Create a new instance pub(crate) fn new( @@ -410,12 +410,12 @@ impl PipelineState { #[cfg(test)] mod tests { use super::*; - use alloy_primitives::Sealable; + use alloy_consensus::Header; use assert_matches::assert_matches; use futures::poll; use reth_chainspec::{ChainSpec, ChainSpecBuilder, MAINNET}; use reth_network_p2p::{either::Either, test_utils::TestFullBlockClient}; - use reth_primitives::{BlockBody, Header, SealedHeader}; + use reth_primitives::{BlockBody, SealedHeader}; use reth_provider::{ test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, ExecutionOutcome, @@ -522,7 +522,7 @@ mod tests { ) -> EngineSyncController> where N: ProviderNodeTypes, - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { let client = self .client @@ -599,9 +599,7 @@ mod tests { header.parent_hash = hash; header.number += 1; header.timestamp += 1; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - sealed_header = SealedHeader::new(header, seal); + sealed_header = SealedHeader::seal(header); client.insert(sealed_header.clone(), body.clone()); } } @@ -617,14 +615,12 @@ mod tests { ); let client = TestFullBlockClient::default(); - let sealed = Header { + let header = Header { base_fee_per_gas: Some(7), gas_limit: chain_spec.max_gas_limit, ..Default::default() - } - .seal_slow(); - let (header, seal) = sealed.into_parts(); - let header = SealedHeader::new(header, seal); + }; + let header = SealedHeader::seal(header); insert_headers_into_client(&client, header, 0..10); // set up a pipeline diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 912f0a871bf2..0ad4c595f1bd 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -4,7 +4,7 @@ use crate::{ BeaconConsensusEngineError, BeaconConsensusEngineHandle, BeaconForkChoiceUpdateError, BeaconOnNewPayloadError, EthBeaconConsensus, MIN_BLOCKS_FOR_PIPELINE_RUN, }; -use alloy_primitives::{BlockNumber, Sealable, B256}; +use alloy_primitives::{BlockNumber, B256}; use alloy_rpc_types_engine::{ ExecutionPayload, ExecutionPayloadSidecar, 
ForkchoiceState, ForkchoiceUpdated, PayloadStatus, }; @@ -19,11 +19,14 @@ use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; +use reth_engine_primitives::EngineApiMessageVersion; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_evm::{either::Either, test_utils::MockExecutorProvider}; use reth_evm_ethereum::execute::EthExecutorProvider; use reth_exex_types::FinishedExExHeight; -use reth_network_p2p::{sync::NoopSyncStateUpdater, test_utils::NoopFullBlockClient, BlockClient}; +use reth_network_p2p::{ + sync::NoopSyncStateUpdater, test_utils::NoopFullBlockClient, EthBlockClient, +}; use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::SealedHeader; use reth_provider::{ @@ -93,7 +96,9 @@ impl TestEnv { &self, state: ForkchoiceState, ) -> Result { - self.engine_handle.fork_choice_updated(state, None).await + self.engine_handle + .fork_choice_updated(state, None, EngineApiMessageVersion::default()) + .await } /// Sends the `ForkchoiceUpdated` message to the consensus engine and retries if the engine @@ -103,7 +108,10 @@ impl TestEnv { state: ForkchoiceState, ) -> Result { loop { - let result = self.engine_handle.fork_choice_updated(state, None).await?; + let result = self + .engine_handle + .fork_choice_updated(state, None, EngineApiMessageVersion::default()) + .await?; if !result.is_syncing() { return Ok(result) } @@ -231,7 +239,7 @@ impl TestConsensusEngineBuilder { client: Client, ) -> NetworkedTestConsensusEngineBuilder where - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { NetworkedTestConsensusEngineBuilder { base_config: self, client: Some(client) } } @@ -258,7 +266,7 @@ pub struct NetworkedTestConsensusEngineBuilder { impl NetworkedTestConsensusEngineBuilder where - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { /// Set the pipeline execution outputs to use for the test consensus engine. #[allow(dead_code)] @@ -313,7 +321,7 @@ where client: ClientType, ) -> NetworkedTestConsensusEngineBuilder where - ClientType: BlockClient + 'static, + ClientType: EthBlockClient + 'static, { NetworkedTestConsensusEngineBuilder { base_config: self.base_config, client: Some(client) } } @@ -394,9 +402,8 @@ where BlockchainTree::new(externals, BlockchainTreeConfig::new(1, 2, 3, 2)) .expect("failed to create tree"), )); - let sealed = self.base_config.chain_spec.genesis_header().clone().seal_slow(); - let (header, seal) = sealed.into_parts(); - let genesis_block = SealedHeader::new(header, seal); + let header = self.base_config.chain_spec.genesis_header().clone(); + let genesis_block = SealedHeader::seal(header); let blockchain_provider = BlockchainProvider::with_blocks( provider_factory.clone(), @@ -444,7 +451,7 @@ pub fn spawn_consensus_engine( engine: TestBeaconConsensusEngine, ) -> oneshot::Receiver> where - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { let (tx, rx) = oneshot::channel(); tokio::spawn(async move { diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 1070bbdbc0f5..62357b4b9b12 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -1,12 +1,10 @@ //! Collection of methods for block validation. 
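As the test-utils hunk shows, `fork_choice_updated` on the engine handle now takes the Engine API message version explicitly. A before/after sketch (`handle` and `state` assumed in scope):

```rust
// Before this diff:
// let result = handle.fork_choice_updated(state, None).await?;

// After: the version travels with the request.
let result = handle
    .fork_choice_updated(state, None, EngineApiMessageVersion::default())
    .await?;
```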
-use alloy_consensus::constants::MAXIMUM_EXTRA_DATA_SIZE; +use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, Header}; +use alloy_eips::eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_consensus::ConsensusError; -use reth_primitives::{ - constants::eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, - EthereumHardfork, GotExpected, Header, SealedBlock, SealedHeader, -}; +use reth_primitives::{BlockBody, EthereumHardfork, GotExpected, SealedBlock, SealedHeader}; use revm_primitives::calc_excess_blob_gas; /// Gas used needs to be less than gas limit. Gas used is going to be checked after execution. @@ -75,6 +73,49 @@ pub fn validate_cancun_gas(block: &SealedBlock) -> Result<(), ConsensusError> { Ok(()) } +/// Ensures the block response data matches the header. +/// +/// This ensures the body response items match the header's hashes: +/// - ommer hash +/// - transaction root +/// - withdrawals root +pub fn validate_body_against_header( + body: &BlockBody, + header: &SealedHeader, +) -> Result<(), ConsensusError> { + let ommers_hash = body.calculate_ommers_root(); + if header.ommers_hash != ommers_hash { + return Err(ConsensusError::BodyOmmersHashDiff( + GotExpected { got: ommers_hash, expected: header.ommers_hash }.into(), + )) + } + + let tx_root = body.calculate_tx_root(); + if header.transactions_root != tx_root { + return Err(ConsensusError::BodyTransactionRootDiff( + GotExpected { got: tx_root, expected: header.transactions_root }.into(), + )) + } + + match (header.withdrawals_root, &body.withdrawals) { + (Some(header_withdrawals_root), Some(withdrawals)) => { + let withdrawals = withdrawals.as_slice(); + let withdrawals_root = reth_primitives::proofs::calculate_withdrawals_root(withdrawals); + if withdrawals_root != header_withdrawals_root { + return Err(ConsensusError::BodyWithdrawalsRootDiff( + GotExpected { got: withdrawals_root, expected: header_withdrawals_root }.into(), + )) + } + } + (None, None) => { + // this is ok because we assume the fork is not active in this case + } + _ => return Err(ConsensusError::WithdrawalsRootUnexpected), + } + + Ok(()) +} + /// Validate a block without regard for state: /// /// - Compares the ommer hash in the block header to the block body @@ -277,16 +318,18 @@ pub fn validate_against_parent_4844( mod tests { use super::*; use alloy_consensus::{TxEip4844, EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; + use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, + }; use alloy_primitives::{ - hex_literal::hex, Address, BlockHash, BlockNumber, Bytes, Parity, Sealable, U256, + hex_literal::hex, Address, BlockHash, BlockNumber, Bytes, PrimitiveSignature as Signature, + U256, }; use mockall::mock; use rand::Rng; use reth_chainspec::ChainSpecBuilder; - use reth_primitives::{ - proofs, Account, BlockBody, BlockHashOrNumber, Signature, Transaction, TransactionSigned, - Withdrawal, Withdrawals, - }; + use reth_primitives::{proofs, Account, BlockBody, Transaction, TransactionSigned}; use reth_storage_api::{ errors::provider::ProviderResult, AccountReader, HeaderProvider, WithdrawalsProvider, }; @@ -405,7 +448,7 @@ mod tests { blob_versioned_hashes: std::iter::repeat_with(|| rng.gen()).take(num_blobs).collect(), }); - let signature = Signature::new(U256::default(), U256::default(), Parity::Parity(true)); + let signature = Signature::new(U256::default(), U256::default(), true); TransactionSigned::from_transaction_and_signature(request, signature) } @@ -450,12 +493,9 @@ 
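The new `validate_body_against_header` helper checks a response body's ommers hash, transaction root, and withdrawals root against a trusted header, so a bad body can be rejected before any effort is spent on sender recovery or execution. A minimal usage sketch, assuming the caller already holds both pieces:

```rust
use reth_consensus::ConsensusError;
use reth_consensus_common::validation::validate_body_against_header;
use reth_primitives::{BlockBody, SealedHeader};

/// Sketch: reject a downloaded body whose roots do not match the header it
/// claims to belong to.
fn accept_body(body: &BlockBody, header: &SealedHeader) -> Result<(), ConsensusError> {
    validate_body_against_header(body, header)
}
```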
mod tests { let ommers = Vec::new(); let transactions = Vec::new(); - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - ( SealedBlock { - header: SealedHeader::new(header, seal), + header: SealedHeader::seal(header), body: BlockBody { transactions, ommers, withdrawals: None }, }, parent, @@ -474,15 +514,13 @@ mod tests { .collect(), ); - let sealed = Header { + let header = Header { withdrawals_root: Some(proofs::calculate_withdrawals_root(&withdrawals)), ..Default::default() - } - .seal_slow(); - let (header, seal) = sealed.into_parts(); + }; SealedBlock { - header: SealedHeader::new(header, seal), + header: SealedHeader::seal(header), body: BlockBody { withdrawals: Some(withdrawals), ..Default::default() }, } }; @@ -513,16 +551,14 @@ mod tests { // create a tx with 10 blobs let transaction = mock_blob_tx(1, 10); - let sealed = Header { + let header = Header { base_fee_per_gas: Some(1337), withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), blob_gas_used: Some(1), transactions_root: proofs::calculate_transaction_root(&[transaction.clone()]), ..Default::default() - } - .seal_slow(); - let (header, seal) = sealed.into_parts(); - let header = SealedHeader::new(header, seal); + }; + let header = SealedHeader::seal(header); let body = BlockBody { transactions: vec![transaction], diff --git a/crates/consensus/consensus/Cargo.toml b/crates/consensus/consensus/Cargo.toml index 2faf3f2ac719..d120d268bd9a 100644 --- a/crates/consensus/consensus/Cargo.toml +++ b/crates/consensus/consensus/Cargo.toml @@ -17,6 +17,7 @@ reth-primitives.workspace = true # ethereum alloy-eips.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true # misc auto_impl.workspace = true @@ -25,10 +26,9 @@ derive_more.workspace = true [features] default = ["std"] std = [ - "reth-primitives/std", - "alloy-primitives/std", - "alloy-eips/std" -] -test-utils = [ - "reth-primitives/test-utils" + "reth-primitives/std", + "alloy-primitives/std", + "alloy-eips/std", + "alloy-consensus/std", ] +test-utils = ["reth-primitives/test-utils"] diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index 4bf5da3b152f..a8f0a01f22ba 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -12,10 +12,11 @@ extern crate alloc; use alloc::{fmt::Debug, vec::Vec}; +use alloy_consensus::Header; use alloy_eips::eip7685::Requests; use alloy_primitives::{BlockHash, BlockNumber, Bloom, B256, U256}; use reth_primitives::{ - constants::MINIMUM_GAS_LIMIT, BlockWithSenders, GotExpected, GotExpectedBoxed, Header, + constants::MINIMUM_GAS_LIMIT, BlockBody, BlockWithSenders, GotExpected, GotExpectedBoxed, InvalidTransactionError, Receipt, SealedBlock, SealedHeader, }; @@ -44,11 +45,11 @@ impl<'a> PostExecutionInput<'a> { /// Consensus is a protocol that chooses canonical chain. #[auto_impl::auto_impl(&, Arc)] -pub trait Consensus: Debug + Send + Sync { +pub trait Consensus: Debug + Send + Sync { /// Validate if header is correct and follows consensus specification. /// /// This is called on standalone header to check if all hashes are correct. - fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError>; + fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError>; /// Validate that the header information regarding parent are correct. /// This checks the block number, timestamp, basefee and gas limit increment. 
@@ -61,8 +62,8 @@ pub trait Consensus: Debug + Send + Sync { /// Note: Validating header against its parent does not include other Consensus validations. fn validate_header_against_parent( &self, - header: &SealedHeader, - parent: &SealedHeader, + header: &SealedHeader, + parent: &SealedHeader, ) -> Result<(), ConsensusError>; /// Validates the given headers @@ -71,7 +72,13 @@ pub trait Consensus: Debug + Send + Sync { /// on its own and valid against its parent. /// /// Note: this expects that the headers are in natural order (ascending block number) - fn validate_header_range(&self, headers: &[SealedHeader]) -> Result<(), HeaderConsensusError> { + fn validate_header_range( + &self, + headers: &[SealedHeader], + ) -> Result<(), HeaderConsensusError> + where + H: Clone, + { if let Some((initial_header, remaining_headers)) = headers.split_first() { self.validate_header(initial_header) .map_err(|e| HeaderConsensusError(e, initial_header.clone()))?; @@ -94,10 +101,17 @@ pub trait Consensus: Debug + Send + Sync { /// Note: validating headers with TD does not include other Consensus validation. fn validate_header_with_total_difficulty( &self, - header: &Header, + header: &H, total_difficulty: U256, ) -> Result<(), ConsensusError>; + /// Ensures that body field values match the header. + fn validate_body_against_header( + &self, + body: &B, + header: &SealedHeader, + ) -> Result<(), ConsensusError>; + /// Validate a block disregarding world state, i.e. things that can be checked before sender /// recovery and execution. /// @@ -107,7 +121,8 @@ pub trait Consensus: Debug + Send + Sync { /// **This should not be called for the genesis block**. /// /// Note: validating blocks does not include other validations of the Consensus - fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError>; + fn validate_block_pre_execution(&self, block: &SealedBlock) + -> Result<(), ConsensusError>; /// Validate a block considering world state, i.e. things that can not be checked before /// execution. @@ -407,4 +422,4 @@ impl From for ConsensusError { /// `HeaderConsensusError` combines a `ConsensusError` with the `SealedHeader` it relates to. #[derive(derive_more::Display, derive_more::Error, Debug)] #[display("Consensus error: {_0}, Invalid header: {_1:?}")] -pub struct HeaderConsensusError(ConsensusError, SealedHeader); +pub struct HeaderConsensusError(ConsensusError, SealedHeader); diff --git a/crates/consensus/consensus/src/noop.rs b/crates/consensus/consensus/src/noop.rs index 53bdb72afb2d..9b72f89b176a 100644 --- a/crates/consensus/consensus/src/noop.rs +++ b/crates/consensus/consensus/src/noop.rs @@ -1,34 +1,45 @@ use crate::{Consensus, ConsensusError, PostExecutionInput}; use alloy_primitives::U256; -use reth_primitives::{BlockWithSenders, Header, SealedBlock, SealedHeader}; +use reth_primitives::{BlockWithSenders, SealedBlock, SealedHeader}; /// A Consensus implementation that does nothing. 
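A hedged reconstruction of the trait's new shape; the generic parameters and their defaults are an assumption inferred from the `&H` and `&B` signatures in the hunk above:

```rust
pub trait Consensus<H = Header, B = BlockBody>: Debug + Send + Sync {
    fn validate_header(&self, header: &SealedHeader<H>) -> Result<(), ConsensusError>;

    fn validate_body_against_header(
        &self,
        body: &B,
        header: &SealedHeader<H>,
    ) -> Result<(), ConsensusError>;

    // ...the remaining methods from the hunk, with `H` and `B` in place of
    // the previously hard-coded `Header` and `BlockBody`.
}
```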
#[derive(Debug, Copy, Clone, Default)] #[non_exhaustive] pub struct NoopConsensus; -impl Consensus for NoopConsensus { - fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { +impl Consensus for NoopConsensus { + fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { Ok(()) } fn validate_header_against_parent( &self, - _header: &SealedHeader, - _parent: &SealedHeader, + _header: &SealedHeader, + _parent: &SealedHeader, ) -> Result<(), ConsensusError> { Ok(()) } fn validate_header_with_total_difficulty( &self, - _header: &Header, + _header: &H, _total_difficulty: U256, ) -> Result<(), ConsensusError> { Ok(()) } - fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), ConsensusError> { + fn validate_body_against_header( + &self, + _body: &B, + _header: &SealedHeader, + ) -> Result<(), ConsensusError> { + Ok(()) + } + + fn validate_block_pre_execution( + &self, + _block: &SealedBlock, + ) -> Result<(), ConsensusError> { Ok(()) } diff --git a/crates/consensus/consensus/src/test_utils.rs b/crates/consensus/consensus/src/test_utils.rs index 436947209178..52926ec323e7 100644 --- a/crates/consensus/consensus/src/test_utils.rs +++ b/crates/consensus/consensus/src/test_utils.rs @@ -1,18 +1,25 @@ use crate::{Consensus, ConsensusError, PostExecutionInput}; use alloy_primitives::U256; use core::sync::atomic::{AtomicBool, Ordering}; -use reth_primitives::{BlockWithSenders, Header, SealedBlock, SealedHeader}; +use reth_primitives::{BlockWithSenders, SealedBlock, SealedHeader}; /// Consensus engine implementation for testing #[derive(Debug)] pub struct TestConsensus { /// Flag whether the header validation should purposefully fail fail_validation: AtomicBool, + /// Separate flag for setting whether `validate_body_against_header` should fail. It is needed + /// for testing networking logic, where a body that fails this check is rejected outright + /// while higher-level failures are handled by the sync logic. + fail_body_against_header: AtomicBool, } impl Default for TestConsensus { fn default() -> Self { - Self { fail_validation: AtomicBool::new(false) } + Self { + fail_validation: AtomicBool::new(false), + fail_body_against_header: AtomicBool::new(false), + } } } @@ -24,12 +31,23 @@ impl TestConsensus { /// Update the validation flag. pub fn set_fail_validation(&self, val: bool) { - self.fail_validation.store(val, Ordering::SeqCst) + self.fail_validation.store(val, Ordering::SeqCst); + self.fail_body_against_header.store(val, Ordering::SeqCst); + } + + /// Returns the body validation flag. + pub fn fail_body_against_header(&self) -> bool { + self.fail_body_against_header.load(Ordering::SeqCst) + } + + /// Update the body validation flag.
+ pub fn set_fail_body_against_header(&self, val: bool) { + self.fail_body_against_header.store(val, Ordering::SeqCst); } } -impl Consensus for TestConsensus { - fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { +impl Consensus for TestConsensus { + fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) } else { @@ -39,8 +57,8 @@ impl Consensus for TestConsensus { fn validate_header_against_parent( &self, - _header: &SealedHeader, - _parent: &SealedHeader, + _header: &SealedHeader, + _parent: &SealedHeader, ) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) @@ -51,7 +69,7 @@ impl Consensus for TestConsensus { fn validate_header_with_total_difficulty( &self, - _header: &Header, + _header: &H, _total_difficulty: U256, ) -> Result<(), ConsensusError> { if self.fail_validation() { @@ -61,7 +79,22 @@ impl Consensus for TestConsensus { } } - fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), ConsensusError> { + fn validate_body_against_header( + &self, + _body: &B, + _header: &SealedHeader, + ) -> Result<(), ConsensusError> { + if self.fail_body_against_header() { + Err(ConsensusError::BaseFeeMissing) + } else { + Ok(()) + } + } + + fn validate_block_pre_execution( + &self, + _block: &SealedBlock, + ) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) } else { diff --git a/crates/consensus/debug-client/Cargo.toml b/crates/consensus/debug-client/Cargo.toml index c37beef10742..18e7aead3068 100644 --- a/crates/consensus/debug-client/Cargo.toml +++ b/crates/consensus/debug-client/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] # reth reth-node-api.workspace = true -reth-rpc-api.workspace = true +reth-rpc-api = { workspace = true, features = ["client"] } reth-rpc-builder.workspace = true reth-tracing.workspace = true @@ -21,7 +21,7 @@ reth-tracing.workspace = true alloy-consensus = { workspace = true, features = ["serde"] } alloy-eips.workspace = true alloy-provider = { workspace = true, features = ["ws"] } -alloy-rpc-types.workspace = true +alloy-rpc-types-eth.workspace = true alloy-rpc-types-engine.workspace = true alloy-primitives.workspace = true diff --git a/crates/consensus/debug-client/src/client.rs b/crates/consensus/debug-client/src/client.rs index a6a59a6a380b..0e2a50370b85 100644 --- a/crates/consensus/debug-client/src/client.rs +++ b/crates/consensus/debug-client/src/client.rs @@ -1,8 +1,8 @@ -use alloy_consensus::TxEnvelope; +use alloy_consensus::Transaction; use alloy_eips::eip2718::Encodable2718; use alloy_primitives::B256; -use alloy_rpc_types::{Block, BlockTransactions}; use alloy_rpc_types_engine::{ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3}; +use alloy_rpc_types_eth::{Block, BlockTransactions}; use reth_node_api::EngineTypes; use reth_rpc_builder::auth::AuthServerHandle; use reth_tracing::tracing::warn; @@ -184,18 +184,19 @@ pub fn block_to_execution_payload_v3(block: Block) -> ExecutionNewPayload { // https://github.com/ethereum/execution-apis/blob/main/src/engine/cancun.md#specification let versioned_hashes = transactions .iter() - .flat_map(|tx| tx.blob_versioned_hashes.clone().unwrap_or_default()) + .flat_map(|tx| tx.blob_versioned_hashes().unwrap_or_default()) + .copied() .collect(); let payload: ExecutionPayloadV3 = ExecutionPayloadV3 { payload_inner: ExecutionPayloadV2 { payload_inner: ExecutionPayloadV1 { 
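With the second flag in place on `TestConsensus` above, a networking test can make only the body check fail while header validation keeps passing, isolating the body-rejection path. A usage sketch, assuming the crate's `test-utils` feature:

```rust
use reth_consensus::test_utils::TestConsensus;

let consensus = TestConsensus::default();

// Fail bodies only; headers still validate.
consensus.set_fail_body_against_header(true);
assert!(consensus.fail_body_against_header());
assert!(!consensus.fail_validation());
```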
parent_hash: block.header.parent_hash, - fee_recipient: block.header.miner, + fee_recipient: block.header.beneficiary, state_root: block.header.state_root, receipts_root: block.header.receipts_root, logs_bloom: block.header.logs_bloom, - prev_randao: block.header.mix_hash.unwrap(), + prev_randao: block.header.mix_hash, block_number: block.header.number, gas_limit: block.header.gas_limit, gas_used: block.header.gas_used, @@ -205,15 +206,10 @@ pub fn block_to_execution_payload_v3(block: Block) -> ExecutionNewPayload { block_hash: block.header.hash, transactions: transactions .into_iter() - .map(|tx| { - let envelope: TxEnvelope = tx.try_into().unwrap(); - let mut buffer: Vec = vec![]; - envelope.encode_2718(&mut buffer); - buffer.into() - }) + .map(|tx| tx.inner.encoded_2718().into()) .collect(), }, - withdrawals: block.withdrawals.clone().unwrap_or_default(), + withdrawals: block.withdrawals.clone().unwrap_or_default().into_inner(), }, blob_gas_used: block.header.blob_gas_used.unwrap(), excess_blob_gas: block.header.excess_blob_gas.unwrap(), diff --git a/crates/consensus/debug-client/src/providers/etherscan.rs b/crates/consensus/debug-client/src/providers/etherscan.rs index 59b402f3e78f..d3167b6cfab1 100644 --- a/crates/consensus/debug-client/src/providers/etherscan.rs +++ b/crates/consensus/debug-client/src/providers/etherscan.rs @@ -1,6 +1,6 @@ use crate::BlockProvider; use alloy_eips::BlockNumberOrTag; -use alloy_rpc_types::Block; +use alloy_rpc_types_eth::Block; use reqwest::Client; use reth_tracing::tracing::warn; use serde::Deserialize; diff --git a/crates/consensus/debug-client/src/providers/rpc.rs b/crates/consensus/debug-client/src/providers/rpc.rs index a8cd15c105aa..787515f1a600 100644 --- a/crates/consensus/debug-client/src/providers/rpc.rs +++ b/crates/consensus/debug-client/src/providers/rpc.rs @@ -1,7 +1,7 @@ use crate::BlockProvider; use alloy_eips::BlockNumberOrTag; use alloy_provider::{Provider, ProviderBuilder}; -use alloy_rpc_types::{Block, BlockTransactionsKind}; +use alloy_rpc_types_eth::{Block, BlockTransactionsKind}; use futures::StreamExt; use tokio::sync::mpsc::Sender; @@ -30,9 +30,9 @@ impl BlockProvider for RpcBlockProvider { .expect("failed to subscribe on new blocks") .into_stream(); - while let Some(block) = stream.next().await { + while let Some(header) = stream.next().await { let full_block = ws_provider - .get_block_by_hash(block.header.hash, BlockTransactionsKind::Full) + .get_block_by_hash(header.hash, BlockTransactionsKind::Full) .await .expect("failed to get block") .expect("block not found"); @@ -49,7 +49,7 @@ impl BlockProvider for RpcBlockProvider { .await .expect("failed to create WS provider"); let block: Block = ws_provider - .get_block_by_number(BlockNumberOrTag::Number(block_number), true) + .get_block_by_number(BlockNumberOrTag::Number(block_number), true.into()) .await? 
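In `block_to_execution_payload_v3` above, the raw transaction bytes now come straight from the inner consensus envelope instead of a fallible `TxEnvelope` conversion. A before/after sketch of the per-transaction mapping:

```rust
// Before: convert the RPC transaction to an envelope, then encode by hand.
// let envelope: TxEnvelope = tx.try_into().unwrap();
// let mut buffer: Vec<u8> = vec![];
// envelope.encode_2718(&mut buffer);
// let raw: Bytes = buffer.into();

// After: the RPC type wraps the consensus transaction, which encodes itself.
let raw: Bytes = tx.inner.encoded_2718().into();
```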
.ok_or_else(|| eyre::eyre!("block not found by number {}", block_number))?; Ok(block) diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index 67bb74555366..e56449551bbc 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -41,7 +41,8 @@ tokio-stream.workspace = true serde_json.workspace = true alloy-signer.workspace = true alloy-signer-local = { workspace = true, features = ["mnemonic"] } -alloy-rpc-types.workspace = true +alloy-rpc-types-eth.workspace = true +alloy-rpc-types-engine.workspace = true alloy-network.workspace = true alloy-consensus = { workspace = true, features = ["kzg"] } tracing.workspace = true diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 1e9717d082cd..1e9b39058e63 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -7,6 +7,7 @@ use reth::{ args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}, builder::{NodeBuilder, NodeConfig, NodeHandle}, network::PeersHandleProvider, + rpc::server_types::RpcModuleSelection, tasks::TaskManager, }; use reth_chainspec::{EthChainSpec, EthereumHardforks}; @@ -147,7 +148,12 @@ where let node_config = NodeConfig::new(chain_spec.clone()) .with_network(network_config.clone()) .with_unused_ports() - .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()) + .with_rpc( + RpcServerArgs::default() + .with_unused_ports() + .with_http() + .with_http_api(RpcModuleSelection::All), + ) .set_dev(is_dev); let span = span!(Level::INFO, "node", idx); diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index 07df36a33e1a..c3dff527eb20 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -1,7 +1,7 @@ use std::{marker::PhantomData, pin::Pin}; use alloy_primitives::{BlockHash, BlockNumber, Bytes, B256}; -use alloy_rpc_types::BlockNumberOrTag; +use alloy_rpc_types_eth::BlockNumberOrTag; use eyre::Ok; use futures_util::Future; use reth::{ @@ -10,10 +10,7 @@ use reth::{ network::PeersHandleProvider, providers::{BlockReader, BlockReaderIdExt, CanonStateSubscriptions, StageCheckpointReader}, rpc::{ - api::eth::{ - helpers::{EthApiSpec, EthTransactions, TraceExt}, - FullEthApiTypes, - }, + api::eth::helpers::{EthApiSpec, EthTransactions, TraceExt}, types::engine::PayloadStatusEnum, }, }; @@ -97,7 +94,7 @@ where where Engine::ExecutionPayloadEnvelopeV3: From + PayloadEnvelopeExt, Engine::ExecutionPayloadEnvelopeV4: From + PayloadEnvelopeExt, - AddOns::EthApi: EthApiSpec + EthTransactions + TraceExt + FullEthApiTypes, + AddOns::EthApi: EthApiSpec + EthTransactions + TraceExt, { let mut chain = Vec::with_capacity(length as usize); for i in 0..length { diff --git a/crates/e2e-test-utils/src/rpc.rs b/crates/e2e-test-utils/src/rpc.rs index b8cbe4d77add..7b7dabdf2404 100644 --- a/crates/e2e-test-utils/src/rpc.rs +++ b/crates/e2e-test-utils/src/rpc.rs @@ -4,12 +4,15 @@ use alloy_primitives::{Bytes, B256}; use reth::{ builder::{rpc::RpcRegistry, FullNodeComponents}, rpc::api::{ - eth::helpers::{EthApiSpec, EthTransactions, TraceExt}, + eth::{ + helpers::{EthApiSpec, EthTransactions, TraceExt}, + EthApiTypes, + }, DebugApiServer, }, }; use reth_chainspec::EthereumHardforks; -use reth_node_builder::{EthApiTypes, NodeTypes}; +use reth_node_builder::NodeTypes; #[allow(missing_debug_implementations)] pub struct RpcTestContext { diff --git a/crates/e2e-test-utils/src/traits.rs b/crates/e2e-test-utils/src/traits.rs index a70bbf7afb76..d14445370d41 100644 --- 
a/crates/e2e-test-utils/src/traits.rs +++ b/crates/e2e-test-utils/src/traits.rs @@ -1,4 +1,4 @@ -use alloy_rpc_types::engine::ExecutionPayloadEnvelopeV4; +use alloy_rpc_types_engine::ExecutionPayloadEnvelopeV4; use op_alloy_rpc_types_engine::{OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4}; use reth::rpc::types::engine::{ExecutionPayloadEnvelopeV3, ExecutionPayloadV3}; diff --git a/crates/e2e-test-utils/src/transaction.rs b/crates/e2e-test-utils/src/transaction.rs index 58a25dc12579..d24c5579313f 100644 --- a/crates/e2e-test-utils/src/transaction.rs +++ b/crates/e2e-test-utils/src/transaction.rs @@ -4,7 +4,7 @@ use alloy_network::{ eip2718::Encodable2718, Ethereum, EthereumWallet, TransactionBuilder, TransactionBuilder4844, }; use alloy_primitives::{hex, Address, Bytes, TxKind, B256, U256}; -use alloy_rpc_types::{Authorization, TransactionInput, TransactionRequest}; +use alloy_rpc_types_eth::{Authorization, TransactionInput, TransactionRequest}; use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; use eyre::Ok; diff --git a/crates/engine/invalid-block-hooks/Cargo.toml b/crates/engine/invalid-block-hooks/Cargo.toml index b33b8c00a1cd..462f0762a9e4 100644 --- a/crates/engine/invalid-block-hooks/Cargo.toml +++ b/crates/engine/invalid-block-hooks/Cargo.toml @@ -26,6 +26,7 @@ reth-trie = { workspace = true, features = ["serde"] } alloy-primitives.workspace = true alloy-rlp.workspace = true alloy-rpc-types-debug.workspace = true +alloy-consensus.workspace = true # async futures.workspace = true diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index 416c4adb40f8..4e92411ea128 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -1,5 +1,6 @@ use std::{collections::HashMap, fmt::Debug, fs::File, io::Write, path::PathBuf}; +use alloy_consensus::Header; use alloy_primitives::{keccak256, B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; use eyre::OptionExt; @@ -9,7 +10,7 @@ use reth_engine_primitives::InvalidBlockHook; use reth_evm::{ state_change::post_block_balance_increments, system_calls::SystemCaller, ConfigureEvm, }; -use reth_primitives::{Header, Receipt, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{Receipt, SealedBlockWithSenders, SealedHeader}; use reth_provider::{BlockExecutionOutput, ChainSpecProvider, StateProviderFactory}; use reth_revm::{ database::StateProviderDatabase, diff --git a/crates/engine/local/Cargo.toml b/crates/engine/local/Cargo.toml index d9dc63253399..2ab448e3bbfc 100644 --- a/crates/engine/local/Cargo.toml +++ b/crates/engine/local/Cargo.toml @@ -47,7 +47,7 @@ workspace = true [features] optimism = [ - "op-alloy-rpc-types-engine", - "reth-beacon-consensus/optimism", - "reth-provider/optimism" + "op-alloy-rpc-types-engine", + "reth-beacon-consensus/optimism", + "reth-provider/optimism", ] diff --git a/crates/engine/local/src/miner.rs b/crates/engine/local/src/miner.rs index 706ddc43de3f..7cebd3063097 100644 --- a/crates/engine/local/src/miner.rs +++ b/crates/engine/local/src/miner.rs @@ -6,7 +6,7 @@ use eyre::OptionExt; use futures_util::{stream::Fuse, StreamExt}; use reth_beacon_consensus::BeaconEngineMessage; use reth_chainspec::EthereumHardforks; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes}; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_primitives::{ BuiltPayload, PayloadAttributesBuilder, 
PayloadBuilder, PayloadKind, PayloadTypes, @@ -167,6 +167,7 @@ where state: self.forkchoice_state(), payload_attrs: None, tx, + version: EngineApiMessageVersion::default(), })?; let res = rx.await??; @@ -193,6 +194,7 @@ where state: self.forkchoice_state(), payload_attrs: Some(self.payload_attributes_builder.build(timestamp)), tx, + version: EngineApiMessageVersion::default(), })?; let res = rx.await??.await?; diff --git a/crates/engine/primitives/src/lib.rs b/crates/engine/primitives/src/lib.rs index fab96b0d17e9..949ebf0155c4 100644 --- a/crates/engine/primitives/src/lib.rs +++ b/crates/engine/primitives/src/lib.rs @@ -23,7 +23,7 @@ use serde::{de::DeserializeOwned, ser::Serialize}; /// payload job. Hence this trait is also [`PayloadTypes`]. pub trait EngineTypes: PayloadTypes< - BuiltPayload: TryInto + BuiltPayload: TryInto + TryInto + TryInto + TryInto, @@ -31,9 +31,15 @@ pub trait EngineTypes: + Serialize + 'static { - /// Execution Payload V1 type. - type ExecutionPayloadV1: DeserializeOwned + Serialize + Clone + Unpin + Send + Sync + 'static; - /// Execution Payload V2 type. + /// Execution Payload V1 envelope type. + type ExecutionPayloadEnvelopeV1: DeserializeOwned + + Serialize + + Clone + + Unpin + + Send + + Sync + + 'static; + /// Execution Payload V2 envelope type. type ExecutionPayloadEnvelopeV2: DeserializeOwned + Serialize + Clone @@ -41,7 +47,7 @@ pub trait EngineTypes: + Send + Sync + 'static; - /// Execution Payload V3 type. + /// Execution Payload V3 envelope type. type ExecutionPayloadEnvelopeV3: DeserializeOwned + Serialize + Clone @@ -49,7 +55,7 @@ pub trait EngineTypes: + Send + Sync + 'static; - /// Execution Payload V4 type. + /// Execution Payload V4 envelope type. type ExecutionPayloadEnvelopeV4: DeserializeOwned + Serialize + Clone diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index 198438d457f1..d383af6caa6a 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -15,7 +15,7 @@ pub use reth_engine_tree::{ engine::EngineApiEvent, }; use reth_evm::execute::BlockExecutorProvider; -use reth_network_p2p::BlockClient; +use reth_network_p2p::EthBlockClient; use reth_node_types::NodeTypesWithEngine; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_validator::ExecutionPayloadValidator; @@ -49,7 +49,7 @@ type EngineServiceType = ChainOrchestrator< pub struct EngineService where N: EngineNodeTypes, - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, E: BlockExecutorProvider + 'static, { orchestrator: EngineServiceType, @@ -59,7 +59,7 @@ where impl EngineService where N: EngineNodeTypes, - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, E: BlockExecutorProvider + 'static, { /// Constructor for `EngineService`. 
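The `EngineTypes` hunk above renames the V1 associated type so that all four versions follow the same `ExecutionPayloadEnvelopeVn` scheme. A sketch of the renamed associated types on the Ethereum implementation, matching the engine-primitives hunk later in this diff (trait bounds and generics elided):

```rust
impl EngineTypes for EthEngineTypes {
    // Renamed from `type ExecutionPayloadV1`; the concrete type is unchanged.
    type ExecutionPayloadEnvelopeV1 = ExecutionPayloadV1;
    type ExecutionPayloadEnvelopeV2 = ExecutionPayloadEnvelopeV2;
    type ExecutionPayloadEnvelopeV3 = ExecutionPayloadEnvelopeV3;
    type ExecutionPayloadEnvelopeV4 = ExecutionPayloadEnvelopeV4;
}
```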
@@ -124,7 +124,7 @@ where impl Stream for EngineService where N: EngineNodeTypes, - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, E: BlockExecutorProvider + 'static, { type Item = ChainEvent; diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index dee0bcaf7ce1..fb259d085601 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -38,10 +38,15 @@ reth-trie-parallel.workspace = true alloy-primitives.workspace = true alloy-eips.workspace = true alloy-rpc-types-engine.workspace = true +alloy-consensus.workspace = true + +revm-primitives.workspace = true # common futures.workspace = true +pin-project.workspace = true tokio = { workspace = true, features = ["macros", "sync"] } +tokio-stream.workspace = true thiserror.workspace = true # metrics @@ -79,22 +84,22 @@ assert_matches.workspace = true [features] test-utils = [ - "reth-db/test-utils", - "reth-chain-state/test-utils", - "reth-network-p2p/test-utils", - "reth-prune-types", - "reth-stages/test-utils", - "reth-static-file", - "reth-tracing", - "reth-blockchain-tree/test-utils", - "reth-chainspec/test-utils", - "reth-consensus/test-utils", - "reth-evm/test-utils", - "reth-payload-builder/test-utils", - "reth-primitives/test-utils", - "reth-revm/test-utils", - "reth-stages-api/test-utils", - "reth-provider/test-utils", - "reth-trie/test-utils", - "reth-prune-types?/test-utils" + "reth-db/test-utils", + "reth-chain-state/test-utils", + "reth-network-p2p/test-utils", + "reth-prune-types", + "reth-stages/test-utils", + "reth-static-file", + "reth-tracing", + "reth-blockchain-tree/test-utils", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-evm/test-utils", + "reth-payload-builder/test-utils", + "reth-primitives/test-utils", + "reth-revm/test-utils", + "reth-stages-api/test-utils", + "reth-provider/test-utils", + "reth-trie/test-utils", + "reth-prune-types?/test-utils", ] diff --git a/crates/engine/tree/src/backfill.rs b/crates/engine/tree/src/backfill.rs index 78e21a7b5efc..2ed0e758d505 100644 --- a/crates/engine/tree/src/backfill.rs +++ b/crates/engine/tree/src/backfill.rs @@ -230,12 +230,13 @@ impl PipelineState { mod tests { use super::*; use crate::test_utils::{insert_headers_into_client, TestPipelineBuilder}; - use alloy_primitives::{BlockNumber, Sealable, B256}; + use alloy_consensus::Header; + use alloy_primitives::{BlockNumber, B256}; use assert_matches::assert_matches; use futures::poll; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_network_p2p::test_utils::TestFullBlockClient; - use reth_primitives::{Header, SealedHeader}; + use reth_primitives::SealedHeader; use reth_provider::test_utils::MockNodeTypesWithDB; use reth_stages::ExecOutput; use reth_stages_api::StageCheckpoint; @@ -267,14 +268,12 @@ mod tests { let pipeline_sync = PipelineSync::new(pipeline, Box::::default()); let client = TestFullBlockClient::default(); - let sealed = Header { + let header = Header { base_fee_per_gas: Some(7), gas_limit: chain_spec.max_gas_limit, ..Default::default() - } - .seal_slow(); - let (header, seal) = sealed.into_parts(); - let header = SealedHeader::new(header, seal); + }; + let header = SealedHeader::seal(header); insert_headers_into_client(&client, header, 0..total_blocks); let tip = client.highest_block().expect("there should be blocks here").hash(); diff --git a/crates/engine/tree/src/download.rs b/crates/engine/tree/src/download.rs index 9ecec70ae369..8a7ea583f0fc 100644 --- a/crates/engine/tree/src/download.rs +++ 
b/crates/engine/tree/src/download.rs @@ -6,12 +6,13 @@ use futures::FutureExt; use reth_consensus::Consensus; use reth_network_p2p::{ full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, - BlockClient, + BlockClient, EthBlockClient, }; use reth_primitives::{SealedBlock, SealedBlockWithSenders}; use std::{ cmp::{Ordering, Reverse}, collections::{binary_heap::PeekMut, BinaryHeap, HashSet, VecDeque}, + fmt::Debug, sync::Arc, task::{Context, Poll}, }; @@ -72,10 +73,13 @@ where impl BasicBlockDownloader where - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { /// Create a new instance - pub fn new(client: Client, consensus: Arc) -> Self { + pub fn new( + client: Client, + consensus: Arc>, + ) -> Self { Self { full_block_client: FullBlockClient::new(client, consensus), inflight_full_block_requests: Vec::new(), @@ -182,7 +186,7 @@ where impl BlockDownloader for BasicBlockDownloader where - Client: BlockClient + 'static, + Client: EthBlockClient, { /// Handles incoming download actions. fn on_action(&mut self, action: DownloadAction) { @@ -305,12 +309,12 @@ impl BlockDownloader for NoopBlockDownloader { mod tests { use super::*; use crate::test_utils::insert_headers_into_client; - use alloy_primitives::Sealable; + use alloy_consensus::Header; use assert_matches::assert_matches; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_network_p2p::test_utils::TestFullBlockClient; - use reth_primitives::{Header, SealedHeader}; + use reth_primitives::SealedHeader; use std::{future::poll_fn, sync::Arc}; struct TestHarness { @@ -329,14 +333,12 @@ mod tests { ); let client = TestFullBlockClient::default(); - let sealed = Header { + let header = Header { base_fee_per_gas: Some(7), gas_limit: chain_spec.max_gas_limit, ..Default::default() - } - .seal_slow(); - let (header, seal) = sealed.into_parts(); - let header = SealedHeader::new(header, seal); + }; + let header = SealedHeader::seal(header); insert_headers_into_client(&client, header, 0..total_blocks); let consensus = Arc::new(EthBeaconConsensus::new(chain_spec)); diff --git a/crates/engine/tree/src/test_utils.rs b/crates/engine/tree/src/test_utils.rs index f17766a43ed7..c1b534ebf5eb 100644 --- a/crates/engine/tree/src/test_utils.rs +++ b/crates/engine/tree/src/test_utils.rs @@ -1,4 +1,4 @@ -use alloy_primitives::{Sealable, B256}; +use alloy_primitives::B256; use reth_chainspec::ChainSpec; use reth_network_p2p::test_utils::TestFullBlockClient; use reth_primitives::{BlockBody, SealedHeader}; @@ -76,9 +76,7 @@ pub fn insert_headers_into_client( header.parent_hash = hash; header.number += 1; header.timestamp += 1; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - sealed_header = SealedHeader::new(header, seal); + sealed_header = SealedHeader::seal(header); client.insert(sealed_header.clone(), body.clone()); } } diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 555cf89164f9..adc9230af34b 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -4,6 +4,7 @@ use crate::{ engine::{DownloadRequest, EngineApiEvent, FromEngine}, persistence::PersistenceHandle, }; +use alloy_consensus::Header; use alloy_eips::BlockNumHash; use alloy_primitives::{ map::{HashMap, HashSet}, @@ -26,15 +27,13 @@ use reth_chain_state::{ }; use reth_chainspec::EthereumHardforks; use reth_consensus::{Consensus, PostExecutionInput}; -use reth_engine_primitives::EngineTypes; +use 
reth_engine_primitives::{EngineApiMessageVersion, EngineTypes}; use reth_errors::{ConsensusError, ProviderResult}; use reth_evm::execute::BlockExecutorProvider; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_primitives::{PayloadAttributes, PayloadBuilder, PayloadBuilderAttributes}; use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{ - Block, GotExpected, Header, SealedBlock, SealedBlockWithSenders, SealedHeader, -}; +use reth_primitives::{Block, GotExpected, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ providers::ConsistentDbView, BlockReader, DatabaseProviderFactory, ExecutionOutcome, ProviderError, StateProviderBox, StateProviderFactory, StateReader, StateRootProvider, @@ -43,7 +42,8 @@ use reth_provider::{ use reth_revm::database::StateProviderDatabase; use reth_stages_api::ControlFlow; use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput}; -use reth_trie_parallel::parallel_root::{ParallelStateRoot, ParallelStateRootError}; +use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; +use revm_primitives::ResultAndState; use std::{ cmp::Ordering, collections::{btree_map, hash_map, BTreeMap, VecDeque}, @@ -56,9 +56,8 @@ use std::{ time::Instant, }; use tokio::sync::{ - mpsc::{UnboundedReceiver, UnboundedSender}, - oneshot, - oneshot::error::TryRecvError, + mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}, + oneshot::{self, error::TryRecvError}, }; use tracing::*; @@ -75,6 +74,8 @@ pub use invalid_block_hook::{InvalidBlockHooks, NoopInvalidBlockHook}; pub use persistence_state::PersistenceState; pub use reth_engine_primitives::InvalidBlockHook; +mod root; + /// Keeps track of the state of the tree. /// /// ## Invariants @@ -609,7 +610,7 @@ where remove_above_state: VecDeque::new(), }; - let (tx, outgoing) = tokio::sync::mpsc::unbounded_channel(); + let (tx, outgoing) = unbounded_channel(); let state = EngineApiTreeState::new( config.block_buffer_limit(), config.max_invalid_header_cache_length(), @@ -969,6 +970,7 @@ where &mut self, state: ForkchoiceState, attrs: Option, + version: EngineApiMessageVersion, ) -> ProviderResult> { trace!(target: "engine::tree", ?attrs, "invoked forkchoice update"); self.metrics.engine.forkchoice_updated_messages.increment(1); @@ -1018,7 +1020,7 @@ where // to return an error ProviderError::HeaderNotFound(state.head_block_hash.into()) })?; - let updated = self.process_payload_attributes(attr, &tip, state); + let updated = self.process_payload_attributes(attr, &tip, state, version); return Ok(TreeOutcome::new(updated)) } @@ -1038,7 +1040,7 @@ where } if let Some(attr) = attrs { - let updated = self.process_payload_attributes(attr, &tip, state); + let updated = self.process_payload_attributes(attr, &tip, state, version); return Ok(TreeOutcome::new(updated)) } @@ -1054,7 +1056,8 @@ where if self.engine_kind.is_opstack() { if let Some(attr) = attrs { debug!(target: "engine::tree", head = canonical_header.number, "handling payload attributes for canonical head"); - let updated = self.process_payload_attributes(attr, &canonical_header, state); + let updated = + self.process_payload_attributes(attr, &canonical_header, state, version); return Ok(TreeOutcome::new(updated)) } } @@ -1206,8 +1209,14 @@ where } EngineApiRequest::Beacon(request) => { match request { - BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => { - let mut output = self.on_forkchoice_updated(state, payload_attrs); + BeaconEngineMessage::ForkchoiceUpdated { + state, + 
payload_attrs, + tx, + version, + } => { + let mut output = + self.on_forkchoice_updated(state, payload_attrs, version); if let Ok(res) = &mut output { // track last received forkchoice state @@ -2177,9 +2186,18 @@ where let block = block.unseal(); let exec_time = Instant::now(); - let output = self.metrics.executor.execute_metered(executor, (&block, U256::MAX).into())?; + + // TODO: create StateRootTask with the receiving end of a channel and + // pass the sending end of the channel to the state hook. + let noop_state_hook = |_result_and_state: &ResultAndState| {}; + let output = self.metrics.executor.execute_metered( + executor, + (&block, U256::MAX).into(), + Box::new(noop_state_hook), + )?; trace!(target: "engine::tree", elapsed=?exec_time.elapsed(), ?block_number, "Executed block"); + if let Err(err) = self.consensus.validate_block_post_execution( &block, PostExecutionInput::new(&output.receipts, &output.requests), @@ -2200,6 +2218,8 @@ where let root_time = Instant::now(); let mut state_root_result = None; + // TODO: switch to calculate state root using `StateRootTask`. + // We attempt to compute state root in parallel if we are currently not persisting anything // to database. This is safe, because the database state cannot change until we // finish parallel computation. It is important that nothing is being persisted as @@ -2287,6 +2307,9 @@ where parent_hash: B256, hashed_state: &HashedPostState, ) -> Result<(B256, TrieUpdates), ParallelStateRootError> { + // TODO: when we switch to calculate state root using `StateRootTask` this + // method can be still useful to calculate the required `TrieInput` to + // create the task. let consistent_view = ConsistentDbView::new_with_latest_tip(self.provider.clone())?; let mut input = TrieInput::default(); @@ -2484,6 +2507,7 @@ where attrs: T::PayloadAttributes, head: &Header, state: ForkchoiceState, + version: EngineApiMessageVersion, ) -> OnForkChoiceUpdated { // 7. Client software MUST ensure that payloadAttributes.timestamp is greater than timestamp // of a block referenced by forkchoiceState.headBlockHash. If this condition isn't held @@ -2501,6 +2525,7 @@ where match ::try_new( state.head_block_hash, attrs, + version as u8, ) { Ok(attributes) => { // send the payload to the builder and return the receiver for the pending payload @@ -2571,7 +2596,7 @@ pub enum AdvancePersistenceError { mod tests { use super::*; use crate::persistence::PersistenceAction; - use alloy_primitives::{Bytes, Sealable}; + use alloy_primitives::Bytes; use alloy_rlp::Decodable; use alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayloadSidecar}; use assert_matches::assert_matches; @@ -2587,7 +2612,6 @@ mod tests { str::FromStr, sync::mpsc::{channel, Sender}, }; - use tokio::sync::mpsc::unbounded_channel; /// This is a test channel that allows you to `release` any value that is in the channel. 
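`execute_metered` now threads a state hook into block execution; the noop closure above is a stand-in for the channel wiring the TODO describes. A hedged sketch of that wiring (the channel and its names are hypothetical, not part of this diff):

```rust
use revm_primitives::{EvmState, ResultAndState};
use tokio::sync::mpsc::unbounded_channel;

// Hypothetical: the receiver half would feed the StateRootTask introduced
// below, while the sender is captured by the hook.
let (state_tx, _state_rx) = unbounded_channel::<EvmState>();
let state_hook = move |result_and_state: &ResultAndState| {
    // Only the post-transaction state diff is relevant for root computation.
    let _ = state_tx.send(result_and_state.state.clone());
};
```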
/// @@ -2684,9 +2708,8 @@ mod tests { let (from_tree_tx, from_tree_rx) = unbounded_channel(); - let sealed = chain_spec.genesis_header().clone().seal_slow(); - let (header, seal) = sealed.into_parts(); - let header = SealedHeader::new(header, seal); + let header = chain_spec.genesis_header().clone(); + let header = SealedHeader::seal(header); let engine_api_tree_state = EngineApiTreeState::new(10, 10, header.num_hash()); let canonical_in_memory_state = CanonicalInMemoryState::with_head(header, None, None); @@ -2808,6 +2831,7 @@ mod tests { state: fcu_state, payload_attrs: None, tx, + version: EngineApiMessageVersion::default(), } .into(), )) @@ -3097,6 +3121,7 @@ mod tests { }, payload_attrs: None, tx, + version: EngineApiMessageVersion::default(), } .into(), )) diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs new file mode 100644 index 000000000000..fbf6c3481384 --- /dev/null +++ b/crates/engine/tree/src/tree/root.rs @@ -0,0 +1,124 @@ +//! State root task related functionality. + +use futures::Stream; +use pin_project::pin_project; +use reth_provider::providers::ConsistentDbView; +use reth_trie::{updates::TrieUpdates, TrieInput}; +use reth_trie_parallel::root::ParallelStateRootError; +use revm_primitives::{EvmState, B256}; +use std::{ + future::Future, + pin::Pin, + sync::{mpsc, Arc}, + task::{Context, Poll}, +}; +use tokio_stream::wrappers::UnboundedReceiverStream; +use tracing::debug; + +/// Result of the state root calculation +pub(crate) type StateRootResult = Result<(B256, TrieUpdates), ParallelStateRootError>; + +/// Handle to a spawned state root task. +#[derive(Debug)] +#[allow(dead_code)] +pub(crate) struct StateRootHandle { + /// Channel for receiving the final result. + rx: mpsc::Receiver, +} + +#[allow(dead_code)] +impl StateRootHandle { + /// Waits for the state root calculation to complete. + pub(crate) fn wait_for_result(self) -> StateRootResult { + self.rx.recv().expect("state root task was dropped without sending result") + } +} + +/// Standalone task that receives a transaction state stream and updates relevant +/// data structures to calculate the state root. +/// +/// It is responsible for initializing a blinded sparse trie and subscribing to +/// the transaction state stream. As it receives transaction execution results, it +/// fetches the proofs for relevant accounts from the database and reveals them +/// to the tree. +/// Then it updates the relevant leaves according to the result of the transaction. +#[pin_project] +pub(crate) struct StateRootTask { + /// View over the state in the database. + consistent_view: ConsistentDbView, + /// Incoming state updates. + #[pin] + state_stream: UnboundedReceiverStream, + /// Latest trie input. + input: Arc, +} + +#[allow(dead_code)] +impl StateRootTask +where + Factory: Send + 'static, +{ + /// Creates a new `StateRootTask`. + pub(crate) const fn new( + consistent_view: ConsistentDbView, + input: Arc, + state_stream: UnboundedReceiverStream, + ) -> Self { + Self { consistent_view, state_stream, input } + } + + /// Spawns the state root task and returns a handle to await its result. + pub(crate) fn spawn(self) -> StateRootHandle { + let (tx, rx) = mpsc::channel(); + + // Spawn the task that will process state updates and calculate the root + tokio::spawn(async move { + debug!(target: "engine::tree", "Starting state root task"); + let result = self.await; + let _ = tx.send(result); + }); + + StateRootHandle { rx } + } + + /// Handles state updates.
+ fn on_state_update( + _view: &ConsistentDbView, + _input: &Arc, + _state: EvmState, + ) { + // TODO: calculate hashed state update and dispatch proof gathering for it. + } +} + +impl Future for StateRootTask +where + Factory: Send + 'static, +{ + type Output = StateRootResult; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let mut this = self.project(); + + // Process all items until the stream is closed + loop { + match this.state_stream.as_mut().poll_next(cx) { + Poll::Ready(Some(state)) => { + Self::on_state_update(this.consistent_view, this.input, state); + } + Poll::Ready(None) => { + // stream closed, return final result + return Poll::Ready(Ok((B256::default(), TrieUpdates::default()))); + } + Poll::Pending => { + return Poll::Pending; + } + } + } + + // TODO: + // * keep track of proof calculation + // * keep track of intermediate root computation + // * return final state root result + } +} diff --git a/crates/engine/util/src/engine_store.rs b/crates/engine/util/src/engine_store.rs index 85c5e126fa44..6b584f0c1f55 100644 --- a/crates/engine/util/src/engine_store.rs +++ b/crates/engine/util/src/engine_store.rs @@ -64,7 +64,12 @@ impl EngineMessageStore { fs::create_dir_all(&self.path)?; // ensure that store path had been created let timestamp = received_at.duration_since(SystemTime::UNIX_EPOCH).unwrap().as_millis(); match msg { - BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx: _tx } => { + BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx: _tx, + version: _version, + } => { let filename = format!("{}-fcu-{}.json", timestamp, state.head_block_hash); fs::write( self.path.join(filename), diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index d109fb9e94ae..0a4dd8d496fe 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -1,6 +1,6 @@ //! Stream wrapper that simulates reorgs. -use alloy_consensus::Transaction; +use alloy_consensus::{Header, Transaction}; use alloy_primitives::U256; use alloy_rpc_types_engine::{ CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, PayloadStatus, @@ -8,7 +8,7 @@ use alloy_rpc_types_engine::{ use futures::{stream::FuturesUnordered, Stream, StreamExt, TryFutureExt}; use itertools::Either; use reth_beacon_consensus::{BeaconEngineMessage, BeaconOnNewPayloadError, OnForkChoiceUpdated}; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes}; use reth_errors::{BlockExecutionError, BlockValidationError, RethError, RethResult}; use reth_ethereum_forks::EthereumHardforks; use reth_evm::{ @@ -16,7 +16,7 @@ use reth_evm::{ ConfigureEvm, }; use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{proofs, Block, BlockBody, Header, Receipt, Receipts}; +use reth_primitives::{proofs, Block, BlockBody, Receipt, Receipts}; use reth_provider::{BlockReader, ExecutionOutcome, ProviderError, StateProviderFactory}; use reth_revm::{ database::StateProviderDatabase, @@ -211,18 +211,32 @@ where state: reorg_forkchoice_state, payload_attrs: None, tx: reorg_fcu_tx, + version: EngineApiMessageVersion::default(), }, ]); *this.state = EngineReorgState::Reorg { queue }; continue } - (Some(BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx }), _) => { + ( + Some(BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx, + version, + }), + _, + ) => { // Record last forkchoice state forwarded to the engine. 
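Taken together, the crate-internal API of the new `root` module is: build the task from a consistent DB view, a `TrieInput`, and a state stream; spawn it; then block on the handle for the final result. A sketch under those assumptions (`consistent_view`, `input`, and `state_rx` are placeholders):

```rust
use tokio_stream::wrappers::UnboundedReceiverStream;

// Assemble and spawn the task (crate-internal types from the new module).
let task = StateRootTask::new(consistent_view, input, UnboundedReceiverStream::new(state_rx));
let handle = task.spawn();

// ...execute the block while the state hook feeds the sending half...

// Blocks until the task resolves with the final (root, trie updates) pair.
let (state_root, trie_updates) = handle.wait_for_result()?;
```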
// We do not care if it's valid since engine should be able to handle // reorgs that rely on invalid forkchoice state. *this.last_forkchoice_state = Some(state); *this.forkchoice_states_forwarded += 1; - Some(BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx }) + Some(BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx, + version, + }) } (item, _) => item, }; @@ -289,7 +303,7 @@ where let mut evm = evm_config.evm_with_env(&mut state, env); // apply eip-4788 pre block contract call - let mut system_caller = SystemCaller::new(evm_config.clone(), chain_spec); + let mut system_caller = SystemCaller::new(evm_config.clone(), chain_spec.clone()); system_caller.apply_beacon_root_contract_call( reorg_target.timestamp, @@ -361,7 +375,7 @@ where // and 4788 contract call state.merge_transitions(BundleRetention::PlainState); - let outcome = ExecutionOutcome::new( + let outcome: ExecutionOutcome = ExecutionOutcome::new( state.take_bundle(), Receipts::from(vec![receipts]), reorg_target.number, diff --git a/crates/engine/util/src/skip_fcu.rs b/crates/engine/util/src/skip_fcu.rs index e110cecedc89..adadfb595f89 100644 --- a/crates/engine/util/src/skip_fcu.rs +++ b/crates/engine/util/src/skip_fcu.rs @@ -45,7 +45,12 @@ where loop { let next = ready!(this.stream.poll_next_unpin(cx)); let item = match next { - Some(BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx }) => { + Some(BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx, + version, + }) => { if this.skipped < this.threshold { *this.skipped += 1; tracing::warn!(target: "engine::stream::skip_fcu", ?state, ?payload_attrs, threshold=this.threshold, skipped=this.skipped, "Skipping FCU"); @@ -53,7 +58,12 @@ where continue } *this.skipped = 0; - Some(BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx }) + Some(BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx, + version, + }) } next => next, }; diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index dd286584a598..3dc7a02af8b7 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -8,23 +8,23 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use alloy_consensus::EMPTY_OMMER_ROOT_HASH; +use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; use alloy_primitives::U256; use reth_chainspec::{EthChainSpec, EthereumHardfork, EthereumHardforks}; use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; use reth_consensus_common::validation::{ validate_4844_header_standalone, validate_against_parent_4844, validate_against_parent_eip1559_base_fee, validate_against_parent_hash_number, - validate_against_parent_timestamp, validate_block_pre_execution, validate_header_base_fee, - validate_header_extradata, validate_header_gas, + validate_against_parent_timestamp, validate_block_pre_execution, validate_body_against_header, + validate_header_base_fee, validate_header_extradata, validate_header_gas, }; use reth_primitives::{ - constants::MINIMUM_GAS_LIMIT, BlockWithSenders, Header, SealedBlock, SealedHeader, + constants::MINIMUM_GAS_LIMIT, BlockBody, BlockWithSenders, SealedBlock, SealedHeader, }; use std::{fmt::Debug, sync::Arc, time::SystemTime}; /// The bound divisor of the gas limit, used in update calculations. 
-const GAS_LIMIT_BOUND_DIVISOR: u64 = 1024; +pub const GAS_LIMIT_BOUND_DIVISOR: u64 = 1024; mod validation; pub use validation::validate_block_post_execution; @@ -32,7 +32,7 @@ pub use validation::validate_block_post_execution; /// Ethereum beacon consensus /// /// This consensus engine does basic checks as outlined in the execution specs. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct EthBeaconConsensus { /// Configuration chain_spec: Arc, @@ -212,6 +212,14 @@ impl Consensu Ok(()) } + fn validate_body_against_header( + &self, + body: &BlockBody, + header: &SealedHeader, + ) -> Result<(), ConsensusError> { + validate_body_against_header(body, header) + } + fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { validate_block_pre_execution(block, &self.chain_spec) } @@ -228,7 +236,7 @@ impl Consensu #[cfg(test)] mod tests { use super::*; - use alloy_primitives::{Sealable, B256}; + use alloy_primitives::B256; use reth_chainspec::{ChainSpec, ChainSpecBuilder}; use reth_primitives::proofs; @@ -313,16 +321,14 @@ mod tests { // that the header is valid let chain_spec = Arc::new(ChainSpecBuilder::mainnet().shanghai_activated().build()); - let sealed = Header { + let header = Header { base_fee_per_gas: Some(1337), withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), ..Default::default() - } - .seal_slow(); - let (header, seal) = sealed.into_parts(); + }; assert_eq!( - EthBeaconConsensus::new(chain_spec).validate_header(&SealedHeader::new(header, seal)), + EthBeaconConsensus::new(chain_spec).validate_header(&SealedHeader::seal(header,)), Ok(()) ); } diff --git a/crates/ethereum/engine-primitives/src/lib.rs b/crates/ethereum/engine-primitives/src/lib.rs index 20a55883680b..5addf2a18c51 100644 --- a/crates/ethereum/engine-primitives/src/lib.rs +++ b/crates/ethereum/engine-primitives/src/lib.rs @@ -43,7 +43,7 @@ where + TryInto + TryInto, { - type ExecutionPayloadV1 = ExecutionPayloadV1; + type ExecutionPayloadEnvelopeV1 = ExecutionPayloadV1; type ExecutionPayloadEnvelopeV2 = ExecutionPayloadEnvelopeV2; type ExecutionPayloadEnvelopeV3 = ExecutionPayloadEnvelopeV3; type ExecutionPayloadEnvelopeV4 = ExecutionPayloadEnvelopeV4; diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index 420352cf2b98..094a1df26577 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -1,6 +1,6 @@ //! Contains types required for building a payload. -use alloy_eips::{eip4844::BlobTransactionSidecar, eip7685::Requests}; +use alloy_eips::{eip4844::BlobTransactionSidecar, eip4895::Withdrawals, eip7685::Requests}; use alloy_primitives::{Address, B256, U256}; use alloy_rlp::Encodable; use alloy_rpc_types_engine::{ @@ -9,11 +9,11 @@ use alloy_rpc_types_engine::{ }; use reth_chain_state::ExecutedBlock; use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; -use reth_primitives::{SealedBlock, Withdrawals}; +use reth_primitives::SealedBlock; use reth_rpc_types_compat::engine::payload::{ block_to_payload_v1, block_to_payload_v3, convert_block_to_payload_field_v2, }; -use std::convert::Infallible; +use std::{convert::Infallible, sync::Arc}; /// Contains the built payload. /// @@ -25,7 +25,7 @@ pub struct EthBuiltPayload { /// Identifier of the payload pub(crate) id: PayloadId, /// The built block - pub(crate) block: SealedBlock, + pub(crate) block: Arc, /// Block execution data for the payload, if any. 
pub(crate) executed_block: Option, /// The fees of the block @@ -45,7 +45,7 @@ impl EthBuiltPayload { /// Caution: This does not set any [`BlobTransactionSidecar`]. pub const fn new( id: PayloadId, - block: SealedBlock, + block: Arc, fees: U256, executed_block: Option, requests: Option, @@ -59,7 +59,7 @@ impl EthBuiltPayload { } /// Returns the built block(sealed) - pub const fn block(&self) -> &SealedBlock { + pub fn block(&self) -> &SealedBlock { &self.block } @@ -127,7 +127,7 @@ impl BuiltPayload for &EthBuiltPayload { // V1 engine_getPayloadV1 response impl From for ExecutionPayloadV1 { fn from(value: EthBuiltPayload) -> Self { - block_to_payload_v1(value.block) + block_to_payload_v1(Arc::unwrap_or_clone(value.block)) } } @@ -136,7 +136,10 @@ impl From for ExecutionPayloadEnvelopeV2 { fn from(value: EthBuiltPayload) -> Self { let EthBuiltPayload { block, fees, .. } = value; - Self { block_value: fees, execution_payload: convert_block_to_payload_field_v2(block) } + Self { + block_value: fees, + execution_payload: convert_block_to_payload_field_v2(Arc::unwrap_or_clone(block)), + } } } @@ -145,7 +148,7 @@ impl From for ExecutionPayloadEnvelopeV3 { let EthBuiltPayload { block, fees, sidecars, .. } = value; Self { - execution_payload: block_to_payload_v3(block), + execution_payload: block_to_payload_v3(Arc::unwrap_or_clone(block)), block_value: fees, // From the engine API spec: // @@ -166,7 +169,7 @@ impl From for ExecutionPayloadEnvelopeV4 { let EthBuiltPayload { block, fees, sidecars, requests, .. } = value; Self { - execution_payload: block_to_payload_v3(block), + execution_payload: block_to_payload_v3(Arc::unwrap_or_clone(block)), block_value: fees, // From the engine API spec: // @@ -184,7 +187,7 @@ impl From for ExecutionPayloadEnvelopeV4 { } /// Container type for all components required to build a payload. -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Default)] pub struct EthPayloadBuilderAttributes { /// Id of the payload pub id: PayloadId, @@ -237,7 +240,11 @@ impl PayloadBuilderAttributes for EthPayloadBuilderAttributes { /// Creates a new payload builder for the given parent block and the attributes. 
/// /// Derives the unique [`PayloadId`] for the given parent and attributes - fn try_new(parent: B256, attributes: PayloadAttributes) -> Result { + fn try_new( + parent: B256, + attributes: PayloadAttributes, + _version: u8, + ) -> Result { Ok(Self::new(parent, attributes)) } @@ -297,10 +304,109 @@ pub(crate) fn payload_id(parent: &B256, attributes: &PayloadAttributes) -> Paylo #[cfg(test)] mod tests { use super::*; + use alloy_eips::eip4895::Withdrawal; + use alloy_primitives::B64; + use std::str::FromStr; #[test] fn attributes_serde() { let attributes = r#"{"timestamp":"0x1235","prevRandao":"0xf343b00e02dc34ec0124241f74f32191be28fb370bb48060f5fa4df99bda774c","suggestedFeeRecipient":"0x0000000000000000000000000000000000000000","withdrawals":null,"parentBeaconBlockRoot":null}"#; let _attributes: PayloadAttributes = serde_json::from_str(attributes).unwrap(); } + + #[test] + fn test_payload_id_basic() { + // Create a parent block and payload attributes + let parent = + B256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a") + .unwrap(); + let attributes = PayloadAttributes { + timestamp: 0x5, + prev_randao: B256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(), + suggested_fee_recipient: Address::from_str( + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b", + ) + .unwrap(), + withdrawals: None, + parent_beacon_block_root: None, + }; + + // Verify that the generated payload ID matches the expected value + assert_eq!( + payload_id(&parent, &attributes), + PayloadId(B64::from_str("0xa247243752eb10b4").unwrap()) + ); + } + + #[test] + fn test_payload_id_with_withdrawals() { + // Set up the parent and attributes with withdrawals + let parent = + B256::from_str("0x9876543210abcdef9876543210abcdef9876543210abcdef9876543210abcdef") + .unwrap(); + let attributes = PayloadAttributes { + timestamp: 1622553200, + prev_randao: B256::from_slice(&[1; 32]), + suggested_fee_recipient: Address::from_str( + "0xb94f5374fce5edbc8e2a8697c15331677e6ebf0b", + ) + .unwrap(), + withdrawals: Some(vec![ + Withdrawal { + index: 1, + validator_index: 123, + address: Address::from([0xAA; 20]), + amount: 10, + }, + Withdrawal { + index: 2, + validator_index: 456, + address: Address::from([0xBB; 20]), + amount: 20, + }, + ]), + parent_beacon_block_root: None, + }; + + // Verify that the generated payload ID matches the expected value + assert_eq!( + payload_id(&parent, &attributes), + PayloadId(B64::from_str("0xedddc2f84ba59865").unwrap()) + ); + } + + #[test] + fn test_payload_id_with_parent_beacon_block_root() { + // Set up the parent and attributes with a parent beacon block root + let parent = + B256::from_str("0x9876543210abcdef9876543210abcdef9876543210abcdef9876543210abcdef") + .unwrap(); + let attributes = PayloadAttributes { + timestamp: 1622553200, + prev_randao: B256::from_str( + "0x123456789abcdef123456789abcdef123456789abcdef123456789abcdef1234", + ) + .unwrap(), + suggested_fee_recipient: Address::from_str( + "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b", + ) + .unwrap(), + withdrawals: None, + parent_beacon_block_root: Some( + B256::from_str( + "0x2222222222222222222222222222222222222222222222222222222222222222", + ) + .unwrap(), + ), + }; + + // Verify that the generated payload ID matches the expected value + assert_eq!( + payload_id(&parent, &attributes), + PayloadId(B64::from_str("0x0fc49cd532094cce").unwrap()) + ); + } } diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 
f082a3a707ea..fa14e260d651 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -95,7 +95,7 @@ where { /// Creates a new [`EthExecutionStrategy`] pub fn new(state: State, chain_spec: Arc, evm_config: EvmConfig) -> Self { - let system_caller = SystemCaller::new(evm_config.clone(), (*chain_spec).clone()); + let system_caller = SystemCaller::new(evm_config.clone(), chain_spec.clone()); Self { state, chain_spec, evm_config, system_caller } } } diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index 91da90d99f0e..11e4acd4bfcd 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -17,11 +17,14 @@ extern crate alloc; +use core::convert::Infallible; + use alloc::{sync::Arc, vec::Vec}; +use alloy_consensus::Header; use alloy_primitives::{Address, Bytes, TxKind, U256}; use reth_chainspec::{ChainSpec, Head}; use reth_evm::{ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes}; -use reth_primitives::{transaction::FillTxEnv, Header, TransactionSigned}; +use reth_primitives::{transaction::FillTxEnv, TransactionSigned}; use revm_primitives::{ AnalysisKind, BlobExcessGasAndPrice, BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, Env, SpecId, TxEnv, }; @@ -59,6 +62,7 @@ impl EthEvmConfig { impl ConfigureEvmEnv for EthEvmConfig { type Header = Header; + type Error = Infallible; fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { transaction.fill_tx_env(tx_env, sender); @@ -131,7 +135,7 @@ impl ConfigureEvmEnv for EthEvmConfig { &self, parent: &Self::Header, attributes: NextBlockEnvAttributes, - ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), Self::Error> { // configure evm env based on parent block let cfg = CfgEnv::default().with_chain_id(self.chain_spec.chain().id()); @@ -179,7 +183,7 @@ impl ConfigureEvmEnv for EthEvmConfig { blob_excess_gas_and_price, }; - (CfgEnvWithHandlerCfg::new_with_spec_id(cfg, spec_id), block_env) + Ok((CfgEnvWithHandlerCfg::new_with_spec_id(cfg, spec_id), block_env)) } } @@ -192,15 +196,12 @@ impl ConfigureEvm for EthEvmConfig { #[cfg(test)] mod tests { use super::*; - use alloy_consensus::constants::KECCAK_EMPTY; + use alloy_consensus::{constants::KECCAK_EMPTY, Header}; use alloy_genesis::Genesis; use alloy_primitives::{B256, U256}; use reth_chainspec::{Chain, ChainSpec, MAINNET}; use reth_evm::execute::ProviderError; - use reth_primitives::{ - revm_primitives::{BlockEnv, CfgEnv, SpecId}, - Header, - }; + use reth_primitives::revm_primitives::{BlockEnv, CfgEnv, SpecId}; use reth_revm::{ db::{CacheDB, EmptyDBTyped}, inspectors::NoOpInspector, diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 98224b99e26b..6ecd5437bfb0 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -24,13 +24,15 @@ reth-network.workspace = true reth-evm.workspace = true reth-evm-ethereum.workspace = true reth-consensus.workspace = true -reth-auto-seal-consensus.workspace = true reth-beacon-consensus.workspace = true reth-rpc.workspace = true reth-node-api.workspace = true reth-chainspec.workspace = true reth-primitives.workspace = true reth-revm = { workspace = true, features = ["std"] } +reth-trie-db.workspace = true + +alloy-consensus.workspace = true # revm with required ethereum features revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } @@ -58,20 +60,22 @@ alloy-signer.workspace = true alloy-eips.workspace = true alloy-sol-types.workspace = true 
alloy-contract.workspace = true +alloy-rpc-types-beacon.workspace = true [features] default = [] test-utils = [ - "reth-node-builder/test-utils", - "reth-chainspec/test-utils", - "reth-consensus/test-utils", - "reth-network/test-utils", - "reth-payload-builder/test-utils", - "reth-primitives/test-utils", - "reth-revm/test-utils", - "reth-db/test-utils", - "reth-provider/test-utils", - "reth-transaction-pool/test-utils", - "revm/test-utils", - "reth-evm/test-utils" + "reth-node-builder/test-utils", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-network/test-utils", + "reth-payload-builder/test-utils", + "reth-primitives/test-utils", + "reth-revm/test-utils", + "reth-db/test-utils", + "reth-provider/test-utils", + "reth-transaction-pool/test-utils", + "reth-trie-db/test-utils", + "revm/test-utils", + "reth-evm/test-utils", ] diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index dbd6ce0a134a..5265329f19a9 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -2,7 +2,7 @@ use std::sync::Arc; -use reth_auto_seal_consensus::AutoSealConsensus; +use alloy_consensus::Header; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::ChainSpec; @@ -26,7 +26,7 @@ use reth_node_builder::{ BuilderContext, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, PayloadTypes, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_primitives::{Block, Header}; +use reth_primitives::{Block, Receipt, TransactionSigned, TxType}; use reth_provider::CanonStateSubscriptions; use reth_rpc::EthApi; use reth_tracing::tracing::{debug, info}; @@ -34,15 +34,19 @@ use reth_transaction_pool::{ blobstore::DiskFileBlobStore, EthTransactionPool, TransactionPool, TransactionValidationTaskExecutor, }; +use reth_trie_db::MerklePatriciaTrie; use crate::{EthEngineTypes, EthEvmConfig}; /// Ethereum primitive types. -#[derive(Debug)] +#[derive(Debug, Default, Clone)] pub struct EthPrimitives; impl NodePrimitives for EthPrimitives { type Block = Block; + type SignedTx = TransactionSigned; + type TxType = TxType; + type Receipt = Receipt; } /// Type configuration for a regular Ethereum node. 
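The widened `NodePrimitives` implementation above is the pattern a downstream node would follow. A minimal sketch under the same associated types shown in this diff, with `MyPrimitives` as a hypothetical type that simply reuses the Ethereum concrete types:

use reth_node_api::NodePrimitives;
use reth_primitives::{Block, Receipt, TransactionSigned, TxType};

/// Hypothetical primitives for a node that reuses Ethereum's concrete types.
#[derive(Debug, Default, Clone)]
pub struct MyPrimitives;

impl NodePrimitives for MyPrimitives {
    type Block = Block;
    type SignedTx = TransactionSigned;
    type TxType = TxType;
    type Receipt = Receipt;
}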
@@ -81,6 +85,7 @@ impl EthereumNode { impl NodeTypes for EthereumNode { type Primitives = EthPrimitives; type ChainSpec = ChainSpec; + type StateCommitment = MerklePatriciaTrie; } impl NodeTypesWithEngine for EthereumNode { @@ -332,11 +337,7 @@ where type Consensus = Arc; async fn build_consensus(self, ctx: &BuilderContext) -> eyre::Result { - if ctx.is_dev() { - Ok(Arc::new(AutoSealConsensus::new(ctx.chain_spec()))) - } else { - Ok(Arc::new(EthBeaconConsensus::new(ctx.chain_spec()))) - } + Ok(Arc::new(EthBeaconConsensus::new(ctx.chain_spec()))) } } diff --git a/crates/ethereum/node/tests/e2e/dev.rs b/crates/ethereum/node/tests/e2e/dev.rs index ead438b5a670..b6d0ffcfaaaf 100644 --- a/crates/ethereum/node/tests/e2e/dev.rs +++ b/crates/ethereum/node/tests/e2e/dev.rs @@ -1,12 +1,10 @@ use std::sync::Arc; -use crate::utils::eth_payload_attributes; use alloy_genesis::Genesis; use alloy_primitives::{b256, hex}; use futures::StreamExt; -use reth::{args::DevArgs, core::rpc::eth::helpers::EthTransactions}; +use reth::{args::DevArgs, rpc::api::eth::helpers::EthTransactions}; use reth_chainspec::ChainSpec; -use reth_e2e_test_utils::setup; use reth_node_api::FullNodeComponents; use reth_node_builder::{ rpc::RethRpcAddOns, EngineNodeLauncher, FullNode, NodeBuilder, NodeConfig, NodeHandle, @@ -17,16 +15,6 @@ use reth_tasks::TaskManager; #[tokio::test] async fn can_run_dev_node() -> eyre::Result<()> { - reth_tracing::init_test_tracing(); - let (mut nodes, _tasks, _) = - setup::(1, custom_chain(), true, eth_payload_attributes).await?; - - assert_chain_advances(nodes.pop().unwrap().inner).await; - Ok(()) -} - -#[tokio::test] -async fn can_run_dev_node_new_engine() -> eyre::Result<()> { reth_tracing::init_test_tracing(); let tasks = TaskManager::current(); let exec = tasks.executor(); diff --git a/crates/ethereum/node/tests/e2e/p2p.rs b/crates/ethereum/node/tests/e2e/p2p.rs index 180b88bbd5a2..5b2a6654fbbd 100644 --- a/crates/ethereum/node/tests/e2e/p2p.rs +++ b/crates/ethereum/node/tests/e2e/p2p.rs @@ -157,7 +157,8 @@ async fn e2e_test_send_transactions() -> eyre::Result<()> { assert_eq!(second_provider.get_block_number().await?, 0); - let head = provider.get_block_by_number(Default::default(), false).await?.unwrap().header.hash; + let head = + provider.get_block_by_number(Default::default(), false.into()).await?.unwrap().header.hash; second_node.engine_api.update_forkchoice(head, head).await?; let start = std::time::Instant::now(); diff --git a/crates/ethereum/node/tests/e2e/rpc.rs b/crates/ethereum/node/tests/e2e/rpc.rs index ddf3d5cba2a6..b1a11b1b5eb6 100644 --- a/crates/ethereum/node/tests/e2e/rpc.rs +++ b/crates/ethereum/node/tests/e2e/rpc.rs @@ -1,8 +1,19 @@ use crate::utils::eth_payload_attributes; -use alloy_eips::calc_next_block_base_fee; -use alloy_primitives::U256; -use alloy_provider::{network::EthereumWallet, Provider, ProviderBuilder}; +use alloy_eips::{calc_next_block_base_fee, eip2718::Encodable2718}; +use alloy_primitives::{Address, B256, U256}; +use alloy_provider::{network::EthereumWallet, Provider, ProviderBuilder, SendableTx}; +use alloy_rpc_types_beacon::relay::{ + BidTrace, BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4, + SignedBidSubmissionV3, SignedBidSubmissionV4, +}; use rand::{rngs::StdRng, Rng, SeedableRng}; +use reth::{ + payload::BuiltPayload, + rpc::{ + compat::engine::payload::block_to_payload_v3, + types::{engine::BlobsBundleV1, TransactionRequest}, + }, +}; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_e2e_test_utils::setup_engine; use 
reth_node_ethereum::EthereumNode; @@ -60,7 +71,7 @@ async fn test_fee_history() -> eyre::Result<()> { let receipt = builder.get_receipt().await?; assert!(receipt.status()); - let block = provider.get_block_by_number(1.into(), false).await?.unwrap(); + let block = provider.get_block_by_number(1.into(), false.into()).await?.unwrap(); assert_eq!(block.header.gas_used as u128, receipt.gas_used,); assert_eq!(block.header.base_fee_per_gas.unwrap(), expected_first_base_fee as u64); @@ -80,7 +91,7 @@ async fn test_fee_history() -> eyre::Result<()> { let fee_history = provider.get_fee_history(block_count, latest_block.into(), &[]).await?; let mut prev_header = provider - .get_block_by_number((latest_block + 1 - block_count).into(), false) + .get_block_by_number((latest_block + 1 - block_count).into(), false.into()) .await? .unwrap() .header; @@ -92,7 +103,8 @@ async fn test_fee_history() -> eyre::Result<()> { chain_spec.base_fee_params_at_block(block), ); - let header = provider.get_block_by_number(block.into(), false).await?.unwrap().header; + let header = + provider.get_block_by_number(block.into(), false.into()).await?.unwrap().header; assert_eq!(header.base_fee_per_gas.unwrap(), expected_base_fee as u64); assert_eq!( @@ -107,3 +119,152 @@ async fn test_fee_history() -> eyre::Result<()> { Ok(()) } + +#[tokio::test] +async fn test_flashbots_validate_v3() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .build(), + ); + + let (mut nodes, _tasks, wallet) = + setup_engine::(1, chain_spec.clone(), false, eth_payload_attributes).await?; + let mut node = nodes.pop().unwrap(); + let provider = ProviderBuilder::new() + .with_recommended_fillers() + .wallet(EthereumWallet::new(wallet.gen().swap_remove(0))) + .on_http(node.rpc_url()); + + node.advance(100, |_| { + let provider = provider.clone(); + Box::pin(async move { + let SendableTx::Envelope(tx) = + provider.fill(TransactionRequest::default().to(Address::ZERO)).await.unwrap() + else { + unreachable!() + }; + + tx.encoded_2718().into() + }) + }) + .await?; + + let _ = provider.send_transaction(TransactionRequest::default().to(Address::ZERO)).await?; + let (payload, attrs) = node.new_payload().await?; + + let mut request = BuilderBlockValidationRequestV3 { + request: SignedBidSubmissionV3 { + message: BidTrace { + parent_hash: payload.block().parent_hash, + block_hash: payload.block().hash(), + gas_used: payload.block().gas_used, + gas_limit: payload.block().gas_limit, + ..Default::default() + }, + execution_payload: block_to_payload_v3(payload.block().clone()), + blobs_bundle: BlobsBundleV1::new([]), + signature: Default::default(), + }, + parent_beacon_block_root: attrs.parent_beacon_block_root.unwrap(), + registered_gas_limit: payload.block().gas_limit, + }; + + assert!(provider + .raw_request::<_, ()>("flashbots_validateBuilderSubmissionV3".into(), (&request,)) + .await + .is_ok()); + + request.registered_gas_limit -= 1; + assert!(provider + .raw_request::<_, ()>("flashbots_validateBuilderSubmissionV3".into(), (&request,)) + .await + .is_err()); + request.registered_gas_limit += 1; + + request.request.execution_payload.payload_inner.payload_inner.state_root = B256::ZERO; + assert!(provider + .raw_request::<_, ()>("flashbots_validateBuilderSubmissionV3".into(), (&request,)) + .await + .is_err()); + Ok(()) +} + +#[tokio::test] +async fn 
test_flashbots_validate_v4() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .prague_activated() + .build(), + ); + + let (mut nodes, _tasks, wallet) = + setup_engine::(1, chain_spec.clone(), false, eth_payload_attributes).await?; + let mut node = nodes.pop().unwrap(); + let provider = ProviderBuilder::new() + .with_recommended_fillers() + .wallet(EthereumWallet::new(wallet.gen().swap_remove(0))) + .on_http(node.rpc_url()); + + node.advance(100, |_| { + let provider = provider.clone(); + Box::pin(async move { + let SendableTx::Envelope(tx) = + provider.fill(TransactionRequest::default().to(Address::ZERO)).await.unwrap() + else { + unreachable!() + }; + + tx.encoded_2718().into() + }) + }) + .await?; + + let _ = provider.send_transaction(TransactionRequest::default().to(Address::ZERO)).await?; + let (payload, attrs) = node.new_payload().await?; + + let mut request = BuilderBlockValidationRequestV4 { + request: SignedBidSubmissionV4 { + message: BidTrace { + parent_hash: payload.block().parent_hash, + block_hash: payload.block().hash(), + gas_used: payload.block().gas_used, + gas_limit: payload.block().gas_limit, + ..Default::default() + }, + execution_payload: block_to_payload_v3(payload.block().clone()), + blobs_bundle: BlobsBundleV1::new([]), + execution_requests: payload.requests().unwrap_or_default().to_vec(), + signature: Default::default(), + }, + parent_beacon_block_root: attrs.parent_beacon_block_root.unwrap(), + registered_gas_limit: payload.block().gas_limit, + }; + + provider + .raw_request::<_, ()>("flashbots_validateBuilderSubmissionV4".into(), (&request,)) + .await + .expect("request should validate"); + + request.registered_gas_limit -= 1; + assert!(provider + .raw_request::<_, ()>("flashbots_validateBuilderSubmissionV4".into(), (&request,)) + .await + .is_err()); + request.registered_gas_limit += 1; + + request.request.execution_payload.payload_inner.payload_inner.state_root = B256::ZERO; + assert!(provider + .raw_request::<_, ()>("flashbots_validateBuilderSubmissionV4".into(), (&request,)) + .await + .is_err()); + Ok(()) +} diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index f14c145889c9..c94cd8bb7281 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -9,8 +9,8 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![allow(clippy::useless_let_if_seq)] -use alloy_consensus::EMPTY_OMMER_ROOT_HASH; -use alloy_eips::{eip7685::Requests, merge::BEACON_NONCE}; +use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; +use alloy_eips::{eip4844::MAX_DATA_GAS_PER_BLOCK, eip7685::Requests, merge::BEACON_NONCE}; use alloy_primitives::U256; use reth_basic_payload_builder::{ commit_withdrawals, is_better_payload, BuildArguments, BuildOutcome, PayloadBuilder, @@ -25,10 +25,9 @@ use reth_execution_types::ExecutionOutcome; use reth_payload_builder::{EthBuiltPayload, EthPayloadBuilderAttributes}; use reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; use reth_primitives::{ - constants::eip4844::MAX_DATA_GAS_PER_BLOCK, proofs::{self}, revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, - Block, BlockBody, EthereumHardforks, Header, Receipt, + Block, BlockBody, EthereumHardforks, Receipt, }; use reth_provider::{ChainSpecProvider, StateProviderFactory}; use reth_revm::database::StateProviderDatabase; @@ -42,7 +41,7 @@ 
use revm::{ primitives::{EVMError, EnvWithHandlerCfg, InvalidTransaction, ResultAndState}, DatabaseCommit, }; -use revm_primitives::calc_excess_blob_gas; +use revm_primitives::{calc_excess_blob_gas, TxEnv}; use std::sync::Arc; use tracing::{debug, trace, warn}; @@ -74,7 +73,7 @@ where &self, config: &PayloadConfig, parent: &Header, - ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), EvmConfig::Error> { let next_attributes = NextBlockEnvAttributes { timestamp: config.attributes.timestamp(), suggested_fee_recipient: config.attributes.suggested_fee_recipient(), @@ -98,7 +97,9 @@ where &self, args: BuildArguments, ) -> Result, PayloadBuilderError> { - let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_block); + let (cfg_env, block_env) = self + .cfg_and_block_env(&args.config, &args.config.parent_header) + .map_err(PayloadBuilderError::other)?; let pool = args.pool.clone(); default_ethereum_payload(self.evm_config.clone(), args, cfg_env, block_env, |attributes| { @@ -121,7 +122,9 @@ where None, ); - let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_block); + let (cfg_env, block_env) = self + .cfg_and_block_env(&args.config, &args.config.parent_header) + .map_err(PayloadBuilderError::other)?; let pool = args.pool.clone(); @@ -155,13 +158,13 @@ where let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args; let chain_spec = client.chain_spec(); - let state_provider = client.state_by_block_hash(config.parent_block.hash())?; + let state_provider = client.state_by_block_hash(config.parent_header.hash())?; let state = StateProviderDatabase::new(state_provider); let mut db = - State::builder().with_database_ref(cached_reads.as_db(state)).with_bundle_update().build(); - let PayloadConfig { parent_block, extra_data, attributes } = config; + State::builder().with_database(cached_reads.as_db_mut(state)).with_bundle_update().build(); + let PayloadConfig { parent_header, extra_data, attributes } = config; - debug!(target: "payload_builder", id=%attributes.id, parent_hash = ?parent_block.hash(), parent_number = parent_block.number, "building new payload"); + debug!(target: "payload_builder", id=%attributes.id, parent_header = ?parent_header.hash(), parent_number = parent_header.number, "building new payload"); let mut cumulative_gas_used = 0; let mut sum_blob_gas_used = 0; let block_gas_limit: u64 = initialized_block_env.gas_limit.to::(); @@ -190,7 +193,7 @@ where ) .map_err(|err| { warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), + parent_hash=%parent_header.hash(), %err, "failed to apply beacon root contract call for payload" ); @@ -202,13 +205,20 @@ where &mut db, &initialized_cfg, &initialized_block_env, - parent_block.hash(), + parent_header.hash(), ) .map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to update blockhashes for payload"); + warn!(target: "payload_builder", parent_hash=%parent_header.hash(), %err, "failed to update parent header blockhashes for payload"); PayloadBuilderError::Internal(err.into()) })?; + let env = EnvWithHandlerCfg::new_with_cfg_env( + initialized_cfg.clone(), + initialized_block_env.clone(), + TxEnv::default(), + ); + let mut evm = evm_config.evm_with_env(&mut db, env); + let mut receipts = Vec::new(); while let Some(pool_tx) = best_txs.next() { // ensure we still have capacity for this transaction @@ -243,14 +253,8 @@ where } } - let env = 
EnvWithHandlerCfg::new_with_cfg_env( - initialized_cfg.clone(), - initialized_block_env.clone(), - evm_config.tx_env(tx.as_signed(), tx.signer()), - ); - - // Configure the environment for the block. - let mut evm = evm_config.evm_with_env(&mut db, env); + // Configure the environment for the tx. + *evm.tx_mut() = evm_config.tx_env(tx.as_signed(), tx.signer()); let ResultAndState { result, state } = match evm.transact() { Ok(res) => res, @@ -276,10 +280,9 @@ where } } }; - // drop evm so db is released. - drop(evm); + // commit changes - db.commit(state); + evm.db_mut().commit(state); // add to the total blob gas used if the transaction successfully executed if let Some(blob_tx) = tx.transaction.as_eip4844() { @@ -318,6 +321,9 @@ where executed_txs.push(tx.into_signed()); } + // Release db + drop(evm); + // check if we have a better block if !is_better_payload(best_payload.as_ref(), total_fees) { // can skip building the block @@ -369,10 +375,9 @@ where // calculate the state root let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state); let (state_root, trie_output) = { - let state_provider = db.database.0.inner.borrow_mut(); - state_provider.db.state_root_with_updates(hashed_state.clone()).inspect_err(|err| { + db.database.inner().state_root_with_updates(hashed_state.clone()).inspect_err(|err| { warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), + parent_hash=%parent_header.hash(), %err, "failed to calculate state root for payload" ); @@ -394,9 +399,9 @@ where executed_txs.iter().filter(|tx| tx.is_eip4844()).map(|tx| tx.hash).collect(), )?; - excess_blob_gas = if chain_spec.is_cancun_active_at_timestamp(parent_block.timestamp) { - let parent_excess_blob_gas = parent_block.excess_blob_gas.unwrap_or_default(); - let parent_blob_gas_used = parent_block.blob_gas_used.unwrap_or_default(); + excess_blob_gas = if chain_spec.is_cancun_active_at_timestamp(parent_header.timestamp) { + let parent_excess_blob_gas = parent_header.excess_blob_gas.unwrap_or_default(); + let parent_blob_gas_used = parent_header.blob_gas_used.unwrap_or_default(); Some(calc_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used)) } else { // for the first post-fork block, both parent.blob_gas_used and @@ -408,7 +413,7 @@ where } let header = Header { - parent_hash: parent_block.hash(), + parent_hash: parent_header.hash(), ommers_hash: EMPTY_OMMER_ROOT_HASH, beneficiary: initialized_block_env.coinbase, state_root, @@ -420,7 +425,7 @@ where mix_hash: attributes.prev_randao, nonce: BEACON_NONCE.into(), base_fee_per_gas: Some(base_fee), - number: parent_block.number + 1, + number: parent_header.number + 1, gas_limit: block_gas_limit, difficulty: U256::ZERO, gas_used: cumulative_gas_used, @@ -437,12 +442,12 @@ where body: BlockBody { transactions: executed_txs, ommers: vec![], withdrawals }, }; - let sealed_block = block.seal_slow(); + let sealed_block = Arc::new(block.seal_slow()); debug!(target: "payload_builder", ?sealed_block, "sealed built block"); // create the executed block data let executed = ExecutedBlock { - block: Arc::new(sealed_block.clone()), + block: sealed_block.clone(), senders: Arc::new(executed_senders), execution_output: Arc::new(execution_outcome), hashed_state: Arc::new(hashed_state), @@ -453,7 +458,7 @@ where EthBuiltPayload::new(attributes.id, sealed_block, total_fees, Some(executed), requests); // extend the payload with the blob sidecars from the executed txs - payload.extend_sidecars(blob_sidecars); + 
payload.extend_sidecars(blob_sidecars.into_iter().map(Arc::unwrap_or_clone)); Ok(BuildOutcome::Better { payload, cached_reads }) } diff --git a/crates/etl/src/lib.rs b/crates/etl/src/lib.rs index d30f432f9c19..46d41d704d06 100644 --- a/crates/etl/src/lib.rs +++ b/crates/etl/src/lib.rs @@ -281,9 +281,8 @@ impl EtlFile { #[cfg(test)] mod tests { - use alloy_primitives::{TxHash, TxNumber}; - use super::*; + use alloy_primitives::{TxHash, TxNumber}; #[test] fn etl_hashes() { diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml index 90fd532828e1..9d6a616af983 100644 --- a/crates/evm/Cargo.toml +++ b/crates/evm/Cargo.toml @@ -30,6 +30,7 @@ revm-primitives.workspace = true # alloy alloy-primitives.workspace = true alloy-eips.workspace = true +alloy-consensus.workspace = true auto_impl.workspace = true futures-util.workspace = true @@ -40,6 +41,7 @@ parking_lot = { workspace = true, optional = true } parking_lot.workspace = true reth-ethereum-forks.workspace = true alloy-consensus.workspace = true +metrics-util = { workspace = true, features = ["debugging"] } [features] default = ["std"] @@ -57,12 +59,12 @@ std = [ "revm/std", ] test-utils = [ - "dep:parking_lot", - "reth-chainspec/test-utils", - "reth-consensus/test-utils", - "reth-primitives/test-utils", - "reth-primitives-traits/test-utils", - "reth-revm/test-utils", - "revm/test-utils", - "reth-prune-types/test-utils" + "dep:parking_lot", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-revm/test-utils", + "revm/test-utils", + "reth-prune-types/test-utils" ] diff --git a/crates/evm/execution-types/Cargo.toml b/crates/evm/execution-types/Cargo.toml index b6af3dee9afd..13b0aef8ad45 100644 --- a/crates/evm/execution-types/Cargo.toml +++ b/crates/evm/execution-types/Cargo.toml @@ -14,6 +14,7 @@ workspace = true reth-primitives.workspace = true reth-execution-errors.workspace = true reth-trie.workspace = true +reth-primitives-traits.workspace = true revm.workspace = true @@ -43,14 +44,16 @@ serde = [ ] serde-bincode-compat = [ "reth-primitives/serde-bincode-compat", + "reth-primitives-traits/serde-bincode-compat", "reth-trie/serde-bincode-compat", "serde_with", - "alloy-eips/serde-bincode-compat" + "alloy-eips/serde-bincode-compat", ] std = [ "reth-primitives/std", "alloy-eips/std", "alloy-primitives/std", "revm/std", - "serde?/std" + "serde?/std", + "reth-primitives-traits/std", ] diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index 65f96ff56387..dc633e2d7ab7 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -453,7 +453,7 @@ impl IntoIterator for ChainBlocks<'_> { } /// Used to hold receipts and their attachment. 
-#[derive(Default, Clone, Debug)] +#[derive(Default, Clone, Debug, PartialEq, Eq)] pub struct BlockReceipts { /// Block identifier pub block: BlockNumHash, diff --git a/crates/evm/execution-types/src/execution_outcome.rs b/crates/evm/execution-types/src/execution_outcome.rs index 0fde01547f7e..c1d9c7016501 100644 --- a/crates/evm/execution-types/src/execution_outcome.rs +++ b/crates/evm/execution-types/src/execution_outcome.rs @@ -1,13 +1,16 @@ -use crate::BlockExecutionOutput; +use std::collections::HashMap; + use alloy_eips::eip7685::Requests; use alloy_primitives::{Address, BlockNumber, Bloom, Log, B256, U256}; -use reth_primitives::{logs_bloom, Account, Bytecode, Receipt, Receipts, StorageEntry}; +use reth_primitives::{logs_bloom, Account, Bytecode, Receipts, StorageEntry}; +use reth_primitives_traits::Receipt; use reth_trie::HashedPostState; use revm::{ db::{states::BundleState, BundleAccount}, primitives::AccountInfo, }; -use std::collections::HashMap; + +use crate::BlockExecutionOutput; /// Represents a changed account #[derive(Clone, Copy, Debug, PartialEq, Eq)] @@ -33,7 +36,7 @@ impl ChangedAccount { /// blocks, capturing the resulting state, receipts, and requests following the execution. #[derive(Default, Debug, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub struct ExecutionOutcome { +pub struct ExecutionOutcome { /// Bundle state with reverts. pub bundle: BundleState, /// The collection of receipts. @@ -41,7 +44,7 @@ pub struct ExecutionOutcome { /// The inner vector stores receipts ordered by transaction number. /// /// If receipt is None it means it is pruned. - pub receipts: Receipts, + pub receipts: Receipts, /// First block of bundle state. pub first_block: BlockNumber, /// The collection of EIP-7685 requests. @@ -63,14 +66,14 @@ pub type AccountRevertInit = (Option>, Vec); /// Type used to initialize revms reverts. pub type RevertsInit = HashMap>; -impl ExecutionOutcome { +impl ExecutionOutcome { /// Creates a new `ExecutionOutcome`. /// /// This constructor initializes a new `ExecutionOutcome` instance with the provided /// bundle state, receipts, first block number, and EIP-7685 requests. pub const fn new( bundle: BundleState, - receipts: Receipts, + receipts: Receipts, first_block: BlockNumber, requests: Vec, ) -> Self { @@ -85,7 +88,7 @@ impl ExecutionOutcome { state_init: BundleStateInit, revert_init: RevertsInit, contracts_init: impl IntoIterator, - receipts: Receipts, + receipts: Receipts, first_block: BlockNumber, requests: Vec, ) -> Self { @@ -168,7 +171,7 @@ impl ExecutionOutcome { } /// Transform block number to the index of block. - fn block_number_to_index(&self, block_number: BlockNumber) -> Option { + pub fn block_number_to_index(&self, block_number: BlockNumber) -> Option { if self.first_block > block_number { return None } @@ -180,27 +183,33 @@ impl ExecutionOutcome { } /// Returns an iterator over all block logs. 
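/// Returns `None` when `block_number` falls outside the range covered by this
/// outcome (the lookup goes through [`Self::block_number_to_index`]).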
- pub fn logs(&self, block_number: BlockNumber) -> Option> { + pub fn logs(&self, block_number: BlockNumber) -> Option> + where + T: Receipt, + { let index = self.block_number_to_index(block_number)?; - Some(self.receipts[index].iter().filter_map(|r| Some(r.as_ref()?.logs.iter())).flatten()) + Some(self.receipts[index].iter().filter_map(|r| Some(r.as_ref()?.logs().iter())).flatten()) } /// Returns the block's logs bloom - pub fn block_logs_bloom(&self, block_number: BlockNumber) -> Option { + pub fn block_logs_bloom(&self, block_number: BlockNumber) -> Option + where + T: Receipt, + { Some(logs_bloom(self.logs(block_number)?)) } /// Returns the receipt root for all recorded receipts. /// Note: this function calculates Bloom filters for every receipt and creates merkle trees /// of receipts. This is an expensive operation. - pub fn receipts_root_slow(&self, _block_number: BlockNumber) -> Option { + pub fn receipts_root_slow(&self, _block_number: BlockNumber) -> Option + where + T: Receipt, + { #[cfg(feature = "optimism")] panic!("This should not be called in optimism mode. Use `optimism_receipts_root_slow` instead."); #[cfg(not(feature = "optimism"))] - self.receipts.root_slow( - self.block_number_to_index(_block_number)?, - reth_primitives::proofs::calculate_receipt_root_no_memo, - ) + self.receipts.root_slow(self.block_number_to_index(_block_number)?, T::receipts_root) } /// Returns the receipt root for all recorded receipts. @@ -209,23 +218,23 @@ impl ExecutionOutcome { pub fn generic_receipts_root_slow( &self, block_number: BlockNumber, - f: impl FnOnce(&[&Receipt]) -> B256, + f: impl FnOnce(&[&T]) -> B256, ) -> Option { self.receipts.root_slow(self.block_number_to_index(block_number)?, f) } /// Returns a reference to the receipts. - pub const fn receipts(&self) -> &Receipts { + pub const fn receipts(&self) -> &Receipts { &self.receipts } /// Returns a mutable reference to the receipts. - pub fn receipts_mut(&mut self) -> &mut Receipts { + pub fn receipts_mut(&mut self) -> &mut Receipts { &mut self.receipts } /// Returns all block receipts - pub fn receipts_by_block(&self, block_number: BlockNumber) -> &[Option] { + pub fn receipts_by_block(&self, block_number: BlockNumber) -> &[Option] { let Some(index) = self.block_number_to_index(block_number) else { return &[] }; &self.receipts[index] } @@ -277,7 +286,10 @@ impl ExecutionOutcome { /// # Panics /// /// If the target block number is not included in the state block range. - pub fn split_at(self, at: BlockNumber) -> (Option, Self) { + pub fn split_at(self, at: BlockNumber) -> (Option, Self) + where + T: Clone, + { if at == self.first_block { return (None, self) } @@ -329,7 +341,7 @@ impl ExecutionOutcome { } /// Create a new instance with updated receipts.
- pub fn with_receipts(mut self, receipts: Receipts) -> Self { + pub fn with_receipts(mut self, receipts: Receipts) -> Self { self.receipts = receipts; self } @@ -352,8 +364,8 @@ impl ExecutionOutcome { } } -impl From<(BlockExecutionOutput, BlockNumber)> for ExecutionOutcome { - fn from(value: (BlockExecutionOutput, BlockNumber)) -> Self { +impl From<(BlockExecutionOutput, BlockNumber)> for ExecutionOutcome { + fn from(value: (BlockExecutionOutput, BlockNumber)) -> Self { Self { bundle: value.0.state, receipts: Receipts::from(value.0.receipts), @@ -366,12 +378,15 @@ impl From<(BlockExecutionOutput, BlockNumber)> for ExecutionOutcome { #[cfg(test)] mod tests { use super::*; - use alloy_eips::eip7685::Requests; - use alloy_primitives::{bytes, Address, LogData, B256}; - use reth_primitives::{Receipts, TxType}; - use std::collections::HashMap; + #[cfg(not(feature = "optimism"))] + use alloy_primitives::bytes; + use alloy_primitives::{Address, B256}; + use reth_primitives::Receipts; + #[cfg(not(feature = "optimism"))] + use reth_primitives::{LogData, TxType}; #[test] + #[cfg(not(feature = "optimism"))] fn test_initialisation() { // Create a new BundleState object with initial data let bundle = BundleState::new( @@ -382,15 +397,11 @@ mod tests { // Create a Receipts object with a vector of receipt vectors let receipts = Receipts { - receipt_vec: vec![vec![Some(Receipt { + receipt_vec: vec![vec![Some(reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), })]], }; @@ -444,18 +455,15 @@ mod tests { } #[test] + #[cfg(not(feature = "optimism"))] fn test_block_number_to_index() { // Create a Receipts object with a vector of receipt vectors let receipts = Receipts { - receipt_vec: vec![vec![Some(Receipt { + receipt_vec: vec![vec![Some(reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), })]], }; @@ -482,18 +490,15 @@ mod tests { } #[test] + #[cfg(not(feature = "optimism"))] fn test_get_logs() { // Create a Receipts object with a vector of receipt vectors let receipts = Receipts { - receipt_vec: vec![vec![Some(Receipt { + receipt_vec: vec![vec![Some(reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![Log::::default()], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), })]], }; @@ -517,18 +522,15 @@ mod tests { } #[test] + #[cfg(not(feature = "optimism"))] fn test_receipts_by_block() { // Create a Receipts object with a vector of receipt vectors let receipts = Receipts { - receipt_vec: vec![vec![Some(Receipt { + receipt_vec: vec![vec![Some(reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![Log::::default()], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), })]], }; @@ -550,37 +552,30 @@ mod tests { // Assert that the receipts for block number 123 match the expected receipts assert_eq!( receipts_by_block, - vec![&Some(Receipt { + vec![&Some(reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![Log::::default()], success: true, - #[cfg(feature = "optimism")] 
- deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), })] ); } #[test] + #[cfg(not(feature = "optimism"))] fn test_receipts_len() { // Create a Receipts object with a vector of receipt vectors let receipts = Receipts { - receipt_vec: vec![vec![Some(Receipt { + receipt_vec: vec![vec![Some(reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![Log::::default()], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), })]], }; // Create an empty Receipts object - let receipts_empty = Receipts { receipt_vec: vec![] }; + let receipts_empty: Receipts = Receipts { receipt_vec: vec![] }; // Define the first block number let first_block = 123; @@ -616,17 +611,14 @@ mod tests { } #[test] + #[cfg(not(feature = "optimism"))] fn test_revert_to() { // Create a random receipt object - let receipt = Receipt { + let receipt = reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), }; // Create a Receipts object with a vector of receipt vectors @@ -668,17 +660,14 @@ mod tests { } #[test] + #[cfg(not(feature = "optimism"))] fn test_extend_execution_outcome() { // Create a Receipt object with specific attributes. - let receipt = Receipt { + let receipt = reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), }; // Create a Receipts object containing the receipt. @@ -715,17 +704,14 @@ mod tests { } #[test] + #[cfg(not(feature = "optimism"))] fn test_split_at_execution_outcome() { // Create a random receipt object - let receipt = Receipt { + let receipt = reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), }; // Create a Receipts object with a vector of receipt vectors @@ -829,7 +815,7 @@ mod tests { }, ); - let execution_outcome = ExecutionOutcome { + let execution_outcome: ExecutionOutcome = ExecutionOutcome { bundle: bundle_state, receipts: Receipts::default(), first_block: 0, diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index b75feea83a15..d20dbe4594a2 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -17,13 +17,15 @@ extern crate alloc; -use crate::builder::RethEvmBuilder; +use alloy_consensus::BlockHeader as _; use alloy_primitives::{Address, Bytes, B256, U256}; use reth_primitives::TransactionSigned; use reth_primitives_traits::BlockHeader; use revm::{Database, Evm, GetInspector}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, Env, EnvWithHandlerCfg, SpecId, TxEnv}; +use crate::builder::RethEvmBuilder; + pub mod builder; pub mod either; pub mod execute; @@ -33,7 +35,6 @@ pub mod noop; pub mod provider; pub mod state_change; pub mod system_calls; - #[cfg(any(test, feature = "test-utils"))] /// test helpers for mocking executor pub mod test_utils; @@ -116,6 +117,9 @@ pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { /// The header type used by the EVM. type Header: BlockHeader; + /// The error type that is returned by [`Self::next_cfg_and_block_env`]. 
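/// Implementations whose environment construction cannot fail can set this to
/// [`core::convert::Infallible`], as `EthEvmConfig` does above; fallible ones
/// surface their error through the `Result` returned by
/// [`Self::next_cfg_and_block_env`].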
+ type Error: core::error::Error + Send + Sync; + /// Returns a [`TxEnv`] from a [`TransactionSigned`] and [`Address`]. fn tx_env(&self, transaction: &TransactionSigned, signer: Address) -> TxEnv { let mut tx_env = TxEnv::default(); @@ -152,7 +156,7 @@ pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { block_env.coinbase = header.beneficiary(); block_env.timestamp = U256::from(header.timestamp()); if after_merge { - block_env.prevrandao = Some(header.mix_hash()); + block_env.prevrandao = header.mix_hash(); block_env.difficulty = U256::ZERO; } else { block_env.difficulty = header.difficulty(); @@ -192,7 +196,7 @@ pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { &self, parent: &Self::Header, attributes: NextBlockEnvAttributes, - ) -> (CfgEnvWithHandlerCfg, BlockEnv); + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), Self::Error>; } /// Represents additional attributes required to configure the next block. diff --git a/crates/evm/src/metrics.rs b/crates/evm/src/metrics.rs index fbb2b858b158..3464bb96f4c7 100644 --- a/crates/evm/src/metrics.rs +++ b/crates/evm/src/metrics.rs @@ -2,14 +2,41 @@ //! //! Block processing related to syncing should take care to update the metrics by using either //! [`ExecutorMetrics::execute_metered`] or [`ExecutorMetrics::metered_one`]. -use std::time::Instant; - +use crate::{execute::Executor, system_calls::OnStateHook}; use metrics::{Counter, Gauge, Histogram}; use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput}; use reth_metrics::Metrics; use reth_primitives::BlockWithSenders; +use revm_primitives::ResultAndState; +use std::time::Instant; + +/// Wrapper struct that combines metrics and state hook +struct MeteredStateHook { + metrics: ExecutorMetrics, + inner_hook: Box, +} + +impl OnStateHook for MeteredStateHook { + fn on_state(&mut self, result_and_state: &ResultAndState) { + // Update the metrics for the number of accounts, storage slots and bytecodes loaded + let accounts = result_and_state.state.keys().len(); + let storage_slots = + result_and_state.state.values().map(|account| account.storage.len()).sum::(); + let bytecodes = result_and_state + .state + .values() + .filter(|account| !account.info.is_empty_code_hash()) + .collect::>() + .len(); + + self.metrics.accounts_loaded_histogram.record(accounts as f64); + self.metrics.storage_slots_loaded_histogram.record(storage_slots as f64); + self.metrics.bytecodes_loaded_histogram.record(bytecodes as f64); -use crate::execute::Executor; + // Call the original state hook + self.inner_hook.on_state(result_and_state); + } +} /// Executor metrics. // TODO(onbjerg): add sload/sstore @@ -65,10 +92,13 @@ impl ExecutorMetrics { /// /// Compared to [`Self::metered_one`], this method additionally updates metrics for the number /// of accounts, storage slots and bytecodes loaded and updated. + /// Execute the given block using the provided [`Executor`] and update metrics for the + /// execution. 
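/// A minimal usage sketch (`executor`, `input`, and `MyHook` here are hypothetical
/// values, not part of this crate):
/// ```ignore
/// let metrics = ExecutorMetrics::default();
/// let output = metrics.execute_metered(executor, input, Box::new(MyHook))?;
/// ```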
pub fn execute_metered<'a, E, DB, O, Error>( &self, executor: E, input: BlockExecutionInput<'a, BlockWithSenders>, + state_hook: Box, ) -> Result, Error> where E: Executor< DB, Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, Output = BlockExecutionOutput, Error = Error, >, { - let output = self.metered(input.block, || { - executor.execute_with_state_closure(input, |state: &revm::db::State| { - // Update the metrics for the number of accounts, storage slots and bytecodes - // loaded - let accounts = state.cache.accounts.len(); - let storage_slots = state - .cache - .accounts - .values() - .filter_map(|account| { - account.account.as_ref().map(|account| account.storage.len()) - }) - .sum::(); - let bytecodes = state.cache.contracts.len(); - - // Record all state present in the cache state as loaded even though some might have - // been newly created. - // TODO: Consider spitting these into loaded and newly created. - self.accounts_loaded_histogram.record(accounts as f64); - self.storage_slots_loaded_histogram.record(storage_slots as f64); - self.bytecodes_loaded_histogram.record(bytecodes as f64); - }) - })?; + // clone here is cheap, all the metrics are Option<Arc<_>>. Additionally + // they are globally registered so that the data recorded in the hook will + // be accessible. + let wrapper = MeteredStateHook { metrics: self.clone(), inner_hook: state_hook }; + + // Store reference to block for metered + let block = input.block; + + // Use metered to execute and track timing/gas metrics + let output = self.metered(block, || executor.execute_with_state_hook(input, wrapper))?; // Update the metrics for the number of accounts, storage slots and bytecodes updated let accounts = output.state.state.len(); @@ -123,3 +140,177 @@ impl ExecutorMetrics { self.metered(input.block, || f(input)) } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_eips::eip7685::Requests; + use metrics_util::debugging::{DebugValue, DebuggingRecorder, Snapshotter}; + use revm::db::BundleState; + use revm_primitives::{ + Account, AccountInfo, AccountStatus, Bytes, EvmState, EvmStorage, EvmStorageSlot, + ExecutionResult, Output, SuccessReason, B256, U256, + }; + use std::sync::mpsc; + + /// A mock executor that simulates state changes + struct MockExecutor { + result_and_state: ResultAndState, + } + + impl Executor<()> for MockExecutor { + type Input<'a> + = BlockExecutionInput<'a, BlockWithSenders> + where + Self: 'a; + type Output = BlockExecutionOutput<()>; + type Error = std::convert::Infallible; + + fn execute(self, _input: Self::Input<'_>) -> Result { + Ok(BlockExecutionOutput { + state: BundleState::default(), + receipts: vec![], + requests: Requests::default(), + gas_used: 0, + }) + } + fn execute_with_state_closure( + self, + _input: Self::Input<'_>, + _state: F, + ) -> Result + where + F: FnMut(&revm::State<()>), + { + Ok(BlockExecutionOutput { + state: BundleState::default(), + receipts: vec![], + requests: Requests::default(), + gas_used: 0, + }) + } + fn execute_with_state_hook( + self, + _input: Self::Input<'_>, + mut hook: F, + ) -> Result + where + F: OnStateHook + 'static, + { + // Call hook with our mock state + hook.on_state(&self.result_and_state); + + Ok(BlockExecutionOutput { + state: BundleState::default(), + receipts: vec![], + requests: Requests::default(), + gas_used: 0, + }) + } + } + + struct ChannelStateHook { + output: i32, + sender: mpsc::Sender, + } + + impl OnStateHook for ChannelStateHook { + fn on_state(&mut self, _result_and_state: &ResultAndState) { + let _ = self.sender.send(self.output); + } + } + + fn setup_test_recorder()
-> Snapshotter { + let recorder = DebuggingRecorder::new(); + let snapshotter = recorder.snapshotter(); + recorder.install().unwrap(); + snapshotter + } + + #[test] + fn test_executor_metrics_hook_metrics_recorded() { + let snapshotter = setup_test_recorder(); + let metrics = ExecutorMetrics::default(); + + let input = BlockExecutionInput { + block: &BlockWithSenders::default(), + total_difficulty: Default::default(), + }; + + let (tx, _rx) = mpsc::channel(); + let expected_output = 42; + let state_hook = Box::new(ChannelStateHook { sender: tx, output: expected_output }); + + let result_and_state = ResultAndState { + result: ExecutionResult::Success { + reason: SuccessReason::Stop, + gas_used: 100, + output: Output::Call(Bytes::default()), + logs: vec![], + gas_refunded: 0, + }, + state: { + let mut state = EvmState::default(); + let storage = + EvmStorage::from_iter([(U256::from(1), EvmStorageSlot::new(U256::from(2)))]); + state.insert( + Default::default(), + Account { + info: AccountInfo { + balance: U256::from(100), + nonce: 10, + code_hash: B256::random(), + code: Default::default(), + }, + storage, + status: AccountStatus::Loaded, + }, + ); + state + }, + }; + let executor = MockExecutor { result_and_state }; + let _result = metrics.execute_metered(executor, input, state_hook).unwrap(); + + let snapshot = snapshotter.snapshot().into_vec(); + + for metric in snapshot { + let metric_name = metric.0.key().name(); + if metric_name == "sync.execution.accounts_loaded_histogram" || + metric_name == "sync.execution.storage_slots_loaded_histogram" || + metric_name == "sync.execution.bytecodes_loaded_histogram" + { + if let DebugValue::Histogram(vs) = metric.3 { + assert!( + vs.iter().any(|v| v.into_inner() > 0.0), + "metric {metric_name} not recorded" + ); + } + } + } + } + + #[test] + fn test_executor_metrics_hook_called() { + let metrics = ExecutorMetrics::default(); + + let input = BlockExecutionInput { + block: &BlockWithSenders::default(), + total_difficulty: Default::default(), + }; + + let (tx, rx) = mpsc::channel(); + let expected_output = 42; + let state_hook = Box::new(ChannelStateHook { sender: tx, output: expected_output }); + + let result_and_state = ResultAndState { + result: ExecutionResult::Revert { gas_used: 0, output: Default::default() }, + state: EvmState::default(), + }; + let executor = MockExecutor { result_and_state }; + let _result = metrics.execute_metered(executor, input, state_hook).unwrap(); + + let actual_output = rx.try_recv().unwrap(); + assert_eq!(actual_output, expected_output); + } +} diff --git a/crates/evm/src/provider.rs b/crates/evm/src/provider.rs index 8db828ec4a00..0d4f45c4d9d7 100644 --- a/crates/evm/src/provider.rs +++ b/crates/evm/src/provider.rs @@ -1,8 +1,8 @@ //! Provider trait for populating the EVM environment. use crate::ConfigureEvmEnv; -use alloy_eips::eip1898::BlockHashOrNumber; -use reth_primitives::Header; +use alloy_consensus::Header; +use alloy_eips::BlockHashOrNumber; use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; diff --git a/crates/evm/src/state_change.rs b/crates/evm/src/state_change.rs index 2a3d93f94d9c..0e207fc2dbeb 100644 --- a/crates/evm/src/state_change.rs +++ b/crates/evm/src/state_change.rs @@ -1,9 +1,10 @@ //! State changes that are not related to transactions. 
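//! Withdrawal amounts are denominated in gwei (EIP-4895), so crediting one to its
//! address scales by 10^9; a sketch, assuming alloy's `Withdrawal` with its `u64`
//! gwei `amount` field:
//! ```ignore
//! let wei = withdrawal.amount as u128 * 1_000_000_000;
//! ```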
+use alloy_eips::eip4895::Withdrawal; use alloy_primitives::{map::HashMap, Address, U256}; use reth_chainspec::EthereumHardforks; use reth_consensus_common::calc; -use reth_primitives::{Block, Withdrawal, Withdrawals}; +use reth_primitives::Block; /// Collect all balance changes at the end of the block. /// @@ -36,7 +37,7 @@ pub fn post_block_balance_increments( insert_post_block_withdrawals_balance_increments( chain_spec, block.timestamp, - block.body.withdrawals.as_ref().map(Withdrawals::as_ref), + block.body.withdrawals.as_ref().map(|w| w.as_slice()), &mut balance_increments, ); diff --git a/crates/evm/src/system_calls/eip2935.rs b/crates/evm/src/system_calls/eip2935.rs index edb71c8b4e06..4848feb7281c 100644 --- a/crates/evm/src/system_calls/eip2935.rs +++ b/crates/evm/src/system_calls/eip2935.rs @@ -4,10 +4,10 @@ use alloc::{boxed::Box, string::ToString}; use alloy_eips::eip2935::HISTORY_STORAGE_ADDRESS; use crate::ConfigureEvm; +use alloy_consensus::Header; use alloy_primitives::B256; use reth_chainspec::EthereumHardforks; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; -use reth_primitives::Header; use revm::{interpreter::Host, Database, Evm}; use revm_primitives::ResultAndState; diff --git a/crates/evm/src/system_calls/eip4788.rs b/crates/evm/src/system_calls/eip4788.rs index bc535809680f..2ad02c26eb90 100644 --- a/crates/evm/src/system_calls/eip4788.rs +++ b/crates/evm/src/system_calls/eip4788.rs @@ -2,11 +2,11 @@ use alloc::{boxed::Box, string::ToString}; use crate::ConfigureEvm; +use alloy_consensus::Header; use alloy_eips::eip4788::BEACON_ROOTS_ADDRESS; use alloy_primitives::B256; use reth_chainspec::EthereumHardforks; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; -use reth_primitives::Header; use revm::{interpreter::Host, Database, Evm}; use revm_primitives::ResultAndState; diff --git a/crates/evm/src/system_calls/eip7002.rs b/crates/evm/src/system_calls/eip7002.rs index 5e36f2bdeb93..f20b7a54c089 100644 --- a/crates/evm/src/system_calls/eip7002.rs +++ b/crates/evm/src/system_calls/eip7002.rs @@ -1,10 +1,10 @@ //! [EIP-7002](https://eips.ethereum.org/EIPS/eip-7002) system call implementation. use crate::ConfigureEvm; use alloc::{boxed::Box, format}; +use alloy_consensus::Header; use alloy_eips::eip7002::WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS; use alloy_primitives::Bytes; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; -use reth_primitives::Header; use revm::{interpreter::Host, Database, Evm}; use revm_primitives::{ExecutionResult, ResultAndState}; diff --git a/crates/evm/src/system_calls/eip7251.rs b/crates/evm/src/system_calls/eip7251.rs index 7a55c7a5aeab..112f724df764 100644 --- a/crates/evm/src/system_calls/eip7251.rs +++ b/crates/evm/src/system_calls/eip7251.rs @@ -1,10 +1,10 @@ //! [EIP-7251](https://eips.ethereum.org/EIPS/eip-7251) system call implementation. use crate::ConfigureEvm; use alloc::{boxed::Box, format}; +use alloy_consensus::Header; use alloy_eips::eip7251::CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS; use alloy_primitives::Bytes; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; -use reth_primitives::Header; use revm::{interpreter::Host, Database, Evm}; use revm_primitives::{ExecutionResult, ResultAndState}; diff --git a/crates/evm/src/system_calls/mod.rs b/crates/evm/src/system_calls/mod.rs index daaf1d1414f7..47fd59d735fc 100644 --- a/crates/evm/src/system_calls/mod.rs +++ b/crates/evm/src/system_calls/mod.rs @@ -1,13 +1,14 @@ //! System contract call functions. 
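//! The submodules above cover the EIP-2935 block-hash history, EIP-4788 beacon
//! root, EIP-7002 withdrawal request, and EIP-7251 consolidation request system
//! contracts. A typical call site (sketch only; the argument list is abridged
//! from the reorg code earlier in this diff):
//! ```ignore
//! let mut system_caller = SystemCaller::new(evm_config.clone(), chain_spec.clone());
//! system_caller.apply_beacon_root_contract_call(timestamp, number, parent_root, &mut evm)?;
//! ```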
use crate::ConfigureEvm; -use alloc::{boxed::Box, vec}; +use alloc::{boxed::Box, sync::Arc, vec}; +use alloy_consensus::Header; use alloy_eips::eip7685::Requests; use alloy_primitives::Bytes; use core::fmt::Display; use reth_chainspec::EthereumHardforks; use reth_execution_errors::BlockExecutionError; -use reth_primitives::{Block, Header}; +use reth_primitives::Block; use revm::{Database, DatabaseCommit, Evm}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, B256}; @@ -46,7 +47,7 @@ impl OnStateHook for NoopHook { #[allow(missing_debug_implementations)] pub struct SystemCaller { evm_config: EvmConfig, - chain_spec: Chainspec, + chain_spec: Arc, /// Optional hook to be called after each state change. hook: Option>, } @@ -54,7 +55,7 @@ pub struct SystemCaller { impl SystemCaller { /// Create a new system caller with the given EVM config, database, and chain spec, and creates /// the EVM with the given initialized config and block environment. - pub const fn new(evm_config: EvmConfig, chain_spec: Chainspec) -> Self { + pub const fn new(evm_config: EvmConfig, chain_spec: Arc) -> Self { Self { evm_config, chain_spec, hook: None } } diff --git a/crates/exex/exex/Cargo.toml b/crates/exex/exex/Cargo.toml index 903e11e784ea..f7ab4fce5df0 100644 --- a/crates/exex/exex/Cargo.toml +++ b/crates/exex/exex/Cargo.toml @@ -25,7 +25,6 @@ reth-fs-util.workspace = true reth-metrics.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true -reth-payload-builder.workspace = true reth-primitives = { workspace = true, features = ["secp256k1"] } reth-primitives-traits.workspace = true reth-provider.workspace = true diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs index 0a8bde242457..a1e88c7f428e 100644 --- a/crates/exex/exex/src/backfill/test_utils.rs +++ b/crates/exex/exex/src/backfill/test_utils.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use alloy_consensus::{constants::ETH_TO_WEI, TxEip2930}; +use alloy_consensus::{constants::ETH_TO_WEI, Header, TxEip2930}; use alloy_genesis::{Genesis, GenesisAccount}; use alloy_primitives::{b256, Address, TxKind, U256}; use eyre::OptionExt; @@ -10,7 +10,7 @@ use reth_evm::execute::{ }; use reth_evm_ethereum::execute::EthExecutorProvider; use reth_primitives::{ - Block, BlockBody, BlockWithSenders, Header, Receipt, SealedBlockWithSenders, Transaction, + Block, BlockBody, BlockWithSenders, Receipt, SealedBlockWithSenders, Transaction, }; use reth_provider::{ providers::ProviderNodeTypes, BlockWriter as _, ExecutionOutcome, LatestStateProviderRef, diff --git a/crates/exex/exex/src/context.rs b/crates/exex/exex/src/context.rs index 23d772b738a8..4e0d9f5956c7 100644 --- a/crates/exex/exex/src/context.rs +++ b/crates/exex/exex/src/context.rs @@ -1,14 +1,12 @@ -use std::fmt::Debug; - +use crate::{ExExContextDyn, ExExEvent, ExExNotifications, ExExNotificationsStream}; use reth_exex_types::ExExHead; -use reth_node_api::{FullNodeComponents, NodeTypes, NodeTypesWithEngine}; +use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_node_core::node_config::NodeConfig; use reth_primitives::Head; use reth_tasks::TaskExecutor; +use std::fmt::Debug; use tokio::sync::mpsc::UnboundedSender; -use crate::{ExExContextDyn, ExExEvent, ExExNotifications, ExExNotificationsStream}; - /// Captures the context that an `ExEx` has access to. pub struct ExExContext { /// The current head of the blockchain at launch. 
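The `SystemCaller` change above wraps the chain spec in an `Arc`. A toy sketch of the ownership pattern, with illustrative types rather than the real reth definitions:

```rust
use std::sync::Arc;

struct ChainSpec {
    chain_id: u64,
}

struct SystemCaller<EvmConfig> {
    evm_config: EvmConfig,
    chain_spec: Arc<ChainSpec>,
}

impl<EvmConfig> SystemCaller<EvmConfig> {
    const fn new(evm_config: EvmConfig, chain_spec: Arc<ChainSpec>) -> Self {
        Self { evm_config, chain_spec }
    }
}

fn main() {
    let spec = Arc::new(ChainSpec { chain_id: 1 });
    // Cloning an Arc bumps a reference count; the spec itself is shared.
    let caller_a = SystemCaller::new((), Arc::clone(&spec));
    let caller_b = SystemCaller::new((), spec);
    assert_eq!(caller_a.chain_spec.chain_id, caller_b.chain_spec.chain_id);
    let _ = (caller_a.evm_config, caller_b.evm_config);
}
```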
@@ -97,10 +95,7 @@ where } /// Returns the handle to the payload builder service. - pub fn payload_builder( - &self, - ) -> &reth_payload_builder::PayloadBuilderHandle<::Engine> - { + pub fn payload_builder(&self) -> &Node::PayloadBuilder { self.components.payload_builder() } diff --git a/crates/exex/exex/src/event.rs b/crates/exex/exex/src/event.rs index 1215ea2a502a..bbd79addc9e2 100644 --- a/crates/exex/exex/src/event.rs +++ b/crates/exex/exex/src/event.rs @@ -1,4 +1,4 @@ -use reth_primitives::BlockNumHash; +use alloy_eips::BlockNumHash; /// Events emitted by an `ExEx`. #[derive(Debug, Clone, Copy, PartialEq, Eq)] diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs index 8c1518f3090f..a17de660862b 100644 --- a/crates/exex/exex/src/manager.rs +++ b/crates/exex/exex/src/manager.rs @@ -1,13 +1,14 @@ use crate::{ wal::Wal, ExExEvent, ExExNotification, ExExNotifications, FinishedExExHeight, WalHandle, }; +use alloy_eips::BlockNumHash; use futures::StreamExt; use itertools::Itertools; use metrics::Gauge; use reth_chain_state::ForkChoiceStream; use reth_chainspec::Head; use reth_metrics::{metrics::Counter, Metrics}; -use reth_primitives::{BlockNumHash, SealedHeader}; +use reth_primitives::SealedHeader; use reth_provider::HeaderProvider; use reth_tracing::tracing::debug; use std::{ diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs index 90a0ee230a4d..14cfe9be4d92 100644 --- a/crates/exex/exex/src/notifications.rs +++ b/crates/exex/exex/src/notifications.rs @@ -108,11 +108,6 @@ where }); } - fn without_head(mut self) -> Self { - self.set_without_head(); - self - } - fn set_with_head(&mut self, exex_head: ExExHead) { let current = std::mem::replace(&mut self.inner, ExExNotificationsInner::Invalid); self.inner = ExExNotificationsInner::WithHead(match current { @@ -131,6 +126,11 @@ where }); } + fn without_head(mut self) -> Self { + self.set_without_head(); + self + } + fn with_head(mut self, exex_head: ExExHead) -> Self { self.set_with_head(exex_head); self diff --git a/crates/exex/test-utils/Cargo.toml b/crates/exex/test-utils/Cargo.toml index b850295a332b..6e5af981b317 100644 --- a/crates/exex/test-utils/Cargo.toml +++ b/crates/exex/test-utils/Cargo.toml @@ -31,6 +31,10 @@ reth-primitives.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } reth-tasks.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } +reth-trie-db.workspace = true + +## alloy +alloy-eips.workspace = true ## async futures-util.workspace = true diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 1f6ea75ce6d9..5c3468a3c1c7 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -15,6 +15,7 @@ use std::{ task::Poll, }; +use alloy_eips::BlockNumHash; use futures_util::FutureExt; use reth_blockchain_tree::noop::NoopBlockchainTree; use reth_chainspec::{ChainSpec, MAINNET}; @@ -44,7 +45,7 @@ use reth_node_ethereum::{ EthEngineTypes, EthEvmConfig, }; use reth_payload_builder::noop::NoopPayloadBuilderService; -use reth_primitives::{BlockNumHash, Head, SealedBlockWithSenders}; +use reth_primitives::{Head, SealedBlockWithSenders}; use reth_provider::{ providers::{BlockchainProvider, StaticFileProvider}, BlockReader, ProviderFactory, @@ -119,6 +120,7 @@ pub struct TestNode; impl NodeTypes for TestNode { type Primitives = (); type ChainSpec = ChainSpec; + type StateCommitment = reth_trie_db::MerklePatriciaTrie; } impl NodeTypesWithEngine 
for TestNode { diff --git a/crates/net/discv4/Cargo.toml b/crates/net/discv4/Cargo.toml index 6fb669388851..f1c8410eeba9 100644 --- a/crates/net/discv4/Cargo.toml +++ b/crates/net/discv4/Cargo.toml @@ -42,6 +42,7 @@ parking_lot.workspace = true rand = { workspace = true, optional = true } generic-array.workspace = true serde = { workspace = true, optional = true } +itertools.workspace = true [dev-dependencies] assert_matches.workspace = true diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index a99906bdf09b..9ffe8451f0e2 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -38,6 +38,7 @@ use discv5::{ ConnectionDirection, ConnectionState, }; use enr::Enr; +use itertools::Itertools; use parking_lot::Mutex; use proto::{EnrRequest, EnrResponse}; use reth_ethereum_forks::ForkId; @@ -793,7 +794,7 @@ impl Discv4Service { } /// Sends a new `FindNode` packet to the node with `target` as the lookup target but checks - /// whether we should should send a new ping first to renew the endpoint proof by checking the + /// whether we should send a new ping first to renew the endpoint proof by checking the /// previously failed findNode requests. It could be that the node is no longer reachable or /// lost our entry. fn find_node_checked(&mut self, node: &NodeRecord, ctx: LookupContext) { @@ -861,7 +862,7 @@ impl Discv4Service { let Some(bucket) = self.kbuckets.get_bucket(&key) else { return false }; if bucket.num_entries() < MAX_NODES_PER_BUCKET / 2 { // skip half empty bucket - return false; + return false } self.remove_key(node_id, key) } @@ -1406,6 +1407,16 @@ impl Discv4Service { } }; + // log the peers we discovered + trace!(target: "discv4", + target=format!("{:#?}", node_id), + peers_count=msg.nodes.len(), + peers=format!("[{:#}]", msg.nodes.iter() + .map(|node_rec| node_rec.id + ).format(", ")), + "Received peers from Neighbours packet" + ); + // This is the recursive lookup step where we initiate new FindNode requests for new nodes // that were discovered. 
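The new `Neighbours` trace log above renders the peer list with `itertools`' `format`, which displays an iterator's items with a separator without first collecting them into an intermediate collection. A minimal sketch:

```rust
use itertools::Itertools;

fn main() {
    let peers = ["peer-a", "peer-b", "peer-c"];
    // `format` returns a lazy `Display` adapter; the string is only
    // assembled when the surrounding format machinery runs.
    let joined = format!("[{:#}]", peers.iter().format(", "));
    assert_eq!(joined, "[peer-a, peer-b, peer-c]");
}
```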
for node in msg.nodes.into_iter().map(NodeRecord::into_ipv4_mapped) { diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index 0684c263b8c7..4a534afbef53 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -412,7 +412,7 @@ pub fn discv5_sockets_wrt_rlpx_addr( discv5_addr_ipv6.map(|ip| SocketAddrV6::new(ip, discv5_port_ipv6, 0, 0)); if let Some(discv5_addr) = discv5_addr_ipv4 { - warn!(target: "discv5", + warn!(target: "net::discv5", %discv5_addr, %rlpx_addr, "Overwriting discv5 IPv4 address with RLPx IPv4 address, limited to one advertised IP address per IP version" @@ -429,7 +429,7 @@ pub fn discv5_sockets_wrt_rlpx_addr( discv5_addr_ipv4.map(|ip| SocketAddrV4::new(ip, discv5_port_ipv4)); if let Some(discv5_addr) = discv5_addr_ipv6 { - warn!(target: "discv5", + warn!(target: "net::discv5", %discv5_addr, %rlpx_addr, "Overwriting discv5 IPv6 address with RLPx IPv6 address, limited to one advertised IP address per IP version" @@ -477,11 +477,9 @@ impl BootNode { #[cfg(test)] mod test { - use std::net::SocketAddrV4; - - use alloy_primitives::hex; - use super::*; + use alloy_primitives::hex; + use std::net::SocketAddrV4; const MULTI_ADDRESSES: &str = "/ip4/184.72.129.189/udp/30301/p2p/16Uiu2HAmSG2hdLwyQHQmG4bcJBgD64xnW63WMTLcrNq6KoZREfGb,/ip4/3.231.11.52/udp/30301/p2p/16Uiu2HAmMy4V8bi3XP7KDfSLQcLACSvTLroRRwEsTyFUKo8NCkkp,/ip4/54.198.153.150/udp/30301/p2p/16Uiu2HAmSVsb7MbRf1jg3Dvd6a3n5YNqKQwn1fqHCFgnbqCsFZKe,/ip4/3.220.145.177/udp/30301/p2p/16Uiu2HAm74pBDGdQ84XCZK27GRQbGFFwQ7RsSqsPwcGmCR3Cwn3B,/ip4/3.231.138.188/udp/30301/p2p/16Uiu2HAmMnTiJwgFtSVGV14ZNpwAvS1LUoF4pWWeNtURuV6C3zYB"; const BOOT_NODES_OP_MAINNET_AND_BASE_MAINNET: &[&str] = &[ diff --git a/crates/net/discv5/src/filter.rs b/crates/net/discv5/src/filter.rs index 325544de6c18..a83345a9a5e6 100644 --- a/crates/net/discv5/src/filter.rs +++ b/crates/net/discv5/src/filter.rs @@ -89,13 +89,11 @@ impl MustNotIncludeKeys { #[cfg(test)] mod tests { + use super::*; + use crate::NetworkStackId; use alloy_rlp::Bytes; use discv5::enr::{CombinedKey, Enr}; - use crate::NetworkStackId; - - use super::*; - #[test] fn must_not_include_key_filter() { // rig test diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index a8154b7bd19a..da54d0b52663 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -95,14 +95,14 @@ impl Discv5 { /// CAUTION: The value **must** be rlp encoded pub fn set_eip868_in_local_enr(&self, key: Vec, rlp: Bytes) { let Ok(key_str) = std::str::from_utf8(&key) else { - error!(target: "discv5", + error!(target: "net::discv5", err="key not utf-8", "failed to update local enr" ); return }; if let Err(err) = self.discv5.enr_insert(key_str, &rlp) { - error!(target: "discv5", + error!(target: "net::discv5", %err, "failed to update local enr" ); @@ -131,7 +131,7 @@ impl Discv5 { self.discv5.ban_node(&node_id, None); self.ban_ip(ip); } - Err(err) => error!(target: "discv5", + Err(err) => error!(target: "net::discv5", %err, "failed to ban peer" ), @@ -167,7 +167,7 @@ impl Discv5 { // let (enr, bc_enr, fork_key, rlpx_ip_mode) = build_local_enr(sk, &discv5_config); - trace!(target: "discv5", + trace!(target: "net::discv5", ?enr, "local ENR" ); @@ -271,7 +271,7 @@ impl Discv5 { // to them over RLPx, to be compatible with EL discv5 implementations that don't // enforce this security measure. 
- trace!(target: "discv5", + trace!(target: "net::discv5", ?enr, %socket, "discovered unverifiable enr, source socket doesn't match socket advertised in ENR" @@ -296,7 +296,7 @@ impl Discv5 { let node_record = match self.try_into_reachable(enr, socket) { Ok(enr_bc) => enr_bc, Err(err) => { - trace!(target: "discv5", + trace!(target: "net::discv5", %err, ?enr, "discovered peer is unreachable" @@ -308,7 +308,7 @@ impl Discv5 { } }; if let FilterOutcome::Ignore { reason } = self.filter_discovered_peer(enr) { - trace!(target: "discv5", + trace!(target: "net::discv5", ?enr, reason, "filtered out discovered peer" @@ -324,7 +324,7 @@ impl Discv5 { .then(|| self.get_fork_id(enr).ok()) .flatten(); - trace!(target: "discv5", + trace!(target: "net::discv5", ?fork_id, ?enr, "discovered peer" @@ -491,7 +491,7 @@ pub async fn bootstrap( bootstrap_nodes: HashSet, discv5: &Arc, ) -> Result<(), Error> { - trace!(target: "discv5", + trace!(target: "net::discv5", ?bootstrap_nodes, "adding bootstrap nodes .." ); @@ -508,7 +508,7 @@ pub async fn bootstrap( let discv5 = discv5.clone(); enr_requests.push(async move { if let Err(err) = discv5.request_enr(enode.to_string()).await { - debug!(target: "discv5", + debug!(target: "net::discv5", ?enode, %err, "failed adding boot node" @@ -545,7 +545,7 @@ pub fn spawn_populate_kbuckets_bg( for i in (0..bootstrap_lookup_countdown).rev() { let target = discv5::enr::NodeId::random(); - trace!(target: "discv5", + trace!(target: "net::discv5", %target, bootstrap_boost_runs_countdown=i, lookup_interval=format!("{:#?}", pulse_lookup_interval), @@ -563,7 +563,7 @@ pub fn spawn_populate_kbuckets_bg( // selection (ref kademlia) let target = get_lookup_target(kbucket_index, local_node_id); - trace!(target: "discv5", + trace!(target: "net::discv5", %target, lookup_interval=format!("{:#?}", lookup_interval), "starting periodic lookup query" @@ -628,11 +628,11 @@ pub async fn lookup( ); match discv5.find_node(target).await { - Err(err) => trace!(target: "discv5", + Err(err) => trace!(target: "net::discv5", %err, "lookup query failed" ), - Ok(peers) => trace!(target: "discv5", + Ok(peers) => trace!(target: "net::discv5", target=format!("{:#?}", target), peers_count=peers.len(), peers=format!("[{:#}]", peers.iter() @@ -645,7 +645,7 @@ pub async fn lookup( // `Discv5::connected_peers` can be subset of sessions, not all peers make it // into kbuckets, e.g. incoming sessions from peers with // unreachable enrs - debug!(target: "discv5", + debug!(target: "net::discv5", connected_peers=discv5.connected_peers(), "connected peers in routing table" ); diff --git a/crates/net/discv5/src/network_stack_id.rs b/crates/net/discv5/src/network_stack_id.rs index f707c7de7b7f..a7b6944f355a 100644 --- a/crates/net/discv5/src/network_stack_id.rs +++ b/crates/net/discv5/src/network_stack_id.rs @@ -20,12 +20,11 @@ impl NetworkStackId { /// ENR fork ID kv-pair key, for an Optimism CL node. pub const OPSTACK: &'static [u8] = b"opstack"; - #[allow(clippy::missing_const_for_fn)] /// Returns the [`NetworkStackId`] that matches the given chain spec. 
pub fn id(chain: impl EthChainSpec) -> Option<&'static [u8]> { - if chain.chain().is_optimism() { + if chain.is_optimism() { return Some(Self::OPEL) - } else if chain.chain().is_ethereum() { + } else if chain.is_ethereum() { return Some(Self::ETH) } diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index 272db6fc6d1c..f4cc134ec484 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -18,6 +18,7 @@ reth-consensus.workspace = true reth-network-p2p.workspace = true reth-network-peers.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-storage-api.workspace = true reth-tasks.workspace = true @@ -27,6 +28,7 @@ reth-db-api = { workspace = true, optional = true } reth-testing-utils = { workspace = true, optional = true } # ethereum +alloy-consensus.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true @@ -44,9 +46,9 @@ reth-metrics.workspace = true metrics.workspace = true # misc -tracing.workspace = true rayon.workspace = true thiserror.workspace = true +tracing.workspace = true tempfile = { workspace = true, optional = true } itertools.workspace = true @@ -70,9 +72,16 @@ rand.workspace = true tempfile.workspace = true [features] +optimism = [ + "reth-primitives/optimism", + "reth-db?/optimism", + "reth-db-api?/optimism", + "reth-provider/optimism" +] + test-utils = [ - "dep:tempfile", - "dep:reth-db-api", + "tempfile", + "reth-db-api", "reth-db/test-utils", "reth-consensus/test-utils", "reth-network-p2p/test-utils", @@ -80,5 +89,6 @@ test-utils = [ "reth-chainspec/test-utils", "reth-primitives/test-utils", "reth-db-api?/test-utils", - "reth-provider/test-utils" + "reth-provider/test-utils", + "reth-primitives-traits/test-utils" ] diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 314f3a09084c..bebc51ad7725 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -14,6 +14,7 @@ use reth_network_p2p::{ error::{DownloadError, DownloadResult}, }; use reth_primitives::SealedHeader; +use reth_primitives_traits::size::InMemorySize; use reth_storage_api::HeaderProvider; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ @@ -36,7 +37,7 @@ pub struct BodiesDownloader { /// The bodies client client: Arc, /// The consensus client - consensus: Arc, + consensus: Arc>, /// The database handle provider: Provider, /// The maximum number of non-empty blocks per one request @@ -56,16 +57,16 @@ pub struct BodiesDownloader { /// Requests in progress in_progress_queue: BodiesRequestQueue, /// Buffered responses - buffered_responses: BinaryHeap, + buffered_responses: BinaryHeap>, /// Queued body responses that can be returned for insertion into the database. - queued_bodies: Vec, + queued_bodies: Vec>, /// The bodies downloader metrics. metrics: BodyDownloaderMetrics, } impl BodiesDownloader where - B: BodiesClient + 'static, + B: BodiesClient + 'static, Provider: HeaderProvider + Unpin + 'static, { /// Returns the next contiguous request. 
@@ -190,14 +191,14 @@ where } /// Queues bodies and sets the latest queued block number - fn queue_bodies(&mut self, bodies: Vec) { + fn queue_bodies(&mut self, bodies: Vec>) { self.latest_queued_block_number = Some(bodies.last().expect("is not empty").block_number()); self.queued_bodies.extend(bodies); self.metrics.queued_blocks.set(self.queued_bodies.len() as f64); } /// Removes the next response from the buffer. - fn pop_buffered_response(&mut self) -> Option { + fn pop_buffered_response(&mut self) -> Option> { let resp = self.buffered_responses.pop()?; self.metrics.buffered_responses.decrement(1.); self.buffered_blocks_size_bytes -= resp.size(); @@ -207,10 +208,10 @@ where } /// Adds a new response to the internal buffer - fn buffer_bodies_response(&mut self, response: Vec) { + fn buffer_bodies_response(&mut self, response: Vec>) { // take into account capacity let size = response.iter().map(BlockResponse::size).sum::() + - response.capacity() * mem::size_of::(); + response.capacity() * mem::size_of::>(); let response = OrderedBodiesResponse { resp: response, size }; let response_len = response.len(); @@ -224,7 +225,7 @@ where } /// Returns a response if it's first block number matches the next expected. - fn try_next_buffered(&mut self) -> Option> { + fn try_next_buffered(&mut self) -> Option>> { if let Some(next) = self.buffered_responses.peek() { let expected = self.next_expected_block_number(); let next_block_range = next.block_range(); @@ -250,7 +251,7 @@ where /// Returns the next batch of block bodies that can be returned if we have enough buffered /// bodies - fn try_split_next_batch(&mut self) -> Option> { + fn try_split_next_batch(&mut self) -> Option>> { if self.queued_bodies.len() >= self.stream_batch_size { let next_batch = self.queued_bodies.drain(..self.stream_batch_size).collect::>(); self.queued_bodies.shrink_to_fit(); @@ -282,12 +283,12 @@ where Self: BodyDownloader + 'static, { /// Spawns the downloader task via [`tokio::task::spawn`] - pub fn into_task(self) -> TaskDownloader { + pub fn into_task(self) -> TaskDownloader<::Body> { self.into_task_with(&TokioTaskExecutor::default()) } /// Convert the downloader into a [`TaskDownloader`] by spawning it via the given spawner. - pub fn into_task_with(self, spawner: &S) -> TaskDownloader + pub fn into_task_with(self, spawner: &S) -> TaskDownloader<::Body> where S: TaskSpawner, { @@ -297,9 +298,11 @@ where impl BodyDownloader for BodiesDownloader where - B: BodiesClient + 'static, + B: BodiesClient + 'static, Provider: HeaderProvider + Unpin + 'static, { + type Body = B::Body; + /// Set a new download range (exclusive). 
/// /// This method will drain all queued bodies, filter out ones outside the range and put them @@ -345,10 +348,10 @@ where impl Stream for BodiesDownloader where - B: BodiesClient + 'static, + B: BodiesClient + 'static, Provider: HeaderProvider + Unpin + 'static, { - type Item = BodyDownloaderResult; + type Item = BodyDownloaderResult; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -430,13 +433,13 @@ where } #[derive(Debug)] -struct OrderedBodiesResponse { - resp: Vec, +struct OrderedBodiesResponse { + resp: Vec>, /// The total size of the response in bytes size: usize, } -impl OrderedBodiesResponse { +impl OrderedBodiesResponse { /// Returns the block number of the first element /// /// # Panics @@ -467,21 +470,21 @@ impl OrderedBodiesResponse { } } -impl PartialEq for OrderedBodiesResponse { +impl PartialEq for OrderedBodiesResponse { fn eq(&self, other: &Self) -> bool { self.first_block_number() == other.first_block_number() } } -impl Eq for OrderedBodiesResponse {} +impl Eq for OrderedBodiesResponse {} -impl PartialOrd for OrderedBodiesResponse { +impl PartialOrd for OrderedBodiesResponse { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } -impl Ord for OrderedBodiesResponse { +impl Ord for OrderedBodiesResponse { fn cmp(&self, other: &Self) -> Ordering { self.first_block_number().cmp(&other.first_block_number()).reverse() } @@ -561,7 +564,7 @@ impl BodiesDownloaderBuilder { pub fn build( self, client: B, - consensus: Arc, + consensus: Arc>, provider: Provider, ) -> BodiesDownloader where diff --git a/crates/net/downloaders/src/bodies/noop.rs b/crates/net/downloaders/src/bodies/noop.rs index e70c534a0e39..494a5f2ef2ec 100644 --- a/crates/net/downloaders/src/bodies/noop.rs +++ b/crates/net/downloaders/src/bodies/noop.rs @@ -4,6 +4,7 @@ use reth_network_p2p::{ bodies::{downloader::BodyDownloader, response::BlockResponse}, error::{DownloadError, DownloadResult}, }; +use reth_primitives::BlockBody; use std::ops::RangeInclusive; /// A [`BodyDownloader`] implementation that does nothing. 
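The downloader changes above and below thread an associated `Body` type through `BodyDownloader` instead of hard-coding `reth_primitives::BlockBody`. A stripped-down sketch of the pattern, with toy trait definitions rather than the real reth ones:

```rust
use std::ops::RangeInclusive;

struct BlockBody;

trait BodyDownloader {
    /// The body type this downloader yields.
    type Body;

    fn set_download_range(&mut self, range: RangeInclusive<u64>) -> Result<(), String>;
}

struct NoopBodiesDownloader;

impl BodyDownloader for NoopBodiesDownloader {
    // Concrete implementations pin the associated type down...
    type Body = BlockBody;

    fn set_download_range(&mut self, _: RangeInclusive<u64>) -> Result<(), String> {
        Ok(())
    }
}

// ...while generic consumers only ever name `D::Body`.
fn drive<D: BodyDownloader>(downloader: &mut D) -> Result<(), String> {
    downloader.set_download_range(0..=100)
}

fn main() {
    drive(&mut NoopBodiesDownloader).unwrap();
}
```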
@@ -12,13 +13,15 @@ use std::ops::RangeInclusive; pub struct NoopBodiesDownloader; impl BodyDownloader for NoopBodiesDownloader { + type Body = BlockBody; + fn set_download_range(&mut self, _: RangeInclusive) -> DownloadResult<()> { Ok(()) } } impl Stream for NoopBodiesDownloader { - type Item = Result, DownloadError>; + type Item = Result>, DownloadError>; fn poll_next( self: std::pin::Pin<&mut Self>, diff --git a/crates/net/downloaders/src/bodies/queue.rs b/crates/net/downloaders/src/bodies/queue.rs index db7ff71cfc9e..aa6ec9e4af0f 100644 --- a/crates/net/downloaders/src/bodies/queue.rs +++ b/crates/net/downloaders/src/bodies/queue.rs @@ -9,6 +9,7 @@ use reth_network_p2p::{ error::DownloadResult, }; use reth_primitives::SealedHeader; +use reth_primitives_traits::InMemorySize; use std::{ pin::Pin, sync::Arc, @@ -57,7 +58,7 @@ where pub(crate) fn push_new_request( &mut self, client: Arc, - consensus: Arc, + consensus: Arc>, request: Vec, ) { // Set last max requested block number @@ -77,9 +78,9 @@ impl Stream for BodiesRequestQueue where - B: BodiesClient + 'static, + B: BodiesClient + 'static, { - type Item = DownloadResult>; + type Item = DownloadResult>>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.get_mut().inner.poll_next_unpin(cx) } } diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index c2b36732b51d..66287624f890 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ b/crates/net/downloaders/src/bodies/request.rs @@ -9,6 +9,7 @@ use reth_network_p2p::{ }; use reth_network_peers::{PeerId, WithPeerId}; use reth_primitives::{BlockBody, GotExpected, SealedBlock, SealedHeader}; +use reth_primitives_traits::InMemorySize; use std::{ collections::VecDeque, mem, @@ -26,7 +27,7 @@ /// It then proceeds to verify the downloaded bodies. In case of a validation error, /// the future will start over. /// -/// The future will filter out any empty headers (see [`reth_primitives::Header::is_empty`]) from +/// The future will filter out any empty headers (see [`alloy_consensus::Header::is_empty`]) from /// the request. If [`BodiesRequestFuture`] was initialized with all empty headers, no request will /// be dispatched and they will be immediately returned upon polling. /// @@ -38,7 +39,7 @@ /// and eventually disconnected. pub(crate) struct BodiesRequestFuture { client: Arc, - consensus: Arc, + consensus: Arc>, metrics: BodyDownloaderMetrics, /// Metrics for individual responses. This can be used to observe how the size (in bytes) of /// responses change while bodies are being downloaded. // Headers to download. The collection is shrunk as responses are buffered. pending_headers: VecDeque, /// Internal buffer for all blocks - buffer: Vec, + buffer: Vec>, fut: Option, /// Tracks how many bodies we requested in the last request. last_request_len: Option, @@ -59,7 +60,7 @@ where /// Returns an empty future. Use [`BodiesRequestFuture::with_headers`] to set the request. pub(crate) fn new( client: Arc, - consensus: Arc, + consensus: Arc>, metrics: BodyDownloaderMetrics, ) -> Self { Self { @@ -114,7 +115,10 @@ where /// Process block response. /// Returns an error if the response is invalid. 
- fn on_block_response(&mut self, response: WithPeerId>) -> DownloadResult<()> { + fn on_block_response(&mut self, response: WithPeerId>) -> DownloadResult<()> + where + B::Body: InMemorySize, + { let (peer_id, bodies) = response.split(); let request_len = self.last_request_len.unwrap_or_default(); let response_len = bodies.len(); @@ -157,7 +161,10 @@ where /// /// This method removes headers from the internal collection. /// If the response fails validation, then the header will be put back. - fn try_buffer_blocks(&mut self, bodies: Vec) -> DownloadResult<()> { + fn try_buffer_blocks(&mut self, bodies: Vec) -> DownloadResult<()> + where + B::Body: InMemorySize, + { let bodies_capacity = bodies.capacity(); let bodies_len = bodies.len(); let mut bodies = bodies.into_iter().peekable(); @@ -207,9 +214,9 @@ where impl Future for BodiesRequestFuture where - B: BodiesClient + 'static, + B: BodiesClient + 'static, { - type Output = DownloadResult>; + type Output = DownloadResult>>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); diff --git a/crates/net/downloaders/src/bodies/task.rs b/crates/net/downloaders/src/bodies/task.rs index eeafb7ab121b..de1638f3e665 100644 --- a/crates/net/downloaders/src/bodies/task.rs +++ b/crates/net/downloaders/src/bodies/task.rs @@ -23,15 +23,15 @@ pub const BODIES_TASK_BUFFER_SIZE: usize = 4; /// A [BodyDownloader] that drives a spawned [BodyDownloader] on a spawned task. #[derive(Debug)] #[pin_project] -pub struct TaskDownloader { +pub struct TaskDownloader { #[pin] - from_downloader: ReceiverStream, + from_downloader: ReceiverStream>, to_downloader: UnboundedSender>, } // === impl TaskDownloader === -impl TaskDownloader { +impl TaskDownloader { /// Spawns the given `downloader` via [`tokio::task::spawn`] returns a [`TaskDownloader`] that's /// connected to that task. /// @@ -45,12 +45,16 @@ impl TaskDownloader { /// use reth_consensus::Consensus; /// use reth_downloaders::bodies::{bodies::BodiesDownloaderBuilder, task::TaskDownloader}; /// use reth_network_p2p::bodies::client::BodiesClient; + /// use reth_primitives_traits::InMemorySize; /// use reth_storage_api::HeaderProvider; /// use std::sync::Arc; /// - /// fn t( + /// fn t< + /// B: BodiesClient + 'static, + /// Provider: HeaderProvider + Unpin + 'static, + /// >( /// client: Arc, - /// consensus: Arc, + /// consensus: Arc>, /// provider: Provider, /// ) { /// let downloader = BodiesDownloaderBuilder::default().build(client, consensus, provider); @@ -59,7 +63,7 @@ impl TaskDownloader { /// ``` pub fn spawn(downloader: T) -> Self where - T: BodyDownloader + 'static, + T: BodyDownloader + 'static, { Self::spawn_with(downloader, &TokioTaskExecutor::default()) } @@ -68,7 +72,7 @@ impl TaskDownloader { /// that's connected to that task. 
pub fn spawn_with(downloader: T, spawner: &S) -> Self where - T: BodyDownloader + 'static, + T: BodyDownloader + 'static, S: TaskSpawner, { let (bodies_tx, bodies_rx) = mpsc::channel(BODIES_TASK_BUFFER_SIZE); @@ -86,15 +90,17 @@ impl TaskDownloader { } } -impl BodyDownloader for TaskDownloader { +impl BodyDownloader for TaskDownloader { + type Body = B; + fn set_download_range(&mut self, range: RangeInclusive) -> DownloadResult<()> { let _ = self.to_downloader.send(range); Ok(()) } } -impl Stream for TaskDownloader { - type Item = BodyDownloaderResult; +impl Stream for TaskDownloader { + type Item = BodyDownloaderResult; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.project().from_downloader.poll_next(cx) @@ -102,9 +108,9 @@ } /// A [`BodyDownloader`] that runs on its own task -struct SpawnedDownloader { +struct SpawnedDownloader { updates: UnboundedReceiverStream>, - bodies_tx: PollSender, + bodies_tx: PollSender>, downloader: T, } diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index 5b21c82fb3f8..486d4a05127a 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -1,7 +1,8 @@ use std::{collections::HashMap, io, path::Path}; +use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{BlockHash, BlockNumber, Sealable, B256}; +use alloy_primitives::{BlockHash, BlockNumber, B256}; use futures::Future; use itertools::Either; use reth_network_p2p::{ @@ -12,16 +13,15 @@ priority::Priority, }; use reth_network_peers::PeerId; -use reth_primitives::{BlockBody, Header, SealedHeader}; +use reth_primitives::{BlockBody, SealedHeader}; use thiserror::Error; use tokio::{fs::File, io::AsyncReadExt}; use tokio_stream::StreamExt; use tokio_util::codec::FramedRead; use tracing::{debug, trace, warn}; -use crate::receipt_file_client::FromReceiptReader; - use super::file_codec::BlockFileCodec; +use crate::receipt_file_client::FromReceiptReader; /// Default byte length of chunk to read from chain file. /// @@ -115,11 +115,7 @@ impl FileClient { /// Clones and returns the highest header this client has, or `None` if empty. Seals the header /// before returning. 
pub fn tip_header(&self) -> Option { - self.headers.get(&self.max_block()?).map(|h| { - let sealed = h.clone().seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }) + self.headers.get(&self.max_block()?).map(|h| SealedHeader::seal(h.clone())) } /// Returns true if all blocks are canonical (no gaps) @@ -265,6 +261,7 @@ impl FromReader for FileClient { } impl HeadersClient for FileClient { + type Header = Header; type Output = HeadersFut; fn get_headers_with_priority( @@ -315,6 +312,7 @@ impl HeadersClient for FileClient { } impl BodiesClient for FileClient { + type Body = BlockBody; type Output = BodiesFut; fn get_block_bodies_with_priority( diff --git a/crates/net/downloaders/src/headers/noop.rs b/crates/net/downloaders/src/headers/noop.rs index 210655f7e26e..58da73123878 100644 --- a/crates/net/downloaders/src/headers/noop.rs +++ b/crates/net/downloaders/src/headers/noop.rs @@ -1,3 +1,4 @@ +use alloy_consensus::Header; use futures::Stream; use reth_network_p2p::headers::{ downloader::{HeaderDownloader, SyncTarget}, @@ -11,6 +12,8 @@ use reth_primitives::SealedHeader; pub struct NoopHeaderDownloader; impl HeaderDownloader for NoopHeaderDownloader { + type Header = Header; + fn update_local_head(&mut self, _: SealedHeader) {} fn update_sync_target(&mut self, _: SyncTarget) {} @@ -19,7 +22,7 @@ impl HeaderDownloader for NoopHeaderDownloader { } impl Stream for NoopHeaderDownloader { - type Item = Result, HeadersDownloaderError>; + type Item = Result, HeadersDownloaderError
>; fn poll_next( self: std::pin::Pin<&mut Self>, diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index 941d140b39d4..3960ae6e8126 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -2,8 +2,9 @@ use super::task::TaskDownloader; use crate::metrics::HeaderDownloaderMetrics; +use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{BlockNumber, Sealable, B256}; +use alloy_primitives::{BlockNumber, B256}; use futures::{stream::Stream, FutureExt}; use futures_util::{stream::FuturesUnordered, StreamExt}; use rayon::prelude::*; @@ -12,14 +13,14 @@ use reth_consensus::Consensus; use reth_network_p2p::{ error::{DownloadError, DownloadResult, PeerRequestResult}, headers::{ - client::{HeadersClient, HeadersDirection, HeadersRequest}, + client::{HeadersClient, HeadersRequest}, downloader::{validate_header_download, HeaderDownloader, SyncTarget}, error::{HeadersDownloaderError, HeadersDownloaderResult}, }, priority::Priority, }; use reth_network_peers::PeerId; -use reth_primitives::{GotExpected, Header, SealedHeader}; +use reth_primitives::{GotExpected, SealedHeader}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ cmp::{Ordering, Reverse}, @@ -39,14 +40,14 @@ const REQUESTS_PER_PEER_MULTIPLIER: usize = 5; /// Wrapper for internal downloader errors. #[derive(Error, Debug)] -enum ReverseHeadersDownloaderError { +enum ReverseHeadersDownloaderError { #[error(transparent)] - Downloader(#[from] HeadersDownloaderError), + Downloader(#[from] HeadersDownloaderError), #[error(transparent)] Response(#[from] Box), } -impl From for ReverseHeadersDownloaderError { +impl From for ReverseHeadersDownloaderError { fn from(value: HeadersResponseError) -> Self { Self::Response(Box::new(value)) } @@ -59,24 +60,25 @@ impl From for ReverseHeadersDownloaderError { /// tries to fill the gap between the local head of the node and the chain tip by issuing multiple /// requests at a time but yielding them in batches on [`Stream::poll_next`]. /// -/// **Note:** This downloader downloads in reverse, see also [`HeadersDirection::Falling`], this -/// means the batches of headers that this downloader yields will start at the chain tip and move -/// towards the local head: falling block numbers. +/// **Note:** This downloader downloads in reverse, see also +/// [`reth_network_p2p::headers::client::HeadersDirection`], this means the batches of headers that +/// this downloader yields will start at the chain tip and move towards the local head: falling +/// block numbers. #[must_use = "Stream does nothing unless polled"] #[derive(Debug)] pub struct ReverseHeadersDownloader { /// Consensus client used to validate headers - consensus: Arc, + consensus: Arc>, /// Client used to download headers. client: Arc, /// The local head of the chain. - local_head: Option, + local_head: Option>, /// Block we want to close the gap to. sync_target: Option, /// The block number to use for requests. next_request_block_number: u64, /// Keeps track of the block we need to validate next. 
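From here on the reverse headers downloader is generic over `H::Header`, so the following hunks consistently replace concrete field accesses like `h.number` with a trait accessor `h.number()`. A toy sketch of that move (the trait here is a stand-in for the real `alloy_consensus::BlockHeader`):

```rust
/// Stand-in for the `alloy_consensus::BlockHeader` accessor trait.
trait BlockHeader {
    fn number(&self) -> u64;
}

struct Header {
    number: u64,
}

impl BlockHeader for Header {
    fn number(&self) -> u64 {
        self.number
    }
}

/// Generic over any header type: only the trait surface is available,
/// so `h.number()` replaces the old concrete `h.number` field access.
fn local_block_number<H: BlockHeader>(local_head: Option<&H>) -> Option<u64> {
    local_head.map(|h| h.number())
}

fn main() {
    let head = Header { number: 7 };
    assert_eq!(local_block_number(Some(&head)), Some(7));
}
```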
- lowest_validated_header: Option, + lowest_validated_header: Option>, /// Tip block number to start validating from (in reverse) next_chain_tip_block_number: u64, /// The batch size per one request @@ -97,11 +99,11 @@ pub struct ReverseHeadersDownloader { /// requests in progress in_progress_queue: FuturesUnordered>, /// Buffered, unvalidated responses - buffered_responses: BinaryHeap, + buffered_responses: BinaryHeap>, /// Buffered, _sorted_ and validated headers ready to be returned. /// /// Note: headers are sorted from high to low - queued_validated_headers: Vec, + queued_validated_headers: Vec>, /// Header downloader metrics. metrics: HeaderDownloaderMetrics, } @@ -110,7 +112,7 @@ pub struct ReverseHeadersDownloader { impl ReverseHeadersDownloader where - H: HeadersClient + 'static, + H: HeadersClient + 'static, { /// Convenience method to create a [`ReverseHeadersDownloaderBuilder`] without importing it pub fn builder() -> ReverseHeadersDownloaderBuilder { @@ -120,7 +122,7 @@ where /// Returns the block number the local node is at. #[inline] fn local_block_number(&self) -> Option { - self.local_head.as_ref().map(|h| h.number) + self.local_head.as_ref().map(|h| h.number()) } /// Returns the existing local head block number @@ -130,7 +132,7 @@ where /// If the local head has not been set. #[inline] fn existing_local_block_number(&self) -> BlockNumber { - self.local_head.as_ref().expect("is initialized").number + self.local_head.as_ref().expect("is initialized").number() } /// Returns the existing sync target. @@ -197,14 +199,14 @@ where /// `lowest_validated_header`. /// /// This only returns `None` if we haven't fetched the initial chain tip yet. - fn lowest_validated_header(&self) -> Option<&SealedHeader> { + fn lowest_validated_header(&self) -> Option<&SealedHeader> { self.queued_validated_headers.last().or(self.lowest_validated_header.as_ref()) } /// Validate that the received header matches the expected sync target. fn validate_sync_target( &self, - header: &SealedHeader, + header: &SealedHeader, request: HeadersRequest, peer_id: PeerId, ) -> Result<(), Box> { @@ -220,12 +222,12 @@ where ), })) } - SyncTargetBlock::Number(number) if header.number != number => { + SyncTargetBlock::Number(number) if header.number() != number => { Err(Box::new(HeadersResponseError { request, peer_id: Some(peer_id), error: DownloadError::InvalidTipNumber(GotExpected { - got: header.number, + got: header.number(), expected: number, }), })) @@ -244,20 +246,12 @@ where fn process_next_headers( &mut self, request: HeadersRequest, - headers: Vec
, + headers: Vec, peer_id: PeerId, - ) -> Result<(), ReverseHeadersDownloaderError> { + ) -> Result<(), ReverseHeadersDownloaderError> { let mut validated = Vec::with_capacity(headers.len()); - let sealed_headers = headers - .into_par_iter() - .map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - - SealedHeader::new(header, seal) - }) - .collect::>(); + let sealed_headers = headers.into_par_iter().map(SealedHeader::seal).collect::>(); for parent in sealed_headers { // Validate that the header is the parent header of the last validated header. if let Some(validated_header) = @@ -280,17 +274,17 @@ where if let Some((last_header, head)) = validated .last_mut() .zip(self.local_head.as_ref()) - .filter(|(last, head)| last.number == head.number + 1) + .filter(|(last, head)| last.number() == head.number() + 1) { // Every header must be valid on its own - if let Err(error) = self.consensus.validate_header(last_header) { + if let Err(error) = self.consensus.validate_header(&*last_header) { trace!(target: "downloaders::headers", %error, "Failed to validate header"); return Err(HeadersResponseError { request, peer_id: Some(peer_id), error: DownloadError::HeaderValidation { hash: head.hash(), - number: head.number, + number: head.number(), error: Box::new(error), }, } @@ -299,9 +293,9 @@ where // If the header is valid on its own, but not against its parent, we return it as // detached head error. - if let Err(error) = self.consensus.validate_header_against_parent(last_header, head) { + if let Err(error) = self.consensus.validate_header_against_parent(&*last_header, head) { // Replace the last header with a detached variant - error!(target: "downloaders::headers", %error, number = last_header.number, hash = ?last_header.hash(), "Header cannot be attached to known canonical chain"); + error!(target: "downloaders::headers", %error, number = last_header.number(), hash = ?last_header.hash(), "Header cannot be attached to known canonical chain"); return Err(HeadersDownloaderError::DetachedHead { local_head: Box::new(head.clone()), header: Box::new(last_header.clone()), @@ -313,7 +307,7 @@ where // update tracked block info (falling block number) self.next_chain_tip_block_number = - validated.last().expect("exists").number.saturating_sub(1); + validated.last().expect("exists").number().saturating_sub(1); self.queued_validated_headers.extend(validated); Ok(()) @@ -345,7 +339,7 @@ where let skip = self .queued_validated_headers .iter() - .take_while(|last| last.number > target_block_number) + .take_while(|last| last.number() > target_block_number) .count(); // removes all headers that are higher than current target self.queued_validated_headers.drain(..skip); @@ -360,8 +354,8 @@ where /// Handles the response for the request for the sync target fn on_sync_target_outcome( &mut self, - response: HeadersRequestOutcome, - ) -> Result<(), ReverseHeadersDownloaderError> { + response: HeadersRequestOutcome, + ) -> Result<(), ReverseHeadersDownloaderError> { let sync_target = self.existing_sync_target(); let HeadersRequestOutcome { request, outcome } = response; match outcome { @@ -372,7 +366,7 @@ where self.metrics.total_downloaded.increment(headers.len() as u64); // sort headers from highest to lowest block number - headers.sort_unstable_by_key(|h| Reverse(h.number)); + headers.sort_unstable_by_key(|h| Reverse(h.number())); if headers.is_empty() { return Err(HeadersResponseError { @@ -383,9 +377,8 @@ where .into()) } - let sealed_target = headers.swap_remove(0).seal_slow(); - let 
(header, seal) = sealed_target.into_parts(); - let target = SealedHeader::new(header, seal); + let header = headers.swap_remove(0); + let target = SealedHeader::seal(header); match sync_target { SyncTargetBlock::Hash(hash) | SyncTargetBlock::HashAndNumber { hash, .. } => { @@ -401,12 +394,12 @@ where } } SyncTargetBlock::Number(number) => { - if target.number != number { + if target.number() != number { return Err(HeadersResponseError { request, peer_id: Some(peer_id), error: DownloadError::InvalidTipNumber(GotExpected { - got: target.number, + got: target.number(), expected: number, }), } @@ -415,17 +408,17 @@ where } } - trace!(target: "downloaders::headers", head=?self.local_block_number(), hash=?target.hash(), number=%target.number, "Received sync target"); + trace!(target: "downloaders::headers", head=?self.local_block_number(), hash=?target.hash(), number=%target.number(), "Received sync target"); // This is the next block we need to start issuing requests from - let parent_block_number = target.number.saturating_sub(1); - self.on_block_number_update(target.number, parent_block_number); + let parent_block_number = target.number().saturating_sub(1); + self.on_block_number_update(target.number(), parent_block_number); self.queued_validated_headers.push(target); // try to validate all buffered responses blocked by this successful response self.try_validate_buffered() - .map(Err::<(), ReverseHeadersDownloaderError>) + .map(Err::<(), ReverseHeadersDownloaderError>) .transpose()?; Ok(()) @@ -439,8 +432,8 @@ where /// Invoked when we received a response fn on_headers_outcome( &mut self, - response: HeadersRequestOutcome, - ) -> Result<(), ReverseHeadersDownloaderError> { + response: HeadersRequestOutcome, + ) -> Result<(), ReverseHeadersDownloaderError> { let requested_block_number = response.block_number(); let HeadersRequestOutcome { request, outcome } = response; @@ -475,19 +468,19 @@ where } // sort headers from highest to lowest block number - headers.sort_unstable_by_key(|h| Reverse(h.number)); + headers.sort_unstable_by_key(|h| Reverse(h.number())); // validate the response let highest = &headers[0]; - trace!(target: "downloaders::headers", requested_block_number, highest=?highest.number, "Validating non-empty headers response"); + trace!(target: "downloaders::headers", requested_block_number, highest=?highest.number(), "Validating non-empty headers response"); - if highest.number != requested_block_number { + if highest.number() != requested_block_number { return Err(HeadersResponseError { request, peer_id: Some(peer_id), error: DownloadError::HeadersResponseStartBlockMismatch(GotExpected { - got: highest.number, + got: highest.number(), expected: requested_block_number, }), } @@ -495,14 +488,14 @@ where } // check if the response is the next expected - if highest.number == self.next_chain_tip_block_number { + if highest.number() == self.next_chain_tip_block_number { // is next response, validate it self.process_next_headers(request, headers, peer_id)?; // try to validate all buffered responses blocked by this successful response self.try_validate_buffered() - .map(Err::<(), ReverseHeadersDownloaderError>) + .map(Err::<(), ReverseHeadersDownloaderError>) .transpose()?; - } else if highest.number > self.existing_local_block_number() { + } else if highest.number() > self.existing_local_block_number() { self.metrics.buffered_responses.increment(1.); // can't validate yet self.buffered_responses.push(OrderedHeadersResponse { @@ -549,7 +542,7 @@ where /// Attempts to validate the 
buffered responses /// /// Returns an error if the next expected response was popped, but failed validation. - fn try_validate_buffered(&mut self) -> Option { + fn try_validate_buffered(&mut self) -> Option> { loop { // Check to see if we've already received the next value let next_response = self.buffered_responses.peek_mut()?; @@ -575,7 +568,7 @@ where /// Returns the request for the `sync_target` header. const fn get_sync_target_request(&self, start: BlockHashOrNumber) -> HeadersRequest { - HeadersRequest { start, limit: 1, direction: HeadersDirection::Falling } + HeadersRequest::falling(start, 1) } /// Starts a request future @@ -598,7 +591,11 @@ where } /// Validate whether the header is valid in relation to it's parent - fn validate(&self, header: &SealedHeader, parent: &SealedHeader) -> DownloadResult<()> { + fn validate( + &self, + header: &SealedHeader, + parent: &SealedHeader, + ) -> DownloadResult<()> { validate_header_download(&self.consensus, header, parent) } @@ -614,7 +611,7 @@ where } /// Splits off the next batch of headers - fn split_next_batch(&mut self) -> Vec { + fn split_next_batch(&mut self) -> Vec> { let batch_size = self.stream_batch_size.min(self.queued_validated_headers.len()); let mut rem = self.queued_validated_headers.split_off(batch_size); std::mem::swap(&mut rem, &mut self.queued_validated_headers); @@ -644,12 +641,15 @@ where Self: HeaderDownloader + 'static, { /// Spawns the downloader task via [`tokio::task::spawn`] - pub fn into_task(self) -> TaskDownloader { + pub fn into_task(self) -> TaskDownloader<::Header> { self.into_task_with(&TokioTaskExecutor::default()) } /// Convert the downloader into a [`TaskDownloader`] by spawning it via the given `spawner`. - pub fn into_task_with(self, spawner: &S) -> TaskDownloader + pub fn into_task_with( + self, + spawner: &S, + ) -> TaskDownloader<::Header> where S: TaskSpawner, { @@ -659,11 +659,17 @@ where impl HeaderDownloader for ReverseHeadersDownloader where - H: HeadersClient + 'static, + H: HeadersClient + 'static, { - fn update_local_head(&mut self, head: SealedHeader) { + type Header = H::Header; + + fn update_local_head(&mut self, head: SealedHeader) { // ensure we're only yielding headers that are in range and follow the current local head. - while self.queued_validated_headers.last().is_some_and(|last| last.number <= head.number) { + while self + .queued_validated_headers + .last() + .is_some_and(|last| last.number() <= head.number()) + { // headers are sorted high to low self.queued_validated_headers.pop(); } @@ -686,7 +692,7 @@ where .queued_validated_headers .first() .filter(|h| h.hash() == tip) - .map(|h| h.number) + .map(|h| h.number()) { self.sync_target = Some(new_sync_target.with_number(target_number)); return @@ -740,9 +746,9 @@ where impl Stream for ReverseHeadersDownloader where - H: HeadersClient + 'static, + H: HeadersClient + 'static, { - type Item = HeadersDownloaderResult>; + type Item = HeadersDownloaderResult>, H::Header>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -883,18 +889,18 @@ where } } -/// A future that returns a list of [`Header`] on success. +/// A future that returns a list of headers on success. 
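The hunk above replaces the open-coded `HeadersRequest` struct literal with a `falling` constructor. A sketch of that convenience-constructor pattern (`start` is simplified to a block number here; the real field is a `BlockHashOrNumber`):

```rust
#[derive(Debug, PartialEq)]
enum HeadersDirection {
    Rising,
    Falling,
}

#[derive(Debug)]
struct HeadersRequest {
    start: u64,
    limit: u64,
    direction: HeadersDirection,
}

impl HeadersRequest {
    /// Requests `limit` headers walking backwards from `start`,
    /// replacing the struct literal at every call site.
    const fn falling(start: u64, limit: u64) -> Self {
        Self { start, limit, direction: HeadersDirection::Falling }
    }
}

fn main() {
    let req = HeadersRequest::falling(1_000, 1);
    assert_eq!(req.direction, HeadersDirection::Falling);
    assert_eq!((req.start, req.limit), (1_000, 1));
}
```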
#[derive(Debug)] struct HeadersRequestFuture { request: Option, fut: F, } -impl Future for HeadersRequestFuture +impl Future for HeadersRequestFuture where - F: Future>> + Sync + Send + Unpin, + F: Future>> + Sync + Send + Unpin, { - type Output = HeadersRequestOutcome; + type Output = HeadersRequestOutcome; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); @@ -906,14 +912,14 @@ where } /// The outcome of the [`HeadersRequestFuture`] -struct HeadersRequestOutcome { +struct HeadersRequestOutcome { request: HeadersRequest, - outcome: PeerRequestResult>, + outcome: PeerRequestResult>, } // === impl OrderedHeadersResponse === -impl HeadersRequestOutcome { +impl HeadersRequestOutcome { fn block_number(&self) -> u64 { self.request.start.as_number().expect("is number") } @@ -921,35 +927,35 @@ impl HeadersRequestOutcome { /// Wrapper type to order responses #[derive(Debug)] -struct OrderedHeadersResponse { - headers: Vec
, +struct OrderedHeadersResponse { + headers: Vec, request: HeadersRequest, peer_id: PeerId, } // === impl OrderedHeadersResponse === -impl OrderedHeadersResponse { +impl OrderedHeadersResponse { fn block_number(&self) -> u64 { self.request.start.as_number().expect("is number") } } -impl PartialEq for OrderedHeadersResponse { +impl PartialEq for OrderedHeadersResponse { fn eq(&self, other: &Self) -> bool { self.block_number() == other.block_number() } } -impl Eq for OrderedHeadersResponse {} +impl Eq for OrderedHeadersResponse {} -impl PartialOrd for OrderedHeadersResponse { +impl PartialOrd for OrderedHeadersResponse { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } -impl Ord for OrderedHeadersResponse { +impl Ord for OrderedHeadersResponse { fn cmp(&self, other: &Self) -> Ordering { self.block_number().cmp(&other.block_number()) } @@ -1156,7 +1162,11 @@ impl ReverseHeadersDownloaderBuilder { /// Build [`ReverseHeadersDownloader`] with provided consensus /// and header client implementations - pub fn build(self, client: H, consensus: Arc) -> ReverseHeadersDownloader + pub fn build( + self, + client: H, + consensus: Arc>, + ) -> ReverseHeadersDownloader where H: HeadersClient + 'static, { @@ -1207,13 +1217,14 @@ fn calc_next_request( let diff = next_request_block_number - local_head; let limit = diff.min(request_limit); let start = next_request_block_number; - HeadersRequest { start: start.into(), limit, direction: HeadersDirection::Falling } + HeadersRequest::falling(start.into(), limit) } #[cfg(test)] mod tests { use super::*; use crate::headers::test_utils::child_header; + use alloy_consensus::Header; use assert_matches::assert_matches; use reth_consensus::test_utils::TestConsensus; use reth_network_p2p::test_utils::TestHeadersClient; @@ -1296,7 +1307,7 @@ mod tests { assert!(downloader.sync_target_request.is_some()); downloader.sync_target_request.take(); - let target = SyncTarget::Gap(SealedHeader::new(Header::default(), B256::random())); + let target = SyncTarget::Gap(SealedHeader::new(Default::default(), B256::random())); downloader.update_sync_target(target); assert!(downloader.sync_target_request.is_none()); assert_matches!( @@ -1310,7 +1321,7 @@ mod tests { fn test_head_update() { let client = Arc::new(TestHeadersClient::default()); - let header = SealedHeader::default(); + let header: SealedHeader = SealedHeader::default(); let mut downloader = ReverseHeadersDownloaderBuilder::default() .build(Arc::clone(&client), Arc::new(TestConsensus::default())); @@ -1373,7 +1384,7 @@ mod tests { fn test_resp_order() { let mut heap = BinaryHeap::new(); let hi = 1u64; - heap.push(OrderedHeadersResponse { + heap.push(OrderedHeadersResponse::
{ headers: vec![], request: HeadersRequest { start: hi.into(), limit: 0, direction: Default::default() }, peer_id: Default::default(), diff --git a/crates/net/downloaders/src/headers/task.rs b/crates/net/downloaders/src/headers/task.rs index b3fa27fde59c..81c4cd80da3f 100644 --- a/crates/net/downloaders/src/headers/task.rs +++ b/crates/net/downloaders/src/headers/task.rs @@ -22,15 +22,15 @@ pub const HEADERS_TASK_BUFFER_SIZE: usize = 8; /// A [HeaderDownloader] that drives a spawned [HeaderDownloader] on a spawned task. #[derive(Debug)] #[pin_project] -pub struct TaskDownloader { +pub struct TaskDownloader { #[pin] - from_downloader: ReceiverStream>>, - to_downloader: UnboundedSender, + from_downloader: ReceiverStream>, H>>, + to_downloader: UnboundedSender>, } // === impl TaskDownloader === -impl TaskDownloader { +impl TaskDownloader { /// Spawns the given `downloader` via [`tokio::task::spawn`] and returns a [`TaskDownloader`] /// that's connected to that task. /// @@ -46,7 +46,8 @@ impl TaskDownloader { /// # use reth_downloaders::headers::task::TaskDownloader; /// # use reth_consensus::Consensus; /// # use reth_network_p2p::headers::client::HeadersClient; - /// # fn t(consensus:Arc, client: Arc) { + /// # use reth_primitives_traits::BlockHeader; + /// # fn t + 'static>(consensus:Arc>, client: Arc) { /// let downloader = ReverseHeadersDownloader::::builder().build( /// client, /// consensus @@ -55,7 +56,7 @@ impl TaskDownloader { /// # } pub fn spawn(downloader: T) -> Self where - T: HeaderDownloader + 'static, + T: HeaderDownloader
+ 'static, { Self::spawn_with(downloader, &TokioTaskExecutor::default()) } @@ -64,7 +65,7 @@ impl TaskDownloader { /// that's connected to that task. pub fn spawn_with(downloader: T, spawner: &S) -> Self where - T: HeaderDownloader + 'static, + T: HeaderDownloader
+ 'static, S: TaskSpawner, { let (headers_tx, headers_rx) = mpsc::channel(HEADERS_TASK_BUFFER_SIZE); @@ -81,12 +82,14 @@ impl TaskDownloader { } } -impl HeaderDownloader for TaskDownloader { - fn update_sync_gap(&mut self, head: SealedHeader, target: SyncTarget) { +impl HeaderDownloader for TaskDownloader { + type Header = H; + + fn update_sync_gap(&mut self, head: SealedHeader, target: SyncTarget) { let _ = self.to_downloader.send(DownloaderUpdates::UpdateSyncGap(head, target)); } - fn update_local_head(&mut self, head: SealedHeader) { + fn update_local_head(&mut self, head: SealedHeader) { let _ = self.to_downloader.send(DownloaderUpdates::UpdateLocalHead(head)); } @@ -99,8 +102,8 @@ impl HeaderDownloader for TaskDownloader { } } -impl Stream for TaskDownloader { - type Item = HeadersDownloaderResult>; +impl Stream for TaskDownloader { + type Item = HeadersDownloaderResult>, H>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.project().from_downloader.poll_next(cx) @@ -108,9 +111,10 @@ } /// A [`HeaderDownloader`] that runs on its own task -struct SpawnedDownloader { - updates: UnboundedReceiverStream, - headers_tx: PollSender>>, +#[expect(clippy::complexity)] +struct SpawnedDownloader { + updates: UnboundedReceiverStream>, + headers_tx: PollSender>, T::Header>>, downloader: T, } @@ -170,9 +174,9 @@ impl Future for SpawnedDownloader { } /// Commands delegated to the spawned [`HeaderDownloader`] -enum DownloaderUpdates { - UpdateSyncGap(SealedHeader, SyncTarget), - UpdateLocalHead(SealedHeader), +enum DownloaderUpdates { + UpdateSyncGap(SealedHeader, SyncTarget), + UpdateLocalHead(SealedHeader), UpdateSyncTarget(SyncTarget), SetBatchSize(usize), } diff --git a/crates/net/downloaders/src/headers/test_utils.rs b/crates/net/downloaders/src/headers/test_utils.rs index 923ad9969373..baea409f20e7 100644 --- a/crates/net/downloaders/src/headers/test_utils.rs +++ b/crates/net/downloaders/src/headers/test_utils.rs @@ -2,7 +2,6 @@ #![allow(dead_code)] -use alloy_primitives::Sealable; use reth_primitives::SealedHeader; /// Returns a new [`SealedHeader`] that's the child header of the given `parent`. @@ -10,7 +9,5 @@ pub(crate) fn child_header(parent: &SealedHeader) -> SealedHeader { let mut child = parent.as_ref().clone(); child.number += 1; child.parent_hash = parent.hash_slow(); - let sealed = child.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) + SealedHeader::seal(child) } diff --git a/crates/net/downloaders/src/test_utils/bodies_client.rs b/crates/net/downloaders/src/test_utils/bodies_client.rs index be8373f8235d..d84d92363ee2 100644 --- a/crates/net/downloaders/src/test_utils/bodies_client.rs +++ b/crates/net/downloaders/src/test_utils/bodies_client.rs @@ -78,6 +78,7 @@ impl DownloadClient for TestBodiesClient { } impl BodiesClient for TestBodiesClient { + type Body = BlockBody; + type Output = BodiesFut; fn get_block_bodies_with_priority( diff --git a/crates/net/ecies/src/algorithm.rs b/crates/net/ecies/src/algorithm.rs index 83dcc657bce9..f799b6c7f6c0 100644 --- a/crates/net/ecies/src/algorithm.rs +++ b/crates/net/ecies/src/algorithm.rs @@ -650,6 +650,8 @@ impl ECIES { out.extend_from_slice(tag.as_slice()); } + /// Reads the `RLPx` header from the slice, setting up the MAC and AES, returning the body + /// size contained in the header. 
pub fn read_header(&mut self, data: &mut [u8]) -> Result { // If the data is not large enough to fit the header and mac bytes, return an error // @@ -677,7 +679,7 @@ impl ECIES { self.body_size = Some(body_size); - Ok(self.body_size.unwrap()) + Ok(body_size) } pub const fn header_len() -> usize { @@ -686,7 +688,7 @@ impl ECIES { pub fn body_len(&self) -> usize { let len = self.body_size.unwrap(); - (if len % 16 == 0 { len } else { (len / 16 + 1) * 16 }) + 16 + Self::align_16(len) + 16 } #[cfg(test)] @@ -697,7 +699,7 @@ impl ECIES { } pub fn write_body(&mut self, out: &mut BytesMut, data: &[u8]) { - let len = if data.len() % 16 == 0 { data.len() } else { (data.len() / 16 + 1) * 16 }; + let len = Self::align_16(data.len()); let old_len = out.len(); out.resize(old_len + len, 0); @@ -730,6 +732,14 @@ impl ECIES { self.ingress_aes.as_mut().unwrap().apply_keystream(ret); Ok(split_at_mut(ret, size)?.0) } + + /// Returns `num` aligned to 16. + /// + /// `` + #[inline] + const fn align_16(num: usize) -> usize { + (num + (16 - 1)) & !(16 - 1) + } } #[cfg(test)] diff --git a/crates/net/ecies/src/codec.rs b/crates/net/ecies/src/codec.rs index c3e9b8d58cc9..b5a10284cf20 100644 --- a/crates/net/ecies/src/codec.rs +++ b/crates/net/ecies/src/codec.rs @@ -1,12 +1,15 @@ //! This contains the main codec for `RLPx` ECIES messages -use crate::{algorithm::ECIES, ECIESError, EgressECIESValue, IngressECIESValue}; +use crate::{algorithm::ECIES, ECIESError, ECIESErrorImpl, EgressECIESValue, IngressECIESValue}; use alloy_primitives::{bytes::BytesMut, B512 as PeerId}; use secp256k1::SecretKey; use std::{fmt::Debug, io}; use tokio_util::codec::{Decoder, Encoder}; use tracing::{instrument, trace}; +/// The max size that the initial handshake packet can be. Currently 2KiB. +const MAX_INITIAL_HANDSHAKE_SIZE: usize = 2048; + /// Tokio codec for ECIES #[derive(Debug)] pub struct ECIESCodec { @@ -26,6 +29,11 @@ pub enum ECIESState { /// message containing the nonce and other metadata. Ack, + /// This is the same as the [`ECIESState::Header`] stage, but occurs only after the first + /// [`ECIESState::Ack`] message. This is so that the initial handshake message can be properly + /// validated. + InitialHeader, + /// The third stage of the ECIES handshake, where header is parsed, message integrity checks /// performed, and message is decrypted. 
Header, @@ -70,7 +78,7 @@ impl Decoder for ECIESCodec { self.ecies.read_auth(&mut buf.split_to(total_size))?; - self.state = ECIESState::Header; + self.state = ECIESState::InitialHeader; return Ok(Some(IngressECIESValue::AuthReceive(self.ecies.remote_id()))) } ECIESState::Ack => { @@ -89,9 +97,29 @@ impl Decoder for ECIESCodec { self.ecies.read_ack(&mut buf.split_to(total_size))?; - self.state = ECIESState::Header; + self.state = ECIESState::InitialHeader; return Ok(Some(IngressECIESValue::Ack)) } + ECIESState::InitialHeader => { + if buf.len() < ECIES::header_len() { + trace!("current len {}, need {}", buf.len(), ECIES::header_len()); + return Ok(None) + } + + let body_size = + self.ecies.read_header(&mut buf.split_to(ECIES::header_len()))?; + + if body_size > MAX_INITIAL_HANDSHAKE_SIZE { + trace!(?body_size, max=?MAX_INITIAL_HANDSHAKE_SIZE, "Header exceeds max initial handshake size"); + return Err(ECIESErrorImpl::InitialHeaderBodyTooLarge { + body_size, + max_body_size: MAX_INITIAL_HANDSHAKE_SIZE, + } + .into()) + } + + self.state = ECIESState::Body; + } ECIESState::Header => { if buf.len() < ECIES::header_len() { trace!("current len {}, need {}", buf.len(), ECIES::header_len()); @@ -131,7 +159,7 @@ impl Encoder for ECIESCodec { Ok(()) } EgressECIESValue::Ack => { - self.state = ECIESState::Header; + self.state = ECIESState::InitialHeader; self.ecies.write_ack(buf); Ok(()) } diff --git a/crates/net/ecies/src/error.rs b/crates/net/ecies/src/error.rs index 79965f73303f..9dabfc16183d 100644 --- a/crates/net/ecies/src/error.rs +++ b/crates/net/ecies/src/error.rs @@ -62,6 +62,14 @@ pub enum ECIESErrorImpl { /// The encrypted data is not large enough for all fields #[error("encrypted data is not large enough for all fields")] EncryptedDataTooSmall, + /// The initial header body is too large. 
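The `align_16` helper above replaces two open-coded `len % 16` branches: because 16 is a power of two, adding 15 and masking off the low four bits rounds up to the next multiple of 16 without a branch. A minimal standalone sketch of that identity (plain Rust, no reth dependencies):

    /// Round `num` up to the next multiple of 16, as `ECIES::align_16` does.
    const fn align_16(num: usize) -> usize {
        (num + (16 - 1)) & !(16 - 1)
    }

    fn main() {
        assert_eq!(align_16(0), 0);
        assert_eq!(align_16(1), 16);
        assert_eq!(align_16(16), 16); // already-aligned values are unchanged
        assert_eq!(align_16(17), 32);
        // Equivalent to the branchy form it replaces in `body_len`/`write_body`:
        let len = 100;
        let old = if len % 16 == 0 { len } else { (len / 16 + 1) * 16 };
        assert_eq!(align_16(len), old);
    }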
+ #[error("initial header body is {body_size} but the max is {max_body_size}")] + InitialHeaderBodyTooLarge { + /// The body size from the header + body_size: usize, + /// The max body size + max_body_size: usize, + }, /// Error when trying to split an array beyond its length #[error("requested {idx} but array len is {len}")] OutOfBounds { diff --git a/crates/net/eth-wire-types/Cargo.toml b/crates/net/eth-wire-types/Cargo.toml index 1d2b54872455..f9759ffc25af 100644 --- a/crates/net/eth-wire-types/Cargo.toml +++ b/crates/net/eth-wire-types/Cargo.toml @@ -16,12 +16,14 @@ workspace = true reth-chainspec.workspace = true reth-codecs-derive.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true # ethereum alloy-chains = { workspace = true, features = ["rlp"] } alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp = { workspace = true, features = ["derive"] } +alloy-consensus.workspace = true bytes.workspace = true derive_more.workspace = true @@ -41,26 +43,26 @@ arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true rand.workspace = true -alloy-consensus.workspace = true [features] arbitrary = [ - "reth-primitives/arbitrary", - "alloy-chains/arbitrary", - "dep:arbitrary", - "dep:proptest", - "dep:proptest-arbitrary-interop", - "reth-chainspec/arbitrary", - "alloy-consensus/arbitrary", - "alloy-eips/arbitrary", - "alloy-primitives/arbitrary" + "reth-primitives/arbitrary", + "alloy-chains/arbitrary", + "dep:arbitrary", + "dep:proptest", + "dep:proptest-arbitrary-interop", + "reth-chainspec/arbitrary", + "alloy-consensus/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary", + "reth-primitives-traits/arbitrary", ] serde = [ - "dep:serde", - "alloy-chains/serde", - "alloy-consensus/serde", - "alloy-eips/serde", - "alloy-primitives/serde", - "bytes/serde", - "rand/serde" + "dep:serde", + "alloy-chains/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "bytes/serde", + "rand/serde", ] diff --git a/crates/net/eth-wire-types/src/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs index 878b4573f2b6..1eb71082d814 100644 --- a/crates/net/eth-wire-types/src/blocks.rs +++ b/crates/net/eth-wire-types/src/blocks.rs @@ -5,8 +5,7 @@ use crate::HeadersDirection; use alloy_eips::BlockHashOrNumber; use alloy_primitives::B256; use alloy_rlp::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper}; -use reth_codecs_derive::add_arbitrary_tests; -use reth_primitives::{BlockBody, Header}; +use reth_codecs_derive::{add_arbitrary_tests, generate_tests}; /// A request for a peer to return block headers starting at the requested block. /// The peer must return at most [`limit`](#structfield.limit) headers. @@ -41,34 +40,16 @@ pub struct GetBlockHeaders { /// The response to [`GetBlockHeaders`], containing headers if any headers were found. #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -#[add_arbitrary_tests(rlp, 10)] -pub struct BlockHeaders( +#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] +pub struct BlockHeaders( /// The requested headers. - pub Vec
, + pub Vec, ); -#[cfg(any(test, feature = "arbitrary"))] -impl<'a> arbitrary::Arbitrary<'a> for BlockHeaders { - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let headers_count: usize = u.int_in_range(0..=10)?; - let mut headers = Vec::with_capacity(headers_count); - - for _ in 0..headers_count { - headers.push(reth_primitives::generate_valid_header( - u.arbitrary()?, - u.arbitrary()?, - u.arbitrary()?, - u.arbitrary()?, - u.arbitrary()?, - )) - } - - Ok(Self(headers)) - } -} +generate_tests!(#[rlp, 10] BlockHeaders, EthBlockHeadersTests); -impl From> for BlockHeaders { - fn from(headers: Vec
) -> Self { +impl From> for BlockHeaders { + fn from(headers: Vec) -> Self { Self(headers) } } @@ -94,14 +75,15 @@ impl From> for GetBlockBodies { #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(rlp, 16)] -pub struct BlockBodies( +pub struct BlockBodies( /// The requested block bodies, each of which should correspond to a hash in the request. - pub Vec, + pub Vec, ); -impl From> for BlockBodies { - fn from(bodies: Vec) -> Self { +generate_tests!(#[rlp, 16] BlockBodies, EthBlockBodiesTests); + +impl From> for BlockBodies { + fn from(bodies: Vec) -> Self { Self(bodies) } } @@ -112,14 +94,13 @@ mod tests { message::RequestPair, BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders, HeadersDirection, }; - use alloy_consensus::TxLegacy; - use alloy_primitives::{hex, Parity, TxKind, U256}; + use alloy_consensus::{Header, TxLegacy}; + use alloy_eips::BlockHashOrNumber; + use alloy_primitives::{hex, PrimitiveSignature as Signature, TxKind, U256}; use alloy_rlp::{Decodable, Encodable}; - use reth_primitives::{BlockHashOrNumber, Header, Signature, Transaction, TransactionSigned}; + use reth_primitives::{BlockBody, Transaction, TransactionSigned}; use std::str::FromStr; - use super::BlockBody; - #[test] fn decode_hash() { // this is a valid 32 byte rlp string @@ -253,7 +234,7 @@ mod tests { // [ (f90202) 0x0457 = 1111, [ (f901fc) [ (f901f9) header ] ] ] let expected = hex!("f90202820457f901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"); let mut data = vec![]; - RequestPair:: { + RequestPair::> { request_id: 1111, message: BlockHeaders(vec![ Header { @@ -288,7 +269,7 @@ mod tests { #[test] fn decode_block_header() { let data = 
hex!("f90202820457f901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"); - let expected = RequestPair:: { + let expected = RequestPair::> { request_id: 1111, message: BlockHeaders(vec![ Header { @@ -356,7 +337,7 @@ mod tests { fn encode_block_bodies() { let expected = hex!("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"); let mut data = vec![]; - let request = RequestPair:: { + let request = RequestPair::> { request_id: 1111, message: BlockBodies(vec![ BlockBody { @@ -372,7 +353,7 @@ mod tests { }), Signature::new( U256::from_str("0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12").unwrap(), U256::from_str("0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10").unwrap(), - Parity::Parity(false), + false, ), ), TransactionSigned::from_transaction_and_signature(Transaction::Legacy(TxLegacy { @@ -386,7 +367,7 @@ mod tests { }), Signature::new( U256::from_str("0x52f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb").unwrap(), U256::from_str("0x52f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb").unwrap(), - Parity::Parity(false), + false, ), ), ], @@ -427,7 +408,7 @@ mod tests { #[test] 
fn decode_block_bodies() { let data = hex!("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"); - let expected = RequestPair:: { + let expected = RequestPair::> { request_id: 1111, message: BlockBodies(vec![ BlockBody { @@ -445,7 +426,7 @@ mod tests { Signature::new( U256::from_str("0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12").unwrap(), U256::from_str("0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10").unwrap(), - Parity::Eip155(37), + false, ), ), TransactionSigned::from_transaction_and_signature( @@ -461,7 +442,7 @@ mod tests { Signature::new( U256::from_str("0x52f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb").unwrap(), U256::from_str("0x52f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb").unwrap(), - Parity::Eip155(37), + false, ), ), ], @@ -503,7 +484,7 @@ mod tests { let body = BlockBodies::default(); let mut buf = Vec::new(); body.encode(&mut buf); - let decoded = BlockBodies::decode(&mut buf.as_slice()).unwrap(); + let decoded = BlockBodies::::decode(&mut buf.as_slice()).unwrap(); assert_eq!(body, decoded); } } diff --git a/crates/net/eth-wire-types/src/broadcast.rs b/crates/net/eth-wire-types/src/broadcast.rs index 2ef6083a5001..032227069923 100644 --- a/crates/net/eth-wire-types/src/broadcast.rs +++ b/crates/net/eth-wire-types/src/broadcast.rs @@ -1,14 +1,14 @@ //! Types for broadcasting new data. 
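With `BlockHeaders` and `BlockBodies` now generic, call sites name the header or body type explicitly, as the updated blocks.rs tests above do via `RequestPair::<BlockHeaders<Header>>`. A minimal sketch of the new spelling (the import paths assume the same re-exports the in-crate tests rely on):

    use alloy_consensus::Header;
    use alloy_rlp::Encodable;
    use reth_eth_wire_types::{message::RequestPair, BlockHeaders};

    fn encode_empty_headers_response() -> Vec<u8> {
        // The header type is now an explicit parameter; before this change it
        // was hardcoded to the concrete reth header type.
        let pair = RequestPair::<BlockHeaders<Header>> {
            request_id: 1111,
            message: BlockHeaders(vec![]),
        };
        let mut buf = Vec::new();
        pair.encode(&mut buf);
        buf
    }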
-use crate::{EthMessage, EthVersion}; +use crate::{EthMessage, EthVersion, NetworkPrimitives}; use alloy_rlp::{ Decodable, Encodable, RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper, }; use alloy_primitives::{Bytes, TxHash, B256, U128}; use derive_more::{Constructor, Deref, DerefMut, From, IntoIterator}; -use reth_codecs_derive::add_arbitrary_tests; -use reth_primitives::{Block, PooledTransactionsElement, TransactionSigned}; +use reth_codecs_derive::{add_arbitrary_tests, generate_tests}; +use reth_primitives::{PooledTransactionsElement, TransactionSigned}; use std::{ collections::{HashMap, HashSet}, @@ -75,14 +75,15 @@ impl From for Vec { #[derive(Clone, Debug, PartialEq, Eq, RlpEncodable, RlpDecodable, Default)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(rlp, 25)] -pub struct NewBlock { +pub struct NewBlock { /// A new block. - pub block: Block, + pub block: B, /// The current total difficulty. pub td: U128, } +generate_tests!(#[rlp, 25] NewBlock, EthNewBlockTests); + /// This informs peers of transactions that have appeared on the network and are not yet included /// in a block. #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] @@ -269,7 +270,7 @@ impl NewPooledTransactionHashes { } } -impl From for EthMessage { +impl From for EthMessage { fn from(value: NewPooledTransactionHashes) -> Self { match value { NewPooledTransactionHashes::Eth66(msg) => Self::NewPooledTransactionHashes66(msg), diff --git a/crates/net/eth-wire-types/src/header.rs b/crates/net/eth-wire-types/src/header.rs index 8c11bfa82bb6..9fa3b150d9e1 100644 --- a/crates/net/eth-wire-types/src/header.rs +++ b/crates/net/eth-wire-types/src/header.rs @@ -87,10 +87,9 @@ impl From for bool { #[cfg(test)] mod tests { use super::*; - use alloy_consensus::{EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; + use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; use alloy_primitives::{address, b256, bloom, bytes, hex, Address, Bytes, B256, U256}; use alloy_rlp::{Decodable, Encodable}; - use reth_primitives::Header; use std::str::FromStr; // Test vector from: https://eips.ethereum.org/EIPS/eip-2481 diff --git a/crates/net/eth-wire-types/src/lib.rs b/crates/net/eth-wire-types/src/lib.rs index 0e8fd5df98ae..ac7ea55d0b90 100644 --- a/crates/net/eth-wire-types/src/lib.rs +++ b/crates/net/eth-wire-types/src/lib.rs @@ -40,3 +40,6 @@ pub use disconnect_reason::*; pub mod capability; pub use capability::*; + +pub mod primitives; +pub use primitives::*; diff --git a/crates/net/eth-wire-types/src/message.rs b/crates/net/eth-wire-types/src/message.rs index 4afcb34e13bf..2a6d973ffc36 100644 --- a/crates/net/eth-wire-types/src/message.rs +++ b/crates/net/eth-wire-types/src/message.rs @@ -11,7 +11,7 @@ use super::{ GetNodeData, GetPooledTransactions, GetReceipts, NewBlock, NewPooledTransactionHashes66, NewPooledTransactionHashes68, NodeData, PooledTransactions, Receipts, Status, Transactions, }; -use crate::{EthVersion, SharedTransactions}; +use crate::{EthNetworkPrimitives, EthVersion, NetworkPrimitives, SharedTransactions}; use alloy_primitives::bytes::{Buf, BufMut}; use alloy_rlp::{length_of_length, Decodable, Encodable, Header}; @@ -35,14 +35,18 @@ pub enum MessageError { /// An `eth` protocol message, containing a message ID and payload. 
#[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub struct ProtocolMessage { +pub struct ProtocolMessage { /// The unique identifier representing the type of the Ethereum message. pub message_type: EthMessageID, /// The content of the message, including specific data based on the message type. - pub message: EthMessage, + #[cfg_attr( + feature = "serde", + serde(bound = "EthMessage: serde::Serialize + serde::de::DeserializeOwned") + )] + pub message: EthMessage, } -impl ProtocolMessage { +impl ProtocolMessage { /// Create a new `ProtocolMessage` from a message type and message rlp bytes. pub fn decode_message(version: EthVersion, buf: &mut &[u8]) -> Result { let message_type = EthMessageID::decode(buf)?; @@ -50,9 +54,17 @@ impl ProtocolMessage { let message = match message_type { EthMessageID::Status => EthMessage::Status(Status::decode(buf)?), EthMessageID::NewBlockHashes => { + if version.is_eth69() { + return Err(MessageError::Invalid(version, EthMessageID::NewBlockHashes)); + } EthMessage::NewBlockHashes(NewBlockHashes::decode(buf)?) } - EthMessageID::NewBlock => EthMessage::NewBlock(Box::new(NewBlock::decode(buf)?)), + EthMessageID::NewBlock => { + if version.is_eth69() { + return Err(MessageError::Invalid(version, EthMessageID::NewBlock)); + } + EthMessage::NewBlock(Box::new(NewBlock::decode(buf)?)) + } EthMessageID::Transactions => EthMessage::Transactions(Transactions::decode(buf)?), EthMessageID::NewPooledTransactionHashes => { if version >= EthVersion::Eth68 { @@ -70,7 +82,7 @@ impl ProtocolMessage { EthMessage::GetBlockHeaders(request_pair) } EthMessageID::BlockHeaders => { - let request_pair = RequestPair::::decode(buf)?; + let request_pair = RequestPair::>::decode(buf)?; EthMessage::BlockHeaders(request_pair) } EthMessageID::GetBlockBodies => { @@ -78,7 +90,7 @@ impl ProtocolMessage { EthMessage::GetBlockBodies(request_pair) } EthMessageID::BlockBodies => { - let request_pair = RequestPair::::decode(buf)?; + let request_pair = RequestPair::>::decode(buf)?; EthMessage::BlockBodies(request_pair) } EthMessageID::GetPooledTransactions => { @@ -116,7 +128,7 @@ impl ProtocolMessage { } } -impl Encodable for ProtocolMessage { +impl Encodable for ProtocolMessage { /// Encodes the protocol message into bytes. The message type is encoded as a single byte and /// prepended to the message. fn encode(&self, out: &mut dyn BufMut) { @@ -128,23 +140,23 @@ impl Encodable for ProtocolMessage { } } -impl From for ProtocolMessage { - fn from(message: EthMessage) -> Self { +impl From> for ProtocolMessage { + fn from(message: EthMessage) -> Self { Self { message_type: message.message_id(), message } } } /// Represents messages that can be sent to multiple peers. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct ProtocolBroadcastMessage { +#[derive(Clone, Debug)] +pub struct ProtocolBroadcastMessage { /// The unique identifier representing the type of the Ethereum message. pub message_type: EthMessageID, /// The content of the message to be broadcasted, including specific data based on the message /// type. - pub message: EthBroadcastMessage, + pub message: EthBroadcastMessage, } -impl Encodable for ProtocolBroadcastMessage { +impl Encodable for ProtocolBroadcastMessage { /// Encodes the protocol message into bytes. The message type is encoded as a single byte and /// prepended to the message. 
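Since eth/69 drops block gossip, `decode_message` above now rejects `NewBlockHashes` and `NewBlock` frames for that version. A minimal sketch of the resulting behavior, written as a predicate and assuming `MessageError` and `ProtocolMessage` are reachable at the paths the in-crate tests use:

    use reth_eth_wire_types::{
        message::{MessageError, ProtocolMessage},
        EthNetworkPrimitives, EthVersion,
    };

    /// Returns true if `raw` is a frame that eth/69 refuses to decode,
    /// e.g. a `NewBlock` or `NewBlockHashes` message.
    fn is_rejected_on_eth69(mut raw: &[u8]) -> bool {
        matches!(
            ProtocolMessage::<EthNetworkPrimitives>::decode_message(EthVersion::Eth69, &mut raw),
            Err(MessageError::Invalid(..))
        )
    }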
fn encode(&self, out: &mut dyn BufMut) { @@ -156,8 +168,8 @@ impl Encodable for ProtocolBroadcastMessage { } } -impl From for ProtocolBroadcastMessage { - fn from(message: EthBroadcastMessage) -> Self { +impl From> for ProtocolBroadcastMessage { + fn from(message: EthBroadcastMessage) -> Self { Self { message_type: message.message_id(), message } } } @@ -181,13 +193,17 @@ impl From for ProtocolBroadcastMessage { /// [`NewPooledTransactionHashes68`] is defined. #[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub enum EthMessage { +pub enum EthMessage { /// Represents a Status message required for the protocol handshake. Status(Status), /// Represents a `NewBlockHashes` message broadcast to the network. NewBlockHashes(NewBlockHashes), /// Represents a `NewBlock` message broadcast to the network. - NewBlock(Box), + #[cfg_attr( + feature = "serde", + serde(bound = "N::Block: serde::Serialize + serde::de::DeserializeOwned") + )] + NewBlock(Box>), /// Represents a Transactions message broadcast to the network. Transactions(Transactions), /// Represents a `NewPooledTransactionHashes` message for eth/66 version. @@ -198,11 +214,19 @@ pub enum EthMessage { /// Represents a `GetBlockHeaders` request-response pair. GetBlockHeaders(RequestPair), /// Represents a `BlockHeaders` request-response pair. - BlockHeaders(RequestPair), + #[cfg_attr( + feature = "serde", + serde(bound = "N::BlockHeader: serde::Serialize + serde::de::DeserializeOwned") + )] + BlockHeaders(RequestPair>), /// Represents a `GetBlockBodies` request-response pair. GetBlockBodies(RequestPair), /// Represents a `BlockBodies` request-response pair. - BlockBodies(RequestPair), + #[cfg_attr( + feature = "serde", + serde(bound = "N::BlockBody: serde::Serialize + serde::de::DeserializeOwned") + )] + BlockBodies(RequestPair>), /// Represents a `GetPooledTransactions` request-response pair. GetPooledTransactions(RequestPair), /// Represents a `PooledTransactions` request-response pair. @@ -217,7 +241,7 @@ pub enum EthMessage { Receipts(RequestPair), } -impl EthMessage { +impl EthMessage { /// Returns the message's ID. pub const fn message_id(&self) -> EthMessageID { match self { @@ -242,7 +266,7 @@ impl EthMessage { } } -impl Encodable for EthMessage { +impl Encodable for EthMessage { fn encode(&self, out: &mut dyn BufMut) { match self { Self::Status(status) => status.encode(out), @@ -293,16 +317,16 @@ impl Encodable for EthMessage { /// /// Note: This is only useful for outgoing messages. #[derive(Clone, Debug, PartialEq, Eq)] -pub enum EthBroadcastMessage { +pub enum EthBroadcastMessage { /// Represents a new block broadcast message. - NewBlock(Arc), + NewBlock(Arc>), /// Represents a transactions broadcast message. Transactions(SharedTransactions), } // === impl EthBroadcastMessage === -impl EthBroadcastMessage { +impl EthBroadcastMessage { /// Returns the message's ID. 
pub const fn message_id(&self) -> EthMessageID { match self { @@ -312,7 +336,7 @@ impl EthBroadcastMessage { } } -impl Encodable for EthBroadcastMessage { +impl Encodable for EthBroadcastMessage { fn encode(&self, out: &mut dyn BufMut) { match self { Self::NewBlock(new_block) => new_block.encode(out), @@ -494,8 +518,8 @@ where mod tests { use super::MessageError; use crate::{ - message::RequestPair, EthMessage, EthMessageID, EthVersion, GetNodeData, NodeData, - ProtocolMessage, + message::RequestPair, EthMessage, EthMessageID, EthNetworkPrimitives, EthVersion, + GetNodeData, NodeData, ProtocolMessage, }; use alloy_primitives::hex; use alloy_rlp::{Decodable, Encodable, Error}; @@ -508,20 +532,30 @@ mod tests { #[test] fn test_removed_message_at_eth67() { - let get_node_data = - EthMessage::GetNodeData(RequestPair { request_id: 1337, message: GetNodeData(vec![]) }); + let get_node_data = EthMessage::::GetNodeData(RequestPair { + request_id: 1337, + message: GetNodeData(vec![]), + }); let buf = encode(ProtocolMessage { message_type: EthMessageID::GetNodeData, message: get_node_data, }); - let msg = ProtocolMessage::decode_message(crate::EthVersion::Eth67, &mut &buf[..]); + let msg = ProtocolMessage::::decode_message( + crate::EthVersion::Eth67, + &mut &buf[..], + ); assert!(matches!(msg, Err(MessageError::Invalid(..)))); - let node_data = - EthMessage::NodeData(RequestPair { request_id: 1337, message: NodeData(vec![]) }); + let node_data = EthMessage::::NodeData(RequestPair { + request_id: 1337, + message: NodeData(vec![]), + }); let buf = encode(ProtocolMessage { message_type: EthMessageID::NodeData, message: node_data }); - let msg = ProtocolMessage::decode_message(crate::EthVersion::Eth67, &mut &buf[..]); + let msg = ProtocolMessage::::decode_message( + crate::EthVersion::Eth67, + &mut &buf[..], + ); assert!(matches!(msg, Err(MessageError::Invalid(..)))); } @@ -570,10 +604,11 @@ mod tests { #[test] fn empty_block_bodies_protocol() { - let empty_block_bodies = ProtocolMessage::from(EthMessage::BlockBodies(RequestPair { - request_id: 0, - message: Default::default(), - })); + let empty_block_bodies = + ProtocolMessage::from(EthMessage::::BlockBodies(RequestPair { + request_id: 0, + message: Default::default(), + })); let mut buf = Vec::new(); empty_block_bodies.encode(&mut buf); let decoded = diff --git a/crates/net/eth-wire-types/src/primitives.rs b/crates/net/eth-wire-types/src/primitives.rs new file mode 100644 index 000000000000..eab36c3b6a7c --- /dev/null +++ b/crates/net/eth-wire-types/src/primitives.rs @@ -0,0 +1,86 @@ +//! Abstraction over primitive types in network messages. + +use std::fmt::Debug; + +use alloy_rlp::{Decodable, Encodable}; +use reth_primitives_traits::{Block, BlockHeader}; + +/// Abstraction over primitive types which might appear in network messages. See +/// [`crate::EthMessage`] for more context. +pub trait NetworkPrimitives: + Send + Sync + Unpin + Clone + Debug + PartialEq + Eq + 'static +{ + /// The block header type. + type BlockHeader: BlockHeader + + Encodable + + Decodable + + Send + + Sync + + Unpin + + Clone + + Debug + + PartialEq + + Eq + + 'static; + /// The block body type. + type BlockBody: Encodable + + Decodable + + Send + + Sync + + Unpin + + Clone + + Debug + + PartialEq + + Eq + + 'static; + /// Full block type. + type Block: Block
+ + Encodable + + Decodable + + Send + + Sync + + Unpin + + Clone + + Debug + + PartialEq + + Eq + + 'static; + + /// The transaction type which peers announce in `Transactions` messages. It is different from + /// `PooledTransactions` to account for the Ethereum case where EIP-4844 transactions are not being + /// announced and can only be explicitly requested from peers. + type BroadcastedTransaction: Encodable + + Decodable + + Send + + Sync + + Unpin + + Clone + + Debug + + PartialEq + + Eq + + 'static; + /// The transaction type which peers return in `PooledTransactions` messages. + type PooledTransaction: Encodable + + Decodable + + Send + + Sync + + Unpin + + Clone + + Debug + + PartialEq + + Eq + + 'static; +} + +/// Primitive types used by the Ethereum network. +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)] +#[non_exhaustive] +pub struct EthNetworkPrimitives; + +impl NetworkPrimitives for EthNetworkPrimitives { + type BlockHeader = alloy_consensus::Header; + type BlockBody = reth_primitives::BlockBody; + type Block = reth_primitives::Block; + type BroadcastedTransaction = reth_primitives::TransactionSigned; + type PooledTransaction = reth_primitives::PooledTransactionsElement; +} diff --git a/crates/net/eth-wire-types/src/status.rs b/crates/net/eth-wire-types/src/status.rs index 90e1731c90ab..d9e8d4319b5b 100644 --- a/crates/net/eth-wire-types/src/status.rs +++ b/crates/net/eth-wire-types/src/status.rs @@ -19,7 +19,7 @@ pub struct Status { /// The current protocol version. For example, peers running `eth/66` would have a version of /// 66. - pub version: u8, + pub version: EthVersion, /// The chain id, as introduced in /// [EIP155](https://eips.ethereum.org/EIPS/eip-155#list-of-chain-ids). @@ -50,7 +50,7 @@ impl Status { /// Sets the [`EthVersion`] for the status. pub fn set_eth_version(&mut self, version: EthVersion) { - self.version = version as u8; + self.version = version; } /// Create a [`StatusBuilder`] from the given [`EthChainSpec`] and head block. @@ -122,7 +122,7 @@ impl Default for Status { fn default() -> Self { let mainnet_genesis = MAINNET.genesis_hash(); Self { - version: EthVersion::Eth68 as u8, + version: EthVersion::Eth68, chain: Chain::from_named(NamedChain::Mainnet), total_difficulty: U256::from(17_179_869_184u64), blockhash: mainnet_genesis, @@ -145,7 +145,7 @@ impl Default for Status { /// /// // this is just an example status message! /// let status = Status::builder() -/// .version(EthVersion::Eth66.into()) +/// .version(EthVersion::Eth66) /// .chain(Chain::mainnet()) /// .total_difficulty(U256::from(100)) /// .blockhash(B256::from(MAINNET_GENESIS_HASH)) @@ -156,7 +156,7 @@ impl Default for Status { /// assert_eq!( /// status, /// Status { -/// version: EthVersion::Eth66.into(), +/// version: EthVersion::Eth66, /// chain: Chain::mainnet(), /// total_difficulty: U256::from(100), /// blockhash: B256::from(MAINNET_GENESIS_HASH), @@ -177,7 +177,7 @@ impl StatusBuilder { } /// Sets the protocol version.
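The `NetworkPrimitives` trait introduced in primitives.rs above is purely a bundle of associated types plus thread-safety bounds, so a network definition is a stateless unit struct. A hedged sketch of a custom instantiation (`MyNetworkPrimitives` is illustrative, not part of this change; it reuses the same concrete types as `EthNetworkPrimitives`):

    use reth_eth_wire_types::NetworkPrimitives;

    /// Hypothetical network definition; any types satisfying the trait's
    /// Encodable/Decodable/'static bounds could be substituted per item.
    #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
    #[non_exhaustive]
    pub struct MyNetworkPrimitives;

    impl NetworkPrimitives for MyNetworkPrimitives {
        type BlockHeader = alloy_consensus::Header;
        type BlockBody = reth_primitives::BlockBody;
        type Block = reth_primitives::Block;
        type BroadcastedTransaction = reth_primitives::TransactionSigned;
        type PooledTransaction = reth_primitives::PooledTransactionsElement;
    }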
- pub const fn version(mut self, version: u8) -> Self { + pub const fn version(mut self, version: EthVersion) -> Self { self.status.version = version; self } @@ -229,7 +229,7 @@ mod tests { fn encode_eth_status_message() { let expected = hex!("f85643018a07aac59dabcdd74bc567a0feb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13da0d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3c684b715077d80"); let status = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: Chain::from_named(NamedChain::Mainnet), total_difficulty: U256::from(36206751599115524359527u128), blockhash: B256::from_str( @@ -249,7 +249,7 @@ mod tests { fn decode_eth_status_message() { let data = hex!("f85643018a07aac59dabcdd74bc567a0feb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13da0d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3c684b715077d80"); let expected = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: Chain::from_named(NamedChain::Mainnet), total_difficulty: U256::from(36206751599115524359527u128), blockhash: B256::from_str( @@ -267,7 +267,7 @@ mod tests { fn encode_network_status_message() { let expected = hex!("f850423884024190faa0f8514c4680ef27700751b08f37645309ce65a449616a3ea966bf39dd935bb27ba00d21840abff46b96c84b2ac9e10e4f5cdaeb5693cb665db62a2f3b02d2d57b5bc6845d43d2fd80"); let status = Status { - version: EthVersion::Eth66 as u8, + version: EthVersion::Eth66, chain: Chain::from_named(NamedChain::BinanceSmartChain), total_difficulty: U256::from(37851386u64), blockhash: B256::from_str( @@ -290,7 +290,7 @@ mod tests { fn decode_network_status_message() { let data = hex!("f850423884024190faa0f8514c4680ef27700751b08f37645309ce65a449616a3ea966bf39dd935bb27ba00d21840abff46b96c84b2ac9e10e4f5cdaeb5693cb665db62a2f3b02d2d57b5bc6845d43d2fd80"); let expected = Status { - version: EthVersion::Eth66 as u8, + version: EthVersion::Eth66, chain: Chain::from_named(NamedChain::BinanceSmartChain), total_difficulty: U256::from(37851386u64), blockhash: B256::from_str( @@ -311,7 +311,7 @@ mod tests { fn decode_another_network_status_message() { let data = hex!("f86142820834936d68fcffffffffffffffffffffffffdeab81b8a0523e8163a6d620a4cc152c547a05f28a03fec91a2a615194cb86df9731372c0ca06499dccdc7c7def3ebb1ce4c6ee27ec6bd02aee570625ca391919faf77ef27bdc6841a67ccd880"); let expected = Status { - version: EthVersion::Eth66 as u8, + version: EthVersion::Eth66, chain: Chain::from_id(2100), total_difficulty: U256::from_str( "0x000000000000000000000000006d68fcffffffffffffffffffffffffdeab81b8", diff --git a/crates/net/eth-wire-types/src/transactions.rs b/crates/net/eth-wire-types/src/transactions.rs index ab65aa178ee8..dfedcb6f83e4 100644 --- a/crates/net/eth-wire-types/src/transactions.rs +++ b/crates/net/eth-wire-types/src/transactions.rs @@ -1,12 +1,11 @@ //! Implements the `GetPooledTransactions` and `PooledTransactions` message types. +use alloy_eips::eip2718::Encodable2718; use alloy_primitives::B256; use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; use derive_more::{Constructor, Deref, IntoIterator}; use reth_codecs_derive::add_arbitrary_tests; -use reth_primitives::{ - transaction::TransactionConversionError, PooledTransactionsElement, TransactionSigned, -}; +use reth_primitives::{transaction::TransactionConversionError, PooledTransactionsElement}; /// A list of transaction hashes that the peer would like transaction bodies for. 
#[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] @@ -42,46 +41,54 @@ where Eq, RlpEncodableWrapper, RlpDecodableWrapper, - Default, IntoIterator, Deref, Constructor, )] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub struct PooledTransactions( +pub struct PooledTransactions( /// The transaction bodies, each of which should correspond to a requested hash. - pub Vec, + pub Vec, ); -impl PooledTransactions { +impl PooledTransactions { /// Returns an iterator over the transaction hashes in this response. - pub fn hashes(&self) -> impl Iterator + '_ { - self.0.iter().map(|tx| tx.hash()) + pub fn hashes(&self) -> impl Iterator + '_ { + self.0.iter().map(|tx| tx.trie_hash()) } } -impl TryFrom> for PooledTransactions { +impl TryFrom> for PooledTransactions +where + T: TryFrom, +{ type Error = TransactionConversionError; - fn try_from(txs: Vec) -> Result { - txs.into_iter().map(PooledTransactionsElement::try_from).collect() + fn try_from(txs: Vec) -> Result { + txs.into_iter().map(T::try_from).collect() } } -impl FromIterator for PooledTransactions { - fn from_iter>(iter: I) -> Self { +impl FromIterator for PooledTransactions { + fn from_iter>(iter: I) -> Self { Self(iter.into_iter().collect()) } } +impl Default for PooledTransactions { + fn default() -> Self { + Self(Default::default()) + } +} + #[cfg(test)] mod tests { use crate::{message::RequestPair, GetPooledTransactions, PooledTransactions}; use alloy_consensus::{TxEip1559, TxLegacy}; - use alloy_primitives::{hex, Parity, TxKind, U256}; + use alloy_primitives::{hex, PrimitiveSignature as Signature, TxKind, U256}; use alloy_rlp::{Decodable, Encodable}; use reth_chainspec::MIN_TRANSACTION_GAS; - use reth_primitives::{PooledTransactionsElement, Signature, Transaction, TransactionSigned}; + use reth_primitives::{PooledTransactionsElement, Transaction, TransactionSigned}; use std::str::FromStr; #[test] @@ -142,7 +149,7 @@ mod tests { "0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10", ) .unwrap(), - Parity::Parity(false), + false, ), ), TransactionSigned::from_transaction_and_signature( @@ -164,7 +171,7 @@ mod tests { "0x52f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb", ) .unwrap(), - Parity::Parity(false), + false, ), ), ]; @@ -208,7 +215,7 @@ mod tests { "0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10", ) .unwrap(), - Parity::Eip155(37), + false, ), ), TransactionSigned::from_transaction_and_signature( @@ -230,7 +237,7 @@ mod tests { "0x52f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb", ) .unwrap(), - Parity::Eip155(37), + false, ), ), ]; @@ -275,7 +282,7 @@ mod tests { "0x612638fb29427ca33b9a3be2a0a561beecfe0269655be160d35e72d366a6a860", ) .unwrap(), - Parity::Eip155(44), + true, ), ), TransactionSigned::from_transaction_and_signature( @@ -299,7 +306,7 @@ mod tests { "0x016b83f4f980694ed2eee4d10667242b1f40dc406901b34125b008d334d47469", ) .unwrap(), - Parity::Parity(true), + true, ), ), TransactionSigned::from_transaction_and_signature( @@ -321,7 +328,7 @@ mod tests { "0x3ca3ae86580e94550d7c071e3a02eadb5a77830947c9225165cf9100901bee88", ) .unwrap(), - Parity::Eip155(43), + false, ), ), TransactionSigned::from_transaction_and_signature( @@ -343,7 +350,7 @@ mod tests { "0x5406ad177223213df262cb66ccbb2f46bfdccfdfbbb5ffdda9e2c02d977631da", ) .unwrap(), - Parity::Eip155(43), + false, ), ), TransactionSigned::from_transaction_and_signature( @@ -365,7 +372,7 @@ mod tests { 
"0x3a456401896b1b6055311536bf00a718568c744d8c1f9df59879e8350220ca18", ) .unwrap(), - Parity::Eip155(43), + false, ), ), ]; @@ -414,7 +421,7 @@ mod tests { "0x612638fb29427ca33b9a3be2a0a561beecfe0269655be160d35e72d366a6a860", ) .unwrap(), - Parity::Parity(true), + true, ), ), TransactionSigned::from_transaction_and_signature( @@ -438,7 +445,7 @@ mod tests { "0x016b83f4f980694ed2eee4d10667242b1f40dc406901b34125b008d334d47469", ) .unwrap(), - Parity::Parity(true), + true, ), ), TransactionSigned::from_transaction_and_signature( @@ -460,7 +467,7 @@ mod tests { "0x3ca3ae86580e94550d7c071e3a02eadb5a77830947c9225165cf9100901bee88", ) .unwrap(), - Parity::Parity(false), + false, ), ), TransactionSigned::from_transaction_and_signature( @@ -482,7 +489,7 @@ mod tests { "0x5406ad177223213df262cb66ccbb2f46bfdccfdfbbb5ffdda9e2c02d977631da", ) .unwrap(), - Parity::Parity(false), + false, ), ), TransactionSigned::from_transaction_and_signature( @@ -504,7 +511,7 @@ mod tests { "0x3a456401896b1b6055311536bf00a718568c744d8c1f9df59879e8350220ca18", ) .unwrap(), - Parity::Parity(false), + false, ), ), ]; diff --git a/crates/net/eth-wire-types/src/version.rs b/crates/net/eth-wire-types/src/version.rs index 4fd3e792dcc9..40d51cb55180 100644 --- a/crates/net/eth-wire-types/src/version.rs +++ b/crates/net/eth-wire-types/src/version.rs @@ -15,15 +15,17 @@ pub struct ParseVersionError(String); /// The `eth` protocol version. #[repr(u8)] #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Display)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] pub enum EthVersion { /// The `eth` protocol version 66. Eth66 = 66, - /// The `eth` protocol version 67. Eth67 = 67, - /// The `eth` protocol version 68. Eth68 = 68, + /// The `eth` protocol version 69. + Eth69 = 69, } impl EthVersion { @@ -38,6 +40,8 @@ impl EthVersion { // eth/67,68 are eth/66 minus GetNodeData and NodeData messages 13 } + // eth69 is both eth67 and eth68 minus NewBlockHashes and NewBlock + Self::Eth69 => 11, } } @@ -55,6 +59,31 @@ impl EthVersion { pub const fn is_eth68(&self) -> bool { matches!(self, Self::Eth68) } + + /// Returns true if the version is eth/69 + pub const fn is_eth69(&self) -> bool { + matches!(self, Self::Eth69) + } +} + +/// RLP encodes `EthVersion` as a single byte (66-69). +impl Encodable for EthVersion { + fn encode(&self, out: &mut dyn BufMut) { + (*self as u8).encode(out) + } + + fn length(&self) -> usize { + (*self as u8).length() + } +} + +/// RLP decodes a single byte into `EthVersion`. +/// Returns error if byte is not a valid version (66-69). +impl Decodable for EthVersion { + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + let version = u8::decode(buf)?; + Self::try_from(version).map_err(|_| RlpError::Custom("invalid eth version")) + } } /// Allow for converting from a `&str` to an `EthVersion`. 
@@ -75,6 +104,7 @@ impl TryFrom<&str> for EthVersion { "66" => Ok(Self::Eth66), "67" => Ok(Self::Eth67), "68" => Ok(Self::Eth68), + "69" => Ok(Self::Eth69), _ => Err(ParseVersionError(s.to_string())), } } @@ -98,6 +128,7 @@ impl TryFrom for EthVersion { 66 => Ok(Self::Eth66), 67 => Ok(Self::Eth67), 68 => Ok(Self::Eth68), + 69 => Ok(Self::Eth69), _ => Err(ParseVersionError(u.to_string())), } } @@ -126,6 +157,7 @@ impl From for &'static str { EthVersion::Eth66 => "66", EthVersion::Eth67 => "67", EthVersion::Eth68 => "68", + EthVersion::Eth69 => "69", } } } @@ -173,13 +205,16 @@ impl Decodable for ProtocolVersion { #[cfg(test)] mod tests { use super::{EthVersion, ParseVersionError}; + use alloy_rlp::{Decodable, Encodable, Error as RlpError}; + use bytes::BytesMut; #[test] fn test_eth_version_try_from_str() { assert_eq!(EthVersion::Eth66, EthVersion::try_from("66").unwrap()); assert_eq!(EthVersion::Eth67, EthVersion::try_from("67").unwrap()); assert_eq!(EthVersion::Eth68, EthVersion::try_from("68").unwrap()); - assert_eq!(Err(ParseVersionError("69".to_string())), EthVersion::try_from("69")); + assert_eq!(EthVersion::Eth69, EthVersion::try_from("69").unwrap()); + assert_eq!(Err(ParseVersionError("70".to_string())), EthVersion::try_from("70")); } #[test] @@ -187,6 +222,48 @@ mod tests { assert_eq!(EthVersion::Eth66, "66".parse().unwrap()); assert_eq!(EthVersion::Eth67, "67".parse().unwrap()); assert_eq!(EthVersion::Eth68, "68".parse().unwrap()); - assert_eq!(Err(ParseVersionError("69".to_string())), "69".parse::()); + assert_eq!(EthVersion::Eth69, "69".parse().unwrap()); + assert_eq!(Err(ParseVersionError("70".to_string())), "70".parse::()); + } + + #[test] + fn test_eth_version_rlp_encode() { + let versions = [EthVersion::Eth66, EthVersion::Eth67, EthVersion::Eth68, EthVersion::Eth69]; + + for version in versions { + let mut encoded = BytesMut::new(); + version.encode(&mut encoded); + + assert_eq!(encoded.len(), 1); + assert_eq!(encoded[0], version as u8); + } + } + #[test] + fn test_eth_version_rlp_decode() { + let test_cases = [ + (66_u8, Ok(EthVersion::Eth66)), + (67_u8, Ok(EthVersion::Eth67)), + (68_u8, Ok(EthVersion::Eth68)), + (69_u8, Ok(EthVersion::Eth69)), + (70_u8, Err(RlpError::Custom("invalid eth version"))), + (65_u8, Err(RlpError::Custom("invalid eth version"))), + ]; + + for (input, expected) in test_cases { + let mut encoded = BytesMut::new(); + input.encode(&mut encoded); + + let mut slice = encoded.as_ref(); + let result = EthVersion::decode(&mut slice); + assert_eq!(result, expected); + } + } + + #[test] + fn test_eth_version_total_messages() { + assert_eq!(EthVersion::Eth66.total_messages(), 15); + assert_eq!(EthVersion::Eth67.total_messages(), 13); + assert_eq!(EthVersion::Eth68.total_messages(), 13); + assert_eq!(EthVersion::Eth69.total_messages(), 11); } } diff --git a/crates/net/eth-wire/src/capability.rs b/crates/net/eth-wire/src/capability.rs index d60e500744c0..625971e0e7bd 100644 --- a/crates/net/eth-wire/src/capability.rs +++ b/crates/net/eth-wire/src/capability.rs @@ -5,10 +5,11 @@ use crate::{ p2pstream::MAX_RESERVED_MESSAGE_ID, protocol::{ProtoVersion, Protocol}, version::ParseVersionError, - Capability, EthMessage, EthMessageID, EthVersion, + Capability, EthMessageID, EthVersion, }; use alloy_primitives::bytes::Bytes; use derive_more::{Deref, DerefMut}; +use reth_eth_wire_types::{EthMessage, EthNetworkPrimitives, NetworkPrimitives}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::{ @@ -30,9 +31,13 @@ pub struct RawCapabilityMessage { /// 
network. #[derive(Debug)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum CapabilityMessage { +pub enum CapabilityMessage { /// Eth sub-protocol message. - Eth(EthMessage), + #[cfg_attr( + feature = "serde", + serde(bound = "EthMessage: Serialize + serde::de::DeserializeOwned") + )] + Eth(EthMessage), /// Any other capability message. Other(RawCapabilityMessage), } diff --git a/crates/net/eth-wire/src/errors/eth.rs b/crates/net/eth-wire/src/errors/eth.rs index 557fbd66a00f..1f8b995afda4 100644 --- a/crates/net/eth-wire/src/errors/eth.rs +++ b/crates/net/eth-wire/src/errors/eth.rs @@ -5,6 +5,7 @@ use crate::{ }; use alloy_primitives::B256; use reth_chainspec::Chain; +use reth_eth_wire_types::EthVersion; use reth_primitives::{GotExpected, GotExpectedBoxed, ValidationError}; use std::io; @@ -88,7 +89,7 @@ pub enum EthHandshakeError { MismatchedGenesis(GotExpectedBoxed), #[error("mismatched protocol version in status message: {0}")] /// Mismatched protocol versions in status messages. - MismatchedProtocolVersion(GotExpected), + MismatchedProtocolVersion(GotExpected), #[error("mismatched chain in status message: {0}")] /// Mismatch in chain details in status messages. MismatchedChain(GotExpected), diff --git a/crates/net/eth-wire/src/ethstream.rs b/crates/net/eth-wire/src/ethstream.rs index 9deca99fb587..c971f6182ce1 100644 --- a/crates/net/eth-wire/src/ethstream.rs +++ b/crates/net/eth-wire/src/ethstream.rs @@ -2,11 +2,13 @@ use crate::{ errors::{EthHandshakeError, EthStreamError}, message::{EthBroadcastMessage, ProtocolBroadcastMessage}, p2pstream::HANDSHAKE_TIMEOUT, - CanDisconnect, DisconnectReason, EthMessage, EthVersion, ProtocolMessage, Status, + CanDisconnect, DisconnectReason, EthMessage, EthNetworkPrimitives, EthVersion, ProtocolMessage, + Status, }; use alloy_primitives::bytes::{Bytes, BytesMut}; use futures::{ready, Sink, SinkExt, StreamExt}; use pin_project::pin_project; +use reth_eth_wire_types::NetworkPrimitives; use reth_primitives::{ForkFilter, GotExpected}; use std::{ pin::Pin, @@ -21,6 +23,9 @@ use tracing::{debug, trace}; // https://github.com/ethereum/go-ethereum/blob/30602163d5d8321fbc68afdcbbaf2362b2641bde/eth/protocols/eth/protocol.go#L50 pub const MAX_MESSAGE_SIZE: usize = 10 * 1024 * 1024; +/// [`MAX_STATUS_SIZE`] is the maximum cap on the size of the initial status message +pub(crate) const MAX_STATUS_SIZE: usize = 500 * 1024; + /// An un-authenticated [`EthStream`]. This is consumed and returns a [`EthStream`] after the /// `Status` handshake is completed. #[pin_project] @@ -50,32 +55,32 @@ where /// Consumes the [`UnauthedEthStream`] and returns an [`EthStream`] after the `Status` /// handshake is completed successfully. This also returns the `Status` message sent by the /// remote peer. - pub async fn handshake( + pub async fn handshake( self, status: Status, fork_filter: ForkFilter, - ) -> Result<(EthStream, Status), EthStreamError> { + ) -> Result<(EthStream, Status), EthStreamError> { self.handshake_with_timeout(status, fork_filter, HANDSHAKE_TIMEOUT).await } /// Wrapper around handshake which enforces a timeout. - pub async fn handshake_with_timeout( + pub async fn handshake_with_timeout( self, status: Status, fork_filter: ForkFilter, timeout_limit: Duration, - ) -> Result<(EthStream, Status), EthStreamError> { + ) -> Result<(EthStream, Status), EthStreamError> { timeout(timeout_limit, Self::handshake_without_timeout(self, status, fork_filter)) .await .map_err(|_| EthStreamError::StreamTimeout)? 
} /// Handshake with no timeout - pub async fn handshake_without_timeout( + pub async fn handshake_without_timeout( mut self, status: Status, fork_filter: ForkFilter, - ) -> Result<(EthStream, Status), EthStreamError> { + ) -> Result<(EthStream, Status), EthStreamError> { trace!( %status, "sending eth status to peer" @@ -84,7 +89,10 @@ where // we need to encode and decode here on our own because we don't have an `EthStream` yet // The max length for a status with TTD is: + self.inner - .send(alloy_rlp::encode(ProtocolMessage::from(EthMessage::Status(status))).into()) + .send( + alloy_rlp::encode(ProtocolMessage::::from(EthMessage::::Status(status))) + .into(), + ) .await?; let their_msg_res = self.inner.next().await; @@ -97,13 +105,13 @@ where } }?; - if their_msg.len() > MAX_MESSAGE_SIZE { + if their_msg.len() > MAX_STATUS_SIZE { self.inner.disconnect(DisconnectReason::ProtocolBreach).await?; return Err(EthStreamError::MessageTooBig(their_msg.len())) } - let version = EthVersion::try_from(status.version)?; - let msg = match ProtocolMessage::decode_message(version, &mut their_msg.as_ref()) { + let version = status.version; + let msg = match ProtocolMessage::::decode_message(version, &mut their_msg.as_ref()) { Ok(m) => m, Err(err) => { debug!("decode error in eth handshake: msg={their_msg:x}"); @@ -184,19 +192,21 @@ where /// compatible with eth-networking protocol messages, which get RLP encoded/decoded. #[pin_project] #[derive(Debug)] -pub struct EthStream { +pub struct EthStream { /// Negotiated eth version. version: EthVersion, #[pin] inner: S, + + _pd: std::marker::PhantomData, } -impl EthStream { +impl EthStream { /// Creates a new unauthed [`EthStream`] from a provided stream. You will need /// to manually handshake a peer. #[inline] pub const fn new(version: EthVersion, inner: S) -> Self { - Self { version, inner } + Self { version, inner, _pd: std::marker::PhantomData } } /// Returns the eth version. @@ -224,15 +234,16 @@ impl EthStream { } } -impl EthStream +impl EthStream where S: Sink + Unpin, EthStreamError: From, + N: NetworkPrimitives, { /// Same as [`Sink::start_send`] but accepts a [`EthBroadcastMessage`] instead. 
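`start_send_broadcast` below reuses one pre-built message for many peers: the payload sits behind an `Arc`, so each per-peer send encodes the same allocation instead of cloning the block. A minimal sketch of constructing such a message (import paths assume the crate's re-exports):

    use std::sync::Arc;
    use reth_eth_wire_types::{EthBroadcastMessage, EthNetworkPrimitives, NewBlock};

    /// Build one shared broadcast message: every `start_send_broadcast` call
    /// for a peer encodes the same `Arc`-backed block.
    fn make_broadcast(
        block: Arc<NewBlock<reth_primitives::Block>>,
    ) -> EthBroadcastMessage<EthNetworkPrimitives> {
        EthBroadcastMessage::NewBlock(block)
    }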
pub fn start_send_broadcast( &mut self, - item: EthBroadcastMessage, + item: EthBroadcastMessage, ) -> Result<(), EthStreamError> { self.inner.start_send_unpin(Bytes::from(alloy_rlp::encode( ProtocolBroadcastMessage::from(item), @@ -242,12 +253,13 @@ where } } -impl Stream for EthStream +impl Stream for EthStream where S: Stream> + Unpin, EthStreamError: From, + N: NetworkPrimitives, { - type Item = Result; + type Item = Result, EthStreamError>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.project(); @@ -289,10 +301,11 @@ where } } -impl Sink for EthStream +impl Sink> for EthStream where S: CanDisconnect + Unpin, EthStreamError: From<>::Error>, + N: NetworkPrimitives, { type Error = EthStreamError; @@ -300,7 +313,7 @@ where self.project().inner.poll_ready(cx).map_err(Into::into) } - fn start_send(self: Pin<&mut Self>, item: EthMessage) -> Result<(), Self::Error> { + fn start_send(self: Pin<&mut Self>, item: EthMessage) -> Result<(), Self::Error> { if matches!(item, EthMessage::Status(_)) { // TODO: to disconnect here we would need to do something similar to P2PStream's // start_disconnect, which would ideally be a part of the CanDisconnect trait, or at @@ -330,10 +343,11 @@ where } } -impl CanDisconnect for EthStream +impl CanDisconnect> for EthStream where S: CanDisconnect + Send, EthStreamError: From<>::Error>, + N: NetworkPrimitives, { async fn disconnect(&mut self, reason: DisconnectReason) -> Result<(), EthStreamError> { self.inner.disconnect(reason).await.map_err(Into::into) @@ -355,6 +369,7 @@ mod tests { use futures::{SinkExt, StreamExt}; use reth_chainspec::NamedChain; use reth_ecies::stream::ECIESStream; + use reth_eth_wire_types::EthNetworkPrimitives; use reth_network_peers::pk2id; use reth_primitives::{ForkFilter, Head}; use secp256k1::{SecretKey, SECP256K1}; @@ -368,7 +383,7 @@ mod tests { let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new()); let status = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: NamedChain::Mainnet.into(), total_difficulty: U256::ZERO, blockhash: B256::random(), @@ -387,7 +402,7 @@ mod tests { let (incoming, _) = listener.accept().await.unwrap(); let stream = PassthroughCodec::default().framed(incoming); let (_, their_status) = UnauthedEthStream::new(stream) - .handshake(status_clone, fork_filter_clone) + .handshake::(status_clone, fork_filter_clone) .await .unwrap(); @@ -399,8 +414,10 @@ mod tests { let sink = PassthroughCodec::default().framed(outgoing); // try to connect - let (_, their_status) = - UnauthedEthStream::new(sink).handshake(status, fork_filter).await.unwrap(); + let (_, their_status) = UnauthedEthStream::new(sink) + .handshake::(status, fork_filter) + .await + .unwrap(); // their status is a clone of our status, these should be equal assert_eq!(their_status, status); @@ -415,7 +432,7 @@ mod tests { let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new()); let status = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: NamedChain::Mainnet.into(), total_difficulty: U256::from(2).pow(U256::from(100)) - U256::from(1), blockhash: B256::random(), @@ -434,7 +451,7 @@ mod tests { let (incoming, _) = listener.accept().await.unwrap(); let stream = PassthroughCodec::default().framed(incoming); let (_, their_status) = UnauthedEthStream::new(stream) - .handshake(status_clone, fork_filter_clone) + .handshake::(status_clone, fork_filter_clone) .await .unwrap(); @@ -446,8 +463,10 @@ mod tests { let sink = 
PassthroughCodec::default().framed(outgoing); // try to connect - let (_, their_status) = - UnauthedEthStream::new(sink).handshake(status, fork_filter).await.unwrap(); + let (_, their_status) = UnauthedEthStream::new(sink) + .handshake::(status, fork_filter) + .await + .unwrap(); // their status is a clone of our status, these should be equal assert_eq!(their_status, status); @@ -462,7 +481,7 @@ mod tests { let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new()); let status = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: NamedChain::Mainnet.into(), total_difficulty: U256::from(2).pow(U256::from(100)), blockhash: B256::random(), @@ -480,8 +499,9 @@ mod tests { // roughly based off of the design of tokio::net::TcpListener let (incoming, _) = listener.accept().await.unwrap(); let stream = PassthroughCodec::default().framed(incoming); - let handshake_res = - UnauthedEthStream::new(stream).handshake(status_clone, fork_filter_clone).await; + let handshake_res = UnauthedEthStream::new(stream) + .handshake::(status_clone, fork_filter_clone) + .await; // make sure the handshake fails due to td too high assert!(matches!( @@ -496,7 +516,9 @@ mod tests { let sink = PassthroughCodec::default().framed(outgoing); // try to connect - let handshake_res = UnauthedEthStream::new(sink).handshake(status, fork_filter).await; + let handshake_res = UnauthedEthStream::new(sink) + .handshake::(status, fork_filter) + .await; // this handshake should also fail due to td too high assert!(matches!( @@ -514,7 +536,7 @@ mod tests { async fn can_write_and_read_cleartext() { let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); let local_addr = listener.local_addr().unwrap(); - let test_msg = EthMessage::NewBlockHashes( + let test_msg: EthMessage = EthMessage::NewBlockHashes( vec![ BlockHashNumber { hash: B256::random(), number: 5 }, BlockHashNumber { hash: B256::random(), number: 6 }, @@ -549,7 +571,7 @@ mod tests { let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); let local_addr = listener.local_addr().unwrap(); let server_key = SecretKey::new(&mut rand::thread_rng()); - let test_msg = EthMessage::NewBlockHashes( + let test_msg: EthMessage = EthMessage::NewBlockHashes( vec![ BlockHashNumber { hash: B256::random(), number: 5 }, BlockHashNumber { hash: B256::random(), number: 6 }, @@ -591,7 +613,7 @@ mod tests { let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); let local_addr = listener.local_addr().unwrap(); let server_key = SecretKey::new(&mut rand::thread_rng()); - let test_msg = EthMessage::NewBlockHashes( + let test_msg: EthMessage = EthMessage::NewBlockHashes( vec![ BlockHashNumber { hash: B256::random(), number: 5 }, BlockHashNumber { hash: B256::random(), number: 6 }, @@ -603,7 +625,7 @@ mod tests { let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new()); let status = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: NamedChain::Mainnet.into(), total_difficulty: U256::ZERO, blockhash: B256::random(), @@ -674,7 +696,7 @@ mod tests { let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new()); let status = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: NamedChain::Mainnet.into(), total_difficulty: U256::ZERO, blockhash: B256::random(), @@ -695,7 +717,7 @@ mod tests { let (incoming, _) = listener.accept().await.unwrap(); let stream = PassthroughCodec::default().framed(incoming); let (_, their_status) = 
UnauthedEthStream::new(stream) - .handshake(status_clone, fork_filter_clone) + .handshake::(status_clone, fork_filter_clone) .await .unwrap(); @@ -708,7 +730,11 @@ mod tests { // try to connect let handshake_result = UnauthedEthStream::new(sink) - .handshake_with_timeout(status, fork_filter, Duration::from_secs(1)) + .handshake_with_timeout::( + status, + fork_filter, + Duration::from_secs(1), + ) .await; // Assert that a timeout error occurred diff --git a/crates/net/eth-wire/src/multiplex.rs b/crates/net/eth-wire/src/multiplex.rs index d1d977aba784..6f882f408872 100644 --- a/crates/net/eth-wire/src/multiplex.rs +++ b/crates/net/eth-wire/src/multiplex.rs @@ -24,6 +24,7 @@ use crate::{ }; use bytes::{Bytes, BytesMut}; use futures::{Sink, SinkExt, Stream, StreamExt, TryStream, TryStreamExt}; +use reth_eth_wire_types::NetworkPrimitives; use reth_primitives::ForkFilter; use tokio::sync::{mpsc, mpsc::UnboundedSender}; use tokio_stream::wrappers::UnboundedReceiverStream; @@ -204,11 +205,11 @@ impl RlpxProtocolMultiplexer { /// Converts this multiplexer into a [`RlpxSatelliteStream`] with eth protocol as the given /// primary protocol. - pub async fn into_eth_satellite_stream( + pub async fn into_eth_satellite_stream( self, status: Status, fork_filter: ForkFilter, - ) -> Result<(RlpxSatelliteStream>, Status), EthStreamError> + ) -> Result<(RlpxSatelliteStream>, Status), EthStreamError> where St: Stream> + Sink + Unpin, { @@ -674,6 +675,7 @@ mod tests { }, UnauthedP2PStream, }; + use reth_eth_wire_types::EthNetworkPrimitives; use tokio::{net::TcpListener, sync::oneshot}; use tokio_util::codec::Decoder; @@ -693,7 +695,7 @@ mod tests { UnauthedP2PStream::new(stream).handshake(server_hello).await.unwrap(); let (_eth_stream, _) = UnauthedEthStream::new(p2p_stream) - .handshake(other_status, other_fork_filter) + .handshake::(other_status, other_fork_filter) .await .unwrap(); @@ -708,7 +710,9 @@ mod tests { .into_satellite_stream_with_handshake( eth.capability().as_ref(), move |proxy| async move { - UnauthedEthStream::new(proxy).handshake(status, fork_filter).await + UnauthedEthStream::new(proxy) + .handshake::(status, fork_filter) + .await }, ) .await @@ -731,7 +735,7 @@ mod tests { let (conn, _) = UnauthedP2PStream::new(stream).handshake(server_hello).await.unwrap(); let (mut st, _their_status) = RlpxProtocolMultiplexer::new(conn) - .into_eth_satellite_stream(other_status, other_fork_filter) + .into_eth_satellite_stream::(other_status, other_fork_filter) .await .unwrap(); @@ -762,7 +766,7 @@ mod tests { let conn = connect_passthrough(local_addr, test_hello().0).await; let (mut st, _their_status) = RlpxProtocolMultiplexer::new(conn) - .into_eth_satellite_stream(status, fork_filter) + .into_eth_satellite_stream::(status, fork_filter) .await .unwrap(); diff --git a/crates/net/eth-wire/src/test_utils.rs b/crates/net/eth-wire/src/test_utils.rs index e516c0aee7df..d7a3aa582b72 100644 --- a/crates/net/eth-wire/src/test_utils.rs +++ b/crates/net/eth-wire/src/test_utils.rs @@ -37,7 +37,7 @@ pub fn eth_handshake() -> (Status, ForkFilter) { let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new()); let status = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: Chain::mainnet(), total_difficulty: U256::ZERO, blockhash: B256::random(), diff --git a/crates/net/eth-wire/tests/new_block.rs b/crates/net/eth-wire/tests/new_block.rs index 266752b74abf..366bf26a3a20 100644 --- a/crates/net/eth-wire/tests/new_block.rs +++ b/crates/net/eth-wire/tests/new_block.rs @@ -11,7 
+11,7 @@ fn decode_new_block_network() { PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("testdata/new_block_network_rlp"); let data = fs::read_to_string(network_data_path).expect("Unable to read file"); let hex_data = hex::decode(data.trim()).unwrap(); - let _txs = NewBlock::decode(&mut &hex_data[..]).unwrap(); + let _txs: NewBlock = NewBlock::decode(&mut &hex_data[..]).unwrap(); } #[test] @@ -20,7 +20,7 @@ fn decode_new_block_network_bsc_one() { PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("testdata/bsc_new_block_network_one"); let data = fs::read_to_string(network_data_path).expect("Unable to read file"); let hex_data = hex::decode(data.trim()).unwrap(); - let _txs = NewBlock::decode(&mut &hex_data[..]).unwrap(); + let _txs: NewBlock = NewBlock::decode(&mut &hex_data[..]).unwrap(); } #[test] @@ -29,5 +29,5 @@ fn decode_new_block_network_bsc_two() { PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("testdata/bsc_new_block_network_two"); let data = fs::read_to_string(network_data_path).expect("Unable to read file"); let hex_data = hex::decode(data.trim()).unwrap(); - let _txs = NewBlock::decode(&mut &hex_data[..]).unwrap(); + let _txs: NewBlock = NewBlock::decode(&mut &hex_data[..]).unwrap(); } diff --git a/crates/net/eth-wire/tests/pooled_transactions.rs b/crates/net/eth-wire/tests/pooled_transactions.rs index 6690f42631a6..22c5fcc33291 100644 --- a/crates/net/eth-wire/tests/pooled_transactions.rs +++ b/crates/net/eth-wire/tests/pooled_transactions.rs @@ -12,7 +12,7 @@ use test_fuzz::test_fuzz; #[test_fuzz] fn roundtrip_pooled_transactions(hex_data: Vec) -> Result<(), alloy_rlp::Error> { let input_rlp = &mut &hex_data[..]; - let txs = match PooledTransactions::decode(input_rlp) { + let txs: PooledTransactions = match PooledTransactions::decode(input_rlp) { Ok(txs) => txs, Err(e) => return Err(e), }; @@ -28,7 +28,7 @@ fn roundtrip_pooled_transactions(hex_data: Vec) -> Result<(), alloy_rlp::Err assert_eq!(expected_encoding, buf); // now do another decoding, on what we encoded - this should succeed - let txs2 = PooledTransactions::decode(&mut &buf[..]).unwrap(); + let txs2: PooledTransactions = PooledTransactions::decode(&mut &buf[..]).unwrap(); // ensure that the payload length is the same assert_eq!(txs.length(), txs2.length()); @@ -54,7 +54,8 @@ fn decode_request_pair_pooled_blob_transactions() { .join("testdata/request_pair_pooled_blob_transactions"); let data = fs::read_to_string(network_data_path).expect("Unable to read file"); let hex_data = hex::decode(data.trim()).unwrap(); - let _txs = ProtocolMessage::decode_message(EthVersion::Eth68, &mut &hex_data[..]).unwrap(); + let _txs: ProtocolMessage = + ProtocolMessage::decode_message(EthVersion::Eth68, &mut &hex_data[..]).unwrap(); } #[test] diff --git a/crates/net/network-api/src/downloaders.rs b/crates/net/network-api/src/downloaders.rs index f081c16ed815..cbfe816134ea 100644 --- a/crates/net/network-api/src/downloaders.rs +++ b/crates/net/network-api/src/downloaders.rs @@ -1,5 +1,7 @@ //! API related to syncing blocks. +use std::fmt::Debug; + use futures::Future; use reth_network_p2p::BlockClient; use tokio::sync::oneshot; @@ -7,10 +9,13 @@ use tokio::sync::oneshot; /// Provides client for downloading blocks. #[auto_impl::auto_impl(&, Arc)] pub trait BlockDownloaderProvider { + /// The client this type can provide. + type Client: BlockClient + Send + Sync + Clone + 'static; + /// Returns a new [`BlockClient`], used for fetching blocks from peers. /// /// The client is the entrypoint for sending block requests to the network. 
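The explicit annotations added to these tests (`let _txs: NewBlock = ...`, `let txs: PooledTransactions = ...`) are a direct consequence of the wire types becoming generic over network primitives with an Ethereum default: `decode` alone no longer fixes a concrete type in every position. A minimal, self-contained sketch of that pattern; `Envelope`, `DefaultNet`, and `decode_envelope` are illustrative stand-ins, not reth APIs:

    use std::marker::PhantomData;

    // A wire type that is generic over network primitives, with a default.
    struct DefaultNet;

    struct Envelope<N = DefaultNet> {
        payload: Vec<u8>,
        _net: PhantomData<N>,
    }

    fn decode_envelope<N>(raw: &[u8]) -> Envelope<N> {
        Envelope { payload: raw.to_vec(), _net: PhantomData }
    }

    fn main() {
        // Without an annotation `N` is ambiguous; naming the type picks the
        // default (or any other primitives implementation) explicitly.
        let _e: Envelope = decode_envelope(&[1, 2, 3]);
    }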
    fn fetch_client(
        &self,
-    ) -> impl Future<Output = Result<impl BlockClient, oneshot::error::RecvError>> + Send;
+    ) -> impl Future<Output = Result<Self::Client, oneshot::error::RecvError>> + Send;
 }
diff --git a/crates/net/network-api/src/events.rs b/crates/net/network-api/src/events.rs
index d2bd66d1fdd9..af392b6f9ead 100644
--- a/crates/net/network-api/src/events.rs
+++ b/crates/net/network-api/src/events.rs
@@ -4,8 +4,9 @@ use std::{fmt, net::SocketAddr, sync::Arc};
 
 use reth_eth_wire_types::{
     message::RequestPair, BlockBodies, BlockHeaders, Capabilities, DisconnectReason, EthMessage,
-    EthVersion, GetBlockBodies, GetBlockHeaders, GetNodeData, GetPooledTransactions, GetReceipts,
-    NodeData, PooledTransactions, Receipts, Status,
+    EthNetworkPrimitives, EthVersion, GetBlockBodies, GetBlockHeaders, GetNodeData,
+    GetPooledTransactions, GetReceipts, NetworkPrimitives, NodeData, PooledTransactions, Receipts,
+    Status,
 };
 use reth_ethereum_forks::ForkId;
 use reth_network_p2p::error::{RequestError, RequestResult};
@@ -30,8 +31,8 @@ pub trait NetworkEventListenerProvider: Send + Sync {
 ///
 /// This includes any event types that may be relevant to tasks, for metrics, keep track of peers
 /// etc.
-#[derive(Debug, Clone)]
-pub enum NetworkEvent {
+#[derive(Debug)]
+pub enum NetworkEvent<R = PeerRequest> {
     /// Closed the peer session.
     SessionClosed {
         /// The identifier of the peer to which a session was closed.
@@ -50,7 +51,7 @@ pub enum NetworkEvent {
         /// Capabilities the peer announced
         capabilities: Arc<Capabilities>,
         /// A request channel to the session task.
-        messages: PeerRequestSender,
+        messages: PeerRequestSender<R>,
         /// The status of the peer to which a session was established.
         status: Arc<Status>,
         /// negotiated eth version of the session
@@ -62,6 +63,35 @@ pub enum NetworkEvent {
     PeerRemoved(PeerId),
 }
 
+impl<R> Clone for NetworkEvent<R> {
+    fn clone(&self) -> Self {
+        match self {
+            Self::SessionClosed { peer_id, reason } => {
+                Self::SessionClosed { peer_id: *peer_id, reason: *reason }
+            }
+            Self::SessionEstablished {
+                peer_id,
+                remote_addr,
+                client_version,
+                capabilities,
+                messages,
+                status,
+                version,
+            } => Self::SessionEstablished {
+                peer_id: *peer_id,
+                remote_addr: *remote_addr,
+                client_version: client_version.clone(),
+                capabilities: capabilities.clone(),
+                messages: messages.clone(),
+                status: status.clone(),
+                version: *version,
+            },
+            Self::PeerAdded(peer) => Self::PeerAdded(*peer),
+            Self::PeerRemoved(peer) => Self::PeerRemoved(*peer),
+        }
+    }
+}
+
 /// Events produced by the `Discovery` manager.
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub enum DiscoveryEvent {
@@ -98,7 +128,7 @@ pub enum DiscoveredEvent {
 
 /// Protocol related request messages that expect a response
 #[derive(Debug)]
-pub enum PeerRequest {
+pub enum PeerRequest<N: NetworkPrimitives = EthNetworkPrimitives> {
     /// Requests block headers from the peer.
     ///
     /// The response should be sent through the channel.
@@ -106,7 +136,7 @@ pub enum PeerRequest {
         /// The request for block headers.
         request: GetBlockHeaders,
         /// The channel to send the response for block headers.
-        response: oneshot::Sender<RequestResult<BlockHeaders>>,
+        response: oneshot::Sender<RequestResult<BlockHeaders<N::BlockHeader>>>,
     },
     /// Requests block bodies from the peer.
     ///
@@ -115,7 +145,7 @@ pub enum PeerRequest {
         /// The request for block bodies.
         request: GetBlockBodies,
         /// The channel to send the response for block bodies.
-        response: oneshot::Sender<RequestResult<BlockBodies<N::BlockBody>>>,
+        response: oneshot::Sender<RequestResult<BlockBodies<N::BlockBody>>>,
     },
     /// Requests pooled transactions from the peer.
    ///
@@ -148,7 +178,7 @@ pub enum PeerRequest {
 
 // === impl PeerRequest ===
 
-impl PeerRequest {
+impl<N: NetworkPrimitives> PeerRequest<N> {
     /// Invoked if we received a response which does not match the request
     pub fn send_bad_response(self) {
         self.send_err_response(RequestError::BadResponse)
@@ -166,7 +196,7 @@ impl PeerRequest {
     }
 
     /// Returns the [`EthMessage`] for this type
-    pub fn create_request_message(&self, request_id: u64) -> EthMessage {
+    pub fn create_request_message(&self, request_id: u64) -> EthMessage<N> {
         match self {
             Self::GetBlockHeaders { request, .. } => {
                 EthMessage::GetBlockHeaders(RequestPair { request_id, message: *request })
@@ -199,24 +229,29 @@ impl PeerRequest {
 }
 
 /// A Cloneable connection for sending _requests_ directly to the session of a peer.
-#[derive(Clone)]
-pub struct PeerRequestSender {
+pub struct PeerRequestSender<R = PeerRequest> {
     /// id of the remote node.
     pub peer_id: PeerId,
     /// The Sender half connected to a session.
-    pub to_session_tx: mpsc::Sender<PeerRequest>,
+    pub to_session_tx: mpsc::Sender<R>,
+}
+
+impl<R> Clone for PeerRequestSender<R> {
+    fn clone(&self) -> Self {
+        Self { peer_id: self.peer_id, to_session_tx: self.to_session_tx.clone() }
+    }
 }
 
 // === impl PeerRequestSender ===
 
-impl PeerRequestSender {
+impl<R> PeerRequestSender<R> {
     /// Constructs a new sender instance that's wired to a session
-    pub const fn new(peer_id: PeerId, to_session_tx: mpsc::Sender<PeerRequest>) -> Self {
+    pub const fn new(peer_id: PeerId, to_session_tx: mpsc::Sender<R>) -> Self {
         Self { peer_id, to_session_tx }
     }
 
     /// Attempts to immediately send a message on this Sender
-    pub fn try_send(&self, req: PeerRequest) -> Result<(), mpsc::error::TrySendError<PeerRequest>> {
+    pub fn try_send(&self, req: R) -> Result<(), mpsc::error::TrySendError<R>> {
         self.to_session_tx.try_send(req)
     }
 
@@ -226,7 +261,7 @@ impl PeerRequestSender {
     }
 }
 
-impl fmt::Debug for PeerRequestSender {
+impl<R> fmt::Debug for PeerRequestSender<R> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         f.debug_struct("PeerRequestSender").field("peer_id", &self.peer_id).finish_non_exhaustive()
     }
diff --git a/crates/net/network-api/src/lib.rs b/crates/net/network-api/src/lib.rs
index 6163c8730033..986d490c34f9 100644
--- a/crates/net/network-api/src/lib.rs
+++ b/crates/net/network-api/src/lib.rs
@@ -36,6 +36,7 @@ pub use events::{
 use std::{future::Future, net::SocketAddr, sync::Arc, time::Instant};
 
 use reth_eth_wire_types::{capability::Capabilities, DisconnectReason, EthVersion, Status};
+use reth_network_p2p::EthBlockClient;
 use reth_network_peers::NodeRecord;
 
 /// The `PeerId` type.
@@ -43,7 +44,7 @@ pub type PeerId = alloy_primitives::B512;
 
 /// Helper trait that unifies network API needed to launch node.
 pub trait FullNetwork:
-    BlockDownloaderProvider
+    BlockDownloaderProvider<Client: EthBlockClient>
     + NetworkSyncUpdater
     + NetworkInfo
     + NetworkEventListenerProvider
@@ -55,7 +56,7 @@ pub trait FullNetwork:
 }
 
 impl<T> FullNetwork for T where
-    T: BlockDownloaderProvider
+    T: BlockDownloaderProvider<Client: EthBlockClient>
         + NetworkSyncUpdater
         + NetworkInfo
         + NetworkEventListenerProvider
diff --git a/crates/net/network-types/src/peers/config.rs b/crates/net/network-types/src/peers/config.rs
index 97a8bb3cac39..890679f5d34b 100644
--- a/crates/net/network-types/src/peers/config.rs
+++ b/crates/net/network-types/src/peers/config.rs
@@ -24,6 +24,9 @@ pub const DEFAULT_MAX_COUNT_PEERS_INBOUND: u32 = 30;
 /// This restricts how many outbound dials can be performed concurrently.
 pub const DEFAULT_MAX_COUNT_CONCURRENT_OUTBOUND_DIALS: usize = 15;
 
+/// A temporary timeout for IPs on incoming connection attempts.
+pub const INBOUND_IP_THROTTLE_DURATION: Duration = Duration::from_secs(30); + /// The durations to use when a backoff should be applied to a peer. /// /// See also [`BackoffKind`]. @@ -155,6 +158,11 @@ pub struct PeersConfig { /// /// The backoff duration increases with number of backoff attempts. pub backoff_durations: PeerBackoffDurations, + /// How long to temporarily ban ips on incoming connection attempts. + /// + /// This acts as an IP based rate limit. + #[cfg_attr(feature = "serde", serde(default, with = "humantime_serde"))] + pub incoming_ip_throttle_duration: Duration, } impl Default for PeersConfig { @@ -171,6 +179,7 @@ impl Default for PeersConfig { trusted_nodes_only: false, basic_nodes: Default::default(), max_backoff_count: 5, + incoming_ip_throttle_duration: INBOUND_IP_THROTTLE_DURATION, } } } diff --git a/crates/net/network-types/src/peers/state.rs b/crates/net/network-types/src/peers/state.rs index f6ab1a39f853..1e2466c805a1 100644 --- a/crates/net/network-types/src/peers/state.rs +++ b/crates/net/network-types/src/peers/state.rs @@ -31,6 +31,12 @@ impl PeerConnectionState { } } + /// Returns true if this is the idle state. + #[inline] + pub const fn is_idle(&self) -> bool { + matches!(self, Self::Idle) + } + /// Returns true if this is an active incoming connection. #[inline] pub const fn is_incoming(&self) -> bool { diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index f444aa7fe27c..09f81e63e545 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -16,6 +16,7 @@ workspace = true reth-chainspec.workspace = true reth-fs-util.workspace = true reth-primitives = { workspace = true, features = ["secp256k1"] } +reth-primitives-traits.workspace = true reth-net-banlist.workspace = true reth-network-api.workspace = true reth-network-p2p.workspace = true @@ -34,6 +35,7 @@ reth-network-peers = { workspace = true, features = ["net"] } reth-network-types.workspace = true # ethereum +alloy-consensus.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true @@ -129,7 +131,8 @@ test-utils = [ "reth-discv4/test-utils", "reth-network/test-utils", "reth-network-p2p/test-utils", - "reth-primitives/test-utils" + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", ] [[bench]] diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 72627f5b657d..96aef249d9f9 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -631,14 +631,13 @@ impl NetworkMode { #[cfg(test)] mod tests { - use std::sync::Arc; - use super::*; use rand::thread_rng; use reth_chainspec::{Chain, MAINNET}; use reth_dns_discovery::tree::LinkEntry; use reth_primitives::ForkHash; use reth_provider::test_utils::NoopProvider; + use std::sync::Arc; fn builder() -> NetworkConfigBuilder { let secret_key = SecretKey::new(&mut thread_rng()); diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index f0c355b174a4..1f20be53967d 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -7,6 +7,7 @@ use std::{ time::Duration, }; +use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; use alloy_rlp::Encodable; use futures::StreamExt; @@ -17,7 +18,7 @@ use reth_eth_wire::{ use reth_network_api::test_utils::PeersHandle; use reth_network_p2p::error::RequestResult; use reth_network_peers::PeerId; -use reth_primitives::{BlockBody, Header}; +use reth_primitives::BlockBody; use 
reth_storage_api::{BlockReader, HeaderProvider, ReceiptProvider}; use tokio::sync::{mpsc::Receiver, oneshot}; use tokio_stream::wrappers::ReceiverStream; diff --git a/crates/net/network/src/fetch/client.rs b/crates/net/network/src/fetch/client.rs index c47ee5d234fc..584c079b8d86 100644 --- a/crates/net/network/src/fetch/client.rs +++ b/crates/net/network/src/fetch/client.rs @@ -7,6 +7,7 @@ use std::sync::{ use alloy_primitives::B256; use futures::{future, future::Either}; +use reth_eth_wire::{EthNetworkPrimitives, NetworkPrimitives}; use reth_network_api::test_utils::PeersHandle; use reth_network_p2p::{ bodies::client::{BodiesClient, BodiesFut}, @@ -17,7 +18,6 @@ use reth_network_p2p::{ }; use reth_network_peers::PeerId; use reth_network_types::ReputationChangeKind; -use reth_primitives::Header; use tokio::sync::{mpsc::UnboundedSender, oneshot}; use crate::{fetch::DownloadRequest, flattened_response::FlattenedResponse}; @@ -30,16 +30,16 @@ use crate::{fetch::DownloadRequest, flattened_response::FlattenedResponse}; /// /// include_mmd!("docs/mermaid/fetch-client.mmd") #[derive(Debug, Clone)] -pub struct FetchClient { +pub struct FetchClient { /// Sender half of the request channel. - pub(crate) request_tx: UnboundedSender, + pub(crate) request_tx: UnboundedSender>, /// The handle to the peers pub(crate) peers_handle: PeersHandle, /// Number of active peer sessions the node's currently handling. pub(crate) num_active_peers: Arc, } -impl DownloadClient for FetchClient { +impl DownloadClient for FetchClient { fn report_bad_message(&self, peer_id: PeerId) { self.peers_handle.reputation_change(peer_id, ReputationChangeKind::BadMessage); } @@ -53,8 +53,9 @@ impl DownloadClient for FetchClient { // or an error. type HeadersClientFuture = Either, future::Ready>; -impl HeadersClient for FetchClient { - type Output = HeadersClientFuture>>; +impl HeadersClient for FetchClient { + type Header = N::BlockHeader; + type Output = HeadersClientFuture>>; /// Sends a `GetBlockHeaders` request to an available peer. fn get_headers_with_priority( @@ -75,8 +76,9 @@ impl HeadersClient for FetchClient { } } -impl BodiesClient for FetchClient { - type Output = BodiesFut; +impl BodiesClient for FetchClient { + type Body = N::BlockBody; + type Output = BodiesFut; /// Sends a `GetBlockBodies` request to an available peer. fn get_block_bodies_with_priority( diff --git a/crates/net/network/src/fetch/mod.rs b/crates/net/network/src/fetch/mod.rs index f5c0006bc3a4..8af6300b7056 100644 --- a/crates/net/network/src/fetch/mod.rs +++ b/crates/net/network/src/fetch/mod.rs @@ -15,7 +15,7 @@ use std::{ use alloy_primitives::B256; use futures::StreamExt; -use reth_eth_wire::{GetBlockBodies, GetBlockHeaders}; +use reth_eth_wire::{EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, NetworkPrimitives}; use reth_network_api::test_utils::PeersHandle; use reth_network_p2p::{ error::{EthResponseValidator, PeerRequestResult, RequestError, RequestResult}, @@ -24,12 +24,14 @@ use reth_network_p2p::{ }; use reth_network_peers::PeerId; use reth_network_types::ReputationChangeKind; -use reth_primitives::{BlockBody, Header}; use tokio::sync::{mpsc, mpsc::UnboundedSender, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; use crate::message::BlockRequest; +type InflightHeadersRequest = Request>>; +type InflightBodiesRequest = Request, PeerRequestResult>>; + /// Manages data fetching operations. 
/// /// This type is hooked into the staged sync pipeline and delegates download request to available @@ -37,13 +39,11 @@ use crate::message::BlockRequest; /// /// This type maintains a list of connected peers that are available for requests. #[derive(Debug)] -pub struct StateFetcher { +pub struct StateFetcher { /// Currently active [`GetBlockHeaders`] requests - inflight_headers_requests: - HashMap>>>, + inflight_headers_requests: HashMap>, /// Currently active [`GetBlockBodies`] requests - inflight_bodies_requests: - HashMap, PeerRequestResult>>>, + inflight_bodies_requests: HashMap>, /// The list of _available_ peers for requests. peers: HashMap, /// The handle to the peers manager @@ -51,16 +51,16 @@ pub struct StateFetcher { /// Number of active peer sessions the node's currently handling. num_active_peers: Arc, /// Requests queued for processing - queued_requests: VecDeque, + queued_requests: VecDeque>, /// Receiver for new incoming download requests - download_requests_rx: UnboundedReceiverStream, + download_requests_rx: UnboundedReceiverStream>, /// Sender for download requests, used to detach a [`FetchClient`] - download_requests_tx: UnboundedSender, + download_requests_tx: UnboundedSender>, } // === impl StateSyncer === -impl StateFetcher { +impl StateFetcher { pub(crate) fn new(peers_handle: PeersHandle, num_active_peers: Arc) -> Self { let (download_requests_tx, download_requests_rx) = mpsc::unbounded_channel(); Self { @@ -217,7 +217,7 @@ impl StateFetcher { /// Handles a new request to a peer. /// /// Caution: this assumes the peer exists and is idle - fn prepare_block_request(&mut self, peer_id: PeerId, req: DownloadRequest) -> BlockRequest { + fn prepare_block_request(&mut self, peer_id: PeerId, req: DownloadRequest) -> BlockRequest { // update the peer's state if let Some(peer) = self.peers.get_mut(&peer_id) { peer.state = req.peer_state(); @@ -260,7 +260,7 @@ impl StateFetcher { pub(crate) fn on_block_headers_response( &mut self, peer_id: PeerId, - res: RequestResult>, + res: RequestResult>, ) -> Option { let is_error = res.is_err(); let maybe_reputation_change = res.reputation_change_err(); @@ -296,7 +296,7 @@ impl StateFetcher { pub(crate) fn on_block_bodies_response( &mut self, peer_id: PeerId, - res: RequestResult>, + res: RequestResult>, ) -> Option { let is_likely_bad_response = res.as_ref().map_or(true, |bodies| bodies.is_empty()); @@ -315,7 +315,7 @@ impl StateFetcher { } /// Returns a new [`FetchClient`] that can send requests to this type. - pub(crate) fn client(&self) -> FetchClient { + pub(crate) fn client(&self) -> FetchClient { FetchClient { request_tx: self.download_requests_tx.clone(), peers_handle: self.peers_handle.clone(), @@ -405,24 +405,24 @@ struct Request { /// Requests that can be sent to the Syncer from a [`FetchClient`] #[derive(Debug)] -pub(crate) enum DownloadRequest { +pub(crate) enum DownloadRequest { /// Download the requested headers and send response through channel GetBlockHeaders { request: HeadersRequest, - response: oneshot::Sender>>, + response: oneshot::Sender>>, priority: Priority, }, /// Download the requested headers and send response through channel GetBlockBodies { request: Vec, - response: oneshot::Sender>>, + response: oneshot::Sender>>, priority: Priority, }, } // === impl DownloadRequest === -impl DownloadRequest { +impl DownloadRequest { /// Returns the corresponding state for a peer that handles the request. 
const fn peer_state(&self) -> PeerState { match self { @@ -472,14 +472,14 @@ pub(crate) enum BlockResponseOutcome { mod tests { use super::*; use crate::{peers::PeersManager, PeersConfig}; + use alloy_consensus::Header; use alloy_primitives::B512; - use reth_primitives::SealedHeader; use std::future::poll_fn; #[tokio::test(flavor = "multi_thread")] async fn test_poll_fetcher() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher = StateFetcher::new(manager.handle(), Default::default()); + let mut fetcher: StateFetcher = StateFetcher::new(manager.handle(), Default::default()); poll_fn(move |cx| { assert!(fetcher.poll(cx).is_pending()); @@ -499,7 +499,7 @@ mod tests { #[tokio::test] async fn test_peer_rotation() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher = StateFetcher::new(manager.handle(), Default::default()); + let mut fetcher: StateFetcher = StateFetcher::new(manager.handle(), Default::default()); // Add a few random peers let peer1 = B512::random(); let peer2 = B512::random(); @@ -522,7 +522,7 @@ mod tests { #[tokio::test] async fn test_peer_prioritization() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher = StateFetcher::new(manager.handle(), Default::default()); + let mut fetcher: StateFetcher = StateFetcher::new(manager.handle(), Default::default()); // Add a few random peers let peer1 = B512::random(); let peer2 = B512::random(); @@ -547,7 +547,7 @@ mod tests { #[tokio::test] async fn test_on_block_headers_response() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher = StateFetcher::new(manager.handle(), Default::default()); + let mut fetcher: StateFetcher = StateFetcher::new(manager.handle(), Default::default()); let peer_id = B512::random(); assert_eq!(fetcher.on_block_headers_response(peer_id, Ok(vec![Header::default()])), None); @@ -577,7 +577,7 @@ mod tests { #[tokio::test] async fn test_header_response_outcome() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher = StateFetcher::new(manager.handle(), Default::default()); + let mut fetcher: StateFetcher = StateFetcher::new(manager.handle(), Default::default()); let peer_id = B512::random(); let request_pair = || { @@ -590,8 +590,7 @@ mod tests { }, response: tx, }; - let mut header = SealedHeader::default().unseal(); - header.number = 0u64; + let header = Header { number: 0, ..Default::default() }; (req, header) }; @@ -612,7 +611,10 @@ mod tests { let outcome = fetcher.on_block_headers_response(peer_id, Err(RequestError::Timeout)).unwrap(); - assert!(EthResponseValidator::reputation_change_err(&Err(RequestError::Timeout)).is_some()); + assert!(EthResponseValidator::reputation_change_err(&Err::, _>( + RequestError::Timeout + )) + .is_some()); match outcome { BlockResponseOutcome::BadResponse(peer, _) => { diff --git a/crates/net/network/src/import.rs b/crates/net/network/src/import.rs index 201dc3e4f786..749b3c347b37 100644 --- a/crates/net/network/src/import.rs +++ b/crates/net/network/src/import.rs @@ -7,7 +7,7 @@ use reth_network_peers::PeerId; use crate::message::NewBlockMessage; /// Abstraction over block import. -pub trait BlockImport: std::fmt::Debug + Send + Sync { +pub trait BlockImport: std::fmt::Debug + Send + Sync { /// Invoked for a received `NewBlock` broadcast message from the peer. 
/// /// > When a `NewBlock` announcement message is received from a peer, the client first verifies @@ -15,35 +15,35 @@ pub trait BlockImport: std::fmt::Debug + Send + Sync { /// /// This is supposed to start verification. The results are then expected to be returned via /// [`BlockImport::poll`]. - fn on_new_block(&mut self, peer_id: PeerId, incoming_block: NewBlockMessage); + fn on_new_block(&mut self, peer_id: PeerId, incoming_block: NewBlockMessage); /// Returns the results of a [`BlockImport::on_new_block`] - fn poll(&mut self, cx: &mut Context<'_>) -> Poll; + fn poll(&mut self, cx: &mut Context<'_>) -> Poll>; } /// Outcome of the [`BlockImport`]'s block handling. #[derive(Debug)] -pub struct BlockImportOutcome { +pub struct BlockImportOutcome { /// Sender of the `NewBlock` message. pub peer: PeerId, /// The result after validating the block - pub result: Result, + pub result: Result, BlockImportError>, } /// Represents the successful validation of a received `NewBlock` message. #[derive(Debug)] -pub enum BlockValidation { +pub enum BlockValidation { /// Basic Header validity check, after which the block should be relayed to peers via a /// `NewBlock` message ValidHeader { /// received block - block: NewBlockMessage, + block: NewBlockMessage, }, /// Successfully imported: state-root matches after execution. The block should be relayed via /// `NewBlockHashes` ValidBlock { /// validated block. - block: NewBlockMessage, + block: NewBlockMessage, }, } @@ -62,10 +62,10 @@ pub enum BlockImportError { #[non_exhaustive] pub struct ProofOfStakeBlockImport; -impl BlockImport for ProofOfStakeBlockImport { - fn on_new_block(&mut self, _peer_id: PeerId, _incoming_block: NewBlockMessage) {} +impl BlockImport for ProofOfStakeBlockImport { + fn on_new_block(&mut self, _peer_id: PeerId, _incoming_block: NewBlockMessage) {} - fn poll(&mut self, _cx: &mut Context<'_>) -> Poll { + fn poll(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Pending } } diff --git a/crates/net/network/src/message.rs b/crates/net/network/src/message.rs index 6b8287fe51cf..3040577415c5 100644 --- a/crates/net/network/src/message.rs +++ b/crates/net/network/src/message.rs @@ -8,44 +8,46 @@ use std::{ task::{ready, Context, Poll}, }; +use alloy_consensus::BlockHeader; use alloy_primitives::{Bytes, B256}; use futures::FutureExt; use reth_eth_wire::{ capability::RawCapabilityMessage, message::RequestPair, BlockBodies, BlockHeaders, EthMessage, - GetBlockBodies, GetBlockHeaders, NewBlock, NewBlockHashes, NewPooledTransactionHashes, - NodeData, PooledTransactions, Receipts, SharedTransactions, Transactions, + EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, NetworkPrimitives, NewBlock, + NewBlockHashes, NewPooledTransactionHashes, NodeData, PooledTransactions, Receipts, + SharedTransactions, Transactions, }; use reth_network_api::PeerRequest; use reth_network_p2p::error::{RequestError, RequestResult}; -use reth_primitives::{BlockBody, Header, PooledTransactionsElement, ReceiptWithBloom}; +use reth_primitives::{PooledTransactionsElement, ReceiptWithBloom}; use tokio::sync::oneshot; /// Internal form of a `NewBlock` message #[derive(Debug, Clone)] -pub struct NewBlockMessage { +pub struct NewBlockMessage { /// Hash of the block pub hash: B256, /// Raw received message - pub block: Arc, + pub block: Arc>, } // === impl NewBlockMessage === -impl NewBlockMessage { +impl NewBlockMessage { /// Returns the block number of the block pub fn number(&self) -> u64 { - self.block.block.header.number + self.block.block.header().number() } 
} /// All Bi-directional eth-message variants that can be sent to a session or received from a /// session. #[derive(Debug)] -pub enum PeerMessage { +pub enum PeerMessage { /// Announce new block hashes NewBlockHashes(NewBlockHashes), /// Broadcast new block. - NewBlock(NewBlockMessage), + NewBlock(NewBlockMessage), /// Received transactions _from_ the peer ReceivedTransaction(Transactions), /// Broadcast transactions _from_ local _to_ a peer. @@ -53,7 +55,7 @@ pub enum PeerMessage { /// Send new pooled transactions PooledTransactions(NewPooledTransactionHashes), /// All `eth` request variants. - EthRequest(PeerRequest), + EthRequest(PeerRequest), /// Other than eth namespace message Other(RawCapabilityMessage), } @@ -74,16 +76,16 @@ pub enum BlockRequest { /// Corresponding variant for [`PeerRequest`]. #[derive(Debug)] -pub enum PeerResponse { +pub enum PeerResponse { /// Represents a response to a request for block headers. BlockHeaders { /// The receiver channel for the response to a block headers request. - response: oneshot::Receiver>, + response: oneshot::Receiver>>, }, /// Represents a response to a request for block bodies. BlockBodies { /// The receiver channel for the response to a block bodies request. - response: oneshot::Receiver>, + response: oneshot::Receiver>>, }, /// Represents a response to a request for pooled transactions. PooledTransactions { @@ -104,9 +106,9 @@ pub enum PeerResponse { // === impl PeerResponse === -impl PeerResponse { +impl PeerResponse { /// Polls the type to completion. - pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll { + pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll> { macro_rules! poll_request { ($response:ident, $item:ident, $cx:ident) => { match ready!($response.poll_unpin($cx)) { @@ -139,11 +141,11 @@ impl PeerResponse { /// All response variants for [`PeerResponse`] #[derive(Debug)] -pub enum PeerResponseResult { +pub enum PeerResponseResult { /// Represents a result containing block headers or an error. - BlockHeaders(RequestResult>), + BlockHeaders(RequestResult>), /// Represents a result containing block bodies or an error. - BlockBodies(RequestResult>), + BlockBodies(RequestResult>), /// Represents a result containing pooled transactions or an error. PooledTransactions(RequestResult>), /// Represents a result containing node data or an error. @@ -154,9 +156,9 @@ pub enum PeerResponseResult { // === impl PeerResponseResult === -impl PeerResponseResult { +impl PeerResponseResult { /// Converts this response into an [`EthMessage`] - pub fn try_into_message(self, id: u64) -> RequestResult { + pub fn try_into_message(self, id: u64) -> RequestResult> { macro_rules! to_message { ($response:ident, $item:ident, $request_id:ident) => { match $response { diff --git a/crates/net/network/src/metrics.rs b/crates/net/network/src/metrics.rs index 4333cf1408b0..bda5f84c76b8 100644 --- a/crates/net/network/src/metrics.rs +++ b/crates/net/network/src/metrics.rs @@ -85,6 +85,8 @@ pub struct SessionManagerMetrics { pub(crate) total_dial_successes: Counter, /// Number of dropped outgoing peer messages. pub(crate) total_outgoing_peer_messages_dropped: Counter, + /// Number of queued outgoing messages + pub(crate) queued_outgoing_messages: Gauge, } /// Metrics for the [`TransactionsManager`](crate::transactions::TransactionsManager). 
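The `queued_outgoing_messages` gauge added above gives each session's outgoing buffer an observable depth; the wrapper that keeps queue and gauge in lockstep (`QueuedOutgoingMessages`) is introduced in `session/active.rs` further down. A sketch of the underlying pattern, assuming the `metrics` crate's `Gauge` and using `MeteredQueue` as an illustrative name:

    use metrics::Gauge;
    use std::collections::VecDeque;

    // A queue that mirrors its length into a gauge on every push and pop.
    struct MeteredQueue<T> {
        inner: VecDeque<T>,
        depth: Gauge,
    }

    impl<T> MeteredQueue<T> {
        fn push(&mut self, item: T) {
            self.inner.push_back(item);
            self.depth.increment(1);
        }

        fn pop(&mut self) -> Option<T> {
            // `inspect` only runs the closure when an item was actually popped.
            self.inner.pop_front().inspect(|_| self.depth.decrement(1))
        }
    }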
diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 594ad4d155de..4175757e0cf1 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -18,10 +18,7 @@ use reth_network_api::{ NetworkEventListenerProvider, NetworkInfo, NetworkStatus, PeerInfo, PeerRequest, Peers, PeersInfo, }; -use reth_network_p2p::{ - sync::{NetworkSyncUpdater, SyncState, SyncStateProvider}, - BlockClient, -}; +use reth_network_p2p::sync::{NetworkSyncUpdater, SyncState, SyncStateProvider}; use reth_network_peers::{NodeRecord, PeerId}; use reth_network_types::{PeerAddr, PeerKind, Reputation, ReputationChangeKind}; use reth_primitives::{Head, TransactionSigned}; @@ -400,7 +397,9 @@ impl NetworkSyncUpdater for NetworkHandle { } impl BlockDownloaderProvider for NetworkHandle { - async fn fetch_client(&self) -> Result { + type Client = FetchClient; + + async fn fetch_client(&self) -> Result { let (tx, rx) = oneshot::channel(); let _ = self.manager().send(NetworkHandleMessage::FetchClient(tx)); rx.await diff --git a/crates/net/network/src/peers.rs b/crates/net/network/src/peers.rs index 3d5ff7a0d43a..4855ff5e7431 100644 --- a/crates/net/network/src/peers.rs +++ b/crates/net/network/src/peers.rs @@ -84,6 +84,8 @@ pub struct PeersManager { max_backoff_count: u8, /// Tracks the connection state of the node net_connection_state: NetworkConnectionState, + /// How long to temporarily ban ip on an incoming connection attempt. + incoming_ip_throttle_duration: Duration, } impl PeersManager { @@ -100,6 +102,7 @@ impl PeersManager { trusted_nodes_only, basic_nodes, max_backoff_count, + incoming_ip_throttle_duration, } = config; let (manager_tx, handle_rx) = mpsc::unbounded_channel(); let now = Instant::now(); @@ -148,6 +151,7 @@ impl PeersManager { last_tick: Instant::now(), max_backoff_count, net_connection_state: NetworkConnectionState::default(), + incoming_ip_throttle_duration, } } @@ -218,6 +222,11 @@ impl PeersManager { self.backed_off_peers.len() } + /// Returns the number of idle trusted peers. + fn num_idle_trusted_peers(&self) -> usize { + self.peers.iter().filter(|(_, peer)| peer.kind.is_trusted() && peer.state.is_idle()).count() + } + /// Invoked when a new _incoming_ tcp connection is accepted. /// /// returns an error if the inbound ip address is on the ban list @@ -229,12 +238,40 @@ impl PeersManager { return Err(InboundConnectionError::IpBanned) } - if !self.connection_info.has_in_capacity() && self.trusted_peer_ids.is_empty() { - // if we don't have any inbound slots and no trusted peers, we don't accept any new - // connections + // check if we even have slots for a new incoming connection + if !self.connection_info.has_in_capacity() { + if self.trusted_peer_ids.is_empty() { + // if we don't have any incoming slots and no trusted peers, we don't accept any new + // connections + return Err(InboundConnectionError::ExceedsCapacity) + } + + // there's an edge case here where no incoming connections besides from trusted peers + // are allowed (max_inbound == 0), in which case we still need to allow new pending + // incoming connections until all trusted peers are connected. 
+ let num_idle_trusted_peers = self.num_idle_trusted_peers(); + if num_idle_trusted_peers <= self.trusted_peer_ids.len() { + // we still want to limit concurrent pending connections + let max_inbound = + self.trusted_peer_ids.len().max(self.connection_info.config.max_inbound); + if self.connection_info.num_pending_in <= max_inbound { + self.connection_info.inc_pending_in(); + } + return Ok(()) + } + + // all trusted peers are either connected or connecting + return Err(InboundConnectionError::ExceedsCapacity) + } + + // also cap the incoming connections we can process at once + if !self.connection_info.has_in_pending_capacity() { return Err(InboundConnectionError::ExceedsCapacity) } + // apply the rate limit + self.throttle_incoming_ip(addr); + self.connection_info.inc_pending_in(); Ok(()) } @@ -353,6 +390,12 @@ impl PeersManager { self.ban_list.ban_ip_until(ip, std::time::Instant::now() + self.ban_duration); } + /// Bans the IP temporarily to rate limit inbound connection attempts per IP. + fn throttle_incoming_ip(&mut self, ip: IpAddr) { + self.ban_list + .ban_ip_until(ip, std::time::Instant::now() + self.incoming_ip_throttle_duration); + } + /// Temporarily puts the peer in timeout by inserting it into the backedoff peers set fn backoff_peer_until(&mut self, peer_id: PeerId, until: std::time::Instant) { trace!(target: "net::peers", ?peer_id, "backing off"); @@ -968,17 +1011,22 @@ impl ConnectionInfo { Self { config, num_outbound: 0, num_pending_out: 0, num_inbound: 0, num_pending_in: 0 } } - /// Returns `true` if there's still capacity for a new outgoing connection. + /// Returns `true` if there's still capacity to perform an outgoing connection. const fn has_out_capacity(&self) -> bool { self.num_pending_out < self.config.max_concurrent_outbound_dials && self.num_outbound < self.config.max_outbound } - /// Returns `true` if there's still capacity for a new incoming connection. + /// Returns `true` if there's still capacity to accept a new incoming connection. const fn has_in_capacity(&self) -> bool { self.num_inbound < self.config.max_inbound } + /// Returns `true` if we can handle an additional incoming pending connection. 
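One subtlety in the capacity checks above: even when all `max_inbound` slots are taken (or `max_inbound == 0`), pending inbound slots must stay open while trusted peers are still unconnected, and the helper defined next (`has_in_pending_capacity`) caps concurrent pending handshakes separately. A condensed sketch of that gate order, with stand-in names rather than the real `PeersManager`/`ConnectionInfo` fields:

    struct Gates {
        max_inbound: usize,
        num_inbound: usize,
        num_pending_in: usize,
    }

    // Hypothetical helper mirroring the decision order above: a full inbound
    // set is only overridden while a trusted peer is still expected, and
    // pending handshakes are capped on their own.
    fn accept_incoming(g: &mut Gates, awaiting_trusted_peer: bool) -> bool {
        if g.num_inbound >= g.max_inbound && !awaiting_trusted_peer {
            return false; // no free slot and no trusted-peer carve-out
        }
        if g.num_pending_in >= g.max_inbound.max(1) {
            return false; // too many connection attempts already in flight
        }
        g.num_pending_in += 1; // the real code also throttles the dialing IP here
        true
    }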
+ const fn has_in_pending_capacity(&self) -> bool { + self.num_pending_in < self.config.max_inbound + } + fn decr_state(&mut self, state: PeerConnectionState) { match state { PeerConnectionState::Idle => {} @@ -1094,15 +1142,6 @@ impl Display for InboundConnectionError { #[cfg(test)] mod tests { - use std::{ - future::{poll_fn, Future}, - io, - net::{IpAddr, Ipv4Addr, SocketAddr}, - pin::Pin, - task::{Context, Poll}, - time::Duration, - }; - use alloy_primitives::B512; use reth_eth_wire::{ errors::{EthHandshakeError, EthStreamError, P2PHandshakeError, P2PStreamError}, @@ -1114,6 +1153,14 @@ mod tests { use reth_network_types::{ peers::reputation::DEFAULT_REPUTATION, BackoffKind, ReputationChangeKind, }; + use std::{ + future::{poll_fn, Future}, + io, + net::{IpAddr, Ipv4Addr, SocketAddr}, + pin::Pin, + task::{Context, Poll}, + time::Duration, + }; use url::Host; use super::PeersManager; @@ -1597,6 +1644,23 @@ mod tests { assert_eq!(peers.connection_info.num_pending_in, 0); } + #[tokio::test] + async fn test_reject_incoming_at_pending_capacity() { + let mut peers = PeersManager::default(); + + for count in 1..=peers.connection_info.config.max_inbound { + let socket_addr = + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, count as u8)), 8008); + assert!(peers.on_incoming_pending_session(socket_addr.ip()).is_ok()); + assert_eq!(peers.connection_info.num_pending_in, count); + } + assert!(peers.connection_info.has_in_capacity()); + assert!(!peers.connection_info.has_in_pending_capacity()); + + let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 100)), 8008); + assert!(peers.on_incoming_pending_session(socket_addr.ip()).is_err()); + } + #[tokio::test] async fn test_closed_incoming() { let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); @@ -2278,6 +2342,39 @@ mod tests { ); } + #[tokio::test] + async fn test_incoming_rate_limit() { + let config = PeersConfig { + incoming_ip_throttle_duration: Duration::from_millis(100), + ..PeersConfig::test() + }; + let mut peers = PeersManager::new(config); + + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(168, 0, 1, 2)), 8009); + assert!(peers.on_incoming_pending_session(addr.ip()).is_ok()); + assert_eq!( + peers.on_incoming_pending_session(addr.ip()).unwrap_err(), + InboundConnectionError::IpBanned + ); + + peers.release_interval.reset_immediately(); + tokio::time::sleep(peers.incoming_ip_throttle_duration).await; + + // await unban + poll_fn(|cx| loop { + if peers.poll(cx).is_pending() { + return Poll::Ready(()); + } + }) + .await; + + assert!(peers.on_incoming_pending_session(addr.ip()).is_ok()); + assert_eq!( + peers.on_incoming_pending_session(addr.ip()).unwrap_err(), + InboundConnectionError::IpBanned + ); + } + #[tokio::test] async fn test_tick() { let ip = IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)); diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index e83f5d9f125a..f979a912cd46 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -11,17 +11,20 @@ use std::{ time::{Duration, Instant}, }; +use alloy_primitives::Sealable; use futures::{stream::Fuse, SinkExt, StreamExt}; +use metrics::Gauge; use reth_eth_wire::{ errors::{EthHandshakeError, EthStreamError, P2PStreamError}, message::{EthBroadcastMessage, RequestPair}, - Capabilities, DisconnectP2P, DisconnectReason, EthMessage, + Capabilities, DisconnectP2P, DisconnectReason, EthMessage, NetworkPrimitives, }; use 
reth_metrics::common::mpsc::MeteredPollSender; use reth_network_api::PeerRequest; use reth_network_p2p::error::RequestError; use reth_network_peers::PeerId; use reth_network_types::session::config::INITIAL_REQUEST_TIMEOUT; +use reth_primitives_traits::Block; use rustc_hash::FxHashMap; use tokio::{ sync::{mpsc::error::TrySendError, oneshot}, @@ -61,11 +64,11 @@ const TIMEOUT_SCALING: u32 = 3; /// - incoming requests/broadcasts _from remote_ via the connection /// - responses for handled ETH requests received from the remote peer. #[allow(dead_code)] -pub(crate) struct ActiveSession { +pub(crate) struct ActiveSession { /// Keeps track of request ids. pub(crate) next_id: u64, /// The underlying connection. - pub(crate) conn: EthRlpxConnection, + pub(crate) conn: EthRlpxConnection, /// Identifier of the node we're connected to. pub(crate) remote_peer_id: PeerId, /// The address we're connected to. @@ -75,19 +78,19 @@ pub(crate) struct ActiveSession { /// Internal identifier of this session pub(crate) session_id: SessionId, /// Incoming commands from the manager - pub(crate) commands_rx: ReceiverStream, + pub(crate) commands_rx: ReceiverStream>, /// Sink to send messages to the [`SessionManager`](super::SessionManager). - pub(crate) to_session_manager: MeteredPollSender, + pub(crate) to_session_manager: MeteredPollSender>, /// A message that needs to be delivered to the session manager - pub(crate) pending_message_to_session: Option, + pub(crate) pending_message_to_session: Option>, /// Incoming internal requests which are delegated to the remote peer. - pub(crate) internal_request_tx: Fuse>, + pub(crate) internal_request_tx: Fuse>>, /// All requests sent to the remote peer we're waiting on a response - pub(crate) inflight_requests: FxHashMap, + pub(crate) inflight_requests: FxHashMap>>, /// All requests that were sent by the remote peer and we're waiting on an internal response - pub(crate) received_requests_from_remote: Vec, + pub(crate) received_requests_from_remote: Vec>, /// Buffered messages that should be handled and sent to the peer. - pub(crate) queued_outgoing: VecDeque, + pub(crate) queued_outgoing: QueuedOutgoingMessages, /// The maximum time we wait for a response from a peer. pub(crate) internal_request_timeout: Arc, /// Interval when to check for timed out requests. @@ -96,10 +99,11 @@ pub(crate) struct ActiveSession { /// considered a protocol violation and the session will initiate a drop. pub(crate) protocol_breach_request_timeout: Duration, /// Used to reserve a slot to guarantee that the termination message is delivered - pub(crate) terminate_message: Option<(PollSender, ActiveSessionMessage)>, + pub(crate) terminate_message: + Option<(PollSender>, ActiveSessionMessage)>, } -impl ActiveSession { +impl ActiveSession { /// Returns `true` if the session is currently in the process of disconnecting fn is_disconnecting(&self) -> bool { self.conn.inner().is_disconnecting() @@ -121,7 +125,7 @@ impl ActiveSession { /// Handle a message read from the connection. /// /// Returns an error if the message is considered to be in violation of the protocol. - fn on_incoming_message(&mut self, msg: EthMessage) -> OnIncomingMessageOutcome { + fn on_incoming_message(&mut self, msg: EthMessage) -> OnIncomingMessageOutcome { /// A macro that handles an incoming request /// This creates a new channel and tries to send the sender half to the session while /// storing the receiver half internally so the pending response can be polled. 
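The macro described here implements the usual oneshot request/response wiring: the sender half travels to the remote-facing side together with the request, while the receiver half stays with the session so the pending response can be polled later. A minimal sketch with illustrative names (`Request` and `Pending` are not reth types):

    use tokio::sync::oneshot;

    // The request carries the sender half; the session keeps the receiver.
    enum Request {
        GetData { response: oneshot::Sender<Vec<u8>> },
    }

    struct Pending {
        response: oneshot::Receiver<Vec<u8>>,
    }

    fn on_incoming() -> (Request, Pending) {
        let (tx, rx) = oneshot::channel();
        (Request::GetData { response: tx }, Pending { response: rx })
    }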
@@ -181,7 +185,7 @@ impl ActiveSession { } EthMessage::NewBlock(msg) => { let block = - NewBlockMessage { hash: msg.block.header.hash_slow(), block: Arc::new(*msg) }; + NewBlockMessage { hash: msg.block.header().hash_slow(), block: Arc::new(*msg) }; self.try_emit_broadcast(PeerMessage::NewBlock(block)).into() } EthMessage::Transactions(msg) => { @@ -237,7 +241,7 @@ impl ActiveSession { } /// Handle an internal peer request that will be sent to the remote. - fn on_internal_peer_request(&mut self, request: PeerRequest, deadline: Instant) { + fn on_internal_peer_request(&mut self, request: PeerRequest, deadline: Instant) { let request_id = self.next_id(); let msg = request.create_request_message(request_id); self.queued_outgoing.push_back(msg.into()); @@ -250,7 +254,7 @@ impl ActiveSession { } /// Handle a message received from the internal network - fn on_internal_peer_message(&mut self, msg: PeerMessage) { + fn on_internal_peer_message(&mut self, msg: PeerMessage) { match msg { PeerMessage::NewBlockHashes(msg) => { self.queued_outgoing.push_back(EthMessage::NewBlockHashes(msg).into()); @@ -288,7 +292,7 @@ impl ActiveSession { /// Handle a Response to the peer /// /// This will queue the response to be sent to the peer - fn handle_outgoing_response(&mut self, id: u64, resp: PeerResponseResult) { + fn handle_outgoing_response(&mut self, id: u64, resp: PeerResponseResult) { match resp.try_into_message(id) { Ok(msg) => { self.queued_outgoing.push_back(msg.into()); @@ -303,7 +307,7 @@ impl ActiveSession { /// /// Returns the message if the bounded channel is currently unable to handle this message. #[allow(clippy::result_large_err)] - fn try_emit_broadcast(&self, message: PeerMessage) -> Result<(), ActiveSessionMessage> { + fn try_emit_broadcast(&self, message: PeerMessage) -> Result<(), ActiveSessionMessage> { let Some(sender) = self.to_session_manager.inner().get_ref() else { return Ok(()) }; match sender @@ -329,7 +333,7 @@ impl ActiveSession { /// /// Returns the message if the bounded channel is currently unable to handle this message. #[allow(clippy::result_large_err)] - fn try_emit_request(&self, message: PeerMessage) -> Result<(), ActiveSessionMessage> { + fn try_emit_request(&self, message: PeerMessage) -> Result<(), ActiveSessionMessage> { let Some(sender) = self.to_session_manager.inner().get_ref() else { return Ok(()) }; match sender @@ -469,7 +473,7 @@ impl ActiveSession { } } -impl Future for ActiveSession { +impl Future for ActiveSession { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { @@ -655,20 +659,20 @@ impl Future for ActiveSession { } /// Tracks a request received from the peer -pub(crate) struct ReceivedRequest { +pub(crate) struct ReceivedRequest { /// Protocol Identifier request_id: u64, /// Receiver half of the channel that's supposed to receive the proper response. - rx: PeerResponse, + rx: PeerResponse, /// Timestamp when we read this msg from the wire. 
     #[allow(dead_code)]
     received: Instant,
 }
 
 /// A request that waits for a response from the peer
-pub(crate) struct InflightRequest {
+pub(crate) struct InflightRequest<R> {
     /// Request we sent to peer and the internal response channel
-    request: RequestState,
+    request: RequestState<R>,
     /// Instant when the request was sent
     timestamp: Instant,
     /// Time limit for the response
@@ -677,7 +681,7 @@ pub(crate) struct InflightRequest {
 
 // === impl InflightRequest ===
 
-impl InflightRequest {
+impl<N: NetworkPrimitives> InflightRequest<PeerRequest<N>> {
     /// Returns true if the request is timed out
     #[inline]
     fn is_timed_out(&self, now: Instant) -> bool {
@@ -702,17 +706,19 @@ impl InflightRequest {
 }
 
 /// All outcome variants when handling an incoming message
-enum OnIncomingMessageOutcome {
+enum OnIncomingMessageOutcome<N: NetworkPrimitives> {
     /// Message successfully handled.
     Ok,
     /// Message is considered to be in violation of the protocol
-    BadMessage { error: EthStreamError, message: EthMessage },
+    BadMessage { error: EthStreamError, message: EthMessage<N> },
     /// Currently no capacity to handle the message
-    NoCapacity(ActiveSessionMessage),
+    NoCapacity(ActiveSessionMessage<N>),
 }
 
-impl From<Result<(), ActiveSessionMessage>> for OnIncomingMessageOutcome {
-    fn from(res: Result<(), ActiveSessionMessage>) -> Self {
+impl<N: NetworkPrimitives> From<Result<(), ActiveSessionMessage<N>>>
+    for OnIncomingMessageOutcome<N>
+{
+    fn from(res: Result<(), ActiveSessionMessage<N>>) -> Self {
         match res {
             Ok(_) => Self::Ok,
             Err(msg) => Self::NoCapacity(msg),
@@ -720,29 +726,29 @@ impl From<Result<(), ActiveSessionMessage>> for OnIncomingMessageOutcome {
     }
 }
 
-enum RequestState {
+enum RequestState<R> {
     /// Waiting for the response
-    Waiting(PeerRequest),
+    Waiting(R),
     /// Request already timed out
     TimedOut,
 }
 
 /// Outgoing messages that can be sent over the wire.
-pub(crate) enum OutgoingMessage {
+pub(crate) enum OutgoingMessage<N: NetworkPrimitives> {
     /// A message that is owned.
-    Eth(EthMessage),
+    Eth(EthMessage<N>),
     /// A message that may be shared by multiple sessions.
- Broadcast(EthBroadcastMessage), + Broadcast(EthBroadcastMessage), } -impl From for OutgoingMessage { - fn from(value: EthMessage) -> Self { +impl From> for OutgoingMessage { + fn from(value: EthMessage) -> Self { Self::Eth(value) } } -impl From for OutgoingMessage { - fn from(value: EthBroadcastMessage) -> Self { +impl From> for OutgoingMessage { + fn from(value: EthBroadcastMessage) -> Self { Self::Broadcast(value) } } @@ -757,6 +763,32 @@ fn calculate_new_timeout(current_timeout: Duration, estimated_rtt: Duration) -> smoothened_timeout.clamp(MINIMUM_TIMEOUT, MAXIMUM_TIMEOUT) } + +/// A helper struct that wraps the queue of outgoing messages and a metric to track their count +pub(crate) struct QueuedOutgoingMessages { + messages: VecDeque>, + count: Gauge, +} + +impl QueuedOutgoingMessages { + pub(crate) const fn new(metric: Gauge) -> Self { + Self { messages: VecDeque::new(), count: metric } + } + + pub(crate) fn push_back(&mut self, message: OutgoingMessage) { + self.messages.push_back(message); + self.count.increment(1); + } + + pub(crate) fn pop_front(&mut self) -> Option> { + self.messages.pop_front().inspect(|_| self.count.decrement(1)) + } + + pub(crate) fn shrink_to_fit(&mut self) { + self.messages.shrink_to_fit(); + } +} + #[cfg(test)] mod tests { use super::*; @@ -764,8 +796,8 @@ mod tests { use reth_chainspec::MAINNET; use reth_ecies::stream::ECIESStream; use reth_eth_wire::{ - EthStream, GetBlockBodies, HelloMessageWithProtocols, P2PStream, Status, StatusBuilder, - UnauthedEthStream, UnauthedP2PStream, + EthNetworkPrimitives, EthStream, GetBlockBodies, HelloMessageWithProtocols, P2PStream, + Status, StatusBuilder, UnauthedEthStream, UnauthedP2PStream, }; use reth_network_peers::pk2id; use reth_network_types::session::config::PROTOCOL_BREACH_REQUEST_TIMEOUT; @@ -781,11 +813,11 @@ mod tests { HelloMessageWithProtocols::builder(pk2id(&server_key.public_key(SECP256K1))).build() } - struct SessionBuilder { + struct SessionBuilder { _remote_capabilities: Arc, - active_session_tx: mpsc::Sender, - active_session_rx: ReceiverStream, - to_sessions: Vec>, + active_session_tx: mpsc::Sender>, + active_session_rx: ReceiverStream>, + to_sessions: Vec>>, secret_key: SecretKey, local_peer_id: PeerId, hello: HelloMessageWithProtocols, @@ -794,7 +826,7 @@ mod tests { next_id: usize, } - impl SessionBuilder { + impl SessionBuilder { fn next_id(&mut self) -> SessionId { let id = self.next_id; self.next_id += 1; @@ -831,7 +863,7 @@ mod tests { }) } - async fn connect_incoming(&mut self, stream: TcpStream) -> ActiveSession { + async fn connect_incoming(&mut self, stream: TcpStream) -> ActiveSession { let remote_addr = stream.local_addr().unwrap(); let session_id = self.next_id(); let (_disconnect_tx, disconnect_rx) = oneshot::channel(); @@ -882,7 +914,7 @@ mod tests { internal_request_tx: ReceiverStream::new(messages_rx).fuse(), inflight_requests: Default::default(), conn, - queued_outgoing: Default::default(), + queued_outgoing: QueuedOutgoingMessages::new(Gauge::noop()), received_requests_from_remote: Default::default(), internal_request_timeout_interval: tokio::time::interval( INITIAL_REQUEST_TIMEOUT, diff --git a/crates/net/network/src/session/conn.rs b/crates/net/network/src/session/conn.rs index 628c880c8eac..5329f01028b3 100644 --- a/crates/net/network/src/session/conn.rs +++ b/crates/net/network/src/session/conn.rs @@ -11,16 +11,16 @@ use reth_eth_wire::{ errors::EthStreamError, message::EthBroadcastMessage, multiplex::{ProtocolProxy, RlpxSatelliteStream}, - EthMessage, EthStream, 
EthVersion, P2PStream, + EthMessage, EthNetworkPrimitives, EthStream, EthVersion, NetworkPrimitives, P2PStream, }; use tokio::net::TcpStream; /// The type of the underlying peer network connection. -pub type EthPeerConnection = EthStream>>; +pub type EthPeerConnection = EthStream>, N>; /// Various connection types that at least support the ETH protocol. -pub type EthSatelliteConnection = - RlpxSatelliteStream, EthStream>; +pub type EthSatelliteConnection = + RlpxSatelliteStream, EthStream>; /// Connection types that support the ETH protocol. /// @@ -30,14 +30,14 @@ pub type EthSatelliteConnection = // This type is boxed because the underlying stream is ~6KB, // mostly coming from `P2PStream`'s `snap::Encoder` (2072), and `ECIESStream` (3600). #[derive(Debug)] -pub enum EthRlpxConnection { +pub enum EthRlpxConnection { /// A connection that only supports the ETH protocol. - EthOnly(Box), + EthOnly(Box>), /// A connection that supports the ETH protocol and __at least one other__ `RLPx` protocol. - Satellite(Box), + Satellite(Box>), } -impl EthRlpxConnection { +impl EthRlpxConnection { /// Returns the negotiated ETH version. #[inline] pub(crate) const fn version(&self) -> EthVersion { @@ -78,7 +78,7 @@ impl EthRlpxConnection { #[inline] pub fn start_send_broadcast( &mut self, - item: EthBroadcastMessage, + item: EthBroadcastMessage, ) -> Result<(), EthStreamError> { match self { Self::EthOnly(conn) => conn.start_send_broadcast(item), @@ -87,16 +87,16 @@ impl EthRlpxConnection { } } -impl From for EthRlpxConnection { +impl From> for EthRlpxConnection { #[inline] - fn from(conn: EthPeerConnection) -> Self { + fn from(conn: EthPeerConnection) -> Self { Self::EthOnly(Box::new(conn)) } } -impl From for EthRlpxConnection { +impl From> for EthRlpxConnection { #[inline] - fn from(conn: EthSatelliteConnection) -> Self { + fn from(conn: EthSatelliteConnection) -> Self { Self::Satellite(Box::new(conn)) } } @@ -112,22 +112,22 @@ macro_rules! delegate_call { } } -impl Stream for EthRlpxConnection { - type Item = Result; +impl Stream for EthRlpxConnection { + type Item = Result, EthStreamError>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { delegate_call!(self.poll_next(cx)) } } -impl Sink for EthRlpxConnection { +impl Sink> for EthRlpxConnection { type Error = EthStreamError; fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { delegate_call!(self.poll_ready(cx)) } - fn start_send(self: Pin<&mut Self>, item: EthMessage) -> Result<(), Self::Error> { + fn start_send(self: Pin<&mut Self>, item: EthMessage) -> Result<(), Self::Error> { delegate_call!(self.start_send(item)) } diff --git a/crates/net/network/src/session/handle.rs b/crates/net/network/src/session/handle.rs index a022e670419a..f80428630d9b 100644 --- a/crates/net/network/src/session/handle.rs +++ b/crates/net/network/src/session/handle.rs @@ -5,7 +5,7 @@ use std::{io, net::SocketAddr, sync::Arc, time::Instant}; use reth_ecies::ECIESError; use reth_eth_wire::{ capability::CapabilityMessage, errors::EthStreamError, Capabilities, DisconnectReason, - EthVersion, Status, + EthVersion, NetworkPrimitives, Status, }; use reth_network_api::PeerInfo; use reth_network_peers::{NodeRecord, PeerId}; @@ -54,7 +54,7 @@ impl PendingSessionHandle { /// Within an active session that supports the `Ethereum Wire Protocol `, three high-level tasks can /// be performed: chain synchronization, block propagation and transaction exchange. 
#[derive(Debug)] -pub struct ActiveSessionHandle { +pub struct ActiveSessionHandle { /// The direction of the session pub(crate) direction: Direction, /// The assigned id for this session @@ -68,7 +68,7 @@ pub struct ActiveSessionHandle { /// Announced capabilities of the peer. pub(crate) capabilities: Arc, /// Sender half of the command channel used send commands _to_ the spawned session - pub(crate) commands_to_session: mpsc::Sender, + pub(crate) commands_to_session: mpsc::Sender>, /// The client's name and version pub(crate) client_version: Arc, /// The address we're connected to @@ -81,7 +81,7 @@ pub struct ActiveSessionHandle { // === impl ActiveSessionHandle === -impl ActiveSessionHandle { +impl ActiveSessionHandle { /// Sends a disconnect command to the session. pub fn disconnect(&self, reason: Option) { // Note: we clone the sender which ensures the channel has capacity to send the message @@ -93,7 +93,7 @@ impl ActiveSessionHandle { pub async fn try_disconnect( &self, reason: Option, - ) -> Result<(), SendError> { + ) -> Result<(), SendError>> { self.commands_to_session.clone().send(SessionCommand::Disconnect { reason }).await } @@ -162,7 +162,7 @@ impl ActiveSessionHandle { /// /// A session starts with a `Handshake`, followed by a `Hello` message which #[derive(Debug)] -pub enum PendingSessionEvent { +pub enum PendingSessionEvent { /// Represents a successful `Hello` and `Status` exchange: Established { /// An internal identifier for the established session @@ -179,7 +179,7 @@ pub enum PendingSessionEvent { status: Arc, /// The actual connection stream which can be used to send and receive `eth` protocol /// messages - conn: EthRlpxConnection, + conn: EthRlpxConnection, /// The direction of the session, either `Inbound` or `Outgoing` direction: Direction, /// The remote node's user agent, usually containing the client name and version @@ -222,20 +222,20 @@ pub enum PendingSessionEvent { /// Commands that can be sent to the spawned session. #[derive(Debug)] -pub enum SessionCommand { +pub enum SessionCommand { /// Disconnect the connection Disconnect { /// Why the disconnect was initiated reason: Option, }, /// Sends a message to the peer - Message(PeerMessage), + Message(PeerMessage), } /// Message variants an active session can produce and send back to the /// [`SessionManager`](crate::session::SessionManager) #[derive(Debug)] -pub enum ActiveSessionMessage { +pub enum ActiveSessionMessage { /// Session was gracefully disconnected. Disconnected { /// The remote node's public key @@ -257,7 +257,7 @@ pub enum ActiveSessionMessage { /// Identifier of the remote peer. peer_id: PeerId, /// Message received from the peer. - message: PeerMessage, + message: PeerMessage, }, /// Received a message that does not match the announced capabilities of the peer. 
InvalidMessage { diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index 74f303df7b88..a95f0e889101 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -5,6 +5,7 @@ mod conn; mod counter; mod handle; +use active::QueuedOutgoingMessages; pub use conn::EthRlpxConnection; pub use handle::{ ActiveSessionHandle, ActiveSessionMessage, PendingSessionEvent, PendingSessionHandle, @@ -27,11 +28,11 @@ use futures::{future::Either, io, FutureExt, StreamExt}; use reth_ecies::{stream::ECIESStream, ECIESError}; use reth_eth_wire::{ capability::CapabilityMessage, errors::EthStreamError, multiplex::RlpxProtocolMultiplexer, - Capabilities, DisconnectReason, EthVersion, HelloMessageWithProtocols, Status, - UnauthedEthStream, UnauthedP2PStream, + Capabilities, DisconnectReason, EthVersion, HelloMessageWithProtocols, NetworkPrimitives, + Status, UnauthedEthStream, UnauthedP2PStream, }; use reth_metrics::common::mpsc::MeteredPollSender; -use reth_network_api::PeerRequestSender; +use reth_network_api::{PeerRequest, PeerRequestSender}; use reth_network_peers::PeerId; use reth_network_types::SessionsConfig; use reth_primitives::{ForkFilter, ForkId, ForkTransition, Head}; @@ -61,7 +62,7 @@ pub struct SessionId(usize); /// Manages a set of sessions. #[must_use = "Session Manager must be polled to process session events."] #[derive(Debug)] -pub struct SessionManager { +pub struct SessionManager { /// Tracks the identifier for the next session. next_id: usize, /// Keeps track of all sessions @@ -92,30 +93,32 @@ pub struct SessionManager { /// session is authenticated, it can be moved to the `active_session` set. pending_sessions: FxHashMap, /// All active sessions that are ready to exchange messages. - active_sessions: HashMap, + active_sessions: HashMap>, /// The original Sender half of the [`PendingSessionEvent`] channel. /// /// When a new (pending) session is created, the corresponding [`PendingSessionHandle`] will /// get a clone of this sender half. - pending_sessions_tx: mpsc::Sender, + pending_sessions_tx: mpsc::Sender>, /// Receiver half that listens for [`PendingSessionEvent`] produced by pending sessions. - pending_session_rx: ReceiverStream, + pending_session_rx: ReceiverStream>, /// The original Sender half of the [`ActiveSessionMessage`] channel. /// /// When active session state is reached, the corresponding [`ActiveSessionHandle`] will get a /// clone of this sender half. - active_session_tx: MeteredPollSender, + active_session_tx: MeteredPollSender>, /// Receiver half that listens for [`ActiveSessionMessage`] produced by pending sessions. - active_session_rx: ReceiverStream, + active_session_rx: ReceiverStream>, /// Additional `RLPx` sub-protocols to be used by the session manager. extra_protocols: RlpxSubProtocols, + /// Tracks the ongoing graceful disconnections attempts for incoming connections. + disconnections_counter: DisconnectionsCounter, /// Metrics for the session manager. metrics: SessionManagerMetrics, } // === impl SessionManager === -impl SessionManager { +impl SessionManager { /// Creates a new empty [`SessionManager`]. 
 #[allow(clippy::too_many_arguments)]
     pub fn new(
@@ -150,6 +153,7 @@ impl SessionManager {
             active_session_tx: MeteredPollSender::new(active_session_tx, "network_active_session"),
             active_session_rx: ReceiverStream::new(active_session_rx),
             extra_protocols,
+            disconnections_counter: Default::default(),
             metrics: Default::default(),
         }
     }
@@ -178,7 +182,7 @@ impl SessionManager {
     }

     /// Returns a borrowed reference to the active sessions.
-    pub const fn active_sessions(&self) -> &HashMap<PeerId, ActiveSessionHandle> {
+    pub const fn active_sessions(&self) -> &HashMap<PeerId, ActiveSessionHandle<N>> {
         &self.active_sessions
     }
@@ -344,7 +348,7 @@ impl SessionManager {
     }

     /// Sends a message to the peer's session
-    pub fn send_message(&mut self, peer_id: &PeerId, msg: PeerMessage) {
+    pub fn send_message(&mut self, peer_id: &PeerId, msg: PeerMessage<N>) {
         if let Some(session) = self.active_sessions.get_mut(peer_id) {
             let _ = session.commands_to_session.try_send(SessionCommand::Message(msg)).inspect_err(
                 |e| {
@@ -369,16 +373,45 @@ impl SessionManager {
     }

     /// Removes the [`PendingSessionHandle`] if it exists.
-    fn remove_active_session(&mut self, id: &PeerId) -> Option<ActiveSessionHandle> {
+    fn remove_active_session(&mut self, id: &PeerId) -> Option<ActiveSessionHandle<N>> {
         let session = self.active_sessions.remove(id)?;
         self.counter.dec_active(&session.direction);
         Some(session)
     }

+    /// Tries to gracefully disconnect an incoming connection by initiating an ECIES connection
+    /// and sending a disconnect. If the [`SessionManager`] is at capacity for ongoing
+    /// disconnections, it will simply drop the incoming connection.
+    pub(crate) fn try_disconnect_incoming_connection(
+        &self,
+        stream: TcpStream,
+        reason: DisconnectReason,
+    ) {
+        if !self.disconnections_counter.has_capacity() {
+            // drop the connection if we don't have capacity for gracefully disconnecting
+            return
+        }
+
+        let guard = self.disconnections_counter.clone();
+        let secret_key = self.secret_key;
+
+        self.spawn(async move {
+            trace!(
+                target: "net::session",
+                "gracefully disconnecting incoming connection"
+            );
+            if let Ok(stream) = get_ecies_stream(stream, secret_key, Direction::Incoming).await {
+                let mut unauth = UnauthedP2PStream::new(stream);
+                let _ = unauth.send_disconnect(reason).await;
+                drop(guard);
+            }
+        });
+    }
+
     /// This polls all the session handles and returns [`SessionEvent`].
     ///
     /// Active sessions are prioritized.
-    pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll<SessionEvent> {
+    pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll<SessionEvent<N>> {
         // Poll events from active sessions
         match self.active_session_rx.poll_next_unpin(cx) {
             Poll::Pending => {}
@@ -495,7 +528,9 @@ impl SessionManager {
             internal_request_tx: ReceiverStream::new(messages_rx).fuse(),
             inflight_requests: Default::default(),
             conn,
-            queued_outgoing: Default::default(),
+            queued_outgoing: QueuedOutgoingMessages::new(
+                self.metrics.queued_outgoing_messages.clone(),
+            ),
             received_requests_from_remote: Default::default(),
             internal_request_timeout_interval: tokio::time::interval(
                 self.initial_internal_request_timeout,
@@ -612,9 +647,23 @@ impl SessionManager {
     }
 }

+/// A counter for ongoing graceful disconnection attempts.
+#[derive(Default, Debug, Clone)]
+struct DisconnectionsCounter(Arc<()>);
+
+impl DisconnectionsCounter {
+    const MAX_CONCURRENT_GRACEFUL_DISCONNECTIONS: usize = 15;
+
+    /// Returns true if the [`DisconnectionsCounter`] still has capacity
+    /// for an additional graceful disconnection.
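The `has_capacity` body that follows is the whole trick: every spawned disconnect task holds a clone of the inner `Arc`, so `Arc::strong_count` doubles as the in-flight count, and the "permit" is released automatically when the task's clone is dropped. A compact restatement of the idea under the same constant (a sketch; names are illustrative):

```rust
use std::sync::Arc;

/// Each in-flight disconnect task holds one clone; `strong_count` is
/// therefore `1 + in_flight`, so no explicit counter is needed.
#[derive(Default, Clone)]
struct DisconnectionsCounter(Arc<()>);

impl DisconnectionsCounter {
    const MAX_CONCURRENT: usize = 15;

    fn has_capacity(&self) -> bool {
        Arc::strong_count(&self.0) <= Self::MAX_CONCURRENT
    }
}

fn main() {
    let counter = DisconnectionsCounter::default();
    assert!(counter.has_capacity());

    // Simulate 15 in-flight graceful disconnects, one guard each.
    let guards: Vec<_> = (0..15).map(|_| counter.clone()).collect();
    assert!(!counter.has_capacity()); // 16 strong refs > 15

    drop(guards); // tasks finished, permits released implicitly
    assert!(counter.has_capacity());
}
```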
+ fn has_capacity(&self) -> bool { + Arc::strong_count(&self.0) <= Self::MAX_CONCURRENT_GRACEFUL_DISCONNECTIONS + } +} + /// Events produced by the [`SessionManager`] #[derive(Debug)] -pub enum SessionEvent { +pub enum SessionEvent { /// A new session was successfully authenticated. /// /// This session is now able to exchange data. @@ -632,7 +681,7 @@ pub enum SessionEvent { /// The Status message the peer sent during the `eth` handshake status: Arc, /// The channel for sending messages to the peer with the session - messages: PeerRequestSender, + messages: PeerRequestSender>, /// The direction of the session, either `Inbound` or `Outgoing` direction: Direction, /// The maximum time that the session waits for a response from the peer before timing out @@ -653,7 +702,7 @@ pub enum SessionEvent { /// The remote node's public key peer_id: PeerId, /// Message received from the peer. - message: PeerMessage, + message: PeerMessage, }, /// Received a message that does not match the announced capabilities of the peer. InvalidMessage { @@ -748,12 +797,12 @@ impl PendingSessionHandshakeError { pub struct ExceedsSessionLimit(pub(crate) u32); /// Starts a pending session authentication with a timeout. -pub(crate) async fn pending_session_with_timeout( +pub(crate) async fn pending_session_with_timeout( timeout: Duration, session_id: SessionId, remote_addr: SocketAddr, direction: Direction, - events: mpsc::Sender, + events: mpsc::Sender>, f: F, ) where F: Future, @@ -774,11 +823,11 @@ pub(crate) async fn pending_session_with_timeout( /// /// This will wait for the _incoming_ handshake request and answer it. #[allow(clippy::too_many_arguments)] -pub(crate) async fn start_pending_incoming_session( +pub(crate) async fn start_pending_incoming_session( disconnect_rx: oneshot::Receiver<()>, session_id: SessionId, stream: TcpStream, - events: mpsc::Sender, + events: mpsc::Sender>, remote_addr: SocketAddr, secret_key: SecretKey, hello: HelloMessageWithProtocols, @@ -805,9 +854,9 @@ pub(crate) async fn start_pending_incoming_session( /// Starts the authentication process for a connection initiated by a remote peer. #[instrument(skip_all, fields(%remote_addr, peer_id), target = "net")] #[allow(clippy::too_many_arguments)] -async fn start_pending_outbound_session( +async fn start_pending_outbound_session( disconnect_rx: oneshot::Receiver<()>, - events: mpsc::Sender, + events: mpsc::Sender>, session_id: SessionId, remote_addr: SocketAddr, remote_peer_id: PeerId, @@ -854,9 +903,9 @@ async fn start_pending_outbound_session( /// Authenticates a session #[allow(clippy::too_many_arguments)] -async fn authenticate( +async fn authenticate( disconnect_rx: oneshot::Receiver<()>, - events: mpsc::Sender, + events: mpsc::Sender>, stream: TcpStream, session_id: SessionId, remote_addr: SocketAddr, @@ -868,7 +917,7 @@ async fn authenticate( extra_handlers: RlpxSubProtocolHandlers, ) { let local_addr = stream.local_addr().ok(); - let stream = match get_eciess_stream(stream, secret_key, direction).await { + let stream = match get_ecies_stream(stream, secret_key, direction).await { Ok(stream) => stream, Err(error) => { let _ = events @@ -917,7 +966,7 @@ async fn authenticate( /// Returns an [`ECIESStream`] if it can be built. 
If not, send a /// [`PendingSessionEvent::EciesAuthError`] and returns `None` -async fn get_eciess_stream( +async fn get_ecies_stream( stream: Io, secret_key: SecretKey, direction: Direction, @@ -937,7 +986,7 @@ async fn get_eciess_stream( /// If additional [`RlpxSubProtocolHandlers`] are provided, the hello message will be updated to /// also negotiate the additional protocols. #[allow(clippy::too_many_arguments)] -async fn authenticate_stream( +async fn authenticate_stream( stream: UnauthedP2PStream>, session_id: SessionId, remote_addr: SocketAddr, @@ -947,7 +996,7 @@ async fn authenticate_stream( mut status: Status, fork_filter: ForkFilter, mut extra_handlers: RlpxSubProtocolHandlers, -) -> PendingSessionEvent { +) -> PendingSessionEvent { // Add extra protocols to the hello message extra_handlers.retain(|handler| hello.try_add_protocol(handler.protocol()).is_ok()); diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index 5caa656a98ea..c51f115c52f5 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -12,13 +12,18 @@ use std::{ task::{Context, Poll}, }; +use alloy_consensus::BlockHeader; use alloy_primitives::B256; use rand::seq::SliceRandom; -use reth_eth_wire::{BlockHashNumber, Capabilities, DisconnectReason, NewBlockHashes, Status}; +use reth_eth_wire::{ + BlockHashNumber, Capabilities, DisconnectReason, EthNetworkPrimitives, NetworkPrimitives, + NewBlockHashes, Status, +}; use reth_network_api::{DiscoveredEvent, DiscoveryEvent, PeerRequest, PeerRequestSender}; use reth_network_peers::PeerId; use reth_network_types::{PeerAddr, PeerKind}; use reth_primitives::ForkId; +use reth_primitives_traits::Block; use tokio::sync::oneshot; use tracing::{debug, trace}; @@ -69,13 +74,13 @@ impl Deref for BlockNumReader { /// /// This type is also responsible for responding for received request. #[derive(Debug)] -pub struct NetworkState { +pub struct NetworkState { /// All active peers and their state. - active_peers: HashMap, + active_peers: HashMap>, /// Manages connections to peers. peers_manager: PeersManager, /// Buffered messages until polled. - queued_messages: VecDeque, + queued_messages: VecDeque>, /// The client type that can interact with the chain. /// /// This type is used to fetch the block number after we established a session and received the @@ -88,10 +93,10 @@ pub struct NetworkState { /// The fetcher streams `RLPx` related requests on a per-peer basis to this type. This type /// will then queue in the request and notify the fetcher once the result has been /// received. - state_fetcher: StateFetcher, + state_fetcher: StateFetcher, } -impl NetworkState { +impl NetworkState { /// Create a new state instance with the given params pub(crate) fn new( client: BlockNumReader, @@ -126,7 +131,7 @@ impl NetworkState { } /// Returns a new [`FetchClient`] - pub(crate) fn fetch_client(&self) -> FetchClient { + pub(crate) fn fetch_client(&self) -> FetchClient { self.state_fetcher.client() } @@ -144,7 +149,7 @@ impl NetworkState { peer: PeerId, capabilities: Arc, status: Arc, - request_tx: PeerRequestSender, + request_tx: PeerRequestSender>, timeout: Arc, ) { debug_assert!(!self.active_peers.contains_key(&peer), "Already connected; not possible"); @@ -182,12 +187,12 @@ impl NetworkState { /// > the total number of peers) using the `NewBlock` message. 
 ///
 /// See also <https://github.com/ethereum/devp2p/blob/master/caps/eth.md#block-propagation>
-    pub(crate) fn announce_new_block(&mut self, msg: NewBlockMessage) {
+    pub(crate) fn announce_new_block(&mut self, msg: NewBlockMessage<N::Block>) {
         // send a `NewBlock` message to a fraction of the connected peers (square root of the total
         // number of peers)
         let num_propagate = (self.active_peers.len() as f64).sqrt() as u64 + 1;

-        let number = msg.block.block.header.number;
+        let number = msg.block.block.header().number();
         let mut count = 0;

         // Shuffle to propagate to a random sample of peers on every block announcement
@@ -224,8 +229,8 @@ impl NetworkState {

     /// Completes the block propagation process started in [`NetworkState::announce_new_block()`]
     /// by sending a `NewBlockHashes` broadcast to all peers that haven't seen it yet.
-    pub(crate) fn announce_new_block_hash(&mut self, msg: NewBlockMessage) {
-        let number = msg.block.block.header.number;
+    pub(crate) fn announce_new_block_hash(&mut self, msg: NewBlockMessage<N::Block>) {
+        let number = msg.block.block.header().number();
         let hashes = NewBlockHashes(vec![BlockHashNumber { hash: msg.hash, number }]);
         for (peer_id, peer) in &mut self.active_peers {
             if peer.blocks.contains(&msg.hash) {
@@ -382,7 +387,7 @@ impl NetworkState {
     }

     /// Handles the outcome of a processed response, for example directly queue another request.
-    fn on_block_response_outcome(&mut self, outcome: BlockResponseOutcome) -> Option<StateAction> {
+    fn on_block_response_outcome(
+        &mut self,
+        outcome: BlockResponseOutcome,
+    ) -> Option<StateAction<N>> {
         match outcome {
             BlockResponseOutcome::Request(peer, request) => {
                 self.handle_block_request(peer, request);
@@ -399,7 +407,11 @@ impl NetworkState {
     /// Delegates the response result to the fetcher which may return an outcome specific
     /// instruction that needs to be handled in [`Self::on_block_response_outcome`]. This could be
     /// a follow-up request or an instruction to slash the peer's reputation.
-    fn on_eth_response(&mut self, peer: PeerId, resp: PeerResponseResult) -> Option<StateAction> {
+    fn on_eth_response(
+        &mut self,
+        peer: PeerId,
+        resp: PeerResponseResult<N::BlockHeader, N::BlockBody>,
+    ) -> Option<StateAction<N>> {
         match resp {
             PeerResponseResult::BlockHeaders(res) => {
                 let outcome = self.state_fetcher.on_block_headers_response(peer, res)?;
@@ -414,7 +426,7 @@ impl NetworkState {
     }

     /// Advances the state
-    pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll<StateAction> {
+    pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll<StateAction<N>> {
         loop {
             // drain buffered messages
             if let Some(message) = self.queued_messages.pop_front() {
@@ -492,29 +504,29 @@ impl NetworkState {
 ///
 /// For example known blocks, so we can decide what to announce.
 #[derive(Debug)]
-pub(crate) struct ActivePeer {
+pub(crate) struct ActivePeer<N: NetworkPrimitives> {
     /// Best block of the peer.
     pub(crate) best_hash: B256,
     /// The capabilities of the remote peer.
     #[allow(dead_code)]
     pub(crate) capabilities: Arc<Capabilities>,
     /// A communication channel directly to the session task.
-    pub(crate) request_tx: PeerRequestSender,
+    pub(crate) request_tx: PeerRequestSender<PeerRequest<N>>,
     /// The response receiver for a currently active request to that peer.
-    pub(crate) pending_response: Option<PeerResponse>,
+    pub(crate) pending_response: Option<PeerResponse<N>>,
     /// Blocks we know the peer has.
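Before the `ActivePeer` fields continue, a quick note on the announcement arithmetic above: per the devp2p guidance, roughly the square root of the n connected peers receive the full `NewBlock` message, and the rest only ever see the follow-up hash announcement. The peer-count arithmetic in isolation (a sketch; the real code also shuffles peers and skips those that already know the block):

```rust
/// Number of peers that get the full block; the remainder only receive a
/// `NewBlockHashes` announcement afterwards.
fn num_full_block_peers(active_peers: usize) -> u64 {
    (active_peers as f64).sqrt() as u64 + 1
}

fn main() {
    assert_eq!(num_full_block_peers(100), 11); // sqrt(100) + 1
    assert_eq!(num_full_block_peers(10), 4);   // floor(3.16) + 1
    // The +1 guarantees at least one full-block send even for tiny peer sets;
    // in practice the loop stops once every connected peer has been served.
    assert_eq!(num_full_block_peers(0), 1);
}
```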
pub(crate) blocks: LruCache, } /// Message variants triggered by the [`NetworkState`] #[derive(Debug)] -pub(crate) enum StateAction { +pub(crate) enum StateAction { /// Dispatch a `NewBlock` message to the peer NewBlock { /// Target of the message peer_id: PeerId, /// The `NewBlock` message - block: NewBlockMessage, + block: NewBlockMessage, }, NewBlockHashes { /// Target of the message @@ -551,12 +563,13 @@ mod tests { sync::{atomic::AtomicU64, Arc}, }; + use alloy_consensus::Header; use alloy_primitives::B256; use reth_eth_wire::{BlockBodies, Capabilities, Capability, EthVersion}; use reth_network_api::PeerRequestSender; use reth_network_p2p::{bodies::client::BodiesClient, error::RequestError}; use reth_network_peers::PeerId; - use reth_primitives::{BlockBody, Header}; + use reth_primitives::BlockBody; use reth_provider::test_utils::NoopProvider; use tokio::sync::mpsc; use tokio_stream::{wrappers::ReceiverStream, StreamExt}; diff --git a/crates/net/network/src/swarm.rs b/crates/net/network/src/swarm.rs index 0be7ae1c1bb6..655934f207ac 100644 --- a/crates/net/network/src/swarm.rs +++ b/crates/net/network/src/swarm.rs @@ -8,9 +8,10 @@ use std::{ use futures::Stream; use reth_eth_wire::{ - capability::CapabilityMessage, errors::EthStreamError, Capabilities, EthVersion, Status, + capability::CapabilityMessage, errors::EthStreamError, Capabilities, DisconnectReason, + EthNetworkPrimitives, EthVersion, NetworkPrimitives, Status, }; -use reth_network_api::PeerRequestSender; +use reth_network_api::{PeerRequest, PeerRequestSender}; use reth_network_peers::PeerId; use tracing::trace; @@ -32,7 +33,7 @@ use crate::{ /// [`SessionManager`]. Outgoing connections are either initiated on demand or triggered by the /// [`NetworkState`] and also delegated to the [`NetworkState`]. /// -/// Following diagram gives displays the dataflow contained in the [`Swarm`] +/// Following diagram displays the dataflow contained in the [`Swarm`] /// /// The [`ConnectionListener`] yields incoming [`TcpStream`]s from peers that are spawned as session /// tasks. After a successful `RLPx` authentication, the task is ready to accept ETH requests or @@ -49,39 +50,39 @@ use crate::{ /// `include_mmd!("docs/mermaid/swarm.mmd`") #[derive(Debug)] #[must_use = "Swarm does nothing unless polled"] -pub(crate) struct Swarm { +pub(crate) struct Swarm { /// Listens for new incoming connections. incoming: ConnectionListener, /// All sessions. - sessions: SessionManager, + sessions: SessionManager, /// Tracks the entire state of the network and handles events received from the sessions. - state: NetworkState, + state: NetworkState, } // === impl Swarm === -impl Swarm { +impl Swarm { /// Configures a new swarm instance. pub(crate) const fn new( incoming: ConnectionListener, - sessions: SessionManager, - state: NetworkState, + sessions: SessionManager, + state: NetworkState, ) -> Self { Self { incoming, sessions, state } } - /// Adds an additional protocol handler to the `RLPx` sub-protocol list. + /// Adds a protocol handler to the `RLPx` sub-protocol list. pub(crate) fn add_rlpx_sub_protocol(&mut self, protocol: impl IntoRlpxSubProtocol) { self.sessions_mut().add_rlpx_sub_protocol(protocol); } /// Access to the state. - pub(crate) const fn state(&self) -> &NetworkState { + pub(crate) const fn state(&self) -> &NetworkState { &self.state } /// Mutable access to the state. 
- pub(crate) fn state_mut(&mut self) -> &mut NetworkState { + pub(crate) fn state_mut(&mut self) -> &mut NetworkState { &mut self.state } @@ -91,17 +92,17 @@ impl Swarm { } /// Access to the [`SessionManager`]. - pub(crate) const fn sessions(&self) -> &SessionManager { + pub(crate) const fn sessions(&self) -> &SessionManager { &self.sessions } /// Mutable access to the [`SessionManager`]. - pub(crate) fn sessions_mut(&mut self) -> &mut SessionManager { + pub(crate) fn sessions_mut(&mut self) -> &mut SessionManager { &mut self.sessions } } -impl Swarm { +impl Swarm { /// Triggers a new outgoing connection to the given node pub(crate) fn dial_outbound(&mut self, remote_addr: SocketAddr, remote_id: PeerId) { self.sessions.dial_outbound(remote_addr, remote_id) @@ -111,7 +112,7 @@ impl Swarm { /// /// This either updates the state or produces a new [`SwarmEvent`] that is bubbled up to the /// manager. - fn on_session_event(&mut self, event: SessionEvent) -> Option { + fn on_session_event(&mut self, event: SessionEvent) -> Option> { match event { SessionEvent::SessionEstablished { peer_id, @@ -180,7 +181,7 @@ impl Swarm { /// Callback for events produced by [`ConnectionListener`]. /// /// Depending on the event, this will produce a new [`SwarmEvent`]. - fn on_connection(&mut self, event: ListenerEvent) -> Option { + fn on_connection(&mut self, event: ListenerEvent) -> Option> { match event { ListenerEvent::Error(err) => return Some(SwarmEvent::TcpListenerError(err)), ListenerEvent::ListenerClosed { local_address: address } => { @@ -201,6 +202,10 @@ impl Swarm { } InboundConnectionError::ExceedsCapacity => { trace!(target: "net", ?remote_addr, "No capacity for incoming connection"); + self.sessions.try_disconnect_incoming_connection( + stream, + DisconnectReason::TooManyPeers, + ); } } return None @@ -224,7 +229,7 @@ impl Swarm { } /// Hook for actions pulled from the state - fn on_state_action(&mut self, event: StateAction) -> Option { + fn on_state_action(&mut self, event: StateAction) -> Option> { match event { StateAction::Connect { remote_addr, peer_id } => { self.dial_outbound(remote_addr, peer_id); @@ -281,8 +286,8 @@ impl Swarm { } } -impl Stream for Swarm { - type Item = SwarmEvent; +impl Stream for Swarm { + type Item = SwarmEvent; /// This advances all components. /// @@ -333,13 +338,13 @@ impl Stream for Swarm { /// All events created or delegated by the [`Swarm`] that represents changes to the state of the /// network. -pub(crate) enum SwarmEvent { +pub(crate) enum SwarmEvent { /// Events related to the actual network protocol. ValidMessage { /// The peer that sent the message peer_id: PeerId, /// Message received from the peer - message: PeerMessage, + message: PeerMessage, }, /// Received a message that does not match the announced capabilities of the peer. 
InvalidCapabilityMessage { @@ -389,7 +394,7 @@ pub(crate) enum SwarmEvent { capabilities: Arc, /// negotiated eth version version: EthVersion, - messages: PeerRequestSender, + messages: PeerRequestSender>, status: Arc, direction: Direction, }, diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index 3e856951552a..00a9158233b7 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -1343,16 +1343,13 @@ struct TxFetcherSearchDurations { #[cfg(test)] mod test { - use std::{collections::HashSet, str::FromStr}; - + use super::*; + use crate::transactions::tests::{default_cache, new_mock_session}; use alloy_primitives::{hex, B256}; use alloy_rlp::Decodable; use derive_more::IntoIterator; use reth_primitives::TransactionSigned; - - use crate::transactions::tests::{default_cache, new_mock_session}; - - use super::*; + use std::{collections::HashSet, str::FromStr}; #[derive(IntoIterator)] struct TestValidAnnouncementData(Vec<(TxHash, Option<(u8, usize)>)>); diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 439f92bada9a..4e23c8527b47 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -416,6 +416,7 @@ where fn propagate_all(&mut self, hashes: Vec) { let propagated = self.propagate_transactions( self.pool.get_all(hashes).into_iter().map(PropagateTransaction::new).collect(), + PropagationMode::Basic, ); // notify pool so events get fired @@ -431,6 +432,7 @@ where fn propagate_transactions( &mut self, to_propagate: Vec, + propagation_mode: PropagationMode, ) -> PropagatedTransactions { let mut propagated = PropagatedTransactions::default(); if self.network.tx_gossip_disabled() { @@ -449,14 +451,18 @@ where PropagateTransactionsBuilder::full(peer.version) }; - // Iterate through the transactions to propagate and fill the hashes and full - // transaction lists, before deciding whether or not to send full transactions to the - // peer. - for tx in &to_propagate { - // Only proceed if the transaction is not in the peer's list of seen transactions - if !peer.seen_transactions.contains(&tx.hash()) { - // add transaction to the list of hashes to propagate - builder.push(tx); + if propagation_mode.is_forced() { + builder.extend(to_propagate.iter()); + } else { + // Iterate through the transactions to propagate and fill the hashes and full + // transaction lists, before deciding whether or not to send full transactions to + // the peer. 
+ for tx in &to_propagate { + // Only proceed if the transaction is not in the peer's list of seen + // transactions + if !peer.seen_transactions.contains(&tx.hash()) { + builder.push(tx); + } } } @@ -514,6 +520,7 @@ where &mut self, txs: Vec, peer_id: PeerId, + propagation_mode: PropagationMode, ) -> Option { trace!(target: "net::tx", ?peer_id, "Propagating transactions to peer"); @@ -525,10 +532,17 @@ where let to_propagate = self.pool.get_all(txs).into_iter().map(PropagateTransaction::new); - // Iterate through the transactions to propagate and fill the hashes and full transaction - for tx in to_propagate { - if !peer.seen_transactions.contains(&tx.hash()) { - full_transactions.push(&tx); + if propagation_mode.is_forced() { + // skip cache check if forced + full_transactions.extend(to_propagate); + } else { + // Iterate through the transactions to propagate and fill the hashes and full + // transaction + for tx in to_propagate { + if !peer.seen_transactions.contains(&tx.hash()) { + // Only include if the peer hasn't seen the transaction + full_transactions.push(&tx); + } } } @@ -546,6 +560,7 @@ where // mark transaction as seen by peer peer.seen_transactions.insert(hash); } + // send hashes of transactions self.network.send_transactions_hashes(peer_id, new_pooled_hashes); } @@ -557,6 +572,7 @@ where // mark transaction as seen by peer peer.seen_transactions.insert(tx.hash()); } + // send full transactions self.network.send_transactions(peer_id, new_full_transactions); } @@ -570,7 +586,12 @@ where /// Propagate the transaction hashes to the given peer /// /// Note: This will only send the hashes for transactions that exist in the pool. - fn propagate_hashes_to(&mut self, hashes: Vec, peer_id: PeerId) { + fn propagate_hashes_to( + &mut self, + hashes: Vec, + peer_id: PeerId, + propagation_mode: PropagationMode, + ) { trace!(target: "net::tx", "Start propagating transactions as hashes"); // This fetches a transactions from the pool, including the blob transactions, which are @@ -589,9 +610,14 @@ where // check if transaction is known to peer let mut hashes = PooledTransactionsHashesBuilder::new(peer.version); - for tx in to_propagate { - if !peer.seen_transactions.insert(tx.hash()) { - hashes.push(&tx); + if propagation_mode.is_forced() { + hashes.extend(to_propagate) + } else { + for tx in to_propagate { + if !peer.seen_transactions.contains(&tx.hash()) { + // Include if the peer hasn't seen it + hashes.push(&tx); + } } } @@ -880,14 +906,16 @@ where self.on_new_pending_transactions(vec![hash]) } TransactionsCommand::PropagateHashesTo(hashes, peer) => { - self.propagate_hashes_to(hashes, peer) + self.propagate_hashes_to(hashes, peer, PropagationMode::Forced) } TransactionsCommand::GetActivePeers(tx) => { let peers = self.peers.keys().copied().collect::>(); tx.send(peers).ok(); } TransactionsCommand::PropagateTransactionsTo(txs, peer) => { - if let Some(propagated) = self.propagate_full_transactions_to_peer(txs, peer) { + if let Some(propagated) = + self.propagate_full_transactions_to_peer(txs, peer, PropagationMode::Forced) + { self.pool.on_propagated(propagated); } } @@ -1395,6 +1423,29 @@ where } } +/// Represents the different modes of transaction propagation. +/// +/// This enum is used to determine how transactions are propagated to peers in the network. +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +enum PropagationMode { + /// Default propagation mode. + /// + /// Transactions are only sent to peers that haven't seen them yet. + Basic, + /// Forced propagation mode. 
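To make the two modes concrete before the variant docs continue: `Basic` consults the per-peer `seen_transactions` cache, while `Forced` bypasses it entirely, which is why the command handlers above pass `PropagationMode::Forced`. A self-contained sketch of the filtering rule, with `u64` standing in for transaction hashes (illustrative names):

```rust
use std::collections::HashSet;

#[derive(Clone, Copy)]
enum PropagationMode {
    Basic,
    Forced,
}

impl PropagationMode {
    const fn is_forced(self) -> bool {
        matches!(self, Self::Forced)
    }
}

/// Returns the hashes that would actually be sent to a peer: `Forced`
/// bypasses the per-peer `seen` cache, `Basic` filters against it.
fn select_for_peer(txs: &[u64], seen: &HashSet<u64>, mode: PropagationMode) -> Vec<u64> {
    txs.iter()
        .copied()
        .filter(|tx| mode.is_forced() || !seen.contains(tx))
        .collect()
}

fn main() {
    let seen: HashSet<u64> = [1, 2].into_iter().collect();
    let txs = [1, 2, 3];
    assert_eq!(select_for_peer(&txs, &seen, PropagationMode::Basic), vec![3]);
    assert_eq!(select_for_peer(&txs, &seen, PropagationMode::Forced), vec![1, 2, 3]);
}
```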
+ /// + /// Transactions are sent to all peers regardless of whether they have been sent or received + /// before. + Forced, +} + +impl PropagationMode { + /// Returns `true` if the propagation kind is `Forced`. + const fn is_forced(self) -> bool { + matches!(self, Self::Forced) + } +} + /// A transaction that's about to be propagated to multiple peers. #[derive(Debug, Clone)] struct PropagateTransaction { @@ -1441,6 +1492,13 @@ impl PropagateTransactionsBuilder { Self::Full(FullTransactionsBuilder::new(version)) } + /// Appends all transactions + fn extend<'a>(&mut self, txs: impl IntoIterator) { + for tx in txs { + self.push(tx); + } + } + /// Appends a transaction to the list. fn push(&mut self, transaction: &PropagateTransaction) { match self { @@ -1502,6 +1560,13 @@ impl FullTransactionsBuilder { } } + /// Appends all transactions. + fn extend(&mut self, txs: impl IntoIterator) { + for tx in txs { + self.push(&tx) + } + } + /// Append a transaction to the list of full transaction if the total message bytes size doesn't /// exceed the soft maximum target byte size. The limit is soft, meaning if one single /// transaction goes over the limit, it will be broadcasted in its own [`Transactions`] @@ -1581,6 +1646,13 @@ impl PooledTransactionsHashesBuilder { } } + /// Appends all hashes + fn extend(&mut self, txs: impl IntoIterator) { + for tx in txs { + self.push(&tx); + } + } + fn push(&mut self, tx: &PropagateTransaction) { match self { Self::Eth66(msg) => msg.0.push(tx.hash()), @@ -1596,7 +1668,7 @@ impl PooledTransactionsHashesBuilder { fn new(version: EthVersion) -> Self { match version { EthVersion::Eth66 | EthVersion::Eth67 => Self::Eth66(Default::default()), - EthVersion::Eth68 => Self::Eth68(Default::default()), + EthVersion::Eth68 | EthVersion::Eth69 => Self::Eth68(Default::default()), } } @@ -2388,7 +2460,8 @@ mod tests { let eip4844_tx = Arc::new(factory.create_eip4844()); propagate.push(PropagateTransaction::new(eip4844_tx.clone())); - let propagated = tx_manager.propagate_transactions(propagate.clone()); + let propagated = + tx_manager.propagate_transactions(propagate.clone(), PropagationMode::Basic); assert_eq!(propagated.0.len(), 2); let prop_txs = propagated.0.get(eip1559_tx.transaction.hash()).unwrap(); assert_eq!(prop_txs.len(), 1); @@ -2404,7 +2477,7 @@ mod tests { peer.seen_transactions.contains(eip4844_tx.transaction.hash()); // propagate again - let propagated = tx_manager.propagate_transactions(propagate); + let propagated = tx_manager.propagate_transactions(propagate, PropagationMode::Basic); assert!(propagated.0.is_empty()); } } diff --git a/crates/net/network/src/transactions/validation.rs b/crates/net/network/src/transactions/validation.rs index 4038f23e85ce..7bfe07761a21 100644 --- a/crates/net/network/src/transactions/validation.rs +++ b/crates/net/network/src/transactions/validation.rs @@ -336,7 +336,6 @@ impl FilterAnnouncement for EthMessageFilter { #[cfg(test)] mod test { use super::*; - use alloy_primitives::B256; use reth_eth_wire::{NewPooledTransactionHashes66, NewPooledTransactionHashes68}; use std::{collections::HashMap, str::FromStr}; diff --git a/crates/net/network/tests/it/big_pooled_txs_req.rs b/crates/net/network/tests/it/big_pooled_txs_req.rs index 3a645da6c9fb..4d65e3f63baa 100644 --- a/crates/net/network/tests/it/big_pooled_txs_req.rs +++ b/crates/net/network/tests/it/big_pooled_txs_req.rs @@ -1,4 +1,4 @@ -use alloy_primitives::B256; +use alloy_primitives::{PrimitiveSignature as Signature, B256}; use reth_eth_wire::{GetPooledTransactions, 
PooledTransactions}; use reth_network::{ test_utils::{NetworkEventStream, Testnet}, @@ -6,7 +6,7 @@ use reth_network::{ }; use reth_network_api::{NetworkInfo, Peers}; use reth_network_p2p::sync::{NetworkSyncUpdater, SyncState}; -use reth_primitives::{Signature, TransactionSigned}; +use reth_primitives::TransactionSigned; use reth_provider::test_utils::MockEthProvider; use reth_transaction_pool::{ test_utils::{testing_pool, MockTransaction}, diff --git a/crates/net/network/tests/it/requests.rs b/crates/net/network/tests/it/requests.rs index 61241f02d2de..54e1f4e12b4d 100644 --- a/crates/net/network/tests/it/requests.rs +++ b/crates/net/network/tests/it/requests.rs @@ -3,8 +3,8 @@ use std::sync::Arc; -use alloy_consensus::TxEip2930; -use alloy_primitives::{Bytes, Parity, TxKind, U256}; +use alloy_consensus::{Header, TxEip2930}; +use alloy_primitives::{Bytes, PrimitiveSignature as Signature, TxKind, U256}; use rand::Rng; use reth_eth_wire::HeadersDirection; use reth_network::{ @@ -16,7 +16,7 @@ use reth_network_p2p::{ bodies::client::BodiesClient, headers::client::{HeadersClient, HeadersRequest}, }; -use reth_primitives::{Block, Header, Signature, Transaction, TransactionSigned}; +use reth_primitives::{Block, Transaction, TransactionSigned}; use reth_provider::test_utils::MockEthProvider; /// Returns a new [`TransactionSigned`] with some random parameters @@ -31,7 +31,7 @@ pub fn rng_transaction(rng: &mut impl rand::RngCore) -> TransactionSigned { input: Bytes::from(vec![1, 2]), access_list: Default::default(), }); - let signature = Signature::new(U256::default(), U256::default(), Parity::Parity(true)); + let signature = Signature::new(U256::default(), U256::default(), true); TransactionSigned::from_transaction_and_signature(request, signature) } diff --git a/crates/net/network/tests/it/session.rs b/crates/net/network/tests/it/session.rs index 6bc029d8a7b3..3f74db3d37f1 100644 --- a/crates/net/network/tests/it/session.rs +++ b/crates/net/network/tests/it/session.rs @@ -33,7 +33,7 @@ async fn test_session_established_with_highest_version() { } NetworkEvent::SessionEstablished { peer_id, status, .. } => { assert_eq!(handle1.peer_id(), &peer_id); - assert_eq!(status.version, EthVersion::Eth68 as u8); + assert_eq!(status.version, EthVersion::Eth68); } ev => { panic!("unexpected event {ev:?}") @@ -71,7 +71,7 @@ async fn test_session_established_with_different_capability() { } NetworkEvent::SessionEstablished { peer_id, status, .. 
} => { assert_eq!(handle1.peer_id(), &peer_id); - assert_eq!(status.version, EthVersion::Eth66 as u8); + assert_eq!(status.version, EthVersion::Eth66); } ev => { panic!("unexpected event: {ev:?}") diff --git a/crates/net/network/tests/it/startup.rs b/crates/net/network/tests/it/startup.rs index 89889a869460..d84ff492e5e7 100644 --- a/crates/net/network/tests/it/startup.rs +++ b/crates/net/network/tests/it/startup.rs @@ -113,6 +113,7 @@ async fn test_node_record_address_with_nat() { .add_nat(Some(NatResolver::ExternalIp("10.1.1.1".parse().unwrap()))) .disable_discv4_discovery() .disable_dns_discovery() + .listener_port(0) .build_with_noop_provider(MAINNET.clone()); let network = NetworkManager::new(config).await.unwrap(); @@ -127,6 +128,7 @@ async fn test_node_record_address_with_nat_disable_discovery() { let config = NetworkConfigBuilder::new(secret_key) .add_nat(Some(NatResolver::ExternalIp("10.1.1.1".parse().unwrap()))) .disable_discovery() + .listener_port(0) .build_with_noop_provider(MAINNET.clone()); let network = NetworkManager::new(config).await.unwrap(); diff --git a/crates/net/network/tests/it/txgossip.rs b/crates/net/network/tests/it/txgossip.rs index 70ac67bb5bf8..2e2ee4a031a0 100644 --- a/crates/net/network/tests/it/txgossip.rs +++ b/crates/net/network/tests/it/txgossip.rs @@ -3,12 +3,12 @@ use std::sync::Arc; use alloy_consensus::TxLegacy; -use alloy_primitives::U256; +use alloy_primitives::{PrimitiveSignature as Signature, U256}; use futures::StreamExt; use rand::thread_rng; use reth_network::{test_utils::Testnet, NetworkEvent, NetworkEventListenerProvider}; use reth_network_api::PeersInfo; -use reth_primitives::{Signature, TransactionSigned}; +use reth_primitives::TransactionSigned; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; use reth_transaction_pool::{test_utils::TransactionGenerator, PoolTransaction, TransactionPool}; diff --git a/crates/net/p2p/Cargo.toml b/crates/net/p2p/Cargo.toml index 3b6d74c9dbeb..9348bf2d0413 100644 --- a/crates/net/p2p/Cargo.toml +++ b/crates/net/p2p/Cargo.toml @@ -14,6 +14,7 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-eth-wire-types.workspace = true reth-consensus.workspace = true reth-network-peers.workspace = true @@ -21,6 +22,7 @@ reth-network-types.workspace = true reth-storage-errors.workspace = true # ethereum +alloy-consensus.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true @@ -32,7 +34,6 @@ tokio = { workspace = true, features = ["sync"] } auto_impl.workspace = true tracing.workspace = true derive_more.workspace = true - parking_lot = { workspace = true, optional = true } [dev-dependencies] @@ -47,11 +48,14 @@ test-utils = [ "reth-consensus/test-utils", "parking_lot", "reth-network-types/test-utils", - "reth-primitives/test-utils" + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils" ] std = [ "reth-consensus/std", "reth-primitives/std", "alloy-eips/std", - "alloy-primitives/std" + "alloy-primitives/std", + "reth-primitives-traits/std", + "alloy-consensus/std", ] diff --git a/crates/net/p2p/src/bodies/client.rs b/crates/net/p2p/src/bodies/client.rs index 2a4b57c23459..d48fccc6d000 100644 --- a/crates/net/p2p/src/bodies/client.rs +++ b/crates/net/p2p/src/bodies/client.rs @@ -9,13 +9,16 @@ use futures::{Future, FutureExt}; use reth_primitives::BlockBody; /// The bodies future type -pub type BodiesFut = Pin>> + Send + Sync>>; +pub type BodiesFut = + Pin>> + Send + Sync>>; /// A client capable of 
downloading block bodies. #[auto_impl::auto_impl(&, Arc, Box)] pub trait BodiesClient: DownloadClient { + /// The body type this client fetches. + type Body: Send + Sync + Unpin + 'static; /// The output of the request future for querying block bodies. - type Output: Future>> + Sync + Send + Unpin; + type Output: Future>> + Sync + Send + Unpin; /// Fetches the block body for the requested block. fn get_block_bodies(&self, hashes: Vec) -> Self::Output { @@ -49,11 +52,11 @@ pub struct SingleBodyRequest { fut: Fut, } -impl Future for SingleBodyRequest +impl Future for SingleBodyRequest where - Fut: Future>> + Sync + Send + Unpin, + Fut: Future>> + Sync + Send + Unpin, { - type Output = PeerRequestResult>; + type Output = PeerRequestResult>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let resp = ready!(self.get_mut().fut.poll_unpin(cx)); diff --git a/crates/net/p2p/src/bodies/downloader.rs b/crates/net/p2p/src/bodies/downloader.rs index b55229fa2426..f335b21438b7 100644 --- a/crates/net/p2p/src/bodies/downloader.rs +++ b/crates/net/p2p/src/bodies/downloader.rs @@ -5,14 +5,19 @@ use futures::Stream; use std::ops::RangeInclusive; /// Body downloader return type. -pub type BodyDownloaderResult = DownloadResult>; +pub type BodyDownloaderResult = DownloadResult>>; /// A downloader capable of fetching and yielding block bodies from block headers. /// /// A downloader represents a distinct strategy for submitting requests to download block bodies, /// while a [`BodiesClient`][crate::bodies::client::BodiesClient] represents a client capable of /// fulfilling these requests. -pub trait BodyDownloader: Send + Sync + Stream + Unpin { +pub trait BodyDownloader: + Send + Sync + Stream> + Unpin +{ + /// The type of the body that is being downloaded. + type Body: Send + Sync + Unpin + 'static; + /// Method for setting the download range. fn set_download_range(&mut self, range: RangeInclusive) -> DownloadResult<()>; } diff --git a/crates/net/p2p/src/bodies/response.rs b/crates/net/p2p/src/bodies/response.rs index 8ae840fbf669..153d7d39d4ef 100644 --- a/crates/net/p2p/src/bodies/response.rs +++ b/crates/net/p2p/src/bodies/response.rs @@ -1,16 +1,17 @@ use alloy_primitives::{BlockNumber, U256}; -use reth_primitives::{SealedBlock, SealedHeader}; +use reth_primitives::{BlockBody, SealedBlock, SealedHeader}; +use reth_primitives_traits::InMemorySize; /// The block response #[derive(PartialEq, Eq, Debug, Clone)] -pub enum BlockResponse { +pub enum BlockResponse { /// Full block response (with transactions or ommers) - Full(SealedBlock), + Full(SealedBlock), /// The empty block response Empty(SealedHeader), } -impl BlockResponse { +impl BlockResponse { /// Return the reference to the response header pub const fn header(&self) -> &SealedHeader { match self { @@ -19,15 +20,6 @@ impl BlockResponse { } } - /// Calculates a heuristic for the in-memory size of the [`BlockResponse`]. 
-    #[inline]
-    pub fn size(&self) -> usize {
-        match self {
-            Self::Full(block) => SealedBlock::size(block),
-            Self::Empty(header) => SealedHeader::size(header),
-        }
-    }
-
     /// Return the block number
     pub fn block_number(&self) -> BlockNumber {
         self.header().number
@@ -41,3 +33,13 @@ impl BlockResponse {
         }
     }
 }
+
+impl<B: InMemorySize> InMemorySize for BlockResponse<B> {
+    #[inline]
+    fn size(&self) -> usize {
+        match self {
+            Self::Full(block) => SealedBlock::size(block),
+            Self::Empty(header) => SealedHeader::size(header),
+        }
+    }
+}
diff --git a/crates/net/p2p/src/either.rs b/crates/net/p2p/src/either.rs
index 30650069b913..3f1182bd4826 100644
--- a/crates/net/p2p/src/either.rs
+++ b/crates/net/p2p/src/either.rs
@@ -32,8 +32,9 @@ where
 impl<A, B> BodiesClient for Either<A, B>
 where
     A: BodiesClient,
-    B: BodiesClient,
+    B: BodiesClient<Body = A::Body>,
 {
+    type Body = A::Body;
     type Output = Either<A::Output, B::Output>;

     fn get_block_bodies_with_priority(
@@ -51,8 +52,9 @@ where
 impl<A, B> HeadersClient for Either<A, B>
 where
     A: HeadersClient,
-    B: HeadersClient,
+    B: HeadersClient<Header = A::Header>
, { + type Header = A::Header; type Output = Either; fn get_headers_with_priority( diff --git a/crates/net/p2p/src/error.rs b/crates/net/p2p/src/error.rs index 9394a9fdf6c5..45d34fc04ece 100644 --- a/crates/net/p2p/src/error.rs +++ b/crates/net/p2p/src/error.rs @@ -1,13 +1,14 @@ use std::ops::RangeInclusive; use super::headers::client::HeadersRequest; +use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockNumber, B256}; use derive_more::{Display, Error}; use reth_consensus::ConsensusError; use reth_network_peers::WithPeerId; use reth_network_types::ReputationChangeKind; -use reth_primitives::{GotExpected, GotExpectedBoxed, Header}; +use reth_primitives::{GotExpected, GotExpectedBoxed}; use reth_storage_errors::{db::DatabaseError, provider::ProviderError}; use tokio::sync::{mpsc, oneshot}; @@ -26,7 +27,7 @@ pub trait EthResponseValidator { fn reputation_change_err(&self) -> Option; } -impl EthResponseValidator for RequestResult> { +impl EthResponseValidator for RequestResult> { fn is_likely_bad_headers_response(&self, request: &HeadersRequest) -> bool { match self { Ok(headers) => { @@ -38,7 +39,7 @@ impl EthResponseValidator for RequestResult> { match request.start { BlockHashOrNumber::Number(block_number) => { - headers.first().is_some_and(|header| block_number != header.number) + headers.first().is_some_and(|header| block_number != header.number()) } BlockHashOrNumber::Hash(_) => { // we don't want to hash the header @@ -79,24 +80,24 @@ impl EthResponseValidator for RequestResult> { #[derive(Clone, Debug, Eq, PartialEq, Display, Error)] pub enum RequestError { /// Closed channel to the peer. - #[display("closed channel to the peer")] /// Indicates the channel to the peer is closed. + #[display("closed channel to the peer")] ChannelClosed, /// Connection to a peer dropped while handling the request. - #[display("connection to a peer dropped while handling the request")] /// Represents a dropped connection while handling the request. + #[display("connection to a peer dropped while handling the request")] ConnectionDropped, /// Capability message is not supported by the remote peer. - #[display("capability message is not supported by remote peer")] /// Indicates an unsupported capability message from the remote peer. + #[display("capability message is not supported by remote peer")] UnsupportedCapability, /// Request timed out while awaiting response. - #[display("request timed out while awaiting response")] /// Represents a timeout while waiting for a response. + #[display("request timed out while awaiting response")] Timeout, /// Received bad response. - #[display("received bad response")] /// Indicates a bad response was received. 
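The `EthResponseValidator` change above now reads the first returned header's number through the `BlockHeader` trait, but the heuristic itself is unchanged: a non-empty response to a number-based request should start exactly at the requested height, and hash-based requests are skipped since verifying them would mean hashing the header. A sketch of just that rule (simplified to bare numbers):

```rust
/// Flags a headers response that is non-empty but does not start at the
/// requested block number.
fn is_likely_bad_response(requested_start: u64, first_returned: Option<u64>) -> bool {
    match first_returned {
        Some(number) => number != requested_start,
        // Empty responses are judged by other rules (e.g. soft limits).
        None => false,
    }
}

fn main() {
    assert!(is_likely_bad_response(100, Some(101))); // wrong starting height
    assert!(!is_likely_bad_response(100, Some(100)));
    assert!(!is_likely_bad_response(100, None));
}
```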
+ #[display("received bad response")] BadResponse, } @@ -216,6 +217,8 @@ impl From for DownloadError { #[cfg(test)] mod tests { + use alloy_consensus::Header; + use super::*; #[test] diff --git a/crates/net/p2p/src/full_block.rs b/crates/net/p2p/src/full_block.rs index e5129b686741..151a5bdd2a3b 100644 --- a/crates/net/p2p/src/full_block.rs +++ b/crates/net/p2p/src/full_block.rs @@ -5,16 +5,18 @@ use crate::{ headers::client::{HeadersClient, SingleHeaderRequest}, BlockClient, }; +use alloy_consensus::BlockHeader; use alloy_primitives::{Sealable, B256}; -use reth_consensus::{Consensus, ConsensusError}; +use reth_consensus::Consensus; use reth_eth_wire_types::HeadersDirection; use reth_network_peers::WithPeerId; -use reth_primitives::{BlockBody, GotExpected, Header, SealedBlock, SealedHeader}; +use reth_primitives::{SealedBlock, SealedHeader}; use std::{ cmp::Reverse, collections::{HashMap, VecDeque}, fmt::Debug, future::Future, + hash::Hash, pin::Pin, sync::Arc, task::{ready, Context, Poll}, @@ -23,14 +25,23 @@ use tracing::debug; /// A Client that can fetch full blocks from the network. #[derive(Debug, Clone)] -pub struct FullBlockClient { +pub struct FullBlockClient +where + Client: BlockClient, +{ client: Client, - consensus: Arc, + consensus: Arc>, } -impl FullBlockClient { +impl FullBlockClient +where + Client: BlockClient, +{ /// Creates a new instance of `FullBlockClient`. - pub fn new(client: Client, consensus: Arc) -> Self { + pub fn new( + client: Client, + consensus: Arc>, + ) -> Self { Self { client, consensus } } @@ -55,6 +66,7 @@ where let client = self.client.clone(); FetchFullBlockFuture { hash, + consensus: self.consensus.clone(), request: FullBlockRequest { header: Some(client.get_header(hash.into())), body: Some(client.get_block_body(hash)), @@ -84,11 +96,7 @@ where start_hash: hash, count, request: FullBlockRangeRequest { - headers: Some(client.get_headers(HeadersRequest { - start: hash.into(), - limit: count, - direction: HeadersDirection::Falling, - })), + headers: Some(client.get_headers(HeadersRequest::falling(hash.into(), count))), bodies: None, }, client, @@ -110,15 +118,16 @@ where Client: BlockClient, { client: Client, + consensus: Arc>, hash: B256, request: FullBlockRequest, - header: Option, - body: Option, + header: Option>, + body: Option>, } impl FetchFullBlockFuture where - Client: BlockClient, + Client: BlockClient, { /// Returns the hash of the block being requested. pub const fn hash(&self) -> &B256 { @@ -127,11 +136,11 @@ where /// If the header request is already complete, this returns the block number pub fn block_number(&self) -> Option { - self.header.as_ref().map(|h| h.number) + self.header.as_ref().map(|h| h.number()) } /// Returns the [`SealedBlock`] if the request is complete and valid. 
- fn take_block(&mut self) -> Option { + fn take_block(&mut self) -> Option> { if self.header.is_none() || self.body.is_none() { return None } @@ -142,7 +151,8 @@ where BodyResponse::Validated(body) => Some(SealedBlock::new(header, body)), BodyResponse::PendingValidation(resp) => { // ensure the block is valid, else retry - if let Err(err) = ensure_valid_body_response(&header, resp.data()) { + if let Err(err) = self.consensus.validate_body_against_header(resp.data(), &header) + { debug!(target: "downloaders", %err, hash=?header.hash(), "Received wrong body"); self.client.report_bad_message(resp.peer_id()); self.header = Some(header); @@ -154,9 +164,9 @@ where } } - fn on_block_response(&mut self, resp: WithPeerId) { + fn on_block_response(&mut self, resp: WithPeerId) { if let Some(ref header) = self.header { - if let Err(err) = ensure_valid_body_response(header, resp.data()) { + if let Err(err) = self.consensus.validate_body_against_header(resp.data(), header) { debug!(target: "downloaders", %err, hash=?header.hash(), "Received wrong body"); self.client.report_bad_message(resp.peer_id()); return @@ -170,9 +180,9 @@ where impl Future for FetchFullBlockFuture where - Client: BlockClient + 'static, + Client: BlockClient + 'static, { - type Output = SealedBlock; + type Output = SealedBlock; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); @@ -185,15 +195,8 @@ where ResponseResult::Header(res) => { match res { Ok(maybe_header) => { - let (peer, maybe_header) = maybe_header - .map(|h| { - h.map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }) - }) - .split(); + let (peer, maybe_header) = + maybe_header.map(|h| h.map(SealedHeader::seal)).split(); if let Some(header) = maybe_header { if header.hash() == this.hash { this.header = Some(header); @@ -249,7 +252,7 @@ where impl Debug for FetchFullBlockFuture where - Client: BlockClient, + Client: BlockClient, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("FetchFullBlockFuture") @@ -272,7 +275,7 @@ impl FullBlockRequest where Client: BlockClient, { - fn poll(&mut self, cx: &mut Context<'_>) -> Poll { + fn poll(&mut self, cx: &mut Context<'_>) -> Poll> { if let Some(fut) = Pin::new(&mut self.header).as_pin_mut() { if let Poll::Ready(res) = fut.poll(cx) { self.header = None; @@ -293,63 +296,19 @@ where /// The result of a request for a single header or body. This is yielded by the `FullBlockRequest` /// future. -enum ResponseResult { - Header(PeerRequestResult>), - Body(PeerRequestResult>), +enum ResponseResult { + Header(PeerRequestResult>), + Body(PeerRequestResult>), } /// The response of a body request. #[derive(Debug)] -enum BodyResponse { +enum BodyResponse { /// Already validated against transaction root of header - Validated(BlockBody), + Validated(B), /// Still needs to be validated against header - PendingValidation(WithPeerId), -} - -/// Ensures the block response data matches the header. 
-/// -/// This ensures the body response items match the header's hashes: -/// - ommer hash -/// - transaction root -/// - withdrawals root -fn ensure_valid_body_response( - header: &SealedHeader, - block: &BlockBody, -) -> Result<(), ConsensusError> { - let ommers_hash = block.calculate_ommers_root(); - if header.ommers_hash != ommers_hash { - return Err(ConsensusError::BodyOmmersHashDiff( - GotExpected { got: ommers_hash, expected: header.ommers_hash }.into(), - )) - } - - let tx_root = block.calculate_tx_root(); - if header.transactions_root != tx_root { - return Err(ConsensusError::BodyTransactionRootDiff( - GotExpected { got: tx_root, expected: header.transactions_root }.into(), - )) - } - - match (header.withdrawals_root, &block.withdrawals) { - (Some(header_withdrawals_root), Some(withdrawals)) => { - let withdrawals = withdrawals.as_slice(); - let withdrawals_root = reth_primitives::proofs::calculate_withdrawals_root(withdrawals); - if withdrawals_root != header_withdrawals_root { - return Err(ConsensusError::BodyWithdrawalsRootDiff( - GotExpected { got: withdrawals_root, expected: header_withdrawals_root }.into(), - )) - } - } - (None, None) => { - // this is ok because we assume the fork is not active in this case - } - _ => return Err(ConsensusError::WithdrawalsRootUnexpected), - } - - Ok(()) + PendingValidation(WithPeerId), } - /// A future that downloads a range of full blocks from the network. /// /// This first fetches the headers for the given range using the inner `Client`. Once the request @@ -371,7 +330,7 @@ where /// The client used to fetch headers and bodies. client: Client, /// The consensus instance used to validate the blocks. - consensus: Arc, + consensus: Arc>, /// The block hash to start fetching from (inclusive). start_hash: B256, /// How many blocks to fetch: `len([start_hash, ..]) == count` @@ -379,16 +338,16 @@ where /// Requests for headers and bodies that are in progress. request: FullBlockRangeRequest, /// Fetched headers. - headers: Option>, + headers: Option>>, /// The next headers to request bodies for. This is drained as responses are received. - pending_headers: VecDeque, + pending_headers: VecDeque>, /// The bodies that have been received so far. - bodies: HashMap, + bodies: HashMap, BodyResponse>, } impl FetchFullBlockRangeFuture where - Client: BlockClient, + Client: BlockClient, { /// Returns the block hashes for the given range, if they are available. pub fn range_block_hashes(&self) -> Option> { @@ -403,14 +362,14 @@ where /// Inserts a block body, matching it with the `next_header`. /// /// Note: this assumes the response matches the next header in the queue. - fn insert_body(&mut self, body_response: BodyResponse) { + fn insert_body(&mut self, body_response: BodyResponse) { if let Some(header) = self.pending_headers.pop_front() { self.bodies.insert(header, body_response); } } /// Inserts multiple block bodies. - fn insert_bodies(&mut self, bodies: impl IntoIterator) { + fn insert_bodies(&mut self, bodies: impl IntoIterator>) { for body in bodies { self.insert_body(body); } @@ -429,7 +388,7 @@ where /// /// These are returned in falling order starting with the requested `hash`, i.e. with /// descending block numbers. 
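The deleted `ensure_valid_body_response` above is not lost: its checks now live behind `Consensus::validate_body_against_header`, so downloaders and consensus share one implementation. Its most subtle branch was the withdrawals check, where presence itself is part of the contract. The shape of that logic, with `u64` standing in for the root hashes and the body's withdrawals list collapsed to its computed root (a sketch, not the trait's API):

```rust
/// Both sides present: roots must match. Both absent: the fork simply is
/// not active. Mixed presence is an error on its own.
fn check_withdrawals_root(
    header_root: Option<u64>,
    computed_root: Option<u64>,
) -> Result<(), &'static str> {
    match (header_root, computed_root) {
        (Some(expected), Some(got)) if expected == got => Ok(()),
        (Some(_), Some(_)) => Err("withdrawals root mismatch"),
        (None, None) => Ok(()),
        _ => Err("unexpected withdrawals"),
    }
}

fn main() {
    assert!(check_withdrawals_root(Some(7), Some(7)).is_ok());
    assert!(check_withdrawals_root(Some(7), Some(8)).is_err());
    assert!(check_withdrawals_root(None, None).is_ok());
    assert!(check_withdrawals_root(Some(7), None).is_err());
}
```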
- fn take_blocks(&mut self) -> Option> { + fn take_blocks(&mut self) -> Option>> { if !self.is_bodies_complete() { // not done with bodies yet return None @@ -446,7 +405,9 @@ where BodyResponse::Validated(body) => body, BodyResponse::PendingValidation(resp) => { // ensure the block is valid, else retry - if let Err(err) = ensure_valid_body_response(header, resp.data()) { + if let Err(err) = + self.consensus.validate_body_against_header(resp.data(), header) + { debug!(target: "downloaders", %err, hash=?header.hash(), "Received wrong body in range response"); self.client.report_bad_message(resp.peer_id()); @@ -484,23 +445,14 @@ where Some(valid_responses) } - fn on_headers_response(&mut self, headers: WithPeerId>) { - let (peer, mut headers_falling) = headers - .map(|h| { - h.into_iter() - .map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }) - .collect::>() - }) - .split(); + fn on_headers_response(&mut self, headers: WithPeerId>) { + let (peer, mut headers_falling) = + headers.map(|h| h.into_iter().map(SealedHeader::seal).collect::>()).split(); // fill in the response if it's the correct length if headers_falling.len() == self.count as usize { // sort headers from highest to lowest block number - headers_falling.sort_unstable_by_key(|h| Reverse(h.number)); + headers_falling.sort_unstable_by_key(|h| Reverse(h.number())); // check the starting hash if headers_falling[0].hash() == self.start_hash { @@ -551,9 +503,9 @@ where impl Future for FetchFullBlockRangeFuture where - Client: BlockClient + 'static, + Client: BlockClient + 'static, { - type Output = Vec; + type Output = Vec>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); @@ -660,7 +612,10 @@ impl FullBlockRangeRequest where Client: BlockClient, { - fn poll(&mut self, cx: &mut Context<'_>) -> Poll { + fn poll( + &mut self, + cx: &mut Context<'_>, + ) -> Poll> { if let Some(fut) = Pin::new(&mut self.headers).as_pin_mut() { if let Poll::Ready(res) = fut.poll(cx) { self.headers = None; @@ -681,13 +636,15 @@ where // The result of a request for headers or block bodies. This is yielded by the // `FullBlockRangeRequest` future. 
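Range responses are anchored at the requested `start_hash` and consumed highest-to-lowest, which is why `on_headers_response` above sorts with `Reverse(h.number())` before checking that the first header matches the start hash. The idiom in isolation:

```rust
use std::cmp::Reverse;

fn main() {
    // Sort block numbers descending: sort ascending by the reversed key.
    let mut block_numbers = vec![93u64, 95, 91, 94, 92];
    block_numbers.sort_unstable_by_key(|n| Reverse(*n));
    assert_eq!(block_numbers, vec![95, 94, 93, 92, 91]);
}
```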
-enum RangeResponseResult { - Header(PeerRequestResult>), - Body(PeerRequestResult>), +enum RangeResponseResult { + Header(PeerRequestResult>), + Body(PeerRequestResult>), } #[cfg(test)] mod tests { + use reth_primitives::BlockBody; + use super::*; use crate::test_utils::TestFullBlockClient; use std::ops::Range; @@ -695,7 +652,7 @@ mod tests { #[tokio::test] async fn download_single_full_block() { let client = TestFullBlockClient::default(); - let header = SealedHeader::default(); + let header: SealedHeader = SealedHeader::default(); let body = BlockBody::default(); client.insert(header.clone(), body.clone()); let client = FullBlockClient::test_client(client); @@ -707,7 +664,7 @@ mod tests { #[tokio::test] async fn download_single_full_block_range() { let client = TestFullBlockClient::default(); - let header = SealedHeader::default(); + let header: SealedHeader = SealedHeader::default(); let body = BlockBody::default(); client.insert(header.clone(), body.clone()); let client = FullBlockClient::test_client(client); @@ -722,7 +679,7 @@ mod tests { client: &TestFullBlockClient, range: Range, ) -> (SealedHeader, BlockBody) { - let mut sealed_header = SealedHeader::default(); + let mut sealed_header: SealedHeader = SealedHeader::default(); let body = BlockBody::default(); for _ in range { let (mut header, hash) = sealed_header.split(); @@ -730,9 +687,7 @@ mod tests { header.parent_hash = hash; header.number += 1; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - sealed_header = SealedHeader::new(header, seal); + sealed_header = SealedHeader::seal(header); client.insert(sealed_header.clone(), body.clone()); } @@ -785,6 +740,7 @@ mod tests { let test_consensus = reth_consensus::test_utils::TestConsensus::default(); test_consensus.set_fail_validation(true); + test_consensus.set_fail_body_against_header(false); let client = FullBlockClient::new(client, Arc::new(test_consensus)); let received = client.get_full_block_range(header.hash(), range_length as u64).await; diff --git a/crates/net/p2p/src/headers/client.rs b/crates/net/p2p/src/headers/client.rs index b73ea4e925f9..3e8f9296e076 100644 --- a/crates/net/p2p/src/headers/client.rs +++ b/crates/net/p2p/src/headers/client.rs @@ -1,8 +1,8 @@ use crate::{download::DownloadClient, error::PeerRequestResult, priority::Priority}; +use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; use futures::{Future, FutureExt}; pub use reth_eth_wire_types::{BlockHeaders, HeadersDirection}; -use reth_primitives::Header; use std::{ fmt::Debug, pin::Pin, @@ -21,14 +21,44 @@ pub struct HeadersRequest { pub direction: HeadersDirection, } +impl HeadersRequest { + /// Creates a request for a single header (direction doesn't matter). + /// + /// # Arguments + /// * `start` - The block hash or number to start from + pub const fn one(start: BlockHashOrNumber) -> Self { + Self { direction: HeadersDirection::Rising, limit: 1, start } + } + + /// Creates a request for headers in rising direction (ascending block numbers). + /// + /// # Arguments + /// * `start` - The block hash or number to start from + /// * `limit` - Maximum number of headers to retrieve + pub const fn rising(start: BlockHashOrNumber, limit: u64) -> Self { + Self { direction: HeadersDirection::Rising, limit, start } + } + + /// Creates a request for headers in falling direction (descending block numbers). 
+ /// + /// # Arguments + /// * `start` - The block hash or number to start from + /// * `limit` - Maximum number of headers to retrieve + pub const fn falling(start: BlockHashOrNumber, limit: u64) -> Self { + Self { direction: HeadersDirection::Falling, limit, start } + } +} + /// The headers future type pub type HeadersFut = Pin>> + Send + Sync>>; /// The block headers downloader client #[auto_impl::auto_impl(&, Arc, Box)] pub trait HeadersClient: DownloadClient { + /// The header type this client fetches. + type Header: Send + Sync + Unpin; /// The headers future type - type Output: Future>> + Sync + Send + Unpin; + type Output: Future>> + Sync + Send + Unpin; /// Sends the header request to the p2p network and returns the header response received from a /// peer. @@ -55,12 +85,7 @@ pub trait HeadersClient: DownloadClient { start: BlockHashOrNumber, priority: Priority, ) -> SingleHeaderRequest { - let req = HeadersRequest { - start, - limit: 1, - // doesn't matter for a single header - direction: HeadersDirection::Rising, - }; + let req = HeadersRequest::one(start); let fut = self.get_headers_with_priority(req, priority); SingleHeaderRequest { fut } } @@ -73,11 +98,11 @@ pub struct SingleHeaderRequest { fut: Fut, } -impl Future for SingleHeaderRequest +impl Future for SingleHeaderRequest where - Fut: Future>> + Sync + Send + Unpin, + Fut: Future>> + Sync + Send + Unpin, { - type Output = PeerRequestResult>; + type Output = PeerRequestResult>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let resp = ready!(self.get_mut().fut.poll_unpin(cx)); diff --git a/crates/net/p2p/src/headers/downloader.rs b/crates/net/p2p/src/headers/downloader.rs index 5565880ed399..59ecb58b84d2 100644 --- a/crates/net/p2p/src/headers/downloader.rs +++ b/crates/net/p2p/src/headers/downloader.rs @@ -1,5 +1,6 @@ use super::error::HeadersDownloaderResult; use crate::error::{DownloadError, DownloadResult}; +use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; use alloy_primitives::B256; use futures::Stream; @@ -13,19 +14,25 @@ use reth_primitives::SealedHeader; /// /// A [`HeaderDownloader`] is a [Stream] that returns batches of headers. pub trait HeaderDownloader: - Send + Sync + Stream>> + Unpin + Send + + Sync + + Stream>, Self::Header>> + + Unpin { + /// The header type being downloaded. 
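The three `HeadersRequest` constructors added above cover the request shapes the downloaders use. Illustrative usage, assuming `HeadersRequest` is in scope and with a made-up anchor block (the `start`, `limit`, and `direction` fields are the ones shown earlier in this file):

use alloy_eips::BlockHashOrNumber;

fn demo_requests() {
    let tip = BlockHashOrNumber::Number(1_000_000); // hypothetical anchor block
    let single = HeadersRequest::one(tip);           // limit == 1, direction irrelevant
    let forward = HeadersRequest::rising(tip, 64);   // ascending block numbers
    let backward = HeadersRequest::falling(tip, 64); // descending block numbers
    assert_eq!(single.limit, 1);
    assert_eq!(forward.limit, backward.limit);
}

`get_header_with_priority` below now builds its request with `HeadersRequest::one(start)` instead of spelling the struct out, and the `HeaderDownloader` trait picks up the new `Header` associated type immediately after.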
+ type Header: Send + Sync + Unpin + 'static; + /// Updates the gap to sync which ranges from local head to the sync target /// /// See also [`HeaderDownloader::update_sync_target`] and /// [`HeaderDownloader::update_local_head`] - fn update_sync_gap(&mut self, head: SealedHeader, target: SyncTarget) { + fn update_sync_gap(&mut self, head: SealedHeader, target: SyncTarget) { self.update_local_head(head); self.update_sync_target(target); } /// Updates the block number of the local database - fn update_local_head(&mut self, head: SealedHeader); + fn update_local_head(&mut self, head: SealedHeader); /// Updates the target we want to sync to fn update_sync_target(&mut self, target: SyncTarget); @@ -74,23 +81,23 @@ impl SyncTarget { /// Validate whether the header is valid in relation to its parent /// /// Returns Ok(false) if the -pub fn validate_header_download( - consensus: &dyn Consensus, - header: &SealedHeader, - parent: &SealedHeader, +pub fn validate_header_download( + consensus: &dyn Consensus, + header: &SealedHeader, + parent: &SealedHeader, ) -> DownloadResult<()> { // validate header against parent consensus.validate_header_against_parent(header, parent).map_err(|error| { DownloadError::HeaderValidation { hash: header.hash(), - number: header.number, + number: header.number(), error: Box::new(error), } })?; // validate header standalone consensus.validate_header(header).map_err(|error| DownloadError::HeaderValidation { hash: header.hash(), - number: header.number, + number: header.number(), error: Box::new(error), })?; Ok(()) diff --git a/crates/net/p2p/src/headers/error.rs b/crates/net/p2p/src/headers/error.rs index b22aae9248ec..8757bb215f5f 100644 --- a/crates/net/p2p/src/headers/error.rs +++ b/crates/net/p2p/src/headers/error.rs @@ -3,19 +3,19 @@ use reth_consensus::ConsensusError; use reth_primitives::SealedHeader; /// Header downloader result -pub type HeadersDownloaderResult = Result; +pub type HeadersDownloaderResult = Result>; /// Error variants that can happen when sending requests to a session. #[derive(Debug, Clone, Eq, PartialEq, Display, Error)] -pub enum HeadersDownloaderError { +pub enum HeadersDownloaderError { /// The downloaded header cannot be attached to the local head, /// but is valid otherwise. #[display("valid downloaded header cannot be attached to the local head: {error}")] DetachedHead { /// The local head we attempted to attach to. - local_head: Box, + local_head: Box>, /// The header we attempted to attach. - header: Box, + header: Box>, /// The error that occurred when attempting to attach the header. #[error(source)] error: Box, diff --git a/crates/net/p2p/src/lib.rs b/crates/net/p2p/src/lib.rs index 2ba8012f0ae3..7dcb77671d46 100644 --- a/crates/net/p2p/src/lib.rs +++ b/crates/net/p2p/src/lib.rs @@ -52,3 +52,14 @@ pub use headers::client::HeadersClient; pub trait BlockClient: HeadersClient + BodiesClient + Unpin + Clone {} impl BlockClient for T where T: HeadersClient + BodiesClient + Unpin + Clone {} + +/// The [`BlockClient`] providing Ethereum block parts. +pub trait EthBlockClient: + BlockClient
+{ +} + +impl EthBlockClient for T where + T: BlockClient
+{ +} diff --git a/crates/net/p2p/src/test_utils/bodies.rs b/crates/net/p2p/src/test_utils/bodies.rs index cfd292129162..0689d403f2ce 100644 --- a/crates/net/p2p/src/test_utils/bodies.rs +++ b/crates/net/p2p/src/test_utils/bodies.rs @@ -36,6 +36,7 @@ impl BodiesClient for TestBodiesClient where F: Fn(Vec) -> PeerRequestResult> + Send + Sync, { + type Body = BlockBody; type Output = BodiesFut; fn get_block_bodies_with_priority( diff --git a/crates/net/p2p/src/test_utils/full_block.rs b/crates/net/p2p/src/test_utils/full_block.rs index 8a13f69325dc..ee65bcb3f072 100644 --- a/crates/net/p2p/src/test_utils/full_block.rs +++ b/crates/net/p2p/src/test_utils/full_block.rs @@ -5,12 +5,13 @@ use crate::{ headers::client::{HeadersClient, HeadersRequest}, priority::Priority, }; +use alloy_consensus::Header; use alloy_eips::{BlockHashOrNumber, BlockNumHash}; use alloy_primitives::B256; use parking_lot::Mutex; use reth_eth_wire_types::HeadersDirection; use reth_network_peers::{PeerId, WithPeerId}; -use reth_primitives::{BlockBody, Header, SealedBlock, SealedHeader}; +use reth_primitives::{BlockBody, SealedBlock, SealedHeader}; use std::{collections::HashMap, sync::Arc}; /// A headers+bodies client implementation that does nothing. @@ -40,6 +41,7 @@ impl DownloadClient for NoopFullBlockClient { /// Implements the `BodiesClient` trait for the `NoopFullBlockClient` struct. impl BodiesClient for NoopFullBlockClient { + type Body = BlockBody; /// Defines the output type of the function. type Output = futures::future::Ready>>; @@ -65,6 +67,7 @@ impl BodiesClient for NoopFullBlockClient { } impl HeadersClient for NoopFullBlockClient { + type Header = Header; /// The output type representing a future containing a peer request result with a vector of /// headers. type Output = futures::future::Ready>>; @@ -152,6 +155,7 @@ impl DownloadClient for TestFullBlockClient { /// Implements the `HeadersClient` trait for the `TestFullBlockClient` struct. impl HeadersClient for TestFullBlockClient { + type Header = Header; /// Specifies the associated output type. type Output = futures::future::Ready>>; @@ -205,6 +209,7 @@ impl HeadersClient for TestFullBlockClient { /// Implements the `BodiesClient` trait for the `TestFullBlockClient` struct. impl BodiesClient for TestFullBlockClient { + type Body = BlockBody; /// Defines the output type of the function. 
type Output = futures::future::Ready>>; diff --git a/crates/net/p2p/src/test_utils/headers.rs b/crates/net/p2p/src/test_utils/headers.rs index e61183d22e4b..bc5262abef4e 100644 --- a/crates/net/p2p/src/test_utils/headers.rs +++ b/crates/net/p2p/src/test_utils/headers.rs @@ -10,12 +10,12 @@ use crate::{ }, priority::Priority, }; -use alloy_primitives::Sealable; +use alloy_consensus::Header; use futures::{Future, FutureExt, Stream, StreamExt}; use reth_consensus::{test_utils::TestConsensus, Consensus}; use reth_eth_wire_types::HeadersDirection; use reth_network_peers::{PeerId, WithPeerId}; -use reth_primitives::{Header, SealedHeader}; +use reth_primitives::SealedHeader; use std::{ fmt, pin::Pin, @@ -62,6 +62,8 @@ impl TestHeaderDownloader { } impl HeaderDownloader for TestHeaderDownloader { + type Header = Header; + fn update_local_head(&mut self, _head: SealedHeader) {} fn update_sync_target(&mut self, _target: SyncTarget) {} @@ -72,7 +74,7 @@ impl HeaderDownloader for TestHeaderDownloader { } impl Stream for TestHeaderDownloader { - type Item = HeadersDownloaderResult>; + type Item = HeadersDownloaderResult, Header>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -143,8 +145,10 @@ impl Stream for TestDownload { return Poll::Ready(None) } - let empty = SealedHeader::default(); - if let Err(error) = this.consensus.validate_header_against_parent(&empty, &empty) { + let empty: SealedHeader = SealedHeader::default(); + if let Err(error) = + Consensus::<_>::validate_header_against_parent(&this.consensus, &empty, &empty) + { this.done = true; return Poll::Ready(Some(Err(DownloadError::HeaderValidation { hash: empty.hash(), @@ -156,16 +160,8 @@ impl Stream for TestDownload { match ready!(this.get_or_init_fut().poll_unpin(cx)) { Ok(resp) => { // Skip head and seal headers - let mut headers = resp - .1 - .into_iter() - .skip(1) - .map(|header| { - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }) - .collect::>(); + let mut headers = + resp.1.into_iter().skip(1).map(SealedHeader::seal).collect::>(); headers.sort_unstable_by_key(|h| h.number); headers.into_iter().for_each(|h| this.buffer.push(h)); this.done = true; @@ -227,6 +223,7 @@ impl DownloadClient for TestHeadersClient { } impl HeadersClient for TestHeadersClient { + type Header = Header; type Output = TestHeadersFut; fn get_headers_with_priority( diff --git a/crates/net/peers/src/node_record.rs b/crates/net/peers/src/node_record.rs index d6836d88193c..ed48e242c1da 100644 --- a/crates/net/peers/src/node_record.rs +++ b/crates/net/peers/src/node_record.rs @@ -231,12 +231,11 @@ impl TryFrom<&Enr> for NodeRecord { #[cfg(test)] mod tests { + use super::*; use alloy_rlp::Decodable; use rand::{thread_rng, Rng, RngCore}; use std::net::Ipv6Addr; - use super::*; - #[test] fn test_mapped_ipv6() { let mut rng = thread_rng(); diff --git a/crates/node/api/Cargo.toml b/crates/node/api/Cargo.toml index c2c3eb46326b..a4cc0eb7eb64 100644 --- a/crates/node/api/Cargo.toml +++ b/crates/node/api/Cargo.toml @@ -18,15 +18,13 @@ reth-evm.workspace = true reth-provider.workspace = true reth-engine-primitives.workspace = true reth-transaction-pool.workspace = true -reth-payload-builder.workspace = true reth-payload-primitives.workspace = true reth-tasks.workspace = true -reth-rpc-eth-api.workspace = true reth-network-api.workspace = true reth-node-types.workspace = true -reth-primitives.workspace = true reth-node-core.workspace = true 
alloy-rpc-types-engine.workspace = true +alloy-consensus.workspace = true -eyre.workspace = true \ No newline at end of file +eyre.workspace = true diff --git a/crates/node/api/src/lib.rs b/crates/node/api/src/lib.rs index 7692ed6f2cae..099cf82b5fe0 100644 --- a/crates/node/api/src/lib.rs +++ b/crates/node/api/src/lib.rs @@ -25,5 +25,3 @@ pub use node::*; // re-export for convenience pub use reth_node_types::*; pub use reth_provider::FullProvider; - -pub use reth_rpc_eth_api::EthApiTypes; diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 3173fd2b3985..90b9e2999bf0 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -1,7 +1,7 @@ //! Traits for configuring a node. -use std::{future::Future, marker::PhantomData}; - +use crate::ConfigureEvm; +use alloy_consensus::Header; use alloy_rpc_types_engine::JwtSecret; use reth_beacon_consensus::BeaconConsensusEngineHandle; use reth_consensus::Consensus; @@ -9,13 +9,11 @@ use reth_evm::execute::BlockExecutorProvider; use reth_network_api::FullNetwork; use reth_node_core::node_config::NodeConfig; use reth_node_types::{NodeTypes, NodeTypesWithDB, NodeTypesWithEngine}; -use reth_payload_builder::PayloadBuilderHandle; -use reth_primitives::Header; +use reth_payload_primitives::PayloadBuilder; use reth_provider::FullProvider; use reth_tasks::TaskExecutor; use reth_transaction_pool::TransactionPool; - -use crate::ConfigureEvm; +use std::{future::Future, marker::PhantomData}; /// A helper trait that is downstream of the [`NodeTypesWithEngine`] trait and adds stateful /// components to the node. @@ -63,6 +61,9 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { /// Network API. type Network: FullNetwork; + /// Builds new blocks. + type PayloadBuilder: PayloadBuilder + Clone; + /// Returns the transaction pool of the node. fn pool(&self) -> &Self::Pool; @@ -79,9 +80,7 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { fn network(&self) -> &Self::Network; /// Returns the handle to the payload builder service. - fn payload_builder( - &self, - ) -> &PayloadBuilderHandle<::Engine>; + fn payload_builder(&self) -> &Self::PayloadBuilder; /// Returns the provider of the node. fn provider(&self) -> &Self::Provider; @@ -91,17 +90,17 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { } /// Context passed to [`NodeAddOns::launch_add_ons`], -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct AddOnsContext<'a, N: FullNodeComponents> { /// Node with all configured components. - pub node: &'a N, + pub node: N, /// Node configuration. pub config: &'a NodeConfig<::ChainSpec>, /// Handle to the beacon consensus engine. pub beacon_engine_handle: - &'a BeaconConsensusEngineHandle<::Engine>, + BeaconConsensusEngineHandle<::Engine>, /// JWT secret for the node. - pub jwt_secret: &'a JwtSecret, + pub jwt_secret: JwtSecret, } /// Customizable node add-on types. 
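`AddOnsContext` above now derives `Clone` and owns the engine handle and JWT secret, so launch code hands it out by value instead of threading lifetimes. A hedged construction sketch; `node`, `node_config`, `engine_handle`, and `jwt_secret` are assumed locals of the right types (the launcher hunks later in this diff do exactly this with `ctx.node_adapter().clone()`):

let add_ons_ctx = AddOnsContext {
    node: node.clone(),                  // was `&'a N`
    config: &node_config,                // still a borrow
    beacon_engine_handle: engine_handle, // was a reference
    jwt_secret,                          // was a reference
};
let engine_api_ctx = add_ons_ctx.clone(); // possible now that Clone is derived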
diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index 86f755cb9206..09bdd8b2269a 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -13,7 +13,6 @@ workspace = true [dependencies] ## reth -reth-auto-seal-consensus.workspace = true reth-beacon-consensus.workspace = true reth-blockchain-tree.workspace = true reth-chain-state.workspace = true @@ -63,6 +62,7 @@ reth-transaction-pool.workspace = true ## ethereum alloy-primitives.workspace = true alloy-rpc-types = { workspace = true, features = ["engine"] } +alloy-consensus.workspace = true ## async futures.workspace = true @@ -97,20 +97,20 @@ tempfile.workspace = true [features] default = [] test-utils = [ - "reth-db/test-utils", - "reth-blockchain-tree/test-utils", - "reth-chain-state/test-utils", - "reth-chainspec/test-utils", - "reth-consensus/test-utils", - "reth-engine-tree/test-utils", - "reth-evm/test-utils", - "reth-downloaders/test-utils", - "reth-network/test-utils", - "reth-network-p2p/test-utils", - "reth-payload-builder/test-utils", - "reth-primitives/test-utils", - "reth-stages/test-utils", - "reth-db-api/test-utils", - "reth-provider/test-utils", - "reth-transaction-pool/test-utils" + "reth-db/test-utils", + "reth-blockchain-tree/test-utils", + "reth-chain-state/test-utils", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-engine-tree/test-utils", + "reth-evm/test-utils", + "reth-downloaders/test-utils", + "reth-network/test-utils", + "reth-network-p2p/test-utils", + "reth-payload-builder/test-utils", + "reth-primitives/test-utils", + "reth-stages/test-utils", + "reth-db-api/test-utils", + "reth-provider/test-utils", + "reth-transaction-pool/test-utils", ] diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 82d8d96f6f55..2e00b08f8a56 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -79,7 +79,7 @@ pub type RethFullAdapter = FullNodeTypesAdapter< /// configured components and can interact with the node. /// /// There are convenience functions for networks that come with a preset of types and components via -/// the [Node] trait, see `reth_node_ethereum::EthereumNode` or `reth_optimism_node::OptimismNode`. +/// the [`Node`] trait, see `reth_node_ethereum::EthereumNode` or `reth_optimism_node::OpNode`. /// /// The [`NodeBuilder::node`] function configures the node's types and components in one step. /// diff --git a/crates/node/builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs index e75a07802a6e..16b7d668ca3c 100644 --- a/crates/node/builder/src/builder/states.rs +++ b/crates/node/builder/src/builder/states.rs @@ -5,16 +5,6 @@ //! The node builder process is essentially a state machine that transitions through various states //! before the node can be launched. 
-use std::{fmt, future::Future}; - -use reth_exex::ExExContext; -use reth_node_api::{ - FullNodeComponents, FullNodeTypes, NodeAddOns, NodeTypes, NodeTypesWithDB, NodeTypesWithEngine, -}; -use reth_node_core::node_config::NodeConfig; -use reth_payload_builder::PayloadBuilderHandle; -use reth_tasks::TaskExecutor; - use crate::{ components::{NodeComponents, NodeComponentsBuilder}, hooks::NodeHooks, @@ -22,6 +12,13 @@ use crate::{ rpc::{RethRpcAddOns, RethRpcServerHandles, RpcContext}, AddOns, FullNode, }; +use reth_exex::ExExContext; +use reth_node_api::{ + FullNodeComponents, FullNodeTypes, NodeAddOns, NodeTypes, NodeTypesWithDB, PayloadBuilder, +}; +use reth_node_core::node_config::NodeConfig; +use reth_tasks::TaskExecutor; +use std::{fmt, future::Future}; /// A node builder that also has the configured types. pub struct NodeBuilderWithTypes { @@ -91,12 +88,16 @@ impl> FullNodeTypes for NodeAdapter type Provider = T::Provider; } -impl> FullNodeComponents for NodeAdapter { +impl> FullNodeComponents for NodeAdapter +where + C::PayloadBuilder: PayloadBuilder, +{ type Pool = C::Pool; type Evm = C::Evm; type Executor = C::Executor; - type Network = C::Network; type Consensus = C::Consensus; + type Network = C::Network; + type PayloadBuilder = C::PayloadBuilder; fn pool(&self) -> &Self::Pool { self.components.pool() @@ -110,24 +111,24 @@ impl> FullNodeComponents for NodeAdapter< self.components.block_executor() } - fn provider(&self) -> &Self::Provider { - &self.provider + fn consensus(&self) -> &Self::Consensus { + self.components.consensus() } fn network(&self) -> &Self::Network { self.components.network() } - fn payload_builder(&self) -> &PayloadBuilderHandle<::Engine> { + fn payload_builder(&self) -> &Self::PayloadBuilder { self.components.payload_builder() } - fn task_executor(&self) -> &TaskExecutor { - &self.task_executor + fn provider(&self) -> &Self::Provider { + &self.provider } - fn consensus(&self) -> &Self::Consensus { - self.components.consensus() + fn task_executor(&self) -> &TaskExecutor { + &self.task_executor } } diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs index 48a0ba9b5fdf..95c0c764b5c3 100644 --- a/crates/node/builder/src/components/builder.rs +++ b/crates/node/builder/src/components/builder.rs @@ -1,12 +1,5 @@ //! A generic [`NodeComponentsBuilder`] -use std::{future::Future, marker::PhantomData}; - -use reth_consensus::Consensus; -use reth_evm::execute::BlockExecutorProvider; -use reth_primitives::Header; -use reth_transaction_pool::TransactionPool; - use crate::{ components::{ Components, ConsensusBuilder, ExecutorBuilder, NetworkBuilder, NodeComponents, @@ -14,6 +7,13 @@ use crate::{ }, BuilderContext, ConfigureEvm, FullNodeTypes, }; +use alloy_consensus::Header; +use reth_consensus::Consensus; +use reth_evm::execute::BlockExecutorProvider; +use reth_node_api::NodeTypesWithEngine; +use reth_payload_builder::PayloadBuilderHandle; +use reth_transaction_pool::TransactionPool; +use std::{future::Future, marker::PhantomData}; /// A generic, general purpose and customizable [`NodeComponentsBuilder`] implementation. /// @@ -358,7 +358,10 @@ impl Default for ComponentsBuilder<(), (), (), (), (), ()> { /// A type that's responsible for building the components of the node. 
pub trait NodeComponentsBuilder: Send { /// The components for the node with the given types - type Components: NodeComponents; + type Components: NodeComponents< + Node, + PayloadBuilder = PayloadBuilderHandle<::Engine>, + >; /// Consumes the type and returns the created components. fn build_components( diff --git a/crates/node/builder/src/components/execute.rs b/crates/node/builder/src/components/execute.rs index 90cff588f7cc..4e8f63f412bc 100644 --- a/crates/node/builder/src/components/execute.rs +++ b/crates/node/builder/src/components/execute.rs @@ -1,8 +1,8 @@ //! EVM component for the node builder. use crate::{BuilderContext, FullNodeTypes}; +use alloy_consensus::Header; use reth_evm::execute::BlockExecutorProvider; use reth_node_api::ConfigureEvm; -use reth_primitives::Header; use std::future::Future; /// A type that knows how to build the executor types. diff --git a/crates/node/builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs index 42001fc10056..1fe35e554d51 100644 --- a/crates/node/builder/src/components/mod.rs +++ b/crates/node/builder/src/components/mod.rs @@ -21,17 +21,16 @@ pub use network::*; pub use payload::*; pub use pool::*; +use crate::{ConfigureEvm, FullNodeTypes}; +use alloy_consensus::Header; use reth_consensus::Consensus; use reth_evm::execute::BlockExecutorProvider; use reth_network::NetworkHandle; use reth_network_api::FullNetwork; use reth_node_api::NodeTypesWithEngine; use reth_payload_builder::PayloadBuilderHandle; -use reth_primitives::Header; use reth_transaction_pool::TransactionPool; -use crate::{ConfigureEvm, FullNodeTypes}; - /// An abstraction over the components of a node, consisting of: /// - evm and executor /// - transaction pool @@ -53,6 +52,9 @@ pub trait NodeComponents: Clone + Unpin + Send + Sync + 'stati /// Network API. type Network: FullNetwork; + /// Builds new blocks. + type PayloadBuilder: Clone; + /// Returns the transaction pool of the node. fn pool(&self) -> &Self::Pool; @@ -69,7 +71,7 @@ pub trait NodeComponents: Clone + Unpin + Send + Sync + 'stati fn network(&self) -> &Self::Network; /// Returns the handle to the payload builder service. - fn payload_builder(&self) -> &PayloadBuilderHandle<::Engine>; + fn payload_builder(&self) -> &Self::PayloadBuilder; } /// All the components of the node. 
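With `payload_builder()` returning the new `Self::PayloadBuilder` associated type instead of a concrete `PayloadBuilderHandle`, consumers can stay generic over the builder. A rough sketch; the `FullNodeTypes` bound is an assumption matching how the trait's (elided) generics are used elsewhere in this diff:

fn payload_builder_of<Node: FullNodeTypes, C: NodeComponents<Node>>(
    components: &C,
) -> &C::PayloadBuilder {
    // Whatever builder the components expose, returned by reference.
    components.payload_builder()
}

Concrete stacks still end up with a `PayloadBuilderHandle` for their engine types: the `NodeComponentsBuilder` bound above pins it, and the `Components` impl in the next hunk supplies it.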
@@ -105,6 +107,7 @@ where type Executor = Executor; type Consensus = Cons; type Network = NetworkHandle; + type PayloadBuilder = PayloadBuilderHandle<::Engine>; fn pool(&self) -> &Self::Pool { &self.transaction_pool @@ -126,9 +129,7 @@ where &self.network } - fn payload_builder( - &self, - ) -> &PayloadBuilderHandle<::Engine> { + fn payload_builder(&self) -> &Self::PayloadBuilder { &self.payload_builder } } diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 87308e196a97..fb18f75285b3 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -5,7 +5,6 @@ use std::{sync::Arc, thread::available_parallelism}; use alloy_primitives::{BlockNumber, B256}; use eyre::{Context, OptionExt}; use rayon::ThreadPoolBuilder; -use reth_auto_seal_consensus::MiningMode; use reth_beacon_consensus::EthBeaconConsensus; use reth_blockchain_tree::{ BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, @@ -16,6 +15,7 @@ use reth_consensus::Consensus; use reth_db_api::database::Database; use reth_db_common::init::{init_genesis, InitDatabaseError}; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; +use reth_engine_local::MiningMode; use reth_engine_tree::tree::{InvalidBlockHook, InvalidBlockHooks, NoopInvalidBlockHook}; use reth_evm::noop::NoopBlockExecutorProvider; use reth_fs_util as fs; @@ -52,8 +52,9 @@ use reth_stages::{sets::DefaultStages, MetricEvent, PipelineBuilder, PipelineTar use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, error, info, warn}; +use reth_transaction_pool::TransactionPool; use tokio::sync::{ - mpsc::{unbounded_channel, Receiver, UnboundedSender}, + mpsc::{unbounded_channel, UnboundedSender}, oneshot, watch, }; @@ -386,13 +387,11 @@ impl LaunchContextWith) -> MiningMode { + pub fn dev_mining_mode(&self, pool: impl TransactionPool) -> MiningMode { if let Some(interval) = self.node_config().dev.block_time { MiningMode::interval(interval) - } else if let Some(max_transactions) = self.node_config().dev.block_max_transactions { - MiningMode::instant(max_transactions, pending_transactions_listener) } else { - MiningMode::instant(1, pending_transactions_listener) + MiningMode::instant(pool) } } } @@ -761,7 +760,7 @@ where /// necessary pub async fn max_block(&self, client: C) -> eyre::Result> where - C: HeadersClient, + C: HeadersClient
, { self.node_config().max_block(client, self.provider_factory().clone()).await } diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 3de651cdcd0b..65433176ba99 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -8,7 +8,7 @@ use reth_beacon_consensus::{ use reth_blockchain_tree::BlockchainTreeConfig; use reth_chainspec::EthChainSpec; use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider}; -use reth_engine_local::{LocalEngineService, LocalPayloadAttributesBuilder, MiningMode}; +use reth_engine_local::{LocalEngineService, LocalPayloadAttributesBuilder}; use reth_engine_service::service::{ChainEvent, EngineService}; use reth_engine_tree::{ engine::{EngineApiRequest, EngineRequestHandler}, @@ -208,11 +208,6 @@ where info!(target: "reth::cli", prune_config=?ctx.prune_config().unwrap_or_default(), "Pruner initialized"); let mut engine_service = if ctx.is_dev() { - let mining_mode = if let Some(block_time) = ctx.node_config().dev.block_time { - MiningMode::interval(block_time) - } else { - MiningMode::instant(ctx.components().pool().clone()) - }; let eth_service = LocalEngineService::new( ctx.consensus(), ctx.components().block_executor().clone(), @@ -225,7 +220,7 @@ where ctx.sync_metrics_tx(), consensus_engine_tx.clone(), Box::pin(consensus_engine_stream), - mining_mode, + ctx.dev_mining_mode(ctx.components().pool()), LocalPayloadAttributesBuilder::new(ctx.chain_spec()), ); @@ -286,10 +281,10 @@ where let jwt_secret = ctx.auth_jwt_secret()?; let add_ons_ctx = AddOnsContext { - node: ctx.node_adapter(), + node: ctx.node_adapter().clone(), config: ctx.node_config(), - beacon_engine_handle: &beacon_engine_handle, - jwt_secret: &jwt_secret, + beacon_engine_handle, + jwt_secret, }; let RpcHandle { rpc_server_handles, rpc_registry } = diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index 36aa55541e00..4f9e850c97f1 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -11,7 +11,6 @@ pub use exex::ExExLauncher; use std::{future::Future, sync::Arc}; -use alloy_primitives::utils::format_ether; use futures::{future::Either, stream, stream_select, StreamExt}; use reth_beacon_consensus::{ hooks::{EngineHooks, PruneHook, StaticFileHook}, @@ -23,18 +22,16 @@ use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider, use reth_engine_util::EngineMessageStreamExt; use reth_exex::ExExManagerHandle; use reth_network::{BlockDownloaderProvider, NetworkEventListenerProvider}; -use reth_node_api::{ - AddOnsContext, FullNodeComponents, FullNodeTypes, NodeTypesWithDB, NodeTypesWithEngine, -}; +use reth_node_api::{AddOnsContext, FullNodeTypes, NodeTypesWithDB, NodeTypesWithEngine}; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, exit::NodeExitFuture, }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; use reth_provider::providers::BlockchainProvider; +use reth_rpc::eth::RpcNodeCore; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, info}; -use reth_transaction_pool::TransactionPool; use tokio::sync::{mpsc::unbounded_channel, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; @@ -47,14 +44,14 @@ use crate::{ AddOns, NodeBuilderWithComponents, NodeHandle, }; -/// Alias for [`reth_rpc_eth_types::EthApiBuilderCtx`], adapter for [`FullNodeComponents`]. +/// Alias for [`reth_rpc_eth_types::EthApiBuilderCtx`], adapter for [`RpcNodeCore`]. 
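The `dev_mining_mode` change is small but behavioral: `MiningMode::instant` now takes the pool itself, and `dev.block_max_transactions` no longer participates in mode selection. Condensed from the `launch/common.rs` hunk above (`MiningMode` and `TransactionPool` are the reth types imported there):

use std::time::Duration;

fn dev_mining_mode<P: TransactionPool>(block_time: Option<Duration>, pool: P) -> MiningMode {
    match block_time {
        // --dev.block-time set: mine on a fixed interval.
        Some(interval) => MiningMode::interval(interval),
        // Otherwise mine as soon as the pool reports pending transactions.
        None => MiningMode::instant(pool),
    }
}

The `EthApiBuilderCtx` alias whose doc comment closes the hunk above continues below.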
pub type EthApiBuilderCtx = reth_rpc_eth_types::EthApiBuilderCtx< - ::Provider, - ::Pool, - ::Evm, - ::Network, + ::Provider, + ::Pool, + ::Evm, + ::Network, TaskExecutor, - ::Provider, + ::Provider, >; /// A general purpose trait that launches a new node of any kind. @@ -211,47 +208,7 @@ where let pipeline_exex_handle = exex_manager_handle.clone().unwrap_or_else(ExExManagerHandle::empty); let (pipeline, client) = if ctx.is_dev() { - info!(target: "reth::cli", "Starting Reth in dev mode"); - - for (idx, (address, alloc)) in ctx.chain_spec().genesis().alloc.iter().enumerate() { - info!(target: "reth::cli", "Allocated Genesis Account: {:02}. {} ({} ETH)", idx, address.to_string(), format_ether(alloc.balance)); - } - - // install auto-seal - let mining_mode = - ctx.dev_mining_mode(ctx.components().pool().pending_transactions_listener()); - info!(target: "reth::cli", mode=%mining_mode, "configuring dev mining mode"); - - let (_, client, mut task) = reth_auto_seal_consensus::AutoSealBuilder::new( - ctx.chain_spec(), - ctx.blockchain_db().clone(), - ctx.components().pool().clone(), - consensus_engine_tx.clone(), - mining_mode, - ctx.components().block_executor().clone(), - ) - .build(); - - let pipeline = crate::setup::build_networked_pipeline( - &ctx.toml_config().stages, - client.clone(), - ctx.consensus(), - ctx.provider_factory().clone(), - ctx.task_executor(), - ctx.sync_metrics_tx(), - ctx.prune_config(), - max_block, - static_file_producer, - ctx.components().block_executor().clone(), - pipeline_exex_handle, - )?; - - let pipeline_events = pipeline.events(); - task.set_pipeline_events(pipeline_events); - debug!(target: "reth::cli", "Spawning auto mine task"); - ctx.task_executor().spawn(Box::pin(task)); - - (pipeline, Either::Left(client)) + eyre::bail!("Dev mode is not supported for legacy engine") } else { let pipeline = crate::setup::build_networked_pipeline( &ctx.toml_config().stages, @@ -267,7 +224,7 @@ where pipeline_exex_handle, )?; - (pipeline, Either::Right(network_client.clone())) + (pipeline, network_client.clone()) }; let pipeline_events = pipeline.events(); @@ -330,10 +287,10 @@ where let jwt_secret = ctx.auth_jwt_secret()?; let add_ons_ctx = AddOnsContext { - node: ctx.node_adapter(), + node: ctx.node_adapter().clone(), config: ctx.node_config(), - beacon_engine_handle: &beacon_engine_handle, - jwt_secret: &jwt_secret, + beacon_engine_handle, + jwt_secret, }; let RpcHandle { rpc_server_handles, rpc_registry } = diff --git a/crates/node/builder/src/node.rs b/crates/node/builder/src/node.rs index 3e3d5b696c39..62c710ea8022 100644 --- a/crates/node/builder/src/node.rs +++ b/crates/node/builder/src/node.rs @@ -11,10 +11,10 @@ use reth_node_api::{EngineTypes, FullNodeComponents}; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, node_config::NodeConfig, - rpc::api::EngineApiClient, }; use reth_payload_builder::PayloadBuilderHandle; use reth_provider::ChainSpecProvider; +use reth_rpc_api::EngineApiClient; use reth_rpc_builder::{auth::AuthServerHandle, RpcServerHandle}; use reth_tasks::TaskExecutor; @@ -69,6 +69,8 @@ where type Primitives = ::Primitives; type ChainSpec = ::ChainSpec; + + type StateCommitment = ::StateCommitment; } impl NodeTypesWithEngine for AnyNode diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 18293118dc66..9680c221d7ce 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -14,12 +14,15 @@ use reth_node_api::{ }; use reth_node_core::{ node_config::NodeConfig, - rpc::eth::{EthApiTypes, 
FullEthApiServer}, version::{CARGO_PKG_VERSION, CLIENT_CODE, NAME_CLIENT, VERGEN_GIT_SHA}, }; -use reth_payload_builder::PayloadBuilderHandle; +use reth_payload_builder::PayloadStore; +use reth_payload_primitives::PayloadBuilder; use reth_provider::providers::ProviderNodeTypes; -use reth_rpc::EthApi; +use reth_rpc::{ + eth::{EthApiTypes, FullEthApiServer}, + EthApi, +}; use reth_rpc_api::eth::helpers::AddDevSigners; use reth_rpc_builder::{ auth::{AuthRpcModule, AuthServerHandle}, @@ -196,6 +199,7 @@ pub struct RpcRegistry { Node::Provider, EthApi, Node::Executor, + Node::Consensus, >, } @@ -212,6 +216,7 @@ where Node::Provider, EthApi, Node::Executor, + Node::Consensus, >; fn deref(&self) -> &Self::Target { @@ -290,9 +295,7 @@ where } /// Returns the handle to the payload builder service - pub fn payload_builder( - &self, - ) -> &PayloadBuilderHandle<::Engine> { + pub fn payload_builder(&self) -> &Node::PayloadBuilder { self.node.payload_builder() } } @@ -398,16 +401,21 @@ where impl NodeAddOns for RpcAddOns where - N: FullNodeComponents, + N: FullNodeComponents< + Types: ProviderNodeTypes, + PayloadBuilder: PayloadBuilder::Engine>, + >, EthApi: EthApiTypes + FullEthApiServer + AddDevSigners + Unpin + 'static, EV: EngineValidatorBuilder, { type Handle = RpcHandle; async fn launch_add_ons(self, ctx: AddOnsContext<'_, N>) -> eyre::Result { - let AddOnsContext { node, config, beacon_engine_handle, jwt_secret } = ctx; let Self { eth_api_builder, engine_validator_builder, hooks, _pd: _ } = self; + let engine_validator = engine_validator_builder.build(&ctx).await?; + let AddOnsContext { node, config, beacon_engine_handle, jwt_secret } = ctx; + let client = ClientVersionV1 { code: CLIENT_CODE, name: NAME_CLIENT.to_string(), @@ -418,17 +426,17 @@ where let engine_api = EngineApi::new( node.provider().clone(), config.chain.clone(), - beacon_engine_handle.clone(), - node.payload_builder().clone().into(), + beacon_engine_handle, + PayloadStore::new(node.payload_builder().clone()), node.pool().clone(), Box::new(node.task_executor().clone()), client, EngineCapabilities::default(), - engine_validator_builder.build(&ctx).await?, + engine_validator, ); info!(target: "reth::cli", "Engine API handler initialized"); - let auth_config = config.rpc.auth_server_config(*jwt_secret)?; + let auth_config = config.rpc.auth_server_config(jwt_secret)?; let module_config = config.rpc.transport_rpc_module_config(); debug!(target: "reth::cli", http=?module_config.http(), ws=?module_config.ws(), "Using RPC module config"); @@ -440,6 +448,7 @@ where .with_executor(node.task_executor().clone()) .with_evm_config(node.evm_config().clone()) .with_block_executor(node.block_executor().clone()) + .with_consensus(node.consensus().clone()) .build_with_auth_server(module_config, engine_api, eth_api_builder); // in dev mode we generate 20 random dev-signer accounts diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index 3591868ddad9..db188402ca8b 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -12,7 +12,7 @@ use reth_downloaders::{ use reth_evm::execute::BlockExecutorProvider; use reth_exex::ExExManagerHandle; use reth_network_p2p::{ - bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, BlockClient, + bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, EthBlockClient, }; use reth_provider::{providers::ProviderNodeTypes, ProviderFactory}; use reth_stages::{prelude::DefaultStages, stages::ExecutionStage, Pipeline, 
StageSet}; @@ -38,7 +38,7 @@ pub fn build_networked_pipeline( ) -> eyre::Result> where N: ProviderNodeTypes, - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, Executor: BlockExecutorProvider, { // building network downloaders using the fetch client @@ -84,8 +84,8 @@ pub fn build_pipeline( ) -> eyre::Result> where N: ProviderNodeTypes, - H: HeaderDownloader + 'static, - B: BodyDownloader + 'static, + H: HeaderDownloader
+ 'static, + B: BodyDownloader + 'static, Executor: BlockExecutorProvider, { let mut builder = Pipeline::::builder(); diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index 0c9672d17779..c667a56293c1 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -13,7 +13,9 @@ workspace = true [dependencies] # reth reth-chainspec.workspace = true +reth-consensus.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-cli-util.workspace = true reth-db = { workspace = true, features = ["mdbx"] } reth-storage-errors.workspace = true @@ -23,8 +25,6 @@ reth-network-p2p.workspace = true reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true reth-rpc-types-compat.workspace = true -reth-rpc-api = { workspace = true, features = ["client"] } -reth-rpc-eth-api = { workspace = true, features = ["client"] } reth-transaction-pool.workspace = true reth-tracing.workspace = true reth-config.workspace = true @@ -32,13 +32,12 @@ reth-discv4.workspace = true reth-discv5.workspace = true reth-net-nat.workspace = true reth-network-peers.workspace = true -reth-consensus-common.workspace = true reth-prune-types.workspace = true reth-stages-types.workspace = true # ethereum alloy-primitives.workspace = true -alloy-rpc-types-engine = { workspace = true, features = ["jwt"] } +alloy-rpc-types-engine = { workspace = true, features = ["std", "jwt"] } alloy-consensus.workspace = true alloy-eips.workspace = true diff --git a/crates/node/core/src/args/database.rs b/crates/node/core/src/args/database.rs index 0eec6639a117..5b9d6ae61e21 100644 --- a/crates/node/core/src/args/database.rs +++ b/crates/node/core/src/args/database.rs @@ -1,6 +1,6 @@ //! clap [Args](clap::Args) for database configuration -use std::time::Duration; +use std::{fmt, str::FromStr, time::Duration}; use crate::version::default_client_version; use clap::{ @@ -22,6 +22,12 @@ pub struct DatabaseArgs { /// NFS volume. #[arg(long = "db.exclusive")] pub exclusive: Option, + /// Maximum database size (e.g., 4TB, 8MB) + #[arg(long = "db.max-size", value_parser = parse_byte_size)] + pub max_size: Option, + /// Database growth step (e.g., 4GB, 4KB) + #[arg(long = "db.growth-step", value_parser = parse_byte_size)] + pub growth_step: Option, /// Read transaction timeout in seconds, 0 means no timeout. #[arg(long = "db.read-transaction-timeout")] pub read_transaction_timeout: Option, @@ -33,8 +39,9 @@ impl DatabaseArgs { self.get_database_args(default_client_version()) } - /// Returns the database arguments with configured log level and given client version. - pub const fn get_database_args( + /// Returns the database arguments with configured log level, client version, + /// max read transaction duration, and geometry. + pub fn get_database_args( &self, client_version: ClientVersion, ) -> reth_db::mdbx::DatabaseArguments { @@ -48,6 +55,8 @@ impl DatabaseArgs { .with_log_level(self.log_level) .with_exclusive(self.exclusive) .with_max_read_transaction_duration(max_read_transaction_duration) + .with_geometry_max_size(self.max_size) + .with_growth_step(self.growth_step) } } @@ -89,10 +98,84 @@ impl TypedValueParser for LogLevelValueParser { Some(Box::new(values)) } } + +/// Size in bytes. 
+#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] +pub struct ByteSize(pub usize); + +impl From<ByteSize> for usize { + fn from(s: ByteSize) -> Self { + s.0 + } +} + +impl FromStr for ByteSize { + type Err = String; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + let s = s.trim().to_uppercase(); + let parts: Vec<&str> = s.split_whitespace().collect(); + + let (num_str, unit) = match parts.len() { + 1 => { + let (num, unit) = + s.split_at(s.find(|c: char| c.is_alphabetic()).unwrap_or(s.len())); + (num, unit) + } + 2 => (parts[0], parts[1]), + _ => { + return Err("Invalid format. Use '<number><unit>' or '<number> <unit>'.".to_string()) + } + }; + + let num: usize = num_str.parse().map_err(|_| "Invalid number".to_string())?; + + let multiplier = match unit { + "B" | "" => 1, // Assume bytes if no unit is specified + "KB" => 1024, + "MB" => 1024 * 1024, + "GB" => 1024 * 1024 * 1024, + "TB" => 1024 * 1024 * 1024 * 1024, + _ => return Err(format!("Invalid unit: {}. Use B, KB, MB, GB, or TB.", unit)), + }; + + Ok(Self(num * multiplier)) + } +} + +impl fmt::Display for ByteSize { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + const KB: usize = 1024; + const MB: usize = KB * 1024; + const GB: usize = MB * 1024; + const TB: usize = GB * 1024; + + let (size, unit) = if self.0 >= TB { + (self.0 as f64 / TB as f64, "TB") + } else if self.0 >= GB { + (self.0 as f64 / GB as f64, "GB") + } else if self.0 >= MB { + (self.0 as f64 / MB as f64, "MB") + } else if self.0 >= KB { + (self.0 as f64 / KB as f64, "KB") + } else { + (self.0 as f64, "B") + }; + + write!(f, "{:.2}{}", size, unit) + } +} + +/// Value parser function that supports various formats. +fn parse_byte_size(s: &str) -> Result<usize, String> { + s.parse::<ByteSize>().map(Into::into) +} + #[cfg(test)] mod tests { use super::*; use clap::Parser; + use reth_db::mdbx::{GIGABYTE, KILOBYTE, MEGABYTE, TERABYTE}; /// A helper type to parse Args more easily #[derive(Parser)] @@ -108,6 +191,101 @@ mod tests { assert_eq!(args, default_args); } + #[test] + fn test_command_parser_with_valid_max_size() { + let cmd = CommandParser::<DatabaseArgs>::try_parse_from([ + "reth", + "--db.max-size", + "4398046511104", + ]) + .unwrap(); + assert_eq!(cmd.args.max_size, Some(TERABYTE * 4)); + } + + #[test] + fn test_command_parser_with_invalid_max_size() { + let result = + CommandParser::<DatabaseArgs>::try_parse_from(["reth", "--db.max-size", "invalid"]); + assert!(result.is_err()); + } + + #[test] + fn test_command_parser_with_valid_growth_step() { + let cmd = CommandParser::<DatabaseArgs>::try_parse_from([ + "reth", + "--db.growth-step", + "4294967296", + ]) + .unwrap(); + assert_eq!(cmd.args.growth_step, Some(GIGABYTE * 4)); + } + + #[test] + fn test_command_parser_with_invalid_growth_step() { + let result = + CommandParser::<DatabaseArgs>::try_parse_from(["reth", "--db.growth-step", "invalid"]); + assert!(result.is_err()); + } + + #[test] + fn test_command_parser_with_valid_max_size_and_growth_step_from_str() { + let cmd = CommandParser::<DatabaseArgs>::try_parse_from([ + "reth", + "--db.max-size", + "2TB", + "--db.growth-step", + "1GB", + ]) + .unwrap(); + assert_eq!(cmd.args.max_size, Some(TERABYTE * 2)); + assert_eq!(cmd.args.growth_step, Some(GIGABYTE)); + + let cmd = CommandParser::<DatabaseArgs>::try_parse_from([ + "reth", + "--db.max-size", + "12MB", + "--db.growth-step", + "2KB", + ]) + .unwrap(); + assert_eq!(cmd.args.max_size, Some(MEGABYTE * 12)); + assert_eq!(cmd.args.growth_step, Some(KILOBYTE * 2)); + + // with spaces + let cmd = CommandParser::<DatabaseArgs>::try_parse_from([ + "reth", + "--db.max-size", + "12 MB", + "--db.growth-step", + "2 KB", + ]) + .unwrap(); + assert_eq!(cmd.args.max_size,
Some(MEGABYTE * 12)); + assert_eq!(cmd.args.growth_step, Some(KILOBYTE * 2)); + + let cmd = CommandParser::<DatabaseArgs>::try_parse_from([ + "reth", + "--db.max-size", + "1073741824", + "--db.growth-step", + "1048576", + ]) + .unwrap(); + assert_eq!(cmd.args.max_size, Some(GIGABYTE)); + assert_eq!(cmd.args.growth_step, Some(MEGABYTE)); + } + + #[test] + fn test_command_parser_max_size_and_growth_step_from_str_invalid_unit() { + let result = + CommandParser::<DatabaseArgs>::try_parse_from(["reth", "--db.growth-step", "1 PB"]); + assert!(result.is_err()); + + let result = + CommandParser::<DatabaseArgs>::try_parse_from(["reth", "--db.max-size", "2PB"]); + assert!(result.is_err()); + } + #[test] fn test_possible_values() { // Initialize the LogLevelValueParser diff --git a/crates/node/core/src/args/payload_builder.rs b/crates/node/core/src/args/payload_builder.rs index 524a93195de1..cd7ba7dccfb5 100644 --- a/crates/node/core/src/args/payload_builder.rs +++ b/crates/node/core/src/args/payload_builder.rs @@ -86,7 +86,7 @@ impl TypedValueParser for ExtradataValueParser { ) -> Result { let val = value.to_str().ok_or_else(|| clap::Error::new(clap::error::ErrorKind::InvalidUtf8))?; - if val.as_bytes().len() > MAXIMUM_EXTRA_DATA_SIZE { + if val.len() > MAXIMUM_EXTRA_DATA_SIZE { return Err(clap::Error::raw( clap::error::ErrorKind::InvalidValue, format!( diff --git a/crates/node/core/src/args/rpc_server.rs b/crates/node/core/src/args/rpc_server.rs index 382f22d37764..fe9b80cec472 100644 --- a/crates/node/core/src/args/rpc_server.rs +++ b/crates/node/core/src/args/rpc_server.rs @@ -1,11 +1,13 @@ //! clap [Args](clap::Args) for RPC related arguments. use std::{ + collections::HashSet, ffi::OsStr, net::{IpAddr, Ipv4Addr}, path::PathBuf, }; +use alloy_primitives::Address; use alloy_rpc_types_engine::JwtSecret; use clap::{ builder::{PossibleValue, RangedU64ValueParser, TypedValueParser}, @@ -183,6 +185,11 @@ pub struct RpcServerArgs { #[arg(long = "rpc.proof-permits", alias = "rpc-proof-permits", value_name = "COUNT", default_value_t = constants::DEFAULT_PROOF_PERMITS)] pub rpc_proof_permits: usize, + /// Path to file containing disallowed addresses, json-encoded list of strings. Block + /// validation API will reject blocks containing transactions from these addresses. + #[arg(long = "builder.disallow", value_name = "PATH", value_parser = reth_cli_util::parsers::read_json_from_file::<HashSet<Address>>)] + pub builder_disallow: Option<HashSet<Address>>, + /// State cache configuration. #[command(flatten)] pub rpc_state_cache: RpcStateCacheArgs, @@ -199,6 +206,12 @@ impl RpcServerArgs { self } + /// Configures modules for the HTTP-RPC server. + pub fn with_http_api(mut self, http_api: RpcModuleSelection) -> Self { + self.http_api = Some(http_api); + self + } + /// Enables the WS-RPC server.
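The new `--db.max-size` and `--db.growth-step` flags in `database.rs` above accept raw byte counts as well as unit-suffixed strings via `ByteSize`. A quick illustration of the accepted inputs, assuming `ByteSize` is in scope (values are arbitrary):

use std::str::FromStr;

fn demo_byte_size() {
    assert_eq!(ByteSize::from_str("4TB").unwrap().0, 4 * 1024usize.pow(4));
    assert_eq!(ByteSize::from_str("4 GB").unwrap().0, 4 * 1024usize.pow(3)); // space allowed
    assert_eq!(ByteSize::from_str("1024").unwrap().0, 1024); // bare number means bytes
    assert!(ByteSize::from_str("1 PB").is_err()); // PB is not a supported unit
    assert_eq!(ByteSize(4 * 1024 * 1024).to_string(), "4.00MB"); // Display formatting
}

Back in `RpcServerArgs`, the builder-style setters continue below.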
pub const fn with_ws(mut self) -> Self { self.ws = true; @@ -318,6 +331,7 @@ impl Default for RpcServerArgs { gas_price_oracle: GasPriceOracleArgs::default(), rpc_state_cache: RpcStateCacheArgs::default(), rpc_proof_permits: constants::DEFAULT_PROOF_PERMITS, + builder_disallow: Default::default(), } } } diff --git a/crates/node/core/src/args/txpool.rs b/crates/node/core/src/args/txpool.rs index 538315101adf..a8ea1d9cdba9 100644 --- a/crates/node/core/src/args/txpool.rs +++ b/crates/node/core/src/args/txpool.rs @@ -9,9 +9,9 @@ use reth_transaction_pool::{ pool::{NEW_TX_LISTENER_BUFFER_SIZE, PENDING_TX_LISTENER_BUFFER_SIZE}, validate::DEFAULT_MAX_TX_INPUT_BYTES, LocalTransactionConfig, PoolConfig, PriceBumpConfig, SubPoolLimit, DEFAULT_PRICE_BUMP, - DEFAULT_TXPOOL_ADDITIONAL_VALIDATION_TASKS, REPLACE_BLOB_PRICE_BUMP, - TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, - TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, + DEFAULT_TXPOOL_ADDITIONAL_VALIDATION_TASKS, MAX_NEW_PENDING_TXS_NOTIFICATIONS, + REPLACE_BLOB_PRICE_BUMP, TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, + TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, }; /// Parameters for debugging purposes #[derive(Debug, Clone, Args, PartialEq, Eq)] @@ -86,6 +86,11 @@ pub struct TxPoolArgs { /// Maximum number of new transactions to buffer #[arg(long = "txpool.max-new-txns", alias = "txpool.max_new_txns", default_value_t = NEW_TX_LISTENER_BUFFER_SIZE)] pub new_tx_listener_buffer_size: usize, + + /// How many new pending transactions to buffer and send to in progress pending transaction + /// iterators. + #[arg(long = "txpool.max-new-pending-txs-notifications", alias = "txpool.max-new-pending-txs-notifications", default_value_t = MAX_NEW_PENDING_TXS_NOTIFICATIONS)] + pub max_new_pending_txs_notifications: usize, } impl Default for TxPoolArgs { @@ -110,6 +115,7 @@ impl Default for TxPoolArgs { additional_validation_tasks: DEFAULT_TXPOOL_ADDITIONAL_VALIDATION_TASKS, pending_tx_listener_buffer_size: PENDING_TX_LISTENER_BUFFER_SIZE, new_tx_listener_buffer_size: NEW_TX_LISTENER_BUFFER_SIZE, + max_new_pending_txs_notifications: MAX_NEW_PENDING_TXS_NOTIFICATIONS, } } } @@ -125,19 +131,19 @@ impl RethTransactionPoolConfig for TxPoolArgs { }, pending_limit: SubPoolLimit { max_txs: self.pending_max_count, - max_size: self.pending_max_size * 1024 * 1024, + max_size: self.pending_max_size.saturating_mul(1024 * 1024), }, basefee_limit: SubPoolLimit { max_txs: self.basefee_max_count, - max_size: self.basefee_max_size * 1024 * 1024, + max_size: self.basefee_max_size.saturating_mul(1024 * 1024), }, queued_limit: SubPoolLimit { max_txs: self.queued_max_count, - max_size: self.queued_max_size * 1024 * 1024, + max_size: self.queued_max_size.saturating_mul(1024 * 1024), }, blob_limit: SubPoolLimit { max_txs: self.queued_max_count, - max_size: self.queued_max_size * 1024 * 1024, + max_size: self.queued_max_size.saturating_mul(1024 * 1024), }, max_account_slots: self.max_account_slots, price_bumps: PriceBumpConfig { @@ -148,6 +154,7 @@ impl RethTransactionPoolConfig for TxPoolArgs { gas_limit: self.gas_limit, pending_tx_listener_buffer_size: self.pending_tx_listener_buffer_size, new_tx_listener_buffer_size: self.new_tx_listener_buffer_size, + max_new_pending_txs_notifications: self.max_new_pending_txs_notifications, } } } diff --git a/crates/node/core/src/lib.rs b/crates/node/core/src/lib.rs index 6af822e22eeb..a69a255a3c67 100644 --- a/crates/node/core/src/lib.rs +++ b/crates/node/core/src/lib.rs @@ -22,15 +22,6 @@ pub mod primitives { /// 
Re-export of `reth_rpc_*` crates. pub mod rpc { - /// Re-exported from `reth_rpc_api`. - pub mod api { - pub use reth_rpc_api::*; - } - /// Re-exported from `reth_rpc::eth`. - pub mod eth { - pub use reth_rpc_eth_api::*; - } - /// Re-exported from `reth_rpc::rpc`. pub mod result { pub use reth_rpc_server_types::result::*; diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index 80fb5152e7bb..24d5588b6884 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -8,6 +8,7 @@ use crate::{ dirs::{ChainPath, DataDirPath}, utils::get_single_header, }; +use alloy_consensus::BlockHeader; use eyre::eyre; use reth_chainspec::{ChainSpec, EthChainSpec, MAINNET}; use reth_config::config::PruneConfig; @@ -15,8 +16,9 @@ use reth_network_p2p::headers::client::HeadersClient; use serde::{de::DeserializeOwned, Serialize}; use std::{fs, path::Path}; +use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockNumber, B256}; -use reth_primitives::{BlockHashOrNumber, Head, SealedHeader}; +use reth_primitives::{Head, SealedHeader}; use reth_stages_types::StageId; use reth_storage_api::{ BlockHashReader, DatabaseProviderFactory, HeaderProvider, StageCheckpointReader, @@ -272,7 +274,7 @@ impl NodeConfig { ) -> eyre::Result> where Provider: HeaderProvider, - Client: HeadersClient, + Client: HeadersClient, { let max_block = if let Some(block) = self.debug.max_block { Some(block) @@ -331,7 +333,7 @@ impl NodeConfig { ) -> ProviderResult where Provider: HeaderProvider, - Client: HeadersClient, + Client: HeadersClient, { let header = provider.header_by_hash_or_number(tip.into())?; @@ -341,7 +343,7 @@ impl NodeConfig { return Ok(header.number) } - Ok(self.fetch_tip_from_network(client, tip.into()).await.number) + Ok(self.fetch_tip_from_network(client, tip.into()).await.number()) } /// Attempt to look up the block with the given number and return the header. @@ -351,9 +353,9 @@ impl NodeConfig { &self, client: Client, tip: BlockHashOrNumber, - ) -> SealedHeader + ) -> SealedHeader where - Client: HeadersClient, + Client: HeadersClient, { info!(target: "reth::cli", ?tip, "Fetching tip block from the network."); let mut fetch_failures = 0; diff --git a/crates/node/core/src/utils.rs b/crates/node/core/src/utils.rs index a64d12114558..e52af4b46fe1 100644 --- a/crates/node/core/src/utils.rs +++ b/crates/node/core/src/utils.rs @@ -1,21 +1,18 @@ //! Utility functions for node startup and shutdown, for example path parsing and retrieving single //! blocks from the network. 
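In the `utils.rs` hunk that follows, `get_single_header` is rewritten on top of the `get_header_with_priority` helper and `HeadersRequest::one`, then seals the response and verifies it actually answers the request. A sketch of that identity check (types from `alloy` as imported below):

use alloy_eips::BlockHashOrNumber;
use alloy_primitives::B256;

fn response_matches(id: BlockHashOrNumber, hash: B256, number: u64) -> bool {
    match id {
        // Asked by hash: the sealed header's hash must equal it.
        BlockHashOrNumber::Hash(requested) => hash == requested,
        // Asked by number: the header's number must equal it.
        BlockHashOrNumber::Number(requested) => number == requested,
    }
}

On a mismatch the peer is reported via `report_bad_message`, exactly as in the single-body path.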
-use alloy_primitives::Sealable; +use alloy_consensus::BlockHeader; +use alloy_eips::BlockHashOrNumber; use alloy_rpc_types_engine::{JwtError, JwtSecret}; use eyre::Result; -use reth_chainspec::ChainSpec; -use reth_consensus_common::validation::validate_block_pre_execution; +use reth_consensus::Consensus; use reth_network_p2p::{ - bodies::client::BodiesClient, - headers::client::{HeadersClient, HeadersDirection, HeadersRequest}, - priority::Priority, + bodies::client::BodiesClient, headers::client::HeadersClient, priority::Priority, }; -use reth_primitives::{BlockHashOrNumber, SealedBlock, SealedHeader}; +use reth_primitives::{SealedBlock, SealedHeader}; use std::{ env::VarError, path::{Path, PathBuf}, - sync::Arc, }; use tracing::{debug, info}; @@ -40,27 +37,22 @@ pub fn get_or_create_jwt_secret_from_path(path: &Path) -> Result( client: Client, id: BlockHashOrNumber, -) -> Result +) -> Result> where - Client: HeadersClient, + Client: HeadersClient, { - let request = HeadersRequest { direction: HeadersDirection::Rising, limit: 1, start: id }; + let (peer_id, response) = client.get_header_with_priority(id, Priority::High).await?.split(); - let (peer_id, response) = - client.get_headers_with_priority(request, Priority::High).await?.split(); - - if response.len() != 1 { + let Some(header) = response else { client.report_bad_message(peer_id); - eyre::bail!("Invalid number of headers received. Expected: 1. Received: {}", response.len()) - } + eyre::bail!("Invalid number of headers received. Expected: 1. Received: 0") + }; - let sealed_header = response.into_iter().next().unwrap().seal_slow(); - let (header, seal) = sealed_header.into_parts(); - let header = SealedHeader::new(header, seal); + let header = SealedHeader::seal(header); let valid = match id { BlockHashOrNumber::Hash(hash) => header.hash() == hash, - BlockHashOrNumber::Number(number) => header.number == number, + BlockHashOrNumber::Number(number) => header.number() == number, }; if !valid { @@ -76,25 +68,23 @@ where } /// Get a body from network based on header -pub async fn get_single_body( +pub async fn get_single_body( client: Client, - chain_spec: Arc, - header: SealedHeader, -) -> Result + header: SealedHeader, + consensus: impl Consensus, +) -> Result> where Client: BodiesClient, { let (peer_id, response) = client.get_block_body(header.hash()).await?.split(); - if response.is_none() { + let Some(body) = response else { client.report_bad_message(peer_id); eyre::bail!("Invalid number of bodies received. Expected: 1. 
Received: 0") - } + }; - let body = response.unwrap(); let block = SealedBlock { header, body }; - - validate_block_pre_execution(&block, &chain_spec)?; + consensus.validate_block_pre_execution(&block)?; Ok(block) } diff --git a/crates/node/core/src/version.rs b/crates/node/core/src/version.rs index 84fcf3f0f11e..4bf2dc56f39b 100644 --- a/crates/node/core/src/version.rs +++ b/crates/node/core/src/version.rs @@ -144,9 +144,6 @@ mod tests { #[test] fn assert_extradata_less_32bytes() { let extradata = default_extradata(); - assert!( - extradata.as_bytes().len() <= 32, - "extradata must be less than 32 bytes: {extradata}" - ) + assert!(extradata.len() <= 32, "extradata must be less than 32 bytes: {extradata}") } } diff --git a/crates/node/events/Cargo.toml b/crates/node/events/Cargo.toml index 3b515b8ab5e9..7a5b1cf3b02a 100644 --- a/crates/node/events/Cargo.toml +++ b/crates/node/events/Cargo.toml @@ -12,20 +12,19 @@ workspace = true [dependencies] # reth -reth-provider.workspace = true +reth-storage-api.workspace = true reth-beacon-consensus.workspace = true -reth-network = { workspace = true, features = ["serde"] } reth-network-api.workspace = true reth-stages.workspace = true reth-prune.workspace = true -reth-static-file.workspace = true -reth-primitives.workspace = true +reth-static-file-types.workspace = true reth-primitives-traits.workspace = true # ethereum alloy-primitives.workspace = true alloy-rpc-types-engine.workspace = true alloy-consensus.workspace = true +alloy-eips.workspace = true # async tokio.workspace = true diff --git a/crates/node/events/src/cl.rs b/crates/node/events/src/cl.rs index 6d29c9bbfa29..bf0d4a59b213 100644 --- a/crates/node/events/src/cl.rs +++ b/crates/node/events/src/cl.rs @@ -1,7 +1,7 @@ //! Events related to Consensus Layer health. 
use futures::Stream; -use reth_provider::CanonChainTracker; +use reth_storage_api::CanonChainTracker; use std::{ fmt, pin::Pin, diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index 92f8cb5e0fe7..39c6355e36ef 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -8,13 +8,11 @@ use futures::Stream; use reth_beacon_consensus::{ BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress, ForkchoiceStatus, }; -use reth_network::NetworkEvent; -use reth_network_api::PeersInfo; -use reth_primitives::constants; +use reth_network_api::{NetworkEvent, PeersInfo}; use reth_primitives_traits::{format_gas, format_gas_throughput}; use reth_prune::PrunerEvent; use reth_stages::{EntitiesCheckpoint, ExecOutput, PipelineEvent, StageCheckpoint, StageId}; -use reth_static_file::StaticFileProducerEvent; +use reth_static_file_types::StaticFileProducerEvent; use std::{ fmt::{Display, Formatter}, future::Future, @@ -265,8 +263,8 @@ impl NodeState { gas_throughput=%format_gas_throughput(block.header.gas_used, elapsed), full=%format!("{:.1}%", block.header.gas_used as f64 * 100.0 / block.header.gas_limit as f64), base_fee=%format!("{:.2}gwei", block.header.base_fee_per_gas.unwrap_or(0) as f64 / GWEI_TO_WEI as f64), - blobs=block.header.blob_gas_used.unwrap_or(0) / constants::eip4844::DATA_GAS_PER_BLOB, - excess_blobs=block.header.excess_blob_gas.unwrap_or(0) / constants::eip4844::DATA_GAS_PER_BLOB, + blobs=block.header.blob_gas_used.unwrap_or(0) / alloy_eips::eip4844::DATA_GAS_PER_BLOB, + excess_blobs=block.header.excess_blob_gas.unwrap_or(0) / alloy_eips::eip4844::DATA_GAS_PER_BLOB, ?elapsed, "Block added to canonical chain" ); diff --git a/crates/node/types/Cargo.toml b/crates/node/types/Cargo.toml index 5747abe9c34b..588fe7c4062f 100644 --- a/crates/node/types/Cargo.toml +++ b/crates/node/types/Cargo.toml @@ -15,5 +15,12 @@ workspace = true reth-chainspec.workspace = true reth-db-api.workspace = true reth-engine-primitives.workspace = true -reth-primitives.workspace = true reth-primitives-traits.workspace = true +reth-trie-db.workspace = true + +[features] +default = ["std"] +std = [ + "reth-primitives-traits/std", + "reth-chainspec/std", +] \ No newline at end of file diff --git a/crates/node/types/src/lib.rs b/crates/node/types/src/lib.rs index 5ba03e6795ab..f8770a3c0147 100644 --- a/crates/node/types/src/lib.rs +++ b/crates/node/types/src/lib.rs @@ -7,10 +7,13 @@ )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] -pub use reth_primitives_traits::{Block, BlockBody}; +pub use reth_primitives_traits::{ + Block, BlockBody, FullBlock, FullNodePrimitives, FullReceipt, FullSignedTx, NodePrimitives, +}; -use std::marker::PhantomData; +use core::marker::PhantomData; use reth_chainspec::EthChainSpec; use reth_db_api::{ @@ -18,16 +21,7 @@ use reth_db_api::{ Database, }; use reth_engine_primitives::EngineTypes; - -/// Configures all the primitive types of the node. -pub trait NodePrimitives { - /// Block primitive. - type Block; -} - -impl NodePrimitives for () { - type Block = reth_primitives::Block; -} +use reth_trie_db::StateCommitment; /// The type that configures the essential types of an Ethereum-like node. /// @@ -39,6 +33,8 @@ pub trait NodeTypes: Send + Sync + Unpin + 'static { type Primitives: NodePrimitives; /// The type used for configuration of the EVM. type ChainSpec: EthChainSpec; + /// The type used to perform state commitment operations. 
+    type StateCommitment: StateCommitment;
 }

 /// The type that configures an Ethereum-like node with an engine for consensus.
@@ -89,6 +85,7 @@ where
 {
     type Primitives = Types::Primitives;
     type ChainSpec = Types::ChainSpec;
+    type StateCommitment = Types::StateCommitment;
 }

 impl<Types, DB> NodeTypesWithEngine for NodeTypesWithDBAdapter<Types, DB>
@@ -109,70 +106,85 @@ where

 /// A [`NodeTypes`] type builder.
 #[derive(Default, Debug)]
-pub struct AnyNodeTypes<P, C>(PhantomData<P>, PhantomData<C>);
+pub struct AnyNodeTypes<P, C, S>(PhantomData<P>, PhantomData<C>, PhantomData<S>);

-impl<P, C> AnyNodeTypes<P, C> {
+impl<P, C, S> AnyNodeTypes<P, C, S> {
     /// Sets the `Primitives` associated type.
-    pub const fn primitives<T>(self) -> AnyNodeTypes<T, C> {
-        AnyNodeTypes::<T, C>(PhantomData::<T>, PhantomData::<C>)
+    pub const fn primitives<T>(self) -> AnyNodeTypes<T, C, S> {
+        AnyNodeTypes::<T, C, S>(PhantomData::<T>, PhantomData::<C>, PhantomData::<S>)
     }

     /// Sets the `ChainSpec` associated type.
-    pub const fn chain_spec<T>(self) -> AnyNodeTypes<P, T> {
-        AnyNodeTypes::<P, T>(PhantomData::<P>, PhantomData::<T>)
+    pub const fn chain_spec<T>(self) -> AnyNodeTypes<P, T, S> {
+        AnyNodeTypes::<P, T, S>(PhantomData::<P>, PhantomData::<T>, PhantomData::<S>)
+    }
+
+    /// Sets the `StateCommitment` associated type.
+    pub const fn state_commitment<T>(self) -> AnyNodeTypes<P, C, T> {
+        AnyNodeTypes::<P, C, T>(PhantomData::<P>, PhantomData::<C>, PhantomData::<T>)
     }
 }

-impl<P, C> NodeTypes for AnyNodeTypes<P, C>
+impl<P, C, S> NodeTypes for AnyNodeTypes<P, C, S>
 where
     P: NodePrimitives + Send + Sync + Unpin + 'static,
     C: EthChainSpec + 'static,
+    S: StateCommitment,
 {
     type Primitives = P;
     type ChainSpec = C;
+    type StateCommitment = S;
 }

 /// A [`NodeTypesWithEngine`] type builder.
 #[derive(Default, Debug)]
-pub struct AnyNodeTypesWithEngine<P, E, C> {
+pub struct AnyNodeTypesWithEngine<P, E, C, S> {
     /// Embedding the basic node types.
-    base: AnyNodeTypes<P, C>,
+    base: AnyNodeTypes<P, C, S>,
     /// Phantom data for the engine.
     _engine: PhantomData<E>,
 }

-impl<P, E, C> AnyNodeTypesWithEngine<P, E, C> {
+impl<P, E, C, S> AnyNodeTypesWithEngine<P, E, C, S> {
     /// Sets the `Primitives` associated type.
-    pub const fn primitives<T>(self) -> AnyNodeTypesWithEngine<T, E, C> {
+    pub const fn primitives<T>(self) -> AnyNodeTypesWithEngine<T, E, C, S> {
         AnyNodeTypesWithEngine { base: self.base.primitives::<T>(), _engine: PhantomData }
     }

     /// Sets the `Engine` associated type.
-    pub const fn engine<T>(self) -> AnyNodeTypesWithEngine<P, T, C> {
+    pub const fn engine<T>(self) -> AnyNodeTypesWithEngine<P, T, C, S> {
         AnyNodeTypesWithEngine { base: self.base, _engine: PhantomData::<T> }
     }

     /// Sets the `ChainSpec` associated type.
-    pub const fn chain_spec<T>(self) -> AnyNodeTypesWithEngine<P, E, T> {
+    pub const fn chain_spec<T>(self) -> AnyNodeTypesWithEngine<P, E, T, S> {
         AnyNodeTypesWithEngine { base: self.base.chain_spec::<T>(), _engine: PhantomData }
     }
+
+    /// Sets the `StateCommitment` associated type.
+    pub const fn state_commitment<T>(self) -> AnyNodeTypesWithEngine<P, E, C, T> {
+        AnyNodeTypesWithEngine { base: self.base.state_commitment::<T>(), _engine: PhantomData }
+    }
 }

-impl<P, E, C> NodeTypes for AnyNodeTypesWithEngine<P, E, C>
+impl<P, E, C, S> NodeTypes for AnyNodeTypesWithEngine<P, E, C, S>
 where
     P: NodePrimitives + Send + Sync + Unpin + 'static,
     E: EngineTypes + Send + Sync + Unpin,
     C: EthChainSpec + 'static,
+    S: StateCommitment,
 {
     type Primitives = P;
     type ChainSpec = C;
+    type StateCommitment = S;
 }

-impl<P, E, C> NodeTypesWithEngine for AnyNodeTypesWithEngine<P, E, C>
+impl<P, E, C, S> NodeTypesWithEngine for AnyNodeTypesWithEngine<P, E, C, S>
 where
     P: NodePrimitives + Send + Sync + Unpin + 'static,
     E: EngineTypes + Send + Sync + Unpin,
     C: EthChainSpec + 'static,
+    S: StateCommitment,
 {
     type Engine = E;
 }
diff --git a/crates/optimism/bin/src/main.rs b/crates/optimism/bin/src/main.rs
index c6d3e32b7cf1..6494298ba393 100644
--- a/crates/optimism/bin/src/main.rs
+++ b/crates/optimism/bin/src/main.rs
@@ -5,7 +5,7 @@
 use clap::Parser;
 use reth_node_builder::{engine_tree_config::TreeConfig, EngineNodeLauncher};
 use reth_optimism_cli::{chainspec::OpChainSpecParser, Cli};
-use reth_optimism_node::{args::RollupArgs, node::OptimismAddOns, OptimismNode};
+use reth_optimism_node::{args::RollupArgs, node::OpAddOns, OpNode};
 use reth_provider::providers::BlockchainProvider2;
 use tracing as _;
@@ -23,17 +23,20 @@ fn main() {
     if let Err(err) = Cli::<OpChainSpecParser, RollupArgs>::parse().run(|builder, rollup_args| async move {
-        let enable_engine2 = rollup_args.experimental;
+        if rollup_args.experimental {
+            tracing::warn!(target: "reth::cli", "The experimental engine is now the default, and the --engine.experimental flag is deprecated.
To enable the legacy functionality, use --engine.legacy."); + } + let use_legacy_engine = rollup_args.legacy; let sequencer_http_arg = rollup_args.sequencer_http.clone(); - match enable_engine2 { - true => { + match use_legacy_engine { + false => { let engine_tree_config = TreeConfig::default() .with_persistence_threshold(rollup_args.persistence_threshold) .with_memory_block_buffer_target(rollup_args.memory_block_buffer_target); let handle = builder - .with_types_and_provider::>() - .with_components(OptimismNode::components(rollup_args)) - .with_add_ons(OptimismAddOns::new(sequencer_http_arg)) + .with_types_and_provider::>() + .with_components(OpNode::components(rollup_args)) + .with_add_ons(OpAddOns::new(sequencer_http_arg)) .launch_with_fn(|builder| { let launcher = EngineNodeLauncher::new( builder.task_executor().clone(), @@ -46,9 +49,9 @@ fn main() { handle.node_exit_future.await } - false => { + true => { let handle = - builder.node(OptimismNode::new(rollup_args.clone())).launch().await?; + builder.node(OpNode::new(rollup_args.clone())).launch().await?; handle.node_exit_future.await } diff --git a/crates/optimism/chainspec/src/base.rs b/crates/optimism/chainspec/src/base.rs index 7aa26bf9a645..f43457ead432 100644 --- a/crates/optimism/chainspec/src/base.rs +++ b/crates/optimism/chainspec/src/base.rs @@ -6,7 +6,7 @@ use alloy_chains::Chain; use alloy_primitives::{b256, U256}; use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_ethereum_forks::EthereumHardfork; -use reth_optimism_forks::OptimismHardfork; +use reth_optimism_forks::OpHardfork; use crate::{LazyLock, OpChainSpec}; @@ -21,11 +21,11 @@ pub static BASE_MAINNET: LazyLock> = LazyLock::new(|| { "f712aa9241cc24369b143cf6dce85f0902a9731e70d66818a3a5845b296c73dd" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks: OptimismHardfork::base_mainnet(), + hardforks: OpHardfork::base_mainnet(), base_fee_params: BaseFeeParamsKind::Variable( vec![ (EthereumHardfork::London.boxed(), BaseFeeParams::optimism()), - (OptimismHardfork::Canyon.boxed(), BaseFeeParams::optimism_canyon()), + (OpHardfork::Canyon.boxed(), BaseFeeParams::optimism_canyon()), ] .into(), ), diff --git a/crates/optimism/chainspec/src/base_sepolia.rs b/crates/optimism/chainspec/src/base_sepolia.rs index b992dcabaf68..adcb9e2bc1f4 100644 --- a/crates/optimism/chainspec/src/base_sepolia.rs +++ b/crates/optimism/chainspec/src/base_sepolia.rs @@ -6,7 +6,7 @@ use alloy_chains::Chain; use alloy_primitives::{b256, U256}; use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_ethereum_forks::EthereumHardfork; -use reth_optimism_forks::OptimismHardfork; +use reth_optimism_forks::OpHardfork; use crate::{LazyLock, OpChainSpec}; @@ -21,11 +21,11 @@ pub static BASE_SEPOLIA: LazyLock> = LazyLock::new(|| { "0dcc9e089e30b90ddfc55be9a37dd15bc551aeee999d2e2b51414c54eaf934e4" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks: OptimismHardfork::base_sepolia(), + hardforks: OpHardfork::base_sepolia(), base_fee_params: BaseFeeParamsKind::Variable( vec![ (EthereumHardfork::London.boxed(), BaseFeeParams::base_sepolia()), - (OptimismHardfork::Canyon.boxed(), BaseFeeParams::base_sepolia_canyon()), + (OpHardfork::Canyon.boxed(), BaseFeeParams::base_sepolia_canyon()), ] .into(), ), diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index 03ce75aec040..d552d08f18ca 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ 
b/crates/optimism/chainspec/src/lib.rs @@ -17,26 +17,26 @@ mod dev; mod op; mod op_sepolia; -use alloc::{vec, vec::Vec}; +use alloc::{boxed::Box, vec, vec::Vec}; use alloy_chains::Chain; +use alloy_consensus::Header; use alloy_genesis::Genesis; -use alloy_primitives::{B256, U256}; +use alloy_primitives::{Bytes, B256, U256}; pub use base::BASE_MAINNET; pub use base_sepolia::BASE_SEPOLIA; -use derive_more::{Constructor, Deref, From, Into}; +use derive_more::{Constructor, Deref, Display, From, Into}; pub use dev::OP_DEV; #[cfg(not(feature = "std"))] pub(crate) use once_cell::sync::Lazy as LazyLock; pub use op::OP_MAINNET; pub use op_sepolia::OP_SEPOLIA; use reth_chainspec::{ - BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder, DepositContract, - DisplayHardforks, EthChainSpec, EthereumHardforks, ForkFilter, ForkId, Hardforks, Head, + BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder, DepositContract, EthChainSpec, + EthereumHardforks, ForkFilter, ForkId, Hardforks, Head, }; use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition, Hardfork}; use reth_network_peers::NodeRecord; -use reth_optimism_forks::OptimismHardforks; -use reth_primitives_traits::Header; +use reth_optimism_forks::OpHardforks; #[cfg(feature = "std")] pub(crate) use std::sync::LazyLock; @@ -96,7 +96,7 @@ impl OpChainSpecBuilder { } /// Remove the given fork from the spec. - pub fn without_fork(mut self, fork: reth_optimism_forks::OptimismHardfork) -> Self { + pub fn without_fork(mut self, fork: reth_optimism_forks::OpHardfork) -> Self { self.inner = self.inner.without_fork(fork); self } @@ -104,19 +104,17 @@ impl OpChainSpecBuilder { /// Enable Bedrock at genesis pub fn bedrock_activated(mut self) -> Self { self.inner = self.inner.paris_activated(); - self.inner = self - .inner - .with_fork(reth_optimism_forks::OptimismHardfork::Bedrock, ForkCondition::Block(0)); + self.inner = + self.inner.with_fork(reth_optimism_forks::OpHardfork::Bedrock, ForkCondition::Block(0)); self } /// Enable Regolith at genesis pub fn regolith_activated(mut self) -> Self { self = self.bedrock_activated(); - self.inner = self.inner.with_fork( - reth_optimism_forks::OptimismHardfork::Regolith, - ForkCondition::Timestamp(0), - ); + self.inner = self + .inner + .with_fork(reth_optimism_forks::OpHardfork::Regolith, ForkCondition::Timestamp(0)); self } @@ -127,7 +125,7 @@ impl OpChainSpecBuilder { self.inner = self.inner.with_fork(EthereumHardfork::Shanghai, ForkCondition::Timestamp(0)); self.inner = self .inner - .with_fork(reth_optimism_forks::OptimismHardfork::Canyon, ForkCondition::Timestamp(0)); + .with_fork(reth_optimism_forks::OpHardfork::Canyon, ForkCondition::Timestamp(0)); self } @@ -137,7 +135,7 @@ impl OpChainSpecBuilder { self.inner = self.inner.with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(0)); self.inner = self .inner - .with_fork(reth_optimism_forks::OptimismHardfork::Ecotone, ForkCondition::Timestamp(0)); + .with_fork(reth_optimism_forks::OpHardfork::Ecotone, ForkCondition::Timestamp(0)); self } @@ -146,7 +144,7 @@ impl OpChainSpecBuilder { self = self.ecotone_activated(); self.inner = self .inner - .with_fork(reth_optimism_forks::OptimismHardfork::Fjord, ForkCondition::Timestamp(0)); + .with_fork(reth_optimism_forks::OpHardfork::Fjord, ForkCondition::Timestamp(0)); self } @@ -155,7 +153,16 @@ impl OpChainSpecBuilder { self = self.fjord_activated(); self.inner = self .inner - .with_fork(reth_optimism_forks::OptimismHardfork::Granite, ForkCondition::Timestamp(0)); + 
.with_fork(reth_optimism_forks::OpHardfork::Granite, ForkCondition::Timestamp(0));
+        self
+    }
+
+    /// Enable Holocene at genesis
+    pub fn holocene_activated(mut self) -> Self {
+        self = self.granite_activated();
+        self.inner = self
+            .inner
+            .with_fork(reth_optimism_forks::OpHardfork::Holocene, ForkCondition::Timestamp(0));
         self
     }
@@ -177,6 +184,74 @@ pub struct OpChainSpec {
     pub inner: ChainSpec,
 }

+impl OpChainSpec {
+    /// Read from parent to determine the base fee for the next block
+    pub fn next_block_base_fee(
+        &self,
+        parent: &Header,
+        timestamp: u64,
+    ) -> Result<U256, DecodeError> {
+        let is_holocene_activated = self
+            .inner
+            .is_fork_active_at_timestamp(reth_optimism_forks::OpHardfork::Holocene, timestamp);
+        // If we are in the Holocene, we need to use the base fee params
+        // from the parent block's extra data.
+        // Else, use the base fee params (default values) from chainspec
+        if is_holocene_activated {
+            let (denominator, elasticity) = decode_holocene_1559_params(parent.extra_data.clone())?;
+            if elasticity == 0 && denominator == 0 {
+                return Ok(U256::from(
+                    parent
+                        .next_block_base_fee(self.base_fee_params_at_timestamp(timestamp))
+                        .unwrap_or_default(),
+                ));
+            }
+            let base_fee_params = BaseFeeParams::new(denominator as u128, elasticity as u128);
+            Ok(U256::from(parent.next_block_base_fee(base_fee_params).unwrap_or_default()))
+        } else {
+            Ok(U256::from(
+                parent
+                    .next_block_base_fee(self.base_fee_params_at_timestamp(timestamp))
+                    .unwrap_or_default(),
+            ))
+        }
+    }
+}
+
+#[derive(Clone, Debug, Display, Eq, PartialEq)]
+/// Error type for decoding Holocene 1559 parameters
+pub enum DecodeError {
+    #[display("Insufficient data to decode")]
+    /// Insufficient data to decode
+    InsufficientData,
+    #[display("Invalid denominator parameter")]
+    /// Invalid denominator parameter
+    InvalidDenominator,
+    #[display("Invalid elasticity parameter")]
+    /// Invalid elasticity parameter
+    InvalidElasticity,
+}
+
+impl core::error::Error for DecodeError {
+    fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
+        // None of the errors have sub-errors
+        None
+    }
+}
+
+/// Extracts the Holocene 1559 parameters from the encoded form:
+///
+pub fn decode_holocene_1559_params(extra_data: Bytes) -> Result<(u32, u32), DecodeError> {
+    if extra_data.len() < 9 {
+        return Err(DecodeError::InsufficientData);
+    }
+    let denominator: [u8; 4] =
+        extra_data[1..5].try_into().map_err(|_| DecodeError::InvalidDenominator)?;
+    let elasticity: [u8; 4] =
+        extra_data[5..9].try_into().map_err(|_| DecodeError::InvalidElasticity)?;
+    Ok((u32::from_be_bytes(denominator), u32::from_be_bytes(elasticity)))
+}
+
 impl EthChainSpec for OpChainSpec {
     fn chain(&self) -> alloy_chains::Chain {
         self.inner.chain()
     }
@@ -202,8 +277,8 @@ impl EthChainSpec for OpChainSpec {
         self.inner.prune_delete_limit()
     }

-    fn display_hardforks(&self) -> DisplayHardforks {
-        self.inner.display_hardforks()
+    fn display_hardforks(&self) -> Box<dyn Display> {
+        Box::new(ChainSpec::display_hardforks(self))
     }

     fn genesis_header(&self) -> &Header {
@@ -261,12 +336,12 @@ impl EthereumHardforks for OpChainSpec {
     }
 }

-impl OptimismHardforks for OpChainSpec {}
+impl OpHardforks for OpChainSpec {}

 impl From<Genesis> for OpChainSpec {
     fn from(genesis: Genesis) -> Self {
-        use reth_optimism_forks::OptimismHardfork;
-        let optimism_genesis_info = OptimismGenesisInfo::extract_from(&genesis);
+        use reth_optimism_forks::OpHardfork;
+        let optimism_genesis_info = OpGenesisInfo::extract_from(&genesis);
         let genesis_info =
optimism_genesis_info.optimism_chain_info.genesis_info.unwrap_or_default(); @@ -284,7 +359,7 @@ impl From for OpChainSpec { (EthereumHardfork::London.boxed(), genesis.config.london_block), (EthereumHardfork::ArrowGlacier.boxed(), genesis.config.arrow_glacier_block), (EthereumHardfork::GrayGlacier.boxed(), genesis.config.gray_glacier_block), - (OptimismHardfork::Bedrock.boxed(), genesis_info.bedrock_block), + (OpHardfork::Bedrock.boxed(), genesis_info.bedrock_block), ]; let mut block_hardforks = hardfork_opts .into_iter() @@ -312,11 +387,12 @@ impl From for OpChainSpec { (EthereumHardfork::Shanghai.boxed(), genesis.config.shanghai_time), (EthereumHardfork::Cancun.boxed(), genesis.config.cancun_time), (EthereumHardfork::Prague.boxed(), genesis.config.prague_time), - (OptimismHardfork::Regolith.boxed(), genesis_info.regolith_time), - (OptimismHardfork::Canyon.boxed(), genesis_info.canyon_time), - (OptimismHardfork::Ecotone.boxed(), genesis_info.ecotone_time), - (OptimismHardfork::Fjord.boxed(), genesis_info.fjord_time), - (OptimismHardfork::Granite.boxed(), genesis_info.granite_time), + (OpHardfork::Regolith.boxed(), genesis_info.regolith_time), + (OpHardfork::Canyon.boxed(), genesis_info.canyon_time), + (OpHardfork::Ecotone.boxed(), genesis_info.ecotone_time), + (OpHardfork::Fjord.boxed(), genesis_info.fjord_time), + (OpHardfork::Granite.boxed(), genesis_info.granite_time), + (OpHardfork::Holocene.boxed(), genesis_info.holocene_time), ]; let mut time_hardforks = time_hardfork_opts @@ -329,7 +405,7 @@ impl From for OpChainSpec { block_hardforks.append(&mut time_hardforks); // Ordered Hardforks - let mainnet_hardforks = OptimismHardfork::op_mainnet(); + let mainnet_hardforks = OpHardfork::op_mainnet(); let mainnet_order = mainnet_hardforks.forks_iter(); let mut ordered_hardforks = Vec::with_capacity(block_hardforks.len()); @@ -356,15 +432,15 @@ impl From for OpChainSpec { } #[derive(Default, Debug)] -struct OptimismGenesisInfo { - optimism_chain_info: op_alloy_rpc_types::genesis::OpChainInfo, +struct OpGenesisInfo { + optimism_chain_info: op_alloy_rpc_types::OpChainInfo, base_fee_params: BaseFeeParamsKind, } -impl OptimismGenesisInfo { +impl OpGenesisInfo { fn extract_from(genesis: &Genesis) -> Self { let mut info = Self { - optimism_chain_info: op_alloy_rpc_types::genesis::OpChainInfo::extract_from( + optimism_chain_info: op_alloy_rpc_types::OpChainInfo::extract_from( &genesis.config.extra_fields, ) .unwrap_or_default(), @@ -385,7 +461,7 @@ impl OptimismGenesisInfo { BaseFeeParams::new(denominator as u128, elasticity as u128), ), ( - reth_optimism_forks::OptimismHardfork::Canyon.boxed(), + reth_optimism_forks::OpHardfork::Canyon.boxed(), BaseFeeParams::new(canyon_denominator as u128, elasticity as u128), ), ] @@ -405,11 +481,13 @@ impl OptimismGenesisInfo { #[cfg(test)] mod tests { + use std::sync::Arc; + use alloy_genesis::{ChainConfig, Genesis}; use alloy_primitives::b256; use reth_chainspec::{test_fork_ids, BaseFeeParams, BaseFeeParamsKind}; use reth_ethereum_forks::{EthereumHardfork, ForkCondition, ForkHash, ForkId, Head}; - use reth_optimism_forks::{OptimismHardfork, OptimismHardforks}; + use reth_optimism_forks::{OpHardfork, OpHardforks}; use crate::*; @@ -495,7 +573,11 @@ mod tests { ), ( Head { number: 0, timestamp: 1723478400, ..Default::default() }, - ForkId { hash: ForkHash([0x75, 0xde, 0xa4, 0x1e]), next: 0 }, + ForkId { hash: ForkHash([0x75, 0xde, 0xa4, 0x1e]), next: 1732633200 }, + ), + ( + Head { number: 0, timestamp: 1732633200, ..Default::default() }, + ForkId { hash: 
ForkHash([0x4a, 0x1c, 0x79, 0x2e]), next: 0 }, ), ], ); @@ -562,7 +644,11 @@ mod tests { ), ( Head { number: 0, timestamp: 1723478400, ..Default::default() }, - ForkId { hash: ForkHash([0x5e, 0xdf, 0xa3, 0xb6]), next: 0 }, + ForkId { hash: ForkHash([0x5e, 0xdf, 0xa3, 0xb6]), next: 1732633200 }, + ), + ( + Head { number: 0, timestamp: 1732633200, ..Default::default() }, + ForkId { hash: ForkHash([0x8b, 0x5e, 0x76, 0x29]), next: 0 }, ), ], ); @@ -644,6 +730,7 @@ mod tests { "ecotoneTime": 40, "fjordTime": 50, "graniteTime": 51, + "holoceneTime": 52, "optimism": { "eip1559Elasticity": 60, "eip1559Denominator": 70 @@ -665,6 +752,8 @@ mod tests { assert_eq!(actual_fjord_timestamp, Some(serde_json::Value::from(50)).as_ref()); let actual_granite_timestamp = genesis.config.extra_fields.get("graniteTime"); assert_eq!(actual_granite_timestamp, Some(serde_json::Value::from(51)).as_ref()); + let actual_holocene_timestamp = genesis.config.extra_fields.get("holoceneTime"); + assert_eq!(actual_holocene_timestamp, Some(serde_json::Value::from(52)).as_ref()); let optimism_object = genesis.config.extra_fields.get("optimism").unwrap(); assert_eq!( @@ -682,19 +771,21 @@ mod tests { BaseFeeParamsKind::Constant(BaseFeeParams::new(70, 60)) ); - assert!(!chain_spec.is_fork_active_at_block(OptimismHardfork::Bedrock, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Ecotone, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Granite, 0)); - - assert!(chain_spec.is_fork_active_at_block(OptimismHardfork::Bedrock, 10)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 20)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, 30)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Ecotone, 40)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, 50)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Granite, 51)); + assert!(!chain_spec.is_fork_active_at_block(OpHardfork::Bedrock, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Ecotone, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Fjord, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Granite, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Holocene, 0)); + + assert!(chain_spec.is_fork_active_at_block(OpHardfork::Bedrock, 10)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, 20)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, 30)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Ecotone, 40)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Fjord, 50)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Granite, 51)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Holocene, 52)); } #[test] @@ -708,6 +799,7 @@ mod tests { "ecotoneTime": 40, "fjordTime": 50, "graniteTime": 51, + "holoceneTime": 52, "optimism": { "eip1559Elasticity": 60, "eip1559Denominator": 70, @@ -730,6 +822,8 @@ mod tests { assert_eq!(actual_fjord_timestamp, 
Some(serde_json::Value::from(50)).as_ref()); let actual_granite_timestamp = genesis.config.extra_fields.get("graniteTime"); assert_eq!(actual_granite_timestamp, Some(serde_json::Value::from(51)).as_ref()); + let actual_holocene_timestamp = genesis.config.extra_fields.get("holoceneTime"); + assert_eq!(actual_holocene_timestamp, Some(serde_json::Value::from(52)).as_ref()); let optimism_object = genesis.config.extra_fields.get("optimism").unwrap(); assert_eq!( @@ -748,30 +842,32 @@ mod tests { BaseFeeParamsKind::Variable( vec![ (EthereumHardfork::London.boxed(), BaseFeeParams::new(70, 60)), - (OptimismHardfork::Canyon.boxed(), BaseFeeParams::new(80, 60)), + (OpHardfork::Canyon.boxed(), BaseFeeParams::new(80, 60)), ] .into() ) ); - assert!(!chain_spec.is_fork_active_at_block(OptimismHardfork::Bedrock, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Ecotone, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Granite, 0)); - - assert!(chain_spec.is_fork_active_at_block(OptimismHardfork::Bedrock, 10)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 20)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, 30)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Ecotone, 40)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, 50)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Granite, 51)); + assert!(!chain_spec.is_fork_active_at_block(OpHardfork::Bedrock, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Ecotone, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Fjord, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Granite, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Holocene, 0)); + + assert!(chain_spec.is_fork_active_at_block(OpHardfork::Bedrock, 10)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, 20)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, 30)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Ecotone, 40)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Fjord, 50)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Granite, 51)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Holocene, 52)); } #[test] fn parse_genesis_optimism_with_variable_base_fee_params() { - use op_alloy_rpc_types::genesis::OpBaseFeeInfo; + use op_alloy_rpc_types::OpBaseFeeInfo; let geth_genesis = r#" { @@ -840,14 +936,14 @@ mod tests { }) ); - assert!(chainspec.is_fork_active_at_block(OptimismHardfork::Bedrock, 0)); + assert!(chainspec.is_fork_active_at_block(OpHardfork::Bedrock, 0)); - assert!(chainspec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 20)); + assert!(chainspec.is_fork_active_at_timestamp(OpHardfork::Regolith, 20)); } #[test] fn test_fork_order_optimism_mainnet() { - use reth_optimism_forks::OptimismHardfork; + use reth_optimism_forks::OpHardfork; let genesis = Genesis { config: ChainConfig { @@ -878,6 +974,7 @@ mod tests { (String::from("ecotoneTime"), 0.into()), 
(String::from("fjordTime"), 0.into()), (String::from("graniteTime"), 0.into()), + (String::from("holoceneTime"), 0.into()), ] .into_iter() .collect(), @@ -903,14 +1000,15 @@ mod tests { EthereumHardfork::ArrowGlacier.boxed(), EthereumHardfork::GrayGlacier.boxed(), EthereumHardfork::Paris.boxed(), - OptimismHardfork::Bedrock.boxed(), - OptimismHardfork::Regolith.boxed(), + OpHardfork::Bedrock.boxed(), + OpHardfork::Regolith.boxed(), EthereumHardfork::Shanghai.boxed(), - OptimismHardfork::Canyon.boxed(), + OpHardfork::Canyon.boxed(), EthereumHardfork::Cancun.boxed(), - OptimismHardfork::Ecotone.boxed(), - OptimismHardfork::Fjord.boxed(), - OptimismHardfork::Granite.boxed(), + OpHardfork::Ecotone.boxed(), + OpHardfork::Fjord.boxed(), + OpHardfork::Granite.boxed(), + OpHardfork::Holocene.boxed(), ]; assert!(expected_hardforks @@ -919,4 +1017,87 @@ mod tests { .all(|(expected, actual)| &**expected == *actual)); assert_eq!(expected_hardforks.len(), hardforks.len()); } + + #[test] + fn test_get_base_fee_pre_holocene() { + let op_chain_spec = &BASE_SEPOLIA; + let parent = Header { + base_fee_per_gas: Some(1), + gas_used: 15763614, + gas_limit: 144000000, + ..Default::default() + }; + let base_fee = op_chain_spec.next_block_base_fee(&parent, 0); + assert_eq!( + base_fee.unwrap(), + U256::from( + parent + .next_block_base_fee(op_chain_spec.base_fee_params_at_timestamp(0)) + .unwrap_or_default() + ) + ); + } + + fn holocene_chainspec() -> Arc { + let mut hardforks = OpHardfork::base_sepolia(); + hardforks.insert(OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1800000000)); + Arc::new(OpChainSpec { + inner: ChainSpec { + chain: BASE_SEPOLIA.inner.chain, + genesis: BASE_SEPOLIA.inner.genesis.clone(), + genesis_hash: BASE_SEPOLIA.inner.genesis_hash.clone(), + paris_block_and_final_difficulty: Some((0, U256::from(0))), + hardforks, + base_fee_params: BASE_SEPOLIA.inner.base_fee_params.clone(), + max_gas_limit: crate::constants::BASE_SEPOLIA_MAX_GAS_LIMIT, + prune_delete_limit: 10000, + ..Default::default() + }, + }) + } + + #[test] + fn test_get_base_fee_holocene_extra_data_not_set() { + let op_chain_spec = holocene_chainspec(); + let parent = Header { + base_fee_per_gas: Some(1), + gas_used: 15763614, + gas_limit: 144000000, + timestamp: 1800000003, + extra_data: Bytes::from_static(&[0, 0, 0, 0, 0, 0, 0, 0, 0]), + ..Default::default() + }; + let base_fee = op_chain_spec.next_block_base_fee(&parent, 1800000005); + assert_eq!( + base_fee.unwrap(), + U256::from( + parent + .next_block_base_fee(op_chain_spec.base_fee_params_at_timestamp(0)) + .unwrap_or_default() + ) + ); + } + + #[test] + fn test_get_base_fee_holocene_extra_data_set() { + let op_chain_spec = holocene_chainspec(); + let parent = Header { + base_fee_per_gas: Some(1), + gas_used: 15763614, + gas_limit: 144000000, + extra_data: Bytes::from_static(&[0, 0, 0, 0, 8, 0, 0, 0, 8]), + timestamp: 1800000003, + ..Default::default() + }; + + let base_fee = op_chain_spec.next_block_base_fee(&parent, 1800000005); + assert_eq!( + base_fee.unwrap(), + U256::from( + parent + .next_block_base_fee(BaseFeeParams::new(0x00000008, 0x00000008)) + .unwrap_or_default() + ) + ); + } } diff --git a/crates/optimism/chainspec/src/op.rs b/crates/optimism/chainspec/src/op.rs index 5afb236cd33e..fcbe7dee7dda 100644 --- a/crates/optimism/chainspec/src/op.rs +++ b/crates/optimism/chainspec/src/op.rs @@ -7,7 +7,7 @@ use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use alloy_primitives::{b256, U256}; use reth_chainspec::{once_cell_set, BaseFeeParams, 
BaseFeeParamsKind, ChainSpec}; use reth_ethereum_forks::EthereumHardfork; -use reth_optimism_forks::OptimismHardfork; +use reth_optimism_forks::OpHardfork; use crate::{LazyLock, OpChainSpec}; @@ -24,11 +24,11 @@ pub static OP_MAINNET: LazyLock> = LazyLock::new(|| { "7ca38a1916c42007829c55e69d3e9a73265554b586a499015373241b8a3fa48b" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks: OptimismHardfork::op_mainnet(), + hardforks: OpHardfork::op_mainnet(), base_fee_params: BaseFeeParamsKind::Variable( vec![ (EthereumHardfork::London.boxed(), BaseFeeParams::optimism()), - (OptimismHardfork::Canyon.boxed(), BaseFeeParams::optimism_canyon()), + (OpHardfork::Canyon.boxed(), BaseFeeParams::optimism_canyon()), ] .into(), ), diff --git a/crates/optimism/chainspec/src/op_sepolia.rs b/crates/optimism/chainspec/src/op_sepolia.rs index 31c9eda6bddd..35466cb21548 100644 --- a/crates/optimism/chainspec/src/op_sepolia.rs +++ b/crates/optimism/chainspec/src/op_sepolia.rs @@ -7,7 +7,7 @@ use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use alloy_primitives::{b256, U256}; use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_ethereum_forks::EthereumHardfork; -use reth_optimism_forks::OptimismHardfork; +use reth_optimism_forks::OpHardfork; use crate::{LazyLock, OpChainSpec}; @@ -22,11 +22,11 @@ pub static OP_SEPOLIA: LazyLock> = LazyLock::new(|| { "102de6ffb001480cc9b8b548fd05c34cd4f46ae4aa91759393db90ea0409887d" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks: OptimismHardfork::op_sepolia(), + hardforks: OpHardfork::op_sepolia(), base_fee_params: BaseFeeParamsKind::Variable( vec![ (EthereumHardfork::London.boxed(), BaseFeeParams::optimism_sepolia()), - (OptimismHardfork::Canyon.boxed(), BaseFeeParams::optimism_sepolia_canyon()), + (OpHardfork::Canyon.boxed(), BaseFeeParams::optimism_sepolia_canyon()), ] .into(), ), diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index a2ba71214f5c..198e5377ec4d 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -47,11 +47,15 @@ reth-node-builder.workspace = true reth-tracing.workspace = true # eth +alloy-eips.workspace = true +alloy-consensus = { workspace = true, optional = true } alloy-primitives.workspace = true alloy-rlp.workspace = true # misc futures-util.workspace = true +derive_more = { workspace = true, optional = true } +serde = { workspace = true, optional = true } clap = { workspace = true, features = ["derive", "env"] } @@ -67,9 +71,7 @@ eyre.workspace = true # reth test-vectors proptest = { workspace = true, optional = true } -op-alloy-consensus = { workspace = true, features = [ - "arbitrary", -], optional = true } +op-alloy-consensus = { workspace = true, optional = true } [dev-dependencies] @@ -80,6 +82,10 @@ reth-cli-commands.workspace = true [features] optimism = [ + "op-alloy-consensus", + "alloy-consensus", + "dep:derive_more", + "dep:serde", "reth-primitives/optimism", "reth-optimism-evm/optimism", "reth-provider/optimism", @@ -87,7 +93,8 @@ optimism = [ "reth-optimism-node/optimism", "reth-execution-types/optimism", "reth-db/optimism", - "reth-db-api/optimism" + "reth-db-api/optimism", + "reth-downloaders/optimism" ] asm-keccak = [ "alloy-primitives/asm-keccak", @@ -104,6 +111,13 @@ jemalloc = [ dev = [ "dep:proptest", - "reth-cli-commands/arbitrary", - "op-alloy-consensus" + "reth-cli-commands/arbitrary" +] +serde = [ + "alloy-consensus?/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + 
"op-alloy-consensus?/serde", + "reth-execution-types/serde", + "reth-provider/serde" ] diff --git a/crates/optimism/cli/src/commands/init_state/mod.rs b/crates/optimism/cli/src/commands/init_state.rs similarity index 87% rename from crates/optimism/cli/src/commands/init_state/mod.rs rename to crates/optimism/cli/src/commands/init_state.rs index 3537f89e7519..68f5d9a585f6 100644 --- a/crates/optimism/cli/src/commands/init_state/mod.rs +++ b/crates/optimism/cli/src/commands/init_state.rs @@ -6,7 +6,8 @@ use reth_cli_commands::common::{AccessRights, Environment}; use reth_db_common::init::init_from_state_dump; use reth_node_builder::NodeTypesWithEngine; use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_primitives::bedrock::BEDROCK_HEADER; +use reth_optimism_primitives::bedrock::{BEDROCK_HEADER, BEDROCK_HEADER_HASH, BEDROCK_HEADER_TTD}; +use reth_primitives::SealedHeader; use reth_provider::{ BlockNumReader, ChainSpecProvider, DatabaseProviderFactory, StaticFileProviderFactory, StaticFileWriter, @@ -14,8 +15,6 @@ use reth_provider::{ use std::{fs::File, io::BufReader}; use tracing::info; -mod bedrock; - /// Initializes the database with the genesis block. #[derive(Debug, Parser)] pub struct InitStateCommandOp { @@ -53,7 +52,12 @@ impl> InitStateCommandOp { let last_block_number = provider_rw.last_block_number()?; if last_block_number == 0 { - bedrock::setup_op_mainnet_without_ovm(&provider_rw, &static_file_provider)?; + reth_cli_commands::init_state::without_evm::setup_without_evm( + &provider_rw, + &static_file_provider, + SealedHeader::new(BEDROCK_HEADER, BEDROCK_HEADER_HASH), + BEDROCK_HEADER_TTD, + )?; // SAFETY: it's safe to commit static files, since in the event of a crash, they // will be unwinded according to database checkpoints. diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index 43d12616484d..23eaa99b5213 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -27,6 +27,11 @@ pub mod commands; /// made for op-erigon's import needs). pub mod receipt_file_codec; +/// OVM block, same as EVM block at bedrock, except for signature of deposit transaction +/// not having a signature back then. +/// Enables decoding and encoding `Block` types within file contexts. 
+pub mod ovm_file_codec; + pub use commands::{import::ImportOpCommand, import_receipts::ImportReceiptsOpCommand}; use reth_optimism_chainspec::OpChainSpec; @@ -47,7 +52,7 @@ use reth_node_core::{ version::{LONG_VERSION, SHORT_VERSION}, }; use reth_optimism_evm::OpExecutorProvider; -use reth_optimism_node::OptimismNode; +use reth_optimism_node::OpNode; use reth_tracing::FileWorkerGuard; use tracing::info; @@ -145,30 +150,28 @@ where runner.run_command_until_exit(|ctx| command.execute(ctx, launcher)) } Commands::Init(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) + runner.run_blocking_until_ctrl_c(command.execute::()) } Commands::InitState(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) + runner.run_blocking_until_ctrl_c(command.execute::()) } Commands::ImportOp(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) + runner.run_blocking_until_ctrl_c(command.execute::()) } Commands::ImportReceiptsOp(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) + runner.run_blocking_until_ctrl_c(command.execute::()) } Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), - Commands::Db(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) - } + Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute::()), Commands::Stage(command) => runner.run_command_until_exit(|ctx| { - command.execute::(ctx, OpExecutorProvider::optimism) + command.execute::(ctx, OpExecutorProvider::optimism) }), Commands::P2P(command) => runner.run_until_ctrl_c(command.execute()), Commands::Config(command) => runner.run_until_ctrl_c(command.execute()), Commands::Recover(command) => { - runner.run_command_until_exit(|ctx| command.execute::(ctx)) + runner.run_command_until_exit(|ctx| command.execute::(ctx)) } - Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::()), + Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::()), #[cfg(feature = "dev")] Commands::TestVectors(command) => runner.run_until_ctrl_c(command.execute()), } diff --git a/crates/optimism/cli/src/ovm_file_codec.rs b/crates/optimism/cli/src/ovm_file_codec.rs new file mode 100644 index 000000000000..b29d30093ecc --- /dev/null +++ b/crates/optimism/cli/src/ovm_file_codec.rs @@ -0,0 +1,382 @@ +use alloy_consensus::{ + transaction::{from_eip155_value, RlpEcdsaTx}, + Header, TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy, +}; +use alloy_eips::{ + eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, + eip4895::Withdrawals, +}; +use alloy_primitives::{ + bytes::{Buf, BytesMut}, + keccak256, PrimitiveSignature as Signature, TxHash, B256, U256, +}; +use alloy_rlp::{Decodable, Error as RlpError, RlpDecodable}; +use derive_more::{AsRef, Deref}; +use op_alloy_consensus::TxDeposit; +use reth_downloaders::file_client::FileClientError; +use reth_primitives::transaction::{Transaction, TxType}; +use serde::{Deserialize, Serialize}; +use tokio_util::codec::Decoder; + +#[allow(dead_code)] +/// Specific codec for reading raw block bodies from a file +/// with optimism-specific signature handling +pub(crate) struct OvmBlockFileCodec; + +impl Decoder for OvmBlockFileCodec { + type Item = Block; + type Error = FileClientError; + + fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { + if src.is_empty() { + return Ok(None); + } + + let buf_slice = &mut src.as_ref(); + let body = + Block::decode(buf_slice).map_err(|err| FileClientError::Rlp(err, src.to_vec()))?; + 
src.advance(src.len() - buf_slice.len());
+        Ok(Some(body))
+    }
+}
+
+/// OVM block, same as an EVM block, but with different transaction signature handling:
+/// pre-Bedrock system transactions on Optimism were sent from the zero address
+/// with an empty signature.
+#[derive(Debug, Clone, PartialEq, Eq, RlpDecodable)]
+pub struct Block {
+    /// Block header
+    pub header: Header,
+    /// Block body
+    pub body: BlockBody,
+}
+
+impl Block {
+    /// Decodes a `Block` from the given byte slice.
+    pub fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
+        let header = Header::decode(buf)?;
+        let body = BlockBody::decode(buf)?;
+        Ok(Self { header, body })
+    }
+}
+
+/// The body of a block for OVM
+#[derive(Debug, Clone, PartialEq, Eq, Default, RlpDecodable)]
+#[rlp(trailing)]
+pub struct BlockBody {
+    /// Transactions in the block
+    pub transactions: Vec<TransactionSigned>,
+    /// Uncle headers for the given block
+    pub ommers: Vec<Header>,
+    /// Withdrawals in the block.
+    pub withdrawals: Option<Withdrawals>,
+}
+
+/// Signed transaction.
+#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Serialize, Deserialize)]
+pub struct TransactionSigned {
+    /// Transaction hash
+    pub hash: TxHash,
+    /// The transaction signature values
+    pub signature: Signature,
+    /// Raw transaction info
+    #[deref]
+    #[as_ref]
+    pub transaction: Transaction,
+}
+
+impl Default for TransactionSigned {
+    fn default() -> Self {
+        Self {
+            hash: Default::default(),
+            signature: Signature::test_signature(),
+            transaction: Default::default(),
+        }
+    }
+}
+
+impl AsRef<Self> for TransactionSigned {
+    fn as_ref(&self) -> &Self {
+        self
+    }
+}
+
+// === impl TransactionSigned ===
+impl TransactionSigned {
+    /// Calculate the transaction hash; an EIP-2718 typed transaction does not contain an RLP
+    /// header and starts with the tx type.
+    pub fn recalculate_hash(&self) -> B256 {
+        keccak256(self.encoded_2718())
+    }
+
+    /// Create a new signed transaction from a transaction and its signature.
+    ///
+    /// This will also calculate the transaction hash using its encoding.
+    pub fn from_transaction_and_signature(transaction: Transaction, signature: Signature) -> Self {
+        let mut initial_tx = Self { transaction, hash: Default::default(), signature };
+        initial_tx.hash = initial_tx.recalculate_hash();
+        initial_tx
+    }
+
+    /// Decodes legacy transaction from the data buffer into a tuple.
+    ///
+    /// This expects `rlp(legacy_tx)`
+    ///
+    /// Refer to the docs for [`Self::decode_rlp_legacy_transaction`] for details on the exact
+    /// format expected.
+    pub(crate) fn decode_rlp_legacy_transaction_tuple(
+        data: &mut &[u8],
+    ) -> alloy_rlp::Result<(TxLegacy, TxHash, Signature)> {
+        let original_encoding = *data;
+
+        let header = alloy_rlp::Header::decode(data)?;
+        let remaining_len = data.len();
+
+        let transaction_payload_len = header.payload_length;
+
+        if transaction_payload_len > remaining_len {
+            return Err(RlpError::InputTooShort);
+        }
+
+        let mut transaction = TxLegacy {
+            nonce: Decodable::decode(data)?,
+            gas_price: Decodable::decode(data)?,
+            gas_limit: Decodable::decode(data)?,
+            to: Decodable::decode(data)?,
+            value: Decodable::decode(data)?,
+            input: Decodable::decode(data)?,
+            chain_id: None,
+        };
+
+        let v = Decodable::decode(data)?;
+        let r: U256 = Decodable::decode(data)?;
+        let s: U256 = Decodable::decode(data)?;
+
+        let tx_length = header.payload_length + header.length();
+        let hash = keccak256(&original_encoding[..tx_length]);
+
+        // Handle both pre-bedrock and regular cases
+        let (signature, chain_id) = if v == 0 && r.is_zero() && s.is_zero() {
+            // Pre-bedrock system transactions case
+            (Signature::new(r, s, false), None)
+        } else {
+            // Regular transaction case
+            let (parity, chain_id) = from_eip155_value(v)
+                .ok_or(alloy_rlp::Error::Custom("invalid parity for legacy transaction"))?;
+            (Signature::new(r, s, parity), chain_id)
+        };
+
+        // Set chain ID and verify length
+        transaction.chain_id = chain_id;
+        let decoded = remaining_len - data.len();
+        if decoded != transaction_payload_len {
+            return Err(RlpError::UnexpectedLength);
+        }
+
+        Ok((transaction, hash, signature))
+    }
+
+    /// Decodes legacy transaction from the data buffer.
+    ///
+    /// This should _only_ be used in general transaction decoding methods, which have
+    /// already ensured that the input is a legacy transaction with the following format:
+    /// `rlp(legacy_tx)`
+    ///
+    /// Legacy transactions are encoded as lists, so the input should start with a RLP list header.
+    ///
+    /// This expects `rlp(legacy_tx)`
+    // TODO: make buf advancement semantics consistent with `decode_enveloped_typed_transaction`,
+    // so decoding methods do not need to manually advance the buffer
+    pub fn decode_rlp_legacy_transaction(data: &mut &[u8]) -> alloy_rlp::Result<Self> {
+        let (transaction, hash, signature) = Self::decode_rlp_legacy_transaction_tuple(data)?;
+        let signed = Self { transaction: Transaction::Legacy(transaction), hash, signature };
+        Ok(signed)
+    }
+}
+
+impl Decodable for TransactionSigned {
+    /// This `Decodable` implementation only supports decoding rlp encoded transactions as it's
+    /// used by p2p.
+    ///
+    /// The p2p encoding format always includes an RLP header, although the type of RLP header
+    /// depends on whether or not the transaction is a legacy transaction.
+    ///
+    /// If the transaction is a legacy transaction, it is just encoded as a RLP list:
+    /// `rlp(tx-data)`.
+    ///
+    /// If the transaction is a typed transaction, it is encoded as a RLP string:
+    /// `rlp(tx-type || rlp(tx-data))`
+    ///
+    /// This can be used for decoding all signed transactions in p2p `BlockBodies` responses.
+    ///
+    /// This cannot be used for decoding EIP-4844 transactions in p2p `PooledTransactions`, since
+    /// the EIP-4844 variant of [`TransactionSigned`] does not include the blob sidecar.
+    ///
+    /// For a method suitable for decoding pooled transactions, see \[`PooledTransactionsElement`\].
+    ///
+    /// CAUTION: Due to a quirk in [`Header::decode`], this method will succeed even if a typed
+    /// transaction is encoded in this format, and does not start with a RLP header:
+    /// `tx-type || rlp(tx-data)`.
+    ///
+    /// This is because [`Header::decode`] does not advance the buffer, and returns a length-1
+    /// string header if the first byte is less than `0xf7`.
+    fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
+        Self::network_decode(buf).map_err(Into::into)
+    }
+}
+
+impl Encodable2718 for TransactionSigned {
+    fn type_flag(&self) -> Option<u8> {
+        match self.transaction.tx_type() {
+            TxType::Legacy => None,
+            tx_type => Some(tx_type as u8),
+        }
+    }
+
+    fn encode_2718_len(&self) -> usize {
+        match &self.transaction {
+            Transaction::Legacy(legacy_tx) => legacy_tx.eip2718_encoded_length(&self.signature),
+            Transaction::Eip2930(access_list_tx) => {
+                access_list_tx.eip2718_encoded_length(&self.signature)
+            }
+            Transaction::Eip1559(dynamic_fee_tx) => {
+                dynamic_fee_tx.eip2718_encoded_length(&self.signature)
+            }
+            Transaction::Eip4844(blob_tx) => blob_tx.eip2718_encoded_length(&self.signature),
+            Transaction::Eip7702(set_code_tx) => {
+                set_code_tx.eip2718_encoded_length(&self.signature)
+            }
+            Transaction::Deposit(deposit_tx) => deposit_tx.eip2718_encoded_length(),
+        }
+    }
+
+    fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) {
+        self.transaction.eip2718_encode(&self.signature, out)
+    }
+}
+
+impl Decodable2718 for TransactionSigned {
+    fn typed_decode(ty: u8, buf: &mut &[u8]) -> Eip2718Result<Self> {
+        match ty.try_into().map_err(|_| Eip2718Error::UnexpectedType(ty))?
{ + TxType::Legacy => Err(Eip2718Error::UnexpectedType(0)), + TxType::Eip2930 => { + let (tx, signature, hash) = TxEip2930::rlp_decode_signed(buf)?.into_parts(); + Ok(Self { transaction: Transaction::Eip2930(tx), signature, hash }) + } + TxType::Eip1559 => { + let (tx, signature, hash) = TxEip1559::rlp_decode_signed(buf)?.into_parts(); + Ok(Self { transaction: Transaction::Eip1559(tx), signature, hash }) + } + TxType::Eip7702 => { + let (tx, signature, hash) = TxEip7702::rlp_decode_signed(buf)?.into_parts(); + Ok(Self { transaction: Transaction::Eip7702(tx), signature, hash }) + } + TxType::Eip4844 => { + let (tx, signature, hash) = TxEip4844::rlp_decode_signed(buf)?.into_parts(); + Ok(Self { transaction: Transaction::Eip4844(tx), signature, hash }) + } + TxType::Deposit => Ok(Self::from_transaction_and_signature( + Transaction::Deposit(TxDeposit::rlp_decode(buf)?), + TxDeposit::signature(), + )), + } + } + + fn fallback_decode(buf: &mut &[u8]) -> Eip2718Result { + Ok(Self::decode_rlp_legacy_transaction(buf)?) + } +} + +#[cfg(test)] +mod tests { + use crate::ovm_file_codec::TransactionSigned; + use alloy_primitives::{address, hex, TxKind, B256, U256}; + use reth_primitives::transaction::Transaction; + const DEPOSIT_FUNCTION_SELECTOR: [u8; 4] = [0xb6, 0xb5, 0x5f, 0x25]; + use alloy_rlp::Decodable; + + #[test] + fn test_decode_legacy_transactions() { + // Test Case 1: contract deposit - regular L2 transaction calling deposit() function + // tx: https://optimistic.etherscan.io/getRawTx?tx=0x7860252963a2df21113344f323035ef59648638a571eef742e33d789602c7a1c + let deposit_tx_bytes = hex!("f88881f0830f481c830c6e4594a75127121d28a9bf848f3b70e7eea26570aa770080a4b6b55f2500000000000000000000000000000000000000000000000000000000000710b238a0d5c622d92ddf37f9c18a3465a572f74d8b1aeaf50c1cfb10b3833242781fd45fa02c4f1d5819bf8b70bf651e7a063b7db63c55bd336799c6ae3e5bc72ad6ef3def"); + let deposit_decoded = TransactionSigned::decode(&mut &deposit_tx_bytes[..]).unwrap(); + + // Verify deposit transaction + let deposit_tx = match &deposit_decoded.transaction { + Transaction::Legacy(ref tx) => tx, + _ => panic!("Expected legacy transaction for NFT deposit"), + }; + + assert_eq!( + deposit_tx.to, + TxKind::Call(address!("a75127121d28a9bf848f3b70e7eea26570aa7700")) + ); + assert_eq!(deposit_tx.nonce, 240); + assert_eq!(deposit_tx.gas_price, 1001500); + assert_eq!(deposit_tx.gas_limit, 814661); + assert_eq!(deposit_tx.value, U256::ZERO); + assert_eq!(&deposit_tx.input.as_ref()[0..4], DEPOSIT_FUNCTION_SELECTOR); + assert_eq!(deposit_tx.chain_id, Some(10)); + assert_eq!( + deposit_decoded.signature.r(), + U256::from_str_radix( + "d5c622d92ddf37f9c18a3465a572f74d8b1aeaf50c1cfb10b3833242781fd45f", + 16 + ) + .unwrap() + ); + assert_eq!( + deposit_decoded.signature.s(), + U256::from_str_radix( + "2c4f1d5819bf8b70bf651e7a063b7db63c55bd336799c6ae3e5bc72ad6ef3def", + 16 + ) + .unwrap() + ); + + // Test Case 2: pre-bedrock system transaction from block 105235052 + // tx: https://optimistic.etherscan.io/getRawTx?tx=0xe20b11349681dd049f8df32f5cdbb4c68d46b537685defcd86c7fa42cfe75b9e + let system_tx_bytes = 
hex!("f9026c830d899383124f808302a77e94a0cc33dd6f4819d473226257792afe230ec3c67f80b902046c459a280000000000000000000000004d73adb72bc3dd368966edd0f0b2148401a178e2000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000647fac7f00000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000000084704316e5000000000000000000000000000000000000000000000000000000000000006e10975631049de3c008989b0d8c19fc720dc556ca01abfbd794c6eb5075dd000d000000000000000000000000000000000000000000000000000000000000001410975631049de3c008989b0d8c19fc720dc556ca01abfbd794c6eb5075dd000d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082a39325251d44e11f3b6d92f9382438eb6c8b5068d4a488d4f177b26f2ca20db34ae53467322852afcc779f25eafd124c5586f54b9026497ba934403d4c578e3c1b5aa754c918ee2ecd25402df656c2419717e4017a7aecb84af3914fd3c7bf6930369c4e6ff76950246b98e354821775f02d33cdbee5ef6aed06c15b75691692d31c00000000000000000000000000000000000000000000000000000000000038a0e8991e95e66d809f4b6fb0af27c31368ca0f30e657165c428aa681ec5ea25bbea013ed325bd97365087ec713e9817d252b59113ea18430b71a5890c4eeb6b9efc4"); + let system_decoded = TransactionSigned::decode(&mut &system_tx_bytes[..]).unwrap(); + + // Verify system transaction + assert!(system_decoded.is_legacy()); + + let system_tx = match &system_decoded.transaction { + Transaction::Legacy(ref tx) => tx, + _ => panic!("Expected Legacy transaction"), + }; + + assert_eq!(system_tx.nonce, 887187); + assert_eq!(system_tx.gas_price, 1200000); + assert_eq!(system_tx.gas_limit, 173950); + assert_eq!( + system_tx.to, + TxKind::Call(address!("a0cc33dd6f4819d473226257792afe230ec3c67f")) + ); + assert_eq!(system_tx.value, U256::ZERO); + assert_eq!(system_tx.chain_id, Some(10)); + + assert_eq!( + system_decoded.signature.r(), + U256::from_str_radix( + "e8991e95e66d809f4b6fb0af27c31368ca0f30e657165c428aa681ec5ea25bbe", + 16 + ) + .unwrap() + ); + assert_eq!( + system_decoded.signature.s(), + U256::from_str_radix( + "13ed325bd97365087ec713e9817d252b59113ea18430b71a5890c4eeb6b9efc4", + 16 + ) + .unwrap() + ); + assert_eq!( + system_decoded.hash, + B256::from(hex!("e20b11349681dd049f8df32f5cdbb4c68d46b537685defcd86c7fa42cfe75b9e")) + ); + } +} diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml index e2520c89340d..0dffceaddca9 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -26,6 +26,7 @@ reth-optimism-chainspec.workspace = true # ethereum alloy-primitives.workspace = true alloy-consensus.workspace = true +alloy-trie.workspace = true tracing.workspace = true diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 16c1d5d37d7c..72f67dcb4501 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -9,19 +9,19 @@ // The `optimism` feature must be enabled to use this crate. 
#![cfg(feature = "optimism")]
 
-use alloy_consensus::EMPTY_OMMER_ROOT_HASH;
+use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH};
 use alloy_primitives::{B64, U256};
 use reth_chainspec::EthereumHardforks;
 use reth_consensus::{Consensus, ConsensusError, PostExecutionInput};
 use reth_consensus_common::validation::{
     validate_against_parent_4844, validate_against_parent_eip1559_base_fee,
-    validate_against_parent_hash_number, validate_against_parent_timestamp, validate_cancun_gas,
-    validate_header_base_fee, validate_header_extradata, validate_header_gas,
-    validate_shanghai_withdrawals,
+    validate_against_parent_hash_number, validate_against_parent_timestamp,
+    validate_body_against_header, validate_cancun_gas, validate_header_base_fee,
+    validate_header_extradata, validate_header_gas, validate_shanghai_withdrawals,
 };
 use reth_optimism_chainspec::OpChainSpec;
-use reth_optimism_forks::OptimismHardforks;
-use reth_primitives::{BlockWithSenders, GotExpected, Header, SealedBlock, SealedHeader};
+use reth_optimism_forks::OpHardforks;
+use reth_primitives::{BlockBody, BlockWithSenders, GotExpected, SealedBlock, SealedHeader};
 use std::{sync::Arc, time::SystemTime};
 
 mod proof;
@@ -34,19 +34,19 @@ pub use validation::validate_block_post_execution;
 ///
 /// Provides basic checks as outlined in the execution specs.
 #[derive(Debug, Clone, PartialEq, Eq)]
-pub struct OptimismBeaconConsensus {
+pub struct OpBeaconConsensus {
     /// Configuration
     chain_spec: Arc<OpChainSpec>,
 }
 
-impl OptimismBeaconConsensus {
-    /// Create a new instance of [`OptimismBeaconConsensus`]
+impl OpBeaconConsensus {
+    /// Create a new instance of [`OpBeaconConsensus`]
     pub const fn new(chain_spec: Arc<OpChainSpec>) -> Self {
         Self { chain_spec }
     }
 }
 
-impl Consensus for OptimismBeaconConsensus {
+impl Consensus for OpBeaconConsensus {
     fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> {
         validate_header_gas(header)?;
         validate_header_base_fee(header, &self.chain_spec)
@@ -119,6 +119,14 @@ impl Consensus for OptimismBeaconConsensus {
         Ok(())
     }
 
+    fn validate_body_against_header(
+        &self,
+        body: &BlockBody,
+        header: &SealedHeader,
+    ) -> Result<(), ConsensusError> {
+        validate_body_against_header(body, header)
+    }
+
     fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> {
         // Check ommers hash
         let ommers_hash = reth_primitives::proofs::calculate_ommers_root(&block.body.ommers);
diff --git a/crates/optimism/consensus/src/proof.rs b/crates/optimism/consensus/src/proof.rs
index b283356016c2..18e64a467ff1 100644
--- a/crates/optimism/consensus/src/proof.rs
+++ b/crates/optimism/consensus/src/proof.rs
@@ -1,10 +1,10 @@
 //! Helper function for Receipt root calculation for Optimism hardforks.
 
 use alloy_primitives::B256;
+use alloy_trie::root::ordered_trie_root_with_encoder;
 use reth_chainspec::ChainSpec;
-use reth_optimism_forks::OptimismHardfork;
+use reth_optimism_forks::OpHardfork;
 use reth_primitives::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef};
-use reth_trie_common::root::ordered_trie_root_with_encoder;
 
 /// Calculates the receipt root for a header.
 pub(crate) fn calculate_receipt_root_optimism(
@@ -17,8 +17,8 @@ pub(crate) fn calculate_receipt_root_optimism(
     // encoding. In the Regolith Hardfork, we must strip the deposit nonce from the
     // receipts before calculating the receipt root. This was corrected in the Canyon
     // hardfork.
- if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, timestamp) && - !chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, timestamp) + if chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, timestamp) && + !chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, timestamp) { let receipts = receipts .iter() @@ -50,8 +50,8 @@ pub fn calculate_receipt_root_no_memo_optimism( // encoding. In the Regolith Hardfork, we must strip the deposit nonce from the // receipts before calculating the receipt root. This was corrected in the Canyon // hardfork. - if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, timestamp) && - !chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, timestamp) + if chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, timestamp) && + !chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, timestamp) { let receipts = receipts .iter() diff --git a/crates/optimism/evm/src/config.rs b/crates/optimism/evm/src/config.rs index 668fcba4ddc4..f2d35ba56c41 100644 --- a/crates/optimism/evm/src/config.rs +++ b/crates/optimism/evm/src/config.rs @@ -1,6 +1,6 @@ use reth_ethereum_forks::{EthereumHardfork, Head}; use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_forks::OptimismHardfork; +use reth_optimism_forks::OpHardfork; /// Returns the revm [`SpecId`](revm_primitives::SpecId) at the given timestamp. /// @@ -12,15 +12,17 @@ pub fn revm_spec_by_timestamp_after_bedrock( chain_spec: &OpChainSpec, timestamp: u64, ) -> revm_primitives::SpecId { - if chain_spec.fork(OptimismHardfork::Granite).active_at_timestamp(timestamp) { + if chain_spec.fork(OpHardfork::Holocene).active_at_timestamp(timestamp) { + revm_primitives::HOLOCENE + } else if chain_spec.fork(OpHardfork::Granite).active_at_timestamp(timestamp) { revm_primitives::GRANITE - } else if chain_spec.fork(OptimismHardfork::Fjord).active_at_timestamp(timestamp) { + } else if chain_spec.fork(OpHardfork::Fjord).active_at_timestamp(timestamp) { revm_primitives::FJORD - } else if chain_spec.fork(OptimismHardfork::Ecotone).active_at_timestamp(timestamp) { + } else if chain_spec.fork(OpHardfork::Ecotone).active_at_timestamp(timestamp) { revm_primitives::ECOTONE - } else if chain_spec.fork(OptimismHardfork::Canyon).active_at_timestamp(timestamp) { + } else if chain_spec.fork(OpHardfork::Canyon).active_at_timestamp(timestamp) { revm_primitives::CANYON - } else if chain_spec.fork(OptimismHardfork::Regolith).active_at_timestamp(timestamp) { + } else if chain_spec.fork(OpHardfork::Regolith).active_at_timestamp(timestamp) { revm_primitives::REGOLITH } else { revm_primitives::BEDROCK @@ -29,17 +31,19 @@ pub fn revm_spec_by_timestamp_after_bedrock( /// Map the latest active hardfork at the given block to a revm [`SpecId`](revm_primitives::SpecId). 
pub fn revm_spec(chain_spec: &OpChainSpec, block: &Head) -> revm_primitives::SpecId {
-    if chain_spec.fork(OptimismHardfork::Granite).active_at_head(block) {
+    if chain_spec.fork(OpHardfork::Holocene).active_at_head(block) {
+        revm_primitives::HOLOCENE
+    } else if chain_spec.fork(OpHardfork::Granite).active_at_head(block) {
         revm_primitives::GRANITE
-    } else if chain_spec.fork(OptimismHardfork::Fjord).active_at_head(block) {
+    } else if chain_spec.fork(OpHardfork::Fjord).active_at_head(block) {
         revm_primitives::FJORD
-    } else if chain_spec.fork(OptimismHardfork::Ecotone).active_at_head(block) {
+    } else if chain_spec.fork(OpHardfork::Ecotone).active_at_head(block) {
         revm_primitives::ECOTONE
-    } else if chain_spec.fork(OptimismHardfork::Canyon).active_at_head(block) {
+    } else if chain_spec.fork(OpHardfork::Canyon).active_at_head(block) {
         revm_primitives::CANYON
-    } else if chain_spec.fork(OptimismHardfork::Regolith).active_at_head(block) {
+    } else if chain_spec.fork(OpHardfork::Regolith).active_at_head(block) {
         revm_primitives::REGOLITH
-    } else if chain_spec.fork(OptimismHardfork::Bedrock).active_at_head(block) {
+    } else if chain_spec.fork(OpHardfork::Bedrock).active_at_head(block) {
         revm_primitives::BEDROCK
     } else if chain_spec.fork(EthereumHardfork::Prague).active_at_head(block) {
         revm_primitives::PRAGUE
diff --git a/crates/optimism/evm/src/error.rs b/crates/optimism/evm/src/error.rs
index 71f8709e1ade..db042950674a 100644
--- a/crates/optimism/evm/src/error.rs
+++ b/crates/optimism/evm/src/error.rs
@@ -5,7 +5,7 @@ use reth_evm::execute::BlockExecutionError;
 
 /// Optimism Block Executor Errors
 #[derive(Debug, Clone, PartialEq, Eq, derive_more::Display)]
-pub enum OptimismBlockExecutionError {
+pub enum OpBlockExecutionError {
     /// Error when trying to parse L1 block info
     #[display("could not get L1 block info from L2 block: {message}")]
     L1BlockInfoError {
@@ -23,10 +23,10 @@ pub enum OptimismBlockExecutionError {
     AccountLoadFailed(alloy_primitives::Address),
 }
 
-impl core::error::Error for OptimismBlockExecutionError {}
+impl core::error::Error for OpBlockExecutionError {}
 
-impl From<OptimismBlockExecutionError> for BlockExecutionError {
-    fn from(err: OptimismBlockExecutionError) -> Self {
+impl From<OpBlockExecutionError> for BlockExecutionError {
+    fn from(err: OpBlockExecutionError) -> Self {
         Self::other(err)
     }
 }
diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs
index 1cd92409847e..b4c2e16f593d 100644
--- a/crates/optimism/evm/src/execute.rs
+++ b/crates/optimism/evm/src/execute.rs
@@ -1,10 +1,11 @@
 //! Optimism block execution strategy.
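The `revm_spec*` mappings above resolve the active OP hardfork newest-first and fall back to Bedrock. A standalone sketch of that cascade, using the OP mainnet activation timestamps that appear later in this diff (the Canyon timestamp is an assumption for illustration):

```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Spec { Bedrock, Regolith, Canyon, Ecotone, Fjord, Granite, Holocene }

// Newest-first scan mirroring the `if/else if` chain in
// `revm_spec_by_timestamp_after_bedrock` above.
fn spec_at(ts: u64) -> Spec {
    const FORKS: &[(u64, Spec)] = &[
        (1_732_633_200, Spec::Holocene), // timestamps as listed in this diff
        (1_723_478_400, Spec::Granite),
        (1_716_998_400, Spec::Fjord),
        (1_708_534_800, Spec::Ecotone),
        (1_704_992_401, Spec::Canyon),   // assumed Canyon mainnet timestamp
        (0, Spec::Regolith),
    ];
    FORKS.iter().find(|(at, _)| ts >= *at).map_or(Spec::Bedrock, |(_, s)| *s)
}

fn main() {
    assert_eq!(spec_at(1_732_633_200), Spec::Holocene); // at Holocene activation
    assert_eq!(spec_at(1_700_000_000), Spec::Regolith); // before Canyon
}
```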
-use crate::{l1::ensure_create2_deployer, OptimismBlockExecutionError, OptimismEvmConfig};
+use crate::{l1::ensure_create2_deployer, OpBlockExecutionError, OpEvmConfig};
 use alloc::{boxed::Box, sync::Arc, vec::Vec};
-use alloy_consensus::Transaction as _;
+use alloy_consensus::{Header, Transaction as _};
 use alloy_eips::eip7685::Requests;
 use core::fmt::Display;
+use op_alloy_consensus::DepositTransaction;
 use reth_chainspec::EthereumHardforks;
 use reth_consensus::ConsensusError;
 use reth_evm::{
@@ -18,8 +19,8 @@ use reth_evm::{
 };
 use reth_optimism_chainspec::OpChainSpec;
 use reth_optimism_consensus::validate_block_post_execution;
-use reth_optimism_forks::OptimismHardfork;
-use reth_primitives::{BlockWithSenders, Header, Receipt, TxType};
+use reth_optimism_forks::OpHardfork;
+use reth_primitives::{BlockWithSenders, Receipt, TxType};
 use reth_revm::{Database, State};
 use revm_primitives::{
     db::DatabaseCommit, BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, U256,
@@ -28,7 +29,7 @@ use tracing::trace;
 
 /// Factory for [`OpExecutionStrategy`].
 #[derive(Debug, Clone)]
-pub struct OpExecutionStrategyFactory<EvmConfig = OptimismEvmConfig> {
+pub struct OpExecutionStrategyFactory<EvmConfig = OpEvmConfig> {
     /// The chainspec
     chain_spec: Arc<OpChainSpec>,
     /// How to create an EVM.
@@ -38,7 +39,7 @@ pub struct OpExecutionStrategyFactory<EvmConfig = OpEvmConfig> {
 
 impl OpExecutionStrategyFactory {
     /// Creates a new default optimism executor strategy factory.
     pub fn optimism(chain_spec: Arc<OpChainSpec>) -> Self {
-        Self::new(chain_spec.clone(), OptimismEvmConfig::new(chain_spec))
+        Self::new(chain_spec.clone(), OpEvmConfig::new(chain_spec))
     }
 }
 
@@ -89,7 +90,7 @@ where
 {
     /// Creates a new [`OpExecutionStrategy`]
     pub fn new(state: State<DB>, chain_spec: Arc<OpChainSpec>, evm_config: EvmConfig) -> Self {
-        let system_caller = SystemCaller::new(evm_config.clone(), (*chain_spec).clone());
+        let system_caller = SystemCaller::new(evm_config.clone(), chain_spec.clone());
         Self { state, chain_spec, evm_config, system_caller }
     }
 }
 
@@ -143,7 +144,7 @@ where
         // so we can safely assume that this will always be triggered upon the transition and that
         // the above check for empty blocks will never be hit on OP chains.
         ensure_create2_deployer(self.chain_spec.clone(), block.timestamp, evm.db_mut())
-            .map_err(|_| OptimismBlockExecutionError::ForceCreate2DeployerFail)?;
+            .map_err(|_| OpBlockExecutionError::ForceCreate2DeployerFail)?;
 
         Ok(())
     }
 
@@ -157,7 +158,7 @@ where
         let mut evm = self.evm_config.evm_with_env(&mut self.state, env);
 
         let is_regolith =
-            self.chain_spec.fork(OptimismHardfork::Regolith).active_at_timestamp(block.timestamp);
+            self.chain_spec.fork(OpHardfork::Regolith).active_at_timestamp(block.timestamp);
 
         let mut cumulative_gas_used = 0;
         let mut receipts = Vec::with_capacity(block.body.transactions.len());
@@ -177,7 +178,7 @@ where
 
             // An optimism block should never contain blob transactions.
             if matches!(transaction.tx_type(), TxType::Eip4844) {
-                return Err(OptimismBlockExecutionError::BlobTransactionRejected.into())
+                return Err(OpBlockExecutionError::BlobTransactionRejected.into())
             }
 
             // Cache the depositor account prior to the state transition for the deposit nonce.
@@ -192,7 +193,7 @@ where
                         .map(|acc| acc.account_info().unwrap_or_default())
                 })
                 .transpose()
-                .map_err(|_| OptimismBlockExecutionError::AccountLoadFailed(*sender))?;
+                .map_err(|_| OpBlockExecutionError::AccountLoadFailed(*sender))?;
 
             self.evm_config.fill_tx_env(evm.tx_mut(), transaction, *sender);
 
@@ -232,7 +233,7 @@ where
                 // this is only set for post-Canyon deposit transactions.
                deposit_receipt_version: (transaction.is_deposit() &&
                    self.chain_spec
-                        .is_fork_active_at_timestamp(OptimismHardfork::Canyon, block.timestamp))
+                        .is_fork_active_at_timestamp(OpHardfork::Canyon, block.timestamp))
                    .then_some(1),
             });
         }
@@ -296,12 +297,14 @@ mod tests {
     use super::*;
     use crate::OpChainSpec;
     use alloy_consensus::TxEip1559;
-    use alloy_primitives::{b256, Address, StorageKey, StorageValue};
+    use alloy_primitives::{
+        b256, Address, PrimitiveSignature as Signature, StorageKey, StorageValue,
+    };
     use op_alloy_consensus::TxDeposit;
     use reth_chainspec::MIN_TRANSACTION_GAS;
     use reth_evm::execute::{BasicBlockExecutorProvider, BatchExecutor, BlockExecutorProvider};
     use reth_optimism_chainspec::OpChainSpecBuilder;
-    use reth_primitives::{Account, Block, BlockBody, Signature, Transaction, TransactionSigned};
+    use reth_primitives::{Account, Block, BlockBody, Transaction, TransactionSigned};
     use reth_revm::{
         database::StateProviderDatabase, test_utils::StateProviderTest, L1_BLOCK_CONTRACT,
     };
@@ -338,7 +341,7 @@ mod tests {
         chain_spec: Arc<OpChainSpec>,
     ) -> BasicBlockExecutorProvider<OpExecutionStrategyFactory> {
         let strategy_factory =
-            OpExecutionStrategyFactory::new(chain_spec.clone(), OptimismEvmConfig::new(chain_spec));
+            OpExecutionStrategyFactory::new(chain_spec.clone(), OpEvmConfig::new(chain_spec));
 
         BasicBlockExecutorProvider::new(strategy_factory)
     }
diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs
index e0668ab02042..9d3e76fb442b 100644
--- a/crates/optimism/evm/src/l1.rs
+++ b/crates/optimism/evm/src/l1.rs
@@ -1,12 +1,12 @@
 //! Optimism-specific implementation and utilities for the executor
 
-use crate::OptimismBlockExecutionError;
+use crate::OpBlockExecutionError;
 use alloc::{string::ToString, sync::Arc};
 use alloy_primitives::{address, b256, hex, Address, Bytes, B256, U256};
 use reth_chainspec::ChainSpec;
 use reth_execution_errors::BlockExecutionError;
 use reth_optimism_chainspec::OpChainSpec;
-use reth_optimism_forks::OptimismHardfork;
+use reth_optimism_forks::OpHardfork;
 use reth_primitives::BlockBody;
 use revm::{
     primitives::{Bytecode, HashMap, SpecId},
@@ -31,17 +31,17 @@ const L1_BLOCK_ECOTONE_SELECTOR: [u8; 4] = hex!("440a5e20");
 /// transaction in the L2 block.
 ///
 /// Returns an error if the L1 info transaction is not found or if the block is empty.
-pub fn extract_l1_info(body: &BlockBody) -> Result<L1BlockInfo, OptimismBlockExecutionError> {
+pub fn extract_l1_info(body: &BlockBody) -> Result<L1BlockInfo, OpBlockExecutionError> {
     let l1_info_tx_data = body
         .transactions
         .first()
-        .ok_or_else(|| OptimismBlockExecutionError::L1BlockInfoError {
+        .ok_or_else(|| OpBlockExecutionError::L1BlockInfoError {
             message: "could not find l1 block info tx in the L2 block".to_string(),
         })
        .map(|tx| tx.input())?;
 
     if l1_info_tx_data.len() < 4 {
-        return Err(OptimismBlockExecutionError::L1BlockInfoError {
+        return Err(OpBlockExecutionError::L1BlockInfoError {
             message: "invalid l1 block info transaction calldata in the L2 block".to_string(),
         })
     }
@@ -52,7 +52,7 @@ pub fn extract_l1_info(body: &BlockBody) -> Result<L1BlockInfo, OpBlockExecutionError> {
-pub fn parse_l1_info(input: &[u8]) -> Result<L1BlockInfo, OptimismBlockExecutionError> {
+pub fn parse_l1_info(input: &[u8]) -> Result<L1BlockInfo, OpBlockExecutionError> {
     // If the first 4 bytes of the calldata are the L1BlockInfoEcotone selector, then we parse the
     // calldata as an Ecotone hardfork L1BlockInfo transaction. Otherwise, we parse it as a
     // Bedrock hardfork L1BlockInfo transaction.
@@ -64,7 +64,7 @@ pub fn parse_l1_info(input: &[u8]) -> Result<L1BlockInfo, OpBlockExecutionError> {
-pub fn parse_l1_info_tx_bedrock(data: &[u8]) -> Result<L1BlockInfo, OptimismBlockExecutionError> {
+pub fn parse_l1_info_tx_bedrock(data: &[u8]) -> Result<L1BlockInfo, OpBlockExecutionError> {
     // The setL1BlockValues tx calldata must be exactly 260 bytes long, considering that
     // we already removed the first 4 bytes (the function selector).
Detailed breakdown: // 32 bytes for the block number @@ -76,23 +76,23 @@ pub fn parse_l1_info_tx_bedrock(data: &[u8]) -> Result Result -pub fn parse_l1_info_tx_ecotone(data: &[u8]) -> Result { +pub fn parse_l1_info_tx_ecotone(data: &[u8]) -> Result { if data.len() != 160 { - return Err(OptimismBlockExecutionError::L1BlockInfoError { + return Err(OpBlockExecutionError::L1BlockInfoError { message: "unexpected l1 block info tx calldata length found".to_string(), }) } @@ -142,22 +142,22 @@ pub fn parse_l1_info_tx_ecotone(data: &[u8]) -> Result Result { - let spec_id = if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, timestamp) - { + let spec_id = if chain_spec.is_fork_active_at_timestamp(OpHardfork::Fjord, timestamp) { SpecId::FJORD - } else if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, timestamp) { + } else if chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, timestamp) { SpecId::REGOLITH - } else if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Bedrock, timestamp) { + } else if chain_spec.is_fork_active_at_timestamp(OpHardfork::Bedrock, timestamp) { SpecId::BEDROCK } else { - return Err(OptimismBlockExecutionError::L1BlockInfoError { + return Err(OpBlockExecutionError::L1BlockInfoError { message: "Optimism hardforks are not active".to_string(), } .into()) @@ -270,9 +268,8 @@ where // If the canyon hardfork is active at the current timestamp, and it was not active at the // previous block timestamp (heuristically, block time is not perfectly constant at 2s), and the // chain is an optimism chain, then we need to force-deploy the create2 deployer contract. - if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, timestamp) && - !chain_spec - .is_fork_active_at_timestamp(OptimismHardfork::Canyon, timestamp.saturating_sub(2)) + if chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, timestamp) && + !chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, timestamp.saturating_sub(2)) { trace!(target: "evm", "Forcing create2 deployer contract deployment on Canyon transition"); @@ -300,15 +297,16 @@ where mod tests { use alloy_eips::eip2718::Decodable2718; use reth_optimism_chainspec::OP_MAINNET; - use reth_optimism_forks::OptimismHardforks; + use reth_optimism_forks::OpHardforks; use reth_primitives::{Block, BlockBody, TransactionSigned}; use super::*; #[test] fn sanity_l1_block() { + use alloy_consensus::Header; use alloy_primitives::{hex_literal::hex, Bytes}; - use reth_primitives::{Header, TransactionSigned}; + use reth_primitives::TransactionSigned; let bytes = Bytes::from_static(&hex!("7ef9015aa044bae9d41b8380d781187b426c6fe43df5fb2fb57bd4466ef6a701e1f01e015694deaddeaddeaddeaddeaddeaddeaddeaddead000194420000000000000000000000000000000000001580808408f0d18001b90104015d8eb900000000000000000000000000000000000000000000000000000000008057650000000000000000000000000000000000000000000000000000000063d96d10000000000000000000000000000000000000000000000000000000000009f35273d89754a1e0387b89520d989d3be9c37c1f32495a88faf1ea05c61121ab0d1900000000000000000000000000000000000000000000000000000000000000010000000000000000000000002d679b567db6187c0c8323fa982cfb88b74dbcc7000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240")); let l1_info_tx = TransactionSigned::decode_2718(&mut bytes.as_ref()).unwrap(); diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 60aa9f7db083..dafb1676ebd5 100644 --- a/crates/optimism/evm/src/lib.rs 
+++ b/crates/optimism/evm/src/lib.rs
@@ -13,13 +13,14 @@
 extern crate alloc;
 
 use alloc::{sync::Arc, vec::Vec};
+use alloy_consensus::Header;
 use alloy_primitives::{Address, U256};
 use reth_evm::{ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes};
-use reth_optimism_chainspec::OpChainSpec;
+use reth_optimism_chainspec::{DecodeError, OpChainSpec};
 use reth_primitives::{
     revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv},
     transaction::FillTxEnv,
-    Head, Header, TransactionSigned,
+    Head, TransactionSigned,
 };
 use reth_revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector};
@@ -31,19 +32,19 @@
 pub mod l1;
 pub use l1::*;
 
 mod error;
-pub use error::OptimismBlockExecutionError;
+pub use error::OpBlockExecutionError;
 use revm_primitives::{
     BlobExcessGasAndPrice, BlockEnv, Bytes, CfgEnv, Env, HandlerCfg, OptimismFields, SpecId,
     TxKind,
 };
 
 /// Optimism-related EVM configuration.
 #[derive(Debug, Clone)]
-pub struct OptimismEvmConfig {
+pub struct OpEvmConfig {
     chain_spec: Arc<OpChainSpec>,
 }
 
-impl OptimismEvmConfig {
-    /// Creates a new [`OptimismEvmConfig`] with the given chain spec.
+impl OpEvmConfig {
+    /// Creates a new [`OpEvmConfig`] with the given chain spec.
     pub const fn new(chain_spec: Arc<OpChainSpec>) -> Self {
         Self { chain_spec }
     }
@@ -54,8 +55,9 @@ impl OpEvmConfig {
     }
 }
 
-impl ConfigureEvmEnv for OptimismEvmConfig {
+impl ConfigureEvmEnv for OpEvmConfig {
     type Header = Header;
+    type Error = DecodeError;
 
     fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) {
         transaction.fill_tx_env(tx_env, sender);
@@ -134,7 +136,7 @@ impl ConfigureEvmEnv for OpEvmConfig {
         &self,
         parent: &Self::Header,
         attributes: NextBlockEnvAttributes,
-    ) -> (CfgEnvWithHandlerCfg, BlockEnv) {
+    ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), Self::Error> {
         // configure evm env based on parent block
         let cfg = CfgEnv::default().with_chain_id(self.chain_spec.chain().id());
 
@@ -156,13 +158,7 @@ impl ConfigureEvmEnv for OpEvmConfig {
             prevrandao: Some(attributes.prev_randao),
             gas_limit: U256::from(parent.gas_limit),
             // calculate basefee based on parent block's gas usage
-            basefee: U256::from(
-                parent
-                    .next_block_base_fee(
-                        self.chain_spec.base_fee_params_at_timestamp(attributes.timestamp),
-                    )
-                    .unwrap_or_default(),
-            ),
+            basefee: self.chain_spec.next_block_base_fee(parent, attributes.timestamp)?,
             // calculate excess gas based on parent block's blob gas usage
             blob_excess_gas_and_price,
         };
@@ -175,11 +171,11 @@ impl ConfigureEvmEnv for OpEvmConfig {
         };
 
-        (cfg_with_handler_cfg, block_env)
+        Ok((cfg_with_handler_cfg, block_env))
     }
 }
 
-impl ConfigureEvm for OptimismEvmConfig {
+impl ConfigureEvm for OpEvmConfig {
     type DefaultExternalContext<'a> = ();
 
     fn evm<DB: Database>(&self, db: DB) -> Evm<'_, Self::DefaultExternalContext<'_>, DB> {
@@ -205,27 +201,34 @@
 #[cfg(test)]
 mod tests {
     use super::*;
-    use alloy_consensus::constants::KECCAK_EMPTY;
+    use alloy_consensus::{constants::KECCAK_EMPTY, Header};
+    use alloy_eips::eip7685::Requests;
     use alloy_genesis::Genesis;
-    use alloy_primitives::{B256, U256};
+    use alloy_primitives::{bytes, Address, LogData, B256, U256};
     use reth_chainspec::ChainSpec;
     use reth_evm::execute::ProviderError;
-    use reth_execution_types::{Chain, ExecutionOutcome};
+    use reth_execution_types::{
+        AccountRevertInit, BundleStateInit, Chain, ExecutionOutcome, RevertsInit,
+    };
     use reth_optimism_chainspec::BASE_MAINNET;
     use reth_primitives::{
-        revm_primitives::{BlockEnv, CfgEnv, SpecId},
-        Header, Receipt, Receipts, SealedBlockWithSenders, TxType,
+        revm_primitives::{AccountInfo, BlockEnv, CfgEnv, SpecId},
+        Account, Log, Receipt, Receipts, SealedBlockWithSenders, TxType,
     };
+
     use reth_revm::{
-        db::{CacheDB, EmptyDBTyped},
+        db::{BundleState, CacheDB, EmptyDBTyped},
         inspectors::NoOpInspector,
         JournaledState,
     };
     use revm_primitives::{CfgEnvWithHandlerCfg, EnvWithHandlerCfg, HandlerCfg};
-    use std::{collections::HashSet, sync::Arc};
+    use std::{
+        collections::{HashMap, HashSet},
+        sync::Arc,
+    };
 
-    fn test_evm_config() -> OptimismEvmConfig {
-        OptimismEvmConfig::new(BASE_MAINNET.clone())
+    fn test_evm_config() -> OpEvmConfig {
+        OpEvmConfig::new(BASE_MAINNET.clone())
     }
 
     #[test]
@@ -252,9 +255,9 @@ mod tests {
         // Define the total difficulty as zero (default)
         let total_difficulty = U256::ZERO;
 
-        // Use the `OptimismEvmConfig` to fill the `cfg_env` and `block_env` based on the ChainSpec,
+        // Use the `OpEvmConfig` to fill the `cfg_env` and `block_env` based on the ChainSpec,
         // Header, and total difficulty
-        OptimismEvmConfig::new(Arc::new(OpChainSpec { inner: chain_spec.clone() }))
+        OpEvmConfig::new(Arc::new(OpChainSpec { inner: chain_spec.clone() }))
             .fill_cfg_and_block_env(&mut cfg_env, &mut block_env, &header, total_difficulty);
 
         // Assert that the chain ID in the `cfg_env` is correctly set to the chain ID of the
@@ -264,7 +267,7 @@ mod tests {
 
     #[test]
     fn test_evm_configure() {
-        // Create a default `OptimismEvmConfig`
+        // Create a default `OpEvmConfig`
         let evm_config = test_evm_config();
 
         // Initialize an empty database wrapped in CacheDB
@@ -620,4 +623,399 @@ mod tests {
         // Assert that the execution outcome at the tip block contains the whole execution outcome
         assert_eq!(chain.execution_outcome_at_block(11), Some(execution_outcome));
     }
+
+    #[test]
+    fn test_initialisation() {
+        // Create a new BundleState object with initial data
+        let bundle = BundleState::new(
+            vec![(Address::new([2; 20]), None, Some(AccountInfo::default()), HashMap::default())],
+            vec![vec![(Address::new([2; 20]), None, vec![])]],
+            vec![],
+        );
+
+        // Create a Receipts object with a vector of receipt vectors
+        let receipts = Receipts {
+            receipt_vec: vec![vec![Some(Receipt {
+                tx_type: TxType::Legacy,
+                cumulative_gas_used: 46913,
+                logs: vec![],
+                success: true,
+                deposit_nonce: Some(18),
+                deposit_receipt_version: Some(34),
+            })]],
+        };
+
+        // Create a Requests object with a vector of requests
+        let requests = vec![Requests::new(vec![bytes!("dead"), bytes!("beef"), bytes!("beebee")])];
+
+        // Define the first block number
+        let first_block = 123;
+
+        // Create a ExecutionOutcome object with the created bundle, receipts, requests, and
+        // first_block
+        let exec_res = ExecutionOutcome {
+            bundle: bundle.clone(),
+            receipts: receipts.clone(),
+            requests: requests.clone(),
+            first_block,
+        };
+
+        // Assert that creating a new ExecutionOutcome using the constructor matches exec_res
+        assert_eq!(
+            ExecutionOutcome::new(bundle, receipts.clone(), first_block, requests.clone()),
+            exec_res
+        );
+
+        // Create a BundleStateInit object and insert initial data
+        let mut state_init: BundleStateInit = HashMap::default();
+        state_init
+            .insert(Address::new([2; 20]), (None, Some(Account::default()), HashMap::default()));
+
+        // Create a HashMap for account reverts and insert initial data
+        let mut revert_inner: HashMap<Address, AccountRevertInit> = HashMap::default();
+        revert_inner.insert(Address::new([2; 20]), (None, vec![]));
+
+        // Create a RevertsInit object and insert the revert_inner data
+        let mut revert_init: RevertsInit = HashMap::default();
+        revert_init.insert(123, revert_inner);
+
+        // Assert that creating a new ExecutionOutcome using the new_init method matches
+        // exec_res
+        assert_eq!(
+            ExecutionOutcome::new_init(
+                state_init,
+                revert_init,
+                vec![],
+                receipts,
+                first_block,
+                requests,
+            ),
+            exec_res
+        );
+    }
+
+    #[test]
+    fn test_block_number_to_index() {
+        // Create a Receipts object with a vector of receipt vectors
+        let receipts = Receipts {
+            receipt_vec: vec![vec![Some(Receipt {
+                tx_type: TxType::Legacy,
+                cumulative_gas_used: 46913,
+                logs: vec![],
+                success: true,
+                deposit_nonce: Some(18),
+                deposit_receipt_version: Some(34),
+            })]],
+        };
+
+        // Define the first block number
+        let first_block = 123;
+
+        // Create a ExecutionOutcome object with the created bundle, receipts, requests, and
+        // first_block
+        let exec_res = ExecutionOutcome {
+            bundle: Default::default(),
+            receipts,
+            requests: vec![],
+            first_block,
+        };
+
+        // Test before the first block
+        assert_eq!(exec_res.block_number_to_index(12), None);
+
+        // Test after the first block but with an index larger than the receipts length
+        assert_eq!(exec_res.block_number_to_index(133), None);
+
+        // Test after the first block
+        assert_eq!(exec_res.block_number_to_index(123), Some(0));
+    }
+
+    #[test]
+    fn test_get_logs() {
+        // Create a Receipts object with a vector of receipt vectors
+        let receipts = Receipts {
+            receipt_vec: vec![vec![Some(Receipt {
+                tx_type: TxType::Legacy,
+                cumulative_gas_used: 46913,
+                logs: vec![Log::<LogData>::default()],
+                success: true,
+                deposit_nonce: Some(18),
+                deposit_receipt_version: Some(34),
+            })]],
+        };
+
+        // Define the first block number
+        let first_block = 123;
+
+        // Create a ExecutionOutcome object with the created bundle, receipts, requests, and
+        // first_block
+        let exec_res = ExecutionOutcome {
+            bundle: Default::default(),
+            receipts,
+            requests: vec![],
+            first_block,
+        };
+
+        // Get logs for block number 123
+        let logs: Vec<&Log> = exec_res.logs(123).unwrap().collect();
+
+        // Assert that the logs match the expected logs
+        assert_eq!(logs, vec![&Log::<LogData>::default()]);
+    }
+
+    #[test]
+    fn test_receipts_by_block() {
+        // Create a Receipts object with a vector of receipt vectors
+        let receipts = Receipts {
+            receipt_vec: vec![vec![Some(Receipt {
+                tx_type: TxType::Legacy,
+                cumulative_gas_used: 46913,
+                logs: vec![Log::<LogData>::default()],
+                success: true,
+                deposit_nonce: Some(18),
+                deposit_receipt_version: Some(34),
+            })]],
+        };
+
+        // Define the first block number
+        let first_block = 123;
+
+        // Create a ExecutionOutcome object with the created bundle, receipts, requests, and
+        // first_block
+        let exec_res = ExecutionOutcome {
+            bundle: Default::default(), // Default value for bundle
+            receipts,                   // Include the created receipts
+            requests: vec![],           // Empty vector for requests
+            first_block,                // Set the first block number
+        };
+
+        // Get receipts for block number 123 and convert the result into a vector
+        let receipts_by_block: Vec<_> = exec_res.receipts_by_block(123).iter().collect();
+
+        // Assert that the receipts for block number 123 match the expected receipts
+        assert_eq!(
+            receipts_by_block,
+            vec![&Some(Receipt {
+                tx_type: TxType::Legacy,
+                cumulative_gas_used: 46913,
+                logs: vec![Log::<LogData>::default()],
+                success: true,
+                deposit_nonce: Some(18),
+                deposit_receipt_version: Some(34),
+            })]
+        );
+    }
+
+    #[test]
+    fn test_receipts_len() {
+        // Create a Receipts object with a vector of receipt vectors
+        let receipts = Receipts {
+            receipt_vec: vec![vec![Some(Receipt {
+                tx_type: TxType::Legacy,
+                cumulative_gas_used: 46913,
+                logs: vec![Log::<LogData>::default()],
+                success: true,
+                deposit_nonce: Some(18),
+                deposit_receipt_version: Some(34),
+            })]],
+        };
+
+        // Create an empty Receipts object
+        let receipts_empty = Receipts::<Receipt> { receipt_vec: vec![] };
+
+        // Define the first block number
+        let first_block = 123;
+
+        // Create a ExecutionOutcome object with the created bundle, receipts, requests, and
+        // first_block
+        let exec_res = ExecutionOutcome {
+            bundle: Default::default(), // Default value for bundle
+            receipts,                   // Include the created receipts
+            requests: vec![],           // Empty vector for requests
+            first_block,                // Set the first block number
+        };
+
+        // Assert that the length of receipts in exec_res is 1
+        assert_eq!(exec_res.len(), 1);
+
+        // Assert that exec_res is not empty
+        assert!(!exec_res.is_empty());
+
+        // Create a ExecutionOutcome object with an empty Receipts object
+        let exec_res_empty_receipts = ExecutionOutcome {
+            bundle: Default::default(), // Default value for bundle
+            receipts: receipts_empty,   // Include the empty receipts
+            requests: vec![],           // Empty vector for requests
+            first_block,                // Set the first block number
+        };
+
+        // Assert that the length of receipts in exec_res_empty_receipts is 0
+        assert_eq!(exec_res_empty_receipts.len(), 0);
+
+        // Assert that exec_res_empty_receipts is empty
+        assert!(exec_res_empty_receipts.is_empty());
+    }
+
+    #[test]
+    fn test_revert_to() {
+        // Create a random receipt object
+        let receipt = Receipt {
+            tx_type: TxType::Legacy,
+            cumulative_gas_used: 46913,
+            logs: vec![],
+            success: true,
+            deposit_nonce: Some(18),
+            deposit_receipt_version: Some(34),
+        };
+
+        // Create a Receipts object with a vector of receipt vectors
+        let receipts = Receipts {
+            receipt_vec: vec![vec![Some(receipt.clone())], vec![Some(receipt.clone())]],
+        };
+
+        // Define the first block number
+        let first_block = 123;
+
+        // Create a request.
+        let request = bytes!("deadbeef");
+
+        // Create a vector of Requests containing the request.
+        let requests =
+            vec![Requests::new(vec![request.clone()]), Requests::new(vec![request.clone()])];
+
+        // Create a ExecutionOutcome object with the created bundle, receipts, requests, and
+        // first_block
+        let mut exec_res =
+            ExecutionOutcome { bundle: Default::default(), receipts, requests, first_block };
+
+        // Assert that the revert_to method returns true when reverting to the initial block number.
+        assert!(exec_res.revert_to(123));
+
+        // Assert that the receipts are properly cut after reverting to the initial block number.
+        assert_eq!(exec_res.receipts, Receipts { receipt_vec: vec![vec![Some(receipt)]] });
+
+        // Assert that the requests are properly cut after reverting to the initial block number.
+        assert_eq!(exec_res.requests, vec![Requests::new(vec![request])]);
+
+        // Assert that the revert_to method returns false when attempting to revert to a block
+        // number greater than the initial block number.
+        assert!(!exec_res.revert_to(133));
+
+        // Assert that the revert_to method returns false when attempting to revert to a block
+        // number less than the initial block number.
+        assert!(!exec_res.revert_to(10));
+    }
+
+    #[test]
+    fn test_extend_execution_outcome() {
+        // Create a Receipt object with specific attributes.
+        let receipt = Receipt {
+            tx_type: TxType::Legacy,
+            cumulative_gas_used: 46913,
+            logs: vec![],
+            success: true,
+            deposit_nonce: Some(18),
+            deposit_receipt_version: Some(34),
+        };
+
+        // Create a Receipts object containing the receipt.
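The indexing rule exercised by `test_block_number_to_index` above reduces to a range check against `first_block`. A minimal standalone sketch of that rule; the real method lives on `ExecutionOutcome`, and the free function here is purely illustrative:

```rust
// Maps a block number to an index into the per-block receipts vector:
// valid only within [first_block, first_block + len).
fn block_number_to_index(first_block: u64, len: usize, block: u64) -> Option<usize> {
    if block < first_block {
        return None;
    }
    let index = (block - first_block) as usize;
    (index < len).then_some(index)
}

fn main() {
    assert_eq!(block_number_to_index(123, 1, 12), None);  // before the range
    assert_eq!(block_number_to_index(123, 1, 133), None); // past the range
    assert_eq!(block_number_to_index(123, 1, 123), Some(0));
}
```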
+ let receipts = Receipts { receipt_vec: vec![vec![Some(receipt.clone())]] }; + + // Create a request. + let request = bytes!("deadbeef"); + + // Create a vector of Requests containing the request. + let requests = vec![Requests::new(vec![request.clone()])]; + + // Define the initial block number. + let first_block = 123; + + // Create an ExecutionOutcome object. + let mut exec_res = + ExecutionOutcome { bundle: Default::default(), receipts, requests, first_block }; + + // Extend the ExecutionOutcome object by itself. + exec_res.extend(exec_res.clone()); + + // Assert the extended ExecutionOutcome matches the expected outcome. + assert_eq!( + exec_res, + ExecutionOutcome { + bundle: Default::default(), + receipts: Receipts { + receipt_vec: vec![vec![Some(receipt.clone())], vec![Some(receipt)]] + }, + requests: vec![Requests::new(vec![request.clone()]), Requests::new(vec![request])], + first_block: 123, + } + ); + } + + #[test] + fn test_split_at_execution_outcome() { + // Create a random receipt object + let receipt = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + }; + + // Create a Receipts object with a vector of receipt vectors + let receipts = Receipts { + receipt_vec: vec![ + vec![Some(receipt.clone())], + vec![Some(receipt.clone())], + vec![Some(receipt.clone())], + ], + }; + + // Define the first block number + let first_block = 123; + + // Create a request. + let request = bytes!("deadbeef"); + + // Create a vector of Requests containing the request. + let requests = vec![ + Requests::new(vec![request.clone()]), + Requests::new(vec![request.clone()]), + Requests::new(vec![request.clone()]), + ]; + + // Create a ExecutionOutcome object with the created bundle, receipts, requests, and + // first_block + let exec_res = + ExecutionOutcome { bundle: Default::default(), receipts, requests, first_block }; + + // Split the ExecutionOutcome at block number 124 + let result = exec_res.clone().split_at(124); + + // Define the expected lower ExecutionOutcome after splitting + let lower_execution_outcome = ExecutionOutcome { + bundle: Default::default(), + receipts: Receipts { receipt_vec: vec![vec![Some(receipt.clone())]] }, + requests: vec![Requests::new(vec![request.clone()])], + first_block, + }; + + // Define the expected higher ExecutionOutcome after splitting + let higher_execution_outcome = ExecutionOutcome { + bundle: Default::default(), + receipts: Receipts { + receipt_vec: vec![vec![Some(receipt.clone())], vec![Some(receipt)]], + }, + requests: vec![Requests::new(vec![request.clone()]), Requests::new(vec![request])], + first_block: 124, + }; + + // Assert that the split result matches the expected lower and higher outcomes + assert_eq!(result.0, Some(lower_execution_outcome)); + assert_eq!(result.1, higher_execution_outcome); + + // Assert that splitting at the first block number returns None for the lower outcome + assert_eq!(exec_res.clone().split_at(123), (None, exec_res)); + } } diff --git a/crates/optimism/hardforks/src/dev.rs b/crates/optimism/hardforks/src/dev.rs index 328ef501c46e..5fe77a314029 100644 --- a/crates/optimism/hardforks/src/dev.rs +++ b/crates/optimism/hardforks/src/dev.rs @@ -24,13 +24,13 @@ pub static DEV_HARDFORKS: LazyLock = LazyLock::new(|| { EthereumHardfork::Paris.boxed(), ForkCondition::TTD { fork_block: None, total_difficulty: U256::ZERO }, ), - (crate::OptimismHardfork::Bedrock.boxed(), ForkCondition::Block(0)), - 
(crate::OptimismHardfork::Regolith.boxed(), ForkCondition::Timestamp(0)),
+        (crate::OpHardfork::Bedrock.boxed(), ForkCondition::Block(0)),
+        (crate::OpHardfork::Regolith.boxed(), ForkCondition::Timestamp(0)),
         (EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(0)),
-        (crate::OptimismHardfork::Canyon.boxed(), ForkCondition::Timestamp(0)),
+        (crate::OpHardfork::Canyon.boxed(), ForkCondition::Timestamp(0)),
         (EthereumHardfork::Cancun.boxed(), ForkCondition::Timestamp(0)),
-        (crate::OptimismHardfork::Ecotone.boxed(), ForkCondition::Timestamp(0)),
-        (crate::OptimismHardfork::Fjord.boxed(), ForkCondition::Timestamp(0)),
-        (crate::OptimismHardfork::Granite.boxed(), ForkCondition::Timestamp(0)),
+        (crate::OpHardfork::Ecotone.boxed(), ForkCondition::Timestamp(0)),
+        (crate::OpHardfork::Fjord.boxed(), ForkCondition::Timestamp(0)),
+        (crate::OpHardfork::Granite.boxed(), ForkCondition::Timestamp(0)),
     ])
 });
diff --git a/crates/optimism/hardforks/src/hardfork.rs b/crates/optimism/hardforks/src/hardfork.rs
index 011c4ae72fdd..661816ae5fe0 100644
--- a/crates/optimism/hardforks/src/hardfork.rs
+++ b/crates/optimism/hardforks/src/hardfork.rs
@@ -18,7 +18,7 @@ hardfork!(
     ///
     /// When building a list of hardforks for a chain, it's still expected to mix with
     /// [`EthereumHardfork`].
-    OptimismHardfork {
+    OpHardfork {
         /// Bedrock: .
         Bedrock,
         /// Regolith: .
@@ -31,10 +31,12 @@
         Fjord,
         /// Granite:
         Granite,
+        /// Holocene:
+        Holocene,
     }
 );
 
-impl OptimismHardfork {
+impl OpHardfork {
     /// Retrieves the activation block for the specified hardfork on the given chain.
     pub fn activation_block<H: Hardfork>(self, fork: H, chain: Chain) -> Option<u64> {
         if chain == Chain::base_sepolia() {
@@ -156,6 +158,7 @@ impl OptimismHardfork {
             Self::Ecotone => Some(1708534800),
             Self::Fjord => Some(1716998400),
             Self::Granite => Some(1723478400),
+            Self::Holocene => Some(1732633200),
         },
     )
 }
@@ -190,6 +193,7 @@ impl OptimismHardfork {
             Self::Ecotone => Some(1710374401),
             Self::Fjord => Some(1720627201),
             Self::Granite => Some(1726070401),
+            Self::Holocene => None,
         },
     )
 }
@@ -253,6 +257,7 @@ impl OptimismHardfork {
             (Self::Ecotone.boxed(), ForkCondition::Timestamp(1708534800)),
             (Self::Fjord.boxed(), ForkCondition::Timestamp(1716998400)),
             (Self::Granite.boxed(), ForkCondition::Timestamp(1723478400)),
+            (Self::Holocene.boxed(), ForkCondition::Timestamp(1732633200)),
         ])
     }
@@ -284,6 +289,7 @@ impl OptimismHardfork {
             (Self::Ecotone.boxed(), ForkCondition::Timestamp(1708534800)),
             (Self::Fjord.boxed(), ForkCondition::Timestamp(1716998400)),
             (Self::Granite.boxed(), ForkCondition::Timestamp(1723478400)),
+            (Self::Holocene.boxed(), ForkCondition::Timestamp(1732633200)),
         ])
     }
@@ -324,13 +330,13 @@
 fn match_hardfork<H, HF, OHF>(fork: H, hardfork_fn: HF, optimism_hardfork_fn: OHF) -> Option<u64>
 where
     H: Hardfork,
     HF: Fn(&EthereumHardfork) -> Option<u64>,
-    OHF: Fn(&OptimismHardfork) -> Option<u64>,
+    OHF: Fn(&OpHardfork) -> Option<u64>,
 {
     let fork: &dyn Any = &fork;
     if let Some(fork) = fork.downcast_ref::<EthereumHardfork>() {
         return hardfork_fn(fork)
     }
-    fork.downcast_ref::<OptimismHardfork>().and_then(optimism_hardfork_fn)
+    fork.downcast_ref::<OpHardfork>().and_then(optimism_hardfork_fn)
 }
 
 #[cfg(test)]
@@ -342,35 +348,34 @@ mod tests {
     #[test]
     fn test_match_hardfork() {
         assert_eq!(
-            OptimismHardfork::base_mainnet_activation_block(EthereumHardfork::Cancun),
+            OpHardfork::base_mainnet_activation_block(EthereumHardfork::Cancun),
             Some(11188936)
         );
-        assert_eq!(
-            OptimismHardfork::base_mainnet_activation_block(OptimismHardfork::Canyon),
-            Some(9101527)
-        );
+        assert_eq!(OpHardfork::base_mainnet_activation_block(OpHardfork::Canyon),
Some(9101527)); } #[test] fn check_op_hardfork_from_str() { - let hardfork_str = ["beDrOck", "rEgOlITH", "cAnYoN", "eCoToNe", "FJorD", "GRaNiTe"]; + let hardfork_str = + ["beDrOck", "rEgOlITH", "cAnYoN", "eCoToNe", "FJorD", "GRaNiTe", "hOlOcEnE"]; let expected_hardforks = [ - OptimismHardfork::Bedrock, - OptimismHardfork::Regolith, - OptimismHardfork::Canyon, - OptimismHardfork::Ecotone, - OptimismHardfork::Fjord, - OptimismHardfork::Granite, + OpHardfork::Bedrock, + OpHardfork::Regolith, + OpHardfork::Canyon, + OpHardfork::Ecotone, + OpHardfork::Fjord, + OpHardfork::Granite, + OpHardfork::Holocene, ]; - let hardforks: Vec = - hardfork_str.iter().map(|h| OptimismHardfork::from_str(h).unwrap()).collect(); + let hardforks: Vec = + hardfork_str.iter().map(|h| OpHardfork::from_str(h).unwrap()).collect(); assert_eq!(hardforks, expected_hardforks); } #[test] fn check_nonexistent_hardfork_from_str() { - assert!(OptimismHardfork::from_str("not a hardfork").is_err()); + assert!(OpHardfork::from_str("not a hardfork").is_err()); } } diff --git a/crates/optimism/hardforks/src/lib.rs b/crates/optimism/hardforks/src/lib.rs index 91c11d3fd23c..3915bcf6cbda 100644 --- a/crates/optimism/hardforks/src/lib.rs +++ b/crates/optimism/hardforks/src/lib.rs @@ -14,30 +14,47 @@ pub mod hardfork; mod dev; pub use dev::DEV_HARDFORKS; -pub use hardfork::OptimismHardfork; +pub use hardfork::OpHardfork; use reth_ethereum_forks::EthereumHardforks; /// Extends [`EthereumHardforks`] with optimism helper methods. -pub trait OptimismHardforks: EthereumHardforks { - /// Convenience method to check if [`OptimismHardfork::Bedrock`] is active at a given block +pub trait OpHardforks: EthereumHardforks { + /// Convenience method to check if [`OpHardfork::Bedrock`] is active at a given block /// number. fn is_bedrock_active_at_block(&self, block_number: u64) -> bool { - self.fork(OptimismHardfork::Bedrock).active_at_block(block_number) + self.fork(OpHardfork::Bedrock).active_at_block(block_number) } - /// Returns `true` if [`Ecotone`](OptimismHardfork::Ecotone) is active at given block timestamp. + /// Returns `true` if [`Canyon`](OpHardfork::Canyon) is active at given block timestamp. + fn is_canyon_active_at_timestamp(&self, timestamp: u64) -> bool { + self.fork(OpHardfork::Canyon).active_at_timestamp(timestamp) + } + + /// Returns `true` if [`Ecotone`](OpHardfork::Ecotone) is active at given block timestamp. fn is_ecotone_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OptimismHardfork::Ecotone).active_at_timestamp(timestamp) + self.fork(OpHardfork::Ecotone).active_at_timestamp(timestamp) } - /// Returns `true` if [`Ecotone`](OptimismHardfork::Ecotone) is active at given block timestamp. + /// Returns `true` if [`Fjord`](OpHardfork::Fjord) is active at given block timestamp. fn is_fjord_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OptimismHardfork::Ecotone).active_at_timestamp(timestamp) + self.fork(OpHardfork::Fjord).active_at_timestamp(timestamp) } - /// Returns `true` if [`Granite`](OptimismHardfork::Granite) is active at given block timestamp. + /// Returns `true` if [`Granite`](OpHardfork::Granite) is active at given block timestamp. fn is_granite_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OptimismHardfork::Granite).active_at_timestamp(timestamp) + self.fork(OpHardfork::Granite).active_at_timestamp(timestamp) + } + + /// Returns `true` if [`Holocene`](OpHardfork::Holocene) is active at given block + /// timestamp. 
+ fn is_holocene_active_at_timestamp(&self, timestamp: u64) -> bool { + self.fork(OpHardfork::Holocene).active_at_timestamp(timestamp) + } + + /// Returns `true` if [`Regolith`](OpHardfork::Regolith) is active at given block + /// timestamp. + fn is_regolith_active_at_timestamp(&self, timestamp: u64) -> bool { + self.fork(OpHardfork::Regolith).active_at_timestamp(timestamp) } } diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 37cf4a328ea0..fb8cc27787e3 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -16,7 +16,6 @@ reth-chainspec.workspace = true reth-engine-local.workspace = true reth-primitives.workspace = true reth-payload-builder.workspace = true -reth-auto-seal-consensus.workspace = true reth-basic-payload-builder.workspace = true reth-consensus.workspace = true reth-node-api.workspace = true @@ -28,6 +27,7 @@ reth-network.workspace = true reth-evm.workspace = true reth-revm = { workspace = true, features = ["std"] } reth-beacon-consensus.workspace = true +reth-trie-db.workspace = true # op-reth reth-optimism-payload-builder.workspace = true @@ -45,6 +45,7 @@ alloy-eips.workspace = true alloy-primitives.workspace = true op-alloy-rpc-types-engine.workspace = true alloy-rpc-types-engine.workspace = true +alloy-consensus.workspace = true # misc clap.workspace = true @@ -55,49 +56,63 @@ parking_lot.workspace = true # rpc serde_json.workspace = true +# test-utils dependencies +reth = { workspace = true, optional = true } +reth-e2e-test-utils = { workspace = true, optional = true } +alloy-genesis = { workspace = true, optional = true } +tokio = { workspace = true, optional = true } + [dev-dependencies] -reth.workspace = true +reth-optimism-node = { workspace = true, features = ["test-utils"] } reth-db.workspace = true -reth-e2e-test-utils.workspace = true reth-node-builder = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-revm = { workspace = true, features = ["test-utils"] } -tokio.workspace = true alloy-primitives.workspace = true -alloy-genesis.workspace = true op-alloy-consensus.workspace = true +alloy-signer-local.workspace = true +alloy-network.workspace = true +alloy-consensus.workspace = true +futures.workspace = true [features] optimism = [ - "reth-primitives/optimism", - "reth-provider/optimism", - "reth-optimism-evm/optimism", - "reth-optimism-payload-builder/optimism", - "reth-beacon-consensus/optimism", - "revm/optimism", - "reth-auto-seal-consensus/optimism", - "reth-optimism-rpc/optimism", - "reth-engine-local/optimism", - "reth-optimism-consensus/optimism", - "reth-db/optimism" + "reth-primitives/optimism", + "reth-provider/optimism", + "reth-optimism-evm/optimism", + "reth-optimism-payload-builder/optimism", + "reth-beacon-consensus/optimism", + "revm/optimism", + "reth-optimism-rpc/optimism", + "reth-engine-local/optimism", + "reth-optimism-consensus/optimism", + "reth-db/optimism", + "reth-optimism-node/optimism", ] asm-keccak = [ - "reth-primitives/asm-keccak", - "reth/asm-keccak", - "alloy-primitives/asm-keccak", - "revm/asm-keccak" + "reth-primitives/asm-keccak", + "reth/asm-keccak", + "alloy-primitives/asm-keccak", + "revm/asm-keccak", + "reth-optimism-node/asm-keccak", ] test-utils = [ - "reth-node-builder/test-utils", - "reth-chainspec/test-utils", - "reth-consensus/test-utils", - "reth-evm/test-utils", - "reth-network/test-utils", - "reth-payload-builder/test-utils", - "reth-primitives/test-utils", - "reth-revm/test-utils", - 
"reth-db/test-utils", - "reth-provider/test-utils", - "reth-transaction-pool/test-utils", - "revm/test-utils" + "reth", + "reth-e2e-test-utils", + "alloy-genesis", + "tokio", + "reth-node-builder/test-utils", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-evm/test-utils", + "reth-network/test-utils", + "reth-payload-builder/test-utils", + "reth-primitives/test-utils", + "reth-revm/test-utils", + "reth-db/test-utils", + "reth-provider/test-utils", + "reth-transaction-pool/test-utils", + "reth-trie-db/test-utils", + "revm/test-utils", + "reth-optimism-node/test-utils", ] diff --git a/crates/optimism/node/src/args.rs b/crates/optimism/node/src/args.rs index 54be83dc5101..b84e98d28b19 100644 --- a/crates/optimism/node/src/args.rs +++ b/crates/optimism/node/src/args.rs @@ -38,16 +38,23 @@ pub struct RollupArgs { #[arg(long = "rollup.discovery.v4", default_value = "false")] pub discovery_v4: bool, - /// Enable the engine2 experimental features on op-reth binary + /// Enable the experimental engine features on reth binary + /// + /// DEPRECATED: experimental engine is default now, use --engine.legacy to enable the legacy + /// functionality #[arg(long = "engine.experimental", default_value = "false")] pub experimental: bool, + /// Enable the legacy engine on reth binary + #[arg(long = "engine.legacy", default_value = "false")] + pub legacy: bool, + /// Configure persistence threshold for engine experimental. - #[arg(long = "engine.persistence-threshold", requires = "experimental", default_value_t = DEFAULT_PERSISTENCE_THRESHOLD)] + #[arg(long = "engine.persistence-threshold", conflicts_with = "legacy", default_value_t = DEFAULT_PERSISTENCE_THRESHOLD)] pub persistence_threshold: u64, /// Configure the target number of blocks to keep in memory. - #[arg(long = "engine.memory-block-buffer-target", requires = "experimental", default_value_t = DEFAULT_MEMORY_BLOCK_BUFFER_TARGET)] + #[arg(long = "engine.memory-block-buffer-target", conflicts_with = "legacy", default_value_t = DEFAULT_MEMORY_BLOCK_BUFFER_TARGET)] pub memory_block_buffer_target: u64, } @@ -60,6 +67,7 @@ impl Default for RollupArgs { compute_pending_block: false, discovery_v4: false, experimental: false, + legacy: false, persistence_threshold: DEFAULT_PERSISTENCE_THRESHOLD, memory_block_buffer_target: DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, } diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index cec609671a38..dd4d0c13f24a 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -14,53 +14,53 @@ use reth_node_api::{ validate_version_specific_fields, EngineTypes, EngineValidator, }; use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_forks::OptimismHardfork; -use reth_optimism_payload_builder::{OptimismBuiltPayload, OptimismPayloadBuilderAttributes}; +use reth_optimism_forks::{OpHardfork, OpHardforks}; +use reth_optimism_payload_builder::{OpBuiltPayload, OpPayloadBuilderAttributes}; /// The types used in the optimism beacon consensus engine. 
 #[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)]
 #[non_exhaustive]
-pub struct OptimismEngineTypes<T: PayloadTypes = OptimismPayloadTypes> {
+pub struct OpEngineTypes<T: PayloadTypes = OpPayloadTypes> {
     _marker: std::marker::PhantomData<T>,
 }
 
-impl<T: PayloadTypes> PayloadTypes for OptimismEngineTypes<T> {
+impl<T: PayloadTypes> PayloadTypes for OpEngineTypes<T> {
     type BuiltPayload = T::BuiltPayload;
     type PayloadAttributes = T::PayloadAttributes;
     type PayloadBuilderAttributes = T::PayloadBuilderAttributes;
 }
 
-impl<T: PayloadTypes> EngineTypes for OptimismEngineTypes<T>
+impl<T: PayloadTypes> EngineTypes for OpEngineTypes<T>
 where
     T::BuiltPayload: TryInto<ExecutionPayloadV1>
         + TryInto<ExecutionPayloadEnvelopeV2>
         + TryInto<OpExecutionPayloadEnvelopeV3>
         + TryInto<OpExecutionPayloadEnvelopeV4>,
 {
-    type ExecutionPayloadV1 = ExecutionPayloadV1;
+    type ExecutionPayloadEnvelopeV1 = ExecutionPayloadV1;
     type ExecutionPayloadEnvelopeV2 = ExecutionPayloadEnvelopeV2;
     type ExecutionPayloadEnvelopeV3 = OpExecutionPayloadEnvelopeV3;
     type ExecutionPayloadEnvelopeV4 = OpExecutionPayloadEnvelopeV4;
 }
 
-/// A default payload type for [`OptimismEngineTypes`]
+/// A default payload type for [`OpEngineTypes`]
 #[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)]
 #[non_exhaustive]
-pub struct OptimismPayloadTypes;
+pub struct OpPayloadTypes;
 
-impl PayloadTypes for OptimismPayloadTypes {
-    type BuiltPayload = OptimismBuiltPayload;
+impl PayloadTypes for OpPayloadTypes {
+    type BuiltPayload = OpBuiltPayload;
     type PayloadAttributes = OpPayloadAttributes;
-    type PayloadBuilderAttributes = OptimismPayloadBuilderAttributes;
+    type PayloadBuilderAttributes = OpPayloadBuilderAttributes;
 }
 
 /// Validator for Optimism engine API.
 #[derive(Debug, Clone)]
-pub struct OptimismEngineValidator {
+pub struct OpEngineValidator {
     chain_spec: Arc<OpChainSpec>,
 }
 
-impl OptimismEngineValidator {
+impl OpEngineValidator {
     /// Instantiates a new validator.
     pub const fn new(chain_spec: Arc<OpChainSpec>) -> Self {
         Self { chain_spec }
@@ -81,7 +81,7 @@ pub fn validate_withdrawals_presence(
     timestamp: u64,
     has_withdrawals: bool,
 ) -> Result<(), EngineObjectValidationError> {
-    let is_shanghai = chain_spec.fork(OptimismHardfork::Canyon).active_at_timestamp(timestamp);
+    let is_shanghai = chain_spec.fork(OpHardfork::Canyon).active_at_timestamp(timestamp);
 
     match version {
         EngineApiMessageVersion::V1 => {
@@ -109,7 +109,7 @@ pub fn validate_withdrawals_presence(
     Ok(())
 }
 
-impl<Types> EngineValidator<Types> for OptimismEngineValidator
+impl<Types> EngineValidator<Types> for OpEngineValidator
 where
     Types: EngineTypes<PayloadAttributes = OpPayloadAttributes>,
 {
@@ -147,6 +147,132 @@ where
             ))
         }
 
+        if self.chain_spec.is_holocene_active_at_timestamp(attributes.payload_attributes.timestamp)
+        {
+            let (elasticity, denominator) =
+                attributes.decode_eip_1559_params().ok_or_else(|| {
+                    EngineObjectValidationError::InvalidParams(
+                        "MissingEip1559ParamsInPayloadAttributes".to_string().into(),
+                    )
+                })?;
+            if elasticity != 0 && denominator == 0 {
+                return Err(EngineObjectValidationError::InvalidParams(
+                    "Eip1559ParamsDenominatorZero".to_string().into(),
+                ))
+            }
+        }
+
         Ok(())
     }
 }
+
+#[cfg(test)]
+mod test {
+
+    use crate::engine;
+    use alloy_primitives::{b64, Address, B256, B64};
+    use alloy_rpc_types_engine::PayloadAttributes;
+    use reth_optimism_chainspec::BASE_SEPOLIA;
+
+    use super::*;
+
+    fn get_chainspec() -> Arc<OpChainSpec> {
+        let hardforks = OpHardfork::base_sepolia();
+        Arc::new(OpChainSpec {
+            inner: ChainSpec {
+                chain: BASE_SEPOLIA.inner.chain,
+                genesis: BASE_SEPOLIA.inner.genesis.clone(),
+                genesis_hash: BASE_SEPOLIA.inner.genesis_hash.clone(),
+                paris_block_and_final_difficulty: BASE_SEPOLIA
+                    .inner
+                    .paris_block_and_final_difficulty,
+                hardforks,
+                base_fee_params: BASE_SEPOLIA.inner.base_fee_params.clone(),
+                max_gas_limit: BASE_SEPOLIA.inner.max_gas_limit,
+                prune_delete_limit: 10000,
+                ..Default::default()
+            },
+        })
+    }
+
+    const fn get_attributes(eip_1559_params: Option<B64>, timestamp: u64) -> OpPayloadAttributes {
+        OpPayloadAttributes {
+            gas_limit: Some(1000),
+            eip_1559_params,
+            transactions: None,
+            no_tx_pool: None,
+            payload_attributes: PayloadAttributes {
+                timestamp,
+                prev_randao: B256::ZERO,
+                suggested_fee_recipient: Address::ZERO,
+                withdrawals: Some(vec![]),
+                parent_beacon_block_root: Some(B256::ZERO),
+            },
+        }
+    }
+
+    #[test]
+    fn test_well_formed_attributes_pre_holocene() {
+        let validator = OpEngineValidator::new(get_chainspec());
+        let attributes = get_attributes(None, 1732633199);
+
+        let result = <engine::OpEngineValidator as EngineValidator<OpEngineTypes>>::ensure_well_formed_attributes(
+            &validator, EngineApiMessageVersion::V3, &attributes
+        );
+        assert!(result.is_ok());
+    }
+
+    #[test]
+    fn test_well_formed_attributes_holocene_no_eip1559_params() {
+        let validator = OpEngineValidator::new(get_chainspec());
+        let attributes = get_attributes(None, 1732633200);
+
+        let result = <engine::OpEngineValidator as EngineValidator<OpEngineTypes>>::ensure_well_formed_attributes(
+            &validator, EngineApiMessageVersion::V3, &attributes
+        );
+        assert!(matches!(result, Err(EngineObjectValidationError::InvalidParams(_))));
+    }
+
+    #[test]
+    fn test_well_formed_attributes_holocene_eip1559_params_zero_denominator() {
+        let validator = OpEngineValidator::new(get_chainspec());
+        let attributes = get_attributes(Some(b64!("0000000000000008")), 1732633200);
+
+        let result = <engine::OpEngineValidator as EngineValidator<OpEngineTypes>>::ensure_well_formed_attributes(
+            &validator, EngineApiMessageVersion::V3, &attributes
+        );
+        assert!(matches!(result, Err(EngineObjectValidationError::InvalidParams(_))));
+    }
+
+    #[test]
+    fn test_well_formed_attributes_holocene_valid() {
+        let validator = OpEngineValidator::new(get_chainspec());
+        let attributes = get_attributes(Some(b64!("0000000800000008")), 1732633200);
+
+        let result = <engine::OpEngineValidator as EngineValidator<OpEngineTypes>>::ensure_well_formed_attributes(
+            &validator, EngineApiMessageVersion::V3, &attributes
+        );
+        assert!(result.is_ok());
+    }
+
+    #[test]
+    fn test_well_formed_attributes_holocene_valid_all_zero() {
+        let validator = OpEngineValidator::new(get_chainspec());
+        let attributes = get_attributes(Some(b64!("0000000000000000")), 1732633200);
+
+        let result = <engine::OpEngineValidator as EngineValidator<OpEngineTypes>>::ensure_well_formed_attributes(
+            &validator, EngineApiMessageVersion::V3, &attributes
+        );
+        assert!(result.is_ok());
+    }
+}
diff --git a/crates/optimism/node/src/lib.rs b/crates/optimism/node/src/lib.rs
index 768f4d94efd5..7af0f3b8a722 100644
--- a/crates/optimism/node/src/lib.rs
+++ b/crates/optimism/node/src/lib.rs
@@ -15,15 +15,19 @@ pub mod args;
 /// Exports optimism-specific implementations of the [`EngineTypes`](reth_node_api::EngineTypes)
 /// trait.
 pub mod engine;
-pub use engine::OptimismEngineTypes;
+pub use engine::OpEngineTypes;
 
 pub mod node;
-pub use node::OptimismNode;
+pub use node::OpNode;
 
 pub mod txpool;
+
+/// Helpers for running test node instances.
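The Holocene checks tested above treat the 8-byte `eip1559Params` field as a pair of big-endian `u32`s: missing params are rejected outright once Holocene is active, as is a non-zero elasticity with a zero denominator (which would put a zero in the base-fee formula's divisor), while an all-zero field is accepted and falls back to the chain-spec defaults. A standalone sketch of that rule; the (denominator, elasticity) byte layout is an assumption for illustration, and the real decoding lives in `decode_eip_1559_params`:

```rust
// Validate an optional 8-byte eip1559Params field, assumed here to be
// a big-endian u32 denominator followed by a big-endian u32 elasticity.
fn validate_eip_1559_params(params: Option<[u8; 8]>) -> Result<(), &'static str> {
    let p = params.ok_or("MissingEip1559ParamsInPayloadAttributes")?;
    let denominator = u32::from_be_bytes(p[0..4].try_into().unwrap());
    let elasticity = u32::from_be_bytes(p[4..8].try_into().unwrap());
    if elasticity != 0 && denominator == 0 {
        return Err("Eip1559ParamsDenominatorZero");
    }
    Ok(())
}

fn main() {
    assert!(validate_eip_1559_params(None).is_err());
    assert!(validate_eip_1559_params(Some([0, 0, 0, 0, 0, 0, 0, 8])).is_err()); // denominator 0
    assert!(validate_eip_1559_params(Some([0, 0, 0, 8, 0, 0, 0, 8])).is_ok());
    assert!(validate_eip_1559_params(Some([0; 8])).is_ok()); // all-zero: use defaults
}
```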
+#[cfg(feature = "test-utils")] +pub mod utils; + pub use reth_optimism_payload_builder::{ - OptimismBuiltPayload, OptimismPayloadBuilder, OptimismPayloadBuilderAttributes, + OpBuiltPayload, OpPayloadBuilder, OpPayloadBuilderAttributes, }; pub use reth_optimism_evm::*; diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 9492bb8c429a..0c2186c72684 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -2,12 +2,13 @@ use std::sync::Arc; +use alloy_consensus::Header; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_chainspec::{EthChainSpec, Hardforks}; use reth_evm::{execute::BasicBlockExecutorProvider, ConfigureEvm}; use reth_network::{NetworkConfig, NetworkHandle, NetworkManager, PeersInfo}; use reth_node_api::{ - AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, NodePrimitives, + AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, NodePrimitives, PayloadBuilder, }; use reth_node_builder::{ components::{ @@ -19,42 +20,47 @@ use reth_node_builder::{ BuilderContext, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, }; use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_consensus::OptimismBeaconConsensus; -use reth_optimism_evm::{OpExecutionStrategyFactory, OptimismEvmConfig}; +use reth_optimism_consensus::OpBeaconConsensus; +use reth_optimism_evm::{OpEvmConfig, OpExecutionStrategyFactory}; +use reth_optimism_payload_builder::builder::OpPayloadTransactions; use reth_optimism_rpc::OpEthApi; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_primitives::{Block, Header}; +use reth_primitives::{Block, Receipt, TransactionSigned, TxType}; use reth_provider::CanonStateSubscriptions; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::{ blobstore::DiskFileBlobStore, CoinbaseTipOrdering, TransactionPool, TransactionValidationTaskExecutor, }; +use reth_trie_db::MerklePatriciaTrie; use crate::{ args::RollupArgs, - engine::OptimismEngineValidator, + engine::OpEngineValidator, txpool::{OpTransactionPool, OpTransactionValidator}, - OptimismEngineTypes, + OpEngineTypes, }; /// Optimism primitive types. -#[derive(Debug)] +#[derive(Debug, Default, Clone)] pub struct OpPrimitives; impl NodePrimitives for OpPrimitives { type Block = Block; + type SignedTx = TransactionSigned; + type TxType = TxType; + type Receipt = Receipt; } /// Type configuration for a regular Optimism node. #[derive(Debug, Default, Clone)] #[non_exhaustive] -pub struct OptimismNode { +pub struct OpNode { /// Additional Optimism args pub args: RollupArgs, } -impl OptimismNode { +impl OpNode { /// Creates a new instance of the Optimism node type. pub const fn new(args: RollupArgs) -> Self { Self { args } @@ -65,49 +71,46 @@ impl OptimismNode { args: RollupArgs, ) -> ComponentsBuilder< Node, - OptimismPoolBuilder, - OptimismPayloadBuilder, - OptimismNetworkBuilder, - OptimismExecutorBuilder, - OptimismConsensusBuilder, + OpPoolBuilder, + OpPayloadBuilder, + OpNetworkBuilder, + OpExecutorBuilder, + OpConsensusBuilder, > where Node: FullNodeTypes< - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine, >, { let RollupArgs { disable_txpool_gossip, compute_pending_block, discovery_v4, .. 
} = args; ComponentsBuilder::default() .node_types::() - .pool(OptimismPoolBuilder::default()) - .payload(OptimismPayloadBuilder::new(compute_pending_block)) - .network(OptimismNetworkBuilder { + .pool(OpPoolBuilder::default()) + .payload(OpPayloadBuilder::new(compute_pending_block)) + .network(OpNetworkBuilder { disable_txpool_gossip, disable_discovery_v4: !discovery_v4, }) - .executor(OptimismExecutorBuilder::default()) - .consensus(OptimismConsensusBuilder::default()) + .executor(OpExecutorBuilder::default()) + .consensus(OpConsensusBuilder::default()) } } -impl Node for OptimismNode +impl Node for OpNode where - N: FullNodeTypes< - Types: NodeTypesWithEngine, - >, + N: FullNodeTypes>, { type ComponentsBuilder = ComponentsBuilder< N, - OptimismPoolBuilder, - OptimismPayloadBuilder, - OptimismNetworkBuilder, - OptimismExecutorBuilder, - OptimismConsensusBuilder, + OpPoolBuilder, + OpPayloadBuilder, + OpNetworkBuilder, + OpExecutorBuilder, + OpConsensusBuilder, >; - type AddOns = OptimismAddOns< - NodeAdapter>::Components>, - >; + type AddOns = + OpAddOns>::Components>>; fn components_builder(&self) -> Self::ComponentsBuilder { let Self { args } = self; @@ -115,42 +118,44 @@ where } fn add_ons(&self) -> Self::AddOns { - OptimismAddOns::new(self.args.sequencer_http.clone()) + OpAddOns::new(self.args.sequencer_http.clone()) } } -impl NodeTypes for OptimismNode { +impl NodeTypes for OpNode { type Primitives = OpPrimitives; type ChainSpec = OpChainSpec; + type StateCommitment = MerklePatriciaTrie; } -impl NodeTypesWithEngine for OptimismNode { - type Engine = OptimismEngineTypes; +impl NodeTypesWithEngine for OpNode { + type Engine = OpEngineTypes; } /// Add-ons w.r.t. optimism. #[derive(Debug)] -pub struct OptimismAddOns( - pub RpcAddOns, OptimismEngineValidatorBuilder>, -); +pub struct OpAddOns(pub RpcAddOns, OpEngineValidatorBuilder>); -impl Default for OptimismAddOns { +impl Default for OpAddOns { fn default() -> Self { Self::new(None) } } -impl OptimismAddOns { +impl OpAddOns { /// Create a new instance with the given `sequencer_http` URL. pub fn new(sequencer_http: Option) -> Self { Self(RpcAddOns::new(move |ctx| OpEthApi::new(ctx, sequencer_http), Default::default())) } } -impl NodeAddOns for OptimismAddOns +impl NodeAddOns for OpAddOns where - N: FullNodeComponents>, - OptimismEngineValidator: EngineValidator<::Engine>, + N: FullNodeComponents< + Types: NodeTypes, + PayloadBuilder: PayloadBuilder::Engine>, + >, + OpEngineValidator: EngineValidator<::Engine>, { type Handle = RpcHandle>; @@ -162,10 +167,13 @@ where } } -impl RethRpcAddOns for OptimismAddOns +impl RethRpcAddOns for OpAddOns where - N: FullNodeComponents>, - OptimismEngineValidator: EngineValidator<::Engine>, + N: FullNodeComponents< + Types: NodeTypes, + PayloadBuilder: PayloadBuilder::Engine>, + >, + OpEngineValidator: EngineValidator<::Engine>, { type EthApi = OpEthApi; @@ -177,20 +185,20 @@ where /// A regular optimism evm and executor builder. 
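+/// Wires an [`OpEvmConfig`] into a [`BasicBlockExecutorProvider`] backed by the
+/// [`OpExecutionStrategyFactory`].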
#[derive(Debug, Default, Clone, Copy)] #[non_exhaustive] -pub struct OptimismExecutorBuilder; +pub struct OpExecutorBuilder; -impl ExecutorBuilder for OptimismExecutorBuilder +impl ExecutorBuilder for OpExecutorBuilder where Node: FullNodeTypes>, { - type EVM = OptimismEvmConfig; + type EVM = OpEvmConfig; type Executor = BasicBlockExecutorProvider; async fn build_evm( self, ctx: &BuilderContext, ) -> eyre::Result<(Self::EVM, Self::Executor)> { - let evm_config = OptimismEvmConfig::new(ctx.chain_spec()); + let evm_config = OpEvmConfig::new(ctx.chain_spec()); let strategy_factory = OpExecutionStrategyFactory::new(ctx.chain_spec(), evm_config.clone()); let executor = BasicBlockExecutorProvider::new(strategy_factory); @@ -204,12 +212,12 @@ where /// This contains various settings that can be configured and take precedence over the node's /// config. #[derive(Debug, Default, Clone)] -pub struct OptimismPoolBuilder { +pub struct OpPoolBuilder { /// Enforced overrides that are applied to the pool config. pub pool_config_overrides: PoolBuilderConfigOverrides, } -impl PoolBuilder for OptimismPoolBuilder +impl PoolBuilder for OpPoolBuilder where Node: FullNodeTypes>, { @@ -286,7 +294,7 @@ where /// A basic optimism payload service builder #[derive(Debug, Default, Clone)] -pub struct OptimismPayloadBuilder { +pub struct OpPayloadBuilder { /// By default the pending block equals the latest block /// to save resources and not leak txs from the tx-pool, /// this flag enables computing of the pending block @@ -296,12 +304,30 @@ pub struct OptimismPayloadBuilder { /// will use the payload attributes from the latest block. Note /// that this flag is not yet functional. pub compute_pending_block: bool, + /// The type responsible for yielding the best transactions for the payload if mempool + /// transactions are allowed. + pub best_transactions: Txs, } -impl OptimismPayloadBuilder { +impl OpPayloadBuilder { /// Create a new instance with the given `compute_pending_block` flag. pub const fn new(compute_pending_block: bool) -> Self { - Self { compute_pending_block } + Self { compute_pending_block, best_transactions: () } + } +} + +impl OpPayloadBuilder +where + Txs: OpPayloadTransactions, +{ + /// Configures the type responsible for yielding the transactions that should be included in the + /// payload. + pub fn with_transactions( + self, + best_transactions: T, + ) -> OpPayloadBuilder { + let Self { compute_pending_block, .. } = self; + OpPayloadBuilder { compute_pending_block, best_transactions } } /// A helper method to initialize [`PayloadBuilderService`] with the given EVM config. @@ -310,17 +336,17 @@ impl OptimismPayloadBuilder { evm_config: Evm, ctx: &BuilderContext, pool: Pool, - ) -> eyre::Result> + ) -> eyre::Result> where Node: FullNodeTypes< - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine, >, Pool: TransactionPool + Unpin + 'static, Evm: ConfigureEvm
, { - let payload_builder = - reth_optimism_payload_builder::OptimismPayloadBuilder::new(evm_config) - .set_compute_pending_block(self.compute_pending_block); + let payload_builder = reth_optimism_payload_builder::OpPayloadBuilder::new(evm_config) + .with_transactions(self.best_transactions) + .set_compute_pending_block(self.compute_pending_block); let conf = ctx.payload_builder_config(); let payload_job_config = BasicPayloadJobGeneratorConfig::default() @@ -346,35 +372,35 @@ impl OptimismPayloadBuilder { } } -impl PayloadServiceBuilder for OptimismPayloadBuilder +impl PayloadServiceBuilder for OpPayloadBuilder where - Node: FullNodeTypes< - Types: NodeTypesWithEngine, - >, + Node: + FullNodeTypes>, Pool: TransactionPool + Unpin + 'static, + Txs: OpPayloadTransactions, { async fn spawn_payload_service( self, ctx: &BuilderContext, pool: Pool, - ) -> eyre::Result> { - self.spawn(OptimismEvmConfig::new(ctx.chain_spec()), ctx, pool) + ) -> eyre::Result> { + self.spawn(OpEvmConfig::new(ctx.chain_spec()), ctx, pool) } } /// A basic optimism network builder. #[derive(Debug, Default, Clone)] -pub struct OptimismNetworkBuilder { +pub struct OpNetworkBuilder { /// Disable transaction pool gossip pub disable_txpool_gossip: bool, /// Disable discovery v4 pub disable_discovery_v4: bool, } -impl OptimismNetworkBuilder { +impl OpNetworkBuilder { /// Returns the [`NetworkConfig`] that contains the settings to launch the p2p network. /// - /// This applies the configured [`OptimismNetworkBuilder`] settings. + /// This applies the configured [`OpNetworkBuilder`] settings. pub fn network_config( &self, ctx: &BuilderContext, @@ -419,7 +445,7 @@ impl OptimismNetworkBuilder { } } -impl NetworkBuilder for OptimismNetworkBuilder +impl NetworkBuilder for OpNetworkBuilder where Node: FullNodeTypes>, Pool: TransactionPool + Unpin + 'static, @@ -441,37 +467,33 @@ where /// A basic optimism consensus builder. #[derive(Debug, Default, Clone)] #[non_exhaustive] -pub struct OptimismConsensusBuilder; +pub struct OpConsensusBuilder; -impl ConsensusBuilder for OptimismConsensusBuilder +impl ConsensusBuilder for OpConsensusBuilder where Node: FullNodeTypes>, { type Consensus = Arc; async fn build_consensus(self, ctx: &BuilderContext) -> eyre::Result { - if ctx.is_dev() { - Ok(Arc::new(reth_auto_seal_consensus::AutoSealConsensus::new(ctx.chain_spec()))) - } else { - Ok(Arc::new(OptimismBeaconConsensus::new(ctx.chain_spec()))) - } + Ok(Arc::new(OpBeaconConsensus::new(ctx.chain_spec()))) } } -/// Builder for [`OptimismEngineValidator`]. +/// Builder for [`OpEngineValidator`]. 
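+///
+/// The validator is constructed from the chain spec carried by the
+/// [`AddOnsContext`].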
#[derive(Debug, Default, Clone)] #[non_exhaustive] -pub struct OptimismEngineValidatorBuilder; +pub struct OpEngineValidatorBuilder; -impl EngineValidatorBuilder for OptimismEngineValidatorBuilder +impl EngineValidatorBuilder for OpEngineValidatorBuilder where Types: NodeTypesWithEngine, Node: FullNodeComponents, - OptimismEngineValidator: EngineValidator, + OpEngineValidator: EngineValidator, { - type Validator = OptimismEngineValidator; + type Validator = OpEngineValidator; async fn build(self, ctx: &AddOnsContext<'_, Node>) -> eyre::Result { - Ok(OptimismEngineValidator::new(ctx.config.chain.clone())) + Ok(OpEngineValidator::new(ctx.config.chain.clone())) } } diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index 09aa76fefb8b..0edfeec73227 100644 --- a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -74,7 +74,7 @@ where pub fn new(inner: EthTransactionValidator) -> Self { let this = Self::with_block_info(inner, OpL1BlockInfo::default()); if let Ok(Some(block)) = - this.inner.client().block_by_number_or_tag(reth_primitives::BlockNumberOrTag::Latest) + this.inner.client().block_by_number_or_tag(alloy_eips::BlockNumberOrTag::Latest) { // genesis block has no txs, so we can't extract L1 info, we set the block info to empty // so that we will accept txs into the pool before the first block @@ -231,9 +231,8 @@ pub struct OpL1BlockInfo { mod tests { use crate::txpool::OpTransactionValidator; use alloy_eips::eip2718::Encodable2718; - use alloy_primitives::{TxKind, U256}; + use alloy_primitives::{PrimitiveSignature as Signature, TxKind, U256}; use op_alloy_consensus::TxDeposit; - use reth::primitives::Signature; use reth_chainspec::MAINNET; use reth_primitives::{Transaction, TransactionSigned, TransactionSignedEcRecovered}; use reth_provider::test_utils::MockEthProvider; diff --git a/crates/optimism/node/tests/e2e/utils.rs b/crates/optimism/node/src/utils.rs similarity index 70% rename from crates/optimism/node/tests/e2e/utils.rs rename to crates/optimism/node/src/utils.rs index 48175e5b21ac..b54015fef0cc 100644 --- a/crates/optimism/node/tests/e2e/utils.rs +++ b/crates/optimism/node/src/utils.rs @@ -1,3 +1,4 @@ +use crate::{node::OpAddOns, OpBuiltPayload, OpNode as OtherOpNode, OpPayloadBuilderAttributes}; use alloy_genesis::Genesis; use alloy_primitives::{Address, B256}; use reth::{rpc::types::engine::PayloadAttributes, tasks::TaskManager}; @@ -5,18 +6,17 @@ use reth_e2e_test_utils::{ transaction::TransactionTestContext, wallet::Wallet, Adapter, NodeHelperType, }; use reth_optimism_chainspec::OpChainSpecBuilder; -use reth_optimism_node::{ - node::OptimismAddOns, OptimismBuiltPayload, OptimismNode, OptimismPayloadBuilderAttributes, -}; use reth_payload_builder::EthPayloadBuilderAttributes; use std::sync::Arc; use tokio::sync::Mutex; /// Optimism Node Helper type -pub(crate) type OpNode = NodeHelperType>>; +pub(crate) type OpNode = NodeHelperType>>; -pub(crate) async fn setup(num_nodes: usize) -> eyre::Result<(Vec, TaskManager, Wallet)> { - let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); +/// Creates the initial setup with `num_nodes` of the node config, started and connected. 
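+///
+/// The nodes run a Base mainnet chain spec with Ecotone activated.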
+pub async fn setup(num_nodes: usize) -> eyre::Result<(Vec, TaskManager, Wallet)> { + let genesis: Genesis = + serde_json::from_str(include_str!("../tests/assets/genesis.json")).unwrap(); reth_e2e_test_utils::setup( num_nodes, Arc::new(OpChainSpecBuilder::base_mainnet().genesis(genesis).ecotone_activated().build()), @@ -27,11 +27,11 @@ pub(crate) async fn setup(num_nodes: usize) -> eyre::Result<(Vec, TaskMa } /// Advance the chain with sequential payloads returning them in the end. -pub(crate) async fn advance_chain( +pub async fn advance_chain( length: usize, node: &mut OpNode, wallet: Arc>, -) -> eyre::Result> { +) -> eyre::Result> { node.advance(length as u64, |_| { let wallet = wallet.clone(); Box::pin(async move { @@ -49,7 +49,7 @@ pub(crate) async fn advance_chain( } /// Helper function to create a new eth payload attributes -pub(crate) fn optimism_payload_attributes(timestamp: u64) -> OptimismPayloadBuilderAttributes { +pub fn optimism_payload_attributes(timestamp: u64) -> OpPayloadBuilderAttributes { let attributes = PayloadAttributes { timestamp, prev_randao: B256::ZERO, @@ -58,10 +58,11 @@ pub(crate) fn optimism_payload_attributes(timestamp: u64) -> OptimismPayloadBuil parent_beacon_block_root: Some(B256::ZERO), }; - OptimismPayloadBuilderAttributes { + OpPayloadBuilderAttributes { payload_attributes: EthPayloadBuilderAttributes::new(B256::ZERO, attributes), transactions: vec![], no_tx_pool: false, gas_limit: Some(30_000_000), + eip_1559_params: None, } } diff --git a/crates/optimism/node/tests/e2e/main.rs b/crates/optimism/node/tests/e2e/main.rs index 3438c766048b..7f4b22ba7e04 100644 --- a/crates/optimism/node/tests/e2e/main.rs +++ b/crates/optimism/node/tests/e2e/main.rs @@ -3,7 +3,4 @@ #[cfg(feature = "optimism")] mod p2p; -#[cfg(feature = "optimism")] -mod utils; - const fn main() {} diff --git a/crates/optimism/node/tests/e2e/p2p.rs b/crates/optimism/node/tests/e2e/p2p.rs index 30affa9bafb9..3db4cfab8698 100644 --- a/crates/optimism/node/tests/e2e/p2p.rs +++ b/crates/optimism/node/tests/e2e/p2p.rs @@ -1,6 +1,7 @@ -use crate::utils::{advance_chain, setup}; use alloy_rpc_types_engine::PayloadStatusEnum; +use futures::StreamExt; use reth::blockchain_tree::error::BlockchainTreeError; +use reth_optimism_node::utils::{advance_chain, setup}; use std::sync::Arc; use tokio::sync::Mutex; @@ -25,6 +26,19 @@ async fn can_sync() -> eyre::Result<()> { canonical_payload_chain.iter().map(|p| p.0.block().hash()).collect::>(); // On second node, sync optimistically up to block number 88a + second_node + .engine_api + .update_optimistic_forkchoice(canonical_chain[tip_index - reorg_depth - 1]) + .await?; + second_node + .wait_block( + (tip - reorg_depth - 1) as u64, + canonical_chain[tip_index - reorg_depth - 1], + true, + ) + .await?; + // We send FCU twice to ensure that pool receives canonical chain update on the second FCU + // This is required because notifications are not sent during backfill sync second_node .engine_api .update_optimistic_forkchoice(canonical_chain[tip_index - reorg_depth]) @@ -32,6 +46,7 @@ async fn can_sync() -> eyre::Result<()> { second_node .wait_block((tip - reorg_depth) as u64, canonical_chain[tip_index - reorg_depth], true) .await?; + second_node.engine_api.canonical_stream.next().await.unwrap(); // On third node, sync optimistically up to block number 90a third_node.engine_api.update_optimistic_forkchoice(canonical_chain[tip_index]).await?; diff --git a/crates/optimism/node/tests/it/builder.rs b/crates/optimism/node/tests/it/builder.rs index 
f1dde4c2c0a8..67cac17d3987 100644 --- a/crates/optimism/node/tests/it/builder.rs +++ b/crates/optimism/node/tests/it/builder.rs @@ -4,7 +4,7 @@ use reth_db::test_utils::create_test_rw_db; use reth_node_api::FullNodeComponents; use reth_node_builder::{NodeBuilder, NodeConfig}; use reth_optimism_chainspec::BASE_MAINNET; -use reth_optimism_node::{node::OptimismAddOns, OptimismNode}; +use reth_optimism_node::{node::OpAddOns, OpNode}; #[test] fn test_basic_setup() { @@ -13,9 +13,9 @@ fn test_basic_setup() { let db = create_test_rw_db(); let _builder = NodeBuilder::new(config) .with_database(db) - .with_types::() - .with_components(OptimismNode::components(Default::default())) - .with_add_ons(OptimismAddOns::new(None)) + .with_types::() + .with_components(OpNode::components(Default::default())) + .with_add_ons(OpAddOns::new(None)) .on_component_initialized(move |ctx| { let _provider = ctx.provider(); Ok(()) diff --git a/crates/optimism/node/tests/it/main.rs b/crates/optimism/node/tests/it/main.rs index b84dd7426c24..d0533fc4541c 100644 --- a/crates/optimism/node/tests/it/main.rs +++ b/crates/optimism/node/tests/it/main.rs @@ -3,4 +3,7 @@ #[cfg(feature = "optimism")] mod builder; +#[cfg(feature = "optimism")] +mod priority; + const fn main() {} diff --git a/crates/optimism/node/tests/it/priority.rs b/crates/optimism/node/tests/it/priority.rs new file mode 100644 index 000000000000..52e3bef3d918 --- /dev/null +++ b/crates/optimism/node/tests/it/priority.rs @@ -0,0 +1,190 @@ +//! Node builder test that customizes priority of transactions in the block. + +use alloy_consensus::TxEip1559; +use alloy_genesis::Genesis; +use alloy_network::TxSignerSync; +use alloy_primitives::{Address, ChainId, TxKind}; +use reth::{args::DatadirArgs, tasks::TaskManager}; +use reth_chainspec::EthChainSpec; +use reth_db::test_utils::create_test_rw_db_with_path; +use reth_e2e_test_utils::{ + node::NodeTestContext, transaction::TransactionTestContext, wallet::Wallet, +}; +use reth_node_api::{FullNodeTypes, NodeTypesWithEngine}; +use reth_node_builder::{ + components::ComponentsBuilder, EngineNodeLauncher, NodeBuilder, NodeConfig, +}; +use reth_optimism_chainspec::{OpChainSpec, OpChainSpecBuilder}; +use reth_optimism_node::{ + args::RollupArgs, + node::{ + OpAddOns, OpConsensusBuilder, OpExecutorBuilder, OpNetworkBuilder, OpPayloadBuilder, + OpPoolBuilder, + }, + utils::optimism_payload_attributes, + OpEngineTypes, OpNode, +}; +use reth_optimism_payload_builder::builder::OpPayloadTransactions; +use reth_primitives::{SealedBlock, Transaction, TransactionSigned, TransactionSignedEcRecovered}; +use reth_provider::providers::BlockchainProvider2; +use reth_transaction_pool::{ + pool::{BestPayloadTransactions, PayloadTransactionsChain, PayloadTransactionsFixed}, + PayloadTransactions, +}; +use std::sync::Arc; +use tokio::sync::Mutex; + +#[derive(Clone, Debug)] +struct CustomTxPriority { + chain_id: ChainId, +} + +impl OpPayloadTransactions for CustomTxPriority { + fn best_transactions( + &self, + pool: Pool, + attr: reth_transaction_pool::BestTransactionsAttributes, + ) -> impl PayloadTransactions + where + Pool: reth_transaction_pool::TransactionPool, + { + // Block composition: + // 1. Best transactions from the pool (up to 250k gas) + // 2. End-of-block transaction created by the node (up to 100k gas) + + // End of block transaction should send a 0-value transfer to a random address. 
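+        // It is signed below with the default dev wallet and appended after the
+        // pool transactions via `PayloadTransactionsFixed::single`.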
+ let sender = Wallet::default().inner; + let mut end_of_block_tx = TxEip1559 { + chain_id: self.chain_id, + nonce: 1, // it will be 2nd tx after L1 block info tx that uses the same sender + gas_limit: 21000, + max_fee_per_gas: 20e9 as u128, + to: TxKind::Call(Address::random()), + value: 0.try_into().unwrap(), + ..Default::default() + }; + let signature = sender.sign_transaction_sync(&mut end_of_block_tx).unwrap(); + let end_of_block_tx = TransactionSignedEcRecovered::from_signed_transaction( + TransactionSigned::from_transaction_and_signature( + Transaction::Eip1559(end_of_block_tx), + signature, + ), + sender.address(), + ); + + PayloadTransactionsChain::new( + BestPayloadTransactions::new(pool.best_transactions_with_attributes(attr)), + // Allow 250k gas for the transactions from the pool + Some(250_000), + PayloadTransactionsFixed::single(end_of_block_tx), + // Allow 100k gas for the end-of-block transaction + Some(100_000), + ) + } +} + +/// Builds the node with custom transaction priority service within default payload builder. +fn build_components( + chain_id: ChainId, +) -> ComponentsBuilder< + Node, + OpPoolBuilder, + OpPayloadBuilder, + OpNetworkBuilder, + OpExecutorBuilder, + OpConsensusBuilder, +> +where + Node: + FullNodeTypes>, +{ + let RollupArgs { disable_txpool_gossip, compute_pending_block, discovery_v4, .. } = + RollupArgs::default(); + ComponentsBuilder::default() + .node_types::() + .pool(OpPoolBuilder::default()) + .payload( + OpPayloadBuilder::new(compute_pending_block) + .with_transactions(CustomTxPriority { chain_id }), + ) + .network(OpNetworkBuilder { disable_txpool_gossip, disable_discovery_v4: !discovery_v4 }) + .executor(OpExecutorBuilder::default()) + .consensus(OpConsensusBuilder::default()) +} + +#[tokio::test] +async fn test_custom_block_priority_config() { + reth_tracing::init_test_tracing(); + + let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); + let chain_spec = + Arc::new(OpChainSpecBuilder::base_mainnet().genesis(genesis).ecotone_activated().build()); + + // This wallet is going to send: + // 1. L1 block info tx + // 2. End-of-block custom tx + let wallet = Arc::new(Mutex::new(Wallet::default().with_chain_id(chain_spec.chain().into()))); + + // Configure and launch the node. + let config = NodeConfig::new(chain_spec).with_datadir_args(DatadirArgs { + datadir: reth_db::test_utils::tempdir_path().into(), + ..Default::default() + }); + let db = create_test_rw_db_with_path( + config + .datadir + .datadir + .unwrap_or_chain_default(config.chain.chain(), config.datadir.clone()) + .db(), + ); + let tasks = TaskManager::current(); + let node_handle = NodeBuilder::new(config.clone()) + .with_database(db) + .with_types_and_provider::>() + .with_components(build_components(config.chain.chain_id())) + .with_add_ons(OpAddOns::default()) + .launch_with_fn(|builder| { + let launcher = EngineNodeLauncher::new( + tasks.executor(), + builder.config.datadir(), + Default::default(), + ); + builder.launch_with(launcher) + }) + .await + .expect("Failed to launch node"); + + // Advance the chain with a single block. 
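+    // The only transaction submitted here is the L1 block info tx; the
+    // end-of-block transfer is appended automatically by `CustomTxPriority`.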
+ let block_payloads = NodeTestContext::new(node_handle.node, optimism_payload_attributes) + .await + .unwrap() + .advance(1, |_| { + let wallet = wallet.clone(); + Box::pin(async move { + let mut wallet = wallet.lock().await; + let tx_fut = TransactionTestContext::optimism_l1_block_info_tx( + wallet.chain_id, + wallet.inner.clone(), + // This doesn't matter in the current test (because it's only one block), + // but make sure you're not reusing the nonce from end-of-block tx + // if they have the same signer. + wallet.inner_nonce * 2, + ); + wallet.inner_nonce += 1; + tx_fut.await + }) + }) + .await + .unwrap(); + assert_eq!(block_payloads.len(), 1); + let (block_payload, _) = block_payloads.first().unwrap(); + let block_payload: SealedBlock = block_payload.block().clone(); + assert_eq!(block_payload.body.transactions.len(), 2); // L1 block info tx + end-of-block custom tx + + // Check that last transaction in the block looks like a transfer to a random address. + let end_of_block_tx = block_payload.body.transactions.last().unwrap(); + let end_of_block_tx = end_of_block_tx.transaction.as_eip1559().unwrap(); + assert_eq!(end_of_block_tx.nonce, 1); + assert_eq!(end_of_block_tx.gas_limit, 21_000); + assert!(end_of_block_tx.input.is_empty()); +} diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index de61def83506..839355b2158e 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -22,7 +22,7 @@ reth-rpc-types-compat.workspace = true reth-evm.workspace = true reth-execution-types.workspace = true reth-payload-builder.workspace = true -reth-payload-primitives.workspace = true +reth-payload-primitives = { workspace = true, features = ["op"] } reth-basic-payload-builder.workspace = true reth-trie.workspace = true reth-chain-state.workspace = true @@ -39,7 +39,7 @@ alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true op-alloy-rpc-types-engine.workspace = true -revm-primitives.workspace = true +op-alloy-consensus.workspace = true alloy-rpc-types-engine.workspace = true alloy-consensus.workspace = true @@ -55,6 +55,5 @@ optimism = [ "reth-optimism-evm/optimism", "revm/optimism", "reth-execution-types/optimism", - "reth-optimism-consensus/optimism", - "revm-primitives/optimism" -] + "reth-optimism-consensus/optimism" +] \ No newline at end of file diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 85f687aa803b..beb9a5c4ae21 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -1,65 +1,82 @@ //! Optimism payload builder implementation. 
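+//!
+//! Sequencer transactions from the payload attributes are always executed first;
+//! unless `no_tx_pool` is set, the remaining gas is then filled with
+//! transactions drawn from the mempool.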
-use std::sync::Arc; +use std::{fmt::Display, sync::Arc}; -use alloy_consensus::EMPTY_OMMER_ROOT_HASH; +use alloy_consensus::{Header, Transaction, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::merge::BEACON_NONCE; -use alloy_primitives::U256; +use alloy_primitives::{Address, Bytes, U256}; +use alloy_rpc_types_engine::PayloadId; use reth_basic_payload_builder::*; use reth_chain_state::ExecutedBlock; -use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; -use reth_evm::{system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes}; +use reth_chainspec::ChainSpecProvider; +use reth_evm::{system_calls::SystemCaller, ConfigureEvm, NextBlockEnvAttributes}; use reth_execution_types::ExecutionOutcome; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; -use reth_optimism_forks::OptimismHardfork; +use reth_optimism_forks::OpHardforks; use reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; use reth_primitives::{ proofs, revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, - Block, BlockBody, Header, Receipt, TxType, + Block, BlockBody, Receipt, SealedHeader, TransactionSigned, TxType, }; -use reth_provider::StateProviderFactory; +use reth_provider::{ProviderError, StateProviderFactory, StateRootProvider}; use reth_revm::database::StateProviderDatabase; use reth_transaction_pool::{ - noop::NoopTransactionPool, BestTransactionsAttributes, TransactionPool, + noop::NoopTransactionPool, BestTransactionsAttributes, PayloadTransactions, TransactionPool, }; use reth_trie::HashedPostState; use revm::{ db::{states::bundle_state::BundleRetention, State}, - primitives::{EVMError, EnvWithHandlerCfg, InvalidTransaction, ResultAndState}, - DatabaseCommit, + primitives::{EVMError, EnvWithHandlerCfg, InvalidTransaction, ResultAndState, TxEnv}, + Database, DatabaseCommit, }; -use revm_primitives::calc_excess_blob_gas; use tracing::{debug, trace, warn}; use crate::{ - error::OptimismPayloadBuilderError, - payload::{OptimismBuiltPayload, OptimismPayloadBuilderAttributes}, + error::OpPayloadBuilderError, + payload::{OpBuiltPayload, OpPayloadBuilderAttributes}, }; +use op_alloy_consensus::DepositTransaction; +use reth_transaction_pool::pool::BestPayloadTransactions; /// Optimism's payload builder #[derive(Debug, Clone, PartialEq, Eq)] -pub struct OptimismPayloadBuilder { +pub struct OpPayloadBuilder { /// The rollup's compute pending block configuration option. // TODO(clabby): Implement this feature. pub compute_pending_block: bool, /// The type responsible for creating the evm. pub evm_config: EvmConfig, + /// The type responsible for yielding the best transactions for the payload if mempool + /// transactions are allowed. + pub best_transactions: Txs, } -impl OptimismPayloadBuilder { - /// `OptimismPayloadBuilder` constructor. +impl OpPayloadBuilder { + /// `OpPayloadBuilder` constructor. pub const fn new(evm_config: EvmConfig) -> Self { - Self { compute_pending_block: true, evm_config } + Self { compute_pending_block: true, evm_config, best_transactions: () } } +} +impl OpPayloadBuilder { /// Sets the rollup's compute pending block configuration option. pub const fn set_compute_pending_block(mut self, compute_pending_block: bool) -> Self { self.compute_pending_block = compute_pending_block; self } + /// Configures the type responsible for yielding the transactions that should be included in the + /// payload. 
+ pub fn with_transactions<T: OpPayloadTransactions>( + self, + best_transactions: T, + ) -> OpPayloadBuilder<EvmConfig, T> { + let Self { compute_pending_block, evm_config, .. } = self; + OpPayloadBuilder { compute_pending_block, evm_config, best_transactions } + } + /// Enables the rollup's compute pending block configuration option. + pub const fn compute_pending_block(self) -> Self { self.set_compute_pending_block(true) @@ -70,17 +87,18 @@ impl<EvmConfig> OptimismPayloadBuilder<EvmConfig> { self.compute_pending_block } } -impl<EvmConfig> OptimismPayloadBuilder<EvmConfig> +impl<EvmConfig, Txs> OpPayloadBuilder<EvmConfig, Txs> where - EvmConfig: ConfigureEvmEnv<Header = Header>, + EvmConfig: ConfigureEvm<Header = Header>
, + Txs: OpPayloadTransactions, { /// Returns the configured [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the targeted payload /// (that has the `parent` as its parent). pub fn cfg_and_block_env( &self, - config: &PayloadConfig, + config: &PayloadConfig, parent: &Header, - ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), EvmConfig::Error> { let next_attributes = NextBlockEnvAttributes { timestamp: config.attributes.timestamp(), suggested_fee_recipient: config.attributes.suggested_fee_recipient(), @@ -88,29 +106,80 @@ where }; self.evm_config.next_cfg_and_block_env(parent, next_attributes) } + + /// Constructs an Optimism payload from the transactions sent via the + /// Payload attributes by the sequencer. If the `no_tx_pool` argument is passed in + /// the payload attributes, the transaction pool will be ignored and the only transactions + /// included in the payload will be those sent through the attributes. + /// + /// Given build arguments including an Optimism client, transaction pool, + /// and configuration, this function creates a transaction payload. Returns + /// a result indicating success with the payload or an error in case of failure. + fn build_payload( + &self, + args: BuildArguments, + ) -> Result, PayloadBuilderError> + where + Client: StateProviderFactory + ChainSpecProvider, + Pool: TransactionPool, + { + let (initialized_cfg, initialized_block_env) = self + .cfg_and_block_env(&args.config, &args.config.parent_header) + .map_err(PayloadBuilderError::other)?; + + let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args; + + let ctx = OpPayloadBuilderCtx { + evm_config: self.evm_config.clone(), + chain_spec: client.chain_spec(), + config, + initialized_cfg, + initialized_block_env, + cancel, + best_payload, + }; + + let builder = OpBuilder { pool, best: self.best_transactions.clone() }; + + let state_provider = client.state_by_block_hash(ctx.parent().hash())?; + let state = StateProviderDatabase::new(state_provider); + + if ctx.attributes().no_tx_pool { + let db = State::builder().with_database(state).with_bundle_update().build(); + builder.build(db, ctx) + } else { + // sequencer mode we can reuse cachedreads from previous runs + let db = State::builder() + .with_database(cached_reads.as_db_mut(state)) + .with_bundle_update() + .build(); + builder.build(db, ctx) + } + .map(|out| out.with_cached_reads(cached_reads)) + } } -/// Implementation of the [`PayloadBuilder`] trait for [`OptimismPayloadBuilder`]. -impl PayloadBuilder for OptimismPayloadBuilder +/// Implementation of the [`PayloadBuilder`] trait for [`OpPayloadBuilder`]. +impl PayloadBuilder for OpPayloadBuilder where Client: StateProviderFactory + ChainSpecProvider, Pool: TransactionPool, EvmConfig: ConfigureEvm
, + Txs: OpPayloadTransactions, { - type Attributes = OptimismPayloadBuilderAttributes; - type BuiltPayload = OptimismBuiltPayload; + type Attributes = OpPayloadBuilderAttributes; + type BuiltPayload = OpBuiltPayload; fn try_build( &self, - args: BuildArguments, - ) -> Result, PayloadBuilderError> { - let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_block); - optimism_payload(&self.evm_config, args, cfg_env, block_env, self.compute_pending_block) + args: BuildArguments, + ) -> Result, PayloadBuilderError> { + self.build_payload(args) } fn on_missing_payload( &self, - _args: BuildArguments, + _args: BuildArguments, ) -> MissingPayloadBehaviour { // we want to await the job that's already in progress because that should be returned as // is, there's no benefit in racing another job @@ -123,7 +192,7 @@ where &self, client: &Client, config: PayloadConfig, - ) -> Result { + ) -> Result { let args = BuildArguments { client, config, @@ -133,229 +202,573 @@ where cancel: Default::default(), best_payload: None, }; - let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_block); - optimism_payload(&self.evm_config, args, cfg_env, block_env, false)? - .into_payload() - .ok_or_else(|| PayloadBuilderError::MissingPayload) + self.build_payload(args)?.into_payload().ok_or_else(|| PayloadBuilderError::MissingPayload) } } -/// Constructs an Optimism transaction payload from the transactions sent through the -/// Payload attributes by the sequencer. If the `no_tx_pool` argument is passed in -/// the payload attributes, the transaction pool will be ignored and the only transactions -/// included in the payload will be those sent through the attributes. +/// The type that builds the payload. +/// +/// Payload building for optimism is composed of several steps. +/// The first steps are mandatory and defined by the protocol. /// -/// Given build arguments including an Optimism client, transaction pool, -/// and configuration, this function creates a transaction payload. Returns -/// a result indicating success with the payload or an error in case of failure. -#[inline] -pub(crate) fn optimism_payload( - evm_config: &EvmConfig, - args: BuildArguments, - initialized_cfg: CfgEnvWithHandlerCfg, - initialized_block_env: BlockEnv, - _compute_pending_block: bool, -) -> Result, PayloadBuilderError> +/// 1. first all System calls are applied. +/// 2. After canyon the forced deployed `create2deployer` must be loaded +/// 3. all sequencer transactions are executed (part of the payload attributes) +/// +/// Depending on whether the node acts as a sequencer and is allowed to include additional +/// transactions (`no_tx_pool == false`): +/// 4. include additional transactions +/// +/// And finally +/// 5. build the block: compute all roots (txs, state) +#[derive(Debug)] +pub struct OpBuilder { + /// The transaction pool + pool: Pool, + /// Yields the best transaction to include if transactions from the mempool are allowed. + best: Txs, +} + +impl OpBuilder where - EvmConfig: ConfigureEvm
<Header = Header>, - Client: StateProviderFactory + ChainSpecProvider<ChainSpec = OpChainSpec>, Pool: TransactionPool, + Txs: OpPayloadTransactions, { - let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args; + /// Builds the payload on top of the state. + pub fn build<EvmConfig, DB, P>( self, mut db: State<DB>, ctx: OpPayloadBuilderCtx<EvmConfig>, ) -> Result<BuildOutcomeKind<OpBuiltPayload>, PayloadBuilderError> + where + EvmConfig: ConfigureEvm<Header = Header>, + DB: Database<Error = ProviderError> + AsRef<P>
, + P: StateRootProvider, + { + let Self { pool, best } = self; + debug!(target: "payload_builder", id=%ctx.payload_id(), parent_header = ?ctx.parent().hash(), parent_number = ctx.parent().number, "building new payload"); + + // 1. apply eip-4788 pre block contract call + ctx.apply_pre_beacon_root_contract_call(&mut db)?; + + // 2. ensure create2deployer is force deployed + ctx.ensure_create2_deployer(&mut db)?; + + // 3. execute sequencer transactions + let mut info = ctx.execute_sequencer_transactions(&mut db)?; + + // 4. if mem pool transactions are requested we execute them + if !ctx.attributes().no_tx_pool { + let best_txs = best.best_transactions(pool, ctx.best_transaction_attributes()); + if let Some(cancelled) = + ctx.execute_best_transactions::<_, Pool>(&mut info, &mut db, best_txs)? + { + return Ok(cancelled) + } - let chain_spec = client.chain_spec(); - let state_provider = client.state_by_block_hash(config.parent_block.hash())?; - let state = StateProviderDatabase::new(state_provider); - let mut db = - State::builder().with_database_ref(cached_reads.as_db(state)).with_bundle_update().build(); - let PayloadConfig { parent_block, attributes, extra_data } = config; + // check if the new payload is even more valuable + if !ctx.is_better_payload(info.total_fees) { + // can skip building the block + return Ok(BuildOutcomeKind::Aborted { fees: info.total_fees }) + } + } - debug!(target: "payload_builder", id=%attributes.payload_attributes.payload_id(), parent_hash = ?parent_block.hash(), parent_number = parent_block.number, "building new payload"); + let WithdrawalsOutcome { withdrawals_root, withdrawals } = + ctx.commit_withdrawals(&mut db)?; - let mut cumulative_gas_used = 0; - let block_gas_limit: u64 = attributes.gas_limit.unwrap_or_else(|| { - initialized_block_env.gas_limit.try_into().unwrap_or(chain_spec.max_gas_limit) - }); - let base_fee = initialized_block_env.basefee.to::(); + // merge all transitions into bundle state, this would apply the withdrawal balance changes + // and 4788 contract call + db.merge_transitions(BundleRetention::Reverts); - let mut executed_txs = Vec::with_capacity(attributes.transactions.len()); - let mut executed_senders = Vec::with_capacity(attributes.transactions.len()); + let block_number = ctx.block_number(); + let execution_outcome = ExecutionOutcome::new( + db.take_bundle(), + vec![info.receipts.clone()].into(), + block_number, + Vec::new(), + ); + let receipts_root = execution_outcome + .generic_receipts_root_slow(block_number, |receipts| { + calculate_receipt_root_no_memo_optimism( + receipts, + &ctx.chain_spec, + ctx.attributes().timestamp(), + ) + }) + .expect("Number is in range"); + let logs_bloom = + execution_outcome.block_logs_bloom(block_number).expect("Number is in range"); + + // // calculate the state root + let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state); + let (state_root, trie_output) = { + db.database.as_ref().state_root_with_updates(hashed_state.clone()).inspect_err( + |err| { + warn!(target: "payload_builder", + parent_header=%ctx.parent().hash(), + %err, + "failed to calculate state root for payload" + ); + }, + )? + }; - let mut best_txs = pool.best_transactions_with_attributes(BestTransactionsAttributes::new( - base_fee, - initialized_block_env.get_blob_gasprice().map(|gasprice| gasprice as u64), - )); + // create the block header + let transactions_root = proofs::calculate_transaction_root(&info.executed_transactions); + + // OP doesn't support blobs/EIP-4844. 
+ // https://specs.optimism.io/protocol/exec-engine.html#ecotone-disable-blob-transactions + // Need [Some] or [None] based on hardfork to match block hash. + let (excess_blob_gas, blob_gas_used) = ctx.blob_fields(); + let extra_data = ctx.extra_data()?; + + let header = Header { + parent_hash: ctx.parent().hash(), + ommers_hash: EMPTY_OMMER_ROOT_HASH, + beneficiary: ctx.initialized_block_env.coinbase, + state_root, + transactions_root, + receipts_root, + withdrawals_root, + logs_bloom, + timestamp: ctx.attributes().payload_attributes.timestamp, + mix_hash: ctx.attributes().payload_attributes.prev_randao, + nonce: BEACON_NONCE.into(), + base_fee_per_gas: Some(ctx.base_fee()), + number: ctx.parent().number + 1, + gas_limit: ctx.block_gas_limit(), + difficulty: U256::ZERO, + gas_used: info.cumulative_gas_used, + extra_data, + parent_beacon_block_root: ctx.attributes().payload_attributes.parent_beacon_block_root, + blob_gas_used, + excess_blob_gas, + requests_hash: None, + }; - let mut total_fees = U256::ZERO; + // seal the block + let block = Block { + header, + body: BlockBody { + transactions: info.executed_transactions, + ommers: vec![], + withdrawals, + }, + }; - let block_number = initialized_block_env.number.to::(); + let sealed_block = Arc::new(block.seal_slow()); + debug!(target: "payload_builder", ?sealed_block, "sealed built block"); - let is_regolith = chain_spec.is_fork_active_at_timestamp( - OptimismHardfork::Regolith, - attributes.payload_attributes.timestamp, - ); + // create the executed block data + let executed = ExecutedBlock { + block: sealed_block.clone(), + senders: Arc::new(info.executed_senders), + execution_output: Arc::new(execution_outcome), + hashed_state: Arc::new(hashed_state), + trie: Arc::new(trie_output), + }; - // apply eip-4788 pre block contract call - let mut system_caller = SystemCaller::new(evm_config.clone(), &chain_spec); + let no_tx_pool = ctx.attributes().no_tx_pool; - system_caller - .pre_block_beacon_root_contract_call( - &mut db, - &initialized_cfg, - &initialized_block_env, - attributes.payload_attributes.parent_beacon_block_root, - ) - .map_err(|err| { - warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), - %err, - "failed to apply beacon root contract call for payload" - ); - PayloadBuilderError::Internal(err.into()) - })?; - - // Ensure that the create2deployer is force-deployed at the canyon transition. Optimism - // blocks will always have at least a single transaction in them (the L1 info transaction), - // so we can safely assume that this will always be triggered upon the transition and that - // the above check for empty blocks will never be hit on OP chains. - reth_optimism_evm::ensure_create2_deployer( - chain_spec.clone(), - attributes.payload_attributes.timestamp, - &mut db, - ) - .map_err(|err| { - warn!(target: "payload_builder", %err, "missing create2 deployer, skipping block."); - PayloadBuilderError::other(OptimismPayloadBuilderError::ForceCreate2DeployerFail) - })?; - - let mut receipts = Vec::with_capacity(attributes.transactions.len()); - for sequencer_tx in &attributes.transactions { - // Check if the job was cancelled, if so we can exit early. - if cancel.is_cancelled() { - return Ok(BuildOutcome::Cancelled) + let payload = OpBuiltPayload::new( + ctx.payload_id(), + sealed_block, + info.total_fees, + ctx.chain_spec.clone(), + ctx.config.attributes, + Some(executed), + ); + + if no_tx_pool { + // if `no_tx_pool` is set only transactions from the payload attributes will be included + // in the payload. 
In other words, the payload is deterministic and we can + // freeze it once we've successfully built it. + Ok(BuildOutcomeKind::Freeze(payload)) + } else { + Ok(BuildOutcomeKind::Better { payload }) } + } +} - // A sequencer's block should never contain blob transactions. - if sequencer_tx.value().is_eip4844() { - return Err(PayloadBuilderError::other( - OptimismPayloadBuilderError::BlobTransactionRejected, - )) +/// A type that returns a the [`PayloadTransactions`] that should be included in the pool. +pub trait OpPayloadTransactions: Clone + Send + Sync + Unpin + 'static { + /// Returns an iterator that yields the transaction in the order they should get included in the + /// new payload. + fn best_transactions( + &self, + pool: Pool, + attr: BestTransactionsAttributes, + ) -> impl PayloadTransactions; +} + +impl OpPayloadTransactions for () { + fn best_transactions( + &self, + pool: Pool, + attr: BestTransactionsAttributes, + ) -> impl PayloadTransactions { + BestPayloadTransactions::new(pool.best_transactions_with_attributes(attr)) + } +} + +/// This acts as the container for executed transactions and its byproducts (receipts, gas used) +#[derive(Default, Debug)] +pub struct ExecutionInfo { + /// All executed transactions (unrecovered). + pub executed_transactions: Vec, + /// The recovered senders for the executed transactions. + pub executed_senders: Vec
<Address>
, + /// The transaction receipts + pub receipts: Vec>, + /// All gas used so far + pub cumulative_gas_used: u64, + /// Tracks fees from executed mempool transactions + pub total_fees: U256, +} + +impl ExecutionInfo { + /// Create a new instance with allocated slots. + pub fn with_capacity(capacity: usize) -> Self { + Self { + executed_transactions: Vec::with_capacity(capacity), + executed_senders: Vec::with_capacity(capacity), + receipts: Vec::with_capacity(capacity), + cumulative_gas_used: 0, + total_fees: U256::ZERO, } + } +} - // Convert the transaction to a [TransactionSignedEcRecovered]. This is - // purely for the purposes of utilizing the `evm_config.tx_env`` function. - // Deposit transactions do not have signatures, so if the tx is a deposit, this - // will just pull in its `from` address. - let sequencer_tx = sequencer_tx.value().clone().try_into_ecrecovered().map_err(|_| { - PayloadBuilderError::other(OptimismPayloadBuilderError::TransactionEcRecoverFailed) - })?; - - // Cache the depositor account prior to the state transition for the deposit nonce. - // - // Note that this *only* needs to be done post-regolith hardfork, as deposit nonces - // were not introduced in Bedrock. In addition, regular transactions don't have deposit - // nonces, so we don't need to touch the DB for those. - let depositor = (is_regolith && sequencer_tx.is_deposit()) - .then(|| { - db.load_cache_account(sequencer_tx.signer()) - .map(|acc| acc.account_info().unwrap_or_default()) - }) - .transpose() - .map_err(|_| { - PayloadBuilderError::other(OptimismPayloadBuilderError::AccountLoadFailed( - sequencer_tx.signer(), - )) +/// Container type that holds all necessities to build a new payload. +#[derive(Debug)] +pub struct OpPayloadBuilderCtx { + /// The type that knows how to perform system calls and configure the evm. + pub evm_config: EvmConfig, + /// The chainspec + pub chain_spec: Arc, + /// How to build the payload. + pub config: PayloadConfig, + /// Evm Settings + pub initialized_cfg: CfgEnvWithHandlerCfg, + /// Block config + pub initialized_block_env: BlockEnv, + /// Marker to check whether the job has been cancelled. + pub cancel: Cancelled, + /// The currently best payload. + pub best_payload: Option, +} + +impl OpPayloadBuilderCtx { + /// Returns the parent block the payload will be build on. + pub fn parent(&self) -> &SealedHeader { + &self.config.parent_header + } + + /// Returns the builder attributes. + pub const fn attributes(&self) -> &OpPayloadBuilderAttributes { + &self.config.attributes + } + + /// Returns the block gas limit to target. + pub fn block_gas_limit(&self) -> u64 { + self.attributes() + .gas_limit + .unwrap_or_else(|| self.initialized_block_env.gas_limit.saturating_to()) + } + + /// Returns the block number for the block. + pub fn block_number(&self) -> u64 { + self.initialized_block_env.number.to() + } + + /// Returns the current base fee + pub fn base_fee(&self) -> u64 { + self.initialized_block_env.basefee.to() + } + + /// Returns the current blob gas price. + pub fn get_blob_gasprice(&self) -> Option { + self.initialized_block_env.get_blob_gasprice().map(|gasprice| gasprice as u64) + } + + /// Returns the blob fields for the header. + /// + /// This will always return `Some(0)` after ecotone. + pub fn blob_fields(&self) -> (Option, Option) { + // OP doesn't support blobs/EIP-4844. + // https://specs.optimism.io/protocol/exec-engine.html#ecotone-disable-blob-transactions + // Need [Some] or [None] based on hardfork to match block hash. 
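+        // Pre-Ecotone headers omit `excess_blob_gas` and `blob_gas_used`
+        // entirely; from Ecotone onwards both fields are present but pinned to
+        // zero.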
+ if self.is_ecotone_active() { + (Some(0), Some(0)) + } else { + (None, None) + } + } + + /// Returns the extra data for the block. + /// + /// After holocene this extracts the extradata from the paylpad + pub fn extra_data(&self) -> Result { + if self.is_holocene_active() { + self.attributes() + .get_holocene_extra_data( + self.chain_spec.base_fee_params_at_timestamp( + self.attributes().payload_attributes.timestamp, + ), + ) + .map_err(PayloadBuilderError::other) + } else { + Ok(self.config.extra_data.clone()) + } + } + + /// Returns the current fee settings for transactions from the mempool + pub fn best_transaction_attributes(&self) -> BestTransactionsAttributes { + BestTransactionsAttributes::new(self.base_fee(), self.get_blob_gasprice()) + } + + /// Returns the unique id for this payload job. + pub fn payload_id(&self) -> PayloadId { + self.attributes().payload_id() + } + + /// Returns true if regolith is active for the payload. + pub fn is_regolith_active(&self) -> bool { + self.chain_spec.is_regolith_active_at_timestamp(self.attributes().timestamp()) + } + + /// Returns true if ecotone is active for the payload. + pub fn is_ecotone_active(&self) -> bool { + self.chain_spec.is_ecotone_active_at_timestamp(self.attributes().timestamp()) + } + + /// Returns true if canyon is active for the payload. + pub fn is_canyon_active(&self) -> bool { + self.chain_spec.is_canyon_active_at_timestamp(self.attributes().timestamp()) + } + + /// Returns true if holocene is active for the payload. + pub fn is_holocene_active(&self) -> bool { + self.chain_spec.is_holocene_active_at_timestamp(self.attributes().timestamp()) + } + + /// Returns true if the fees are higher than the previous payload. + pub fn is_better_payload(&self, total_fees: U256) -> bool { + is_better_payload(self.best_payload.as_ref(), total_fees) + } + + /// Commits the withdrawals from the payload attributes to the state. + pub fn commit_withdrawals( + &self, + db: &mut State, + ) -> Result + where + DB: Database, + { + commit_withdrawals( + db, + &self.chain_spec, + self.attributes().payload_attributes.timestamp, + self.attributes().payload_attributes.withdrawals.clone(), + ) + } + + /// Ensure that the create2deployer is force-deployed at the canyon transition. Optimism + /// blocks will always have at least a single transaction in them (the L1 info transaction), + /// so we can safely assume that this will always be triggered upon the transition and that + /// the above check for empty blocks will never be hit on OP chains. + pub fn ensure_create2_deployer(&self, db: &mut State) -> Result<(), PayloadBuilderError> + where + DB: Database, + DB::Error: Display, + { + reth_optimism_evm::ensure_create2_deployer( + self.chain_spec.clone(), + self.attributes().payload_attributes.timestamp, + db, + ) + .map_err(|err| { + warn!(target: "payload_builder", %err, "missing create2 deployer, skipping block."); + PayloadBuilderError::other(OpPayloadBuilderError::ForceCreate2DeployerFail) + }) + } +} + +impl OpPayloadBuilderCtx +where + EvmConfig: ConfigureEvm
, +{ + /// apply eip-4788 pre block contract call + pub fn apply_pre_beacon_root_contract_call( + &self, + db: &mut DB, + ) -> Result<(), PayloadBuilderError> + where + DB: Database + DatabaseCommit, + DB::Error: Display, + { + SystemCaller::new(self.evm_config.clone(), self.chain_spec.clone()) + .pre_block_beacon_root_contract_call( + db, + &self.initialized_cfg, + &self.initialized_block_env, + self.attributes().payload_attributes.parent_beacon_block_root, + ) + .map_err(|err| { + warn!(target: "payload_builder", + parent_header=%self.parent().hash(), + %err, + "failed to apply beacon root contract call for payload" + ); + PayloadBuilderError::Internal(err.into()) })?; + Ok(()) + } + + /// Executes all sequencer transactions that are included in the payload attributes. + pub fn execute_sequencer_transactions( + &self, + db: &mut State, + ) -> Result + where + DB: Database, + { + let mut info = ExecutionInfo::with_capacity(self.attributes().transactions.len()); + let env = EnvWithHandlerCfg::new_with_cfg_env( - initialized_cfg.clone(), - initialized_block_env.clone(), - evm_config.tx_env(sequencer_tx.as_signed(), sequencer_tx.signer()), + self.initialized_cfg.clone(), + self.initialized_block_env.clone(), + TxEnv::default(), ); + let mut evm = self.evm_config.evm_with_env(&mut *db, env); - let mut evm = evm_config.evm_with_env(&mut db, env); + for sequencer_tx in &self.attributes().transactions { + // A sequencer's block should never contain blob transactions. + if sequencer_tx.value().is_eip4844() { + return Err(PayloadBuilderError::other( + OpPayloadBuilderError::BlobTransactionRejected, + )) + } - let ResultAndState { result, state } = match evm.transact() { - Ok(res) => res, - Err(err) => { - match err { - EVMError::Transaction(err) => { - trace!(target: "payload_builder", %err, ?sequencer_tx, "Error in sequencer transaction, skipping."); - continue - } - err => { - // this is an error that we should treat as fatal for this attempt - return Err(PayloadBuilderError::EvmExecutionError(err)) + // Convert the transaction to a [TransactionSignedEcRecovered]. This is + // purely for the purposes of utilizing the `evm_config.tx_env`` function. + // Deposit transactions do not have signatures, so if the tx is a deposit, this + // will just pull in its `from` address. + let sequencer_tx = + sequencer_tx.value().clone().try_into_ecrecovered().map_err(|_| { + PayloadBuilderError::other(OpPayloadBuilderError::TransactionEcRecoverFailed) + })?; + + // Cache the depositor account prior to the state transition for the deposit nonce. + // + // Note that this *only* needs to be done post-regolith hardfork, as deposit nonces + // were not introduced in Bedrock. In addition, regular transactions don't have deposit + // nonces, so we don't need to touch the DB for those. 
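+            // The pre-state account info cached here is used below to populate
+            // `deposit_nonce` on the receipt.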
+ let depositor = (self.is_regolith_active() && sequencer_tx.is_deposit()) + .then(|| { + evm.db_mut() + .load_cache_account(sequencer_tx.signer()) + .map(|acc| acc.account_info().unwrap_or_default()) + }) + .transpose() + .map_err(|_| { + PayloadBuilderError::other(OpPayloadBuilderError::AccountLoadFailed( + sequencer_tx.signer(), + )) + })?; + + *evm.tx_mut() = self.evm_config.tx_env(sequencer_tx.as_signed(), sequencer_tx.signer()); + + let ResultAndState { result, state } = match evm.transact() { + Ok(res) => res, + Err(err) => { + match err { + EVMError::Transaction(err) => { + trace!(target: "payload_builder", %err, ?sequencer_tx, "Error in sequencer transaction, skipping."); + continue + } + err => { + // this is an error that we should treat as fatal for this attempt + return Err(PayloadBuilderError::EvmExecutionError(err)) + } } } - } - }; + }; - // to release the db reference drop evm. - drop(evm); - // commit changes - db.commit(state); - - let gas_used = result.gas_used(); - - // add gas used by the transaction to cumulative gas used, before creating the receipt - cumulative_gas_used += gas_used; - - // Push transaction changeset and calculate header bloom filter for receipt. - receipts.push(Some(Receipt { - tx_type: sequencer_tx.tx_type(), - success: result.is_success(), - cumulative_gas_used, - logs: result.into_logs().into_iter().map(Into::into).collect(), - deposit_nonce: depositor.map(|account| account.nonce), - // The deposit receipt version was introduced in Canyon to indicate an update to how - // receipt hashes should be computed when set. The state transition process - // ensures this is only set for post-Canyon deposit transactions. - deposit_receipt_version: chain_spec - .is_fork_active_at_timestamp( - OptimismHardfork::Canyon, - attributes.payload_attributes.timestamp, - ) - .then_some(1), - })); + // commit changes + evm.db_mut().commit(state); - // append sender and transaction to the respective lists - executed_senders.push(sequencer_tx.signer()); - executed_txs.push(sequencer_tx.into_signed()); + let gas_used = result.gas_used(); + + // add gas used by the transaction to cumulative gas used, before creating the receipt + info.cumulative_gas_used += gas_used; + + // Push transaction changeset and calculate header bloom filter for receipt. + info.receipts.push(Some(Receipt { + tx_type: sequencer_tx.tx_type(), + success: result.is_success(), + cumulative_gas_used: info.cumulative_gas_used, + logs: result.into_logs().into_iter().map(Into::into).collect(), + deposit_nonce: depositor.map(|account| account.nonce), + // The deposit receipt version was introduced in Canyon to indicate an update to how + // receipt hashes should be computed when set. The state transition process + // ensures this is only set for post-Canyon deposit transactions. 
+                deposit_receipt_version: self.is_canyon_active().then_some(1),
+            }));
+
+            // append sender and transaction to the respective lists
+            info.executed_senders.push(sequencer_tx.signer());
+            info.executed_transactions.push(sequencer_tx.into_signed());
+        }
+
+        Ok(info)
     }

-        if !attributes.no_tx_pool {
-            while let Some(pool_tx) = best_txs.next() {
+    /// Executes the given best transactions and updates the execution info
+    pub fn execute_best_transactions<DB, Pool>(
+        &self,
+        info: &mut ExecutionInfo,
+        db: &mut State<DB>,
+        mut best_txs: impl PayloadTransactions<Transaction = Pool::Transaction>,
+    ) -> Result<Option<BuildOutcomeKind<OpBuiltPayload>>, PayloadBuilderError>
+    where
+        DB: Database,
+        Pool: TransactionPool,
+    {
+        let block_gas_limit = self.block_gas_limit();
+        let base_fee = self.base_fee();
+
+        let env = EnvWithHandlerCfg::new_with_cfg_env(
+            self.initialized_cfg.clone(),
+            self.initialized_block_env.clone(),
+            TxEnv::default(),
+        );
+        let mut evm = self.evm_config.evm_with_env(&mut *db, env);
+
+        while let Some(tx) = best_txs.next(()) {
             // ensure we still have capacity for this transaction
-            if cumulative_gas_used + pool_tx.gas_limit() > block_gas_limit {
+            if info.cumulative_gas_used + tx.gas_limit() > block_gas_limit {
                 // we can't fit this transaction into the block, so we need to mark it as
                 // invalid which also removes all dependent transaction from
                 // the iterator before we can continue
-                best_txs.mark_invalid(&pool_tx);
+                best_txs.mark_invalid(tx.signer(), tx.nonce());
                 continue
             }

             // A sequencer's block should never contain blob or deposit transactions from the pool.
-            if pool_tx.is_eip4844() || pool_tx.tx_type() == TxType::Deposit as u8 {
-                best_txs.mark_invalid(&pool_tx);
+            if tx.is_eip4844() || tx.tx_type() == TxType::Deposit as u8 {
+                best_txs.mark_invalid(tx.signer(), tx.nonce());
                 continue
             }

             // check if the job was cancelled, if so we can exit early
-            if cancel.is_cancelled() {
-                return Ok(BuildOutcome::Cancelled)
+            if self.cancel.is_cancelled() {
+                return Ok(Some(BuildOutcomeKind::Cancelled))
             }

-            // convert tx to a signed transaction
-            let tx = pool_tx.to_recovered_transaction();
-            let env = EnvWithHandlerCfg::new_with_cfg_env(
-                initialized_cfg.clone(),
-                initialized_block_env.clone(),
-                evm_config.tx_env(tx.as_signed(), tx.signer()),
-            );
-
-            // Configure the environment for the block.
-            let mut evm = evm_config.evm_with_env(&mut db, env);
+            // Configure the environment for the tx.
+            *evm.tx_mut() = self.evm_config.tx_env(tx.as_signed(), tx.signer());

             let ResultAndState { result, state } = match evm.transact() {
                 Ok(res) => res,
@@ -369,7 +782,7 @@ where
                         // if the transaction is invalid, we can skip it and all of its
                         // descendants
                         trace!(target: "payload_builder", %err, ?tx, "skipping invalid transaction and its descendants");
-                        best_txs.mark_invalid(&pool_tx);
+                        best_txs.mark_invalid(tx.signer(), tx.nonce());
                     }

                     continue
@@ -381,22 +794,21 @@ where
                 }
             }
         };
-            // drop evm so db is released.
-            drop(evm);
+
             // commit changes
-            db.commit(state);
+            evm.db_mut().commit(state);

             let gas_used = result.gas_used();

             // add gas used by the transaction to cumulative gas used, before creating the
             // receipt
-            cumulative_gas_used += gas_used;
+            info.cumulative_gas_used += gas_used;

             // Push transaction changeset and calculate header bloom filter for receipt.
- receipts.push(Some(Receipt { + info.receipts.push(Some(Receipt { tx_type: tx.tx_type(), success: result.is_success(), - cumulative_gas_used, + cumulative_gas_used: info.cumulative_gas_used, logs: result.into_logs().into_iter().map(Into::into).collect(), deposit_nonce: None, deposit_receipt_version: None, @@ -404,144 +816,15 @@ where // update add to total fees let miner_fee = tx - .effective_tip_per_gas(Some(base_fee)) + .effective_tip_per_gas(base_fee) .expect("fee is always valid; execution succeeded"); - total_fees += U256::from(miner_fee) * U256::from(gas_used); + info.total_fees += U256::from(miner_fee) * U256::from(gas_used); // append sender and transaction to the respective lists - executed_senders.push(tx.signer()); - executed_txs.push(tx.into_signed()); + info.executed_senders.push(tx.signer()); + info.executed_transactions.push(tx.into_signed()); } - } - - // check if we have a better block - if !is_better_payload(best_payload.as_ref(), total_fees) { - // can skip building the block - return Ok(BuildOutcome::Aborted { fees: total_fees, cached_reads }) - } - - let WithdrawalsOutcome { withdrawals_root, withdrawals } = commit_withdrawals( - &mut db, - &chain_spec, - attributes.payload_attributes.timestamp, - attributes.payload_attributes.withdrawals.clone(), - )?; - - // merge all transitions into bundle state, this would apply the withdrawal balance changes - // and 4788 contract call - db.merge_transitions(BundleRetention::Reverts); - - let execution_outcome = ExecutionOutcome::new( - db.take_bundle(), - vec![receipts.clone()].into(), - block_number, - Vec::new(), - ); - let receipts_root = execution_outcome - .generic_receipts_root_slow(block_number, |receipts| { - calculate_receipt_root_no_memo_optimism(receipts, &chain_spec, attributes.timestamp()) - }) - .expect("Number is in range"); - let logs_bloom = execution_outcome.block_logs_bloom(block_number).expect("Number is in range"); - - // calculate the state root - let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state); - let (state_root, trie_output) = { - let state_provider = db.database.0.inner.borrow_mut(); - state_provider.db.state_root_with_updates(hashed_state.clone()).inspect_err(|err| { - warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), - %err, - "failed to calculate state root for payload" - ); - })? - }; - - // create the block header - let transactions_root = proofs::calculate_transaction_root(&executed_txs); - - // initialize empty blob sidecars. There are no blob transactions on L2. 
- let blob_sidecars = Vec::new(); - let mut excess_blob_gas = None; - let mut blob_gas_used = None; - - // only determine cancun fields when active - if chain_spec.is_cancun_active_at_timestamp(attributes.payload_attributes.timestamp) { - excess_blob_gas = if chain_spec.is_cancun_active_at_timestamp(parent_block.timestamp) { - let parent_excess_blob_gas = parent_block.excess_blob_gas.unwrap_or_default(); - let parent_blob_gas_used = parent_block.blob_gas_used.unwrap_or_default(); - Some(calc_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used)) - } else { - // for the first post-fork block, both parent.blob_gas_used and - // parent.excess_blob_gas are evaluated as 0 - Some(calc_excess_blob_gas(0, 0)) - }; - blob_gas_used = Some(0); - } - - let header = Header { - parent_hash: parent_block.hash(), - ommers_hash: EMPTY_OMMER_ROOT_HASH, - beneficiary: initialized_block_env.coinbase, - state_root, - transactions_root, - receipts_root, - withdrawals_root, - logs_bloom, - timestamp: attributes.payload_attributes.timestamp, - mix_hash: attributes.payload_attributes.prev_randao, - nonce: BEACON_NONCE.into(), - base_fee_per_gas: Some(base_fee), - number: parent_block.number + 1, - gas_limit: block_gas_limit, - difficulty: U256::ZERO, - gas_used: cumulative_gas_used, - extra_data, - parent_beacon_block_root: attributes.payload_attributes.parent_beacon_block_root, - blob_gas_used, - excess_blob_gas: excess_blob_gas.map(Into::into), - requests_hash: None, - }; - - // seal the block - let block = Block { - header, - body: BlockBody { transactions: executed_txs, ommers: vec![], withdrawals }, - }; - - let sealed_block = block.seal_slow(); - debug!(target: "payload_builder", ?sealed_block, "sealed built block"); - - // create the executed block data - let executed = ExecutedBlock { - block: Arc::new(sealed_block.clone()), - senders: Arc::new(executed_senders), - execution_output: Arc::new(execution_outcome), - hashed_state: Arc::new(hashed_state), - trie: Arc::new(trie_output), - }; - - let no_tx_pool = attributes.no_tx_pool; - - let mut payload = OptimismBuiltPayload::new( - attributes.payload_attributes.id, - sealed_block, - total_fees, - chain_spec, - attributes, - Some(executed), - ); - - // extend the payload with the blob sidecars from the executed txs - payload.extend_sidecars(blob_sidecars); - - if no_tx_pool { - // if `no_tx_pool` is set only transactions from the payload attributes will be included in - // the payload. In other words, the payload is deterministic and we can freeze it once we've - // successfully built it. - Ok(BuildOutcome::Freeze(payload)) - } else { - Ok(BuildOutcome::Better { payload, cached_reads }) + Ok(None) } } diff --git a/crates/optimism/payload/src/error.rs b/crates/optimism/payload/src/error.rs index 2016fdc6dd93..8a254e9835c2 100644 --- a/crates/optimism/payload/src/error.rs +++ b/crates/optimism/payload/src/error.rs @@ -2,7 +2,7 @@ /// Optimism specific payload building errors. #[derive(Debug, thiserror::Error)] -pub enum OptimismPayloadBuilderError { +pub enum OpPayloadBuilderError { /// Thrown when a transaction fails to convert to a /// [`reth_primitives::TransactionSignedEcRecovered`]. 
 #[error("failed to convert deposit transaction to TransactionSignedEcRecovered")]
diff --git a/crates/optimism/payload/src/lib.rs b/crates/optimism/payload/src/lib.rs
index c06b49c53762..8447026d783a 100644
--- a/crates/optimism/payload/src/lib.rs
+++ b/crates/optimism/payload/src/lib.rs
@@ -12,7 +12,7 @@
 #![cfg(feature = "optimism")]

 pub mod builder;
-pub use builder::OptimismPayloadBuilder;
+pub use builder::OpPayloadBuilder;
 pub mod error;
 pub mod payload;
-pub use payload::{OpPayloadAttributes, OptimismBuiltPayload, OptimismPayloadBuilderAttributes};
+pub use payload::{OpBuiltPayload, OpPayloadAttributes, OpPayloadBuilderAttributes};
diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs
index 98b0e41b0f57..36f11ee628b3 100644
--- a/crates/optimism/payload/src/payload.rs
+++ b/crates/optimism/payload/src/payload.rs
@@ -1,11 +1,13 @@
 //! Payload related types
-//! Optimism builder support
-
-use alloy_eips::{eip2718::Decodable2718, eip7685::Requests};
-use alloy_primitives::{Address, B256, U256};
+use alloy_eips::{
+    eip1559::BaseFeeParams, eip2718::Decodable2718, eip4844::BlobTransactionSidecar,
+    eip4895::Withdrawals, eip7685::Requests,
+};
+use alloy_primitives::{keccak256, Address, Bytes, B256, B64, U256};
 use alloy_rlp::Encodable;
 use alloy_rpc_types_engine::{ExecutionPayloadEnvelopeV2, ExecutionPayloadV1, PayloadId};
+use op_alloy_consensus::eip1559::{decode_holocene_extra_data, EIP1559ParamError};
 /// Re-export for use in downstream arguments.
 pub use op_alloy_rpc_types_engine::OpPayloadAttributes;
 use op_alloy_rpc_types_engine::{OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4};
@@ -14,17 +16,15 @@ use reth_chainspec::EthereumHardforks;
 use reth_optimism_chainspec::OpChainSpec;
 use reth_payload_builder::EthPayloadBuilderAttributes;
 use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes};
-use reth_primitives::{
-    transaction::WithEncoded, BlobTransactionSidecar, SealedBlock, TransactionSigned, Withdrawals,
-};
+use reth_primitives::{transaction::WithEncoded, SealedBlock, TransactionSigned};
 use reth_rpc_types_compat::engine::payload::{
     block_to_payload_v1, block_to_payload_v3, convert_block_to_payload_field_v2,
 };
 use std::sync::Arc;

 /// Optimism Payload Builder Attributes
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub struct OptimismPayloadBuilderAttributes {
+#[derive(Debug, Clone, PartialEq, Eq, Default)]
+pub struct OpPayloadBuilderAttributes {
     /// Inner ethereum payload builder attributes
     pub payload_attributes: EthPayloadBuilderAttributes,
     /// `NoTxPool` option for the generated payload
@@ -34,17 +34,35 @@ pub struct OptimismPayloadBuilderAttributes {
     pub transactions: Vec<WithEncoded<TransactionSigned>>,
     /// The gas limit for the generated payload
     pub gas_limit: Option<u64>,
+    /// EIP-1559 parameters for the generated payload
+    pub eip_1559_params: Option<B64>,
+}
+
+impl OpPayloadBuilderAttributes {
+    /// Extracts the `eip1559` parameters for the payload.
+    pub fn get_holocene_extra_data(
+        &self,
+        default_base_fee_params: BaseFeeParams,
+    ) -> Result<Bytes, EIP1559ParamError> {
+        self.eip_1559_params
+            .map(|params| decode_holocene_extra_data(params, default_base_fee_params))
+            .ok_or(EIP1559ParamError::NoEIP1559Params)?
+    }
 }

-impl PayloadBuilderAttributes for OptimismPayloadBuilderAttributes {
+impl PayloadBuilderAttributes for OpPayloadBuilderAttributes {
     type RpcPayloadAttributes = OpPayloadAttributes;
     type Error = alloy_rlp::Error;

     /// Creates a new payload builder for the given parent block and the attributes.
     ///
     /// Derives the unique [`PayloadId`] for the given parent and attributes
-    fn try_new(parent: B256, attributes: OpPayloadAttributes) -> Result<Self, Self::Error> {
-        let id = payload_id_optimism(&parent, &attributes);
+    fn try_new(
+        parent: B256,
+        attributes: OpPayloadAttributes,
+        version: u8,
+    ) -> Result<Self, Self::Error> {
+        let id = payload_id_optimism(&parent, &attributes, version);

         let transactions = attributes
             .transactions
@@ -78,6 +96,7 @@ impl PayloadBuilderAttributes for OptimismPayloadBuilderAttributes {
             no_tx_pool: attributes.no_tx_pool.unwrap_or_default(),
             transactions,
             gas_limit: attributes.gas_limit,
+            eip_1559_params: attributes.eip_1559_params,
         })
     }

@@ -112,11 +131,11 @@ impl PayloadBuilderAttributes for OptimismPayloadBuilderAttributes {

 /// Contains the built payload.
 #[derive(Debug, Clone)]
-pub struct OptimismBuiltPayload {
+pub struct OpBuiltPayload {
     /// Identifier of the payload
     pub(crate) id: PayloadId,
     /// The built block
-    pub(crate) block: SealedBlock,
+    pub(crate) block: Arc<SealedBlock>,
     /// Block execution data for the payload, if any.
     pub(crate) executed_block: Option<ExecutedBlock>,
     /// The fees of the block
@@ -127,19 +146,19 @@ pub struct OptimismBuiltPayload {
     /// The rollup's chainspec.
     pub(crate) chain_spec: Arc<OpChainSpec>,
     /// The payload attributes.
-    pub(crate) attributes: OptimismPayloadBuilderAttributes,
+    pub(crate) attributes: OpPayloadBuilderAttributes,
 }

 // === impl BuiltPayload ===

-impl OptimismBuiltPayload {
+impl OpBuiltPayload {
     /// Initializes the payload with the given initial block.
     pub const fn new(
         id: PayloadId,
-        block: SealedBlock,
+        block: Arc<SealedBlock>,
         fees: U256,
         chain_spec: Arc<OpChainSpec>,
-        attributes: OptimismPayloadBuilderAttributes,
+        attributes: OpPayloadBuilderAttributes,
         executed_block: Option<ExecutedBlock>,
     ) -> Self {
         Self { id, block, executed_block, fees, sidecars: Vec::new(), chain_spec, attributes }
@@ -151,7 +170,7 @@ impl OptimismBuiltPayload {
     }

     /// Returns the built block(sealed)
-    pub const fn block(&self) -> &SealedBlock {
+    pub fn block(&self) -> &SealedBlock {
         &self.block
     }

@@ -166,7 +185,7 @@ impl OptimismBuiltPayload {
     }
 }

-impl BuiltPayload for OptimismBuiltPayload {
+impl BuiltPayload for OpBuiltPayload {
     fn block(&self) -> &SealedBlock {
         &self.block
     }
@@ -184,7 +203,7 @@ impl BuiltPayload for OptimismBuiltPayload {
     }
 }

-impl BuiltPayload for &OptimismBuiltPayload {
+impl BuiltPayload for &OpBuiltPayload {
     fn block(&self) -> &SealedBlock {
         (**self).block()
     }
@@ -203,24 +222,27 @@ impl BuiltPayload for &OptimismBuiltPayload {
 }

 // V1 engine_getPayloadV1 response
-impl From<OptimismBuiltPayload> for ExecutionPayloadV1 {
-    fn from(value: OptimismBuiltPayload) -> Self {
-        block_to_payload_v1(value.block)
+impl From<OpBuiltPayload> for ExecutionPayloadV1 {
+    fn from(value: OpBuiltPayload) -> Self {
+        block_to_payload_v1(Arc::unwrap_or_clone(value.block))
     }
 }

 // V2 engine_getPayloadV2 response
-impl From<OptimismBuiltPayload> for ExecutionPayloadEnvelopeV2 {
-    fn from(value: OptimismBuiltPayload) -> Self {
-        let OptimismBuiltPayload { block, fees, .. } = value;
+impl From<OpBuiltPayload> for ExecutionPayloadEnvelopeV2 {
+    fn from(value: OpBuiltPayload) -> Self {
+        let OpBuiltPayload { block, fees, .. } = value;

-        Self { block_value: fees, execution_payload: convert_block_to_payload_field_v2(block) }
+        Self {
+            block_value: fees,
+            execution_payload: convert_block_to_payload_field_v2(Arc::unwrap_or_clone(block)),
+        }
     }
 }

-impl From<OptimismBuiltPayload> for OpExecutionPayloadEnvelopeV3 {
-    fn from(value: OptimismBuiltPayload) -> Self {
-        let OptimismBuiltPayload { block, fees, sidecars, chain_spec, attributes, ..
} = value; +impl From for OpExecutionPayloadEnvelopeV3 { + fn from(value: OpBuiltPayload) -> Self { + let OpBuiltPayload { block, fees, sidecars, chain_spec, attributes, .. } = value; let parent_beacon_block_root = if chain_spec.is_cancun_active_at_timestamp(attributes.timestamp()) { @@ -229,7 +251,7 @@ impl From for OpExecutionPayloadEnvelopeV3 { B256::ZERO }; Self { - execution_payload: block_to_payload_v3(block), + execution_payload: block_to_payload_v3(Arc::unwrap_or_clone(block)), block_value: fees, // From the engine API spec: // @@ -245,9 +267,9 @@ impl From for OpExecutionPayloadEnvelopeV3 { } } } -impl From for OpExecutionPayloadEnvelopeV4 { - fn from(value: OptimismBuiltPayload) -> Self { - let OptimismBuiltPayload { block, fees, sidecars, chain_spec, attributes, .. } = value; +impl From for OpExecutionPayloadEnvelopeV4 { + fn from(value: OpBuiltPayload) -> Self { + let OpBuiltPayload { block, fees, sidecars, chain_spec, attributes, .. } = value; let parent_beacon_block_root = if chain_spec.is_cancun_active_at_timestamp(attributes.timestamp()) { @@ -256,7 +278,7 @@ impl From for OpExecutionPayloadEnvelopeV4 { B256::ZERO }; Self { - execution_payload: block_to_payload_v3(block), + execution_payload: block_to_payload_v3(Arc::unwrap_or_clone(block)), block_value: fees, // From the engine API spec: // @@ -277,7 +299,11 @@ impl From for OpExecutionPayloadEnvelopeV4 { /// Generates the payload id for the configured payload from the [`OpPayloadAttributes`]. /// /// Returns an 8-byte identifier by hashing the payload components with sha256 hash. -pub(crate) fn payload_id_optimism(parent: &B256, attributes: &OpPayloadAttributes) -> PayloadId { +pub(crate) fn payload_id_optimism( + parent: &B256, + attributes: &OpPayloadAttributes, + payload_version: u8, +) -> PayloadId { use sha2::Digest; let mut hasher = sha2::Sha256::new(); hasher.update(parent.as_slice()); @@ -295,15 +321,89 @@ pub(crate) fn payload_id_optimism(parent: &B256, attributes: &OpPayloadAttribute } let no_tx_pool = attributes.no_tx_pool.unwrap_or_default(); - hasher.update([no_tx_pool as u8]); - if let Some(txs) = &attributes.transactions { - txs.iter().for_each(|tx| hasher.update(tx)); + if no_tx_pool || attributes.transactions.as_ref().is_some_and(|txs| !txs.is_empty()) { + hasher.update([no_tx_pool as u8]); + let txs_len = attributes.transactions.as_ref().map(|txs| txs.len()).unwrap_or_default(); + hasher.update(&txs_len.to_be_bytes()[..]); + if let Some(txs) = &attributes.transactions { + for tx in txs { + // we have to just hash the bytes here because otherwise we would need to decode + // the transactions here which really isn't ideal + let tx_hash = keccak256(tx); + // maybe we can try just taking the hash and not decoding + hasher.update(tx_hash) + } + } } if let Some(gas_limit) = attributes.gas_limit { hasher.update(gas_limit.to_be_bytes()); } - let out = hasher.finalize(); + if let Some(eip_1559_params) = attributes.eip_1559_params { + hasher.update(eip_1559_params.as_slice()); + } + + let mut out = hasher.finalize(); + out[0] = payload_version; PayloadId::new(out.as_slice()[..8].try_into().expect("sufficient length")) } + +#[cfg(test)] +mod tests { + use super::*; + use crate::OpPayloadAttributes; + use alloy_primitives::{address, b256, bytes, FixedBytes}; + use alloy_rpc_types_engine::PayloadAttributes; + use reth_payload_primitives::EngineApiMessageVersion; + use std::str::FromStr; + + #[test] + fn test_payload_id_parity_op_geth() { + // INFO rollup_boost::server:received fork_choice_updated_v3 from builder and 
l2_client + // payload_id_builder="0x6ef26ca02318dcf9" payload_id_l2="0x03d2dae446d2a86a" + let expected = + PayloadId::new(FixedBytes::<8>::from_str("0x03d2dae446d2a86a").unwrap().into()); + let attrs = OpPayloadAttributes { + payload_attributes: PayloadAttributes { + timestamp: 1728933301, + prev_randao: b256!("9158595abbdab2c90635087619aa7042bbebe47642dfab3c9bfb934f6b082765"), + suggested_fee_recipient: address!("4200000000000000000000000000000000000011"), + withdrawals: Some([].into()), + parent_beacon_block_root: b256!("8fe0193b9bf83cb7e5a08538e494fecc23046aab9a497af3704f4afdae3250ff").into() + }, + transactions: Some([bytes!("7ef8f8a0dc19cfa777d90980e4875d0a548a881baaa3f83f14d1bc0d3038bc329350e54194deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b8a4440a5e20000f424000000000000000000000000300000000670d6d890000000000000125000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000014bf9181db6e381d4384bbf69c48b0ee0eed23c6ca26143c6d2544f9d39997a590000000000000000000000007f83d659683caf2767fd3c720981d51f5bc365bc")].into()), + no_tx_pool: None, + gas_limit: Some(30000000), + eip_1559_params: None, + }; + + // Reth's `PayloadId` should match op-geth's `PayloadId`. This fails + assert_eq!( + expected, + payload_id_optimism( + &b256!("3533bf30edaf9505d0810bf475cbe4e5f4b9889904b9845e83efdeab4e92eb1e"), + &attrs, + EngineApiMessageVersion::V3 as u8 + ) + ); + } + + #[test] + fn test_get_extra_data_post_holocene() { + let attributes = OpPayloadBuilderAttributes { + eip_1559_params: Some(B64::from_str("0x0000000800000008").unwrap()), + ..Default::default() + }; + let extra_data = attributes.get_holocene_extra_data(BaseFeeParams::new(80, 60)); + assert_eq!(extra_data.unwrap(), Bytes::copy_from_slice(&[0, 0, 0, 0, 8, 0, 0, 0, 8])); + } + + #[test] + fn test_get_extra_data_post_holocene_default() { + let attributes = + OpPayloadBuilderAttributes { eip_1559_params: Some(B64::ZERO), ..Default::default() }; + let extra_data = attributes.get_holocene_extra_data(BaseFeeParams::new(80, 60)); + assert_eq!(extra_data.unwrap(), Bytes::copy_from_slice(&[0, 0, 0, 0, 80, 0, 0, 0, 60])); + } +} diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index a2d4c20a8b72..a6f367326722 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -12,6 +12,10 @@ description = "OP primitive types" workspace = true [dependencies] -reth-primitives.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true +op-alloy-consensus.workspace = true +alloy-eips.workspace = true +alloy-rlp.workspace = true +derive_more.workspace = true +bytes.workspace = true diff --git a/crates/optimism/primitives/src/bedrock.rs b/crates/optimism/primitives/src/bedrock.rs index 7153ae3155c7..204b34d33782 100644 --- a/crates/optimism/primitives/src/bedrock.rs +++ b/crates/optimism/primitives/src/bedrock.rs @@ -1,8 +1,7 @@ //! OP mainnet bedrock related data. 
-use alloy_consensus::{EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; +use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; use alloy_primitives::{address, b256, bloom, bytes, B256, B64, U256}; -use reth_primitives::Header; /// Transaction 0x9ed8f713b2cc6439657db52dcd2fdb9cc944915428f3c6e2a7703e242b259cb9 in block 985, /// replayed in blocks: diff --git a/crates/optimism/primitives/src/lib.rs b/crates/optimism/primitives/src/lib.rs index 659900b9adbb..f8d8e511498b 100644 --- a/crates/optimism/primitives/src/lib.rs +++ b/crates/optimism/primitives/src/lib.rs @@ -8,3 +8,4 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] pub mod bedrock; +pub mod op_tx_type; diff --git a/crates/optimism/primitives/src/op_tx_type.rs b/crates/optimism/primitives/src/op_tx_type.rs new file mode 100644 index 000000000000..b317bb05c9c5 --- /dev/null +++ b/crates/optimism/primitives/src/op_tx_type.rs @@ -0,0 +1,189 @@ +//! newtype pattern on `op_alloy_consensus::OpTxType`. +//! `OpTxType` implements `reth_primitives_traits::TxType`. +//! This type is required because a `Compact` impl is needed on the deposit tx type. + +use alloy_primitives::{U64, U8}; +use alloy_rlp::{Decodable, Encodable, Error}; +use bytes::BufMut; +use core::fmt::Debug; +use derive_more::{ + derive::{From, Into}, + Display, +}; +use op_alloy_consensus::OpTxType as AlloyOpTxType; +use std::convert::TryFrom; + +/// Wrapper type for `AlloyOpTxType` to implement `TxType` trait. +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Display, Ord, Hash, From, Into)] +#[into(u8)] +pub struct OpTxType(AlloyOpTxType); + +impl From for U8 { + fn from(tx_type: OpTxType) -> Self { + Self::from(u8::from(tx_type)) + } +} + +impl TryFrom for OpTxType { + type Error = Error; + + fn try_from(value: u8) -> Result { + AlloyOpTxType::try_from(value) + .map(OpTxType) + .map_err(|_| Error::Custom("Invalid transaction type")) + } +} + +impl Default for OpTxType { + fn default() -> Self { + Self(AlloyOpTxType::Legacy) + } +} + +impl PartialEq for OpTxType { + fn eq(&self, other: &u8) -> bool { + let self_as_u8: u8 = (*self).into(); + &self_as_u8 == other + } +} + +impl TryFrom for OpTxType { + type Error = Error; + + fn try_from(value: u64) -> Result { + if value > u8::MAX as u64 { + return Err(Error::Custom("value out of range")); + } + Self::try_from(value as u8) + } +} + +impl TryFrom for OpTxType { + type Error = Error; + + fn try_from(value: U64) -> Result { + let u64_value: u64 = value.try_into().map_err(|_| Error::Custom("value out of range"))?; + Self::try_from(u64_value) + } +} + +impl Encodable for OpTxType { + fn length(&self) -> usize { + let value: u8 = (*self).into(); + value.length() + } + + fn encode(&self, out: &mut dyn BufMut) { + let value: u8 = (*self).into(); + value.encode(out); + } +} + +impl Decodable for OpTxType { + fn decode(buf: &mut &[u8]) -> Result { + // Decode the u8 value from RLP + let value = if buf.is_empty() { + return Err(alloy_rlp::Error::InputTooShort); + } else if buf[0] == 0x80 { + 0 // Special case: RLP encoding for integer 0 is `b"\x80"` + } else { + u8::decode(buf)? 
+ }; + + Self::try_from(value).map_err(|_| alloy_rlp::Error::Custom("Invalid transaction type")) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use bytes::BytesMut; + + #[test] + fn test_from_alloy_op_tx_type() { + let alloy_tx = AlloyOpTxType::Legacy; + let op_tx: OpTxType = OpTxType::from(alloy_tx); + assert_eq!(op_tx, OpTxType(AlloyOpTxType::Legacy)); + } + + #[test] + fn test_from_op_tx_type_to_u8() { + let op_tx = OpTxType(AlloyOpTxType::Legacy); + let tx_type_u8: u8 = op_tx.into(); + assert_eq!(tx_type_u8, AlloyOpTxType::Legacy as u8); + } + + #[test] + fn test_from_op_tx_type_to_u8_u8() { + let op_tx = OpTxType(AlloyOpTxType::Legacy); + let tx_type_u8: U8 = op_tx.into(); + assert_eq!(tx_type_u8, U8::from(AlloyOpTxType::Legacy as u8)); + } + + #[test] + fn test_try_from_u8() { + let op_tx = OpTxType::try_from(AlloyOpTxType::Legacy as u8).unwrap(); + assert_eq!(op_tx, OpTxType(AlloyOpTxType::Legacy)); + } + + #[test] + fn test_try_from_invalid_u8() { + let invalid_value: u8 = 255; + let result = OpTxType::try_from(invalid_value); + assert_eq!(result, Err(Error::Custom("Invalid transaction type"))); + } + + #[test] + fn test_try_from_u64() { + let op_tx = OpTxType::try_from(AlloyOpTxType::Legacy as u64).unwrap(); + assert_eq!(op_tx, OpTxType(AlloyOpTxType::Legacy)); + } + + #[test] + fn test_try_from_u64_out_of_range() { + let result = OpTxType::try_from(u64::MAX); + assert_eq!(result, Err(Error::Custom("value out of range"))); + } + + #[test] + fn test_try_from_u64_within_range() { + let valid_value: U64 = U64::from(AlloyOpTxType::Legacy as u64); + let op_tx = OpTxType::try_from(valid_value).unwrap(); + assert_eq!(op_tx, OpTxType(AlloyOpTxType::Legacy)); + } + + #[test] + fn test_default() { + let default_tx = OpTxType::default(); + assert_eq!(default_tx, OpTxType(AlloyOpTxType::Legacy)); + } + + #[test] + fn test_partial_eq_u8() { + let op_tx = OpTxType(AlloyOpTxType::Legacy); + assert_eq!(op_tx, AlloyOpTxType::Legacy as u8); + } + + #[test] + fn test_encodable() { + let op_tx = OpTxType(AlloyOpTxType::Legacy); + let mut buf = BytesMut::new(); + op_tx.encode(&mut buf); + assert_eq!(buf, BytesMut::from(&[0x80][..])); + } + + #[test] + fn test_decodable_success() { + // Using the RLP-encoded form of 0, which is `b"\x80"` + let mut buf: &[u8] = &[0x80]; + let decoded_tx = OpTxType::decode(&mut buf).unwrap(); + assert_eq!(decoded_tx, OpTxType(AlloyOpTxType::Legacy)); + } + + #[test] + fn test_decodable_invalid() { + let mut buf: &[u8] = &[255]; + let result = OpTxType::decode(&mut buf); + assert!(result.is_err()); + } +} diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 17ebec7ff741..37b64b774a13 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -37,7 +37,6 @@ reth-optimism-forks.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rpc-types-eth.workspace = true -alloy-rpc-types.workspace = true alloy-consensus.workspace = true op-alloy-network.workspace = true op-alloy-rpc-types.workspace = true diff --git a/crates/optimism/rpc/src/error.rs b/crates/optimism/rpc/src/error.rs index b4d349e1cc45..1dd7a639eac7 100644 --- a/crates/optimism/rpc/src/error.rs +++ b/crates/optimism/rpc/src/error.rs @@ -1,8 +1,8 @@ //! RPC errors specific to OP. 
-use alloy_rpc_types::error::EthRpcErrorCode; +use alloy_rpc_types_eth::{error::EthRpcErrorCode, BlockError}; use jsonrpsee_types::error::INTERNAL_ERROR_CODE; -use reth_optimism_evm::OptimismBlockExecutionError; +use reth_optimism_evm::OpBlockExecutionError; use reth_primitives::revm_primitives::{InvalidTransaction, OptimismInvalidTransaction}; use reth_rpc_eth_api::AsEthApiError; use reth_rpc_eth_types::EthApiError; @@ -16,7 +16,7 @@ pub enum OpEthApiError { Eth(#[from] EthApiError), /// EVM error originating from invalid optimism data. #[error(transparent)] - Evm(#[from] OptimismBlockExecutionError), + Evm(#[from] OpBlockExecutionError), /// Thrown when calculating L1 gas fee. #[error("failed to calculate l1 gas fee")] L1BlockFeeError, @@ -25,7 +25,7 @@ pub enum OpEthApiError { L1BlockGasError, /// Wrapper for [`revm_primitives::InvalidTransaction`](InvalidTransaction). #[error(transparent)] - InvalidTransaction(#[from] OptimismInvalidTransactionError), + InvalidTransaction(#[from] OpInvalidTransactionError), /// Sequencer client error. #[error(transparent)] Sequencer(#[from] SequencerClientError), @@ -55,7 +55,7 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { /// Optimism specific invalid transaction errors #[derive(thiserror::Error, Debug)] -pub enum OptimismInvalidTransactionError { +pub enum OpInvalidTransactionError { /// A deposit transaction was submitted as a system transaction post-regolith. #[error("no system transactions allowed after regolith")] DepositSystemTxPostRegolith, @@ -64,18 +64,18 @@ pub enum OptimismInvalidTransactionError { HaltedDepositPostRegolith, } -impl From for jsonrpsee_types::error::ErrorObject<'static> { - fn from(err: OptimismInvalidTransactionError) -> Self { +impl From for jsonrpsee_types::error::ErrorObject<'static> { + fn from(err: OpInvalidTransactionError) -> Self { match err { - OptimismInvalidTransactionError::DepositSystemTxPostRegolith | - OptimismInvalidTransactionError::HaltedDepositPostRegolith => { + OpInvalidTransactionError::DepositSystemTxPostRegolith | + OpInvalidTransactionError::HaltedDepositPostRegolith => { rpc_err(EthRpcErrorCode::TransactionRejected.code(), err.to_string(), None) } } } } -impl TryFrom for OptimismInvalidTransactionError { +impl TryFrom for OpInvalidTransactionError { type Error = InvalidTransaction; fn try_from(err: InvalidTransaction) -> Result { @@ -113,3 +113,9 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> ) } } + +impl From for OpEthApiError { + fn from(error: BlockError) -> Self { + Self::Eth(error.into()) + } +} diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs index dfdd09608563..6678fbe5df4f 100644 --- a/crates/optimism/rpc/src/eth/block.rs +++ b/crates/optimism/rpc/src/eth/block.rs @@ -1,18 +1,16 @@ //! Loads and formats OP block RPC response. 
-use alloy_rpc_types::BlockId; +use alloy_rpc_types_eth::BlockId; use op_alloy_network::Network; use op_alloy_rpc_types::OpTransactionReceipt; use reth_chainspec::ChainSpecProvider; -use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_optimism_chainspec::OpChainSpec; use reth_primitives::TransactionMeta; -use reth_provider::{BlockReaderIdExt, HeaderProvider}; +use reth_provider::HeaderProvider; use reth_rpc_eth_api::{ helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking}, - RpcReceipt, + RpcNodeCore, RpcReceipt, }; -use reth_rpc_eth_types::EthStateCache; use crate::{OpEthApi, OpEthApiError, OpReceiptBuilder}; @@ -22,13 +20,8 @@ where Error = OpEthApiError, NetworkTypes: Network, >, - N: FullNodeComponents>, + N: RpcNodeCore + HeaderProvider>, { - #[inline] - fn provider(&self) -> impl HeaderProvider { - self.inner.provider() - } - async fn block_receipts( &self, block_id: BlockId, @@ -85,15 +78,6 @@ where impl LoadBlock for OpEthApi where Self: LoadPendingBlock + SpawnBlocking, - N: FullNodeComponents, + N: RpcNodeCore, { - #[inline] - fn provider(&self) -> impl BlockReaderIdExt { - self.inner.provider() - } - - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } } diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index f1c10e6f1726..a76c25916f32 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -1,15 +1,11 @@ +use alloy_consensus::Header; use alloy_primitives::{Bytes, TxKind, U256}; use alloy_rpc_types_eth::transaction::TransactionRequest; -use reth_chainspec::EthereumHardforks; use reth_evm::ConfigureEvm; -use reth_node_api::{FullNodeComponents, NodeTypes}; -use reth_primitives::{ - revm_primitives::{BlockEnv, OptimismFields, TxEnv}, - Header, -}; +use reth_primitives::revm_primitives::{BlockEnv, OptimismFields, TxEnv}; use reth_rpc_eth_api::{ - helpers::{Call, EthCall, LoadState, SpawnBlocking}, - FromEthApiError, IntoEthApiError, + helpers::{Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}, + FromEthApiError, IntoEthApiError, RpcNodeCore, }; use reth_rpc_eth_types::{revm_utils::CallFees, RpcInvalidTransactionError}; @@ -17,16 +13,16 @@ use crate::{OpEthApi, OpEthApiError}; impl EthCall for OpEthApi where - Self: Call, - N: FullNodeComponents>, + Self: Call + LoadPendingBlock, + N: RpcNodeCore, { } impl Call for OpEthApi where - Self: LoadState + SpawnBlocking, + Self: LoadState> + SpawnBlocking, Self::Error: From, - N: FullNodeComponents, + N: RpcNodeCore, { #[inline] fn call_gas_limit(&self) -> u64 { @@ -38,11 +34,6 @@ where self.inner.max_simulate_blocks() } - #[inline] - fn evm_config(&self) -> &impl ConfigureEvm
{ - self.inner.evm_config() - } - fn create_txn_env( &self, block_env: &BlockEnv, diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 04774a4651c0..60af6542e282 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -11,17 +11,16 @@ pub use receipt::{OpReceiptBuilder, OpReceiptFieldsBuilder}; use std::{fmt, sync::Arc}; +use alloy_consensus::Header; use alloy_primitives::U256; use derive_more::Deref; use op_alloy_network::Optimism; -use reth_chainspec::EthereumHardforks; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; -use reth_node_api::{FullNodeComponents, FullNodeTypes, NodeTypes}; use reth_node_builder::EthApiBuilderCtx; -use reth_primitives::Header; use reth_provider::{ - BlockIdReader, BlockNumReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider, + BlockNumReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, EvmEnvProvider, StageCheckpointReader, StateProviderFactory, }; use reth_rpc::eth::{core::EthApiInner, DevSigner}; @@ -30,7 +29,7 @@ use reth_rpc_eth_api::{ AddDevSigners, EthApiSpec, EthFees, EthSigner, EthState, LoadBlock, LoadFee, LoadState, SpawnBlocking, Trace, }, - EthApiTypes, + EthApiTypes, RpcNodeCore, RpcNodeCoreExt, }; use reth_rpc_eth_types::{EthStateCache, FeeHistoryCache, GasPriceOracle}; use reth_tasks::{ @@ -43,10 +42,10 @@ use crate::{OpEthApiError, SequencerClient}; /// Adapter for [`EthApiInner`], which holds all the data required to serve core `eth_` API. pub type EthApiNodeBackend = EthApiInner< - ::Provider, - ::Pool, - ::Network, - ::Evm, + ::Provider, + ::Pool, + ::Network, + ::Evm, >; /// OP-Reth `Eth` API implementation. @@ -59,8 +58,8 @@ pub type EthApiNodeBackend = EthApiInner< /// /// This type implements the [`FullEthApi`](reth_rpc_eth_api::helpers::FullEthApi) by implemented /// all the `Eth` helper traits and prerequisite traits. -#[derive(Deref)] -pub struct OpEthApi { +#[derive(Deref, Clone)] +pub struct OpEthApi { /// Gateway to node's core components. #[deref] inner: Arc>, @@ -69,7 +68,12 @@ pub struct OpEthApi { sequencer_client: Option, } -impl OpEthApi { +impl OpEthApi +where + N: RpcNodeCore< + Provider: BlockReaderIdExt + ChainSpecProvider + CanonStateSubscriptions + Clone + 'static, + >, +{ /// Creates a new instance for given context. 
pub fn new(ctx: &EthApiBuilderCtx, sequencer_http: Option) -> Self { let blocking_task_pool = @@ -98,7 +102,7 @@ impl OpEthApi { impl EthApiTypes for OpEthApi where Self: Send + Sync, - N: FullNodeComponents, + N: RpcNodeCore, { type Error = OpEthApiError; type NetworkTypes = Optimism; @@ -109,24 +113,61 @@ where } } -impl EthApiSpec for OpEthApi +impl RpcNodeCore for OpEthApi where - Self: Send + Sync, - N: FullNodeComponents>, + N: RpcNodeCore, { + type Provider = N::Provider; + type Pool = N::Pool; + type Evm = ::Evm; + type Network = ::Network; + type PayloadBuilder = (); + #[inline] - fn provider( - &self, - ) -> impl ChainSpecProvider + BlockNumReader + StageCheckpointReader - { - self.inner.provider() + fn pool(&self) -> &Self::Pool { + self.inner.pool() } #[inline] - fn network(&self) -> impl NetworkInfo { + fn evm_config(&self) -> &Self::Evm { + self.inner.evm_config() + } + + #[inline] + fn network(&self) -> &Self::Network { self.inner.network() } + #[inline] + fn payload_builder(&self) -> &Self::PayloadBuilder { + &() + } + + #[inline] + fn provider(&self) -> &Self::Provider { + self.inner.provider() + } +} + +impl RpcNodeCoreExt for OpEthApi +where + N: RpcNodeCore, +{ + #[inline] + fn cache(&self) -> &EthStateCache { + self.inner.cache() + } +} + +impl EthApiSpec for OpEthApi +where + N: RpcNodeCore< + Provider: ChainSpecProvider + + BlockNumReader + + StageCheckpointReader, + Network: NetworkInfo, + >, +{ #[inline] fn starting_block(&self) -> U256 { self.inner.starting_block() @@ -141,7 +182,7 @@ where impl SpawnBlocking for OpEthApi where Self: Send + Sync + Clone + 'static, - N: FullNodeComponents, + N: RpcNodeCore, { #[inline] fn io_task_spawner(&self) -> impl TaskSpawner { @@ -161,23 +202,16 @@ where impl LoadFee for OpEthApi where - Self: LoadBlock, - N: FullNodeComponents>, + Self: LoadBlock, + N: RpcNodeCore< + Provider: BlockReaderIdExt + + EvmEnvProvider + + ChainSpecProvider + + StateProviderFactory, + >, { #[inline] - fn provider( - &self, - ) -> impl BlockIdReader + HeaderProvider + ChainSpecProvider { - self.inner.provider() - } - - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } - - #[inline] - fn gas_oracle(&self) -> &GasPriceOracle { + fn gas_oracle(&self) -> &GasPriceOracle { self.inner.gas_oracle() } @@ -187,33 +221,18 @@ where } } -impl LoadState for OpEthApi -where - Self: Send + Sync + Clone, - N: FullNodeComponents>, +impl LoadState for OpEthApi where + N: RpcNodeCore< + Provider: StateProviderFactory + ChainSpecProvider, + Pool: TransactionPool, + > { - #[inline] - fn provider( - &self, - ) -> impl StateProviderFactory + ChainSpecProvider { - self.inner.provider() - } - - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } - - #[inline] - fn pool(&self) -> impl TransactionPool { - self.inner.pool() - } } impl EthState for OpEthApi where Self: LoadState + SpawnBlocking, - N: FullNodeComponents, + N: RpcNodeCore, { #[inline] fn max_proof_window(&self) -> u64 { @@ -224,41 +243,28 @@ where impl EthFees for OpEthApi where Self: LoadFee, - N: FullNodeComponents, + N: RpcNodeCore, { } impl Trace for OpEthApi where - Self: LoadState, - N: FullNodeComponents, + Self: LoadState>, + N: RpcNodeCore, { - #[inline] - fn evm_config(&self) -> &impl ConfigureEvm
{ - self.inner.evm_config() - } } impl AddDevSigners for OpEthApi where - N: FullNodeComponents>, + N: RpcNodeCore, { fn with_dev_accounts(&self) { - *self.signers().write() = DevSigner::random_signers(20) + *self.inner.signers().write() = DevSigner::random_signers(20) } } -impl fmt::Debug for OpEthApi { +impl fmt::Debug for OpEthApi { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OpEthApi").finish_non_exhaustive() } } - -impl Clone for OpEthApi -where - N: FullNodeComponents, -{ - fn clone(&self) -> Self { - Self { inner: self.inner.clone(), sequencer_client: self.sequencer_client.clone() } - } -} diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index 5b716f39320a..8356d72dbdc7 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -1,20 +1,19 @@ //! Loads OP pending block for a RPC response. +use alloy_consensus::Header; +use alloy_eips::BlockNumberOrTag; use alloy_primitives::{BlockNumber, B256}; -use reth_chainspec::EthereumHardforks; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; -use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; -use reth_primitives::{ - revm_primitives::BlockEnv, BlockNumberOrTag, Header, Receipt, SealedBlockWithSenders, -}; +use reth_primitives::{revm_primitives::BlockEnv, Receipt, SealedBlockWithSenders}; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ExecutionOutcome, ReceiptProvider, StateProviderFactory, }; use reth_rpc_eth_api::{ helpers::{LoadPendingBlock, SpawnBlocking}, - FromEthApiError, + FromEthApiError, RpcNodeCore, }; use reth_rpc_eth_types::{EthApiError, PendingBlock}; use reth_transaction_pool::TransactionPool; @@ -24,33 +23,20 @@ use crate::OpEthApi; impl LoadPendingBlock for OpEthApi where Self: SpawnBlocking, - N: FullNodeComponents>, + N: RpcNodeCore< + Provider: BlockReaderIdExt + + EvmEnvProvider + + ChainSpecProvider + + StateProviderFactory, + Pool: TransactionPool, + Evm: ConfigureEvm
, + >, { - #[inline] - fn provider( - &self, - ) -> impl BlockReaderIdExt - + EvmEnvProvider - + ChainSpecProvider - + StateProviderFactory { - self.inner.provider() - } - - #[inline] - fn pool(&self) -> impl TransactionPool { - self.inner.pool() - } - #[inline] fn pending_block(&self) -> &tokio::sync::Mutex> { self.inner.pending_block() } - #[inline] - fn evm_config(&self) -> &impl ConfigureEvm
{ - self.inner.evm_config() - } - /// Returns the locally built pending block async fn local_pending_block( &self, diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index f2f09cdc7ff4..f3d16b4adb5f 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -1,17 +1,19 @@ //! Loads and formats OP receipt RPC response. use alloy_eips::eip2718::Encodable2718; -use alloy_rpc_types::{AnyReceiptEnvelope, Log, TransactionReceipt}; -use op_alloy_consensus::{OpDepositReceipt, OpDepositReceiptWithBloom, OpReceiptEnvelope}; -use op_alloy_rpc_types::{receipt::L1BlockInfo, OpTransactionReceipt, OpTransactionReceiptFields}; +use alloy_rpc_types_eth::{Log, TransactionReceipt}; +use op_alloy_consensus::{ + DepositTransaction, OpDepositReceipt, OpDepositReceiptWithBloom, OpReceiptEnvelope, +}; +use op_alloy_rpc_types::{L1BlockInfo, OpTransactionReceipt, OpTransactionReceiptFields}; use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_evm::RethL1BlockInfo; -use reth_optimism_forks::OptimismHardforks; +use reth_optimism_forks::OpHardforks; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned, TxType}; use reth_provider::ChainSpecProvider; use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcReceipt}; -use reth_rpc_eth_types::{EthApiError, EthStateCache, ReceiptBuilder}; +use reth_rpc_eth_types::{receipt::build_receipt, EthApiError}; use crate::{OpEthApi, OpEthApiError}; @@ -20,18 +22,14 @@ where Self: Send + Sync, N: FullNodeComponents>, { - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } - async fn build_transaction_receipt( &self, tx: TransactionSigned, meta: TransactionMeta, receipt: Receipt, ) -> Result, Self::Error> { - let (block, receipts) = LoadReceipt::cache(self) + let (block, receipts) = self + .cache() .get_block_and_receipts(meta.block_hash) .await .map_err(Self::Error::from_eth_err)? @@ -174,9 +172,7 @@ impl OpReceiptFieldsBuilder { #[derive(Debug)] pub struct OpReceiptBuilder { /// Core receipt, has all the fields of an L1 receipt and is the basis for the OP receipt. - pub core_receipt: TransactionReceipt>, - /// Transaction type. - pub tx_type: TxType, + pub core_receipt: TransactionReceipt>, /// Additional OP receipt fields. pub op_receipt_fields: OpTransactionReceiptFields, } @@ -191,11 +187,29 @@ impl OpReceiptBuilder { all_receipts: &[Receipt], l1_block_info: revm::L1BlockInfo, ) -> Result { - let ReceiptBuilder { base: core_receipt, .. 
 } =
-            ReceiptBuilder::new(transaction, meta, receipt, all_receipts)
-                .map_err(OpEthApiError::Eth)?;
-
-        let tx_type = transaction.tx_type();
+        let core_receipt =
+            build_receipt(transaction, meta, receipt, all_receipts, |receipt_with_bloom| {
+                match receipt.tx_type {
+                    TxType::Legacy => OpReceiptEnvelope::<Log>::Legacy(receipt_with_bloom),
+                    TxType::Eip2930 => OpReceiptEnvelope::<Log>::Eip2930(receipt_with_bloom),
+                    TxType::Eip1559 => OpReceiptEnvelope::<Log>::Eip1559(receipt_with_bloom),
+                    TxType::Eip4844 => {
+                        // TODO: unreachable
+                        OpReceiptEnvelope::<Log>::Eip1559(receipt_with_bloom)
+                    }
+                    TxType::Eip7702 => OpReceiptEnvelope::<Log>::Eip7702(receipt_with_bloom),
+                    TxType::Deposit => {
+                        OpReceiptEnvelope::<Log>::Deposit(OpDepositReceiptWithBloom::<Log> {
+                            receipt: OpDepositReceipt::<Log> {
+                                inner: receipt_with_bloom.receipt,
+                                deposit_nonce: receipt.deposit_nonce,
+                                deposit_receipt_version: receipt.deposit_receipt_version,
+                            },
+                            logs_bloom: receipt_with_bloom.logs_bloom,
+                        })
+                    }
+                }
+            })?;

         let op_receipt_fields = OpReceiptFieldsBuilder::default()
             .l1_block_info(chain_spec, transaction, l1_block_info)?
@@ -203,69 +217,15 @@ impl OpReceiptBuilder {
             .deposit_version(receipt.deposit_receipt_version)
             .build();

-        Ok(Self { core_receipt, tx_type, op_receipt_fields })
+        Ok(Self { core_receipt, op_receipt_fields })
     }

     /// Builds [`OpTransactionReceipt`] by combining core (l1) receipt fields and additional OP
     /// receipt fields.
     pub fn build(self) -> OpTransactionReceipt {
-        let Self { core_receipt, tx_type, op_receipt_fields } = self;
-
-        let OpTransactionReceiptFields { l1_block_info, deposit_nonce, deposit_receipt_version } =
-            op_receipt_fields;
-
-        let TransactionReceipt {
-            inner: AnyReceiptEnvelope { inner: receipt_with_bloom, .. },
-            transaction_hash,
-            transaction_index,
-            block_hash,
-            block_number,
-            gas_used,
-            effective_gas_price,
-            blob_gas_used,
-            blob_gas_price,
-            from,
-            to,
-            contract_address,
-            authorization_list,
-        } = core_receipt;
-
-        let inner = match tx_type {
-            TxType::Legacy => OpReceiptEnvelope::<Log>::Legacy(receipt_with_bloom),
-            TxType::Eip2930 => OpReceiptEnvelope::<Log>::Eip2930(receipt_with_bloom),
-            TxType::Eip1559 => OpReceiptEnvelope::<Log>::Eip1559(receipt_with_bloom),
-            TxType::Eip4844 => {
-                // TODO: unreachable
-                OpReceiptEnvelope::<Log>::Eip1559(receipt_with_bloom)
-            }
-            TxType::Eip7702 => OpReceiptEnvelope::<Log>::Eip7702(receipt_with_bloom),
-            TxType::Deposit => {
-                OpReceiptEnvelope::<Log>::Deposit(OpDepositReceiptWithBloom::<Log> {
-                    receipt: OpDepositReceipt::<Log> {
-                        inner: receipt_with_bloom.receipt,
-                        deposit_nonce,
-                        deposit_receipt_version,
-                    },
-                    logs_bloom: receipt_with_bloom.logs_bloom,
-                })
-            }
-        };
+        let Self { core_receipt: inner, op_receipt_fields } = self;

-        let inner = TransactionReceipt::<OpReceiptEnvelope<Log>> {
-            inner,
-            transaction_hash,
-            transaction_index,
-            block_hash,
-            block_number,
-            gas_used,
-            effective_gas_price,
-            blob_gas_used,
-            blob_gas_price,
-            from,
-            to,
-            contract_address,
-            authorization_list,
-        };
+        let OpTransactionReceiptFields { l1_block_info, .. } = op_receipt_fields;

         OpTransactionReceipt { inner, l1_block_info }
     }
diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs
index 4ac2d7e6b74f..11e33817229f 100644
--- a/crates/optimism/rpc/src/eth/transaction.rs
+++ b/crates/optimism/rpc/src/eth/transaction.rs
@@ -1,31 +1,27 @@
 //! Loads and formats OP transaction RPC response.
-use alloy_consensus::Transaction as _; -use alloy_primitives::{Bytes, B256}; -use alloy_rpc_types::TransactionInfo; +use alloy_consensus::{Signed, Transaction as _}; +use alloy_primitives::{Bytes, Sealable, Sealed, B256}; +use alloy_rpc_types_eth::TransactionInfo; +use op_alloy_consensus::OpTxEnvelope; use op_alloy_rpc_types::Transaction; use reth_node_api::FullNodeComponents; -use reth_primitives::TransactionSignedEcRecovered; +use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered}; use reth_provider::{BlockReaderIdExt, ReceiptProvider, TransactionsProvider}; -use reth_rpc::eth::EthTxBuilder; use reth_rpc_eth_api::{ helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, - FromEthApiError, FullEthApiTypes, TransactionCompat, + FromEthApiError, FullEthApiTypes, RpcNodeCore, TransactionCompat, }; -use reth_rpc_eth_types::{utils::recover_raw_transaction, EthStateCache}; +use reth_rpc_eth_types::utils::recover_raw_transaction; use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; -use crate::{OpEthApi, SequencerClient}; +use crate::{OpEthApi, OpEthApiError, SequencerClient}; impl EthTransactions for OpEthApi where - Self: LoadTransaction, - N: FullNodeComponents, + Self: LoadTransaction, + N: RpcNodeCore, { - fn provider(&self) -> impl BlockReaderIdExt { - self.inner.provider() - } - fn signers(&self) -> &parking_lot::RwLock>> { self.inner.signers() } @@ -61,26 +57,13 @@ where impl LoadTransaction for OpEthApi where Self: SpawnBlocking + FullEthApiTypes, - N: FullNodeComponents, + N: RpcNodeCore, { - type Pool = N::Pool; - - fn provider(&self) -> impl TransactionsProvider { - self.inner.provider() - } - - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } - - fn pool(&self) -> &Self::Pool { - self.inner.pool() - } } impl OpEthApi where - N: FullNodeComponents, + N: RpcNodeCore, { /// Returns the [`SequencerClient`] if one is set. 
pub fn raw_tx_forwarder(&self) -> Option { @@ -93,45 +76,96 @@ where N: FullNodeComponents, { type Transaction = Transaction; + type Error = OpEthApiError; fn fill( &self, tx: TransactionSignedEcRecovered, tx_info: TransactionInfo, - ) -> Self::Transaction { - let signed_tx = tx.clone().into_signed(); - let hash = tx.hash; - - let mut inner = EthTxBuilder.fill(tx, tx_info).inner; - - if signed_tx.is_deposit() { - inner.gas_price = Some(signed_tx.max_fee_per_gas()) - } - - let deposit_receipt_version = self - .inner - .provider() - .receipt_by_hash(hash) - .ok() // todo: change sig to return result - .flatten() - .and_then(|receipt| receipt.deposit_receipt_version); - - Transaction { - inner, - source_hash: signed_tx.source_hash(), - mint: signed_tx.mint(), - // only include is_system_tx if true: - is_system_tx: (signed_tx.is_deposit() && signed_tx.is_system_transaction()) - .then_some(true), + ) -> Result { + let from = tx.signer(); + let TransactionSigned { transaction, signature, hash } = tx.into_signed(); + let mut deposit_receipt_version = None; + let mut deposit_nonce = None; + + let inner = match transaction { + reth_primitives::Transaction::Legacy(tx) => { + Signed::new_unchecked(tx, signature, hash).into() + } + reth_primitives::Transaction::Eip2930(tx) => { + Signed::new_unchecked(tx, signature, hash).into() + } + reth_primitives::Transaction::Eip1559(tx) => { + Signed::new_unchecked(tx, signature, hash).into() + } + reth_primitives::Transaction::Eip4844(_) => unreachable!(), + reth_primitives::Transaction::Eip7702(tx) => { + Signed::new_unchecked(tx, signature, hash).into() + } + reth_primitives::Transaction::Deposit(tx) => { + self.inner + .provider() + .receipt_by_hash(hash) + .map_err(Self::Error::from_eth_err)? + .inspect(|receipt| { + deposit_receipt_version = receipt.deposit_receipt_version; + deposit_nonce = receipt.deposit_nonce; + }); + + OpTxEnvelope::Deposit(tx.seal_unchecked(hash)) + } + }; + + let TransactionInfo { + block_hash, block_number, index: transaction_index, base_fee, .. 
+ } = tx_info; + + let effective_gas_price = if inner.is_deposit() { + // For deposits, we must always set the `gasPrice` field to 0 in rpc + // deposit tx don't have a gas price field, but serde of `Transaction` will take care of + // it + 0 + } else { + base_fee + .map(|base_fee| { + inner.effective_tip_per_gas(base_fee as u64).unwrap_or_default() + base_fee + }) + .unwrap_or_else(|| inner.max_fee_per_gas()) + }; + + Ok(Transaction { + inner: alloy_rpc_types_eth::Transaction { + inner, + block_hash, + block_number, + transaction_index, + from, + effective_gas_price: Some(effective_gas_price), + }, + deposit_nonce, deposit_receipt_version, - } + }) } fn otterscan_api_truncate_input(tx: &mut Self::Transaction) { - tx.inner.input = tx.inner.input.slice(..4); - } - - fn tx_type(tx: &Self::Transaction) -> u8 { - tx.inner.transaction_type.unwrap_or_default() + let input = match &mut tx.inner.inner { + OpTxEnvelope::Eip1559(tx) => &mut tx.tx_mut().input, + OpTxEnvelope::Eip2930(tx) => &mut tx.tx_mut().input, + OpTxEnvelope::Legacy(tx) => &mut tx.tx_mut().input, + OpTxEnvelope::Eip7702(tx) => &mut tx.tx_mut().input, + OpTxEnvelope::Deposit(tx) => { + let (mut deposit, hash) = std::mem::replace( + tx, + Sealed::new_unchecked(Default::default(), Default::default()), + ) + .split(); + deposit.input = deposit.input.slice(..4); + let mut deposit = deposit.seal_unchecked(hash); + std::mem::swap(tx, &mut deposit); + return + } + _ => return, + }; + *input = input.slice(..4); } } diff --git a/crates/optimism/rpc/src/lib.rs b/crates/optimism/rpc/src/lib.rs index 0ff1451d05b7..44d0fa353890 100644 --- a/crates/optimism/rpc/src/lib.rs +++ b/crates/optimism/rpc/src/lib.rs @@ -14,6 +14,6 @@ pub mod error; pub mod eth; pub mod sequencer; -pub use error::{OpEthApiError, OptimismInvalidTransactionError, SequencerClientError}; +pub use error::{OpEthApiError, OpInvalidTransactionError, SequencerClientError}; pub use eth::{OpEthApi, OpReceiptBuilder}; pub use sequencer::SequencerClient; diff --git a/crates/optimism/storage/src/lib.rs b/crates/optimism/storage/src/lib.rs index 347b690c5c72..c3b8a71feea1 100644 --- a/crates/optimism/storage/src/lib.rs +++ b/crates/optimism/storage/src/lib.rs @@ -16,7 +16,7 @@ mod tests { CompactClientVersion, CompactU256, CompactU64, StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals, }; - use reth_primitives::{Account, Receipt, ReceiptWithBloom, Withdrawals}; + use reth_primitives::{Account, Receipt, ReceiptWithBloom}; use reth_prune_types::{PruneCheckpoint, PruneMode, PruneSegment}; use reth_stages_types::{ AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint, @@ -47,7 +47,6 @@ mod tests { assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(Withdrawals::bitflag_encoded_bytes(), 0); // In case of failure, refer to the documentation of the // [`validate_bitflag_backwards_compat`] macro for detailed instructions on handling @@ -73,6 +72,5 @@ mod tests { validate_bitflag_backwards_compat!(StoredBlockOmmers, UnusedBits::Zero); validate_bitflag_backwards_compat!(StoredBlockWithdrawals, UnusedBits::Zero); validate_bitflag_backwards_compat!(StorageHashingCheckpoint, UnusedBits::NotZero); - validate_bitflag_backwards_compat!(Withdrawals, UnusedBits::Zero); } } diff --git a/crates/payload/basic/Cargo.toml b/crates/payload/basic/Cargo.toml index 88ab99272dbd..74dea45d10d9 100644 --- 
a/crates/payload/basic/Cargo.toml +++ b/crates/payload/basic/Cargo.toml @@ -21,6 +21,7 @@ reth-payload-builder.workspace = true reth-payload-primitives.workspace = true reth-tasks.workspace = true reth-evm.workspace = true +reth-revm.workspace=true # ethereum alloy-rlp.workspace = true diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index b8eab3c0fea0..9b36e44b1fc0 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -10,24 +10,19 @@ use crate::metrics::PayloadBuilderMetrics; use alloy_consensus::constants::EMPTY_WITHDRAWALS; -use alloy_eips::merge::SLOT_DURATION; +use alloy_eips::{eip4895::Withdrawals, merge::SLOT_DURATION}; use alloy_primitives::{Bytes, B256, U256}; use futures_core::ready; use futures_util::FutureExt; -use reth_chainspec::{ChainSpec, EthereumHardforks}; +use reth_chainspec::EthereumHardforks; use reth_evm::state_change::post_block_withdrawals_balance_increments; -use reth_payload_builder::{ - database::CachedReads, KeepPayloadJobAlive, PayloadId, PayloadJob, PayloadJobGenerator, -}; +use reth_payload_builder::{KeepPayloadJobAlive, PayloadId, PayloadJob, PayloadJobGenerator}; use reth_payload_primitives::{ BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError, PayloadKind, }; -use reth_primitives::{ - constants::RETH_CLIENT_VERSION, proofs, BlockNumberOrTag, SealedBlock, Withdrawals, -}; -use reth_provider::{ - BlockReaderIdExt, BlockSource, CanonStateNotification, ProviderError, StateProviderFactory, -}; +use reth_primitives::{constants::RETH_CLIENT_VERSION, proofs, SealedHeader}; +use reth_provider::{BlockReaderIdExt, CanonStateNotification, StateProviderFactory}; +use reth_revm::cached::CachedReads; use reth_tasks::TaskSpawner; use reth_transaction_pool::TransactionPool; use revm::{Database, State}; @@ -47,6 +42,9 @@ use tokio::{ use tracing::{debug, trace, warn}; mod metrics; +mod stack; + +pub use stack::PayloadBuilderStack; /// The [`PayloadJobGenerator`] that creates [`BasicPayloadJob`]s. #[derive(Debug)] @@ -95,10 +93,11 @@ impl BasicPayloadJobGenerator Client software SHOULD stop the updating process when either a call to engine_getPayload - // > with the build process's payloadId is made or SECONDS_PER_SLOT (12s in the Mainnet - // > configuration) have passed since the point in time identified by the timestamp parameter. - // See also + /// > Client software SHOULD stop the updating process when either a call to engine_getPayload + /// > with the build process's payloadId is made or SECONDS_PER_SLOT (12s in the Mainnet + /// > configuration) have passed since the point in time identified by the timestamp parameter. + /// + /// See also #[inline] fn max_job_duration(&self, unix_timestamp: u64) -> Duration { let duration_until_timestamp = duration_until(unix_timestamp); @@ -121,7 +120,7 @@ impl BasicPayloadJobGenerator Option { self.pre_cached.as_ref().filter(|pc| pc.block == parent).map(|pc| pc.cached.clone()) @@ -146,29 +145,30 @@ where &self, attributes: ::PayloadAttributes, ) -> Result { - let parent_block = if attributes.parent().is_zero() { - // use latest block if parent is zero: genesis block + let parent_header = if attributes.parent().is_zero() { + // Use latest header for genesis block case self.client - .block_by_number_or_tag(BlockNumberOrTag::Latest)? - .ok_or_else(|| PayloadBuilderError::MissingParentBlock(attributes.parent()))? - .seal_slow() + .latest_header() + .map_err(PayloadBuilderError::from)? + .ok_or_else(|| PayloadBuilderError::MissingParentHeader(B256::ZERO))? 
} else { - let block = self - .client - .find_block_by_hash(attributes.parent(), BlockSource::Any)? - .ok_or_else(|| PayloadBuilderError::MissingParentBlock(attributes.parent()))?; - - // we already know the hash, so we can seal it - block.seal(attributes.parent()) + // Fetch specific header by hash + self.client + .sealed_header_by_hash(attributes.parent()) + .map_err(PayloadBuilderError::from)? + .ok_or_else(|| PayloadBuilderError::MissingParentHeader(attributes.parent()))? }; - let config = - PayloadConfig::new(Arc::new(parent_block), self.config.extradata.clone(), attributes); + let config = PayloadConfig::new( + Arc::new(parent_header.clone()), + self.config.extradata.clone(), + attributes, + ); let until = self.job_deadline(config.attributes.timestamp()); let deadline = Box::pin(tokio::time::sleep_until(until)); - let cached_reads = self.maybe_pre_cached(config.parent_block.hash()); + let cached_reads = self.maybe_pre_cached(parent_header.hash()); let mut job = BasicPayloadJob { config, @@ -616,7 +616,9 @@ where if let Some(fut) = Pin::new(&mut this.maybe_better).as_pin_mut() { if let Poll::Ready(res) = fut.poll(cx) { this.maybe_better = None; - if let Ok(BuildOutcome::Better { payload, .. }) = res { + if let Ok(Some(payload)) = res.map(|out| out.into_payload()) + .inspect_err(|err| warn!(target: "payload_builder", %err, "failed to resolve pending payload")) + { debug!(target: "payload_builder", "resolving better payload"); return Poll::Ready(Ok(payload)) } @@ -705,8 +707,8 @@ impl Drop for Cancelled { /// Static config for how to build a payload. #[derive(Clone, Debug)] pub struct PayloadConfig<Attributes> { - /// The parent block. - pub parent_block: Arc<SealedBlock>, + /// The parent header. + pub parent_header: Arc<SealedHeader>, /// Block extra data. pub extra_data: Bytes, /// Requested attributes for the payload. @@ -726,11 +728,11 @@ where { /// Create new payload config. pub const fn new( - parent_block: Arc<SealedBlock>, + parent_header: Arc<SealedHeader>, extra_data: Bytes, attributes: Attributes, ) -> Self { - Self { parent_block, extra_data, attributes } + Self { parent_header, extra_data, attributes } } /// Returns the payload id. @@ -767,7 +769,7 @@ impl<Payload> BuildOutcome<Payload> { /// Consumes the type and returns the payload if the outcome is `Better`. pub fn into_payload(self) -> Option<Payload> { match self { - Self::Better { payload, .. } => Some(payload), + Self::Better { payload, .. } | Self::Freeze(payload) => Some(payload), _ => None, } } @@ -786,6 +788,52 @@ impl<Payload> BuildOutcome<Payload> { pub const fn is_cancelled(&self) -> bool { matches!(self, Self::Cancelled) } + + /// Applies a function to the current payload. + pub(crate) fn map_payload<F, P>(self, f: F) -> BuildOutcome<P> + where + F: FnOnce(Payload) -> P, + { + match self { + Self::Better { payload, cached_reads } => { + BuildOutcome::Better { payload: f(payload), cached_reads } + } + Self::Aborted { fees, cached_reads } => BuildOutcome::Aborted { fees, cached_reads }, + Self::Cancelled => BuildOutcome::Cancelled, + Self::Freeze(payload) => BuildOutcome::Freeze(f(payload)), + } + } +} + +/// The possible outcomes of a payload building attempt without reused [`CachedReads`]. +#[derive(Debug)] +pub enum BuildOutcomeKind<Payload> { + /// Successfully built a better block. + Better { + /// The new payload that was built. + payload: Payload, + }, + /// Aborted payload building because it resulted in a worse block w.r.t. fees. + Aborted { + /// The total fees associated with the attempted payload. + fees: U256, + }, + /// Build job was cancelled + Cancelled, + /// The payload is final and no further building should occur + Freeze(Payload), +} + +impl<Payload> BuildOutcomeKind<Payload> { + /// Attaches the [`CachedReads`] to the outcome. + pub fn with_cached_reads(self, cached_reads: CachedReads) -> BuildOutcome<Payload> { + match self { + Self::Better { payload } => BuildOutcome::Better { payload, cached_reads }, + Self::Aborted { fees } => BuildOutcome::Aborted { fees, cached_reads }, + Self::Cancelled => BuildOutcome::Cancelled, + Self::Freeze(payload) => BuildOutcome::Freeze(payload), + } + } } /// A collection of arguments used for building payloads. @@ -961,12 +1009,16 @@ impl WithdrawalsOutcome { /// Returns the withdrawals root. /// /// Returns `None` values pre-Shanghai. -pub fn commit_withdrawals<DB: Database, ChainSpec: EthereumHardforks>( +pub fn commit_withdrawals<DB, ChainSpec>( db: &mut State<DB>, chain_spec: &ChainSpec, timestamp: u64, withdrawals: Withdrawals, -) -> Result<WithdrawalsOutcome, DB::Error> { +) -> Result<WithdrawalsOutcome, DB::Error> +where + DB: Database, + ChainSpec: EthereumHardforks, +{ if !chain_spec.is_shanghai_active_at_timestamp(timestamp) { return Ok(WithdrawalsOutcome::pre_shanghai()) } @@ -993,7 +1045,7 @@ pub fn commit_withdrawals<DB: Database, ChainSpec: EthereumHardforks>( /// /// This compares the total fees of the blocks, higher is better.
#[inline(always)] -pub fn is_better_payload<T: BuiltPayload>(best_payload: Option<T>, new_fees: U256) -> bool { +pub fn is_better_payload<T: BuiltPayload>(best_payload: Option<&T>, new_fees: U256) -> bool { if let Some(best_payload) = best_payload { new_fees > best_payload.fees() } else { diff --git a/crates/payload/basic/src/stack.rs b/crates/payload/basic/src/stack.rs new file mode 100644 index 000000000000..45a3f3b42448 --- /dev/null +++ b/crates/payload/basic/src/stack.rs @@ -0,0 +1,271 @@ +use crate::{ + BuildArguments, BuildOutcome, PayloadBuilder, PayloadBuilderAttributes, PayloadBuilderError, + PayloadConfig, +}; + +use alloy_eips::eip4895::Withdrawals; + use alloy_primitives::{Address, B256, U256}; +use reth_payload_builder::PayloadId; +use reth_payload_primitives::BuiltPayload; +use reth_primitives::SealedBlock; + +use alloy_eips::eip7685::Requests; +use std::{error::Error, fmt}; + +/// Hand-rolled `Either` enum to handle two builder types. +#[derive(Debug, Clone)] +pub enum Either<L, R> { + /// Left variant. + Left(L), + /// Right variant. + Right(R), +} + +impl<L, R> fmt::Display for Either<L, R> +where + L: fmt::Display, + R: fmt::Display, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Left(l) => write!(f, "Left: {}", l), + Self::Right(r) => write!(f, "Right: {}", r), + } + } +} + +impl<L, R> Error for Either<L, R> +where + L: Error + 'static, + R: Error + 'static, +{ + fn source(&self) -> Option<&(dyn Error + 'static)> { + match self { + Self::Left(l) => Some(l), + Self::Right(r) => Some(r), + } + } +} + +impl<L, R> PayloadBuilderAttributes for Either<L, R> +where + L: PayloadBuilderAttributes, + R: PayloadBuilderAttributes, + L::Error: Error + 'static, + R::Error: Error + 'static, +{ + type RpcPayloadAttributes = Either<L::RpcPayloadAttributes, R::RpcPayloadAttributes>; + type Error = Either<L::Error, R::Error>; + + fn try_new( + parent: B256, + rpc_payload_attributes: Self::RpcPayloadAttributes, + version: u8, + ) -> Result<Self, Self::Error> { + match rpc_payload_attributes { + Either::Left(attr) => { + L::try_new(parent, attr, version).map(Either::Left).map_err(Either::Left) + } + Either::Right(attr) => { + R::try_new(parent, attr, version).map(Either::Right).map_err(Either::Right) + } + } + } + + fn payload_id(&self) -> PayloadId { + match self { + Self::Left(l) => l.payload_id(), + Self::Right(r) => r.payload_id(), + } + } + + fn parent(&self) -> B256 { + match self { + Self::Left(l) => l.parent(), + Self::Right(r) => r.parent(), + } + } + + fn timestamp(&self) -> u64 { + match self { + Self::Left(l) => l.timestamp(), + Self::Right(r) => r.timestamp(), + } + } + + fn parent_beacon_block_root(&self) -> Option<B256> { + match self { + Self::Left(l) => l.parent_beacon_block_root(), + Self::Right(r) => r.parent_beacon_block_root(), + } + } + + fn suggested_fee_recipient(&self) -> Address { + match self { + Self::Left(l) => l.suggested_fee_recipient(), + Self::Right(r) => r.suggested_fee_recipient(), + } + } + + fn prev_randao(&self) -> B256 { + match self { + Self::Left(l) => l.prev_randao(), + Self::Right(r) => r.prev_randao(), + } + } + + fn withdrawals(&self) -> &Withdrawals { + match self { + Self::Left(l) => l.withdrawals(), + Self::Right(r) => r.withdrawals(), + } + } +} + +/// This structure enables the chaining of multiple `PayloadBuilder` implementations, +/// creating a hierarchical fallback system. It's designed to be nestable, allowing +/// for complex builder arrangements like `Stack<Stack<A, B>, C>` with different +/// builder types at each level. +#[derive(Debug)] +pub struct PayloadBuilderStack<L, R> { + left: L, + right: R, +} + +impl<L, R> PayloadBuilderStack<L, R> { + /// Creates a new `PayloadBuilderStack` with the given left and right builders.
+ pub const fn new(left: L, right: R) -> Self { + Self { left, right } + } +} + +impl<L, R> Clone for PayloadBuilderStack<L, R> +where + L: Clone, + R: Clone, +{ + fn clone(&self) -> Self { + Self::new(self.left.clone(), self.right.clone()) + } +} + +impl<L, R> BuiltPayload for Either<L, R> +where + L: BuiltPayload, + R: BuiltPayload, +{ + fn block(&self) -> &SealedBlock { + match self { + Self::Left(l) => l.block(), + Self::Right(r) => r.block(), + } + } + + fn fees(&self) -> U256 { + match self { + Self::Left(l) => l.fees(), + Self::Right(r) => r.fees(), + } + } + + fn requests(&self) -> Option<Requests> { + match self { + Self::Left(l) => l.requests(), + Self::Right(r) => r.requests(), + } + } +} + +impl<L, R, Pool, Client> PayloadBuilder<Pool, Client> for PayloadBuilderStack<L, R> +where + L: PayloadBuilder<Pool, Client> + Unpin + 'static, + R: PayloadBuilder<Pool, Client> + Unpin + 'static, + Client: Clone, + Pool: Clone, + L::Attributes: Unpin + Clone, + R::Attributes: Unpin + Clone, + L::BuiltPayload: Unpin + Clone, + R::BuiltPayload: Unpin + Clone, + <<L as PayloadBuilder<Pool, Client>>::Attributes as PayloadBuilderAttributes>::Error: 'static, + <<R as PayloadBuilder<Pool, Client>>::Attributes as PayloadBuilderAttributes>::Error: 'static, +{ + type Attributes = Either<L::Attributes, R::Attributes>; + type BuiltPayload = Either<L::BuiltPayload, R::BuiltPayload>; + + fn try_build( + &self, + args: BuildArguments<Pool, Client, Self::Attributes, Self::BuiltPayload>, + ) -> Result<BuildOutcome<Self::BuiltPayload>, PayloadBuilderError> { + match args.config.attributes { + Either::Left(ref left_attr) => { + let left_args: BuildArguments<Pool, Client, L::Attributes, L::BuiltPayload> = + BuildArguments { + client: args.client.clone(), + pool: args.pool.clone(), + cached_reads: args.cached_reads.clone(), + config: PayloadConfig { + parent_header: args.config.parent_header.clone(), + extra_data: args.config.extra_data.clone(), + attributes: left_attr.clone(), + }, + cancel: args.cancel.clone(), + best_payload: args.best_payload.clone().and_then(|payload| { + if let Either::Left(p) = payload { + Some(p) + } else { + None + } + }), + }; + + self.left.try_build(left_args).map(|out| out.map_payload(Either::Left)) + } + Either::Right(ref right_attr) => { + let right_args = BuildArguments { + client: args.client.clone(), + pool: args.pool.clone(), + cached_reads: args.cached_reads.clone(), + config: PayloadConfig { + parent_header: args.config.parent_header.clone(), + extra_data: args.config.extra_data.clone(), + attributes: right_attr.clone(), + }, + cancel: args.cancel.clone(), + best_payload: args.best_payload.clone().and_then(|payload| { + if let Either::Right(p) = payload { + Some(p) + } else { + None + } + }), + }; + + self.right.try_build(right_args).map(|out| out.map_payload(Either::Right)) + } + } + } + + fn build_empty_payload( + &self, + client: &Client, + config: PayloadConfig<Self::Attributes>, + ) -> Result<Self::BuiltPayload, PayloadBuilderError> { + match config.attributes { + Either::Left(left_attr) => { + let left_config = PayloadConfig { + attributes: left_attr, + parent_header: config.parent_header.clone(), + extra_data: config.extra_data.clone(), + }; + self.left.build_empty_payload(client, left_config).map(Either::Left) + } + Either::Right(right_attr) => { + let right_config = PayloadConfig { + parent_header: config.parent_header.clone(), + extra_data: config.extra_data.clone(), + attributes: right_attr, + }; + self.right.build_empty_payload(client, right_config).map(Either::Right) + } + } + } +} diff --git a/crates/payload/builder/Cargo.toml b/crates/payload/builder/Cargo.toml index 3b71011e02e5..8b2fef7b8782 100644 --- a/crates/payload/builder/Cargo.toml +++ b/crates/payload/builder/Cargo.toml @@ -13,15 +13,14 @@ workspace = true [dependencies] # reth -reth-primitives.workspace = true -reth-provider.workspace = true +reth-primitives = { workspace = true, optional = true }
+reth-chain-state.workspace = true reth-payload-primitives.workspace = true reth-ethereum-engine-primitives.workspace = true -reth-chain-state = { workspace = true, optional = true } # alloy +alloy-primitives = { workspace = true, optional = true } alloy-rpc-types = { workspace = true, features = ["engine"] } -alloy-primitives.workspace = true # async async-trait.workspace = true @@ -37,13 +36,15 @@ metrics.workspace = true tracing.workspace = true [dev-dependencies] +reth-primitives.workspace = true +alloy-primitives.workspace = true revm.workspace = true +alloy-consensus.workspace = true [features] test-utils = [ - "reth-chain-state", - "reth-chain-state?/test-utils", - "reth-primitives/test-utils", - "reth-provider/test-utils", - "revm/test-utils" + "alloy-primitives", + "reth-chain-state/test-utils", + "reth-primitives/test-utils", + "revm/test-utils", ] diff --git a/crates/payload/builder/src/lib.rs b/crates/payload/builder/src/lib.rs index 7af61ac4c682..da44072c99d2 100644 --- a/crates/payload/builder/src/lib.rs +++ b/crates/payload/builder/src/lib.rs @@ -26,10 +26,12 @@ //! ``` //! use std::future::Future; //! use std::pin::Pin; +//! use std::sync::Arc; //! use std::task::{Context, Poll}; +//! use alloy_consensus::Header; //! use alloy_primitives::U256; //! use reth_payload_builder::{EthBuiltPayload, PayloadBuilderError, KeepPayloadJobAlive, EthPayloadBuilderAttributes, PayloadJob, PayloadJobGenerator, PayloadKind}; -//! use reth_primitives::{Block, Header}; +//! use reth_primitives::Block; //! //! /// The generator type that creates new jobs that builds empty blocks. //! pub struct EmptyBlockPayloadJobGenerator; @@ -56,7 +58,7 @@ //! //! fn best_payload(&self) -> Result<EthBuiltPayload, PayloadBuilderError> { //! // NOTE: some fields are omitted here for brevity -//! let payload = Block { +//! let block = Block { //! header: Header { //! parent_hash: self.attributes.parent, //! timestamp: self.attributes.timestamp, @@ -65,7 +67,7 @@ //! }, //! ..Default::default() //! }; -//! let payload = EthBuiltPayload::new(self.attributes.id, payload.seal_slow(), U256::ZERO, None, None); +//! let payload = EthBuiltPayload::new(self.attributes.id, Arc::new(block.seal_slow()), U256::ZERO, None, None); //! Ok(payload) //! } //! @@ -101,7 +103,6 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -pub mod database; mod metrics; mod service; mod traits; diff --git a/crates/payload/builder/src/service.rs b/crates/payload/builder/src/service.rs index 853c69e90d83..267a1e355b0e 100644 --- a/crates/payload/builder/src/service.rs +++ b/crates/payload/builder/src/service.rs @@ -9,15 +9,16 @@ use crate::{ }; use alloy_rpc_types::engine::PayloadId; use futures_util::{future::FutureExt, Stream, StreamExt}; +use reth_chain_state::CanonStateNotification; use reth_payload_primitives::{ BuiltPayload, Events, PayloadBuilder, PayloadBuilderAttributes, PayloadBuilderError, - PayloadEvents, PayloadKind, PayloadTypes, + PayloadEvents, PayloadKind, PayloadStoreExt, PayloadTypes, }; -use reth_provider::CanonStateNotification; use std::{ fmt, future::Future, pin::Pin, + sync::Arc, task::{Context, Poll}, }; use tokio::sync::{ @@ -30,13 +31,14 @@ use tracing::{debug, info, trace, warn}; type PayloadFuture<P> = Pin<Box<dyn Future<Output = Result<P, PayloadBuilderError>> + Send + Sync>>; /// A communication channel to the [`PayloadBuilderService`] that can retrieve payloads. +/// +/// This type is intended to be used to retrieve payloads from the service (e.g. from the engine +/// API). #[derive(Debug)] pub struct PayloadStore<T: PayloadTypes> { - inner: PayloadBuilderHandle<T>, + inner: Arc<dyn PayloadStoreExt<T>>, } -// === impl PayloadStore === - impl<T> PayloadStore<T> where T: PayloadTypes, { @@ -82,12 +84,16 @@ where } } -impl<T> Clone for PayloadStore<T> +impl<T> PayloadStore<T> where T: PayloadTypes, { - fn clone(&self) -> Self { - Self { inner: self.inner.clone() } + /// Create a new instance + pub fn new<P>(inner: P) -> Self + where + P: PayloadStoreExt<T> + 'static, + { + Self { inner: Arc::new(inner) } + } } @@ -96,7 +102,7 @@ where T: PayloadTypes, { fn from(inner: PayloadBuilderHandle<T>) -> Self { - Self { inner } + Self::new(inner) } } @@ -156,6 +162,18 @@ where let _ = self.to_service.send(PayloadServiceCommand::Subscribe(tx)); Ok(PayloadEvents { receiver: rx.await? }) } + + /// Returns the payload attributes associated with the given identifier. + /// + /// Note: this returns the attributes of the payload and does not resolve the job. + async fn payload_attributes( + &self, + id: PayloadId, + ) -> Option<Result<T::PayloadBuilderAttributes, PayloadBuilderError>> { + let (tx, rx) = oneshot::channel(); + self.to_service.send(PayloadServiceCommand::PayloadAttributes(id, tx)).ok()?; + rx.await.ok()? + } } impl<T> PayloadBuilderHandle<T> @@ -169,18 +187,6 @@ where pub const fn new(to_service: mpsc::UnboundedSender<PayloadServiceCommand<T>>) -> Self { Self { to_service } } - - /// Returns the payload attributes associated with the given identifier. - /// - /// Note: this returns the attributes of the payload and does not resolve the job. - async fn payload_attributes( - &self, - id: PayloadId, - ) -> Option<Result<T::PayloadBuilderAttributes, PayloadBuilderError>> { - let (tx, rx) = oneshot::channel(); - self.to_service.send(PayloadServiceCommand::PayloadAttributes(id, tx)).ok()?; - rx.await.ok()? - } } impl<T> Clone for PayloadBuilderHandle<T> diff --git a/crates/payload/builder/src/test_utils.rs b/crates/payload/builder/src/test_utils.rs index 676e60d912f0..780df5c84636 100644 --- a/crates/payload/builder/src/test_utils.rs +++ b/crates/payload/builder/src/test_utils.rs @@ -6,13 +6,13 @@ use crate::{ }; use alloy_primitives::U256; -use reth_chain_state::ExecutedBlock; +use reth_chain_state::{CanonStateNotification, ExecutedBlock}; use reth_payload_primitives::{PayloadBuilderError, PayloadKind, PayloadTypes}; use reth_primitives::Block; -use reth_provider::CanonStateNotification; use std::{ future::Future, pin::Pin, + sync::Arc, task::{Context, Poll}, }; @@ -86,7 +86,7 @@ impl PayloadJob for TestPayloadJob { fn best_payload(&self) -> Result<EthBuiltPayload, PayloadBuilderError> { Ok(EthBuiltPayload::new( self.attr.payload_id(), - Block::default().seal_slow(), + Arc::new(Block::default().seal_slow()), U256::ZERO, Some(ExecutedBlock::default()), Some(Default::default()), diff --git a/crates/payload/builder/src/traits.rs b/crates/payload/builder/src/traits.rs index 62dadeb45d7c..ba8486b69079 100644 --- a/crates/payload/builder/src/traits.rs +++ b/crates/payload/builder/src/traits.rs @@ -1,9 +1,9 @@ //! Trait abstractions used by the payload crate. +use reth_chain_state::CanonStateNotification; use reth_payload_primitives::{ BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError, PayloadKind, }; -use reth_provider::CanonStateNotification; use std::future::Future; /// A type that can build a payload.
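The `PayloadStore` change above swaps the concrete `PayloadBuilderHandle<T>` field for an `Arc<dyn PayloadStoreExt<T>>`, so any implementation of the extension trait can back the store while `From<PayloadBuilderHandle<T>>` keeps the old construction path working. A minimal, self-contained sketch of that type-erasure pattern (illustrative names only, not reth's actual types):

```rust
use std::sync::Arc;

/// Stand-in for the object-safe extension trait (hypothetical).
trait StoreExt: std::fmt::Debug + Send + Sync {
    fn best_payload(&self, id: u64) -> Option<String>;
}

/// Stand-in for a concrete service handle (hypothetical).
#[derive(Debug)]
struct Handle;

impl StoreExt for Handle {
    fn best_payload(&self, id: u64) -> Option<String> {
        Some(format!("payload-{id}"))
    }
}

/// The store only holds a trait object, mirroring the new `PayloadStore`.
#[derive(Debug)]
struct Store {
    inner: Arc<dyn StoreExt>,
}

impl Store {
    /// Accepts any backing implementation, like `PayloadStore::new`.
    fn new<P: StoreExt + 'static>(inner: P) -> Self {
        Self { inner: Arc::new(inner) }
    }
}

fn main() {
    let store = Store::new(Handle);
    assert_eq!(store.inner.best_payload(7).as_deref(), Some("payload-7"));
}
```

A practical upside of this design is that tests can substitute a mock implementation of the extension trait without spinning up the whole builder service.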
diff --git a/crates/payload/primitives/Cargo.toml b/crates/payload/primitives/Cargo.toml index ad8ce63a7e9b..951108e7da3c 100644 --- a/crates/payload/primitives/Cargo.toml +++ b/crates/payload/primitives/Cargo.toml @@ -22,8 +22,8 @@ reth-chain-state.workspace = true # alloy alloy-eips.workspace = true alloy-primitives.workspace = true -alloy-rpc-types = { workspace = true, features = ["engine"] } -op-alloy-rpc-types-engine.workspace = true +alloy-rpc-types-engine = { workspace = true, features = ["serde"] } +op-alloy-rpc-types-engine = { workspace = true, optional = true } # async async-trait.workspace = true @@ -35,3 +35,6 @@ pin-project.workspace = true serde.workspace = true thiserror.workspace = true tracing.workspace = true + +[features] +op = ["dep:op-alloy-rpc-types-engine"] \ No newline at end of file diff --git a/crates/payload/primitives/src/error.rs b/crates/payload/primitives/src/error.rs index 00df9e8d290f..ab222f5f6ef9 100644 --- a/crates/payload/primitives/src/error.rs +++ b/crates/payload/primitives/src/error.rs @@ -9,6 +9,9 @@ use tokio::sync::oneshot; /// Possible error variants during payload building. #[derive(Debug, thiserror::Error)] pub enum PayloadBuilderError { + /// Thrown when the parent header cannot be found + #[error("missing parent header: {0}")] + MissingParentHeader(B256), /// Thrown when the parent block is missing. #[error("missing parent block {0}")] MissingParentBlock(B256), @@ -18,9 +21,6 @@ pub enum PayloadBuilderError { /// If there's no payload to resolve. #[error("missing payload")] MissingPayload, - /// Build cancelled - #[error("build outcome cancelled")] - BuildOutcomeCancelled, /// Error occurring in the blob store. #[error(transparent)] BlobStore(#[from] BlobStoreError), @@ -30,9 +30,6 @@ pub enum PayloadBuilderError { /// Unrecoverable error during evm execution. #[error("evm execution error: {0}")] EvmExecutionError(EVMError), - /// Thrown if the payload requests withdrawals before Shanghai activation. - #[error("withdrawals set before Shanghai activation")] - WithdrawalsBeforeShanghai, /// Any other payload building errors. #[error(transparent)] Other(Box), diff --git a/crates/payload/primitives/src/lib.rs b/crates/payload/primitives/src/lib.rs index 08aa428000ed..3604ff5d8d81 100644 --- a/crates/payload/primitives/src/lib.rs +++ b/crates/payload/primitives/src/lib.rs @@ -20,7 +20,7 @@ pub use crate::events::{Events, PayloadEvents}; mod traits; pub use traits::{ BuiltPayload, PayloadAttributes, PayloadAttributesBuilder, PayloadBuilder, - PayloadBuilderAttributes, + PayloadBuilderAttributes, PayloadStoreExt, }; mod payload; @@ -324,22 +324,23 @@ where } /// The version of Engine API message. -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Default)] pub enum EngineApiMessageVersion { /// Version 1 - V1, + V1 = 1, /// Version 2 /// /// Added in the Shanghai hardfork. - V2, + V2 = 2, /// Version 3 /// /// Added in the Cancun hardfork. - V3, + #[default] + V3 = 3, /// Version 4 /// /// Added in the Prague hardfork. - V4, + V4 = 4, } /// Determines how we should choose the payload to return. 
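Because `EngineApiMessageVersion` now carries explicit discriminants, each variant's numeric value matches the Engine API version directly, and `Default` yields `V3`. A standalone sketch of the same explicit-discriminant pattern (illustrative enum, not the reth type):

```rust
/// Mirrors the explicit-discriminant pattern used above.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Default)]
enum Version {
    V1 = 1,
    V2 = 2,
    /// The default, as with `EngineApiMessageVersion::V3`.
    #[default]
    V3 = 3,
    V4 = 4,
}

fn main() {
    // The discriminant doubles as the wire version.
    assert_eq!(Version::default() as u8, 3);
    // Derived `Ord` follows declaration order, so version comparisons work.
    assert!(Version::V2 < Version::V4);
}
```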
diff --git a/crates/payload/primitives/src/payload.rs b/crates/payload/primitives/src/payload.rs index fc685559e087..bcf48cea8343 100644 --- a/crates/payload/primitives/src/payload.rs +++ b/crates/payload/primitives/src/payload.rs @@ -1,6 +1,7 @@ use crate::{MessageValidationKind, PayloadAttributes}; +use alloy_eips::eip4895::Withdrawal; use alloy_primitives::B256; -use alloy_rpc_types::engine::ExecutionPayload; +use alloy_rpc_types_engine::ExecutionPayload; /// Either an [`ExecutionPayload`] or a type that implements the [`PayloadAttributes`] trait. /// @@ -39,7 +40,7 @@ where Attributes: PayloadAttributes, { /// Return the withdrawals for the payload or attributes. - pub fn withdrawals(&self) -> Option<&Vec<Withdrawal>> { + pub fn withdrawals(&self) -> Option<&Vec<Withdrawal>> { match self { Self::ExecutionPayload { payload, .. } => payload.withdrawals(), Self::PayloadAttributes(attributes) => attributes.withdrawals(), diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs index df76149028aa..197a7fe3af99 100644 --- a/crates/payload/primitives/src/traits.rs +++ b/crates/payload/primitives/src/traits.rs @@ -1,22 +1,22 @@ -use crate::{PayloadEvents, PayloadKind, PayloadTypes}; -use alloy_eips::eip7685::Requests; -use alloy_primitives::{Address, B256, U256}; -use alloy_rpc_types::{ - engine::{PayloadAttributes as EthPayloadAttributes, PayloadId}, - Withdrawal, +use crate::{PayloadBuilderError, PayloadEvents, PayloadKind, PayloadTypes}; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + eip7685::Requests, }; -use op_alloy_rpc_types_engine::OpPayloadAttributes; +use alloy_primitives::{Address, B256, U256}; +use alloy_rpc_types_engine::{PayloadAttributes as EthPayloadAttributes, PayloadId}; use reth_chain_state::ExecutedBlock; -use reth_primitives::{SealedBlock, Withdrawals}; +use reth_primitives::SealedBlock; +use std::fmt::Debug; use tokio::sync::oneshot; /// A type that can request, subscribe to and resolve payloads. #[async_trait::async_trait] -pub trait PayloadBuilder: Send + Unpin { +pub trait PayloadBuilder: Debug + Send + Sync + Unpin { /// The Payload type for the builder. type PayloadType: PayloadTypes; /// The error type returned by the builder. - type Error; + type Error: Into<PayloadBuilderError>; /// Sends a message to the service to start building a new payload for the given payload. /// @@ -51,6 +51,69 @@ pub trait PayloadBuilder: Send + Unpin { /// Sends a message to the service to subscribe to payload events. /// Returns a receiver that will receive them. async fn subscribe(&self) -> Result<PayloadEvents<Self::PayloadType>, Self::Error>; + + /// Returns the payload attributes associated with the given identifier. + async fn payload_attributes( + &self, + id: PayloadId, + ) -> Option<Result<<Self::PayloadType as PayloadTypes>::PayloadBuilderAttributes, Self::Error>>; +} + +/// A helper trait for internal usage to retrieve and resolve payloads. +#[async_trait::async_trait] +pub trait PayloadStoreExt<T: PayloadTypes>: Debug + Send + Sync + Unpin { + /// Resolves the payload job and returns the best payload that has been built so far. + async fn resolve_kind( + &self, + id: PayloadId, + kind: PayloadKind, + ) -> Option<Result<T::BuiltPayload, PayloadBuilderError>>; + + /// Resolves the payload job as fast as possible and returns the best payload that has been + /// built so far. + async fn resolve(&self, id: PayloadId) -> Option<Result<T::BuiltPayload, PayloadBuilderError>> { + self.resolve_kind(id, PayloadKind::Earliest).await + } + + /// Returns the best payload for the given identifier. + async fn best_payload( + &self, + id: PayloadId, + ) -> Option<Result<T::BuiltPayload, PayloadBuilderError>>; + + /// Returns the payload attributes associated with the given identifier.
+ async fn payload_attributes( + &self, + id: PayloadId, + ) -> Option<Result<T::PayloadBuilderAttributes, PayloadBuilderError>>; +} + +#[async_trait::async_trait] +impl<T: PayloadTypes, P> PayloadStoreExt<T> for P +where + P: PayloadBuilder<PayloadType = T>, +{ + async fn resolve_kind( + &self, + id: PayloadId, + kind: PayloadKind, + ) -> Option<Result<T::BuiltPayload, PayloadBuilderError>> { + Some(PayloadBuilder::resolve_kind(self, id, kind).await?.map_err(Into::into)) + } + + async fn best_payload( + &self, + id: PayloadId, + ) -> Option<Result<T::BuiltPayload, PayloadBuilderError>> { + Some(PayloadBuilder::best_payload(self, id).await?.map_err(Into::into)) + } + + async fn payload_attributes( + &self, + id: PayloadId, + ) -> Option<Result<T::PayloadBuilderAttributes, PayloadBuilderError>> { + Some(PayloadBuilder::payload_attributes(self, id).await?.map_err(Into::into)) + } } /// Represents a built payload type that contains a built [`SealedBlock`] and can be converted into @@ -84,10 +147,11 @@ pub trait PayloadBuilderAttributes: Send + Sync + std::fmt::Debug { /// Creates a new payload builder for the given parent block and the attributes. /// - /// Derives the unique [`PayloadId`] for the given parent and attributes + /// Derives the unique [`PayloadId`] for the given parent, attributes and version. fn try_new( parent: B256, rpc_payload_attributes: Self::RpcPayloadAttributes, + version: u8, ) -> Result<Self, Self::Error> where Self: Sized; @@ -145,7 +209,8 @@ impl PayloadAttributes for EthPayloadAttributes { } } -impl PayloadAttributes for OpPayloadAttributes { +#[cfg(feature = "op")] +impl PayloadAttributes for op_alloy_rpc_types_engine::OpPayloadAttributes { fn timestamp(&self) -> u64 { self.payload_attributes.timestamp } diff --git a/crates/payload/validator/src/lib.rs b/crates/payload/validator/src/lib.rs index 9952815fd982..e74b5f48d40f 100644 --- a/crates/payload/validator/src/lib.rs +++ b/crates/payload/validator/src/lib.rs @@ -23,7 +23,7 @@ pub struct ExecutionPayloadValidator<ChainSpec> { chain_spec: Arc<ChainSpec>, } -impl<ChainSpec: EthereumHardforks> ExecutionPayloadValidator<ChainSpec> { +impl<ChainSpec> ExecutionPayloadValidator<ChainSpec> { /// Create a new validator. pub const fn new(chain_spec: Arc<ChainSpec>) -> Self { Self { chain_spec } @@ -31,10 +31,12 @@ impl<ChainSpec: EthereumHardforks> ExecutionPayloadValidator<ChainSpec> { /// Returns the chain spec used by the validator. #[inline] - pub fn chain_spec(&self) -> &ChainSpec { + pub const fn chain_spec(&self) -> &Arc<ChainSpec> { &self.chain_spec } +} +impl<ChainSpec: EthereumHardforks> ExecutionPayloadValidator<ChainSpec> { /// Returns true if the Cancun hardfork is active at the given timestamp. #[inline] fn is_cancun_active_at_timestamp(&self, timestamp: u64) -> bool { diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index 4319232f8248..6cafe8b8b1e5 100644 --- a/crates/primitives-traits/Cargo.toml +++ b/crates/primitives-traits/Cargo.toml @@ -44,7 +44,6 @@ reth-testing-utils.workspace = true alloy-primitives = { workspace = true, features = ["arbitrary"] } alloy-consensus = { workspace = true, features = ["arbitrary"] } -arbitrary = { workspace = true, features = ["derive"] } bincode.workspace = true proptest-arbitrary-interop.workspace = true proptest.workspace = true diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index c9b673ec7241..6ec184a21546 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -6,19 +6,23 @@ use alloy_consensus::{BlockHeader, Transaction, TxType}; use alloy_eips::{eip4895::Withdrawal, eip7685::Requests}; use alloy_primitives::{Address, B256}; -use crate::Block; +use crate::InMemorySize; /// Abstraction for block's body.
pub trait BlockBody: - Clone + Send + + Sync + + Unpin + + Clone + + Default + fmt::Debug + PartialEq + Eq - + Default + serde::Serialize + for<'de> serde::Deserialize<'de> + alloy_rlp::Encodable + alloy_rlp::Decodable + + InMemorySize { /// Ordered list of signed transactions as committed in block. // todo: requires trait for signed transaction @@ -43,11 +47,6 @@ pub trait BlockBody: /// Returns [`Requests`] in block, if any. fn requests(&self) -> Option<&Requests>; - /// Create a [`Block`] from the body and its header. - fn into_block>(self, header: Self::Header) -> T { - T::from((header, self)) - } - /// Calculate the transaction root for the block body. fn calculate_tx_root(&self) -> B256; @@ -90,7 +89,4 @@ pub trait BlockBody: fn blob_versioned_hashes(&self) -> Vec<&B256> { self.blob_versioned_hashes_iter().collect() } - - /// Calculates a heuristic for the in-memory size of the [`BlockBody`]. - fn size(&self) -> usize; } diff --git a/crates/primitives-traits/src/block/header.rs b/crates/primitives-traits/src/block/header.rs new file mode 100644 index 000000000000..7ab76f249873 --- /dev/null +++ b/crates/primitives-traits/src/block/header.rs @@ -0,0 +1,51 @@ +//! Block header data primitive. + +use core::fmt; + +use alloy_primitives::Sealable; +use reth_codecs::Compact; + +use crate::InMemorySize; + +/// Helper trait that unifies all behaviour required by block header to support full node +/// operations. +pub trait FullBlockHeader: BlockHeader + Compact {} + +impl FullBlockHeader for T where T: BlockHeader + Compact {} + +/// Abstraction of a block header. +pub trait BlockHeader: + Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + PartialEq + + Eq + + alloy_rlp::Encodable + + alloy_rlp::Decodable + + alloy_consensus::BlockHeader + + Sealable + + InMemorySize +{ +} + +impl BlockHeader for T where + T: Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + PartialEq + + Eq + + serde::Serialize + + for<'de> serde::Deserialize<'de> + + alloy_rlp::Encodable + + alloy_rlp::Decodable + + alloy_consensus::BlockHeader + + Sealable + + InMemorySize +{ +} diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index 395cf61df145..33008c4381dc 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -1,106 +1,45 @@ //! Block abstraction. pub mod body; +pub mod header; -use alloc::{fmt, vec::Vec}; +use alloc::fmt; -use alloy_consensus::BlockHeader; -use alloy_primitives::{Address, Sealable, B256}; +use reth_codecs::Compact; -use crate::BlockBody; +use crate::{BlockHeader, FullBlockHeader, InMemorySize}; -/// Helper trait, unifies behaviour required of a block header. -pub trait Header: BlockHeader + Sealable {} +/// Helper trait that unifies all behaviour required by block to support full node operations. +pub trait FullBlock: Block + Compact {} -impl Header for T where T: BlockHeader + Sealable {} +impl FullBlock for T where T: Block + Compact {} /// Abstraction of block data type. // todo: make sealable super-trait, depends on // todo: make with senders extension trait, so block can be impl by block type already containing // senders pub trait Block: - fmt::Debug + Send + + Sync + + Unpin + Clone + + Default + + fmt::Debug + PartialEq + Eq - + Default + serde::Serialize + for<'a> serde::Deserialize<'a> - + From<(Self::Header, Self::Body)> - + Into<(Self::Header, Self::Body)> + + InMemorySize { /// Header part of the block. 
- type Header: Header; + type Header: BlockHeader; /// The block's body contains the transactions in the block. - type Body: BlockBody; - - /// A block and block hash. - type SealedBlock; + type Body: Send + Sync + Unpin + 'static; - /// A block and addresses of senders of transactions in it. - type BlockWithSenders; - - /// Returns reference to [`BlockHeader`] type. + /// Returns reference to block header. fn header(&self) -> &Self::Header; - /// Returns reference to [`BlockBody`] type. + /// Returns reference to block body. fn body(&self) -> &Self::Body; - - /// Calculate the header hash and seal the block so that it can't be changed. - // todo: can be default impl if sealed block type is made generic over header and body and - // migrated to alloy - fn seal_slow(self) -> Self::SealedBlock; - - /// Seal the block with a known hash. - /// - /// WARNING: This method does not perform validation whether the hash is correct. - // todo: can be default impl if sealed block type is made generic over header and body and - // migrated to alloy - fn seal(self, hash: B256) -> Self::SealedBlock; - - /// Expensive operation that recovers transaction signer. See - /// `SealedBlockWithSenders`. - fn senders(&self) -> Option> { - self.body().recover_signers() - } - - /// Transform into a `BlockWithSenders`. - /// - /// # Panics - /// - /// If the number of senders does not match the number of transactions in the block - /// and the signer recovery for one of the transactions fails. - /// - /// Note: this is expected to be called with blocks read from disk. - #[track_caller] - fn with_senders_unchecked(self, senders: Vec

) -> Self::BlockWithSenders { - self.try_with_senders_unchecked(senders).expect("stored block is valid") - } - - /// Transform into a `BlockWithSenders` using the given senders. - /// - /// If the number of senders does not match the number of transactions in the block, this falls - /// back to manually recovery, but _without ensuring that the signature has a low `s` value_. - /// See also `SignedTransaction::recover_signer_unchecked`. - /// - /// Returns an error if a signature is invalid. - // todo: can be default impl if block with senders type is made generic over block and migrated - // to alloy - #[track_caller] - fn try_with_senders_unchecked( - self, - senders: Vec
, - ) -> Result, Self>; - - /// **Expensive**. Transform into a `BlockWithSenders` by recovering senders in the contained - /// transactions. - /// - /// Returns `None` if a transaction is invalid. - // todo: can be default impl if sealed block type is made generic over header and body and - // migrated to alloy - fn with_recovered_senders(self) -> Option>; - - /// Calculates a heuristic for the in-memory size of the [`Block`]. - fn size(&self) -> usize; } diff --git a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs index f3dd28e19295..94eaf95c269f 100644 --- a/crates/primitives-traits/src/constants/mod.rs +++ b/crates/primitives-traits/src/constants/mod.rs @@ -1,6 +1,6 @@ //! Ethereum protocol-related constants -use alloy_primitives::{address, b256, Address, B256}; +use alloy_primitives::{b256, B256}; /// Gas units, for example [`GIGAGAS`]. pub mod gas_units; @@ -16,12 +16,6 @@ pub const MINIMUM_GAS_LIMIT: u64 = 5000; pub const HOLESKY_GENESIS_HASH: B256 = b256!("b5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4"); -/// From address from Optimism system txs: `0xdeaddeaddeaddeaddeaddeaddeaddeaddead0001` -pub const OP_SYSTEM_TX_FROM_ADDR: Address = address!("deaddeaddeaddeaddeaddeaddeaddeaddead0001"); - -/// To address from Optimism system txs: `0x4200000000000000000000000000000000000015` -pub const OP_SYSTEM_TX_TO_ADDR: Address = address!("4200000000000000000000000000000000000015"); - /// The number of blocks to unwind during a reorg that already became a part of canonical chain. /// /// In reality, the node can end up in this particular situation very rarely. It would happen only diff --git a/crates/primitives-traits/src/header/mod.rs b/crates/primitives-traits/src/header/mod.rs index fa9c33245359..760abf33720b 100644 --- a/crates/primitives-traits/src/header/mod.rs +++ b/crates/primitives-traits/src/header/mod.rs @@ -7,8 +7,7 @@ pub use error::HeaderError; #[cfg(any(test, feature = "test-utils", feature = "arbitrary"))] pub mod test_utils; -pub use alloy_consensus::Header; - +use alloy_consensus::Header; use alloy_primitives::{Address, BlockNumber, B256, U256}; /// Bincode-compatible header type serde implementations. diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index 7119a37e742a..5dd9fcf0d5fb 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -1,8 +1,8 @@ use super::Header; +use crate::InMemorySize; +use alloy_consensus::Sealed; use alloy_eips::BlockNumHash; use alloy_primitives::{keccak256, BlockHash, Sealable}; -#[cfg(any(test, feature = "test-utils"))] -use alloy_primitives::{BlockNumber, B256, U256}; use alloy_rlp::{Decodable, Encodable}; use bytes::BufMut; use core::mem; @@ -29,12 +29,10 @@ impl SealedHeader { pub const fn new(header: H, hash: BlockHash) -> Self { Self { header, hash } } -} -impl SealedHeader { /// Returns the sealed Header fields. #[inline] - pub const fn header(&self) -> &Header { + pub const fn header(&self) -> &H { &self.header } @@ -45,30 +43,42 @@ impl SealedHeader { } /// Extract raw header that can be modified. - pub fn unseal(self) -> Header { + pub fn unseal(self) -> H { self.header } /// This is the inverse of [`Header::seal_slow`] which returns the raw header and hash. - pub fn split(self) -> (Header, BlockHash) { + pub fn split(self) -> (H, BlockHash) { (self.header, self.hash) } +} +impl SealedHeader { + /// Hashes the header and creates a sealed header. 
+ pub fn seal(header: H) -> Self { + let hash = header.hash_slow(); + Self::new(header, hash) + } +} + +impl SealedHeader { /// Return the number hash tuple. pub fn num_hash(&self) -> BlockNumHash { - BlockNumHash::new(self.number, self.hash) + BlockNumHash::new(self.number(), self.hash) } +} +impl InMemorySize for SealedHeader { /// Calculates a heuristic for the in-memory size of the [`SealedHeader`]. #[inline] - pub fn size(&self) -> usize { + fn size(&self) -> usize { self.header.size() + mem::size_of::() } } -impl Default for SealedHeader { +impl Default for SealedHeader { fn default() -> Self { - let sealed = Header::default().seal_slow(); + let sealed = H::default().seal_slow(); let (header, hash) = sealed.into_parts(); Self { header, hash } } @@ -117,29 +127,33 @@ impl SealedHeader { } /// Updates the block number. - pub fn set_block_number(&mut self, number: BlockNumber) { + pub fn set_block_number(&mut self, number: alloy_primitives::BlockNumber) { self.header.number = number; } /// Updates the block state root. - pub fn set_state_root(&mut self, state_root: B256) { + pub fn set_state_root(&mut self, state_root: alloy_primitives::B256) { self.header.state_root = state_root; } /// Updates the block difficulty. - pub fn set_difficulty(&mut self, difficulty: U256) { + pub fn set_difficulty(&mut self, difficulty: alloy_primitives::U256) { self.header.difficulty = difficulty; } } +impl From> for Sealed { + fn from(value: SealedHeader) -> Self { + Self::new_unchecked(value.header, value.hash) + } +} + #[cfg(any(test, feature = "arbitrary"))] impl<'a> arbitrary::Arbitrary<'a> for SealedHeader { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { let header = Header::arbitrary(u)?; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Self::new(header, seal)) + Ok(Self::seal(header)) } } diff --git a/crates/primitives-traits/src/header/test_utils.rs b/crates/primitives-traits/src/header/test_utils.rs index c5f6e86b9db1..0e79f6cb462f 100644 --- a/crates/primitives-traits/src/header/test_utils.rs +++ b/crates/primitives-traits/src/header/test_utils.rs @@ -1,6 +1,6 @@ //! Test utilities to generate random valid headers. -use crate::Header; +use alloy_consensus::Header; use alloy_primitives::B256; use proptest::{arbitrary::any, prop_compose}; use proptest_arbitrary_interop::arb; diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 57d1119b0351..b8f0aa4c8a88 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -14,6 +14,7 @@ extern crate alloc; /// Common constants. 
pub mod constants; + pub use constants::gas_units::{format_gas, format_gas_throughput}; /// Minimal account @@ -21,19 +22,25 @@ pub mod account; pub use account::{Account, Bytecode}; pub mod receipt; -pub use receipt::Receipt; +pub use receipt::{FullReceipt, Receipt}; pub mod transaction; -pub use transaction::{signed::SignedTransaction, Transaction}; +pub use transaction::{ + signed::{FullSignedTx, SignedTransaction}, + FullTransaction, Transaction, TransactionExt, +}; mod integer_list; pub use integer_list::{IntegerList, IntegerListError}; pub mod block; -pub use block::{body::BlockBody, Block}; +pub use block::{ + body::BlockBody, + header::{BlockHeader, FullBlockHeader}, + Block, FullBlock, +}; mod withdrawal; -pub use withdrawal::{Withdrawal, Withdrawals}; mod error; pub use error::{GotExpected, GotExpectedBoxed}; @@ -44,11 +51,15 @@ pub use alloy_primitives::{logs_bloom, Log, LogData}; mod storage; pub use storage::StorageEntry; +/// Transaction types +pub mod tx_type; +pub use tx_type::{FullTxType, TxType}; + /// Common header types pub mod header; #[cfg(any(test, feature = "arbitrary", feature = "test-utils"))] pub use header::test_utils; -pub use header::{BlockHeader, Header, HeaderError, SealedHeader}; +pub use header::{HeaderError, SealedHeader}; /// Bincode-compatible serde implementations for common abstracted types in Reth. /// @@ -61,3 +72,23 @@ pub use header::{BlockHeader, Header, HeaderError, SealedHeader}; pub mod serde_bincode_compat { pub use super::header::{serde_bincode_compat as header, serde_bincode_compat::*}; } + +/// Heuristic size trait +pub mod size; +pub use size::InMemorySize; + +/// Node traits +pub mod node; +pub use node::{FullNodePrimitives, NodePrimitives}; + +/// Helper trait that requires arbitrary implementation if the feature is enabled. +#[cfg(any(feature = "test-utils", feature = "arbitrary"))] +pub trait MaybeArbitrary: for<'a> arbitrary::Arbitrary<'a> {} +/// Helper trait that requires arbitrary implementation if the feature is enabled. +#[cfg(not(any(feature = "test-utils", feature = "arbitrary")))] +pub trait MaybeArbitrary {} + +#[cfg(any(feature = "test-utils", feature = "arbitrary"))] +impl MaybeArbitrary for T where T: for<'a> arbitrary::Arbitrary<'a> {} +#[cfg(not(any(feature = "test-utils", feature = "arbitrary")))] +impl MaybeArbitrary for T {} diff --git a/crates/primitives-traits/src/node.rs b/crates/primitives-traits/src/node.rs new file mode 100644 index 000000000000..cebbbe202e85 --- /dev/null +++ b/crates/primitives-traits/src/node.rs @@ -0,0 +1,44 @@ +use core::fmt; + +use crate::{BlockBody, FullBlock, FullReceipt, FullSignedTx, FullTxType}; + +/// Configures all the primitive types of the node. +pub trait NodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug { + /// Block primitive. + type Block: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static; + /// Signed version of the transaction type. + type SignedTx: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static; + /// Transaction envelope type ID. + type TxType: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static; + /// A receipt. + type Receipt: Send + Sync + Unpin + Clone + Default + fmt::Debug + 'static; +} + +impl NodePrimitives for () { + type Block = (); + type SignedTx = (); + type TxType = (); + type Receipt = (); +} + +/// Helper trait that sets trait bounds on [`NodePrimitives`]. +pub trait FullNodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug { + /// Block primitive. 
+ type Block: FullBlock<Body: BlockBody>; + /// Signed version of the transaction type. + type SignedTx: FullSignedTx; + /// Transaction envelope type ID. + type TxType: FullTxType; + /// A receipt. + type Receipt: FullReceipt; +} + +impl<T> NodePrimitives for T +where + T: FullNodePrimitives, +{ + type Block = T::Block; + type SignedTx = T::SignedTx; + type TxType = T::TxType; + type Receipt = T::Receipt; +} diff --git a/crates/primitives-traits/src/receipt.rs b/crates/primitives-traits/src/receipt.rs index 5c317dc49a23..68917d628120 100644 --- a/crates/primitives-traits/src/receipt.rs +++ b/crates/primitives-traits/src/receipt.rs @@ -1,6 +1,9 @@ //! Receipt abstraction +use core::fmt; + use alloy_consensus::TxReceipt; +use alloy_primitives::B256; use reth_codecs::Compact; use serde::{Deserialize, Serialize}; @@ -11,8 +14,13 @@ impl<T> FullReceipt for T where T: Receipt + Compact {} /// Abstraction of a receipt. pub trait Receipt: - TxReceipt + Send + + Sync + + Unpin + + Clone + Default + + fmt::Debug + + TxReceipt + alloy_rlp::Encodable + alloy_rlp::Decodable + Serialize @@ -20,4 +28,7 @@ pub trait Receipt: { /// Returns transaction type. fn tx_type(&self) -> u8; + + /// Calculates the receipts root of the given receipts. + fn receipts_root(receipts: &[&Self]) -> B256; } diff --git a/crates/primitives-traits/src/size.rs b/crates/primitives-traits/src/size.rs new file mode 100644 index 000000000000..0c250688e057 --- /dev/null +++ b/crates/primitives-traits/src/size.rs @@ -0,0 +1,11 @@ +/// Trait for calculating a heuristic for the in-memory size of a struct. +pub trait InMemorySize { + /// Returns a heuristic for the in-memory size of a struct. + fn size(&self) -> usize; +} + +impl InMemorySize for alloy_consensus::Header { + fn size(&self) -> usize { + self.size() + } +} diff --git a/crates/primitives-traits/src/transaction/mod.rs b/crates/primitives-traits/src/transaction/mod.rs index a306c5f76ed5..bb6f6a711e3e 100644 --- a/crates/primitives-traits/src/transaction/mod.rs +++ b/crates/primitives-traits/src/transaction/mod.rs @@ -2,11 +2,14 @@ pub mod signed; -use alloc::fmt; +use core::{fmt, hash::Hash}; +use alloy_primitives::B256; use reth_codecs::Compact; use serde::{Deserialize, Serialize}; +use crate::{InMemorySize, MaybeArbitrary, TxType}; + /// Helper trait that unifies all behaviour required by transaction to support full node operations. pub trait FullTransaction: Transaction + Compact {} @@ -14,15 +17,52 @@ impl<T> FullTransaction for T where T: Transaction + Compact {} /// Abstraction of a transaction. pub trait Transaction: - alloy_consensus::Transaction + Send + + Sync + + Unpin + Clone + + Default + fmt::Debug - + PartialEq + Eq - + Default - + alloy_rlp::Encodable - + alloy_rlp::Decodable + + PartialEq + + Hash + Serialize + for<'de> Deserialize<'de> + + TransactionExt + + InMemorySize + + MaybeArbitrary +{ +} + +impl<T> Transaction for T where + T: Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + Eq + + PartialEq + + Hash + + Serialize + + for<'de> Deserialize<'de> + + TransactionExt + + InMemorySize + + MaybeArbitrary { } + +/// Extension trait of [`alloy_consensus::Transaction`]. +pub trait TransactionExt: alloy_consensus::Transaction { + /// Transaction envelope type ID. + type Type: TxType; + + /// Heavy operation that returns the signature hash over the RLP-encoded transaction. + /// It is only for signature signing or signer recovery. + fn signature_hash(&self) -> B256; + + /// Returns the transaction type.
+ fn tx_type(&self) -> Self::Type { + Self::Type::try_from(self.ty()).expect("should decode tx type id") + } +} diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs index 1bc8308b13f0..455a9886eb8f 100644 --- a/crates/primitives-traits/src/transaction/signed.rs +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -3,32 +3,41 @@ use alloc::fmt; use core::hash::Hash; -use alloy_consensus::Transaction; use alloy_eips::eip2718::{Decodable2718, Encodable2718}; -use alloy_primitives::{keccak256, Address, TxHash, B256}; +use alloy_primitives::{keccak256, Address, PrimitiveSignature, TxHash, B256}; +use reth_codecs::Compact; +use revm_primitives::TxEnv; + +use crate::{transaction::TransactionExt, FullTransaction, MaybeArbitrary, Transaction}; + +/// Helper trait that unifies all behaviour required by a signed transaction to support full node operations. +pub trait FullSignedTx: SignedTransaction + Compact {} + +impl<T> FullSignedTx for T where T: SignedTransaction + Compact {} /// A signed transaction. pub trait SignedTransaction: - fmt::Debug + Send + + Sync + + Unpin + Clone + + Default + + fmt::Debug + PartialEq + Eq + Hash - + Send - + Sync + serde::Serialize + for<'a> serde::Deserialize<'a> + alloy_rlp::Encodable + alloy_rlp::Decodable + Encodable2718 + Decodable2718 + + TransactionExt + + MaybeArbitrary { /// Transaction type that is signed. type Transaction: Transaction; - /// Signature type that results from signing transaction. - type Signature; - /// Returns reference to transaction hash. fn tx_hash(&self) -> &TxHash; @@ -36,7 +45,7 @@ fn transaction(&self) -> &Self::Transaction; /// Returns reference to signature. - fn signature(&self) -> &Self::Signature; + fn signature(&self) -> &PrimitiveSignature; /// Recover signer from signature and hash. /// @@ -61,7 +70,7 @@ /// This will also calculate the transaction hash using its encoding. fn from_transaction_and_signature( transaction: Self::Transaction, - signature: Self::Signature, + signature: PrimitiveSignature, ) -> Self; /// Calculate transaction hash; an eip2718 transaction does not contain an rlp header and starts with the tx type. @@ -69,4 +78,15 @@ fn recalculate_hash(&self) -> B256 { keccak256(self.encoded_2718()) } + + /// Fills [`TxEnv`] with an [`Address`] and transaction. + fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address); +} + +impl<T: SignedTransaction> TransactionExt for T { + type Type = <T::Transaction as TransactionExt>::Type; + + fn signature_hash(&self) -> B256 { + self.transaction().signature_hash() + } } diff --git a/crates/primitives-traits/src/tx_type.rs b/crates/primitives-traits/src/tx_type.rs new file mode 100644 index 000000000000..078d8ac947ba --- /dev/null +++ b/crates/primitives-traits/src/tx_type.rs @@ -0,0 +1,55 @@ +use core::fmt; + +use alloy_primitives::{U64, U8}; +use reth_codecs::Compact; + +/// Helper trait that unifies all behaviour required by transaction type ID to support full node +/// operations. +pub trait FullTxType: TxType + Compact {} + +impl<T> FullTxType for T where T: TxType + Compact {} + +/// Trait representing the behavior of a transaction type.
+pub trait TxType: + Send + + Sync + + Unpin + + Clone + + Copy + + Default + + fmt::Debug + + fmt::Display + + PartialEq + + Eq + + PartialEq<u8> + + Into<u8> + + Into<U8> + + TryFrom<u8, Error: fmt::Debug> + + TryFrom<u64> + + TryFrom<U64> + + alloy_rlp::Encodable + + alloy_rlp::Decodable +{ +} + +impl<T> TxType for T where + T: Send + + Sync + + Unpin + + Clone + + Copy + + Default + + fmt::Debug + + fmt::Display + + PartialEq + + Eq + + PartialEq<u8> + + Into<u8> + + Into<U8> + + TryFrom<u8, Error: fmt::Debug> + + TryFrom<u64> + + TryFrom<U64> + + alloy_rlp::Encodable + + alloy_rlp::Decodable +{ +} diff --git a/crates/primitives-traits/src/withdrawal.rs b/crates/primitives-traits/src/withdrawal.rs index 995e60292c6e..0849ab6202e6 100644 --- a/crates/primitives-traits/src/withdrawal.rs +++ b/crates/primitives-traits/src/withdrawal.rs @@ -1,97 +1,14 @@ //! [EIP-4895](https://eips.ethereum.org/EIPS/eip-4895) Withdrawal types. -use alloc::vec::Vec; -use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; -use derive_more::{AsRef, Deref, DerefMut, From, IntoIterator}; -use reth_codecs::{add_arbitrary_tests, Compact}; - -/// Re-export from `alloy_eips`. -#[doc(inline)] -pub use alloy_eips::eip4895::Withdrawal; -use serde::{Deserialize, Serialize}; - -/// Represents a collection of Withdrawals. -#[derive( - Debug, - Clone, - PartialEq, - Eq, - Default, - Hash, - From, - AsRef, - Deref, - DerefMut, - IntoIterator, - RlpEncodableWrapper, - RlpDecodableWrapper, - Serialize, - Deserialize, - Compact, -)] -#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] -#[as_ref(forward)] -pub struct Withdrawals(Vec<Withdrawal>); - -impl Withdrawals { - /// Create a new Withdrawals instance. - pub const fn new(withdrawals: Vec<Withdrawal>) -> Self { - Self(withdrawals) - } - - /// Calculate the total size, including capacity, of the Withdrawals. - #[inline] - pub fn total_size(&self) -> usize { - self.capacity() * core::mem::size_of::<Withdrawal>() - } - - /// Calculate a heuristic for the in-memory size of the [Withdrawals]. - #[inline] - pub fn size(&self) -> usize { - self.len() * core::mem::size_of::<Withdrawal>() - } - - /// Get an iterator over the Withdrawals. - pub fn iter(&self) -> core::slice::Iter<'_, Withdrawal> { - self.0.iter() - } - - /// Get a mutable iterator over the Withdrawals. - pub fn iter_mut(&mut self) -> core::slice::IterMut<'_, Withdrawal> { - self.0.iter_mut() - } - - /// Convert [Self] into raw vec of withdrawals.
- pub fn into_inner(self) -> Vec { - self.0 - } -} - -impl<'a> IntoIterator for &'a Withdrawals { - type Item = &'a Withdrawal; - type IntoIter = core::slice::Iter<'a, Withdrawal>; - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl<'a> IntoIterator for &'a mut Withdrawals { - type Item = &'a mut Withdrawal; - type IntoIter = core::slice::IterMut<'a, Withdrawal>; - - fn into_iter(self) -> Self::IntoIter { - self.iter_mut() - } -} - #[cfg(test)] mod tests { - use super::*; + use alloy_eips::eip4895::Withdrawal; use alloy_primitives::Address; use alloy_rlp::{RlpDecodable, RlpEncodable}; use proptest::proptest; use proptest_arbitrary_interop::arb; + use reth_codecs::{add_arbitrary_tests, Compact}; + use serde::{Deserialize, Serialize}; /// This type is kept for compatibility tests after the codec support was added to alloy-eips /// Withdrawal type natively diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 5e761f41fe22..34d04c94edcd 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -16,17 +16,18 @@ workspace = true reth-primitives-traits.workspace = true reth-ethereum-forks.workspace = true reth-static-file-types.workspace = true -reth-trie-common.workspace = true revm-primitives = { workspace = true, features = ["serde"] } reth-codecs = { workspace = true, optional = true } # ethereum alloy-consensus.workspace = true +alloy-network = { workspace = true, optional = true } alloy-primitives = { workspace = true, features = ["rand", "rlp"] } alloy-rlp = { workspace = true, features = ["arrayvec"] } alloy-rpc-types = { workspace = true, optional = true } alloy-serde = { workspace = true, optional = true } alloy-eips = { workspace = true, features = ["serde"] } +alloy-trie = { workspace = true, features = ["serde"] } # optimism op-alloy-rpc-types = { workspace = true, optional = true } @@ -65,6 +66,7 @@ reth-chainspec.workspace = true reth-codecs = { workspace = true, features = ["test-utils"] } reth-primitives-traits = { workspace = true, features = ["arbitrary"] } reth-testing-utils.workspace = true +reth-trie-common.workspace = true revm-primitives = { workspace = true, features = ["arbitrary"] } alloy-eips = { workspace = true, features = ["arbitrary"] } @@ -78,6 +80,7 @@ proptest.workspace = true rand.workspace = true serde_json.workspace = true test-fuzz.workspace = true +rstest.workspace = true criterion.workspace = true pprof = { workspace = true, features = [ @@ -100,6 +103,7 @@ std = [ "revm-primitives/std", "secp256k1?/std", "serde/std", + "alloy-trie/std" ] reth-codec = ["dep:reth-codecs", "dep:zstd", "dep:modular-bitfield", "std"] asm-keccak = ["alloy-primitives/asm-keccak", "revm-primitives/asm-keccak"] @@ -113,14 +117,15 @@ arbitrary = [ "revm-primitives/arbitrary", "secp256k1", "reth-chainspec/arbitrary", - "reth-trie-common/arbitrary", "alloy-consensus/arbitrary", "alloy-primitives/arbitrary", "alloy-rpc-types?/arbitrary", "alloy-serde?/arbitrary", "op-alloy-consensus?/arbitrary", "op-alloy-rpc-types?/arbitrary", - "reth-codecs?/arbitrary" + "reth-codecs?/arbitrary", + "alloy-trie/arbitrary", + "reth-trie-common/arbitrary" ] secp256k1 = ["dep:secp256k1"] c-kzg = [ @@ -138,12 +143,14 @@ alloy-compat = [ "dep:alloy-rpc-types", "dep:alloy-serde", "dep:op-alloy-rpc-types", + "dep:alloy-network", ] test-utils = [ "reth-primitives-traits/test-utils", "reth-chainspec/test-utils", "reth-codecs?/test-utils", "reth-trie-common/test-utils", + "arbitrary", ] serde-bincode-compat = [ 
"alloy-consensus/serde-bincode-compat", diff --git a/crates/primitives/benches/validate_blob_tx.rs b/crates/primitives/benches/validate_blob_tx.rs index 50498a9420fd..453381366e14 100644 --- a/crates/primitives/benches/validate_blob_tx.rs +++ b/crates/primitives/benches/validate_blob_tx.rs @@ -1,7 +1,9 @@ #![allow(missing_docs)] use alloy_consensus::TxEip4844; -use alloy_eips::eip4844::{env_settings::EnvKzgSettings, MAX_BLOBS_PER_BLOCK}; +use alloy_eips::eip4844::{ + env_settings::EnvKzgSettings, BlobTransactionSidecar, MAX_BLOBS_PER_BLOCK, +}; use alloy_primitives::hex; use criterion::{ criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion, @@ -12,7 +14,6 @@ use proptest::{ test_runner::{RngAlgorithm, TestRng, TestRunner}, }; use proptest_arbitrary_interop::arb; -use reth_primitives::BlobTransactionSidecar; // constant seed to use for the rng const SEED: [u8; 32] = hex!("1337133713371337133713371337133713371337133713371337133713371337"); diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs index 917baef66612..462b27f9c73c 100644 --- a/crates/primitives/src/alloy_compat.rs +++ b/crates/primitives/src/alloy_compat.rs @@ -1,26 +1,20 @@ //! Common conversions from alloy types. -use crate::{ - transaction::extract_chain_id, Block, BlockBody, Signature, Transaction, TransactionSigned, - TransactionSignedEcRecovered, TransactionSignedNoHash, TxType, -}; +use crate::{Block, BlockBody, Transaction, TransactionSigned}; use alloc::{string::ToString, vec::Vec}; -use alloy_consensus::{ - constants::EMPTY_TRANSACTIONS, Transaction as _, TxEip1559, TxEip2930, TxEip4844, TxLegacy, -}; -use alloy_primitives::{Parity, TxKind}; -use alloy_rlp::Error as RlpError; +use alloy_consensus::{constants::EMPTY_TRANSACTIONS, Header, TxEnvelope}; +use alloy_network::{AnyHeader, AnyRpcBlock, AnyRpcTransaction, AnyTxEnvelope}; use alloy_serde::WithOtherFields; use op_alloy_rpc_types as _; -impl TryFrom>> for Block { +impl TryFrom for Block { type Error = alloy_rpc_types::ConversionError; - fn try_from( - block: alloy_rpc_types::Block>, - ) -> Result { + fn try_from(block: AnyRpcBlock) -> Result { use alloy_rpc_types::ConversionError; + let block = block.inner; + let transactions = { let transactions: Result, ConversionError> = match block .transactions @@ -35,241 +29,134 @@ impl TryFrom> for Transaction { +impl TryFrom for TransactionSigned { type Error = alloy_rpc_types::ConversionError; - fn try_from(tx: WithOtherFields) -> Result { - use alloy_eips::eip2718::Eip2718Error; + fn try_from(tx: AnyRpcTransaction) -> Result { use alloy_rpc_types::ConversionError; - #[cfg(feature = "optimism")] - let WithOtherFields { inner: tx, other } = tx; - #[cfg(not(feature = "optimism"))] let WithOtherFields { inner: tx, other: _ } = tx; - match tx.transaction_type.map(TryInto::try_into).transpose().map_err(|_| { - ConversionError::Eip2718Error(Eip2718Error::UnexpectedType( - tx.transaction_type.unwrap(), - )) - })? { - None | Some(TxType::Legacy) => { - // legacy - if tx.max_fee_per_gas.is_some() || tx.max_priority_fee_per_gas.is_some() { - return Err(ConversionError::Eip2718Error( - RlpError::Custom("EIP-1559 fields are present in a legacy transaction") - .into(), - )) - } - - // extract the chain id if possible - let chain_id = match tx.chain_id { - Some(chain_id) => Some(chain_id), - None => { - if let Some(signature) = tx.signature { - // TODO: make this error conversion better. 
This is needed because - // sometimes rpc providers return legacy transactions without a chain id - // explicitly in the response, however those transactions may also have - // a chain id in the signature from eip155 - extract_chain_id(signature.v.to()) - .map_err(|err| ConversionError::Eip2718Error(err.into()))? - .1 - } else { - return Err(ConversionError::MissingChainId) - } - } - }; - - Ok(Self::Legacy(TxLegacy { - chain_id, - nonce: tx.nonce, - gas_price: tx.gas_price.ok_or(ConversionError::MissingGasPrice)?, - gas_limit: tx.gas, - to: tx.to.map_or(TxKind::Create, TxKind::Call), - value: tx.value, - input: tx.input, - })) + let (transaction, signature, hash) = match tx.inner { + AnyTxEnvelope::Ethereum(TxEnvelope::Legacy(tx)) => { + let (tx, signature, hash) = tx.into_parts(); + (Transaction::Legacy(tx), signature, hash) } - Some(TxType::Eip2930) => { - // eip2930 - Ok(Self::Eip2930(TxEip2930 { - chain_id: tx.chain_id.ok_or(ConversionError::MissingChainId)?, - nonce: tx.nonce, - gas_limit: tx.gas, - to: tx.to.map_or(TxKind::Create, TxKind::Call), - value: tx.value, - input: tx.input, - access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, - gas_price: tx.gas_price.ok_or(ConversionError::MissingGasPrice)?, - })) + AnyTxEnvelope::Ethereum(TxEnvelope::Eip2930(tx)) => { + let (tx, signature, hash) = tx.into_parts(); + (Transaction::Eip2930(tx), signature, hash) } - Some(TxType::Eip1559) => { - // EIP-1559 - Ok(Self::Eip1559(TxEip1559 { - chain_id: tx.chain_id.ok_or(ConversionError::MissingChainId)?, - nonce: tx.nonce, - max_priority_fee_per_gas: tx - .max_priority_fee_per_gas - .ok_or(ConversionError::MissingMaxPriorityFeePerGas)?, - max_fee_per_gas: tx - .max_fee_per_gas - .ok_or(ConversionError::MissingMaxFeePerGas)?, - gas_limit: tx.gas, - to: tx.to.map_or(TxKind::Create, TxKind::Call), - value: tx.value, - access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, - input: tx.input, - })) + AnyTxEnvelope::Ethereum(TxEnvelope::Eip1559(tx)) => { + let (tx, signature, hash) = tx.into_parts(); + (Transaction::Eip1559(tx), signature, hash) } - Some(TxType::Eip4844) => { - // EIP-4844 - Ok(Self::Eip4844(TxEip4844 { - chain_id: tx.chain_id.ok_or(ConversionError::MissingChainId)?, - nonce: tx.nonce, - max_priority_fee_per_gas: tx - .max_priority_fee_per_gas - .ok_or(ConversionError::MissingMaxPriorityFeePerGas)?, - max_fee_per_gas: tx - .max_fee_per_gas - .ok_or(ConversionError::MissingMaxFeePerGas)?, - gas_limit: tx.gas, - to: tx.to.unwrap_or_default(), - value: tx.value, - access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, - input: tx.input, - blob_versioned_hashes: tx - .blob_versioned_hashes - .ok_or(ConversionError::MissingBlobVersionedHashes)?, - max_fee_per_blob_gas: tx - .max_fee_per_blob_gas - .ok_or(ConversionError::MissingMaxFeePerBlobGas)?, - })) + AnyTxEnvelope::Ethereum(TxEnvelope::Eip4844(tx)) => { + let (tx, signature, hash) = tx.into_parts(); + (Transaction::Eip4844(tx.into()), signature, hash) } - Some(TxType::Eip7702) => { - // this is currently unsupported as it is not present in alloy due to missing rpc - // specs - Err(ConversionError::Custom("Unimplemented".to_string())) - /* - // EIP-7702 - Ok(Transaction::Eip7702(TxEip7702 { - chain_id: tx.chain_id.ok_or(ConversionError::MissingChainId)?, - nonce: tx.nonce, - max_priority_fee_per_gas: tx - .max_priority_fee_per_gas - .ok_or(ConversionError::MissingMaxPriorityFeePerGas)?, - max_fee_per_gas: tx - .max_fee_per_gas - .ok_or(ConversionError::MissingMaxFeePerGas)?, - 
gas_limit: tx - .gas - .try_into() - .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, - to: tx.to.map_or(TxKind::Create, TxKind::Call), - value: tx.value, - access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, - authorization_list: tx - .authorization_list - .ok_or(ConversionError::MissingAuthorizationList)?, - input: tx.input, - }))*/ + AnyTxEnvelope::Ethereum(TxEnvelope::Eip7702(tx)) => { + let (tx, signature, hash) = tx.into_parts(); + (Transaction::Eip7702(tx), signature, hash) } #[cfg(feature = "optimism")] - Some(TxType::Deposit) => { - let fields = other - .deserialize_into::() - .map_err(|e| ConversionError::Custom(e.to_string()))?; - Ok(Self::Deposit(op_alloy_consensus::TxDeposit { - source_hash: fields - .source_hash - .ok_or_else(|| ConversionError::Custom("MissingSourceHash".to_string()))?, - from: tx.from, - to: TxKind::from(tx.to), - mint: fields.mint.filter(|n| *n != 0), - value: tx.value, - gas_limit: tx.gas, - is_system_transaction: fields.is_system_tx.unwrap_or(false), - input: tx.input, - })) - } - } - } -} + AnyTxEnvelope::Unknown(alloy_network::UnknownTxEnvelope { hash, inner }) => { + use alloy_consensus::Transaction as _; -impl TryFrom> for TransactionSigned { - type Error = alloy_rpc_types::ConversionError; - - fn try_from(tx: WithOtherFields) -> Result { - use alloy_rpc_types::ConversionError; - - let signature = tx.signature.ok_or(ConversionError::MissingSignature)?; - let transaction: Transaction = tx.try_into()?; - let y_parity = if let Some(y_parity) = signature.y_parity { - y_parity.0 - } else { - match transaction.tx_type() { - // If the transaction type is Legacy, adjust the v component of the - // signature according to the Ethereum specification - TxType::Legacy => { - extract_chain_id(signature.v.to()) - .map_err(|_| ConversionError::InvalidSignature)? 
- .0 + if inner.ty() == crate::TxType::Deposit { + let fields: op_alloy_rpc_types::OpTransactionFields = inner + .fields + .clone() + .deserialize_into::() + .map_err(|e| ConversionError::Custom(e.to_string()))?; + ( + Transaction::Deposit(op_alloy_consensus::TxDeposit { + source_hash: fields.source_hash.ok_or_else(|| { + ConversionError::Custom("MissingSourceHash".to_string()) + })?, + from: tx.from, + to: revm_primitives::TxKind::from(inner.to()), + mint: fields.mint.filter(|n| *n != 0), + value: inner.value(), + gas_limit: inner.gas_limit(), + is_system_transaction: fields.is_system_tx.unwrap_or(false), + input: inner.input().clone(), + }), + op_alloy_consensus::TxDeposit::signature(), + hash, + ) + } else { + return Err(ConversionError::Custom("unknown transaction type".to_string())) } - _ => !signature.v.is_zero(), } + _ => return Err(ConversionError::Custom("unknown transaction type".to_string())), }; - let mut parity = Parity::Parity(y_parity); - - if matches!(transaction.tx_type(), TxType::Legacy) { - if let Some(chain_id) = transaction.chain_id() { - parity = parity.with_chain_id(chain_id) - } - } - - Ok(Self::from_transaction_and_signature( - transaction, - Signature::new(signature.r, signature.s, parity), - )) - } -} - -impl TryFrom> for TransactionSignedEcRecovered { - type Error = alloy_rpc_types::ConversionError; - - fn try_from(tx: WithOtherFields) -> Result { - use alloy_rpc_types::ConversionError; - - let transaction: TransactionSigned = tx.try_into()?; - - transaction.try_into_ecrecovered().map_err(|_| ConversionError::InvalidSignature) - } -} - -impl TryFrom> for TransactionSignedNoHash { - type Error = alloy_rpc_types::ConversionError; - - fn try_from(tx: WithOtherFields) -> Result { - Ok(Self { - signature: tx.signature.ok_or(Self::Error::MissingSignature)?.try_into()?, - transaction: tx.try_into()?, - }) + Ok(Self { transaction, signature, hash }) } } @@ -278,7 +165,7 @@ impl TryFrom> for TransactionSigne mod tests { use super::*; use alloy_primitives::{address, Address, B256, U256}; - use alloy_rpc_types::Transaction as AlloyTransaction; + use revm_primitives::TxKind; #[test] fn optimism_deposit_tx_conversion_no_mint() { @@ -302,10 +189,11 @@ mod tests { "v": "0x0", "value": "0x0" }"#; - let alloy_tx: WithOtherFields = + let alloy_tx: WithOtherFields> = serde_json::from_str(input).expect("failed to deserialize"); - let reth_tx: Transaction = alloy_tx.try_into().expect("failed to convert"); + let TransactionSigned { transaction: reth_tx, .. } = + alloy_tx.try_into().expect("failed to convert"); if let Transaction::Deposit(deposit_tx) = reth_tx { assert_eq!( deposit_tx.source_hash, @@ -352,10 +240,11 @@ mod tests { "v": "0x0", "value": "0x239c2e16a5ca590000" }"#; - let alloy_tx: WithOtherFields = + let alloy_tx: WithOtherFields> = serde_json::from_str(input).expect("failed to deserialize"); - let reth_tx: Transaction = alloy_tx.try_into().expect("failed to convert"); + let TransactionSigned { transaction: reth_tx, .. 
} = + alloy_tx.try_into().expect("failed to convert"); if let Transaction::Deposit(deposit_tx) = reth_tx { assert_eq!( diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index a06979300acd..703f9d331696 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -1,16 +1,13 @@ -use crate::{ - GotExpected, Header, SealedHeader, TransactionSigned, TransactionSignedEcRecovered, Withdrawals, -}; +use crate::{GotExpected, SealedHeader, TransactionSigned, TransactionSignedEcRecovered}; use alloc::vec::Vec; -pub use alloy_eips::eip1898::{ - BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, ForkBlock, RpcBlockHash, -}; -use alloy_eips::eip2718::Encodable2718; -use alloy_primitives::{Address, Bytes, Sealable, B256}; +use alloy_consensus::Header; +use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawals}; +use alloy_primitives::{Address, Bytes, B256}; use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable}; use derive_more::{Deref, DerefMut}; #[cfg(any(test, feature = "arbitrary"))] pub use reth_primitives_traits::test_utils::{generate_valid_header, valid_header_strategy}; +use reth_primitives_traits::InMemorySize; use serde::{Deserialize, Serialize}; /// Ethereum full block. @@ -29,9 +26,7 @@ pub struct Block { impl Block { /// Calculate the header hash and seal the block so that it can't be changed. pub fn seal_slow(self) -> SealedBlock { - let sealed = self.header.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedBlock { header: SealedHeader::new(header, seal), body: self.body } + SealedBlock { header: SealedHeader::seal(self.header), body: self.body } } /// Seal the block with a known hash. @@ -89,10 +84,25 @@ impl Block { let senders = self.senders()?; Some(BlockWithSenders { block: self, senders }) } +} + +impl reth_primitives_traits::Block for Block { + type Header = Header; + type Body = BlockBody; + + fn body(&self) -> &Self::Body { + &self.body + } + + fn header(&self) -> &Self::Header { + &self.header + } +} +impl InMemorySize for Block { /// Calculates a heuristic for the in-memory size of the [`Block`]. #[inline] - pub fn size(&self) -> usize { + fn size(&self) -> usize { self.header.size() + self.body.size() } } @@ -150,27 +160,27 @@ mod block_rlp { } impl Encodable for Block { - fn length(&self) -> usize { - let helper: HelperRef<'_, _> = self.into(); - helper.length() - } - fn encode(&self, out: &mut dyn bytes::BufMut) { let helper: HelperRef<'_, _> = self.into(); helper.encode(out) } - } - impl Encodable for SealedBlock { fn length(&self) -> usize { let helper: HelperRef<'_, _> = self.into(); helper.length() } + } + impl Encodable for SealedBlock { fn encode(&self, out: &mut dyn bytes::BufMut) { let helper: HelperRef<'_, _> = self.into(); helper.encode(out) } + + fn length(&self) -> usize { + let helper: HelperRef<'_, _> = self.into(); + helper.length() + } } } @@ -261,22 +271,21 @@ impl BlockWithSenders { /// Sealed Ethereum full block. /// /// Withdrawals can be optionally included at the end of the RLP encoded message. -#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(rlp, 32))] -#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, Deref, DerefMut)] -pub struct SealedBlock { +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Deref, DerefMut)] +pub struct SealedBlock { /// Locked block header. 
    #[deref]
    #[deref_mut]
-    pub header: SealedHeader,
+    pub header: SealedHeader<H>,
     /// Block body.
-    pub body: BlockBody,
+    pub body: B,
 }
 
-impl SealedBlock {
+impl<H, B> SealedBlock<H, B> {
     /// Create a new sealed block instance using the sealed header and block body.
     #[inline]
-    pub const fn new(header: SealedHeader, body: BlockBody) -> Self {
+    pub const fn new(header: SealedHeader<H>, body: B) -> Self {
         Self { header, body }
     }
@@ -286,16 +295,18 @@ impl SealedBlock {
         self.header.hash()
     }
 
-    /// Splits the sealed block into underlying components
+    /// Splits the [`BlockBody`] and [`SealedHeader`] into separate components
     #[inline]
-    pub fn split(self) -> (SealedHeader, Vec<TransactionSigned>, Vec<Header>) {
-        (self.header, self.body.transactions, self.body.ommers)
+    pub fn split_header_body(self) -> (SealedHeader<H>, B) {
+        (self.header, self.body)
     }
+}
 
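Reviewer note: with `SealedBlock` generic over header and body, the two split helpers intentionally live in different impl blocks: `split_header_body` is available for any `SealedBlock<H, B>`, while `split` stays on the defaulted `SealedBlock<Header, BlockBody>` because it reaches into `transactions` and `ommers`. A minimal caller-side sketch (hypothetical code, assuming the defaults shown in this diff):

fn demo(block: SealedBlock) {
    // generic: works for every header/body combination
    let (_header, _body) = block.clone().split_header_body();
    // default-only: requires `B = BlockBody`, which exposes transactions and ommers
    let (_header, _transactions, _ommers) = block.split();
}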
-    /// Splits the [`BlockBody`] and [`SealedHeader`] into separate components
+impl SealedBlock {
+    /// Splits the sealed block into underlying components
     #[inline]
-    pub fn split_header_body(self) -> (SealedHeader, BlockBody) {
-        (self.header, self.body)
+    pub fn split(self) -> (SealedHeader, Vec<TransactionSigned>, Vec<Header>) {
+        (self.header, self.body.transactions, self.body.ommers)
     }
 
     /// Returns an iterator over all blob transactions of the block
@@ -380,12 +391,6 @@ impl SealedBlock {
         Block { header: self.header.unseal(), body: self.body }
     }
 
-    /// Calculates a heuristic for the in-memory size of the [`SealedBlock`].
-    #[inline]
-    pub fn size(&self) -> usize {
-        self.header.size() + self.body.size()
-    }
-
     /// Calculates the total gas used by blob transactions in the sealed block.
     pub fn blob_gas_used(&self) -> u64 {
         self.blob_transactions().iter().filter_map(|tx| tx.blob_gas_used()).sum()
@@ -435,12 +440,40 @@ impl SealedBlock {
     }
 }
 
+impl<H: InMemorySize, B: InMemorySize> InMemorySize for SealedBlock<H, B> {
+    #[inline]
+    fn size(&self) -> usize {
+        self.header.size() + self.body.size()
+    }
+}
+
 impl From<SealedBlock> for Block {
     fn from(block: SealedBlock) -> Self {
         block.unseal()
     }
 }
 
+impl<H, B> Default for SealedBlock<H, B>
+where
+    SealedHeader<H>: Default,
+    B: Default,
+{
+    fn default() -> Self {
+        Self { header: Default::default(), body: Default::default() }
+    }
+}
+
+#[cfg(any(test, feature = "arbitrary"))]
+impl<'a, H, B> arbitrary::Arbitrary<'a> for SealedBlock<H, B>
+where
+    SealedHeader<H>: arbitrary::Arbitrary<'a>,
+    B: arbitrary::Arbitrary<'a>,
+{
+    fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
+        Ok(Self { header: u.arbitrary()?, body: u.arbitrary()? })
+    }
+}
+
 /// Sealed block with senders recovered from transactions.
 #[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, Deref, DerefMut)]
 pub struct SealedBlockWithSenders {
@@ -508,7 +541,7 @@ impl SealedBlockWithSenders {
 #[cfg(any(test, feature = "arbitrary"))]
 impl<'a> arbitrary::Arbitrary<'a> for SealedBlockWithSenders {
     fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
-        let block = SealedBlock::arbitrary(u)?;
+        let block: SealedBlock = SealedBlock::arbitrary(u)?;
 
         let senders = block
             .body
@@ -608,10 +641,12 @@ impl BlockBody {
     pub fn transactions(&self) -> impl Iterator<Item = &TransactionSigned> + '_ {
         self.transactions.iter()
     }
+}
 
+impl InMemorySize for BlockBody {
     /// Calculates a heuristic for the in-memory size of the [`BlockBody`].
#[inline] - pub fn size(&self) -> usize { + fn size(&self) -> usize { self.transactions.iter().map(TransactionSigned::size).sum::() + self.transactions.capacity() * core::mem::size_of::() + self.ommers.iter().map(Header::size).sum::() + @@ -658,8 +693,9 @@ impl<'a> arbitrary::Arbitrary<'a> for BlockBody { pub(super) mod serde_bincode_compat { use alloc::{borrow::Cow, vec::Vec}; use alloy_consensus::serde_bincode_compat::Header; + use alloy_eips::eip4895::Withdrawals; use alloy_primitives::Address; - use reth_primitives_traits::{serde_bincode_compat::SealedHeader, Withdrawals}; + use reth_primitives_traits::serde_bincode_compat::SealedHeader; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; @@ -906,8 +942,11 @@ pub(super) mod serde_bincode_compat { #[cfg(test)] mod tests { - use super::{BlockNumberOrTag::*, *}; - use alloy_eips::eip1898::HexStringMissingPrefixError; + use super::*; + use alloy_eips::{ + eip1898::HexStringMissingPrefixError, BlockId, BlockNumberOrTag, BlockNumberOrTag::*, + RpcBlockHash, + }; use alloy_primitives::hex_literal::hex; use alloy_rlp::{Decodable, Encodable}; use std::str::FromStr; @@ -1084,7 +1123,7 @@ mod tests { #[test] fn test_default_seal() { - let block = SealedBlock::default(); + let block: SealedBlock = SealedBlock::default(); let sealed = block.hash(); let block = block.unseal(); let block = block.seal_slow(); diff --git a/crates/primitives/src/constants/eip4844.rs b/crates/primitives/src/constants/eip4844.rs deleted file mode 100644 index 14e892adfbeb..000000000000 --- a/crates/primitives/src/constants/eip4844.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#parameters) protocol constants and utils for shard Blob Transactions. - -pub use alloy_eips::eip4844::{ - BLOB_GASPRICE_UPDATE_FRACTION, BLOB_TX_MIN_BLOB_GASPRICE, DATA_GAS_PER_BLOB, - FIELD_ELEMENTS_PER_BLOB, FIELD_ELEMENT_BYTES, MAX_BLOBS_PER_BLOCK, MAX_DATA_GAS_PER_BLOCK, - TARGET_BLOBS_PER_BLOCK, TARGET_DATA_GAS_PER_BLOCK, VERSIONED_HASH_VERSION_KZG, -}; diff --git a/crates/primitives/src/constants/mod.rs b/crates/primitives/src/constants/mod.rs index fd1dc1586248..09c488cc25ad 100644 --- a/crates/primitives/src/constants/mod.rs +++ b/crates/primitives/src/constants/mod.rs @@ -1,6 +1,3 @@ //! Ethereum protocol-related constants pub use reth_primitives_traits::constants::*; - -/// [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#parameters) constants. 
-pub mod eip4844; diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 4e3f1d3bd249..534b525f0869 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -33,10 +33,7 @@ pub use reth_static_file_types as static_file; pub mod transaction; #[cfg(any(test, feature = "arbitrary"))] pub use block::{generate_valid_header, valid_header_strategy}; -pub use block::{ - Block, BlockBody, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, BlockWithSenders, - ForkBlock, RpcBlockHash, SealedBlock, SealedBlockWithSenders, -}; +pub use block::{Block, BlockBody, BlockWithSenders, SealedBlock, SealedBlockWithSenders}; #[cfg(feature = "reth-codec")] pub use compression::*; pub use constants::HOLESKY_GENESIS_HASH; @@ -44,22 +41,18 @@ pub use receipt::{ gas_spent_by_transactions, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts, }; pub use reth_primitives_traits::{ - logs_bloom, Account, Bytecode, GotExpected, GotExpectedBoxed, Header, HeaderError, Log, - LogData, SealedHeader, StorageEntry, Withdrawal, Withdrawals, + logs_bloom, Account, Bytecode, GotExpected, GotExpectedBoxed, HeaderError, Log, LogData, + SealedHeader, StorageEntry, }; pub use static_file::StaticFileSegment; pub use transaction::{ - BlobTransaction, BlobTransactionSidecar, PooledTransactionsElement, - PooledTransactionsElementEcRecovered, + BlobTransaction, PooledTransactionsElement, PooledTransactionsElementEcRecovered, }; -#[cfg(feature = "c-kzg")] -pub use transaction::BlobTransactionValidationError; - pub use transaction::{ util::secp256k1::{public_key_to_address, recover_signer_unchecked, sign_message}, - InvalidTransactionError, Signature, Transaction, TransactionMeta, TransactionSigned, + InvalidTransactionError, Transaction, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxHashOrNumber, TxType, }; diff --git a/crates/primitives/src/proofs.rs b/crates/primitives/src/proofs.rs index 1697246702a4..1712112281f4 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -1,13 +1,11 @@ //! Helper function for calculating Merkle proofs and hashes. -use crate::{ - Header, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, TransactionSigned, Withdrawal, -}; +use crate::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef, TransactionSigned}; use alloc::vec::Vec; -use alloy_consensus::EMPTY_OMMER_ROOT_HASH; -use alloy_eips::eip2718::Encodable2718; +use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; +use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawal}; use alloy_primitives::{keccak256, B256}; -use reth_trie_common::root::{ordered_trie_root, ordered_trie_root_with_encoder}; +use alloy_trie::root::{ordered_trie_root, ordered_trie_root_with_encoder}; /// Calculate a transaction root. 
///
diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs
index 940b491e3351..3258d4be6ebd 100644
--- a/crates/primitives/src/receipt.rs
+++ b/crates/primitives/src/receipt.rs
@@ -1,19 +1,21 @@
-#[cfg(feature = "reth-codec")]
-use crate::compression::{RECEIPT_COMPRESSOR, RECEIPT_DECOMPRESSOR};
-use crate::TxType;
 use alloc::{vec, vec::Vec};
-use alloy_consensus::constants::{
-    EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID,
+use core::{cmp::Ordering, ops::Deref};
+
+use alloy_consensus::{
+    constants::{EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID},
+    Eip658Value, TxReceipt,
 };
-use alloy_primitives::{Bloom, Bytes, Log, B256};
+use alloy_eips::eip2718::Encodable2718;
+use alloy_primitives::{Bloom, Log, B256};
 use alloy_rlp::{length_of_length, Decodable, Encodable, RlpDecodable, RlpEncodable};
 use bytes::{Buf, BufMut};
-use core::{cmp::Ordering, ops::Deref};
 use derive_more::{DerefMut, From, IntoIterator};
-#[cfg(feature = "reth-codec")]
-use reth_codecs::Compact;
 use serde::{Deserialize, Serialize};
 
+#[cfg(feature = "reth-codec")]
+use crate::compression::{RECEIPT_COMPRESSOR, RECEIPT_DECOMPRESSOR};
+use crate::TxType;
+
 /// Receipt containing result of transaction execution.
 #[derive(
     Clone, Debug, PartialEq, Eq, Default, RlpEncodable, RlpDecodable, Serialize, Deserialize,
@@ -65,6 +67,42 @@ impl Receipt {
     }
 }
 
+// todo: replace with alloy receipt
+impl TxReceipt for Receipt {
+    fn status_or_post_state(&self) -> Eip658Value {
+        self.success.into()
+    }
+
+    fn status(&self) -> bool {
+        self.success
+    }
+
+    fn bloom(&self) -> Bloom {
+        alloy_primitives::logs_bloom(self.logs.iter())
+    }
+
+    fn cumulative_gas_used(&self) -> u128 {
+        self.cumulative_gas_used as u128
+    }
+
+    fn logs(&self) -> &[Log] {
+        &self.logs
+    }
+}
+
+impl reth_primitives_traits::Receipt for Receipt {
+    fn tx_type(&self) -> u8 {
+        self.tx_type as u8
+    }
+
+    fn receipts_root(_receipts: &[&Self]) -> B256 {
+        #[cfg(feature = "optimism")]
+        panic!("This should not be called in optimism mode. Use `optimism_receipts_root_slow` instead.");
+        #[cfg(not(feature = "optimism"))]
+        crate::proofs::calculate_receipt_root_no_memo(_receipts)
+    }
+}
+
 /// A collection of receipts organized as a two-dimensional vector.
 #[derive(
     Clone,
@@ -79,43 +117,43 @@ impl Receipt {
     DerefMut,
     IntoIterator,
 )]
-pub struct Receipts {
+pub struct Receipts<T = Receipt> {
     /// A two-dimensional vector of optional `Receipt` instances.
-    pub receipt_vec: Vec<Vec<Option<Receipt>>>,
+    pub receipt_vec: Vec<Vec<Option<T>>>,
 }
 
-impl Receipts {
+impl<T> Receipts<T> {
     /// Returns the length of the `Receipts` vector.
     pub fn len(&self) -> usize {
         self.receipt_vec.len()
     }
 
-    /// Returns `true` if the `Receipts` vector is empty.
+    /// Returns true if the `Receipts` vector is empty.
     pub fn is_empty(&self) -> bool {
         self.receipt_vec.is_empty()
     }
 
     /// Push a new vector of receipts into the `Receipts` collection.
-    pub fn push(&mut self, receipts: Vec<Option<Receipt>>) {
+    pub fn push(&mut self, receipts: Vec<Option<T>>) {
         self.receipt_vec.push(receipts);
     }
 
     /// Retrieves all recorded receipts from index and calculates the root using the given closure.
-    pub fn root_slow(&self, index: usize, f: impl FnOnce(&[&Receipt]) -> B256) -> Option<B256> {
+    pub fn root_slow(&self, index: usize, f: impl FnOnce(&[&T]) -> B256) -> Option<B256> {
         let receipts =
             self.receipt_vec[index].iter().map(Option::as_ref).collect::<Option<Vec<_>>>()?;
         Some(f(receipts.as_slice()))
     }
 }
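Reviewer note: `Receipts` is now generic with `Receipt` as the default type parameter, so the same container (and `root_slow`) can be reused for other receipt types, e.g. on the OP side. A quick behavioral sketch (hypothetical test code):

let mut receipts: Receipts = Receipts::default(); // defaults to Receipts<Receipt>
receipts.push(vec![Some(Receipt::default()), None]);
assert_eq!(receipts.len(), 1);
// root_slow yields None as soon as any receipt in the block is missing (e.g. pruned)
assert!(receipts.root_slow(0, |_| B256::ZERO).is_none());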
 
-impl From<Vec<Receipt>> for Receipts {
-    fn from(block_receipts: Vec<Receipt>) -> Self {
+impl<T> From<Vec<T>> for Receipts<T> {
+    fn from(block_receipts: Vec<T>) -> Self {
         Self { receipt_vec: vec![block_receipts.into_iter().map(Option::Some).collect()] }
     }
 }
 
-impl FromIterator<Vec<Option<Receipt>>> for Receipts {
-    fn from_iter<I: IntoIterator<Item = Vec<Option<Receipt>>>>(iter: I) -> Self {
+impl<T> FromIterator<Vec<Option<T>>> for Receipts<T> {
+    fn from_iter<I: IntoIterator<Item = Vec<Option<T>>>>(iter: I) -> Self {
         iter.into_iter().collect::<Vec<_>>().into()
     }
 }
@@ -204,14 +242,20 @@ impl<'a> arbitrary::Arbitrary<'a> for Receipt {
     }
 }
 
-impl ReceiptWithBloom {
-    /// Returns the enveloped encoded receipt.
-    ///
-    /// See also [`ReceiptWithBloom::encode_enveloped`]
-    pub fn envelope_encoded(&self) -> Bytes {
-        let mut buf = Vec::new();
-        self.encode_enveloped(&mut buf);
-        buf.into()
+impl Encodable2718 for ReceiptWithBloom {
+    fn type_flag(&self) -> Option<u8> {
+        match self.receipt.tx_type {
+            TxType::Legacy => None,
+            tx_type => Some(tx_type as u8),
+        }
+    }
+
+    fn encode_2718_len(&self) -> usize {
+        let encoder = self.as_encoder();
+        match self.receipt.tx_type {
+            TxType::Legacy => encoder.receipt_length(),
+            _ => 1 + encoder.receipt_length(), // 1 byte for the type prefix
+        }
     }
 
     /// Encodes the receipt into its "raw" format.
@@ -223,10 +267,18 @@ impl ReceiptWithBloom {
     /// of the receipt:
     /// - EIP-1559, 2930 and 4844 transactions: `tx-type || rlp([status, cumulativeGasUsed,
     ///   logsBloom, logs])`
-    pub fn encode_enveloped(&self, out: &mut dyn bytes::BufMut) {
+    fn encode_2718(&self, out: &mut dyn BufMut) {
         self.encode_inner(out, false)
     }
 
+    fn encoded_2718(&self) -> Vec<u8> {
+        let mut out = vec![];
+        self.encode_2718(&mut out);
+        out
+    }
+}
+
+impl ReceiptWithBloom {
     /// Encode receipt with or without the header data.
pub fn encode_inner(&self, out: &mut dyn BufMut, with_header: bool) { self.as_encoder().encode_inner(out, with_header) @@ -501,7 +553,9 @@ impl Encodable for ReceiptWithBloomEncoder<'_> { #[cfg(test)] mod tests { use super::*; + use crate::revm_primitives::Bytes; use alloy_primitives::{address, b256, bytes, hex_literal::hex}; + use reth_codecs::Compact; #[test] fn test_decode_receipt() { @@ -661,4 +715,50 @@ mod tests { let (decoded, _) = Receipt::from_compact(&data[..], data.len()); assert_eq!(decoded, receipt); } + + #[test] + fn test_encode_2718_length() { + let receipt = ReceiptWithBloom { + receipt: Receipt { + tx_type: TxType::Eip1559, + success: true, + cumulative_gas_used: 21000, + logs: vec![], + #[cfg(feature = "optimism")] + deposit_nonce: None, + #[cfg(feature = "optimism")] + deposit_receipt_version: None, + }, + bloom: Bloom::default(), + }; + + let encoded = receipt.encoded_2718(); + assert_eq!( + encoded.len(), + receipt.encode_2718_len(), + "Encoded length should match the actual encoded data length" + ); + + // Test for legacy receipt as well + let legacy_receipt = ReceiptWithBloom { + receipt: Receipt { + tx_type: TxType::Legacy, + success: true, + cumulative_gas_used: 21000, + logs: vec![], + #[cfg(feature = "optimism")] + deposit_nonce: None, + #[cfg(feature = "optimism")] + deposit_receipt_version: None, + }, + bloom: Bloom::default(), + }; + + let legacy_encoded = legacy_receipt.encoded_2718(); + assert_eq!( + legacy_encoded.len(), + legacy_receipt.encode_2718_len(), + "Encoded length for legacy receipt should match the actual encoded data length" + ); + } } diff --git a/crates/primitives/src/transaction/compat.rs b/crates/primitives/src/transaction/compat.rs index 81281186f64c..883c89c45f51 100644 --- a/crates/primitives/src/transaction/compat.rs +++ b/crates/primitives/src/transaction/compat.rs @@ -1,5 +1,7 @@ use crate::{Transaction, TransactionSigned}; use alloy_primitives::{Address, TxKind, U256}; +#[cfg(feature = "optimism")] +use op_alloy_consensus::DepositTransaction; use revm_primitives::{AuthorizationList, TxEnv}; /// Implements behaviour to fill a [`TxEnv`] from another transaction. diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index b09fff9e2b6a..d1a95b09be24 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1,26 +1,32 @@ //! Transaction types. 
-use crate::BlockHashOrNumber;
 #[cfg(any(test, feature = "reth-codec"))]
 use alloy_consensus::constants::{EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID};
 use alloy_consensus::{
-    SignableTransaction, Transaction as _, TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy,
+    transaction::RlpEcdsaTx, SignableTransaction, Transaction as _, TxEip1559, TxEip2930,
+    TxEip4844, TxEip7702, TxLegacy,
 };
 use alloy_eips::{
+    eip1898::BlockHashOrNumber,
     eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718},
     eip2930::AccessList,
     eip7702::SignedAuthorization,
 };
-use alloy_primitives::{keccak256, Address, Bytes, ChainId, TxHash, TxKind, B256, U256};
+use alloy_primitives::{
+    keccak256, Address, Bytes, ChainId, PrimitiveSignature as Signature, TxHash, TxKind, B256, U256,
+};
 use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header};
 use core::mem;
 use derive_more::{AsRef, Deref};
 use once_cell as _;
 #[cfg(not(feature = "std"))]
 use once_cell::sync::Lazy as LazyLock;
+#[cfg(feature = "optimism")]
+use op_alloy_consensus::DepositTransaction;
 use rayon::prelude::{IntoParallelIterator, ParallelIterator};
+use reth_primitives_traits::InMemorySize;
 use serde::{Deserialize, Serialize};
-use signature::{decode_with_eip155_chain_id, with_eip155_parity};
+use signature::decode_with_eip155_chain_id;
 #[cfg(feature = "std")]
 use std::sync::LazyLock;
@@ -29,16 +35,10 @@ pub use error::{
 };
 pub use meta::TransactionMeta;
 pub use pooled::{PooledTransactionsElement, PooledTransactionsElementEcRecovered};
-#[cfg(all(feature = "c-kzg", any(test, feature = "arbitrary")))]
-pub use sidecar::generate_blob_sidecar;
-#[cfg(feature = "c-kzg")]
-pub use sidecar::BlobTransactionValidationError;
-pub use sidecar::{BlobTransaction, BlobTransactionSidecar};
+pub use sidecar::BlobTransaction;
 
 pub use compat::FillTxEnv;
-pub use signature::{
-    extract_chain_id, legacy_parity, recover_signer, recover_signer_unchecked, Signature,
-};
+pub use signature::{recover_signer, recover_signer_unchecked};
 pub use tx_type::TxType;
 pub use variant::TransactionSignedVariant;
@@ -48,8 +48,12 @@ mod error;
 mod meta;
 mod pooled;
 mod sidecar;
-mod signature;
 mod tx_type;
+
+/// Handling transaction signature operations, including signature recovery,
+/// applying chain IDs, and EIP-2 validation.
+pub mod signature;
+
 pub(crate) mod util;
 mod variant;
@@ -63,17 +67,16 @@ use tx_type::{
     COMPACT_IDENTIFIER_LEGACY,
 };
 
-#[cfg(test)]
-use reth_codecs::Compact;
-
 use alloc::vec::Vec;
+use reth_primitives_traits::{transaction::TransactionExt, SignedTransaction};
+use revm_primitives::{AuthorizationList, TxEnv};
 
 /// Either a transaction hash or number.
 pub type TxHashOrNumber = BlockHashOrNumber;
 
-// Expected number of transactions where we can expect a speed-up by recovering the senders in
-// parallel.
-pub(crate) static PARALLEL_SENDER_RECOVERY_THRESHOLD: LazyLock<usize> =
+/// Expected number of transactions where we can expect a speed-up by recovering the senders in
+/// parallel.
+pub static PARALLEL_SENDER_RECOVERY_THRESHOLD: LazyLock<usize> =
     LazyLock::new(|| match rayon::current_num_threads() {
         0..=1 => usize::MAX,
         2..=8 => 10,
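Reviewer note: promoting `PARALLEL_SENDER_RECOVERY_THRESHOLD` from `pub(crate)` to `pub` lets downstream crates reuse the same heuristic. A hedged consumer sketch (the import path is an assumption based on the module layout above):

use rayon::prelude::*;
use reth_primitives::transaction::PARALLEL_SENDER_RECOVERY_THRESHOLD; // assumed re-export path

fn recover_senders(txs: &[TransactionSigned]) -> Vec<Option<Address>> {
    if txs.len() < *PARALLEL_SENDER_RECOVERY_THRESHOLD {
        // small batch: sequential recovery avoids rayon fork/join overhead
        txs.iter().map(|tx| tx.recover_signer()).collect()
    } else {
        // large batch: fan out across the rayon pool
        txs.par_iter().map(|tx| tx.recover_signer()).collect()
    }
}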
@@ -136,6 +139,31 @@ pub enum Transaction {
     Deposit(TxDeposit),
 }
 
+#[cfg(feature = "optimism")]
+impl DepositTransaction for Transaction {
+    fn source_hash(&self) -> Option<B256> {
+        match self {
+            Self::Deposit(tx) => tx.source_hash(),
+            _ => None,
+        }
+    }
+    fn mint(&self) -> Option<u128> {
+        match self {
+            Self::Deposit(tx) => tx.mint(),
+            _ => None,
+        }
+    }
+    fn is_system_transaction(&self) -> bool {
+        match self {
+            Self::Deposit(tx) => tx.is_system_transaction(),
+            _ => false,
+        }
+    }
+    fn is_deposit(&self) -> bool {
+        matches!(self, Self::Deposit(_))
+    }
+}
+
 #[cfg(any(test, feature = "arbitrary"))]
 impl<'a> arbitrary::Arbitrary<'a> for Transaction {
     fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
@@ -297,26 +325,11 @@ impl Transaction {
     /// transaction.
     ///
     /// This is the number of blobs times the
-    /// [`DATA_GAS_PER_BLOB`](crate::constants::eip4844::DATA_GAS_PER_BLOB) a single blob consumes.
+    /// [`DATA_GAS_PER_BLOB`](alloy_eips::eip4844::DATA_GAS_PER_BLOB) a single blob consumes.
     pub fn blob_gas_used(&self) -> Option<u64> {
         self.as_eip4844().map(TxEip4844::blob_gas)
     }
 
-    /// Returns the effective gas price for the given base fee.
-    ///
-    /// If the transaction is a legacy or EIP2930 transaction, the gas price is returned.
-    pub const fn effective_gas_price(&self, base_fee: Option<u64>) -> u128 {
-        match self {
-            Self::Legacy(tx) => tx.gas_price,
-            Self::Eip2930(tx) => tx.gas_price,
-            Self::Eip1559(dynamic_tx) => dynamic_tx.effective_gas_price(base_fee),
-            Self::Eip4844(dynamic_tx) => dynamic_tx.effective_gas_price(base_fee),
-            Self::Eip7702(dynamic_tx) => dynamic_tx.effective_gas_price(base_fee),
-            #[cfg(feature = "optimism")]
-            Self::Deposit(_) => 0,
-        }
-    }
-
     /// Returns the effective miner gas tip cap (`gasTipCap`) for the given base fee:
     /// `min(maxFeePerGas - baseFee, maxPriorityFeePerGas)`
     ///
@@ -361,76 +374,39 @@ impl Transaction {
         }
     }
 
-    /// Returns the source hash of the transaction, which uniquely identifies its source.
-    /// If not a deposit transaction, this will always return `None`.
-    #[cfg(feature = "optimism")]
-    pub const fn source_hash(&self) -> Option<B256> {
-        match self {
-            Self::Deposit(TxDeposit { source_hash, .. }) => Some(*source_hash),
-            _ => None,
-        }
-    }
-
-    /// Returns the amount of ETH locked up on L1 that will be minted on L2. If the transaction
-    /// is not a deposit transaction, this will always return `None`.
-    #[cfg(feature = "optimism")]
-    pub const fn mint(&self) -> Option<u128> {
-        match self {
-            Self::Deposit(TxDeposit { mint, .. }) => *mint,
-            _ => None,
-        }
-    }
-
-    /// Returns whether or not the transaction is a system transaction. If the transaction
-    /// is not a deposit transaction, this will always return `false`.
-    #[cfg(feature = "optimism")]
-    pub const fn is_system_transaction(&self) -> bool {
-        match self {
-            Self::Deposit(TxDeposit { is_system_transaction, .. }) => *is_system_transaction,
-            _ => false,
-        }
-    }
-
-    /// Returns whether or not the transaction is an Optimism Deposited transaction.
-    #[cfg(feature = "optimism")]
-    pub const fn is_deposit(&self) -> bool {
-        matches!(self, Self::Deposit(_))
-    }
-
     /// This encodes the transaction _without_ the signature, and is only suitable for creating a
     /// hash intended for signing.
- pub fn encode_without_signature(&self, out: &mut dyn bytes::BufMut) { - Encodable::encode(self, out); + pub fn encode_for_signing(&self, out: &mut dyn bytes::BufMut) { + match self { + Self::Legacy(tx) => tx.encode_for_signing(out), + Self::Eip2930(tx) => tx.encode_for_signing(out), + Self::Eip1559(tx) => tx.encode_for_signing(out), + Self::Eip4844(tx) => tx.encode_for_signing(out), + Self::Eip7702(tx) => tx.encode_for_signing(out), + #[cfg(feature = "optimism")] + Self::Deposit(_) => {} + } } - /// Inner encoding function that is used for both rlp [`Encodable`] trait and for calculating - /// hash that for eip2718 does not require rlp header - pub fn encode_with_signature( - &self, - signature: &Signature, - out: &mut dyn bytes::BufMut, - with_header: bool, - ) { + /// Produces EIP-2718 encoding of the transaction + pub fn eip2718_encode(&self, signature: &Signature, out: &mut dyn bytes::BufMut) { match self { Self::Legacy(legacy_tx) => { // do nothing w/ with_header - legacy_tx.encode_with_signature_fields( - &with_eip155_parity(signature, legacy_tx.chain_id), - out, - ) + legacy_tx.eip2718_encode(signature, out); } Self::Eip2930(access_list_tx) => { - access_list_tx.encode_with_signature(signature, out, with_header) + access_list_tx.eip2718_encode(signature, out); } Self::Eip1559(dynamic_fee_tx) => { - dynamic_fee_tx.encode_with_signature(signature, out, with_header) + dynamic_fee_tx.eip2718_encode(signature, out); } - Self::Eip4844(blob_tx) => blob_tx.encode_with_signature(signature, out, with_header), + Self::Eip4844(blob_tx) => blob_tx.eip2718_encode(signature, out), Self::Eip7702(set_code_tx) => { - set_code_tx.encode_with_signature(signature, out, with_header) + set_code_tx.eip2718_encode(signature, out); } #[cfg(feature = "optimism")] - Self::Deposit(deposit_tx) => deposit_tx.encode_inner(out, with_header), + Self::Deposit(deposit_tx) => deposit_tx.eip2718_encode(out), } } @@ -486,20 +462,6 @@ impl Transaction { } } - /// Calculates a heuristic for the in-memory size of the [Transaction]. - #[inline] - pub fn size(&self) -> usize { - match self { - Self::Legacy(tx) => tx.size(), - Self::Eip2930(tx) => tx.size(), - Self::Eip1559(tx) => tx.size(), - Self::Eip4844(tx) => tx.size(), - Self::Eip7702(tx) => tx.size(), - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.size(), - } - } - /// Returns true if the transaction is a legacy transaction. #[inline] pub const fn is_legacy(&self) -> bool { @@ -571,6 +533,22 @@ impl Transaction { } } +impl InMemorySize for Transaction { + /// Calculates a heuristic for the in-memory size of the [Transaction]. + #[inline] + fn size(&self) -> usize { + match self { + Self::Legacy(tx) => tx.size(), + Self::Eip2930(tx) => tx.size(), + Self::Eip1559(tx) => tx.size(), + Self::Eip4844(tx) => tx.size(), + Self::Eip7702(tx) => tx.size(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.size(), + } + } +} + #[cfg(any(test, feature = "reth-codec"))] impl reth_codecs::Compact for Transaction { // Serializes the TxType to the buffer if necessary, returning 2 bits of the type as an @@ -665,46 +643,6 @@ impl Default for Transaction { } } -impl Encodable for Transaction { - /// This encodes the transaction _without_ the signature, and is only suitable for creating a - /// hash intended for signing. 
- fn encode(&self, out: &mut dyn bytes::BufMut) { - match self { - Self::Legacy(legacy_tx) => { - legacy_tx.encode_for_signing(out); - } - Self::Eip2930(access_list_tx) => { - access_list_tx.encode_for_signing(out); - } - Self::Eip1559(dynamic_fee_tx) => { - dynamic_fee_tx.encode_for_signing(out); - } - Self::Eip4844(blob_tx) => { - blob_tx.encode_for_signing(out); - } - Self::Eip7702(set_code_tx) => { - set_code_tx.encode_for_signing(out); - } - #[cfg(feature = "optimism")] - Self::Deposit(deposit_tx) => { - deposit_tx.encode_inner(out, true); - } - } - } - - fn length(&self) -> usize { - match self { - Self::Legacy(legacy_tx) => legacy_tx.payload_len_for_signature(), - Self::Eip2930(access_list_tx) => access_list_tx.payload_len_for_signature(), - Self::Eip1559(dynamic_fee_tx) => dynamic_fee_tx.payload_len_for_signature(), - Self::Eip4844(blob_tx) => blob_tx.payload_len_for_signature(), - Self::Eip7702(set_code_tx) => set_code_tx.payload_len_for_signature(), - #[cfg(feature = "optimism")] - Self::Deposit(deposit_tx) => deposit_tx.encoded_len(true), - } - } -} - impl alloy_consensus::Transaction for Transaction { fn chain_id(&self) -> Option { match self { @@ -802,6 +740,39 @@ impl alloy_consensus::Transaction for Transaction { } } + fn effective_gas_price(&self, base_fee: Option) -> u128 { + match self { + Self::Legacy(tx) => tx.effective_gas_price(base_fee), + Self::Eip2930(tx) => tx.effective_gas_price(base_fee), + Self::Eip1559(tx) => tx.effective_gas_price(base_fee), + Self::Eip4844(tx) => tx.effective_gas_price(base_fee), + Self::Eip7702(tx) => tx.effective_gas_price(base_fee), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.effective_gas_price(base_fee), + } + } + + fn is_dynamic_fee(&self) -> bool { + match self { + Self::Legacy(_) | Self::Eip2930(_) => false, + Self::Eip1559(_) | Self::Eip4844(_) | Self::Eip7702(_) => true, + #[cfg(feature = "optimism")] + Self::Deposit(_) => false, + } + } + + fn kind(&self) -> TxKind { + match self { + Self::Legacy(tx) => tx.kind(), + Self::Eip2930(tx) => tx.kind(), + Self::Eip1559(tx) => tx.kind(), + Self::Eip4844(tx) => tx.kind(), + Self::Eip7702(tx) => tx.kind(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.kind(), + } + } + fn value(&self) -> U256 { match self { Self::Legacy(tx) => tx.value(), @@ -873,16 +844,20 @@ impl alloy_consensus::Transaction for Transaction { Self::Deposit(tx) => tx.authorization_list(), } } +} - fn kind(&self) -> TxKind { +impl TransactionExt for Transaction { + type Type = TxType; + + fn signature_hash(&self) -> B256 { match self { - Self::Legacy(tx) => tx.kind(), - Self::Eip2930(tx) => tx.kind(), - Self::Eip1559(tx) => tx.kind(), - Self::Eip4844(tx) => tx.kind(), - Self::Eip7702(tx) => tx.kind(), + Self::Legacy(tx) => tx.signature_hash(), + Self::Eip2930(tx) => tx.signature_hash(), + Self::Eip1559(tx) => tx.signature_hash(), + Self::Eip4844(tx) => tx.signature_hash(), + Self::Eip7702(tx) => tx.signature_hash(), #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.kind(), + _ => todo!("use op type for op"), } } } @@ -907,7 +882,7 @@ impl TransactionSignedNoHash { pub fn hash(&self) -> B256 { // pre-allocate buffer for the transaction let mut buf = Vec::with_capacity(128 + self.transaction.input().len()); - self.transaction.encode_with_signature(&self.signature, &mut buf, false); + self.transaction.eip2718_encode(&self.signature, &mut buf); keccak256(&buf) } @@ -941,7 +916,7 @@ impl TransactionSignedNoHash { /// This makes it possible to import pre bedrock transactions via the sender recovery 
stage.
     pub fn encode_and_recover_unchecked(&self, buffer: &mut Vec<u8>) -> Option<Address> {
         buffer.clear();
-        self.transaction.encode_without_signature(buffer);
+        self.transaction.encode_for_signing(buffer);
 
         // Optimism's Deposit transaction does not have a signature. Directly return the
         // `from` address.
@@ -1050,7 +1025,7 @@ impl reth_codecs::Compact for TransactionSignedNoHash {
         let bitflags = buf.get_u8() as usize;
 
         let sig_bit = bitflags & 1;
-        let (mut signature, buf) = Signature::from_compact(buf, sig_bit);
+        let (signature, buf) = Signature::from_compact(buf, sig_bit);
 
         let zstd_bit = bitflags >> 3;
         let (transaction, buf) = if zstd_bit != 0 {
@@ -1079,10 +1054,6 @@ impl reth_codecs::Compact for TransactionSignedNoHash {
             Transaction::from_compact(buf, transaction_type)
         };
 
-        if matches!(transaction, Transaction::Legacy(_)) {
-            signature = signature.with_parity(legacy_parity(&signature, transaction.chain_id()))
-        }
-
         (Self { signature, transaction }, buf)
     }
 }
@@ -1137,6 +1108,11 @@ impl TransactionSigned {
         &self.signature
     }
 
+    /// Returns a reference to the underlying transaction.
+    pub const fn transaction(&self) -> &Transaction {
+        &self.transaction
+    }
+
     /// Transaction hash. Used to identify transaction.
     pub const fn hash(&self) -> TxHash {
         self.hash
     }
@@ -1350,6 +1326,117 @@ impl TransactionSigned {
     }
 }
 
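Reviewer note: signer recovery and `TxEnv` filling now sit behind the `reth_primitives_traits::SignedTransaction` trait (implemented below), so callers can be written generically instead of against `TransactionSigned` directly. A minimal sketch under that assumption:

use reth_primitives_traits::SignedTransaction;

// hypothetical generic helper: any signed-transaction type will do
fn sender_of<T: SignedTransaction>(tx: &T) -> Option<Address> {
    // EIP-2 validated recovery; `recover_signer_unchecked` would skip the low-s check
    tx.recover_signer()
}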
+impl SignedTransaction for TransactionSigned {
+    type Transaction = Transaction;
+
+    fn tx_hash(&self) -> &TxHash {
+        &self.hash
+    }
+
+    fn transaction(&self) -> &Self::Transaction {
+        &self.transaction
+    }
+
+    fn signature(&self) -> &Signature {
+        &self.signature
+    }
+
+    fn recover_signer(&self) -> Option<Address> {
+        let signature_hash = self.signature_hash();
+        recover_signer(&self.signature, signature_hash)
+    }
+
+    fn recover_signer_unchecked(&self) -> Option<Address>
{ + let signature_hash = self.signature_hash(); + recover_signer_unchecked(&self.signature, signature_hash) + } + + fn from_transaction_and_signature(transaction: Transaction, signature: Signature) -> Self { + let mut initial_tx = Self { transaction, hash: Default::default(), signature }; + initial_tx.hash = initial_tx.recalculate_hash(); + initial_tx + } + + fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address) { + tx_env.caller = sender; + match self.as_ref() { + Transaction::Legacy(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.gas_price); + tx_env.gas_priority_fee = None; + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = tx.chain_id; + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clear(); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = None; + } + Transaction::Eip2930(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.gas_price); + tx_env.gas_priority_fee = None; + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = None; + } + Transaction::Eip1559(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.max_fee_per_gas); + tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = None; + } + Transaction::Eip4844(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.max_fee_per_gas); + tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); + tx_env.transact_to = TxKind::Call(tx.to); + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clone_from(&tx.blob_versioned_hashes); + tx_env.max_fee_per_blob_gas = Some(U256::from(tx.max_fee_per_blob_gas)); + tx_env.authorization_list = None; + } + Transaction::Eip7702(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.max_fee_per_gas); + tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); + tx_env.transact_to = tx.to.into(); + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = + Some(AuthorizationList::Signed(tx.authorization_list.clone())); + } + #[cfg(feature = "optimism")] + Transaction::Deposit(_) => {} + } + } +} + impl alloy_consensus::Transaction for TransactionSigned { fn chain_id(&self) -> Option { self.deref().chain_id() @@ -1383,6 +1470,18 @@ impl alloy_consensus::Transaction for TransactionSigned { self.deref().priority_fee_or_price() } + fn effective_gas_price(&self, base_fee: Option) -> u128 { + self.deref().effective_gas_price(base_fee) + } + + fn is_dynamic_fee(&self) -> bool { + 
self.deref().is_dynamic_fee() + } + + fn kind(&self) -> TxKind { + self.deref().kind() + } + fn value(&self) -> U256 { self.deref().value() } @@ -1406,10 +1505,6 @@ impl alloy_consensus::Transaction for TransactionSigned { fn authorization_list(&self) -> Option<&[SignedAuthorization]> { self.deref().authorization_list() } - - fn kind(&self) -> TxKind { - self.deref().kind() - } } impl From for TransactionSigned { @@ -1482,28 +1577,24 @@ impl Encodable2718 for TransactionSigned { fn encode_2718_len(&self) -> usize { match &self.transaction { - Transaction::Legacy(legacy_tx) => legacy_tx.encoded_len_with_signature( - &with_eip155_parity(&self.signature, legacy_tx.chain_id), - ), + Transaction::Legacy(legacy_tx) => legacy_tx.eip2718_encoded_length(&self.signature), Transaction::Eip2930(access_list_tx) => { - access_list_tx.encoded_len_with_signature(&self.signature, false) + access_list_tx.eip2718_encoded_length(&self.signature) } Transaction::Eip1559(dynamic_fee_tx) => { - dynamic_fee_tx.encoded_len_with_signature(&self.signature, false) - } - Transaction::Eip4844(blob_tx) => { - blob_tx.encoded_len_with_signature(&self.signature, false) + dynamic_fee_tx.eip2718_encoded_length(&self.signature) } + Transaction::Eip4844(blob_tx) => blob_tx.eip2718_encoded_length(&self.signature), Transaction::Eip7702(set_code_tx) => { - set_code_tx.encoded_len_with_signature(&self.signature, false) + set_code_tx.eip2718_encoded_length(&self.signature) } #[cfg(feature = "optimism")] - Transaction::Deposit(deposit_tx) => deposit_tx.encoded_len(false), + Transaction::Deposit(deposit_tx) => deposit_tx.eip2718_encoded_length(), } } fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { - self.transaction.encode_with_signature(&self.signature, out, false) + self.transaction.eip2718_encode(&self.signature, out) } } @@ -1512,24 +1603,24 @@ impl Decodable2718 for TransactionSigned { match ty.try_into().map_err(|_| Eip2718Error::UnexpectedType(ty))? 
{ TxType::Legacy => Err(Eip2718Error::UnexpectedType(0)), TxType::Eip2930 => { - let (tx, signature, hash) = TxEip2930::decode_signed_fields(buf)?.into_parts(); + let (tx, signature, hash) = TxEip2930::rlp_decode_signed(buf)?.into_parts(); Ok(Self { transaction: Transaction::Eip2930(tx), signature, hash }) } TxType::Eip1559 => { - let (tx, signature, hash) = TxEip1559::decode_signed_fields(buf)?.into_parts(); + let (tx, signature, hash) = TxEip1559::rlp_decode_signed(buf)?.into_parts(); Ok(Self { transaction: Transaction::Eip1559(tx), signature, hash }) } TxType::Eip7702 => { - let (tx, signature, hash) = TxEip7702::decode_signed_fields(buf)?.into_parts(); + let (tx, signature, hash) = TxEip7702::rlp_decode_signed(buf)?.into_parts(); Ok(Self { transaction: Transaction::Eip7702(tx), signature, hash }) } TxType::Eip4844 => { - let (tx, signature, hash) = TxEip4844::decode_signed_fields(buf)?.into_parts(); + let (tx, signature, hash) = TxEip4844::rlp_decode_signed(buf)?.into_parts(); Ok(Self { transaction: Transaction::Eip4844(tx), signature, hash }) } #[cfg(feature = "optimism")] TxType::Deposit => Ok(Self::from_transaction_and_signature( - Transaction::Deposit(TxDeposit::decode(buf)?), + Transaction::Deposit(TxDeposit::rlp_decode(buf)?), TxDeposit::signature(), )), } @@ -1548,22 +1639,12 @@ impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned { let secp = secp256k1::Secp256k1::new(); let key_pair = secp256k1::Keypair::new(&secp, &mut rand::thread_rng()); - let mut signature = crate::sign_message( + let signature = crate::sign_message( B256::from_slice(&key_pair.secret_bytes()[..]), transaction.signature_hash(), ) .unwrap(); - signature = if matches!(transaction, Transaction::Legacy(_)) { - if let Some(chain_id) = transaction.chain_id() { - signature.with_chain_id(chain_id) - } else { - signature.with_parity(alloy_primitives::Parity::NonEip155(bool::arbitrary(u)?)) - } - } else { - signature.with_parity_bool() - }; - #[cfg(feature = "optimism")] // Both `Some(0)` and `None` values are encoded as empty string byte. This introduces // ambiguity in roundtrip tests. 
Patch the mint value of deposit transaction here, so that @@ -1711,9 +1792,7 @@ pub mod serde_bincode_compat { transaction::serde_bincode_compat::{TxEip1559, TxEip2930, TxEip7702, TxLegacy}, TxEip4844, }; - use alloy_primitives::{Signature, TxHash}; - #[cfg(feature = "optimism")] - use op_alloy_consensus::serde_bincode_compat::TxDeposit; + use alloy_primitives::{PrimitiveSignature as Signature, TxHash}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; @@ -1741,8 +1820,7 @@ pub mod serde_bincode_compat { Eip4844(Cow<'a, TxEip4844>), Eip7702(TxEip7702<'a>), #[cfg(feature = "optimism")] - #[cfg(feature = "optimism")] - Deposit(TxDeposit<'a>), + Deposit(op_alloy_consensus::serde_bincode_compat::TxDeposit<'a>), } impl<'a> From<&'a super::Transaction> for Transaction<'a> { @@ -1754,7 +1832,9 @@ pub mod serde_bincode_compat { super::Transaction::Eip4844(tx) => Self::Eip4844(Cow::Borrowed(tx)), super::Transaction::Eip7702(tx) => Self::Eip7702(TxEip7702::from(tx)), #[cfg(feature = "optimism")] - super::Transaction::Deposit(tx) => Self::Deposit(TxDeposit::from(tx)), + super::Transaction::Deposit(tx) => { + Self::Deposit(op_alloy_consensus::serde_bincode_compat::TxDeposit::from(tx)) + } } } } @@ -1857,7 +1937,6 @@ pub mod serde_bincode_compat { #[cfg(test)] mod tests { use super::super::{serde_bincode_compat, Transaction, TransactionSigned}; - use arbitrary::Arbitrary; use rand::Rng; use reth_testing_utils::generators; @@ -1913,12 +1992,14 @@ pub mod serde_bincode_compat { #[cfg(test)] mod tests { use crate::{ - transaction::{signature::Signature, TxEip1559, TxKind, TxLegacy}, + transaction::{TxEip1559, TxKind, TxLegacy}, Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, }; use alloy_consensus::Transaction as _; use alloy_eips::eip2718::{Decodable2718, Encodable2718}; - use alloy_primitives::{address, b256, bytes, hex, Address, Bytes, Parity, B256, U256}; + use alloy_primitives::{ + address, b256, bytes, hex, Address, Bytes, PrimitiveSignature as Signature, B256, U256, + }; use alloy_rlp::{Decodable, Encodable, Error as RlpError}; use reth_chainspec::MIN_TRANSACTION_GAS; use reth_codecs::Compact; @@ -2026,7 +2107,7 @@ mod tests { .unwrap(), U256::from_str("0x3a456401896b1b6055311536bf00a718568c744d8c1f9df59879e8350220ca18") .unwrap(), - Parity::Eip155(43), + false, ); let hash = b256!("a517b206d2223278f860ea017d3626cacad4f52ff51030dc9a96b432f17f8d34"); test_decode_and_encode(&bytes, transaction, signature, Some(hash)); @@ -2046,7 +2127,7 @@ mod tests { .unwrap(), U256::from_str("0x5406ad177223213df262cb66ccbb2f46bfdccfdfbbb5ffdda9e2c02d977631da") .unwrap(), - Parity::Eip155(43), + false, ); test_decode_and_encode(&bytes, transaction, signature, None); @@ -2065,7 +2146,7 @@ mod tests { .unwrap(), U256::from_str("0x3ca3ae86580e94550d7c071e3a02eadb5a77830947c9225165cf9100901bee88") .unwrap(), - Parity::Eip155(43), + false, ); test_decode_and_encode(&bytes, transaction, signature, None); @@ -2086,7 +2167,7 @@ mod tests { .unwrap(), U256::from_str("0x016b83f4f980694ed2eee4d10667242b1f40dc406901b34125b008d334d47469") .unwrap(), - Parity::Parity(true), + true, ); test_decode_and_encode(&bytes, transaction, signature, None); @@ -2105,7 +2186,7 @@ mod tests { .unwrap(), U256::from_str("0x612638fb29427ca33b9a3be2a0a561beecfe0269655be160d35e72d366a6a860") .unwrap(), - Parity::Eip155(44), + true, ); test_decode_and_encode(&bytes, transaction, signature, None); } @@ -2269,7 +2350,7 @@ mod tests { .unwrap(), 
U256::from_str("0x3a456401896b1b6055311536bf00a718568c744d8c1f9df59879e8350220ca18") .unwrap(), - Parity::Eip155(43), + false, ); let inputs: Vec> = vec![ diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 32d4da65980e..86cd40a8fe6b 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -1,22 +1,17 @@ //! Defines the types for blob transactions, legacy, and other EIP-2718 transactions included in a //! response to `GetPooledTransactions`. -use super::{ - error::TransactionConversionError, - signature::{recover_signer, with_eip155_parity}, - TxEip7702, -}; -use crate::{ - BlobTransaction, BlobTransactionSidecar, Signature, Transaction, TransactionSigned, - TransactionSignedEcRecovered, -}; +use super::{error::TransactionConversionError, signature::recover_signer, TxEip7702}; +use crate::{BlobTransaction, Transaction, TransactionSigned, TransactionSignedEcRecovered}; +use alloy_eips::eip4844::BlobTransactionSidecar; + use alloy_consensus::{ constants::EIP4844_TX_TYPE_ID, - transaction::{TxEip1559, TxEip2930, TxEip4844, TxLegacy}, + transaction::{RlpEcdsaTx, TxEip1559, TxEip2930, TxEip4844, TxLegacy}, SignableTransaction, TxEip4844WithSidecar, }; use alloy_eips::eip2718::{Decodable2718, Eip2718Result, Encodable2718}; -use alloy_primitives::{Address, TxHash, B256}; +use alloy_primitives::{Address, PrimitiveSignature as Signature, TxHash, B256}; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; use bytes::Buf; use derive_more::{AsRef, Deref}; @@ -264,7 +259,7 @@ impl PooledTransactionsElement { /// transaction. /// /// This is the number of blobs times the - /// [`DATA_GAS_PER_BLOB`](crate::constants::eip4844::DATA_GAS_PER_BLOB) a single blob consumes. + /// [`DATA_GAS_PER_BLOB`](alloy_eips::eip4844::DATA_GAS_PER_BLOB) a single blob consumes. pub fn blob_gas_used(&self) -> Option { self.as_eip4844().map(TxEip4844::blob_gas) } @@ -403,62 +398,46 @@ impl Encodable2718 for PooledTransactionsElement { fn encode_2718_len(&self) -> usize { match self { Self::Legacy { transaction, signature, .. } => { - // method computes the payload len with a RLP header - transaction.encoded_len_with_signature(&with_eip155_parity( - signature, - transaction.chain_id, - )) + transaction.eip2718_encoded_length(signature) } Self::Eip2930 { transaction, signature, .. } => { - // method computes the payload len without a RLP header - transaction.encoded_len_with_signature(signature, false) + transaction.eip2718_encoded_length(signature) } Self::Eip1559 { transaction, signature, .. } => { - // method computes the payload len without a RLP header - transaction.encoded_len_with_signature(signature, false) + transaction.eip2718_encoded_length(signature) } Self::Eip7702 { transaction, signature, .. } => { - // method computes the payload len without a RLP header - transaction.encoded_len_with_signature(signature, false) + transaction.eip2718_encoded_length(signature) } - Self::BlobTransaction(blob_tx) => { - // the encoding does not use a header, so we set `with_header` to false - blob_tx.payload_len_with_type(false) + Self::BlobTransaction(BlobTransaction { transaction, signature, .. }) => { + transaction.eip2718_encoded_length(signature) } } } fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { - // The encoding of `tx-data` depends on the transaction type. 
Refer to these docs for more - // information on the exact format: - // - Legacy: TxLegacy::encode_with_signature - // - EIP-2930: TxEip2930::encode_with_signature - // - EIP-1559: TxEip1559::encode_with_signature - // - EIP-4844: BlobTransaction::encode_with_type_inner - // - EIP-7702: TxEip7702::encode_with_signature match self { - Self::Legacy { transaction, signature, .. } => transaction - .encode_with_signature_fields( - &with_eip155_parity(signature, transaction.chain_id), - out, - ), + Self::Legacy { transaction, signature, .. } => { + transaction.eip2718_encode(signature, out) + } Self::Eip2930 { transaction, signature, .. } => { - transaction.encode_with_signature(signature, out, false) + transaction.eip2718_encode(signature, out) } Self::Eip1559 { transaction, signature, .. } => { - transaction.encode_with_signature(signature, out, false) + transaction.eip2718_encode(signature, out) } Self::Eip7702 { transaction, signature, .. } => { - transaction.encode_with_signature(signature, out, false) + transaction.eip2718_encode(signature, out) } - Self::BlobTransaction(blob_tx) => { - // The inner encoding is used with `with_header` set to true, making the final - // encoding: - // `tx_type || rlp([transaction_payload_body, blobs, commitments, proofs]))` - blob_tx.encode_with_type_inner(out, false); + Self::BlobTransaction(BlobTransaction { transaction, signature, .. }) => { + transaction.eip2718_encode(signature, out) } } } + + fn trie_hash(&self) -> B256 { + *self.hash() + } } impl Decodable2718 for PooledTransactionsElement { @@ -546,7 +525,7 @@ impl<'a> arbitrary::Arbitrary<'a> for PooledTransactionsElement { match Self::try_from(tx_signed) { Ok(Self::BlobTransaction(mut tx)) => { // Successfully converted to a BlobTransaction, now generate a sidecar. - tx.transaction.sidecar = crate::BlobTransactionSidecar::arbitrary(u)?; + tx.transaction.sidecar = alloy_eips::eip4844::BlobTransactionSidecar::arbitrary(u)?; Ok(Self::BlobTransaction(tx)) } Ok(tx) => Ok(tx), // Successfully converted, but not a BlobTransaction. diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index e901cbfc08dc..48a02f4e7405 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -1,17 +1,11 @@ #![cfg_attr(docsrs, doc(cfg(feature = "c-kzg")))] -use crate::{Signature, Transaction, TransactionSigned}; -use alloy_consensus::{constants::EIP4844_TX_TYPE_ID, TxEip4844WithSidecar}; -use alloy_primitives::TxHash; -use alloy_rlp::Header; +use crate::{Transaction, TransactionSigned}; +use alloy_consensus::{transaction::RlpEcdsaTx, TxEip4844WithSidecar}; +use alloy_eips::eip4844::BlobTransactionSidecar; +use alloy_primitives::{PrimitiveSignature as Signature, TxHash}; use serde::{Deserialize, Serialize}; -#[doc(inline)] -pub use alloy_eips::eip4844::BlobTransactionSidecar; - -#[cfg(feature = "c-kzg")] -pub use alloy_eips::eip4844::BlobTransactionValidationError; - /// A response to `GetPooledTransactions` that includes blob data, their commitments, and their /// corresponding proofs. /// @@ -58,7 +52,7 @@ impl BlobTransaction { pub fn validate( &self, proof_settings: &c_kzg::KzgSettings, - ) -> Result<(), BlobTransactionValidationError> { + ) -> Result<(), alloy_eips::eip4844::BlobTransactionValidationError> { self.transaction.validate_blob(proof_settings) } @@ -74,107 +68,6 @@ impl BlobTransaction { (transaction, self.transaction.sidecar) } - /// Encodes the [`BlobTransaction`] fields as RLP, with a tx type. 
If `with_header` is `false`, - /// the following will be encoded: - /// `tx_type (0x03) || rlp([transaction_payload_body, blobs, commitments, proofs])` - /// - /// If `with_header` is `true`, the following will be encoded: - /// `rlp(tx_type (0x03) || rlp([transaction_payload_body, blobs, commitments, proofs]))` - /// - /// NOTE: The header will be a byte string header, not a list header. - pub(crate) fn encode_with_type_inner(&self, out: &mut dyn bytes::BufMut, with_header: bool) { - // Calculate the length of: - // `tx_type || rlp([transaction_payload_body, blobs, commitments, proofs])` - // - // to construct and encode the string header - if with_header { - Header { - list: false, - // add one for the tx type - payload_length: 1 + self.payload_len(), - } - .encode(out); - } - - out.put_u8(EIP4844_TX_TYPE_ID); - - // Now we encode the inner blob transaction: - self.encode_inner(out); - } - - /// Encodes the [`BlobTransaction`] fields as RLP, with the following format: - /// `rlp([transaction_payload_body, blobs, commitments, proofs])` - /// - /// where `transaction_payload_body` is a list: - /// `[chain_id, nonce, max_priority_fee_per_gas, ..., y_parity, r, s]` - /// - /// Note: this should be used only when implementing other RLP encoding methods, and does not - /// represent the full RLP encoding of the blob transaction. - pub(crate) fn encode_inner(&self, out: &mut dyn bytes::BufMut) { - self.transaction.encode_with_signature_fields(&self.signature, out); - } - - /// Outputs the length of the RLP encoding of the blob transaction, including the tx type byte, - /// optionally including the length of a wrapping string header. If `with_header` is `false`, - /// the length of the following will be calculated: - /// `tx_type (0x03) || rlp([transaction_payload_body, blobs, commitments, proofs])` - /// - /// If `with_header` is `true`, the length of the following will be calculated: - /// `rlp(tx_type (0x03) || rlp([transaction_payload_body, blobs, commitments, proofs]))` - pub(crate) fn payload_len_with_type(&self, with_header: bool) -> usize { - if with_header { - // Construct a header and use that to calculate the total length - let wrapped_header = Header { - list: false, - // add one for the tx type byte - payload_length: 1 + self.payload_len(), - }; - - // The total length is now the length of the header plus the length of the payload - // (which includes the tx type byte) - wrapped_header.length() + wrapped_header.payload_length - } else { - // Just add the length of the tx type to the payload length - 1 + self.payload_len() - } - } - - /// Outputs the length of the RLP encoding of the blob transaction with the following format: - /// `rlp([transaction_payload_body, blobs, commitments, proofs])` - /// - /// where `transaction_payload_body` is a list: - /// `[chain_id, nonce, max_priority_fee_per_gas, ..., y_parity, r, s]` - /// - /// Note: this should be used only when implementing other RLP encoding length methods, and - /// does not represent the full RLP encoding of the blob transaction. - pub(crate) fn payload_len(&self) -> usize { - // The `transaction_payload_body` length is the length of the fields, plus the length of - // its list header. - let tx_header = Header { - list: true, - payload_length: self.transaction.tx.fields_len() + self.signature.rlp_vrs_len(), - }; - - let tx_length = tx_header.length() + tx_header.payload_length; - - // The payload length is the length of the `tranascation_payload_body` list, plus the - // length of the blobs, commitments, and proofs. 
- let payload_length = tx_length + self.transaction.sidecar.rlp_encoded_fields_length(); - - // We use the calculated payload len to construct the first list header, which encompasses - // everything in the tx - the length of the second, inner list header is part of - // payload_length - let blob_tx_header = Header { list: true, payload_length }; - - // The final length is the length of: - // * the outer blob tx header + - // * the inner tx header + - // * the inner tx fields + - // * the signature fields + - // * the sidecar fields - blob_tx_header.length() + blob_tx_header.payload_length - } - /// Decodes a [`BlobTransaction`] from RLP. This expects the encoding to be: /// `rlp([transaction_payload_body, blobs, commitments, proofs])` /// @@ -185,41 +78,12 @@ impl BlobTransaction { /// represent the full RLP decoding of the `PooledTransactionsElement` type. pub(crate) fn decode_inner(data: &mut &[u8]) -> alloy_rlp::Result { let (transaction, signature, hash) = - TxEip4844WithSidecar::decode_signed_fields(data)?.into_parts(); + TxEip4844WithSidecar::rlp_decode_signed(data)?.into_parts(); Ok(Self { transaction, hash, signature }) } } -/// Generates a [`BlobTransactionSidecar`] structure containing blobs, commitments, and proofs. -#[cfg(all(feature = "c-kzg", any(test, feature = "arbitrary")))] -pub fn generate_blob_sidecar(blobs: Vec) -> BlobTransactionSidecar { - use alloc::vec::Vec; - use alloy_eips::eip4844::env_settings::EnvKzgSettings; - use c_kzg::{KzgCommitment, KzgProof}; - - let kzg_settings = EnvKzgSettings::Default; - - let commitments: Vec = blobs - .iter() - .map(|blob| { - KzgCommitment::blob_to_kzg_commitment(&blob.clone(), kzg_settings.get()).unwrap() - }) - .map(|commitment| commitment.to_bytes()) - .collect(); - - let proofs: Vec = blobs - .iter() - .zip(commitments.iter()) - .map(|(blob, commitment)| { - KzgProof::compute_blob_kzg_proof(blob, commitment, kzg_settings.get()).unwrap() - }) - .map(|proof| proof.to_bytes()) - .collect(); - - BlobTransactionSidecar::from_kzg(blobs, commitments, proofs) -} - #[cfg(all(test, feature = "c-kzg"))] mod tests { use super::*; @@ -251,7 +115,7 @@ mod tests { .unwrap()]; // Generate a BlobTransactionSidecar from the blobs - let sidecar = generate_blob_sidecar(blobs); + let sidecar = BlobTransactionSidecar::try_from_blobs(blobs).unwrap(); // Assert commitment equality assert_eq!( @@ -300,7 +164,7 @@ mod tests { } // Generate a BlobTransactionSidecar from the blobs - let sidecar = generate_blob_sidecar(blobs.clone()); + let sidecar = BlobTransactionSidecar::try_from_blobs(blobs).unwrap(); // Assert sidecar size assert_eq!(sidecar.size(), 524672); @@ -325,7 +189,7 @@ mod tests { .unwrap()]; // Generate a BlobTransactionSidecar from the blobs - let sidecar = generate_blob_sidecar(blobs); + let sidecar = BlobTransactionSidecar::try_from_blobs(blobs).unwrap(); // Create a vector to store the encoded RLP let mut encoded_rlp = Vec::new(); @@ -356,7 +220,7 @@ mod tests { .unwrap()]; // Generate a BlobTransactionSidecar from the blobs - let sidecar = generate_blob_sidecar(blobs); + let sidecar = BlobTransactionSidecar::try_from_blobs(blobs).unwrap(); // Create a vector to store the encoded RLP let mut encoded_rlp = Vec::new(); diff --git a/crates/primitives/src/transaction/signature.rs b/crates/primitives/src/transaction/signature.rs index 5bfdab8e68e9..8fab719947a1 100644 --- a/crates/primitives/src/transaction/signature.rs +++ b/crates/primitives/src/transaction/signature.rs @@ -1,8 +1,7 @@ use crate::transaction::util::secp256k1; -use 
alloy_primitives::{Address, Parity, B256, U256}; -use alloy_rlp::{Decodable, Error as RlpError}; - -pub use alloy_primitives::Signature; +use alloy_consensus::transaction::from_eip155_value; +use alloy_primitives::{Address, PrimitiveSignature as Signature, B256, U256}; +use alloy_rlp::Decodable; /// The order of the secp256k1 curve, divided by two. Signatures that should be checked according /// to EIP-2 should have an S value less than or equal to this. @@ -16,25 +15,23 @@ const SECP256K1N_HALF: U256 = U256::from_be_bytes([ pub(crate) fn decode_with_eip155_chain_id( buf: &mut &[u8], ) -> alloy_rlp::Result<(Signature, Option<u64>)> { - let v: Parity = Decodable::decode(buf)?; + let v = Decodable::decode(buf)?; let r: U256 = Decodable::decode(buf)?; let s: U256 = Decodable::decode(buf)?; - #[cfg(not(feature = "optimism"))] - if matches!(v, Parity::Parity(_)) { - return Err(alloy_rlp::Error::Custom("invalid parity for legacy transaction")); - } - - #[cfg(feature = "optimism")] - // pre bedrock system transactions were sent from the zero address as legacy - // transactions with an empty signature - // - // NOTE: this is very hacky and only relevant for op-mainnet pre bedrock - if matches!(v, Parity::Parity(false)) && r.is_zero() && s.is_zero() { - return Ok((Signature::new(r, s, Parity::Parity(false)), None)) - } + let Some((parity, chain_id)) = from_eip155_value(v) else { + // pre bedrock system transactions were sent from the zero address as legacy + // transactions with an empty signature + // + // NOTE: this is very hacky and only relevant for op-mainnet pre bedrock + #[cfg(feature = "optimism")] + if v == 0 && r.is_zero() && s.is_zero() { + return Ok((Signature::new(r, s, false), None)) + } + return Err(alloy_rlp::Error::Custom("invalid parity for legacy transaction")) + }; - Ok((Signature::new(r, s, v), v.chain_id())) + Ok((Signature::new(r, s, parity), chain_id)) } /// Recover signer from message hash, _without ensuring that the signature has a low `s` @@ -48,7 +45,7 @@ pub fn recover_signer_unchecked(signature: &Signature, hash: B256) -> Option<Address> sig[0..32].copy_from_slice(&signature.r().to_be_bytes::<32>()); sig[32..64].copy_from_slice(&signature.s().to_be_bytes::<32>()); - sig[64] = signature.v().y_parity_byte(); + sig[64] = signature.v() as u8; // NOTE: we are removing error from underlying crypto library as it will restrain primitive // errors and we care only if recovery is passing or not. @@ -68,71 +65,15 @@ pub fn recover_signer(signature: &Signature, hash: B256) -> Option<Address>
{ recover_signer_unchecked(signature, hash) } -/// Returns [Parity] value based on `chain_id` for legacy transaction signature. -#[allow(clippy::missing_const_for_fn)] -pub fn legacy_parity(signature: &Signature, chain_id: Option) -> Parity { - if let Some(chain_id) = chain_id { - Parity::Parity(signature.v().y_parity()).with_chain_id(chain_id) - } else { - #[cfg(feature = "optimism")] - // pre bedrock system transactions were sent from the zero address as legacy - // transactions with an empty signature - // - // NOTE: this is very hacky and only relevant for op-mainnet pre bedrock - if *signature == op_alloy_consensus::TxDeposit::signature() { - return Parity::Parity(false) - } - Parity::NonEip155(signature.v().y_parity()) - } -} - -/// Returns a signature with the given chain ID applied to the `v` value. -pub(crate) fn with_eip155_parity(signature: &Signature, chain_id: Option) -> Signature { - Signature::new(signature.r(), signature.s(), legacy_parity(signature, chain_id)) -} - -/// Outputs (`odd_y_parity`, `chain_id`) from the `v` value. -/// This doesn't check validity of the `v` value for optimism. -#[inline] -pub const fn extract_chain_id(v: u64) -> alloy_rlp::Result<(bool, Option)> { - if v < 35 { - // non-EIP-155 legacy scheme, v = 27 for even y-parity, v = 28 for odd y-parity - if v != 27 && v != 28 { - return Err(RlpError::Custom("invalid Ethereum signature (V is not 27 or 28)")) - } - Ok((v == 28, None)) - } else { - // EIP-155: v = {0, 1} + CHAIN_ID * 2 + 35 - let odd_y_parity = ((v - 35) % 2) != 0; - let chain_id = (v - 35) >> 1; - Ok((odd_y_parity, Some(chain_id))) - } -} - #[cfg(test)] mod tests { - use crate::{ - transaction::signature::{ - legacy_parity, recover_signer, recover_signer_unchecked, SECP256K1N_HALF, - }, - Signature, + use crate::transaction::signature::{ + recover_signer, recover_signer_unchecked, SECP256K1N_HALF, }; use alloy_eips::eip2718::Decodable2718; - use alloy_primitives::{hex, Address, Parity, B256, U256}; + use alloy_primitives::{hex, Address, PrimitiveSignature as Signature, B256, U256}; use std::str::FromStr; - #[test] - fn test_legacy_parity() { - // Select 1 as an arbitrary nonzero value for R and S, as v() always returns 0 for (0, 0). 
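The `from_eip155_value` switch above is easier to check against the raw EIP-155 rule (`v = y_parity + 27` pre-155, `v = y_parity + chain_id * 2 + 35` with replay protection). A standalone sketch that mirrors the arithmetic of the removed `extract_chain_id`, for illustration only:

```rust
/// Mirrors the EIP-155 v-value split: returns (odd_y_parity, chain_id).
fn split_eip155_v(v: u64) -> Option<(bool, Option<u64>)> {
    match v {
        // non-EIP-155 legacy scheme: 27 = even y-parity, 28 = odd y-parity
        27 | 28 => Some((v == 28, None)),
        // EIP-155: v = {0, 1} + chain_id * 2 + 35
        35.. => Some((((v - 35) % 2) != 0, Some((v - 35) >> 1))),
        // anything else is an invalid legacy parity value
        _ => None,
    }
}

fn main() {
    // mainnet (chain_id = 1): 37 encodes even parity, 38 encodes odd parity
    assert_eq!(split_eip155_v(37), Some((false, Some(1))));
    assert_eq!(split_eip155_v(38), Some((true, Some(1))));
    assert_eq!(split_eip155_v(29), None);
}
```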
- let signature = Signature::new(U256::from(1), U256::from(1), Parity::Parity(false)); - assert_eq!(Parity::NonEip155(false), legacy_parity(&signature, None)); - assert_eq!(Parity::Eip155(37), legacy_parity(&signature, Some(1))); - - let signature = Signature::new(U256::from(1), U256::from(1), Parity::Parity(true)); - assert_eq!(Parity::NonEip155(true), legacy_parity(&signature, None)); - assert_eq!(Parity::Eip155(38), legacy_parity(&signature, Some(1))); - } - #[test] fn test_recover_signer() { let signature = Signature::new( @@ -144,7 +85,7 @@ mod tests { "46948507304638947509940763649030358759909902576025900602547168820602576006531", ) .unwrap(), - Parity::Parity(false), + false, ); let hash = B256::from_str("daf5a779ae972f972197303d7b574746c7ef83eadac0f2791ad23db92e4c8e53") diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index b6e1ecbf226a..46e370861133 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -4,11 +4,9 @@ use alloy_consensus::constants::{ }; use alloy_primitives::{U64, U8}; use alloy_rlp::{Decodable, Encodable}; +use derive_more::Display; use serde::{Deserialize, Serialize}; -#[cfg(test)] -use reth_codecs::Compact; - /// Identifier parameter for legacy transaction #[cfg(any(test, feature = "reth-codec"))] pub(crate) const COMPACT_IDENTIFIER_LEGACY: usize = 0; @@ -39,24 +37,42 @@ pub const DEPOSIT_TX_TYPE_ID: u8 = 126; /// /// Other required changes when adding a new type can be seen on [PR#3953](https://github.com/paradigmxyz/reth/pull/3953/files). #[derive( - Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Serialize, Deserialize, Hash, + Clone, + Copy, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Default, + Serialize, + Deserialize, + Hash, + Display, )] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] +#[display("tx type: {_variant}")] pub enum TxType { /// Legacy transaction pre EIP-2929 #[default] + #[display("legacy (0)")] Legacy = 0_isize, /// AccessList transaction + #[display("eip2930 (1)")] Eip2930 = 1_isize, /// Transaction with Priority fee + #[display("eip1559 (2)")] Eip1559 = 2_isize, /// Shard Blob Transactions - EIP-4844 + #[display("eip4844 (3)")] Eip4844 = 3_isize, /// EOA Contract Code Transactions - EIP-7702 + #[display("eip7702 (4)")] Eip7702 = 4_isize, /// Optimism Deposit transaction. 
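The variant attributes above combine with the enum-level `#[display("tx type: {_variant}")]` wrapper. A trimmed, hypothetical sketch of the resulting output, assuming derive_more 1.x semantics where `{_variant}` expands to the variant's own `#[display]` string:

```rust
use derive_more::Display;

// Trimmed stand-in for the TxType enum above, just to show the output.
#[derive(Display)]
#[display("tx type: {_variant}")]
enum Demo {
    #[display("legacy (0)")]
    Legacy,
    #[display("eip1559 (2)")]
    Eip1559,
}

fn main() {
    // The enum-level format string wraps each variant's own display string.
    assert_eq!(Demo::Legacy.to_string(), "tx type: legacy (0)");
    assert_eq!(Demo::Eip1559.to_string(), "tx type: eip1559 (2)");
}
```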
#[cfg(feature = "optimism")] + #[display("deposit (126)")] Deposit = 126_isize, } @@ -220,128 +236,74 @@ impl Decodable for TxType { } } -impl From for TxType { - fn from(value: alloy_consensus::TxType) -> Self { - match value { - alloy_consensus::TxType::Legacy => Self::Legacy, - alloy_consensus::TxType::Eip2930 => Self::Eip2930, - alloy_consensus::TxType::Eip1559 => Self::Eip1559, - alloy_consensus::TxType::Eip4844 => Self::Eip4844, - alloy_consensus::TxType::Eip7702 => Self::Eip7702, - } - } -} - #[cfg(test)] mod tests { use alloy_primitives::hex; - use rand::Rng; use reth_codecs::Compact; + use rstest::rstest; use super::*; - #[test] - fn test_u64_to_tx_type() { - // Test for Legacy transaction - assert_eq!(TxType::try_from(U64::from(LEGACY_TX_TYPE_ID)).unwrap(), TxType::Legacy); - - // Test for EIP2930 transaction - assert_eq!(TxType::try_from(U64::from(EIP2930_TX_TYPE_ID)).unwrap(), TxType::Eip2930); - - // Test for EIP1559 transaction - assert_eq!(TxType::try_from(U64::from(EIP1559_TX_TYPE_ID)).unwrap(), TxType::Eip1559); - - // Test for EIP4844 transaction - assert_eq!(TxType::try_from(U64::from(EIP4844_TX_TYPE_ID)).unwrap(), TxType::Eip4844); - - // Test for EIP7702 transaction - assert_eq!(TxType::try_from(U64::from(EIP7702_TX_TYPE_ID)).unwrap(), TxType::Eip7702); - - // Test for Deposit transaction - #[cfg(feature = "optimism")] - assert_eq!(TxType::try_from(U64::from(DEPOSIT_TX_TYPE_ID)).unwrap(), TxType::Deposit); - - // For transactions with unsupported values - assert!(TxType::try_from(U64::from(EIP7702_TX_TYPE_ID + 1)).is_err()); + #[rstest] + #[case(U64::from(LEGACY_TX_TYPE_ID), Ok(TxType::Legacy))] + #[case(U64::from(EIP2930_TX_TYPE_ID), Ok(TxType::Eip2930))] + #[case(U64::from(EIP1559_TX_TYPE_ID), Ok(TxType::Eip1559))] + #[case(U64::from(EIP4844_TX_TYPE_ID), Ok(TxType::Eip4844))] + #[case(U64::from(EIP7702_TX_TYPE_ID), Ok(TxType::Eip7702))] + #[cfg_attr(feature = "optimism", case(U64::from(DEPOSIT_TX_TYPE_ID), Ok(TxType::Deposit)))] + #[case(U64::MAX, Err("invalid tx type"))] + fn test_u64_to_tx_type(#[case] input: U64, #[case] expected: Result) { + let tx_type_result = TxType::try_from(input); + assert_eq!(tx_type_result, expected); } - #[test] - fn test_txtype_to_compat() { - let cases = vec![ - (TxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![]), - (TxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![]), - (TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![]), - (TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID]), - (TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID]), - #[cfg(feature = "optimism")] - (TxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID]), - ]; - - for (tx_type, expected_identifier, expected_buf) in cases { - let mut buf = vec![]; - let identifier = tx_type.to_compact(&mut buf); - assert_eq!( - identifier, expected_identifier, - "Unexpected identifier for TxType {tx_type:?}", - ); - assert_eq!(buf, expected_buf, "Unexpected buffer for TxType {tx_type:?}"); - } + #[rstest] + #[case(TxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![])] + #[case(TxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![])] + #[case(TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])] + #[case(TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID])] + #[case(TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] + #[cfg_attr(feature = "optimism", case(TxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID]))] + fn test_txtype_to_compact( + #[case] 
tx_type: TxType, + #[case] expected_identifier: usize, + #[case] expected_buf: Vec, + ) { + let mut buf = vec![]; + let identifier = tx_type.to_compact(&mut buf); + + assert_eq!(identifier, expected_identifier, "Unexpected identifier for TxType {tx_type:?}",); + assert_eq!(buf, expected_buf, "Unexpected buffer for TxType {tx_type:?}",); } - #[test] - fn test_txtype_from_compact() { - let cases = vec![ - (TxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![]), - (TxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![]), - (TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![]), - (TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID]), - (TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID]), - #[cfg(feature = "optimism")] - (TxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID]), - ]; - - for (expected_type, identifier, buf) in cases { - let (actual_type, remaining_buf) = TxType::from_compact(&buf, identifier); - assert_eq!(actual_type, expected_type, "Unexpected TxType for identifier {identifier}",); - assert!( - remaining_buf.is_empty(), - "Buffer not fully consumed for identifier {identifier}", - ); - } + #[rstest] + #[case(TxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![])] + #[case(TxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![])] + #[case(TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])] + #[case(TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID])] + #[case(TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] + #[cfg_attr(feature = "optimism", case(TxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID]))] + fn test_txtype_from_compact( + #[case] expected_type: TxType, + #[case] identifier: usize, + #[case] buf: Vec, + ) { + let (actual_type, remaining_buf) = TxType::from_compact(&buf, identifier); + + assert_eq!(actual_type, expected_type, "Unexpected TxType for identifier {identifier}"); + assert!(remaining_buf.is_empty(), "Buffer not fully consumed for identifier {identifier}"); } - #[test] - fn decode_tx_type() { - // Test for Legacy transaction - let tx_type = TxType::decode(&mut &hex!("80")[..]).unwrap(); - assert_eq!(tx_type, TxType::Legacy); - - // Test for EIP2930 transaction - let tx_type = TxType::decode(&mut &[EIP2930_TX_TYPE_ID][..]).unwrap(); - assert_eq!(tx_type, TxType::Eip2930); - - // Test for EIP1559 transaction - let tx_type = TxType::decode(&mut &[EIP1559_TX_TYPE_ID][..]).unwrap(); - assert_eq!(tx_type, TxType::Eip1559); - - // Test for EIP4844 transaction - let tx_type = TxType::decode(&mut &[EIP4844_TX_TYPE_ID][..]).unwrap(); - assert_eq!(tx_type, TxType::Eip4844); - - // Test for EIP7702 transaction - let tx_type = TxType::decode(&mut &[EIP7702_TX_TYPE_ID][..]).unwrap(); - assert_eq!(tx_type, TxType::Eip7702); - - // Test random byte not in range - let buf = [rand::thread_rng().gen_range(EIP7702_TX_TYPE_ID + 1..=u8::MAX)]; - assert!(TxType::decode(&mut &buf[..]).is_err()); - - // Test for Deposit transaction - #[cfg(feature = "optimism")] - { - let buf = [DEPOSIT_TX_TYPE_ID]; - let tx_type = TxType::decode(&mut &buf[..]).unwrap(); - assert_eq!(tx_type, TxType::Deposit); - } + #[rstest] + #[case(&hex!("80"), Ok(TxType::Legacy))] + #[case(&[EIP2930_TX_TYPE_ID], Ok(TxType::Eip2930))] + #[case(&[EIP1559_TX_TYPE_ID], Ok(TxType::Eip1559))] + #[case(&[EIP4844_TX_TYPE_ID], Ok(TxType::Eip4844))] + #[case(&[EIP7702_TX_TYPE_ID], Ok(TxType::Eip7702))] + #[case(&[u8::MAX], Err(alloy_rlp::Error::InputTooShort))] + #[cfg_attr(feature = 
"optimism", case(&[DEPOSIT_TX_TYPE_ID], Ok(TxType::Deposit)))] + fn decode_tx_type(#[case] input: &[u8], #[case] expected: Result) { + let tx_type_result = TxType::decode(&mut &input[..]); + assert_eq!(tx_type_result, expected) } } diff --git a/crates/primitives/src/transaction/util.rs b/crates/primitives/src/transaction/util.rs index 7569400e94b4..7964cc1c5f00 100644 --- a/crates/primitives/src/transaction/util.rs +++ b/crates/primitives/src/transaction/util.rs @@ -1,5 +1,4 @@ -use crate::Signature; -use alloy_primitives::Address; +use alloy_primitives::{Address, PrimitiveSignature as Signature}; #[cfg(feature = "secp256k1")] pub(crate) mod secp256k1 { @@ -19,7 +18,7 @@ mod impl_secp256k1 { ecdsa::{RecoverableSignature, RecoveryId}, Message, PublicKey, SecretKey, SECP256K1, }; - use alloy_primitives::{keccak256, Parity, B256, U256}; + use alloy_primitives::{keccak256, B256, U256}; /// Recovers the address of the sender using secp256k1 pubkey recovery. /// @@ -45,7 +44,7 @@ mod impl_secp256k1 { let signature = Signature::new( U256::try_from_be_slice(&data[..32]).expect("The slice has at most 32 bytes"), U256::try_from_be_slice(&data[32..64]).expect("The slice has at most 32 bytes"), - Parity::Parity(rec_id.to_i32() != 0), + rec_id.to_i32() != 0, ); Ok(signature) } @@ -63,7 +62,7 @@ mod impl_secp256k1 { #[cfg_attr(feature = "secp256k1", allow(unused, unreachable_pub))] mod impl_k256 { use super::*; - use alloy_primitives::{keccak256, Parity, B256, U256}; + use alloy_primitives::{keccak256, B256}; pub(crate) use k256::ecdsa::Error; use k256::ecdsa::{RecoveryId, SigningKey, VerifyingKey}; @@ -93,15 +92,7 @@ mod impl_k256 { /// Returns the corresponding signature. pub fn sign_message(secret: B256, message: B256) -> Result { let sec = SigningKey::from_slice(secret.as_ref())?; - let (sig, rec_id) = sec.sign_prehash_recoverable(&message.0)?; - let (r, s) = sig.split_bytes(); - - let signature = Signature::new( - U256::try_from_be_slice(&r).expect("The slice has at most 32 bytes"), - U256::try_from_be_slice(&s).expect("The slice has at most 32 bytes"), - Parity::Parity(rec_id.is_y_odd()), - ); - Ok(signature) + sec.sign_prehash_recoverable(&message.0).map(Into::into) } /// Converts a public key into an ethereum address by hashing the encoded public key with @@ -132,7 +123,7 @@ mod tests { let mut sig: [u8; 65] = [0; 65]; sig[0..32].copy_from_slice(&signature.r().to_be_bytes::<32>()); sig[32..64].copy_from_slice(&signature.s().to_be_bytes::<32>()); - sig[64] = signature.v().y_parity_byte(); + sig[64] = signature.v() as u8; assert_eq!(recover_signer_unchecked(&sig, &hash), Ok(signer)); } @@ -190,14 +181,14 @@ mod tests { sig[0..32].copy_from_slice(&secp256k1_signature.r().to_be_bytes::<32>()); sig[32..64].copy_from_slice(&secp256k1_signature.s().to_be_bytes::<32>()); - sig[64] = secp256k1_signature.v().y_parity_byte(); + sig[64] = secp256k1_signature.v() as u8; let secp256k1_recovered = impl_secp256k1::recover_signer_unchecked(&sig, &hash).expect("secp256k1 recover"); assert_eq!(secp256k1_recovered, secp256k1_signer); sig[0..32].copy_from_slice(&k256_signature.r().to_be_bytes::<32>()); sig[32..64].copy_from_slice(&k256_signature.s().to_be_bytes::<32>()); - sig[64] = k256_signature.v().y_parity_byte(); + sig[64] = k256_signature.v() as u8; let k256_recovered = impl_k256::recover_signer_unchecked(&sig, &hash).expect("k256 recover"); assert_eq!(k256_recovered, k256_signer); diff --git a/crates/prune/prune/Cargo.toml b/crates/prune/prune/Cargo.toml index 2f2a37d5ba66..4df9ace81338 100644 --- 
a/crates/prune/prune/Cargo.toml +++ b/crates/prune/prune/Cargo.toml @@ -41,6 +41,7 @@ rustc-hash.workspace = true # reth reth-db = { workspace = true, features = ["test-utils"] } reth-stages = { workspace = true, features = ["test-utils"] } +reth-primitives-traits = { workspace = true, features = ["arbitrary"] } reth-testing-utils.workspace = true reth-tracing.workspace = true diff --git a/crates/prune/prune/src/event.rs b/crates/prune/prune/src/event.rs index 95a90d7628cc..4f5806e592ee 100644 --- a/crates/prune/prune/src/event.rs +++ b/crates/prune/prune/src/event.rs @@ -1,5 +1,5 @@ use alloy_primitives::BlockNumber; -use reth_prune_types::{PruneProgress, PruneSegment}; +use reth_prune_types::PrunedSegmentInfo; use std::time::Duration; /// An event emitted by a [Pruner][crate::Pruner]. @@ -8,9 +8,5 @@ pub enum PrunerEvent { /// Emitted when pruner started running. Started { tip_block_number: BlockNumber }, /// Emitted when pruner finished running. - Finished { - tip_block_number: BlockNumber, - elapsed: Duration, - stats: Vec<(PruneSegment, usize, PruneProgress)>, - }, + Finished { tip_block_number: BlockNumber, elapsed: Duration, stats: Vec<PrunedSegmentInfo> }, } diff --git a/crates/prune/prune/src/pruner.rs b/crates/prune/prune/src/pruner.rs index d21560cae607..0ad149bb654d 100644 --- a/crates/prune/prune/src/pruner.rs +++ b/crates/prune/prune/src/pruner.rs @@ -9,7 +9,7 @@ use reth_exex_types::FinishedExExHeight; use reth_provider::{ DBProvider, DatabaseProviderFactory, PruneCheckpointReader, PruneCheckpointWriter, }; -use reth_prune_types::{PruneLimiter, PruneProgress, PruneSegment, PrunerOutput}; +use reth_prune_types::{PruneLimiter, PruneProgress, PrunedSegmentInfo, PrunerOutput}; use reth_tokio_util::{EventSender, EventStream}; use std::time::{Duration, Instant}; use tokio::sync::watch; @@ -21,8 +21,6 @@ pub type PrunerResult = Result<PruneProgress, PrunerError>; /// The pruner type itself with the result of [`Pruner::run`] pub type PrunerWithResult<Provider, PF> = (Pruner<Provider, PF>, PrunerResult); -type PrunerStats = Vec<(PruneSegment, usize, PruneProgress)>; - /// Pruner with preset provider factory. pub type PrunerWithFactory<PF> = Pruner<<PF as DatabaseProviderFactory>::ProviderRW, PF>; @@ -174,14 +172,15 @@ where /// be pruned according to the highest `static_files`. Segments are parts of the database that /// represent one or more tables. /// - /// Returns [`PrunerStats`], total number of entries pruned, and [`PruneProgress`]. + /// Returns a list of stats per pruned segment, total number of entries pruned, and + /// [`PruneProgress`].
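Since `Finished` now carries a struct with named fields instead of 3-tuples, consumers read the stats by field name. A hedged sketch of the consumer side; `log_stats` is a hypothetical helper, and the type comes from `reth_prune_types` as re-exported later in this diff:

```rust
use reth_prune_types::PrunedSegmentInfo;

/// Hypothetical helper: named fields replace the old
/// `(PruneSegment, usize, PruneProgress)` tuple destructuring.
fn log_stats(stats: &[PrunedSegmentInfo]) {
    for info in stats {
        println!(
            "segment {:?} pruned {} entries ({:?})",
            info.segment, info.pruned, info.progress
        );
    }
}
```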
fn prune_segments( &mut self, provider: &Provider, tip_block_number: BlockNumber, limiter: &mut PruneLimiter, - ) -> Result<(PrunerStats, usize, PrunerOutput), PrunerError> { - let mut stats = PrunerStats::new(); + ) -> Result<(Vec<PrunedSegmentInfo>, usize, PrunerOutput), PrunerError> { + let mut stats = Vec::with_capacity(self.segments.len()); let mut pruned = 0; let mut output = PrunerOutput { progress: PruneProgress::Finished, @@ -249,7 +248,12 @@ where if segment_output.pruned > 0 { limiter.increment_deleted_entries_count_by(segment_output.pruned); pruned += segment_output.pruned; - stats.push((segment.segment(), segment_output.pruned, segment_output.progress)); + let info = PrunedSegmentInfo { + segment: segment.segment(), + pruned: segment_output.pruned, + progress: segment_output.progress, + }; + stats.push(info); } } else { debug!(target: "pruner", segment = ?segment.segment(), purpose = ?segment.purpose(), "Nothing to prune for the segment"); } diff --git a/crates/prune/prune/src/segments/mod.rs b/crates/prune/prune/src/segments/mod.rs index d1b7819ac763..b3b40aab5b3b 100644 --- a/crates/prune/prune/src/segments/mod.rs +++ b/crates/prune/prune/src/segments/mod.rs @@ -23,9 +23,9 @@ pub use user::{ /// A segment represents a pruning of some portion of the data. /// -/// Segments are called from [Pruner](crate::Pruner) with the following lifecycle: +/// Segments are called from [`Pruner`](crate::Pruner) with the following lifecycle: /// 1. Call [`Segment::prune`] with `delete_limit` of [`PruneInput`]. -/// 2. If [`Segment::prune`] returned a [Some] in `checkpoint` of [`SegmentOutput`], call +/// 2. If [`Segment::prune`] returned a [`Some`] in `checkpoint` of [`SegmentOutput`], call /// [`Segment::save_checkpoint`]. /// 3. Subtract `pruned` of [`SegmentOutput`] from `delete_limit` of next [`PruneInput`]. pub trait Segment<Provider>: Debug + Send + Sync { @@ -88,7 +88,7 @@ impl PruneInput { }, }) // No checkpoint exists, prune from genesis - .unwrap_or(0); + .unwrap_or_default(); let to_tx_number = match provider.block_body_indices(self.to_block)?
{ Some(body) => { @@ -143,3 +143,206 @@ impl PruneInput { .unwrap_or(0) } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::B256; + use reth_provider::{ + providers::BlockchainProvider2, + test_utils::{create_test_provider_factory, MockEthProvider}, + }; + use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams}; + + #[test] + fn test_prune_input_get_next_tx_num_range_no_to_block() { + let input = PruneInput { + previous_checkpoint: None, + to_block: 10, + limiter: PruneLimiter::default(), + }; + + // Default provider with no block corresponding to block 10 + let provider = MockEthProvider::default(); + + // No block body for block 10, expected None + let range = input.get_next_tx_num_range(&provider).expect("Expected range"); + assert!(range.is_none()); + } + + #[test] + fn test_prune_input_get_next_tx_num_range_no_tx() { + let input = PruneInput { + previous_checkpoint: None, + to_block: 10, + limiter: PruneLimiter::default(), + }; + + let mut rng = generators::rng(); + let factory = create_test_provider_factory(); + + // Generate 10 random blocks with no transactions + let blocks = random_block_range( + &mut rng, + 0..=10, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() }, + ); + + // Insert the blocks into the database + let provider_rw = factory.provider_rw().expect("failed to get provider_rw"); + for block in &blocks { + provider_rw + .insert_historical_block( + block.clone().seal_with_senders().expect("failed to seal block with senders"), + ) + .expect("failed to insert block"); + } + provider_rw.commit().expect("failed to commit"); + + // Create a new provider + let provider = BlockchainProvider2::new(factory).unwrap(); + + // Since there are no transactions, expected None + let range = input.get_next_tx_num_range(&provider).expect("Expected range"); + assert!(range.is_none()); + } + + #[test] + fn test_prune_input_get_next_tx_num_range_valid() { + // Create a new prune input + let input = PruneInput { + previous_checkpoint: None, + to_block: 10, + limiter: PruneLimiter::default(), + }; + + let mut rng = generators::rng(); + let factory = create_test_provider_factory(); + + // Generate 10 random blocks with some transactions + let blocks = random_block_range( + &mut rng, + 0..=10, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..5, ..Default::default() }, + ); + + // Insert the blocks into the database + let provider_rw = factory.provider_rw().expect("failed to get provider_rw"); + for block in &blocks { + provider_rw + .insert_historical_block( + block.clone().seal_with_senders().expect("failed to seal block with senders"), + ) + .expect("failed to insert block"); + } + provider_rw.commit().expect("failed to commit"); + + // Create a new provider + let provider = BlockchainProvider2::new(factory).unwrap(); + + // Get the next tx number range + let range = input.get_next_tx_num_range(&provider).expect("Expected range").unwrap(); + + // Calculate the total number of transactions + let num_txs = + blocks.iter().map(|block| block.body.transactions().count() as u64).sum::(); + + assert_eq!(range, 0..=num_txs - 1); + } + + #[test] + fn test_prune_input_get_next_tx_checkpoint_without_tx_number() { + // Create a prune input with a previous checkpoint without a tx number (unexpected) + let input = PruneInput { + previous_checkpoint: Some(PruneCheckpoint { + block_number: Some(5), + tx_number: None, + prune_mode: PruneMode::Full, + }), + to_block: 10, + limiter: PruneLimiter::default(), + }; + + let 
mut rng = generators::rng(); + let factory = create_test_provider_factory(); + + // Generate 10 random blocks + let blocks = random_block_range( + &mut rng, + 0..=10, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..5, ..Default::default() }, + ); + + // Insert the blocks into the database + let provider_rw = factory.provider_rw().expect("failed to get provider_rw"); + for block in &blocks { + provider_rw + .insert_historical_block( + block.clone().seal_with_senders().expect("failed to seal block with senders"), + ) + .expect("failed to insert block"); + } + provider_rw.commit().expect("failed to commit"); + + // Create a new provider + let provider = BlockchainProvider2::new(factory).unwrap(); + + // Fetch the range and check if it is correct + let range = input.get_next_tx_num_range(&provider).expect("Expected range").unwrap(); + + // Calculate the total number of transactions + let num_txs = + blocks.iter().map(|block| block.body.transactions().count() as u64).sum::(); + + assert_eq!(range, 0..=num_txs - 1,); + } + + #[test] + fn test_prune_input_get_next_tx_empty_range() { + // Create a new provider via factory + let mut rng = generators::rng(); + let factory = create_test_provider_factory(); + + // Generate 10 random blocks + let blocks = random_block_range( + &mut rng, + 0..=10, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..5, ..Default::default() }, + ); + + // Insert the blocks into the database + let provider_rw = factory.provider_rw().expect("failed to get provider_rw"); + for block in &blocks { + provider_rw + .insert_historical_block( + block.clone().seal_with_senders().expect("failed to seal block with senders"), + ) + .expect("failed to insert block"); + } + provider_rw.commit().expect("failed to commit"); + + // Create a new provider + let provider = BlockchainProvider2::new(factory).unwrap(); + + // Get the last tx number + // Calculate the total number of transactions + let num_txs = + blocks.iter().map(|block| block.body.transactions().count() as u64).sum::(); + let max_range = num_txs - 1; + + // Create a prune input with a previous checkpoint that is the last tx number + let input = PruneInput { + previous_checkpoint: Some(PruneCheckpoint { + block_number: Some(5), + tx_number: Some(max_range), + prune_mode: PruneMode::Full, + }), + to_block: 10, + limiter: PruneLimiter::default(), + }; + + // We expect an empty range since the previous checkpoint is the last tx number + let range = input.get_next_tx_num_range(&provider).expect("Expected range"); + assert!(range.is_none()); + } +} diff --git a/crates/prune/prune/src/segments/set.rs b/crates/prune/prune/src/segments/set.rs index 710b2b721cd6..23d03345b096 100644 --- a/crates/prune/prune/src/segments/set.rs +++ b/crates/prune/prune/src/segments/set.rs @@ -11,7 +11,7 @@ use reth_prune_types::PruneModes; use super::{StaticFileHeaders, StaticFileReceipts, StaticFileTransactions}; -/// Collection of [Segment]. Thread-safe, allocated on the heap. +/// Collection of [`Segment`]. Thread-safe, allocated on the heap. #[derive(Debug)] pub struct SegmentSet { inner: Vec>>, @@ -23,7 +23,7 @@ impl SegmentSet { Self::default() } - /// Adds new [Segment] to collection. + /// Adds new [`Segment`] to collection. 
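The `segment` method documented above is a builder. A usage sketch under assumed import paths (`reth_prune::segments`), with `a` and `b` standing in for any concrete segment implementations:

```rust
use reth_prune::segments::{Segment, SegmentSet};

/// Sketch: chain any two segments into one set; the set boxes each one
/// internally, as shown in the diff below.
fn build_set<Provider, A, B>(a: A, b: B) -> SegmentSet<Provider>
where
    A: Segment<Provider> + 'static,
    B: Segment<Provider> + 'static,
{
    SegmentSet::new().segment(a).segment(b)
}
```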
pub fn segment<S: Segment<Provider> + 'static>(mut self, segment: S) -> Self { self.inner.push(Box::new(segment)); self diff --git a/crates/prune/prune/src/segments/user/receipts_by_logs.rs b/crates/prune/prune/src/segments/user/receipts_by_logs.rs index ee2accee1b34..ee404b074c3c 100644 --- a/crates/prune/prune/src/segments/user/receipts_by_logs.rs +++ b/crates/prune/prune/src/segments/user/receipts_by_logs.rs @@ -10,7 +10,6 @@ use reth_prune_types::{ SegmentOutput, MINIMUM_PRUNING_DISTANCE, }; use tracing::{instrument, trace}; - #[derive(Debug)] pub struct ReceiptsByLogs { config: ReceiptsLogPruneConfig, @@ -223,6 +222,7 @@ mod tests { use assert_matches::assert_matches; use reth_db::tables; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; + use reth_primitives_traits::InMemorySize; use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader, TransactionsProvider}; use reth_prune_types::{PruneLimiter, PruneMode, PruneSegment, ReceiptsLogPruneConfig}; use reth_stages::test_utils::{StorageKind, TestStageDB}; diff --git a/crates/prune/types/src/lib.rs b/crates/prune/types/src/lib.rs index 6e06d6fc5dc9..0722e760fafb 100644 --- a/crates/prune/types/src/lib.rs +++ b/crates/prune/types/src/lib.rs @@ -19,7 +19,8 @@ pub use checkpoint::PruneCheckpoint; pub use limiter::PruneLimiter; pub use mode::PruneMode; pub use pruner::{ - PruneInterruptReason, PruneProgress, PrunerOutput, SegmentOutput, SegmentOutputCheckpoint, + PruneInterruptReason, PruneProgress, PrunedSegmentInfo, PrunerOutput, SegmentOutput, + SegmentOutputCheckpoint, }; pub use segment::{PrunePurpose, PruneSegment, PruneSegmentError}; use serde::{Deserialize, Serialize}; @@ -27,6 +28,7 @@ use std::collections::BTreeMap; pub use target::{PruneModes, MINIMUM_PRUNING_DISTANCE}; use alloy_primitives::{Address, BlockNumber}; +use std::ops::Deref; /// Configuration for pruning receipts not associated with logs emitted by the specified contracts. #[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] @@ -59,7 +61,7 @@ impl ReceiptsLogPruneConfig { pruned_block: Option<BlockNumber>, ) -> Result<BTreeMap<BlockNumber, Vec<&Address>>, PruneSegmentError> { let mut map = BTreeMap::new(); - let pruned_block = pruned_block.unwrap_or_default(); + let base_block = pruned_block.unwrap_or_default() + 1; for (address, mode) in &self.0 { // Getting `None`, means that there is nothing to prune yet, so we need it to include in // // Reminder, that we increment because the [`BlockNumber`] key of the new map should be // viewed as `PruneMode::Before(block)` - let block = (pruned_block + 1).max( + let block = base_block.max( mode.prune_target_block(tip, PruneSegment::ContractLogs, PrunePurpose::User)? .map(|(block, _)| block) .unwrap_or_default() + @@ -90,8 +92,8 @@ let pruned_block = pruned_block.unwrap_or_default(); let mut lowest = None; - for mode in self.0.values() { - if let PruneMode::Distance(_) = mode { + for mode in self.values() { + if mode.is_distance() { if let Some((block, _)) = mode.prune_target_block(tip, PruneSegment::ContractLogs, PrunePurpose::User)?
{ @@ -103,3 +105,224 @@ impl ReceiptsLogPruneConfig { Ok(lowest.map(|lowest| lowest.max(pruned_block))) } } + +impl Deref for ReceiptsLogPruneConfig { + type Target = BTreeMap<Address, PruneMode>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_group_by_block_empty_config() { + let config = ReceiptsLogPruneConfig(BTreeMap::new()); + let tip = 1000; + let pruned_block = None; + + let result = config.group_by_block(tip, pruned_block).unwrap(); + assert!(result.is_empty(), "The result should be empty when the config is empty"); + } + + #[test] + fn test_group_by_block_single_entry() { + let mut config_map = BTreeMap::new(); + let address = Address::new([1; 20]); + let prune_mode = PruneMode::Before(500); + config_map.insert(address, prune_mode); + + let config = ReceiptsLogPruneConfig(config_map); + // Big tip to have something to prune for the target block + let tip = 3000000; + let pruned_block = Some(400); + + let result = config.group_by_block(tip, pruned_block).unwrap(); + + // Expect one entry with block 500 and the corresponding address + assert_eq!(result.len(), 1); + assert_eq!(result[&500], vec![&address], "Address should be grouped under block 500"); + + // Tip smaller than the target block, so that we have nothing to prune for the block + let tip = 300; + let pruned_block = Some(400); + + let result = config.group_by_block(tip, pruned_block).unwrap(); + + // Expect one entry with block 401 (pruned block + 1) and the corresponding address + assert_eq!(result.len(), 1); + assert_eq!(result[&401], vec![&address], "Address should be grouped under block 401"); + } + + #[test] + fn test_group_by_block_multiple_entries() { + let mut config_map = BTreeMap::new(); + let address1 = Address::new([1; 20]); + let address2 = Address::new([2; 20]); + let prune_mode1 = PruneMode::Before(600); + let prune_mode2 = PruneMode::Before(800); + config_map.insert(address1, prune_mode1); + config_map.insert(address2, prune_mode2); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 900000; + let pruned_block = Some(400); + + let result = config.group_by_block(tip, pruned_block).unwrap(); + + // Expect two entries: one for block 600 and another for block 800 + assert_eq!(result.len(), 2); + assert_eq!(result[&600], vec![&address1], "Address1 should be grouped under block 600"); + assert_eq!(result[&800], vec![&address2], "Address2 should be grouped under block 800"); + } + + #[test] + fn test_group_by_block_with_distance_prune_mode() { + let mut config_map = BTreeMap::new(); + let address = Address::new([1; 20]); + let prune_mode = PruneMode::Distance(100000); + config_map.insert(address, prune_mode); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 100100; + // Pruned block is smaller than the target block + let pruned_block = Some(50); + + let result = config.group_by_block(tip, pruned_block).unwrap(); + + // Expect the entry to be grouped under block 101 (tip - distance + 1) + assert_eq!(result.len(), 1); + assert_eq!(result[&101], vec![&address], "Address should be grouped under block 101"); + + let tip = 100100; + // Pruned block is larger than the target block + let pruned_block = Some(800); + + let result = config.group_by_block(tip, pruned_block).unwrap(); + + // Expect the entry to be grouped under block 801 (pruned block + 1), which is larger than tip - distance + assert_eq!(result.len(), 1); + assert_eq!(result[&801], vec![&address], "Address should be grouped under block 801"); + } + + #[test] + fn test_lowest_block_with_distance_empty_config() { + let
config = ReceiptsLogPruneConfig(BTreeMap::new()); + let tip = 1000; + let pruned_block = None; + + let result = config.lowest_block_with_distance(tip, pruned_block).unwrap(); + assert_eq!(result, None, "The result should be None when the config is empty"); + } + + #[test] + fn test_lowest_block_with_distance_no_distance_mode() { + let mut config_map = BTreeMap::new(); + let address = Address::new([1; 20]); + let prune_mode = PruneMode::Before(500); + config_map.insert(address, prune_mode); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 1000; + let pruned_block = None; + + let result = config.lowest_block_with_distance(tip, pruned_block).unwrap(); + assert_eq!(result, None, "The result should be None when there are no Distance modes"); + } + + #[test] + fn test_lowest_block_with_distance_single_entry() { + let mut config_map = BTreeMap::new(); + let address = Address::new([1; 20]); + let prune_mode = PruneMode::Distance(100000); + config_map.insert(address, prune_mode); + + let config = ReceiptsLogPruneConfig(config_map); + + let tip = 100100; + let pruned_block = Some(400); + + // Expect the lowest block to be 400 as 400 > 100100 - 100000 (tip - distance) + assert_eq!( + config.lowest_block_with_distance(tip, pruned_block).unwrap(), + Some(400), + "The lowest block should be 400" + ); + + let tip = 100100; + let pruned_block = Some(50); + + // Expect the lowest block to be 100 as 100 > 50 (pruned block) + assert_eq!( + config.lowest_block_with_distance(tip, pruned_block).unwrap(), + Some(100), + "The lowest block should be 100" + ); + } + + #[test] + fn test_lowest_block_with_distance_multiple_entries_last() { + let mut config_map = BTreeMap::new(); + let address1 = Address::new([1; 20]); + let address2 = Address::new([2; 20]); + let prune_mode1 = PruneMode::Distance(100100); + let prune_mode2 = PruneMode::Distance(100300); + config_map.insert(address1, prune_mode1); + config_map.insert(address2, prune_mode2); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 200300; + let pruned_block = Some(100); + + // The lowest block should be 200300 - 100300 = 100000: + // - First iteration will return 100200 => 200300 - 100100 = 100200 + // - Second iteration will return 100000 => 200300 - 100300 = 100000 < 100200 + // - Final result is 100000 + assert_eq!(config.lowest_block_with_distance(tip, pruned_block).unwrap(), Some(100000)); + } + + #[test] + fn test_lowest_block_with_distance_multiple_entries_first() { + let mut config_map = BTreeMap::new(); + let address1 = Address::new([1; 20]); + let address2 = Address::new([2; 20]); + let prune_mode1 = PruneMode::Distance(100400); + let prune_mode2 = PruneMode::Distance(100300); + config_map.insert(address1, prune_mode1); + config_map.insert(address2, prune_mode2); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 200300; + let pruned_block = Some(100); + + // The lowest block should be 200300 - 100400 = 99900: + // - First iteration, lowest block is 200300 - 100400 = 99900 + // - Second iteration, lowest block is still 99900 < 200300 - 100300 = 100000 + // - Final result is 99900 + assert_eq!(config.lowest_block_with_distance(tip, pruned_block).unwrap(), Some(99900)); + } + + #[test] + fn test_lowest_block_with_distance_multiple_entries_pruned_block() { + let mut config_map = BTreeMap::new(); + let address1 = Address::new([1; 20]); + let address2 = Address::new([2; 20]); + let prune_mode1 = PruneMode::Distance(100400); + let prune_mode2 = PruneMode::Distance(100300); + config_map.insert(address1, 
prune_mode1); + config_map.insert(address2, prune_mode2); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 200300; + let pruned_block = Some(100000); + + // The lowest block should be 100000 because: + // - Lowest is 200300 - 100400 = 99900 < 200300 - 100300 = 100000 + // - Lowest is compared to the pruned block 100000: 100000 > 99900 + // - Finally the lowest block is 100000 + assert_eq!(config.lowest_block_with_distance(tip, pruned_block).unwrap(), Some(100000)); + } +} diff --git a/crates/prune/types/src/mode.rs b/crates/prune/types/src/mode.rs index 9a8e55bb383a..de9b9e6dc081 100644 --- a/crates/prune/types/src/mode.rs +++ b/crates/prune/types/src/mode.rs @@ -74,6 +74,11 @@ impl PruneMode { pub const fn is_full(&self) -> bool { matches!(self, Self::Full) } + + /// Returns true if the prune mode is [`PruneMode::Distance`]. + pub const fn is_distance(&self) -> bool { + matches!(self, Self::Distance(_)) + } } #[cfg(test)] diff --git a/crates/prune/types/src/pruner.rs b/crates/prune/types/src/pruner.rs index dbfafff639e8..3046dda06790 100644 --- a/crates/prune/types/src/pruner.rs +++ b/crates/prune/types/src/pruner.rs @@ -1,6 +1,5 @@ -use alloy_primitives::{BlockNumber, TxNumber}; - use crate::{PruneCheckpoint, PruneLimiter, PruneMode, PruneSegment}; +use alloy_primitives::{BlockNumber, TxNumber}; /// Pruner run output. #[derive(Debug)] @@ -17,6 +16,17 @@ impl From for PrunerOutput { } } +/// Represents information of a pruner run for a segment. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct PrunedSegmentInfo { + /// The pruned segment + pub segment: PruneSegment, + /// Number of pruned entries + pub pruned: usize, + /// Prune progress + pub progress: PruneProgress, +} + /// Segment pruning output. #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct SegmentOutput { diff --git a/crates/payload/builder/src/database.rs b/crates/revm/src/cached.rs similarity index 60% rename from crates/payload/builder/src/database.rs rename to crates/revm/src/cached.rs index d63f7322dee2..88a41e1d8957 100644 --- a/crates/payload/builder/src/database.rs +++ b/crates/revm/src/cached.rs @@ -1,13 +1,13 @@ //! Database adapters for payload building. -use alloy_primitives::{Address, B256, U256}; +use alloy_primitives::{ + map::{Entry, HashMap}, + Address, B256, U256, +}; +use core::cell::RefCell; use reth_primitives::revm_primitives::{ db::{Database, DatabaseRef}, AccountInfo, Bytecode, }; -use std::{ - cell::RefCell, - collections::{hash_map::Entry, HashMap}, -}; /// A container type that caches reads from an underlying [`DatabaseRef`]. /// @@ -17,15 +17,15 @@ use std::{ /// # Example /// /// ``` -/// use reth_payload_builder::database::CachedReads; +/// use reth_revm::cached::CachedReads; /// use revm::db::{DatabaseRef, State}; /// /// fn build_payload(db: DB) { /// let mut cached_reads = CachedReads::default(); -/// let db_ref = cached_reads.as_db(db); -/// // this is `Database` and can be used to build a payload, it never writes to `CachedReads` or the underlying database, but all reads from the underlying database are cached in `CachedReads`. +/// let db = cached_reads.as_db_mut(db); +/// // this is `Database` and can be used to build a payload, it never commits to `CachedReads` or the underlying database, but all reads from the underlying database are cached in `CachedReads`. /// // Subsequent payload build attempts can use cached reads and avoid hitting the underlying database. 
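The cache is also mergeable across build attempts via the `extend` method added just below. A usage sketch, assuming both caches were collected over the exact same underlying state, as the `extend` docs require (path as in the updated doc example):

```rust
use reth_revm::cached::CachedReads;

/// Sketch: merge the reads gathered by two payload build attempts.
/// Both caches must be based on the exact same underlying state.
fn merge_caches(mut primary: CachedReads, other: CachedReads) -> CachedReads {
    primary.extend(other);
    primary
}
```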
-/// let db = State::builder().with_database_ref(db_ref).build(); +/// let state = State::builder().with_database(db).build(); /// } /// ``` #[derive(Debug, Clone, Default)] @@ -40,10 +40,11 @@ pub struct CachedReads { impl CachedReads { /// Gets a [`DatabaseRef`] that will cache reads from the given database. pub fn as_db(&mut self, db: DB) -> CachedReadsDBRef<'_, DB> { - CachedReadsDBRef { inner: RefCell::new(self.as_db_mut(db)) } + self.as_db_mut(db).into_db() } - fn as_db_mut(&mut self, db: DB) -> CachedReadsDbMut<'_, DB> { + /// Gets a mutable [`Database`] that will cache reads from the underlying database. + pub fn as_db_mut(&mut self, db: DB) -> CachedReadsDbMut<'_, DB> { CachedReadsDbMut { cached: self, db } } @@ -56,6 +57,15 @@ impl CachedReads { ) { self.accounts.insert(address, CachedAccount { info: Some(info), storage }); } + + /// Extends current cache with entries from another [`CachedReads`] instance. + /// + /// Note: It is expected that both instances are based on the exact same state. + pub fn extend(&mut self, other: Self) { + self.accounts.extend(other.accounts); + self.contracts.extend(other.contracts); + self.block_hashes.extend(other.block_hashes); + } } /// A [Database] that caches reads inside [`CachedReads`]. @@ -67,6 +77,28 @@ pub struct CachedReadsDbMut<'a, DB> { pub db: DB, } +impl<'a, DB> CachedReadsDbMut<'a, DB> { + /// Converts this [`Database`] implementation into a [`DatabaseRef`] that will still cache + /// reads. + pub const fn into_db(self) -> CachedReadsDBRef<'a, DB> { + CachedReadsDBRef { inner: RefCell::new(self) } + } + + /// Returns access to wrapped [`DatabaseRef`]. + pub const fn inner(&self) -> &DB { + &self.db + } +} + +impl AsRef for CachedReadsDbMut<'_, DB> +where + DB: AsRef, +{ + fn as_ref(&self) -> &T { + self.inner().as_ref() + } +} + impl Database for CachedReadsDbMut<'_, DB> { type Error = ::Error; @@ -161,3 +193,57 @@ impl CachedAccount { Self { info, storage: HashMap::default() } } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_extend_with_two_cached_reads() { + // Setup test data + let hash1 = B256::from_slice(&[1u8; 32]); + let hash2 = B256::from_slice(&[2u8; 32]); + let address1 = Address::from_slice(&[1u8; 20]); + let address2 = Address::from_slice(&[2u8; 20]); + + // Create primary cache + let mut primary = { + let mut cache = CachedReads::default(); + cache.accounts.insert(address1, CachedAccount::new(Some(AccountInfo::default()))); + cache.contracts.insert(hash1, Bytecode::default()); + cache.block_hashes.insert(1, hash1); + cache + }; + + // Create additional cache + let additional = { + let mut cache = CachedReads::default(); + cache.accounts.insert(address2, CachedAccount::new(Some(AccountInfo::default()))); + cache.contracts.insert(hash2, Bytecode::default()); + cache.block_hashes.insert(2, hash2); + cache + }; + + // Extending primary with additional cache + primary.extend(additional); + + // Verify the combined state + assert!( + primary.accounts.len() == 2 && + primary.contracts.len() == 2 && + primary.block_hashes.len() == 2, + "All maps should contain 2 entries" + ); + + // Verify specific entries + assert!( + primary.accounts.contains_key(&address1) && + primary.accounts.contains_key(&address2) && + primary.contracts.contains_key(&hash1) && + primary.contracts.contains_key(&hash2) && + primary.block_hashes.get(&1) == Some(&hash1) && + primary.block_hashes.get(&2) == Some(&hash2), + "All expected entries should be present" + ); + } +} diff --git a/crates/revm/src/database.rs 
diff --git a/crates/revm/src/database.rs b/crates/revm/src/database.rs index 8f40d2be8d91..682aca6cf379 100644 --- a/crates/revm/src/database.rs +++ b/crates/revm/src/database.rs @@ -79,6 +79,12 @@ impl<DB> StateProviderDatabase<DB> { } } +impl<DB> AsRef<DB> for StateProviderDatabase<DB> { + fn as_ref(&self) -> &DB { + self + } +} + impl<DB> Deref for StateProviderDatabase<DB> { type Target = DB; @@ -101,21 +107,21 @@ impl<DB: EvmStateProvider> Database for StateProviderDatabase<DB> { /// Returns `Ok` with `Some(AccountInfo)` if the account exists, /// `None` if it doesn't, or an error if encountered. fn basic(&mut self, address: Address) -> Result<Option<AccountInfo>, Self::Error> { - DatabaseRef::basic_ref(self, address) + self.basic_ref(address) } /// Retrieves the bytecode associated with a given code hash. /// /// Returns `Ok` with the bytecode if found, or the default bytecode otherwise. fn code_by_hash(&mut self, code_hash: B256) -> Result<Bytecode, Self::Error> { - DatabaseRef::code_by_hash_ref(self, code_hash) + self.code_by_hash_ref(code_hash) } /// Retrieves the storage value at a specific index for a given address. /// /// Returns `Ok` with the storage value, or the default value if not found. fn storage(&mut self, address: Address, index: U256) -> Result<U256, Self::Error> { - DatabaseRef::storage_ref(self, address, index) + self.storage_ref(address, index) } /// Retrieves the block hash for a given block number. @@ -123,7 +129,7 @@ impl<DB: EvmStateProvider> Database for StateProviderDatabase<DB> { /// Returns `Ok` with the block hash if found, or the default hash otherwise. /// Note: It safely casts the `number` to `u64`. fn block_hash(&mut self, number: u64) -> Result<B256, Self::Error> { - DatabaseRef::block_hash_ref(self, number) + self.block_hash_ref(number) } } diff --git a/crates/revm/src/either.rs b/crates/revm/src/either.rs new file mode 100644 index 000000000000..e93ba3a8d010 --- /dev/null +++ b/crates/revm/src/either.rs @@ -0,0 +1,52 @@ +use alloy_primitives::{Address, B256, U256}; +use revm::{ + primitives::{AccountInfo, Bytecode}, + Database, +}; + +/// An enum type that can hold either of two different [`Database`] implementations. +/// +/// This allows flexible usage of different [`Database`] types in the same context. +#[derive(Debug, Clone)] +pub enum Either<L, R> { + /// A value of type `L`. + Left(L), + /// A value of type `R`. + Right(R), +} + +impl<L, R> Database for Either<L, R> +where + L: Database, + R: Database<Error = L::Error>, +{ + type Error = L::Error; + + fn basic(&mut self, address: Address) -> Result<Option<AccountInfo>, Self::Error> { + match self { + Self::Left(db) => db.basic(address), + Self::Right(db) => db.basic(address), + } + } + + fn code_by_hash(&mut self, code_hash: B256) -> Result<Bytecode, Self::Error> { + match self { + Self::Left(db) => db.code_by_hash(code_hash), + Self::Right(db) => db.code_by_hash(code_hash), + } + } + + fn storage(&mut self, address: Address, index: U256) -> Result<U256, Self::Error> { + match self { + Self::Left(db) => db.storage(address, index), + Self::Right(db) => db.storage(address, index), + } + } + + fn block_hash(&mut self, number: u64) -> Result<B256, Self::Error> { + match self { + Self::Left(db) => db.block_hash(number), + Self::Right(db) => db.block_hash(number), + } + } +}
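A short sketch of how `Either` can select between two database implementations at runtime behind one concrete type. `EmptyDB` again stands in for real databases; in practice `L` and `R` differ, subject to the `R: Database<Error = L::Error>` bound:

```rust
use alloy_primitives::Address;
use reth_revm::either::Either;
use revm::{db::EmptyDB, Database};

/// Chooses a database variant at runtime; both arms share one return type.
fn select_db(use_left: bool) -> Either<EmptyDB, EmptyDB> {
    if use_left {
        Either::Left(EmptyDB::default())
    } else {
        Either::Right(EmptyDB::default())
    }
}

fn main() {
    let mut db = select_db(true);
    // Every `Database` call is forwarded to whichever variant is active.
    let account = db.basic(Address::ZERO).unwrap();
    assert!(account.is_none());
}
```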
diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index 02eb182ee119..b06ee816f8d0 100644 --- a/crates/revm/src/lib.rs +++ b/crates/revm/src/lib.rs @@ -11,14 +11,21 @@ extern crate alloc; +pub mod batch; + +/// Cache database that reads from an underlying [`DatabaseRef`]. +/// Database adapters for payload building. +pub mod cached; + /// Contains glue code for integrating reth database into revm's [Database] pub mod database; -pub mod batch; - /// Common test helpers #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; // Convenience re-exports. pub use revm::{self, *}; + +/// Either type for flexible usage of different database types in the same context. +pub mod either; diff --git a/crates/rpc/ipc/src/client/mod.rs b/crates/rpc/ipc/src/client/mod.rs index 8f2fe0255c7d..48f58e77a4ae 100644 --- a/crates/rpc/ipc/src/client/mod.rs +++ b/crates/rpc/ipc/src/client/mod.rs @@ -136,10 +136,9 @@ pub enum IpcError { #[cfg(test)] mod tests { - use interprocess::local_socket::ListenerOptions; - use super::*; use crate::server::dummy_name; + use interprocess::local_socket::ListenerOptions; #[tokio::test] async fn test_connect() { diff --git a/crates/rpc/rpc-api/Cargo.toml b/crates/rpc/rpc-api/Cargo.toml index 363e22955301..abcdf98b5448 100644 --- a/crates/rpc/rpc-api/Cargo.toml +++ b/crates/rpc/rpc-api/Cargo.toml @@ -13,7 +13,6 @@ workspace = true [dependencies] # reth -reth-primitives.workspace = true reth-rpc-eth-api.workspace = true reth-engine-primitives.workspace = true reth-network-peers.workspace = true diff --git a/crates/rpc/rpc-api/src/anvil.rs b/crates/rpc/rpc-api/src/anvil.rs index baa09166b838..0930264a63b4 100644 --- a/crates/rpc/rpc-api/src/anvil.rs +++ b/crates/rpc/rpc-api/src/anvil.rs @@ -1,8 +1,8 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use alloy_primitives::{Address, Bytes, B256, U256}; -use alloy_rpc_types::Block; use alloy_rpc_types_anvil::{Forking, Metadata, MineOptions, NodeInfo}; +use alloy_rpc_types_eth::Block; /// Anvil rpc interface. /// https://book.getfoundry.sh/reference/anvil/#custom-methods diff --git a/crates/rpc/rpc-api/src/debug.rs b/crates/rpc/rpc-api/src/debug.rs index 3e03210f1ffd..1b857d4a11f2 100644 --- a/crates/rpc/rpc-api/src/debug.rs +++ b/crates/rpc/rpc-api/src/debug.rs @@ -1,12 +1,11 @@ +use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::{Address, Bytes, B256}; -use alloy_rpc_types::{Block, Bundle, StateContext}; use alloy_rpc_types_debug::ExecutionWitness; -use alloy_rpc_types_eth::transaction::TransactionRequest; +use alloy_rpc_types_eth::{transaction::TransactionRequest, Block, Bundle, StateContext}; use alloy_rpc_types_trace::geth::{ BlockTraceResult, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, TraceResult, }; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{BlockId, BlockNumberOrTag} /// Debug rpc interface.
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "debug"))] diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index ddf6d846119d..f78b8349be86 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -6,15 +6,15 @@ use alloy_eips::{eip4844::BlobAndProofV1, eip7685::Requests, BlockId, BlockNumberOrTag}; use alloy_json_rpc::RpcObject; use alloy_primitives::{Address, BlockHash, Bytes, B256, U256, U64}; -use alloy_rpc_types::{ - state::StateOverride, BlockOverrides, EIP1186AccountProofResponse, Filter, Log, SyncStatus, -}; use alloy_rpc_types_engine::{ ClientVersionV1, ExecutionPayloadBodiesV1, ExecutionPayloadInputV2, ExecutionPayloadV1, ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, TransitionConfiguration, }; -use alloy_rpc_types_eth::transaction::TransactionRequest; +use alloy_rpc_types_eth::{ + state::StateOverride, transaction::TransactionRequest, BlockOverrides, + EIP1186AccountProofResponse, Filter, Log, SyncStatus, +}; use alloy_serde::JsonStorageKey; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use reth_engine_primitives::EngineTypes; @@ -110,7 +110,10 @@ pub trait EngineApi { /// Note: /// > Provider software MAY stop the corresponding build process after serving this call. #[method(name = "getPayloadV1")] - async fn get_payload_v1(&self, payload_id: PayloadId) -> RpcResult; + async fn get_payload_v1( + &self, + payload_id: PayloadId, + ) -> RpcResult; /// See also /// diff --git a/crates/rpc/rpc-api/src/otterscan.rs b/crates/rpc/rpc-api/src/otterscan.rs index ee805b482c34..b4679ae5ceca 100644 --- a/crates/rpc/rpc-api/src/otterscan.rs +++ b/crates/rpc/rpc-api/src/otterscan.rs @@ -1,12 +1,12 @@ +use alloy_eips::BlockId; use alloy_json_rpc::RpcObject; use alloy_primitives::{Address, Bytes, TxHash, B256}; -use alloy_rpc_types::Header; +use alloy_rpc_types_eth::Header; use alloy_rpc_types_trace::otterscan::{ BlockDetails, ContractCreator, InternalOperation, OtsBlockTransactions, TraceEntry, TransactionsWithReceipts, }; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::BlockId; /// Otterscan rpc interface. 
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "ots"))] diff --git a/crates/rpc/rpc-api/src/reth.rs b/crates/rpc/rpc-api/src/reth.rs index 98c31b78f9a4..0589ffc00cea 100644 --- a/crates/rpc/rpc-api/src/reth.rs +++ b/crates/rpc/rpc-api/src/reth.rs @@ -1,6 +1,6 @@ +use alloy_eips::BlockId; use alloy_primitives::{Address, U256}; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::BlockId; use std::collections::HashMap; /// Reth API namespace for reth-specific methods diff --git a/crates/rpc/rpc-api/src/trace.rs b/crates/rpc/rpc-api/src/trace.rs index 58dda422ab86..41e2b4c1c3ee 100644 --- a/crates/rpc/rpc-api/src/trace.rs +++ b/crates/rpc/rpc-api/src/trace.rs @@ -1,13 +1,14 @@ +use alloy_eips::BlockId; use alloy_primitives::{map::HashSet, Bytes, B256}; -use alloy_rpc_types::{state::StateOverride, BlockOverrides, Index}; -use alloy_rpc_types_eth::transaction::TransactionRequest; +use alloy_rpc_types_eth::{ + state::StateOverride, transaction::TransactionRequest, BlockOverrides, Index, +}; use alloy_rpc_types_trace::{ filter::TraceFilter, opcode::{BlockOpcodeGas, TransactionOpcodeGas}, parity::*, }; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::BlockId; /// Ethereum trace API #[cfg_attr(not(feature = "client"), rpc(server, namespace = "trace"))] diff --git a/crates/rpc/rpc-api/src/validation.rs b/crates/rpc/rpc-api/src/validation.rs index bbfa673d2598..5e4f2e261433 100644 --- a/crates/rpc/rpc-api/src/validation.rs +++ b/crates/rpc/rpc-api/src/validation.rs @@ -2,6 +2,7 @@ use alloy_rpc_types_beacon::relay::{ BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, + BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4, }; use jsonrpsee::proc_macros::rpc; @@ -22,4 +23,18 @@ pub trait BlockSubmissionValidationApi { &self, request: BuilderBlockValidationRequestV2, ) -> jsonrpsee::core::RpcResult<()>; + + /// A Request to validate a block submission. + #[method(name = "validateBuilderSubmissionV3")] + async fn validate_builder_submission_v3( + &self, + request: BuilderBlockValidationRequestV3, + ) -> jsonrpsee::core::RpcResult<()>; + + /// A Request to validate a block submission. 
+ #[method(name = "validateBuilderSubmissionV4")] + async fn validate_builder_submission_v4( + &self, + request: BuilderBlockValidationRequestV4, + ) -> jsonrpsee::core::RpcResult<()>; }
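For context, the new endpoints can be exercised with a plain jsonrpsee client. The sketch below is an assumption-laden illustration: it assumes a node exposing the `flashbots` namespace on localhost, the jsonrpsee `http-client` feature, and a placeholder body where a real `BuilderBlockValidationRequestV3` payload would be required for the call to succeed:

```rust
use jsonrpsee::{core::client::ClientT, http_client::HttpClientBuilder, rpc_params};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Assumed local node URL; adjust to your setup.
    let client = HttpClientBuilder::default().build("http://127.0.0.1:8545")?;

    // Placeholder request body; the server will reject it, which still
    // demonstrates that the method is routed.
    let request = serde_json::json!({});
    let result: Result<(), _> =
        client.request("flashbots_validateBuilderSubmissionV3", rpc_params![request]).await;
    println!("validation response: {result:?}");
    Ok(())
}
```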
diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index b9b511a078bc..04e97f99e34d 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # reth reth-ipc.workspace = true reth-chainspec.workspace = true +reth-consensus.workspace = true reth-network-api.workspace = true reth-node-core.workspace = true reth-provider.workspace = true @@ -28,12 +29,8 @@ reth-tasks = { workspace = true, features = ["rayon"] } reth-transaction-pool.workspace = true reth-evm.workspace = true reth-engine-primitives.workspace = true -reth-primitives.workspace = true -# ethereum -alloy-network.workspace = true -alloy-rpc-types.workspace = true -alloy-serde.workspace = true +alloy-consensus.workspace = true # rpc/net jsonrpsee = { workspace = true, features = ["server"] } @@ -67,11 +64,13 @@ reth-rpc-engine-api.workspace = true reth-tracing.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } reth-rpc-types-compat.workspace = true +reth-primitives.workspace = true alloy-primitives.workspace = true alloy-rpc-types-eth.workspace = true alloy-rpc-types-trace.workspace = true alloy-rpc-types-engine.workspace = true +alloy-eips.workspace = true tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } serde_json.workspace = true diff --git a/crates/rpc/rpc-builder/src/config.rs b/crates/rpc/rpc-builder/src/config.rs index 4ff98ae8d501..daff81fa2ae1 100644 --- a/crates/rpc/rpc-builder/src/config.rs +++ b/crates/rpc/rpc-builder/src/config.rs @@ -2,6 +2,7 @@ use std::{net::SocketAddr, path::PathBuf}; use jsonrpsee::server::ServerBuilder; use reth_node_core::{args::RpcServerArgs, utils::get_or_create_jwt_secret_from_path}; +use reth_rpc::ValidationApiConfig; use reth_rpc_eth_types::{EthConfig, EthStateCacheConfig, GasPriceOracleConfig}; use reth_rpc_layer::{JwtError, JwtSecret}; use reth_rpc_server_types::RpcModuleSelection; @@ -27,6 +28,9 @@ pub trait RethRpcServerConfig { /// The configured ethereum RPC settings. fn eth_config(&self) -> EthConfig; + /// The configured flashbots namespace settings. + fn flashbots_config(&self) -> ValidationApiConfig; + /// Returns state cache configuration. fn state_cache_config(&self) -> EthStateCacheConfig; @@ -101,6 +105,10 @@ impl RethRpcServerConfig for RpcServerArgs { .proof_permits(self.rpc_proof_permits) } + fn flashbots_config(&self) -> ValidationApiConfig { + ValidationApiConfig { disallow: self.builder_disallow.clone().unwrap_or_default() } + } + fn state_cache_config(&self) -> EthStateCacheConfig { EthStateCacheConfig { max_blocks: self.rpc_state_cache.max_blocks, @@ -124,7 +132,7 @@ impl RethRpcServerConfig for RpcServerArgs { fn transport_rpc_module_config(&self) -> TransportRpcModuleConfig { let mut config = TransportRpcModuleConfig::default() - .with_config(RpcModuleConfig::new(self.eth_config())); + .with_config(RpcModuleConfig::new(self.eth_config(), self.flashbots_config())); if self.http { config = config.with_http( diff --git a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs index 40acecfedf33..e88f6aa86bbd 100644 --- a/crates/rpc/rpc-builder/src/eth.rs +++ b/crates/rpc/rpc-builder/src/eth.rs @@ -1,5 +1,5 @@ +use alloy_consensus::Header; use reth_evm::ConfigureEvm; -use reth_primitives::Header; use reth_provider::{BlockReader, CanonStateSubscriptions, EvmEnvProvider, StateProviderFactory}; use reth_rpc::{EthFilter, EthPubSub}; use reth_rpc_eth_api::EthApiTypes; diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index ceafe206531a..27eceed98cb5 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -16,9 +16,9 @@ //! Configure only an http server with a selection of [`RethRpcModule`]s //! //! ``` +//! use alloy_consensus::Header; //! use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; //! use reth_network_api::{NetworkInfo, Peers}; -//! use reth_primitives::Header; //! use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider}; //! use reth_rpc::EthApi; //! use reth_rpc_builder::{ @@ -27,20 +27,22 @@ //! use reth_tasks::TokioTaskExecutor; //! use reth_transaction_pool::TransactionPool; //! -//! pub async fn launch<Provider, Pool, Network, Events, EvmConfig, BlockExecutor>( +//! pub async fn launch<Provider, Pool, Network, Events, EvmConfig, BlockExecutor, Consensus>( //! provider: Provider, //! pool: Pool, //! network: Network, //! events: Events, //! evm_config: EvmConfig, //! block_executor: BlockExecutor, +//! consensus: Consensus, //! ) where //! Provider: FullRpcProvider + AccountReader + ChangeSetReader, -//! Pool: TransactionPool + 'static, +//! Pool: TransactionPool + Unpin + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, //! Events: CanonStateSubscriptions + Clone + 'static, //! EvmConfig: ConfigureEvm<Header = Header>
, //! BlockExecutor: BlockExecutorProvider, +//! Consensus: reth_consensus::Consensus + Clone + 'static, //! { //! // configure the rpc module per transport //! let transports = TransportRpcModuleConfig::default().with_http(vec![ @@ -57,6 +59,7 @@ //! events, //! evm_config, //! block_executor, +//! consensus, //! ) //! .build(transports, Box::new(EthApi::with_spawner)); //! let handle = RpcServerConfig::default() @@ -70,10 +73,10 @@ //! //! //! ``` +//! use alloy_consensus::Header; //! use reth_engine_primitives::EngineTypes; //! use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; //! use reth_network_api::{NetworkInfo, Peers}; -//! use reth_primitives::Header; //! use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider}; //! use reth_rpc::EthApi; //! use reth_rpc_api::EngineApiServer; @@ -85,6 +88,7 @@ //! use reth_tasks::TokioTaskExecutor; //! use reth_transaction_pool::TransactionPool; //! use tokio::try_join; +//! //! pub async fn launch< //! Provider, //! Pool, @@ -94,6 +98,7 @@ //! EngineT, //! EvmConfig, //! BlockExecutor, +//! Consensus, //! >( //! provider: Provider, //! pool: Pool, @@ -102,15 +107,17 @@ //! engine_api: EngineApi, //! evm_config: EvmConfig, //! block_executor: BlockExecutor, +//! consensus: Consensus, //! ) where //! Provider: FullRpcProvider + AccountReader + ChangeSetReader, -//! Pool: TransactionPool + 'static, +//! Pool: TransactionPool + Unpin + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, //! Events: CanonStateSubscriptions + Clone + 'static, //! EngineApi: EngineApiServer, //! EngineT: EngineTypes, //! EvmConfig: ConfigureEvm
, //! BlockExecutor: BlockExecutorProvider, +//! Consensus: reth_consensus::Consensus + Clone + 'static, //! { //! // configure the rpc module per transport //! let transports = TransportRpcModuleConfig::default().with_http(vec![ @@ -127,6 +134,7 @@ //! events, //! evm_config, //! block_executor, +//! consensus, //! ); //! //! // configure the server modules @@ -154,9 +162,12 @@ use std::{ collections::HashMap, fmt::Debug, net::{Ipv4Addr, SocketAddr, SocketAddrV4}, + sync::Arc, time::{Duration, SystemTime, UNIX_EPOCH}, }; +use crate::{auth::AuthRpcModule, error::WsHttpSamePortError, metrics::RpcRequestMetrics}; +use alloy_consensus::Header; use error::{ConflictingModules, RpcError, ServerKind}; use eth::DynEthApiBuilder; use http::{header::AUTHORIZATION, HeaderMap}; @@ -169,17 +180,17 @@ use jsonrpsee::{ Methods, RpcModule, }; use reth_chainspec::EthereumHardforks; +use reth_consensus::Consensus; use reth_engine_primitives::EngineTypes; use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; -use reth_primitives::Header; use reth_provider::{ AccountReader, BlockReader, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, FullRpcProvider, StateProviderFactory, }; use reth_rpc::{ AdminApi, DebugApi, EngineEthApi, EthBundle, NetApi, OtterscanApi, RPCApi, RethApi, TraceApi, - TxPoolApi, Web3Api, + TxPoolApi, ValidationApi, ValidationApiConfig, Web3Api, }; use reth_rpc_api::servers::*; use reth_rpc_eth_api::{ @@ -187,15 +198,13 @@ use reth_rpc_eth_api::{ EthApiServer, EthApiTypes, FullEthApiServer, RpcBlock, RpcReceipt, RpcTransaction, }; use reth_rpc_eth_types::{EthConfig, EthStateCache, EthSubscriptionIdProvider}; -use reth_rpc_layer::{AuthLayer, Claims, JwtAuthValidator, JwtSecret}; +use reth_rpc_layer::{AuthLayer, Claims, CompressionLayer, JwtAuthValidator, JwtSecret}; use reth_tasks::{pool::BlockingTaskGuard, TaskSpawner, TokioTaskExecutor}; use reth_transaction_pool::{noop::NoopTransactionPool, TransactionPool}; use serde::{Deserialize, Serialize}; use tower::Layer; use tower_http::cors::CorsLayer; -use crate::{auth::AuthRpcModule, error::WsHttpSamePortError, metrics::RpcRequestMetrics}; - pub use cors::CorsDomainError; // re-export for convenience @@ -242,6 +251,7 @@ pub async fn launch, block_executor: BlockExecutor, + consensus: Arc, ) -> Result where Provider: FullRpcProvider + AccountReader + ChangeSetReader, @@ -249,7 +259,7 @@ where Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, - EvmConfig: ConfigureEvm
<Header = Header>, + EvmConfig: ConfigureEvm<Header = Header>
, EthApi: FullEthApiServer, BlockExecutor: BlockExecutorProvider, { @@ -265,6 +275,7 @@ where events, evm_config, block_executor, + consensus, ) .build(module_config, eth), ) @@ -275,7 +286,16 @@ where /// /// This is the main entrypoint and the easiest way to configure an RPC server. #[derive(Debug, Clone)] -pub struct RpcModuleBuilder { +pub struct RpcModuleBuilder< + Provider, + Pool, + Network, + Tasks, + Events, + EvmConfig, + BlockExecutor, + Consensus, +> { /// The Provider type to when creating all rpc handlers provider: Provider, /// The Pool type to when creating all rpc handlers @@ -290,14 +310,17 @@ pub struct RpcModuleBuilder - RpcModuleBuilder +impl + RpcModuleBuilder { /// Create a new instance of the builder + #[allow(clippy::too_many_arguments)] pub const fn new( provider: Provider, pool: Pool, @@ -306,32 +329,54 @@ impl events: Events, evm_config: EvmConfig, block_executor: BlockExecutor, + consensus: Consensus, ) -> Self { - Self { provider, pool, network, executor, events, evm_config, block_executor } + Self { provider, pool, network, executor, events, evm_config, block_executor, consensus } } /// Configure the provider instance. pub fn with_provider
<P>
( self, provider: P, - ) -> RpcModuleBuilder<P, Pool, Network, Tasks, Events, EvmConfig, BlockExecutor> + ) -> RpcModuleBuilder<P, Pool, Network, Tasks, Events, EvmConfig, BlockExecutor, Consensus> where P: BlockReader + StateProviderFactory + EvmEnvProvider + 'static, { - let Self { pool, network, executor, events, evm_config, block_executor, .. } = self; - RpcModuleBuilder { provider, network, pool, executor, events, evm_config, block_executor } + let Self { pool, network, executor, events, evm_config, block_executor, consensus, .. } = + self; + RpcModuleBuilder { + provider, + network, + pool, + executor, + events, + evm_config, + block_executor, + consensus, + } }
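Downstream, the new consensus slot is filled like any other builder component. A hedged sketch of the chaining, using the `NoopConsensus` placeholder that the in-tree docs and tests use (provider, events and evm config are left as the `()` placeholders from `default()` and must be set before `build` is called):

```rust
use reth_consensus::noop::NoopConsensus;
use reth_rpc_builder::RpcModuleBuilder;

fn configure() {
    // Each `with_*` call swaps one type parameter of the builder; the
    // consensus slot is independent of the others and can be set last.
    let _builder = RpcModuleBuilder::default()
        .with_noop_pool()
        .with_noop_network()
        .with_tokio_executor()
        .with_consensus(NoopConsensus::default());
}

fn main() {
    configure();
}
```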
/// Configure the transaction pool instance. pub fn with_pool<P>
( self, pool: P, - ) -> RpcModuleBuilder<Provider, P, Network, Tasks, Events, EvmConfig, BlockExecutor> + ) -> RpcModuleBuilder<Provider, P, Network, Tasks, Events, EvmConfig, BlockExecutor, Consensus> where P: TransactionPool + 'static, { - let Self { provider, network, executor, events, evm_config, block_executor, .. } = self; - RpcModuleBuilder { provider, network, pool, executor, events, evm_config, block_executor } + let Self { + provider, network, executor, events, evm_config, block_executor, consensus, .. + } = self; + RpcModuleBuilder { + provider, + network, + pool, + executor, + events, + evm_config, + block_executor, + consensus, + } } /// Configure a [`NoopTransactionPool`] instance. @@ -349,8 +394,11 @@ impl Events, EvmConfig, BlockExecutor, + Consensus, > { - let Self { provider, executor, events, network, evm_config, block_executor, .. } = self; + let Self { + provider, executor, events, network, evm_config, block_executor, consensus, .. + } = self; RpcModuleBuilder { provider, executor, @@ -359,6 +407,7 @@ impl evm_config, block_executor, pool: NoopTransactionPool::default(), + consensus, } } @@ -366,12 +415,23 @@ impl pub fn with_network<N>( self, network: N, - ) -> RpcModuleBuilder<Provider, Pool, N, Tasks, Events, EvmConfig, BlockExecutor> + ) -> RpcModuleBuilder<Provider, Pool, N, Tasks, Events, EvmConfig, BlockExecutor, Consensus> where N: NetworkInfo + Peers + 'static, { - let Self { provider, pool, executor, events, evm_config, block_executor, .. } = self; - RpcModuleBuilder { provider, network, pool, executor, events, evm_config, block_executor } + let Self { + provider, pool, executor, events, evm_config, block_executor, consensus, ..
} = self; - RpcModuleBuilder { provider, network, pool, executor, events, evm_config, block_executor } + let Self { + provider, pool, executor, network, evm_config, block_executor, consensus, .. + } = self; + RpcModuleBuilder { + provider, + network, + pool, + executor, + events, + evm_config, + block_executor, + consensus, + } } /// Configure the evm configuration type pub fn with_evm_config( self, evm_config: E, - ) -> RpcModuleBuilder + ) -> RpcModuleBuilder where E: ConfigureEvm + 'static, { - let Self { provider, pool, executor, network, events, block_executor, .. } = self; - RpcModuleBuilder { provider, network, pool, executor, events, evm_config, block_executor } + let Self { provider, pool, executor, network, events, block_executor, consensus, .. } = + self; + RpcModuleBuilder { + provider, + network, + pool, + executor, + events, + evm_config, + block_executor, + consensus, + } } /// Configure the block executor provider pub fn with_block_executor( self, block_executor: BE, - ) -> RpcModuleBuilder + ) -> RpcModuleBuilder where BE: BlockExecutorProvider, { - let Self { provider, network, pool, executor, events, evm_config, .. } = self; - RpcModuleBuilder { provider, network, pool, executor, events, evm_config, block_executor } + let Self { provider, network, pool, executor, events, evm_config, consensus, .. } = self; + RpcModuleBuilder { + provider, + network, + pool, + executor, + events, + evm_config, + block_executor, + consensus, + } + } + + /// Configure the consensus implementation. + pub fn with_consensus( + self, + consensus: C, + ) -> RpcModuleBuilder { + let Self { provider, network, pool, executor, events, evm_config, block_executor, .. } = + self; + RpcModuleBuilder { + provider, + network, + pool, + executor, + events, + evm_config, + block_executor, + consensus, + } } } -impl - RpcModuleBuilder +impl + RpcModuleBuilder where Provider: FullRpcProvider + AccountReader + ChangeSetReader, Pool: TransactionPool + 'static, @@ -481,6 +614,7 @@ where Events: CanonStateSubscriptions + Clone + 'static, EvmConfig: ConfigureEvm
<Header = Header>
, BlockExecutor: BlockExecutorProvider, + Consensus: reth_consensus::Consensus + Clone + 'static, { /// Configures all [`RpcModule`]s specific to the given [`TransportRpcModuleConfig`] which can /// be used to start the transport server(s). @@ -497,14 +631,23 @@ where ) -> ( TransportRpcModules, AuthRpcModule, - RpcRegistryInner, + RpcRegistryInner, ) where EngineT: EngineTypes, EngineApi: EngineApiServer, EthApi: FullEthApiServer, { - let Self { provider, pool, network, executor, events, evm_config, block_executor } = self; + let Self { + provider, + pool, + network, + executor, + events, + evm_config, + block_executor, + consensus, + } = self; let config = module_config.config.clone().unwrap_or_default(); @@ -514,6 +657,7 @@ where network, executor, events, + consensus, config, evm_config, eth, @@ -535,10 +679,11 @@ where /// # Example /// /// ```no_run + /// use alloy_consensus::Header; + /// use reth_consensus::noop::NoopConsensus; /// use reth_evm::ConfigureEvm; /// use reth_evm_ethereum::execute::EthExecutorProvider; /// use reth_network_api::noop::NoopNetwork; - /// use reth_primitives::Header; /// use reth_provider::test_utils::{NoopProvider, TestCanonStateSubscriptions}; /// use reth_rpc::EthApi; /// use reth_rpc_builder::RpcModuleBuilder; @@ -554,6 +699,7 @@ where /// .with_events(TestCanonStateSubscriptions::default()) /// .with_evm_config(evm) /// .with_block_executor(EthExecutorProvider::mainnet()) + /// .with_consensus(NoopConsensus::default()) /// .into_registry(Default::default(), Box::new(EthApi::with_spawner)); /// /// let eth_api = registry.eth_api(); @@ -563,17 +709,27 @@ where self, config: RpcModuleConfig, eth: DynEthApiBuilder, - ) -> RpcRegistryInner + ) -> RpcRegistryInner where EthApi: EthApiTypes + 'static, { - let Self { provider, pool, network, executor, events, evm_config, block_executor } = self; + let Self { + provider, + pool, + network, + executor, + events, + evm_config, + block_executor, + consensus, + } = self; RpcRegistryInner::new( provider, pool, network, executor, events, + consensus, config, evm_config, eth, @@ -593,7 +749,16 @@ where { let mut modules = TransportRpcModules::default(); - let Self { provider, pool, network, executor, events, evm_config, block_executor } = self; + let Self { + provider, + pool, + network, + executor, + events, + evm_config, + block_executor, + consensus, + } = self; if !module_config.is_empty() { let TransportRpcModuleConfig { http, ws, ipc, config } = module_config.clone(); @@ -604,6 +769,7 @@ where network, executor, events, + consensus, config.unwrap_or_default(), evm_config, eth, @@ -620,9 +786,9 @@ where } } -impl Default for RpcModuleBuilder<(), (), (), (), (), (), ()> { +impl Default for RpcModuleBuilder<(), (), (), (), (), (), (), ()> { fn default() -> Self { - Self::new((), (), (), (), (), (), ()) + Self::new((), (), (), (), (), (), (), ()) } } @@ -631,6 +797,8 @@ impl Default for RpcModuleBuilder<(), (), (), (), (), (), ()> { pub struct RpcModuleConfig { /// `eth` namespace settings eth: EthConfig, + /// `flashbots` namespace settings + flashbots: ValidationApiConfig, } // === impl RpcModuleConfig === @@ -642,8 +810,8 @@ impl RpcModuleConfig { } /// Returns a new RPC module config given the eth namespace config - pub const fn new(eth: EthConfig) -> Self { - Self { eth } + pub const fn new(eth: EthConfig, flashbots: ValidationApiConfig) -> Self { + Self { eth, flashbots } } /// Get a reference to the eth namespace config @@ -661,6 +829,7 @@ impl RpcModuleConfig { #[derive(Clone, Debug, Default)] pub struct 
RpcModuleConfigBuilder { eth: Option, + flashbots: Option, } // === impl RpcModuleConfigBuilder === @@ -672,10 +841,16 @@ impl RpcModuleConfigBuilder { self } + /// Configures a custom flashbots namespace config + pub fn flashbots(mut self, flashbots: ValidationApiConfig) -> Self { + self.flashbots = Some(flashbots); + self + } + /// Consumes the type and creates the [`RpcModuleConfig`] pub fn build(self) -> RpcModuleConfig { - let Self { eth } = self; - RpcModuleConfig { eth: eth.unwrap_or_default() } + let Self { eth, flashbots } = self; + RpcModuleConfig { eth: eth.unwrap_or_default(), flashbots: flashbots.unwrap_or_default() } } /// Get a reference to the eth namespace config, if any @@ -704,6 +879,7 @@ pub struct RpcRegistryInner< Events, EthApi: EthApiTypes, BlockExecutor, + Consensus, > { provider: Provider, pool: Pool, @@ -711,6 +887,9 @@ pub struct RpcRegistryInner< executor: Tasks, events: Events, block_executor: BlockExecutor, + consensus: Consensus, + /// Holds the configuration for the RPC modules + config: RpcModuleConfig, /// Holds a all `eth_` namespace handlers eth: EthHandlers, /// to put trace calls behind semaphore @@ -721,8 +900,8 @@ pub struct RpcRegistryInner< // === impl RpcRegistryInner === -impl - RpcRegistryInner +impl + RpcRegistryInner where Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, Pool: Send + Sync + Clone + 'static, @@ -740,6 +919,7 @@ where network: Network, executor: Tasks, events: Events, + consensus: Consensus, config: RpcModuleConfig, evm_config: EvmConfig, eth_api_builder: DynEthApiBuilder< @@ -775,6 +955,8 @@ where network, eth, executor, + consensus, + config, modules: Default::default(), blocking_pool_guard, events, @@ -783,8 +965,8 @@ where } } -impl - RpcRegistryInner +impl + RpcRegistryInner where EthApi: EthApiTypes, { @@ -841,8 +1023,8 @@ where } } -impl - RpcRegistryInner +impl + RpcRegistryInner where Network: NetworkInfo + Clone + 'static, EthApi: EthApiTypes, @@ -880,8 +1062,8 @@ where } } -impl - RpcRegistryInner +impl + RpcRegistryInner where Provider: FullRpcProvider + AccountReader + ChangeSetReader, Network: NetworkInfo + Peers + Clone + 'static, @@ -911,14 +1093,7 @@ where /// If called outside of the tokio runtime. 
See also [`Self::eth_api`] pub fn register_ots(&mut self) -> &mut Self where - EthApi: TraceExt - + EthTransactions< - NetworkTypes: alloy_network::Network< - TransactionResponse = alloy_serde::WithOtherFields< - alloy_rpc_types::Transaction, - >, - >, - >, + EthApi: TraceExt + EthTransactions, { let otterscan_api = self.otterscan_api(); self.modules.insert(RethRpcModule::Ots, otterscan_api.into_rpc().into()); @@ -993,8 +1168,8 @@ where } } -impl - RpcRegistryInner +impl + RpcRegistryInner where Provider: FullRpcProvider + AccountReader + ChangeSetReader, Network: NetworkInfo + Peers + Clone + 'static, @@ -1066,10 +1241,23 @@ where pub fn reth_api(&self) -> RethApi { RethApi::new(self.provider.clone(), Box::new(self.executor.clone())) } + + /// Instantiates `ValidationApi` + pub fn validation_api(&self) -> ValidationApi + where + Consensus: reth_consensus::Consensus + Clone + 'static, + { + ValidationApi::new( + self.provider.clone(), + Arc::new(self.consensus.clone()), + self.block_executor.clone(), + self.config.flashbots.clone(), + ) + } } -impl - RpcRegistryInner +impl + RpcRegistryInner where Provider: FullRpcProvider + AccountReader + ChangeSetReader, Pool: TransactionPool + 'static, @@ -1078,6 +1266,7 @@ where Events: CanonStateSubscriptions + Clone + 'static, EthApi: FullEthApiServer, BlockExecutor: BlockExecutorProvider, + Consensus: reth_consensus::Consensus + Clone + 'static, { /// Configures the auth module that includes the /// * `engine_` namespace @@ -1222,6 +1411,14 @@ where .into_rpc() .into() } + RethRpcModule::Flashbots => ValidationApi::new( + self.provider.clone(), + Arc::new(self.consensus.clone()), + self.block_executor.clone(), + self.config.flashbots.clone(), + ) + .into_rpc() + .into(), }) .clone() }) @@ -1449,6 +1646,12 @@ impl RpcServerConfig { jwt_secret.map(|secret| AuthLayer::new(JwtAuthValidator::new(secret))) } + /// Returns a [`CompressionLayer`] that adds compression support (gzip, deflate, brotli, zstd) + /// based on the client's `Accept-Encoding` header + fn maybe_compression_layer() -> Option { + Some(CompressionLayer::new()) + } + /// Builds and starts the configured server(s): http, ws, ipc. /// /// If both http and ws are on the same port, they are combined into one server. @@ -1513,7 +1716,8 @@ impl RpcServerConfig { .set_http_middleware( tower::ServiceBuilder::new() .option_layer(Self::maybe_cors_layer(cors)?) - .option_layer(Self::maybe_jwt_layer(self.jwt_secret)), + .option_layer(Self::maybe_jwt_layer(self.jwt_secret)) + .option_layer(Self::maybe_compression_layer()), ) .set_rpc_middleware( self.rpc_middleware.clone().layer( @@ -1585,8 +1789,9 @@ impl RpcServerConfig { .http_only() .set_http_middleware( tower::ServiceBuilder::new() - .option_layer(Self::maybe_cors_layer(self.http_cors_domains.clone())?) - .option_layer(Self::maybe_jwt_layer(self.jwt_secret)), + .option_layer(Self::maybe_cors_layer(self.ws_cors_domains.clone())?) + .option_layer(Self::maybe_jwt_layer(self.jwt_secret)) + .option_layer(Self::maybe_compression_layer()), ) .set_rpc_middleware( self.rpc_middleware.clone().layer( @@ -1679,7 +1884,7 @@ impl TransportRpcModuleConfig { } /// Sets a custom [`RpcModuleConfig`] for the configured modules. - pub const fn with_config(mut self, config: RpcModuleConfig) -> Self { + pub fn with_config(mut self, config: RpcModuleConfig) -> Self { self.config = Some(config); self } @@ -1810,7 +2015,7 @@ impl TransportRpcModules { Ok(false) } - /// Merge the given [Methods] in all configured methods. 
+ /// Merge the given [`Methods`] in all configured methods. /// /// Fails if any of the methods in other is present already. pub fn merge_configured( @@ -1901,7 +2106,22 @@ impl TransportRpcModules { http_removed || ws_removed || ipc_removed } - /// Replace the given [Methods] in the configured http methods. + /// Renames a method in all configured transports by: + /// 1. Removing the old method name. + /// 2. Adding the new method. + pub fn rename( + &mut self, + old_name: &'static str, + new_method: impl Into<Methods>, + ) -> Result<(), RegisterMethodError> { + // Remove the old method from all configured transports + self.remove_method_from_configured(old_name); + + // Merge the new method into the configured transports + self.merge_configured(new_method) + } + + /// Replace the given [`Methods`] in the configured http methods. /// /// Fails if any of the methods in other is present already or if the method being removed is /// not present @@ -2273,6 +2493,43 @@ mod tests { assert!(modules.ipc.as_ref().unwrap().method("anything").is_none()); } + #[test] + fn test_transport_rpc_module_rename() { + let mut modules = TransportRpcModules { + http: Some(create_test_module()), + ws: Some(create_test_module()), + ipc: Some(create_test_module()), + ..Default::default() + }; + + // Verify that the old method we want to rename exists at the start + assert!(modules.http.as_ref().unwrap().method("anything").is_some()); + assert!(modules.ws.as_ref().unwrap().method("anything").is_some()); + assert!(modules.ipc.as_ref().unwrap().method("anything").is_some()); + + // Verify that the new method does not exist at the start + assert!(modules.http.as_ref().unwrap().method("something").is_none()); + assert!(modules.ws.as_ref().unwrap().method("something").is_none()); + assert!(modules.ipc.as_ref().unwrap().method("something").is_none()); + + // Create another module + let mut other_module = RpcModule::new(()); + other_module.register_method("something", |_, _, _| "fails").unwrap(); + + // Rename the method + modules.rename("anything", other_module).expect("rename failed"); + + // Verify that the old method was removed from all transports + assert!(modules.http.as_ref().unwrap().method("anything").is_none()); + assert!(modules.ws.as_ref().unwrap().method("anything").is_none()); + assert!(modules.ipc.as_ref().unwrap().method("anything").is_none()); + + // Verify that the new method was added to all transports + assert!(modules.http.as_ref().unwrap().method("something").is_some()); + assert!(modules.ws.as_ref().unwrap().method("something").is_some()); + assert!(modules.ipc.as_ref().unwrap().method("something").is_some()); + } +
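The rename helper composes two existing primitives (remove, then merge), so a call site is a one-liner. A hedged usage sketch outside the test, with illustrative method names:

```rust
use jsonrpsee::RpcModule;
use reth_rpc_builder::TransportRpcModules;

fn rename_example(mut modules: TransportRpcModules) {
    // A replacement module carrying the method under its new name.
    let mut replacement = RpcModule::new(());
    replacement.register_method("eth_newName", |_, _, _| "ok").expect("method registers");

    // Drops `eth_oldName` from every configured transport, then merges the
    // replacement into http, ws and ipc alike.
    modules.rename("eth_oldName", replacement).expect("rename succeeds");
}

fn main() {
    // With no transports configured, both steps are effectively no-ops.
    rename_example(TransportRpcModules::default());
}
```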
#[test] fn test_replace_http_method() { let mut modules = diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index 7a8093c5062b..8393d9427a6b 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -2,12 +2,12 @@ //! Standalone http tests use crate::utils::{launch_http, launch_http_ws, launch_ws}; +use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::{hex_literal::hex, Address, Bytes, TxHash, B256, B64, U256, U64}; -use alloy_rpc_types::{ - Block, FeeHistory, Filter, Index, Log, PendingTransactionFilterKind, SyncStatus, Transaction, - TransactionReceipt, +use alloy_rpc_types_eth::{ + transaction::TransactionRequest, Block, FeeHistory, Filter, Index, Log, + PendingTransactionFilterKind, SyncStatus, Transaction, TransactionReceipt, }; -use alloy_rpc_types_eth::transaction::TransactionRequest; use alloy_rpc_types_trace::filter::TraceFilter; use jsonrpsee::{ core::{ @@ -19,7 +19,7 @@ use jsonrpsee::{ types::error::ErrorCode, }; use reth_network_peers::NodeRecord; -use reth_primitives::{BlockId, BlockNumberOrTag, Receipt}; +use reth_primitives::Receipt; use reth_rpc_api::{ clients::{AdminApiClient, EthApiClient}, DebugApiClient, EthFilterApiClient, NetApiClient, OtterscanClient, TraceApiClient, @@ -278,7 +278,13 @@ where .await .unwrap(); EthApiClient::<Transaction, Block, Receipt>::syncing(client).await.unwrap(); - EthApiClient::<Transaction, Block, Receipt>::send_transaction(client, transaction_request) + EthApiClient::<Transaction, Block, Receipt>::send_transaction( + client, + transaction_request.clone(), + ) + .await + .unwrap_err(); + EthApiClient::<Transaction, Block, Receipt>::sign_transaction(client, transaction_request) .await .unwrap_err(); EthApiClient::<Transaction, Block, Receipt>::hashrate(client).await.unwrap(); @@ -318,12 +324,6 @@ where .err() .unwrap() )); - assert!(is_unimplemented( - EthApiClient::<Transaction, Block, Receipt>::sign_transaction(client, call_request.clone()) - .await - .err() - .unwrap() - )); } async fn test_basic_debug_calls<C>(client: &C) diff --git a/crates/rpc/rpc-builder/tests/it/middleware.rs b/crates/rpc/rpc-builder/tests/it/middleware.rs index bcc26dcad895..0e2186e56eeb 100644 --- a/crates/rpc/rpc-builder/tests/it/middleware.rs +++ b/crates/rpc/rpc-builder/tests/it/middleware.rs @@ -1,5 +1,5 @@ use crate::utils::{test_address, test_rpc_builder}; -use alloy_rpc_types::{Block, Receipt, Transaction}; +use alloy_rpc_types_eth::{Block, Receipt, Transaction}; use jsonrpsee::{ server::{middleware::rpc::RpcServiceT, RpcServiceBuilder}, types::Request, diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index 44614ea49a85..175992c0f14b 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -3,6 +3,7 @@ use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use alloy_rpc_types_engine::{ClientCode, ClientVersionV1}; use reth_beacon_consensus::BeaconConsensusEngineHandle; use reth_chainspec::MAINNET; +use reth_consensus::noop::NoopConsensus; use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; use reth_evm::execute::BasicBlockExecutorProvider; use reth_evm_ethereum::{execute::EthExecutionStrategyFactory, EthEvmConfig}; @@ -126,6 +127,7 @@ pub fn test_rpc_builder() -> RpcModuleBuilder< TestCanonStateSubscriptions, EthEvmConfig, BasicBlockExecutorProvider<EthExecutionStrategyFactory>, + NoopConsensus, > { RpcModuleBuilder::default() .with_provider(NoopProvider::default()) @@ -137,4 +139,5 @@ .with_block_executor( BasicBlockExecutorProvider::new(EthExecutionStrategyFactory::mainnet()), ) + .with_consensus(NoopConsensus::default()) } diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index 00503f2c1dd7..62d1eea32254 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -45,6 +45,7 @@ jsonrpsee-types.workspace = true
serde.workspace = true thiserror.workspace = true tracing.workspace = true +parking_lot.workspace = true [dev-dependencies] reth-ethereum-engine-primitives.workspace = true diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index eb280408ecdb..666154f3b223 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -1,7 +1,7 @@ use crate::{ capabilities::EngineCapabilities, metrics::EngineApiMetrics, EngineApiError, EngineApiResult, }; -use alloy_eips::{eip4844::BlobAndProofV1, eip7685::Requests}; +use alloy_eips::{eip1898::BlockHashOrNumber, eip4844::BlobAndProofV1, eip7685::Requests}; use alloy_primitives::{BlockHash, BlockNumber, B256, U64}; use alloy_rpc_types_engine::{ CancunPayloadFields, ClientVersionV1, ExecutionPayload, ExecutionPayloadBodiesV1, @@ -10,6 +10,7 @@ use alloy_rpc_types_engine::{ }; use async_trait::async_trait; use jsonrpsee_core::RpcResult; +use parking_lot::Mutex; use reth_beacon_consensus::BeaconConsensusEngineHandle; use reth_chainspec::{EthereumHardforks, Hardforks}; use reth_engine_primitives::{EngineTypes, EngineValidator}; @@ -19,7 +20,7 @@ use reth_payload_primitives::{ validate_payload_timestamp, EngineApiMessageVersion, PayloadBuilderAttributes, PayloadOrAttributes, }; -use reth_primitives::{Block, BlockHashOrNumber, EthereumHardfork}; +use reth_primitives::{Block, EthereumHardfork}; use reth_rpc_api::EngineApiServer; use reth_rpc_types_compat::engine::payload::{ convert_payload_input_v2_to_payload, convert_to_payload_body_v1, @@ -67,6 +68,8 @@ struct EngineApiInner>, } impl @@ -102,6 +105,7 @@ where capabilities, tx_pool, validator, + latest_new_payload_response: Mutex::new(None), }); Self { inner } } @@ -140,11 +144,27 @@ where self.inner .validator .validate_version_specific_fields(EngineApiMessageVersion::V1, payload_or_attrs)?; + Ok(self .inner .beacon_consensus .new_payload(payload, ExecutionPayloadSidecar::none()) - .await?) + .await + .inspect(|_| self.inner.on_new_payload_response())?) + } + + /// Metered version of `new_payload_v1`. + async fn new_payload_v1_metered( + &self, + payload: ExecutionPayloadV1, + ) -> EngineApiResult { + let start = Instant::now(); + let gas_used = payload.gas_used; + let res = Self::new_payload_v1(self, payload).await; + let elapsed = start.elapsed(); + self.inner.metrics.latency.new_payload_v1.record(elapsed); + self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); + res } /// See also @@ -164,7 +184,22 @@ where .inner .beacon_consensus .new_payload(payload, ExecutionPayloadSidecar::none()) - .await?) + .await + .inspect(|_| self.inner.on_new_payload_response())?) + } + + /// Metered version of `new_payload_v2`. + pub async fn new_payload_v2_metered( + &self, + payload: ExecutionPayloadInputV2, + ) -> EngineApiResult { + let start = Instant::now(); + let gas_used = payload.execution_payload.gas_used; + let res = Self::new_payload_v2(self, payload).await; + let elapsed = start.elapsed(); + self.inner.metrics.latency.new_payload_v2.record(elapsed); + self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); + res } /// See also @@ -194,7 +229,25 @@ where parent_beacon_block_root, }), ) - .await?) + .await + .inspect(|_| self.inner.on_new_payload_response())?) 
+ } + + // Metrics version of `new_payload_v3` + async fn new_payload_v3_metered( + &self, + payload: ExecutionPayloadV3, + versioned_hashes: Vec, + parent_beacon_block_root: B256, + ) -> RpcResult { + let start = Instant::now(); + let gas_used = payload.payload_inner.payload_inner.gas_used; + let res = + Self::new_payload_v3(self, payload, versioned_hashes, parent_beacon_block_root).await; + let elapsed = start.elapsed(); + self.inner.metrics.latency.new_payload_v3.record(elapsed); + self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); + Ok(res?) } /// See also @@ -225,7 +278,32 @@ where execution_requests, ), ) - .await?) + .await + .inspect(|_| self.inner.on_new_payload_response())?) + } + + /// Metrics version of `new_payload_v4` + async fn new_payload_v4_metered( + &self, + payload: ExecutionPayloadV3, + versioned_hashes: Vec, + parent_beacon_block_root: B256, + execution_requests: Requests, + ) -> RpcResult { + let start = Instant::now(); + let gas_used = payload.payload_inner.payload_inner.gas_used; + let res = Self::new_payload_v4( + self, + payload, + versioned_hashes, + parent_beacon_block_root, + execution_requests, + ) + .await; + let elapsed = start.elapsed(); + self.inner.metrics.latency.new_payload_v4.record(elapsed); + self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); + Ok(res?) } /// Sends a message to the beacon consensus engine to update the fork choice _without_ @@ -281,7 +359,7 @@ where pub async fn get_payload_v1( &self, payload_id: PayloadId, - ) -> EngineApiResult { + ) -> EngineApiResult { self.inner .payload_store .resolve(payload_id) @@ -598,6 +676,8 @@ where state: ForkchoiceState, payload_attrs: Option, ) -> EngineApiResult { + self.inner.record_elapsed_time_on_fcu(); + if let Some(ref attrs) = payload_attrs { let attr_validation_res = self.inner.validator.ensure_well_formed_attributes(version, attrs); @@ -616,7 +696,8 @@ where // To do this, we set the payload attrs to `None` if attribute validation failed, but // we still apply the forkchoice update. if let Err(err) = attr_validation_res { - let fcu_res = self.inner.beacon_consensus.fork_choice_updated(state, None).await?; + let fcu_res = + self.inner.beacon_consensus.fork_choice_updated(state, None, version).await?; // TODO: decide if we want this branch - the FCU INVALID response might be more // useful than the payload attributes INVALID response if fcu_res.is_invalid() { @@ -626,7 +707,27 @@ where } } - Ok(self.inner.beacon_consensus.fork_choice_updated(state, payload_attrs).await?) + Ok(self.inner.beacon_consensus.fork_choice_updated(state, payload_attrs, version).await?) + } +} + +impl + EngineApiInner +where + EngineT: EngineTypes, +{ + /// Tracks the elapsed time between the new payload response and the received forkchoice update + /// request. + fn record_elapsed_time_on_fcu(&self) { + if let Some(start_time) = self.latest_new_payload_response.lock().take() { + let elapsed_time = start_time.elapsed(); + self.metrics.latency.new_payload_forkchoice_updated_time_diff.record(elapsed_time); + } + } + + /// Updates the timestamp for the latest new payload response. 
+ fn on_new_payload_response(&self) { + self.latest_new_payload_response.lock().replace(Instant::now()); } } @@ -645,26 +746,14 @@ where /// Caution: This should not accept the `withdrawals` field async fn new_payload_v1(&self, payload: ExecutionPayloadV1) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_newPayloadV1"); - let start = Instant::now(); - let gas_used = payload.gas_used; - let res = Self::new_payload_v1(self, payload).await; - let elapsed = start.elapsed(); - self.inner.metrics.latency.new_payload_v1.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); - Ok(res?) + Ok(self.new_payload_v1_metered(payload).await?) } /// Handler for `engine_newPayloadV2` /// See also async fn new_payload_v2(&self, payload: ExecutionPayloadInputV2) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_newPayloadV2"); - let start = Instant::now(); - let gas_used = payload.execution_payload.gas_used; - let res = Self::new_payload_v2(self, payload).await; - let elapsed = start.elapsed(); - self.inner.metrics.latency.new_payload_v2.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); - Ok(res?) + Ok(self.new_payload_v2_metered(payload).await?) } /// Handler for `engine_newPayloadV3` @@ -676,14 +765,7 @@ where parent_beacon_block_root: B256, ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_newPayloadV3"); - let start = Instant::now(); - let gas_used = payload.payload_inner.payload_inner.gas_used; - let res = - Self::new_payload_v3(self, payload, versioned_hashes, parent_beacon_block_root).await; - let elapsed = start.elapsed(); - self.inner.metrics.latency.new_payload_v3.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); - Ok(res?) + Ok(self.new_payload_v3_metered(payload, versioned_hashes, parent_beacon_block_root).await?) } /// Handler for `engine_newPayloadV4` @@ -696,20 +778,14 @@ where execution_requests: Requests, ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_newPayloadV4"); - let start = Instant::now(); - let gas_used = payload.payload_inner.payload_inner.gas_used; - let res = Self::new_payload_v4( - self, - payload, - versioned_hashes, - parent_beacon_block_root, - execution_requests, - ) - .await; - let elapsed = start.elapsed(); - self.inner.metrics.latency.new_payload_v4.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); - Ok(res?) + Ok(self + .new_payload_v4_metered( + payload, + versioned_hashes, + parent_beacon_block_root, + execution_requests, + ) + .await?) 
} /// Handler for `engine_forkchoiceUpdatedV1` @@ -774,7 +850,7 @@ where async fn get_payload_v1( &self, payload_id: PayloadId, - ) -> RpcResult { + ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadV1"); let start = Instant::now(); let res = Self::get_payload_v1(self, payload_id).await; diff --git a/crates/rpc/rpc-engine-api/src/metrics.rs b/crates/rpc/rpc-engine-api/src/metrics.rs index 8d0106f9dd93..9325ce267784 100644 --- a/crates/rpc/rpc-engine-api/src/metrics.rs +++ b/crates/rpc/rpc-engine-api/src/metrics.rs @@ -34,6 +34,8 @@ pub(crate) struct EngineApiLatencyMetrics { pub(crate) fork_choice_updated_v2: Histogram, /// Latency for `engine_forkchoiceUpdatedV3` pub(crate) fork_choice_updated_v3: Histogram, + /// Time diff between `engine_newPayloadV*` and the next FCU + pub(crate) new_payload_forkchoice_updated_time_diff: Histogram, /// Latency for `engine_getPayloadV1` pub(crate) get_payload_v1: Histogram, /// Latency for `engine_getPayloadV2` diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index febbc291e35e..78b0351d4a5c 100644 --- a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -1,13 +1,14 @@ //! Some payload tests -use alloy_primitives::{Bytes, Sealable, U256}; +use alloy_eips::eip4895::Withdrawals; +use alloy_primitives::{Bytes, U256}; use alloy_rlp::{Decodable, Error as RlpError}; use alloy_rpc_types_engine::{ ExecutionPayload, ExecutionPayloadBodyV1, ExecutionPayloadSidecar, ExecutionPayloadV1, PayloadError, }; use assert_matches::assert_matches; -use reth_primitives::{proofs, Block, SealedBlock, SealedHeader, TransactionSigned, Withdrawals}; +use reth_primitives::{proofs, Block, SealedBlock, SealedHeader, TransactionSigned}; use reth_rpc_types_compat::engine::payload::{ block_to_payload, block_to_payload_v1, convert_to_payload_body_v1, try_into_sealed_block, try_payload_v1_to_block, @@ -23,10 +24,8 @@ fn transform_block Block>(src: SealedBlock, f: F) -> Executi transformed.header.transactions_root = proofs::calculate_transaction_root(&transformed.body.transactions); transformed.header.ommers_hash = proofs::calculate_ommers_root(&transformed.body.ommers); - let sealed = transformed.header.seal_slow(); - let (header, seal) = sealed.into_parts(); block_to_payload(SealedBlock { - header: SealedHeader::new(header, seal), + header: SealedHeader::seal(transformed.header), body: transformed.body, }) } diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index 9d0f6cfd83d6..e4b1b28074f0 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -30,15 +30,16 @@ reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true reth-network-api.workspace = true reth-trie.workspace = true +reth-node-api.workspace = true # ethereum +alloy-serde.workspace = true alloy-eips.workspace = true alloy-dyn-abi = { workspace = true, features = ["eip712"] } alloy-json-rpc.workspace = true alloy-network.workspace = true alloy-primitives.workspace = true alloy-rpc-types-eth.workspace = true -alloy-rpc-types.workspace = true alloy-rpc-types-mev.workspace = true alloy-consensus.workspace = true diff --git a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs index 66bc5a44d2db..9cd9ba2921ae 100644 --- a/crates/rpc/rpc-eth-api/src/core.rs +++ b/crates/rpc/rpc-eth-api/src/core.rs @@ -1,19 +1,18 @@ //! Implementation of the [`jsonrpsee`] generated [`EthApiServer`] trait. 
Handles RPC requests for //! the `eth_` namespace. use alloy_dyn_abi::TypedData; -use alloy_eips::eip2930::AccessListResult; +use alloy_eips::{eip2930::AccessListResult, BlockId, BlockNumberOrTag}; use alloy_json_rpc::RpcObject; use alloy_primitives::{Address, Bytes, B256, B64, U256, U64}; -use alloy_rpc_types::{ - serde_helpers::JsonStorageKey, +use alloy_rpc_types_eth::{ simulate::{SimulatePayload, SimulatedBlock}, state::{EvmOverrides, StateOverride}, + transaction::TransactionRequest, BlockOverrides, Bundle, EIP1186AccountProofResponse, EthCallResponse, FeeHistory, Header, Index, StateContext, SyncStatus, Work, }; -use alloy_rpc_types_eth::transaction::TransactionRequest; +use alloy_serde::JsonStorageKey; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{BlockId, BlockNumberOrTag}; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use tracing::trace; @@ -277,7 +276,7 @@ pub trait EthApi { &self, address: Address, block: BlockId, - ) -> RpcResult>; + ) -> RpcResult>; /// Introduced in EIP-1559, returns suggestion for the priority for dynamic fee transactions. #[method(name = "maxPriorityFeePerGas")] @@ -502,7 +501,8 @@ where trace!(target: "rpc::eth", ?hash, "Serving eth_getTransactionByHash"); Ok(EthTransactions::transaction_by_hash(self, hash) .await? - .map(|tx| tx.into_transaction(self.tx_resp_builder()))) + .map(|tx| tx.into_transaction(self.tx_resp_builder())) + .transpose()?) } /// Handler for: `eth_getRawTransactionByBlockHashAndIndex` @@ -695,7 +695,7 @@ where &self, address: Address, block: BlockId, - ) -> RpcResult> { + ) -> RpcResult> { trace!(target: "rpc::eth", "Serving eth_getAccount"); Ok(EthState::get_account(self, address, block).await?) } @@ -780,8 +780,9 @@ where } /// Handler for: `eth_signTransaction` - async fn sign_transaction(&self, _transaction: TransactionRequest) -> RpcResult { - Err(internal_rpc_err("unimplemented")) + async fn sign_transaction(&self, request: TransactionRequest) -> RpcResult { + trace!(target: "rpc::eth", ?request, "Serving eth_signTransaction"); + Ok(EthTransactions::sign_transaction(self, request).await?) } /// Handler for: `eth_signTypedData` diff --git a/crates/rpc/rpc-eth-api/src/filter.rs b/crates/rpc/rpc-eth-api/src/filter.rs index c73d96728433..1acba351af7c 100644 --- a/crates/rpc/rpc-eth-api/src/filter.rs +++ b/crates/rpc/rpc-eth-api/src/filter.rs @@ -1,7 +1,7 @@ //! `eth_` RPC API for filtering. use alloy_json_rpc::RpcObject; -use alloy_rpc_types::{Filter, FilterChanges, FilterId, Log, PendingTransactionFilterKind}; +use alloy_rpc_types_eth::{Filter, FilterChanges, FilterId, Log, PendingTransactionFilterKind}; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; /// Rpc Interface for poll-based ethereum filter API. 
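The payload-to-forkchoice latency tracking added in the engine API above reduces to a small take/replace pattern around a shared timestamp: stamp the clock when a `newPayload` response goes out, and consume the stamp when the next forkchoice update arrives. A self-contained sketch (method names mirror the diff; the struct itself is illustrative and assumes `parking_lot` as a dependency):

```rust
use parking_lot::Mutex;
use std::time::{Duration, Instant};

/// Illustrative holder for the latest `newPayload` response time.
struct FcuTimer {
    latest_new_payload_response: Mutex<Option<Instant>>,
}

impl FcuTimer {
    /// Called after every successful `engine_newPayloadV*` response.
    fn on_new_payload_response(&self) {
        self.latest_new_payload_response.lock().replace(Instant::now());
    }

    /// Called at the start of every forkchoice update; `take` ensures one
    /// FCU consumes at most one payload timestamp.
    fn record_elapsed_time_on_fcu(&self) -> Option<Duration> {
        self.latest_new_payload_response.lock().take().map(|start| start.elapsed())
    }
}

fn main() {
    let timer = FcuTimer { latest_new_payload_response: Mutex::new(None) };
    timer.on_new_payload_response();
    let gap = timer.record_elapsed_time_on_fcu();
    println!("newPayload -> FCU gap: {gap:?}");
    // A second FCU without an intervening payload observes no timestamp.
    assert!(timer.record_elapsed_time_on_fcu().is_none());
}
```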
diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index da5f275ef0cf..251ca225eb19 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -2,14 +2,14 @@ use std::sync::Arc; -use alloy_rpc_types::{Header, Index}; +use alloy_eips::BlockId; +use alloy_rpc_types_eth::{Block, Header, Index}; use futures::Future; -use reth_primitives::{BlockId, Receipt, SealedBlock, SealedBlockWithSenders}; +use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders}; use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, HeaderProvider}; -use reth_rpc_eth_types::EthStateCache; -use reth_rpc_types_compat::block::{from_block, uncle_block_from_header}; +use reth_rpc_types_compat::block::from_block; -use crate::{FromEthApiError, FullEthApiTypes, RpcBlock, RpcReceipt}; +use crate::{node::RpcNodeCoreExt, FromEthApiError, FullEthApiTypes, RpcBlock, RpcReceipt}; use super::{LoadPendingBlock, LoadReceipt, SpawnBlocking}; @@ -21,11 +21,6 @@ pub type BlockAndReceiptsResult = Result impl HeaderProvider; - /// Returns the block header for the given block id. fn rpc_block_header( &self, @@ -52,15 +47,15 @@ pub trait EthBlocks: LoadBlock { async move { let Some(block) = self.block_with_senders(block_id).await? else { return Ok(None) }; let block_hash = block.hash(); - let mut total_difficulty = EthBlocks::provider(self) + let mut total_difficulty = self + .provider() .header_td_by_number(block.number) .map_err(Self::Error::from_eth_err)?; if total_difficulty.is_none() { // if we failed to find td after we successfully loaded the block, try again using // the hash this only matters if the chain is currently transitioning the merge block and there's a reorg: - total_difficulty = EthBlocks::provider(self) - .header_td(&block.hash()) - .map_err(Self::Error::from_eth_err)?; + total_difficulty = + self.provider().header_td(&block.hash()).map_err(Self::Error::from_eth_err)?; } let block = from_block( @@ -69,8 +64,7 @@ pub trait EthBlocks: LoadBlock { full.into(), Some(block_hash), self.tx_resp_builder(), - ) - .map_err(Self::Error::from_eth_err)?; + )?; Ok(Some(block)) } } @@ -85,13 +79,15 @@ pub trait EthBlocks: LoadBlock { async move { if block_id.is_pending() { // Pending block can be fetched directly without need for caching - return Ok(LoadBlock::provider(self) + return Ok(self + .provider() .pending_block() .map_err(Self::Error::from_eth_err)? .map(|block| block.body.transactions.len())) } - let block_hash = match LoadBlock::provider(self) + let block_hash = match self + .provider() .block_hash_for_id(block_id) .map_err(Self::Error::from_eth_err)? { @@ -132,7 +128,8 @@ pub trait EthBlocks: LoadBlock { if block_id.is_pending() { // First, try to get the pending block from the provider, in case we already // received the actual pending block from the CL. - if let Some((block, receipts)) = LoadBlock::provider(self) + if let Some((block, receipts)) = self + .provider() .pending_block_and_receipts() .map_err(Self::Error::from_eth_err)? { @@ -145,11 +142,11 @@ pub trait EthBlocks: LoadBlock { } } - if let Some(block_hash) = LoadBlock::provider(self) - .block_hash_for_id(block_id) - .map_err(Self::Error::from_eth_err)? + if let Some(block_hash) = + self.provider().block_hash_for_id(block_id).map_err(Self::Error::from_eth_err)? 
{ - return LoadReceipt::cache(self) + return self + .cache() .get_block_and_receipts(block_hash) .await .map_err(Self::Error::from_eth_err) @@ -166,8 +163,8 @@ pub trait EthBlocks: LoadBlock { fn ommers( &self, block_id: BlockId, - ) -> Result>, Self::Error> { - LoadBlock::provider(self).ommers_by_id(block_id).map_err(Self::Error::from_eth_err) + ) -> Result>, Self::Error> { + self.provider().ommers_by_id(block_id).map_err(Self::Error::from_eth_err) } /// Returns uncle block at given index in given block. @@ -182,18 +179,16 @@ pub trait EthBlocks: LoadBlock { async move { let uncles = if block_id.is_pending() { // Pending block can be fetched directly without need for caching - LoadBlock::provider(self) + self.provider() .pending_block() .map_err(Self::Error::from_eth_err)? .map(|block| block.body.ommers) } else { - LoadBlock::provider(self) - .ommers_by_id(block_id) - .map_err(Self::Error::from_eth_err)? + self.provider().ommers_by_id(block_id).map_err(Self::Error::from_eth_err)? } .unwrap_or_default(); - Ok(uncles.into_iter().nth(index.into()).map(uncle_block_from_header)) + Ok(uncles.into_iter().nth(index.into()).map(Block::uncle_from_header)) } } } @@ -201,17 +196,7 @@ pub trait EthBlocks: LoadBlock { /// Loads a block from database. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` blocks RPC methods. -pub trait LoadBlock: LoadPendingBlock + SpawnBlocking { - // Returns a handle for reading data from disk. - /// - /// Data access in default (L1) trait method implementations. - fn provider(&self) -> impl BlockReaderIdExt; - - /// Returns a handle for reading data from memory. - /// - /// Data access in default (L1) trait method implementations. - fn cache(&self) -> &EthStateCache; - +pub trait LoadBlock: LoadPendingBlock + SpawnBlocking + RpcNodeCoreExt { /// Returns the block object for the given block id. fn block_with_senders( &self, @@ -220,7 +205,8 @@ pub trait LoadBlock: LoadPendingBlock + SpawnBlocking { async move { if block_id.is_pending() { // Pending block can be fetched directly without need for caching - if let Some(pending_block) = LoadPendingBlock::provider(self) + if let Some(pending_block) = self + .provider() .pending_block_with_senders() .map_err(Self::Error::from_eth_err)? { @@ -234,7 +220,8 @@ pub trait LoadBlock: LoadPendingBlock + SpawnBlocking { }; } - let block_hash = match LoadPendingBlock::provider(self) + let block_hash = match self + .provider() .block_hash_for_id(block_id) .map_err(Self::Error::from_eth_err)? 
{ diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 1510233c5059..6a5506ad2ad7 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -3,15 +3,17 @@ use crate::{ AsEthApiError, FromEthApiError, FromEvmError, FullEthApiTypes, IntoEthApiError, RpcBlock, + RpcNodeCore, }; +use alloy_consensus::{BlockHeader, Header}; use alloy_eips::{eip1559::calc_next_block_base_fee, eip2930::AccessListResult}; use alloy_primitives::{Address, Bytes, TxKind, B256, U256}; -use alloy_rpc_types::{ +use alloy_rpc_types_eth::{ simulate::{SimBlock, SimulatePayload, SimulatedBlock}, state::{EvmOverrides, StateOverride}, + transaction::TransactionRequest, BlockId, Bundle, EthCallResponse, StateContext, TransactionInfo, }; -use alloy_rpc_types_eth::transaction::TransactionRequest; use futures::Future; use reth_chainspec::{EthChainSpec, MIN_TRANSACTION_GAS}; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; @@ -20,7 +22,7 @@ use reth_primitives::{ BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ExecutionResult, HaltReason, ResultAndState, TransactTo, TxEnv, }, - Header, TransactionSigned, + TransactionSigned, }; use reth_provider::{BlockIdReader, ChainSpecProvider, HeaderProvider, StateProvider}; use reth_revm::{database::StateProviderDatabase, db::CacheDB, DatabaseRef}; @@ -96,7 +98,7 @@ pub trait EthCall: Call + LoadPendingBlock { let base_block = self.block_with_senders(block).await?.ok_or(EthApiError::HeaderNotFound(block))?; let mut parent_hash = base_block.header.hash(); - let total_difficulty = LoadPendingBlock::provider(self) + let total_difficulty = RpcNodeCore::provider(self) .header_td_by_number(block_env.number.to()) .map_err(Self::Error::from_eth_err)? .ok_or(EthApiError::HeaderNotFound(block))?; @@ -118,15 +120,15 @@ pub trait EthCall: Call + LoadPendingBlock { block_env.timestamp += U256::from(1); if validation { - let chain_spec = LoadPendingBlock::provider(&this).chain_spec(); + let chain_spec = RpcNodeCore::provider(&this).chain_spec(); let base_fee_params = chain_spec.base_fee_params_at_timestamp(block_env.timestamp.to()); let base_fee = if let Some(latest) = blocks.last() { let header = &latest.inner.header; calc_next_block_base_fee( - header.gas_used, - header.gas_limit, - header.base_fee_per_gas.unwrap_or_default(), + header.gas_used(), + header.gas_limit(), + header.base_fee_per_gas().unwrap_or_default(), base_fee_params, ) } else { @@ -191,19 +193,20 @@ pub trait EthCall: Call + LoadPendingBlock { results.push((env.tx.caller, res.result)); } - let block = simulate::build_block( - results, - transactions, - &block_env, - parent_hash, - total_difficulty, - return_full_transactions, - &db, - this.tx_resp_builder(), - )?; + let block: SimulatedBlock> = + simulate::build_block( + results, + transactions, + &block_env, + parent_hash, + total_difficulty, + return_full_transactions, + &db, + this.tx_resp_builder(), + )?; parent_hash = block.inner.header.hash; - gas_used += block.inner.header.gas_used; + gas_used += block.inner.header.gas_used(); blocks.push(block); } @@ -258,7 +261,8 @@ pub trait EthCall: Call + LoadPendingBlock { // if it's not pending, we should always use block_hash over block_number to ensure that // different provider calls query data related to the same block. if !is_block_target_pending { - target_block = LoadBlock::provider(self) + target_block = self + .provider() .block_hash_for_id(target_block) .map_err(|_| EthApiError::HeaderNotFound(target_block))? 
.ok_or_else(|| EthApiError::HeaderNotFound(target_block))? @@ -300,7 +304,7 @@ pub trait EthCall: Call + LoadPendingBlock { let env = EnvWithHandlerCfg::new_with_cfg_env( cfg.clone(), block_env.clone(), - Call::evm_config(&this).tx_env(tx, *signer), + RpcNodeCore::evm_config(&this).tx_env(tx, *signer), ); let (res, _) = this.transact(&mut db, env)?; db.commit(res.state); @@ -452,7 +456,7 @@ pub trait EthCall: Call + LoadPendingBlock { } /// Executes code on state. -pub trait Call: LoadState + SpawnBlocking { +pub trait Call: LoadState<Evm: ConfigureEvm<Header = Header>> + SpawnBlocking { /// Returns default gas limit to use for `eth_call` and tracing RPC methods. /// /// Data access in default trait method implementations. fn call_gas_limit(&self) -> u64; /// Returns the maximum number of blocks accepted for `eth_simulateV1`. fn max_simulate_blocks(&self) -> u64; - /// Returns a handle for reading evm config. - /// - /// Data access in default (L1) trait method implementations. - fn evm_config(&self) -> &impl ConfigureEvm<Header = Header>
; - /// Executes the closure with the state that corresponds to the given [`BlockId`]. fn with_state_at_block(&self, at: BlockId, f: F) -> Result where @@ -636,7 +635,7 @@ pub trait Call: LoadState + SpawnBlocking { let env = EnvWithHandlerCfg::new_with_cfg_env( cfg, block_env, - Call::evm_config(&this).tx_env(tx.as_signed(), tx.signer()), + RpcNodeCore::evm_config(&this).tx_env(tx.as_signed(), tx.signer()), ); let (res, _) = this.transact(&mut db, env)?; diff --git a/crates/rpc/rpc-eth-api/src/helpers/fee.rs b/crates/rpc/rpc-eth-api/src/helpers/fee.rs index 34ba6dc7e4ec..8ed45d2ac080 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/fee.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/fee.rs @@ -1,13 +1,13 @@ //! Loads fee history from database. Helper trait for `eth_` fee and transaction RPC methods. use alloy_primitives::U256; -use alloy_rpc_types::{BlockNumberOrTag, FeeHistory}; +use alloy_rpc_types_eth::{BlockNumberOrTag, FeeHistory}; use futures::Future; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; -use reth_provider::{BlockIdReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider}; +use reth_chainspec::EthChainSpec; +use reth_provider::{BlockIdReader, ChainSpecProvider, HeaderProvider}; use reth_rpc_eth_types::{ - fee_history::calculate_reward_percentiles_for_block, EthApiError, EthStateCache, - FeeHistoryCache, FeeHistoryEntry, GasPriceOracle, RpcInvalidTransactionError, + fee_history::calculate_reward_percentiles_for_block, EthApiError, FeeHistoryCache, + FeeHistoryEntry, GasPriceOracle, RpcInvalidTransactionError, }; use tracing::debug; @@ -82,7 +82,8 @@ pub trait EthFees: LoadFee { block_count = block_count.saturating_sub(1); } - let end_block = LoadFee::provider(self) + let end_block = self + .provider() .block_number_for_id(newest_block.into()) .map_err(Self::Error::from_eth_err)? .ok_or(EthApiError::HeaderNotFound(newest_block.into()))?; @@ -147,13 +148,12 @@ pub trait EthFees: LoadFee { // Also need to include the `base_fee_per_gas` and `base_fee_per_blob_gas` for the // next block base_fee_per_gas - .push(last_entry.next_block_base_fee(LoadFee::provider(self).chain_spec()) - as u128); + .push(last_entry.next_block_base_fee(self.provider().chain_spec()) as u128); base_fee_per_blob_gas.push(last_entry.next_block_blob_fee().unwrap_or_default()); } else { // read the requested header range - let headers = LoadFee::provider(self) + let headers = self.provider() .sealed_headers_range(start_block..=end_block) .map_err(Self::Error::from_eth_err)?; if headers.len() != block_count as usize { @@ -167,12 +167,12 @@ pub trait EthFees: LoadFee { base_fee_per_blob_gas.push(header.blob_fee().unwrap_or_default()); blob_gas_used_ratio.push( header.blob_gas_used.unwrap_or_default() as f64 - / reth_primitives::constants::eip4844::MAX_DATA_GAS_PER_BLOCK as f64, + / alloy_eips::eip4844::MAX_DATA_GAS_PER_BLOCK as f64, ); // Percentiles were specified, so we need to collect reward percentile ino if let Some(percentiles) = &reward_percentiles { - let (block, receipts) = LoadFee::cache(self) + let (block, receipts) = self.cache() .get_block_and_receipts(header.hash()) .await .map_err(Self::Error::from_eth_err)? @@ -197,7 +197,7 @@ pub trait EthFees: LoadFee { // The unwrap is safe since we checked earlier that we got at least 1 header. 
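At this point `eth_feeHistory` appends one extra entry: the base fee of the next, not yet existing, block, computed from the last returned header by the `next_block_base_fee` call that follows. A self-contained sketch of that EIP-1559 update rule with mainnet parameters hard-coded (elasticity multiplier 2, max-change denominator 8); the real call sites go through `base_fee_params_at_timestamp` and alloy's `calc_next_block_base_fee`:

```rust
/// EIP-1559 next-block base fee, assuming mainnet parameters:
/// gas target = gas_limit / 2, max change denominator = 8.
fn next_block_base_fee(gas_used: u64, gas_limit: u64, base_fee: u64) -> u64 {
    let gas_target = gas_limit / 2; // assumes a non-zero gas limit
    if gas_used == gas_target {
        base_fee
    } else if gas_used > gas_target {
        // Over target: raise by base_fee * excess / target / 8, at least 1 wei.
        let delta =
            base_fee as u128 * (gas_used - gas_target) as u128 / gas_target as u128 / 8;
        base_fee + (delta as u64).max(1)
    } else {
        // Under target: lower by the symmetric amount, saturating at zero.
        let delta =
            base_fee as u128 * (gas_target - gas_used) as u128 / gas_target as u128 / 8;
        base_fee.saturating_sub(delta as u64)
    }
}

fn main() {
    // A full block (2x target) raises the base fee by 12.5%.
    assert_eq!(next_block_base_fee(30_000_000, 30_000_000, 1_000_000_000), 1_125_000_000);
    // An empty block lowers it by 12.5%.
    assert_eq!(next_block_base_fee(0, 30_000_000, 1_000_000_000), 875_000_000);
}
```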
let last_header = headers.last().expect("is present"); base_fee_per_gas.push( - LoadFee::provider(self) + self.provider() .chain_spec() .base_fee_params_at_timestamp(last_header.timestamp) .next_block_base_fee( @@ -242,22 +242,10 @@ pub trait EthFees: LoadFee { /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` fees RPC methods. pub trait LoadFee: LoadBlock { - // Returns a handle for reading data from disk. - /// - /// Data access in default (L1) trait method implementations. - fn provider( - &self, - ) -> impl BlockIdReader + HeaderProvider + ChainSpecProvider; - - /// Returns a handle for reading data from memory. - /// - /// Data access in default (L1) trait method implementations. - fn cache(&self) -> &EthStateCache; - /// Returns a handle for reading gas price. /// /// Data access in default (L1) trait method implementations. - fn gas_oracle(&self) -> &GasPriceOracle; + fn gas_oracle(&self) -> &GasPriceOracle; /// Returns a handle for reading fee history data from memory. /// diff --git a/crates/rpc/rpc-eth-api/src/helpers/mod.rs b/crates/rpc/rpc-eth-api/src/helpers/mod.rs index 8adb0e281e71..a881330b0453 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/mod.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/mod.rs @@ -17,7 +17,6 @@ pub mod block; pub mod blocking_task; pub mod call; -pub mod error; pub mod fee; pub mod pending_block; pub mod receipt; diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 872f17ee9107..490447d6152d 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -3,12 +3,14 @@ use std::time::{Duration, Instant}; -use crate::{EthApiTypes, FromEthApiError, FromEvmError}; +use crate::{EthApiTypes, FromEthApiError, FromEvmError, RpcNodeCore}; -use alloy_consensus::EMPTY_OMMER_ROOT_HASH; -use alloy_eips::{eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE}; +use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; +use alloy_eips::{ + eip4844::MAX_DATA_GAS_PER_BLOCK, eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE, +}; use alloy_primitives::{BlockNumber, B256, U256}; -use alloy_rpc_types::BlockNumberOrTag; +use alloy_rpc_types_eth::BlockNumberOrTag; use futures::Future; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::{ @@ -17,14 +19,12 @@ use reth_evm::{ }; use reth_execution_types::ExecutionOutcome; use reth_primitives::{ - constants::eip4844::MAX_DATA_GAS_PER_BLOCK, proofs::calculate_transaction_root, revm_primitives::{ BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, EVMError, Env, ExecutionResult, InvalidTransaction, ResultAndState, SpecId, }, - Block, BlockBody, Header, Receipt, SealedBlockWithSenders, SealedHeader, - TransactionSignedEcRecovered, + Block, BlockBody, Receipt, SealedBlockWithSenders, SealedHeader, TransactionSignedEcRecovered, }; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderError, @@ -43,32 +43,22 @@ use super::SpawnBlocking; /// Loads a pending block from database. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` blocks RPC methods. -pub trait LoadPendingBlock: EthApiTypes { - /// Returns a handle for reading data from disk. - /// - /// Data access in default (L1) trait method implementations. - fn provider( - &self, - ) -> impl BlockReaderIdExt - + EvmEnvProvider - + ChainSpecProvider - + StateProviderFactory; - - /// Returns a handle for reading data from transaction pool. 
- /// - /// Data access in default (L1) trait method implementations. - fn pool(&self) -> impl TransactionPool; - +pub trait LoadPendingBlock: + EthApiTypes + + RpcNodeCore< + Provider: BlockReaderIdExt + + EvmEnvProvider + + ChainSpecProvider<ChainSpec: EthChainSpec + EthereumHardforks> + + StateProviderFactory, + Pool: TransactionPool, + Evm: ConfigureEvm<Header = Header>
, + > +{ /// Returns a handle to the pending block. /// /// Data access in default (L1) trait method implementations. fn pending_block(&self) -> &Mutex<Option<PendingBlock>>; - /// Returns a handle for reading evm config. - /// - /// Data access in default (L1) trait method implementations. - fn evm_config(&self) -> &impl ConfigureEvm<Header = Header>
; - /// Configures the [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the pending block /// /// If no pending block is available, this will derive it from the `latest` block @@ -408,9 +398,7 @@ pub trait LoadPendingBlock: EthApiTypes { execution_outcome.block_logs_bloom(block_number).expect("Block is present"); // calculate the state root - let state_provider = &db.database; - let state_root = - state_provider.state_root(hashed_state).map_err(Self::Error::from_eth_err)?; + let state_root = db.database.state_root(hashed_state).map_err(Self::Error::from_eth_err)?; // create the block header let transactions_root = calculate_transaction_root(&executed_txs); diff --git a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs index eae99bbe45d8..48394f1cd6bb 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs @@ -3,19 +3,13 @@ use futures::Future; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; -use reth_rpc_eth_types::EthStateCache; -use crate::{EthApiTypes, RpcReceipt}; +use crate::{EthApiTypes, RpcNodeCoreExt, RpcReceipt}; /// Assembles transaction receipt data w.r.t to network. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` receipts RPC methods. -pub trait LoadReceipt: EthApiTypes + Send + Sync { - /// Returns a handle for reading data from memory. - /// - /// Data access in default (L1) trait method implementations. - fn cache(&self) -> &EthStateCache; - +pub trait LoadReceipt: EthApiTypes + RpcNodeCoreExt + Send + Sync { /// Helper method for `eth_getBlockReceipts` and `eth_getTransactionReceipt`. fn build_transaction_receipt( &self, diff --git a/crates/rpc/rpc-eth-api/src/helpers/signer.rs b/crates/rpc/rpc-eth-api/src/helpers/signer.rs index ab11e62d5431..dc8beab38a02 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/signer.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/signer.rs @@ -1,10 +1,10 @@ //! An abstraction over ethereum signers. use alloy_dyn_abi::TypedData; -use alloy_primitives::Address; +use alloy_primitives::{Address, PrimitiveSignature as Signature}; use alloy_rpc_types_eth::TransactionRequest; use dyn_clone::DynClone; -use reth_primitives::{Signature, TransactionSigned}; +use reth_primitives::TransactionSigned; use reth_rpc_eth_types::SignError; use std::result; diff --git a/crates/rpc/rpc-eth-api/src/helpers/spec.rs b/crates/rpc/rpc-eth-api/src/helpers/spec.rs index 5976cf29c07d..9957a00a41d8 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/spec.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/spec.rs @@ -1,28 +1,27 @@ //! Loads chain metadata. use alloy_primitives::{Address, U256, U64}; -use alloy_rpc_types::{Stage, SyncInfo, SyncStatus}; +use alloy_rpc_types_eth::{Stage, SyncInfo, SyncStatus}; use futures::Future; use reth_chainspec::{ChainInfo, EthereumHardforks}; use reth_errors::{RethError, RethResult}; use reth_network_api::NetworkInfo; use reth_provider::{BlockNumReader, ChainSpecProvider, StageCheckpointReader}; -use super::EthSigner; +use crate::{helpers::EthSigner, RpcNodeCore}; /// `Eth` API trait. /// /// Defines core functionality of the `eth` API implementation. #[auto_impl::auto_impl(&, Arc)] -pub trait EthApiSpec: Send + Sync { - /// Returns a handle for reading data from disk. - fn provider( - &self, - ) -> impl ChainSpecProvider + BlockNumReader + StageCheckpointReader; - - /// Returns a handle for reading network data summary. 
- fn network(&self) -> impl NetworkInfo; - +pub trait EthApiSpec: + RpcNodeCore< + Provider: ChainSpecProvider + + BlockNumReader + + StageCheckpointReader, + Network: NetworkInfo, +> +{ /// Returns the block node is started on. fn starting_block(&self) -> U256; diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index 080d90dc3b00..7ff9fa4deff5 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -1,24 +1,25 @@ //! Loads a pending block from database. Helper trait for `eth_` block, transaction, call and trace //! RPC methods. -use alloy_consensus::constants::KECCAK_EMPTY; +use alloy_consensus::{constants::KECCAK_EMPTY, Header}; +use alloy_eips::BlockId; use alloy_primitives::{Address, Bytes, B256, U256}; -use alloy_rpc_types::{serde_helpers::JsonStorageKey, Account, EIP1186AccountProofResponse}; +use alloy_rpc_types_eth::{Account, EIP1186AccountProofResponse}; +use alloy_serde::JsonStorageKey; use futures::Future; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_errors::RethError; use reth_evm::ConfigureEvmEnv; -use reth_primitives::{BlockId, Header}; use reth_provider::{ BlockIdReader, BlockNumReader, ChainSpecProvider, StateProvider, StateProviderBox, StateProviderFactory, }; -use reth_rpc_eth_types::{EthApiError, EthStateCache, PendingBlockEnv, RpcInvalidTransactionError}; +use reth_rpc_eth_types::{EthApiError, PendingBlockEnv, RpcInvalidTransactionError}; use reth_rpc_types_compat::proof::from_primitive_account_proof; use reth_transaction_pool::TransactionPool; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, SpecId}; -use crate::{EthApiTypes, FromEthApiError}; +use crate::{EthApiTypes, FromEthApiError, RpcNodeCore, RpcNodeCoreExt}; use super::{EthApiSpec, LoadPendingBlock, SpawnBlocking}; @@ -29,7 +30,7 @@ pub trait EthState: LoadState + SpawnBlocking { /// Returns the number of transactions sent from an address at the given block identifier. /// - /// If this is [`BlockNumberOrTag::Pending`](reth_primitives::BlockNumberOrTag) then this will + /// If this is [`BlockNumberOrTag::Pending`](alloy_eips::BlockNumberOrTag) then this will /// look up the highest transaction in pool and return the next nonce (highest + 1). fn transaction_count( &self, @@ -73,7 +74,7 @@ pub trait EthState: LoadState + SpawnBlocking { self.spawn_blocking_io(move |this| { Ok(B256::new( this.state_at_block_id_or_latest(block_id)? - .storage(address, index.0) + .storage(address, index.as_b256()) .map_err(Self::Error::from_eth_err)? .unwrap_or_default() .to_be_bytes(), @@ -105,7 +106,8 @@ pub trait EthState: LoadState + SpawnBlocking { let block_id = block_id.unwrap_or_default(); // Check whether the distance to the block exceeds the maximum configured window. - let block_number = LoadState::provider(self) + let block_number = self + .provider() .block_number_for_id(block_id) .map_err(Self::Error::from_eth_err)? 
.ok_or(EthApiError::HeaderNotFound(block_id))?; @@ -116,11 +118,11 @@ pub trait EthState: LoadState + SpawnBlocking { self.spawn_blocking_io(move |this| { let state = this.state_at_block_id(block_id)?; - let storage_keys = keys.iter().map(|key| key.0).collect::>(); + let storage_keys = keys.iter().map(|key| key.as_b256()).collect::>(); let proof = state .proof(Default::default(), address, &storage_keys) .map_err(Self::Error::from_eth_err)?; - Ok(from_primitive_account_proof(proof)) + Ok(from_primitive_account_proof(proof, keys)) }) .await }) @@ -138,9 +140,9 @@ pub trait EthState: LoadState + SpawnBlocking { let Some(account) = account else { return Ok(None) }; // Check whether the distance to the block exceeds the maximum configured proof window. - let chain_info = - LoadState::provider(&this).chain_info().map_err(Self::Error::from_eth_err)?; - let block_number = LoadState::provider(&this) + let chain_info = this.provider().chain_info().map_err(Self::Error::from_eth_err)?; + let block_number = this + .provider() .block_number_for_id(block_id) .map_err(Self::Error::from_eth_err)? .ok_or(EthApiError::HeaderNotFound(block_id))?; @@ -167,24 +169,14 @@ pub trait EthState: LoadState + SpawnBlocking { /// Loads state from database. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` state RPC methods. -pub trait LoadState: EthApiTypes { - /// Returns a handle for reading state from database. - /// - /// Data access in default trait method implementations. - fn provider( - &self, - ) -> impl StateProviderFactory + ChainSpecProvider; - - /// Returns a handle for reading data from memory. - /// - /// Data access in default (L1) trait method implementations. - fn cache(&self) -> &EthStateCache; - - /// Returns a handle for reading data from transaction pool. - /// - /// Data access in default trait method implementations. - fn pool(&self) -> impl TransactionPool; - +pub trait LoadState: + EthApiTypes + + RpcNodeCoreExt< + Provider: StateProviderFactory + + ChainSpecProvider, + Pool: TransactionPool, + > +{ /// Returns the state at the given block number fn state_at_hash(&self, block_hash: B256) -> Result { self.provider().history_by_block_hash(block_hash).map_err(Self::Error::from_eth_err) @@ -192,7 +184,7 @@ pub trait LoadState: EthApiTypes { /// Returns the state at the given [`BlockId`] enum. /// - /// Note: if not [`BlockNumberOrTag::Pending`](reth_primitives::BlockNumberOrTag) then this + /// Note: if not [`BlockNumberOrTag::Pending`](alloy_eips::BlockNumberOrTag) then this /// will only return canonical state. See also fn state_at_block_id(&self, at: BlockId) -> Result { self.provider().state_by_block_id(at).map_err(Self::Error::from_eth_err) @@ -237,7 +229,7 @@ pub trait LoadState: EthApiTypes { Ok((cfg, block_env, origin.state_block_id())) } else { // Use cached values if there is no pending block - let block_hash = LoadPendingBlock::provider(self) + let block_hash = RpcNodeCore::provider(self) .block_hash_for_id(at) .map_err(Self::Error::from_eth_err)? .ok_or(EthApiError::HeaderNotFound(at))?; @@ -310,7 +302,7 @@ pub trait LoadState: EthApiTypes { /// Returns the number of transactions sent from an address at the given block identifier. /// - /// If this is [`BlockNumberOrTag::Pending`](reth_primitives::BlockNumberOrTag) then this will + /// If this is [`BlockNumberOrTag::Pending`](alloy_eips::BlockNumberOrTag) then this will /// look up the highest transaction in pool and return the next nonce (highest + 1). 
fn transaction_count( &self, diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index 64056148cd38..104042d17a2b 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -1,14 +1,15 @@ //! Loads a pending block from database. Helper trait for `eth_` call and trace RPC methods. -use std::sync::Arc; +use std::{fmt::Display, sync::Arc}; -use crate::FromEvmError; +use crate::{FromEvmError, RpcNodeCore}; +use alloy_consensus::Header; use alloy_primitives::B256; -use alloy_rpc_types::{BlockId, TransactionInfo}; +use alloy_rpc_types_eth::{BlockId, TransactionInfo}; use futures::Future; use reth_chainspec::ChainSpecProvider; use reth_evm::{system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv}; -use reth_primitives::{Header, SealedBlockWithSenders}; +use reth_primitives::SealedBlockWithSenders; use reth_revm::database::StateProviderDatabase; use reth_rpc_eth_types::{ cache::db::{StateCacheDb, StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, @@ -16,17 +17,14 @@ use reth_rpc_eth_types::{ }; use revm::{db::CacheDB, Database, DatabaseCommit, GetInspector, Inspector}; use revm_inspectors::tracing::{TracingInspector, TracingInspectorConfig}; -use revm_primitives::{EnvWithHandlerCfg, EvmState, ExecutionResult, ResultAndState}; +use revm_primitives::{ + BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, EvmState, ExecutionResult, ResultAndState, +}; use super::{Call, LoadBlock, LoadPendingBlock, LoadState, LoadTransaction}; /// Executes CPU heavy tasks. -pub trait Trace: LoadState { - /// Returns a handle for reading evm config. - /// - /// Data access in default (L1) trait method implementations. - fn evm_config(&self) -> &impl ConfigureEvm
<Header = Header>; - +pub trait Trace: LoadState<Evm: ConfigureEvm<Header = Header>> { /// Executes the [`EnvWithHandlerCfg`] against the given [Database] without committing state /// changes. fn inspect( @@ -192,30 +190,13 @@ pub trait Trace: LoadState { // we need to get the state of the parent block because we're essentially replaying the // block the transaction is included in let parent_block = block.parent_hash; - let parent_beacon_block_root = block.parent_beacon_block_root; let this = self.clone(); self.spawn_with_state_at_block(parent_block.into(), move |state| { let mut db = CacheDB::new(StateProviderDatabase::new(state)); let block_txs = block.transactions_with_sender(); - // apply relevant system calls - let mut system_caller = SystemCaller::new( - Trace::evm_config(&this).clone(), - LoadState::provider(&this).chain_spec(), - ); - system_caller - .pre_block_beacon_root_contract_call( - &mut db, - &cfg, - &block_env, - parent_beacon_block_root, - ) - .map_err(|_| { - EthApiError::EvmCustom( - "failed to apply 4788 beacon root system call".to_string(), - ) - })?; + this.apply_pre_execution_changes(&block, &mut db, &cfg, &block_env)?; // replay all transactions prior to the targeted transaction this.replay_transactions_until( @@ -229,7 +210,7 @@ let env = EnvWithHandlerCfg::new_with_cfg_env( cfg, block_env, - Call::evm_config(&this).tx_env(tx.as_signed(), tx.signer()), + RpcNodeCore::evm_config(&this).tx_env(tx.as_signed(), tx.signer()), ); let (res, _) = this.inspect(StateCacheDbRefMutWrapper(&mut db), env, &mut inspector)?; @@ -342,21 +323,7 @@ pub trait Trace: LoadState { let mut db = CacheDB::new(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state))); - // apply relevant system calls - let mut system_caller = SystemCaller::new( - Trace::evm_config(&this).clone(), - LoadState::provider(&this).chain_spec(), - ); - system_caller - .pre_block_beacon_root_contract_call( - &mut db, - &cfg, - &block_env, - block.header().parent_beacon_block_root, - ) - .map_err(|_| { - EthApiError::EvmCustom("failed to apply 4788 system call".to_string()) - })?; + this.apply_pre_execution_changes(&block, &mut db, &cfg, &block_env)?; // prepare transactions, we do everything upfront to reduce time spent with open // state @@ -379,7 +346,7 @@ block_number: Some(block_number), base_fee: Some(base_fee), }; - let tx_env = Trace::evm_config(&this).tx_env(tx, *signer); + let tx_env = this.evm_config().tx_env(tx, *signer); (tx_info, tx_env) }) .peekable(); @@ -483,4 +450,37 @@ pub trait Trace: LoadState { { self.trace_block_until_with_inspector(block_id, block, None, insp_setup, f) } + + /// Applies chain-specific state transitions required before executing a block. + /// + /// Note: This should only be called when tracing an entire block vs individual transactions. + /// When tracing a transaction on top of an already committed block state, those transitions are + /// already applied.
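+ /// + /// Concretely, this replays the EIP-4788 beacon-root system call and the EIP-2935 + /// block-hashes system call before the first traced transaction, matching what the block + /// executor does during live execution.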
+ fn apply_pre_execution_changes + DatabaseCommit>( + &self, + block: &SealedBlockWithSenders, + db: &mut DB, + cfg: &CfgEnvWithHandlerCfg, + block_env: &BlockEnv, + ) -> Result<(), Self::Error> { + let mut system_caller = + SystemCaller::new(self.evm_config().clone(), self.provider().chain_spec()); + // apply relevant system calls + system_caller + .pre_block_beacon_root_contract_call( + db, + cfg, + block_env, + block.header.parent_beacon_block_root, + ) + .map_err(|_| EthApiError::EvmCustom("failed to apply 4788 system call".to_string()))?; + + system_caller + .pre_block_blockhashes_contract_call(db, cfg, block_env, block.header.parent_hash) + .map_err(|_| { + EthApiError::EvmCustom("failed to apply blockhashes system call".to_string()) + })?; + + Ok(()) + } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 0d16a5c9145b..e041b8c46051 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -3,25 +3,25 @@ use alloy_consensus::Transaction; use alloy_dyn_abi::TypedData; -use alloy_eips::eip2718::Encodable2718; +use alloy_eips::{eip2718::Encodable2718, BlockId}; use alloy_network::TransactionBuilder; use alloy_primitives::{Address, Bytes, TxHash, B256}; -use alloy_rpc_types::{BlockNumberOrTag, TransactionInfo}; -use alloy_rpc_types_eth::transaction::TransactionRequest; +use alloy_rpc_types_eth::{transaction::TransactionRequest, BlockNumberOrTag, TransactionInfo}; use futures::Future; -use reth_primitives::{ - BlockId, Receipt, SealedBlockWithSenders, TransactionMeta, TransactionSigned, -}; +use reth_primitives::{Receipt, SealedBlockWithSenders, TransactionMeta, TransactionSigned}; use reth_provider::{BlockNumReader, BlockReaderIdExt, ReceiptProvider, TransactionsProvider}; use reth_rpc_eth_types::{ utils::{binary_search, recover_raw_transaction}, - EthApiError, EthStateCache, SignError, TransactionSource, + EthApiError, SignError, TransactionSource, }; use reth_rpc_types_compat::transaction::{from_recovered, from_recovered_with_block_context}; use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; use std::sync::Arc; -use crate::{FromEthApiError, FullEthApiTypes, IntoEthApiError, RpcReceipt, RpcTransaction}; +use crate::{ + FromEthApiError, FullEthApiTypes, IntoEthApiError, RpcNodeCore, RpcNodeCoreExt, RpcReceipt, + RpcTransaction, +}; use super::{ Call, EthApiSpec, EthSigner, LoadBlock, LoadPendingBlock, LoadReceipt, LoadState, SpawnBlocking, @@ -50,12 +50,7 @@ use super::{ /// See also /// /// This implementation follows the behaviour of Geth and disables the basefee check for tracing. -pub trait EthTransactions: LoadTransaction { - /// Returns a handle for reading data from disk. - /// - /// Data access in default (L1) trait method implementations. - fn provider(&self) -> impl BlockReaderIdExt; - +pub trait EthTransactions: LoadTransaction { /// Returns a handle for signing data. /// /// Singer access in default (L1) trait method implementations. @@ -109,7 +104,8 @@ pub trait EthTransactions: LoadTransaction { } self.spawn_blocking_io(move |ref this| { - Ok(LoadTransaction::provider(this) + Ok(this + .provider() .transaction_by_hash(hash) .map_err(Self::Error::from_eth_err)? 
.map(|tx| tx.encoded_2718().into())) @@ -164,7 +160,8 @@ pub trait EthTransactions: LoadTransaction { { let this = self.clone(); self.spawn_blocking_io(move |_| { - let (tx, meta) = match LoadTransaction::provider(&this) + let (tx, meta) = match this + .provider() .transaction_by_hash_with_meta(hash) .map_err(Self::Error::from_eth_err)? { @@ -172,13 +169,11 @@ pub trait EthTransactions: LoadTransaction { None => return Ok(None), }; - let receipt = match EthTransactions::provider(&this) - .receipt_by_hash(hash) - .map_err(Self::Error::from_eth_err)? - { - Some(recpt) => recpt, - None => return Ok(None), - }; + let receipt = + match this.provider().receipt_by_hash(hash).map_err(Self::Error::from_eth_err)? { + Some(recpt) => recpt, + None => return Ok(None), + }; Ok(Some((tx, meta, receipt))) }) @@ -213,7 +208,7 @@ pub trait EthTransactions: LoadTransaction { tx.clone().with_signer(*signer), tx_info, self.tx_resp_builder(), - ))) + )?)) } } @@ -229,16 +224,16 @@ pub trait EthTransactions: LoadTransaction { include_pending: bool, ) -> impl Future>, Self::Error>> + Send where - Self: LoadBlock + LoadState + FullEthApiTypes, + Self: LoadBlock + LoadState, { async move { // Check the pool first if include_pending { if let Some(tx) = - LoadState::pool(self).get_transaction_by_sender_and_nonce(sender, nonce) + RpcNodeCore::pool(self).get_transaction_by_sender_and_nonce(sender, nonce) { let transaction = tx.transaction.clone().into_consensus(); - return Ok(Some(from_recovered(transaction.into(), self.tx_resp_builder()))); + return Ok(Some(from_recovered(transaction.into(), self.tx_resp_builder())?)); } } @@ -255,7 +250,7 @@ pub trait EthTransactions: LoadTransaction { return Ok(None); } - let Ok(high) = LoadBlock::provider(self).best_block_number() else { + let Ok(high) = self.provider().best_block_number() else { return Err(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()).into()); }; @@ -296,7 +291,7 @@ pub trait EthTransactions: LoadTransaction { ) }) }) - .ok_or(EthApiError::HeaderNotFound(block_id).into()) + .ok_or(EthApiError::HeaderNotFound(block_id))? .map(Some) } } @@ -381,10 +376,15 @@ pub trait EthTransactions: LoadTransaction { let transaction = self.sign_request(&from, request).await?.with_signer(from); - let pool_transaction = <::Pool as TransactionPool>::Transaction::try_from_consensus(transaction.into()).map_err(|_| EthApiError::TransactionConversionError)?; + let pool_transaction = + <::Pool as TransactionPool>::Transaction::try_from_consensus( + transaction.into(), + ) + .map_err(|_| EthApiError::TransactionConversionError)?; // submit the transaction to the pool with a `Local` origin - let hash = LoadTransaction::pool(self) + let hash = self + .pool() .add_transaction(TransactionOrigin::Local, pool_transaction) .await .map_err(Self::Error::from_eth_err)?; @@ -400,16 +400,10 @@ pub trait EthTransactions: LoadTransaction { txn: TransactionRequest, ) -> impl Future> + Send { async move { - let signers: Vec<_> = self.signers().read().iter().cloned().collect(); - for signer in signers { - if signer.is_signer_for(from) { - return match signer.sign_transaction(txn, from).await { - Ok(tx) => Ok(tx), - Err(e) => Err(e.into_eth_err()), - } - } - } - Err(EthApiError::InvalidTransactionSignature.into()) + self.find_signer(from)? 
+ .sign_transaction(txn, from) + .await + .map_err(Self::Error::from_eth_err) } } @@ -430,6 +424,22 @@ pub trait EthTransactions: LoadTransaction { } } + /// Signs a transaction request using the given account in request + /// Returns the EIP-2718 encoded signed transaction. + fn sign_transaction( + &self, + request: TransactionRequest, + ) -> impl Future> + Send { + async move { + let from = match request.from { + Some(from) => from, + None => return Err(SignError::NoAccount.into_eth_err()), + }; + + Ok(self.sign_request(&from, request).await?.encoded_2718().into()) + } + } + /// Encodes and signs the typed data according EIP-712. Payload must implement Eip712 trait. fn sign_typed_data(&self, data: &TypedData, account: Address) -> Result { Ok(self @@ -458,26 +468,11 @@ pub trait EthTransactions: LoadTransaction { /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` transactions RPC /// methods. -pub trait LoadTransaction: SpawnBlocking + FullEthApiTypes { - /// Transaction pool with pending transactions. [`TransactionPool::Transaction`] is the - /// supported transaction type. - type Pool: TransactionPool; - - /// Returns a handle for reading data from disk. - /// - /// Data access in default (L1) trait method implementations. - fn provider(&self) -> impl TransactionsProvider; - - /// Returns a handle for reading data from memory. - /// - /// Data access in default (L1) trait method implementations. - fn cache(&self) -> &EthStateCache; - - /// Returns a handle for reading data from pool. - /// - /// Data access in default (L1) trait method implementations. - fn pool(&self) -> &Self::Pool; - +pub trait LoadTransaction: + SpawnBlocking + + FullEthApiTypes + + RpcNodeCoreExt +{ /// Returns the transaction by hash. /// /// Checks the pool and state. diff --git a/crates/rpc/rpc-eth-api/src/lib.rs b/crates/rpc/rpc-eth-api/src/lib.rs index 849c8e2e4c8f..cb97a03e8b80 100644 --- a/crates/rpc/rpc-eth-api/src/lib.rs +++ b/crates/rpc/rpc-eth-api/src/lib.rs @@ -16,15 +16,19 @@ pub mod bundle; pub mod core; pub mod filter; pub mod helpers; +pub mod node; pub mod pubsub; pub mod types; +pub use reth_rpc_eth_types::error::{ + AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError, +}; pub use reth_rpc_types_compat::TransactionCompat; pub use bundle::{EthBundleApiServer, EthCallBundleApiServer}; pub use core::{EthApiServer, FullEthApiServer}; pub use filter::EthFilterApiServer; -pub use helpers::error::{AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError}; +pub use node::{RpcNodeCore, RpcNodeCoreExt}; pub use pubsub::EthPubSubApiServer; pub use types::{EthApiTypes, FullEthApiTypes, RpcBlock, RpcReceipt, RpcTransaction}; diff --git a/crates/rpc/rpc-eth-api/src/node.rs b/crates/rpc/rpc-eth-api/src/node.rs new file mode 100644 index 000000000000..12dbe8f66641 --- /dev/null +++ b/crates/rpc/rpc-eth-api/src/node.rs @@ -0,0 +1,82 @@ +//! Helper trait for interfacing with [`FullNodeComponents`]. + +use reth_node_api::FullNodeComponents; +use reth_rpc_eth_types::EthStateCache; + +/// Helper trait to relax trait bounds on [`FullNodeComponents`]. +/// +/// Helpful when defining types that would otherwise have a generic `N: FullNodeComponents`. Using +/// `N: RpcNodeCore` instead, allows access to all the associated types on [`FullNodeComponents`] +/// that are used in RPC, but with more flexibility since they have no trait bounds (asides auto +/// traits). +pub trait RpcNodeCore: Clone + Send + Sync { + /// The provider type used to interact with the node. 
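+ /// + /// Deliberately carries only auto-trait style bounds (`Send + Sync + Clone + Unpin`); each + /// helper trait layers the provider capabilities it actually needs on top, e.g. + /// `LoadPendingBlock` above bounds `Provider: BlockReaderIdExt + .. + StateProviderFactory`.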
+ type Provider: Send + Sync + Clone + Unpin; + /// The transaction pool of the node. + type Pool: Send + Sync + Clone + Unpin; + /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. + type Evm: Send + Sync + Clone + Unpin; + /// Network API. + type Network: Send + Sync + Clone; + + /// Builds new blocks. + type PayloadBuilder: Send + Sync + Clone; + + /// Returns the transaction pool of the node. + fn pool(&self) -> &Self::Pool; + + /// Returns the node's evm config. + fn evm_config(&self) -> &Self::Evm; + + /// Returns the handle to the network + fn network(&self) -> &Self::Network; + + /// Returns the handle to the payload builder service. + fn payload_builder(&self) -> &Self::PayloadBuilder; + + /// Returns the provider of the node. + fn provider(&self) -> &Self::Provider; +} + +impl RpcNodeCore for T +where + T: FullNodeComponents, +{ + type Provider = T::Provider; + type Pool = T::Pool; + type Evm = ::Evm; + type Network = ::Network; + type PayloadBuilder = ::PayloadBuilder; + + #[inline] + fn pool(&self) -> &Self::Pool { + FullNodeComponents::pool(self) + } + + #[inline] + fn evm_config(&self) -> &Self::Evm { + FullNodeComponents::evm_config(self) + } + + #[inline] + fn network(&self) -> &Self::Network { + FullNodeComponents::network(self) + } + + #[inline] + fn payload_builder(&self) -> &Self::PayloadBuilder { + FullNodeComponents::payload_builder(self) + } + + #[inline] + fn provider(&self) -> &Self::Provider { + FullNodeComponents::provider(self) + } +} + +/// Additional components, asides the core node components, needed to run `eth_` namespace API +/// server. +pub trait RpcNodeCoreExt: RpcNodeCore { + /// Returns handle to RPC cache service. + fn cache(&self) -> &EthStateCache; +} diff --git a/crates/rpc/rpc-eth-api/src/pubsub.rs b/crates/rpc/rpc-eth-api/src/pubsub.rs index b70dacb26fa9..ecbb1fe9a833 100644 --- a/crates/rpc/rpc-eth-api/src/pubsub.rs +++ b/crates/rpc/rpc-eth-api/src/pubsub.rs @@ -1,7 +1,7 @@ //! `eth_` RPC API for pubsub subscription. use alloy_json_rpc::RpcObject; -use alloy_rpc_types::pubsub::{Params, SubscriptionKind}; +use alloy_rpc_types_eth::pubsub::{Params, SubscriptionKind}; use jsonrpsee::proc_macros::rpc; /// Ethereum pub-sub rpc interface. diff --git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index 653730ed3c99..12ff090d37c1 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -1,17 +1,19 @@ //! Trait for specifying `eth` network dependent API types. -use std::{error::Error, fmt}; +use std::{ + error::Error, + fmt::{self}, +}; -use alloy_network::{AnyNetwork, Network}; -use alloy_rpc_types::Block; -use reth_rpc_eth_types::EthApiError; +use alloy_network::Network; +use alloy_rpc_types_eth::Block; use reth_rpc_types_compat::TransactionCompat; use crate::{AsEthApiError, FromEthApiError, FromEvmError}; /// Network specific `eth` API types. pub trait EthApiTypes: Send + Sync + Clone { - /// Extension of [`EthApiError`], with network specific errors. + /// Extension of [`FromEthApiError`], with network specific errors. type Error: Into> + FromEthApiError + AsEthApiError @@ -20,7 +22,7 @@ pub trait EthApiTypes: Send + Sync + Clone { + Send + Sync; /// Blockchain primitive types, specific to network, e.g. block and transaction. - type NetworkTypes: Network; + type NetworkTypes: Network; /// Conversion methods for transaction RPC type. 
type TransactionCompat: Send + Sync + Clone + fmt::Debug; @@ -28,16 +30,6 @@ pub trait EthApiTypes: Send + Sync + Clone { fn tx_resp_builder(&self) -> &Self::TransactionCompat; } -impl EthApiTypes for () { - type Error = EthApiError; - type NetworkTypes = AnyNetwork; - type TransactionCompat = (); - - fn tx_resp_builder(&self) -> &Self::TransactionCompat { - self - } -} - /// Adapter for network specific transaction type. pub type RpcTransaction = ::TransactionResponse; @@ -47,15 +39,26 @@ pub type RpcBlock = Block, ::HeaderResponse>; /// Adapter for network specific receipt type. pub type RpcReceipt = ::ReceiptResponse; +/// Adapter for network specific error type. +pub type RpcError = ::Error; + /// Helper trait holds necessary trait bounds on [`EthApiTypes`] to implement `eth` API. pub trait FullEthApiTypes: - EthApiTypes>> + EthApiTypes< + TransactionCompat: TransactionCompat< + Transaction = RpcTransaction, + Error = RpcError, + >, +> { } impl FullEthApiTypes for T where T: EthApiTypes< - TransactionCompat: TransactionCompat>, + TransactionCompat: TransactionCompat< + Transaction = RpcTransaction, + Error = RpcError, + >, > { } diff --git a/crates/rpc/rpc-eth-types/Cargo.toml b/crates/rpc/rpc-eth-types/Cargo.toml index 46a0d7b5c323..9b38ed89724a 100644 --- a/crates/rpc/rpc-eth-types/Cargo.toml +++ b/crates/rpc/rpc-eth-types/Cargo.toml @@ -35,8 +35,6 @@ alloy-rpc-types-eth.workspace = true revm.workspace = true revm-inspectors.workspace = true revm-primitives = { workspace = true, features = ["dev"] } -alloy-rpc-types.workspace = true -alloy-serde.workspace = true alloy-eips.workspace = true # rpc diff --git a/crates/rpc/rpc-eth-types/src/builder/config.rs b/crates/rpc/rpc-eth-types/src/builder/config.rs index a016d021586b..532c10772035 100644 --- a/crates/rpc/rpc-eth-types/src/builder/config.rs +++ b/crates/rpc/rpc-eth-types/src/builder/config.rs @@ -15,7 +15,7 @@ use serde::{Deserialize, Serialize}; pub const DEFAULT_STALE_FILTER_TTL: Duration = Duration::from_secs(5 * 60); /// Additional config values for the eth namespace. -#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize)] pub struct EthConfig { /// Settings for the caching layer pub cache: EthStateCacheConfig, diff --git a/crates/rpc/rpc-eth-types/src/cache/db.rs b/crates/rpc/rpc-eth-types/src/cache/db.rs index 627fd2b2df72..50fd4b04625f 100644 --- a/crates/rpc/rpc-eth-types/src/cache/db.rs +++ b/crates/rpc/rpc-eth-types/src/cache/db.rs @@ -116,7 +116,7 @@ impl reth_storage_api::BlockHashReader for StateProviderTraitObjWrapper<'_> { fn convert_block_hash( &self, - hash_or_number: alloy_rpc_types::BlockHashOrNumber, + hash_or_number: alloy_rpc_types_eth::BlockHashOrNumber, ) -> reth_errors::ProviderResult> { self.0.convert_block_hash(hash_or_number) } diff --git a/crates/rpc/rpc-eth-types/src/cache/mod.rs b/crates/rpc/rpc-eth-types/src/cache/mod.rs index cbf05f2764e4..b4a110e96af7 100644 --- a/crates/rpc/rpc-eth-types/src/cache/mod.rs +++ b/crates/rpc/rpc-eth-types/src/cache/mod.rs @@ -1,14 +1,14 @@ //! 
Async caching support for eth RPC +use alloy_consensus::Header; +use alloy_eips::BlockHashOrNumber; use alloy_primitives::B256; use futures::{future::Either, Stream, StreamExt}; use reth_chain_state::CanonStateNotification; use reth_errors::{ProviderError, ProviderResult}; use reth_evm::{provider::EvmEnvProvider, ConfigureEvm}; use reth_execution_types::Chain; -use reth_primitives::{ - BlockHashOrNumber, Header, Receipt, SealedBlockWithSenders, TransactionSigned, -}; +use reth_primitives::{Receipt, SealedBlockWithSenders, TransactionSigned}; use reth_storage_api::{BlockReader, StateProviderFactory, TransactionVariant}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; diff --git a/crates/rpc/rpc-eth-api/src/helpers/error.rs b/crates/rpc/rpc-eth-types/src/error/api.rs similarity index 87% rename from crates/rpc/rpc-eth-api/src/helpers/error.rs rename to crates/rpc/rpc-eth-types/src/error/api.rs index 1d991b8e65b6..419f530c4e21 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/error.rs +++ b/crates/rpc/rpc-eth-types/src/error/api.rs @@ -1,9 +1,10 @@ //! Helper traits to wrap generic l1 errors, in network specific error type configured in -//! [`EthApiTypes`](crate::EthApiTypes). +//! `reth_rpc_eth_api::EthApiTypes`. -use reth_rpc_eth_types::EthApiError; use revm_primitives::EVMError; +use crate::EthApiError; + /// Helper trait to wrap core [`EthApiError`]. pub trait FromEthApiError: From { /// Converts from error via [`EthApiError`]. @@ -51,7 +52,7 @@ pub trait AsEthApiError { fn as_err(&self) -> Option<&EthApiError>; /// Returns `true` if error is - /// [`RpcInvalidTransactionError::GasTooHigh`](reth_rpc_eth_types::RpcInvalidTransactionError::GasTooHigh). + /// [`RpcInvalidTransactionError::GasTooHigh`](crate::RpcInvalidTransactionError::GasTooHigh). fn is_gas_too_high(&self) -> bool { if let Some(err) = self.as_err() { return err.is_gas_too_high() @@ -61,7 +62,7 @@ pub trait AsEthApiError { } /// Returns `true` if error is - /// [`RpcInvalidTransactionError::GasTooLow`](reth_rpc_eth_types::RpcInvalidTransactionError::GasTooLow). + /// [`RpcInvalidTransactionError::GasTooLow`](crate::RpcInvalidTransactionError::GasTooLow). fn is_gas_too_low(&self) -> bool { if let Some(err) = self.as_err() { return err.is_gas_too_low() diff --git a/crates/rpc/rpc-eth-types/src/error.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs similarity index 99% rename from crates/rpc/rpc-eth-types/src/error.rs rename to crates/rpc/rpc-eth-types/src/error/mod.rs index b38b3122708b..99c41daea379 100644 --- a/crates/rpc/rpc-eth-types/src/error.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -1,12 +1,16 @@ //! Implementation specific Errors for the `eth_` namespace. 
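The `error/api.rs` helpers moved above encode a small conversion pattern: any network-specific error type that is `From<EthApiError>` automatically gains `from_eth_err`-style constructors, which is what lets the generic default methods in `rpc-eth-api` write `map_err(Self::Error::from_eth_err)` without naming a concrete error type. A minimal sketch of the shape with stand-in types (`CoreError`/`NetError` are illustrative, not the reth types):

```rust
/// Stand-in for the shared L1 `EthApiError`.
#[derive(Debug, PartialEq)]
enum CoreError {
    HeaderNotFound(u64),
}

/// Mirror of the `FromEthApiError` shape: a blanket helper over `From`,
/// giving generic code a single `from_core_err` entry point for `map_err`.
trait FromCoreError: From<CoreError> {
    fn from_core_err<E: Into<CoreError>>(err: E) -> Self {
        Self::from(err.into())
    }
}

impl<T: From<CoreError>> FromCoreError for T {}

/// Hypothetical network-specific error wrapping the core one.
#[derive(Debug, PartialEq)]
enum NetError {
    Core(CoreError),
}

impl From<CoreError> for NetError {
    fn from(e: CoreError) -> Self {
        Self::Core(e)
    }
}

fn main() {
    // Generic default methods can stay parameterized over the error type:
    let res: Result<(), NetError> =
        Err(CoreError::HeaderNotFound(42)).map_err(NetError::from_core_err);
    assert_eq!(res, Err(NetError::Core(CoreError::HeaderNotFound(42))));
}
```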
-use std::time::Duration; +pub mod api; +pub use api::{AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError}; +use core::time::Duration; + +use alloy_eips::BlockId; use alloy_primitives::{Address, Bytes, U256}; -use alloy_rpc_types::{error::EthRpcErrorCode, request::TransactionInputError, BlockError}; +use alloy_rpc_types_eth::{error::EthRpcErrorCode, request::TransactionInputError, BlockError}; use alloy_sol_types::decode_revert_reason; use reth_errors::RethError; -use reth_primitives::{revm_primitives::InvalidHeader, BlockId}; +use reth_primitives::revm_primitives::InvalidHeader; use reth_rpc_server_types::result::{ block_id_to_str, internal_rpc_err, invalid_params_rpc_err, rpc_err, rpc_error_with_code, }; diff --git a/crates/rpc/rpc-eth-types/src/fee_history.rs b/crates/rpc/rpc-eth-types/src/fee_history.rs index c845d9683870..6c8b66246f33 100644 --- a/crates/rpc/rpc-eth-types/src/fee_history.rs +++ b/crates/rpc/rpc-eth-types/src/fee_history.rs @@ -8,7 +8,7 @@ use std::{ use alloy_eips::eip1559::calc_next_block_base_fee; use alloy_primitives::B256; -use alloy_rpc_types::TxGasAndReward; +use alloy_rpc_types_eth::TxGasAndReward; use futures::{ future::{Fuse, FusedFuture}, FutureExt, Stream, StreamExt, @@ -366,7 +366,7 @@ impl FeeHistoryEntry { gas_used_ratio: block.gas_used as f64 / block.gas_limit as f64, base_fee_per_blob_gas: block.blob_fee(), blob_gas_used_ratio: block.blob_gas_used() as f64 / - reth_primitives::constants::eip4844::MAX_DATA_GAS_PER_BLOCK as f64, + alloy_eips::eip4844::MAX_DATA_GAS_PER_BLOCK as f64, excess_blob_gas: block.excess_blob_gas, blob_gas_used: block.blob_gas_used, gas_used: block.gas_used, diff --git a/crates/rpc/rpc-eth-types/src/gas_oracle.rs b/crates/rpc/rpc-eth-types/src/gas_oracle.rs index 065ac1acc204..d73cd72b650c 100644 --- a/crates/rpc/rpc-eth-types/src/gas_oracle.rs +++ b/crates/rpc/rpc-eth-types/src/gas_oracle.rs @@ -2,11 +2,11 @@ //! previous blocks. use alloy_consensus::constants::GWEI_TO_WEI; +use alloy_eips::BlockNumberOrTag; use alloy_primitives::{B256, U256}; -use alloy_rpc_types::BlockId; +use alloy_rpc_types_eth::BlockId; use derive_more::{Deref, DerefMut, From, Into}; use itertools::Itertools; -use reth_primitives::BlockNumberOrTag; use reth_rpc_server_types::constants; use reth_storage_api::BlockReaderIdExt; use schnellru::{ByLength, LruMap}; diff --git a/crates/rpc/rpc-eth-types/src/lib.rs b/crates/rpc/rpc-eth-types/src/lib.rs index fa36dae4c881..03c23dc34565 100644 --- a/crates/rpc/rpc-eth-types/src/lib.rs +++ b/crates/rpc/rpc-eth-types/src/lib.rs @@ -37,5 +37,5 @@ pub use gas_oracle::{ }; pub use id_provider::EthSubscriptionIdProvider; pub use pending_block::{PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}; -pub use receipt::ReceiptBuilder; +pub use receipt::EthReceiptBuilder; pub use transaction::TransactionSource; diff --git a/crates/rpc/rpc-eth-types/src/logs_utils.rs b/crates/rpc/rpc-eth-types/src/logs_utils.rs index 205e2bba37be..3e7c9db6d68e 100644 --- a/crates/rpc/rpc-eth-types/src/logs_utils.rs +++ b/crates/rpc/rpc-eth-types/src/logs_utils.rs @@ -2,11 +2,12 @@ //! //! Log parsing for building filter. 
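One detail in the `fee_history.rs` hunk above: the blob-gas cap now comes from `alloy_eips::eip4844` instead of `reth_primitives::constants`, with the value itself unchanged. A quick sanity check of the ratio arithmetic (Cancun cap of six 131072-gas blobs per block):

```rust
/// EIP-4844 data-gas cap at Cancun: 6 blobs * 131_072 gas each.
const MAX_DATA_GAS_PER_BLOCK: u64 = 786_432;
const DATA_GAS_PER_BLOB: u64 = 131_072;

/// Fraction of the per-block blob capacity a block actually used,
/// as reported in each `eth_feeHistory` entry.
fn blob_gas_used_ratio(blob_gas_used: u64) -> f64 {
    blob_gas_used as f64 / MAX_DATA_GAS_PER_BLOCK as f64
}

fn main() {
    // A block carrying 3 of the 6 possible blobs is exactly half full.
    assert_eq!(blob_gas_used_ratio(3 * DATA_GAS_PER_BLOB), 0.5);
}
```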
+use alloy_eips::BlockNumHash; use alloy_primitives::TxHash; -use alloy_rpc_types::{FilteredParams, Log}; +use alloy_rpc_types_eth::{FilteredParams, Log}; use reth_chainspec::ChainInfo; use reth_errors::ProviderError; -use reth_primitives::{BlockNumHash, Receipt, SealedBlockWithSenders}; +use reth_primitives::{Receipt, SealedBlockWithSenders}; use reth_storage_api::BlockReader; use std::sync::Arc; @@ -178,7 +179,7 @@ pub fn get_filter_block_range( #[cfg(test)] mod tests { - use alloy_rpc_types::Filter; + use alloy_rpc_types_eth::Filter; use super::*; @@ -241,8 +242,8 @@ mod tests { let start_block = info.best_number; let (from_block_number, to_block_number) = get_filter_block_range( - from_block.and_then(alloy_rpc_types::BlockNumberOrTag::as_number), - to_block.and_then(alloy_rpc_types::BlockNumberOrTag::as_number), + from_block.and_then(alloy_rpc_types_eth::BlockNumberOrTag::as_number), + to_block.and_then(alloy_rpc_types_eth::BlockNumberOrTag::as_number), start_block, info, ); diff --git a/crates/rpc/rpc-eth-types/src/pending_block.rs b/crates/rpc/rpc-eth-types/src/pending_block.rs index 949e205dcf88..d8f413650a30 100644 --- a/crates/rpc/rpc-eth-types/src/pending_block.rs +++ b/crates/rpc/rpc-eth-types/src/pending_block.rs @@ -4,9 +4,10 @@ use std::time::Instant; +use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::B256; use derive_more::Constructor; -use reth_primitives::{BlockId, BlockNumberOrTag, Receipt, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{Receipt, SealedBlockWithSenders, SealedHeader}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}; /// Configured [`BlockEnv`] and [`CfgEnvWithHandlerCfg`] for a pending block. diff --git a/crates/rpc/rpc-eth-types/src/receipt.rs b/crates/rpc/rpc-eth-types/src/receipt.rs index 198ca79aa2ac..247b4449ef5d 100644 --- a/crates/rpc/rpc-eth-types/src/receipt.rs +++ b/crates/rpc/rpc-eth-types/src/receipt.rs @@ -1,24 +1,101 @@ //! RPC receipt response builder, extends a layer one receipt with layer two data. -use alloy_consensus::Transaction; +use alloy_consensus::{ReceiptEnvelope, Transaction}; use alloy_primitives::{Address, TxKind}; -use alloy_rpc_types::{AnyReceiptEnvelope, Log, ReceiptWithBloom, TransactionReceipt}; -use alloy_serde::OtherFields; -use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; +use alloy_rpc_types_eth::{Log, ReceiptWithBloom, TransactionReceipt}; +use reth_primitives::{Receipt, TransactionMeta, TransactionSigned, TxType}; use revm_primitives::calc_blob_gasprice; use super::{EthApiError, EthResult}; +/// Builds an [`TransactionReceipt`] obtaining the inner receipt envelope from the given closure. 
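+/// +/// The network-agnostic steps all live here (signer recovery, per-transaction gas derived from +/// cumulative values, global log index bookkeeping, EIP-4844 blob-fee fields), while the +/// `build_envelope` closure wraps the assembled receipt in the caller's envelope type.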
+pub fn build_receipt( + transaction: &TransactionSigned, + meta: TransactionMeta, + receipt: &Receipt, + all_receipts: &[Receipt], + build_envelope: impl FnOnce(ReceiptWithBloom) -> T, +) -> EthResult> { + // Note: we assume this transaction is valid, because it's mined (or part of pending block) + // and we don't need to check for pre EIP-2 + let from = + transaction.recover_signer_unchecked().ok_or(EthApiError::InvalidTransactionSignature)?; + + // get the previous transaction cumulative gas used + let gas_used = if meta.index == 0 { + receipt.cumulative_gas_used + } else { + let prev_tx_idx = (meta.index - 1) as usize; + all_receipts + .get(prev_tx_idx) + .map(|prev_receipt| receipt.cumulative_gas_used - prev_receipt.cumulative_gas_used) + .unwrap_or_default() + }; + + let blob_gas_used = transaction.transaction.blob_gas_used(); + // Blob gas price should only be present if the transaction is a blob transaction + let blob_gas_price = blob_gas_used.and_then(|_| meta.excess_blob_gas.map(calc_blob_gasprice)); + let logs_bloom = receipt.bloom_slow(); + + // get number of logs in the block + let mut num_logs = 0; + for prev_receipt in all_receipts.iter().take(meta.index as usize) { + num_logs += prev_receipt.logs.len(); + } + + let logs: Vec = receipt + .logs + .iter() + .enumerate() + .map(|(tx_log_idx, log)| Log { + inner: log.clone(), + block_hash: Some(meta.block_hash), + block_number: Some(meta.block_number), + block_timestamp: Some(meta.timestamp), + transaction_hash: Some(meta.tx_hash), + transaction_index: Some(meta.index), + log_index: Some((num_logs + tx_log_idx) as u64), + removed: false, + }) + .collect(); + + let rpc_receipt = alloy_rpc_types_eth::Receipt { + status: receipt.success.into(), + cumulative_gas_used: receipt.cumulative_gas_used as u128, + logs, + }; + + let (contract_address, to) = match transaction.transaction.kind() { + TxKind::Create => (Some(from.create(transaction.transaction.nonce())), None), + TxKind::Call(addr) => (None, Some(Address(*addr))), + }; + + Ok(TransactionReceipt { + inner: build_envelope(ReceiptWithBloom { receipt: rpc_receipt, logs_bloom }), + transaction_hash: meta.tx_hash, + transaction_index: Some(meta.index), + block_hash: Some(meta.block_hash), + block_number: Some(meta.block_number), + from, + to, + gas_used: gas_used as u128, + contract_address, + effective_gas_price: transaction.effective_gas_price(meta.base_fee), + // EIP-4844 fields + blob_gas_price, + blob_gas_used: blob_gas_used.map(u128::from), + authorization_list: transaction.authorization_list().map(|l| l.to_vec()), + }) +} + /// Receipt response builder. #[derive(Debug)] -pub struct ReceiptBuilder { +pub struct EthReceiptBuilder { /// The base response body, contains L1 fields. - pub base: TransactionReceipt>, - /// Additional L2 fields. - pub other: OtherFields, + pub base: TransactionReceipt, } -impl ReceiptBuilder { +impl EthReceiptBuilder { /// Returns a new builder with the base response body (L1 fields) set. 
/// /// Note: This requires _all_ block receipts because we need to calculate the gas used by the @@ -29,88 +106,23 @@ impl ReceiptBuilder { receipt: &Receipt, all_receipts: &[Receipt], ) -> EthResult { - // Note: we assume this transaction is valid, because it's mined (or part of pending block) - // and we don't need to check for pre EIP-2 - let from = transaction - .recover_signer_unchecked() - .ok_or(EthApiError::InvalidTransactionSignature)?; - - // get the previous transaction cumulative gas used - let gas_used = if meta.index == 0 { - receipt.cumulative_gas_used - } else { - let prev_tx_idx = (meta.index - 1) as usize; - all_receipts - .get(prev_tx_idx) - .map(|prev_receipt| receipt.cumulative_gas_used - prev_receipt.cumulative_gas_used) - .unwrap_or_default() - }; - - let blob_gas_used = transaction.transaction.blob_gas_used(); - // Blob gas price should only be present if the transaction is a blob transaction - let blob_gas_price = - blob_gas_used.and_then(|_| meta.excess_blob_gas.map(calc_blob_gasprice)); - let logs_bloom = receipt.bloom_slow(); - - // get number of logs in the block - let mut num_logs = 0; - for prev_receipt in all_receipts.iter().take(meta.index as usize) { - num_logs += prev_receipt.logs.len(); - } - - let logs: Vec = receipt - .logs - .iter() - .enumerate() - .map(|(tx_log_idx, log)| Log { - inner: log.clone(), - block_hash: Some(meta.block_hash), - block_number: Some(meta.block_number), - block_timestamp: Some(meta.timestamp), - transaction_hash: Some(meta.tx_hash), - transaction_index: Some(meta.index), - log_index: Some((num_logs + tx_log_idx) as u64), - removed: false, - }) - .collect(); - - let rpc_receipt = alloy_rpc_types::Receipt { - status: receipt.success.into(), - cumulative_gas_used: receipt.cumulative_gas_used as u128, - logs, - }; - - let (contract_address, to) = match transaction.transaction.kind() { - TxKind::Create => (Some(from.create(transaction.transaction.nonce())), None), - TxKind::Call(addr) => (None, Some(Address(*addr))), - }; - - #[allow(clippy::needless_update)] - let base = TransactionReceipt { - inner: AnyReceiptEnvelope { - inner: ReceiptWithBloom { receipt: rpc_receipt, logs_bloom }, - r#type: transaction.transaction.tx_type().into(), - }, - transaction_hash: meta.tx_hash, - transaction_index: Some(meta.index), - block_hash: Some(meta.block_hash), - block_number: Some(meta.block_number), - from, - to, - gas_used: gas_used as u128, - contract_address, - effective_gas_price: transaction.effective_gas_price(meta.base_fee), - // EIP-4844 fields - blob_gas_price, - blob_gas_used: blob_gas_used.map(u128::from), - authorization_list: transaction.authorization_list().map(|l| l.to_vec()), - }; + let base = build_receipt(transaction, meta, receipt, all_receipts, |receipt_with_bloom| { + match receipt.tx_type { + TxType::Legacy => ReceiptEnvelope::Legacy(receipt_with_bloom), + TxType::Eip2930 => ReceiptEnvelope::Eip2930(receipt_with_bloom), + TxType::Eip1559 => ReceiptEnvelope::Eip1559(receipt_with_bloom), + TxType::Eip4844 => ReceiptEnvelope::Eip4844(receipt_with_bloom), + TxType::Eip7702 => ReceiptEnvelope::Eip7702(receipt_with_bloom), + #[allow(unreachable_patterns)] + _ => unreachable!(), + } + })?; - Ok(Self { base, other: Default::default() }) + Ok(Self { base }) } /// Builds a receipt response from the base response body, and any set additional fields. 
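Editor's note: the refactor above splits receipt assembly in two. `build_receipt` fills every network-agnostic field, and the caller injects the envelope constructor, which is what lets a network-specific builder (an OP one, say) reuse the same core. A sketch of the Ethereum closure pulled out as a free function; generic log parameters are left at their alloy defaults, and the signature is inferred from the call site, not copied from the PR:

use alloy_consensus::ReceiptEnvelope;
use alloy_rpc_types_eth::ReceiptWithBloom;
use reth_primitives::TxType;

/// Wraps a built receipt in the envelope matching its transaction type,
/// mirroring the closure `EthReceiptBuilder::new` passes to `build_receipt`.
fn eth_envelope(tx_type: TxType, with_bloom: ReceiptWithBloom) -> ReceiptEnvelope {
    match tx_type {
        TxType::Legacy => ReceiptEnvelope::Legacy(with_bloom),
        TxType::Eip2930 => ReceiptEnvelope::Eip2930(with_bloom),
        TxType::Eip1559 => ReceiptEnvelope::Eip1559(with_bloom),
        TxType::Eip4844 => ReceiptEnvelope::Eip4844(with_bloom),
        TxType::Eip7702 => ReceiptEnvelope::Eip7702(with_bloom),
        #[allow(unreachable_patterns)]
        _ => unreachable!("non-Ethereum tx type"),
    }
}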
- pub fn build(self) -> TransactionReceipt> { + pub fn build(self) -> TransactionReceipt { self.base } } diff --git a/crates/rpc/rpc-eth-types/src/revm_utils.rs b/crates/rpc/rpc-eth-types/src/revm_utils.rs index 7dc20c524219..782ef5697960 100644 --- a/crates/rpc/rpc-eth-types/src/revm_utils.rs +++ b/crates/rpc/rpc-eth-types/src/revm_utils.rs @@ -1,7 +1,7 @@ //! utilities for working with revm use alloy_primitives::{Address, B256, U256}; -use alloy_rpc_types::{ +use alloy_rpc_types_eth::{ state::{AccountOverride, StateOverride}, BlockOverrides, }; @@ -265,7 +265,7 @@ where { // we need to fetch the account via the `DatabaseRef` to not update the state of the account, // which is modified via `Database::basic_ref` - let mut account_info = DatabaseRef::basic_ref(db, account)?.unwrap_or_default(); + let mut account_info = db.basic_ref(account)?.unwrap_or_default(); if let Some(nonce) = account_override.nonce { account_info.nonce = nonce; diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index 4249c78fe6ac..9807010c0cfb 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -1,17 +1,16 @@ //! Utilities for serving `eth_simulateV1` use alloy_consensus::{Transaction as _, TxEip4844Variant, TxType, TypedTransaction}; -use alloy_primitives::Parity; -use alloy_rpc_types::{ +use alloy_primitives::PrimitiveSignature as Signature; +use alloy_rpc_types_eth::{ simulate::{SimCallResult, SimulateError, SimulatedBlock}, + transaction::TransactionRequest, Block, BlockTransactionsKind, }; -use alloy_rpc_types_eth::transaction::TransactionRequest; use jsonrpsee_types::ErrorObject; use reth_primitives::{ proofs::{calculate_receipt_root, calculate_transaction_root}, - BlockBody, BlockWithSenders, Receipt, Signature, Transaction, TransactionSigned, - TransactionSignedNoHash, + BlockBody, BlockWithSenders, Receipt, Transaction, TransactionSigned, TransactionSignedNoHash, }; use reth_revm::database::StateProviderDatabase; use reth_rpc_server_types::result::rpc_err; @@ -22,8 +21,9 @@ use revm::{db::CacheDB, Database}; use revm_primitives::{keccak256, Address, BlockEnv, Bytes, ExecutionResult, TxKind, B256, U256}; use crate::{ - cache::db::StateProviderTraitObjWrapper, error::ToRpcError, EthApiError, RevertError, - RpcInvalidTransactionError, + cache::db::StateProviderTraitObjWrapper, + error::{api::FromEthApiError, ToRpcError}, + EthApiError, RevertError, RpcInvalidTransactionError, }; /// Errors which may occur during `eth_simulateV1` execution. @@ -134,8 +134,7 @@ where }; // Create an empty signature for the transaction. - let signature = - Signature::new(Default::default(), Default::default(), Parity::Parity(false)); + let signature = Signature::new(Default::default(), Default::default(), false); let tx = match tx { TypedTransaction::Legacy(tx) => { @@ -171,8 +170,8 @@ where } /// Handles outputs of the calls execution and builds a [`SimulatedBlock`]. 
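Editor's note on the simulate changes above: alloy's `Parity` wrapper is gone, so a signature's y-parity is now a plain bool. The zeroed placeholder that `eth_simulateV1` attaches to unsigned calls therefore shrinks to the following; a minimal sketch using the same alias the hunk imports:

use alloy_primitives::{PrimitiveSignature as Signature, U256};

/// Placeholder signature for simulated (unsigned) transactions:
/// r = 0, s = 0, y-parity = false.
fn empty_signature() -> Signature {
    Signature::new(U256::ZERO, U256::ZERO, false)
}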
-#[expect(clippy::too_many_arguments)] -pub fn build_block( +#[expect(clippy::complexity)] +pub fn build_block>( results: Vec<(Address, ExecutionResult)>, transactions: Vec, block_env: &BlockEnv, @@ -181,7 +180,7 @@ pub fn build_block( full_transactions: bool, db: &CacheDB>>, tx_resp_builder: &T, -) -> Result>, EthApiError> { +) -> Result>, T::Error> { let mut calls: Vec = Vec::with_capacity(results.len()); let mut senders = Vec::with_capacity(results.len()); let mut receipts = Vec::with_capacity(results.len()); @@ -227,7 +226,7 @@ pub fn build_block( .into_iter() .map(|log| { log_index += 1; - alloy_rpc_types::Log { + alloy_rpc_types_eth::Log { inner: log, log_index: Some(log_index - 1), transaction_index: Some(transaction_index as u64), @@ -274,9 +273,9 @@ pub fn build_block( } } - let state_root = db.db.0.state_root(hashed_state)?; + let state_root = db.db.state_root(hashed_state).map_err(T::Error::from_eth_err)?; - let header = reth_primitives::Header { + let header = alloy_consensus::Header { beneficiary: block_env.coinbase, difficulty: block_env.difficulty, number: block_env.number.to(), @@ -307,6 +306,6 @@ pub fn build_block( let txs_kind = if full_transactions { BlockTransactionsKind::Full } else { BlockTransactionsKind::Hashes }; - let block = from_block(block, total_difficulty, txs_kind, None, tx_resp_builder)?; + let block = from_block::(block, total_difficulty, txs_kind, None, tx_resp_builder)?; Ok(SimulatedBlock { inner: block, calls }) } diff --git a/crates/rpc/rpc-eth-types/src/transaction.rs b/crates/rpc/rpc-eth-types/src/transaction.rs index 7d2237a1b7fb..a4ede0a1a4eb 100644 --- a/crates/rpc/rpc-eth-types/src/transaction.rs +++ b/crates/rpc/rpc-eth-types/src/transaction.rs @@ -3,7 +3,7 @@ //! Transaction wrapper that labels transaction with its origin. use alloy_primitives::B256; -use alloy_rpc_types::TransactionInfo; +use alloy_rpc_types_eth::TransactionInfo; use reth_primitives::TransactionSignedEcRecovered; use reth_rpc_types_compat::{ transaction::{from_recovered, from_recovered_with_block_context}, @@ -41,7 +41,10 @@ impl TransactionSource { } /// Conversion into network specific transaction type. 
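Editor's note: the conversion named just above turns fallible in the hunk that follows, because filling a transaction response can now fail in network-specific ways. A sketch of what a caller does with the new `Result`, logging rather than propagating, as the pending-transaction filter stream does further down this diff; the collector function itself is hypothetical:

use reth_rpc_eth_types::TransactionSource;
use reth_rpc_types_compat::TransactionCompat;

/// Collects RPC transactions, logging fill errors instead of aborting.
fn collect_filled<T: TransactionCompat>(
    sources: Vec<TransactionSource>,
    resp_builder: &T,
) -> Vec<T::Transaction> {
    let mut out = Vec::with_capacity(sources.len());
    for src in sources {
        match src.into_transaction(resp_builder) {
            Ok(tx) => out.push(tx),
            Err(err) => tracing::error!(%err, "failed to fill rpc transaction"),
        }
    }
    out
}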
- pub fn into_transaction(self, resp_builder: &T) -> T::Transaction { + pub fn into_transaction( + self, + resp_builder: &T, + ) -> Result { match self { Self::Pool(tx) => from_recovered(tx, resp_builder), Self::Block { transaction, index, block_hash, block_number, base_fee } => { diff --git a/crates/rpc/rpc-layer/Cargo.toml b/crates/rpc/rpc-layer/Cargo.toml index ec8dcb8229ec..d44e5e89f013 100644 --- a/crates/rpc/rpc-layer/Cargo.toml +++ b/crates/rpc/rpc-layer/Cargo.toml @@ -17,10 +17,11 @@ http.workspace = true jsonrpsee-http-client.workspace = true pin-project.workspace = true tower.workspace = true - +tower-http = { workspace = true, features = ["full"] } tracing.workspace = true [dev-dependencies] reqwest.workspace = true tokio = { workspace = true, features = ["macros"] } jsonrpsee = { workspace = true, features = ["server"] } +http-body-util.workspace=true diff --git a/crates/rpc/rpc-layer/src/compression_layer.rs b/crates/rpc/rpc-layer/src/compression_layer.rs new file mode 100644 index 000000000000..cf15f04aa78a --- /dev/null +++ b/crates/rpc/rpc-layer/src/compression_layer.rs @@ -0,0 +1,169 @@ +use jsonrpsee_http_client::{HttpBody, HttpRequest, HttpResponse}; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; +use tower::{Layer, Service}; +use tower_http::compression::{Compression, CompressionLayer as TowerCompressionLayer}; + +/// This layer is a wrapper around [`tower_http::compression::CompressionLayer`] that integrates +/// with jsonrpsee's HTTP types. It automatically compresses responses based on the client's +/// Accept-Encoding header. +#[allow(missing_debug_implementations)] +#[derive(Clone)] +pub struct CompressionLayer { + inner_layer: TowerCompressionLayer, +} + +impl CompressionLayer { + /// Creates a new compression layer with zstd, gzip, brotli and deflate enabled. + pub fn new() -> Self { + Self { + inner_layer: TowerCompressionLayer::new().gzip(true).br(true).deflate(true).zstd(true), + } + } +} + +impl Default for CompressionLayer { + /// Creates a new compression layer with default settings. + /// See [`CompressionLayer::new`] for details. + fn default() -> Self { + Self::new() + } +} + +impl Layer for CompressionLayer { + type Service = CompressionService; + + fn layer(&self, inner: S) -> Self::Service { + CompressionService { compression: self.inner_layer.layer(inner) } + } +} + +/// Service that performs response compression. +/// +/// Created by [`CompressionLayer`]. 
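Editor's note: a usage sketch for the new layer. It is designed to slot into the tower middleware stack a jsonrpsee server is built with; the builder wiring below follows the jsonrpsee server API in broad strokes and is illustrative, not taken from this PR:

use jsonrpsee::{server::ServerBuilder, RpcModule};
use reth_rpc_layer::CompressionLayer;
use tower::ServiceBuilder;

async fn serve_compressed() -> std::io::Result<()> {
    // Responses are compressed per the client's Accept-Encoding header.
    let http_middleware = ServiceBuilder::new().layer(CompressionLayer::new());
    let server = ServerBuilder::default()
        .set_http_middleware(http_middleware)
        .build("127.0.0.1:8545")
        .await?;
    let handle = server.start(RpcModule::new(()));
    handle.stopped().await;
    Ok(())
}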
+#[allow(missing_debug_implementations)] +#[derive(Clone)] +pub struct CompressionService { + compression: Compression, +} + +impl Service for CompressionService +where + S: Service, + S::Future: Send + 'static, +{ + type Response = HttpResponse; + type Error = S::Error; + type Future = Pin> + Send>>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.compression.poll_ready(cx) + } + + fn call(&mut self, req: HttpRequest) -> Self::Future { + let fut = self.compression.call(req); + + Box::pin(async move { + let resp = fut.await?; + let (parts, compressed_body) = resp.into_parts(); + let http_body = HttpBody::new(compressed_body); + + Ok(Self::Response::from_parts(parts, http_body)) + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use http::header::{ACCEPT_ENCODING, CONTENT_ENCODING}; + use http_body_util::BodyExt; + use jsonrpsee_http_client::{HttpRequest, HttpResponse}; + use std::{convert::Infallible, future::ready}; + + const TEST_DATA: &str = "compress test data "; + const REPEAT_COUNT: usize = 1000; + + #[derive(Clone)] + struct MockRequestService; + + impl Service for MockRequestService { + type Response = HttpResponse; + type Error = Infallible; + type Future = std::future::Ready>; + + fn poll_ready( + &mut self, + _: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + std::task::Poll::Ready(Ok(())) + } + + fn call(&mut self, _: HttpRequest) -> Self::Future { + let body = HttpBody::from(TEST_DATA.repeat(REPEAT_COUNT)); + let response = HttpResponse::builder().body(body).unwrap(); + ready(Ok(response)) + } + } + + fn setup_compression_service( + ) -> impl Service { + CompressionLayer::new().layer(MockRequestService) + } + + async fn get_response_size(response: HttpResponse) -> usize { + // Get the total size of the response body + response.into_body().collect().await.unwrap().to_bytes().len() + } + + #[tokio::test] + async fn test_gzip_compression() { + let mut service = setup_compression_service(); + let request = + HttpRequest::builder().header(ACCEPT_ENCODING, "gzip").body(HttpBody::empty()).unwrap(); + + let uncompressed_len = TEST_DATA.repeat(REPEAT_COUNT).len(); + + // Make the request + let response = service.call(request).await.unwrap(); + + // Verify the response has gzip content-encoding + assert_eq!( + response.headers().get(CONTENT_ENCODING).unwrap(), + "gzip", + "Response should be gzip encoded" + ); + + // Verify the response body is actually compressed (should be smaller than original) + let compressed_size = get_response_size(response).await; + assert!( + compressed_size < uncompressed_len, + "Compressed size ({compressed_size}) should be smaller than original size ({uncompressed_len})" + ); + } + + #[tokio::test] + async fn test_no_compression_when_not_requested() { + // Create a service with compression + let mut service = setup_compression_service(); + let request = HttpRequest::builder().body(HttpBody::empty()).unwrap(); + + let response = service.call(request).await.unwrap(); + assert!( + response.headers().get(CONTENT_ENCODING).is_none(), + "Response should not be compressed when not requested" + ); + + let uncompressed_len = TEST_DATA.repeat(REPEAT_COUNT).len(); + + // Verify the response body matches the original size + let response_size = get_response_size(response).await; + assert!( + response_size == uncompressed_len, + "Response size ({response_size}) should equal original size ({uncompressed_len})" + ); + } +} diff --git a/crates/rpc/rpc-layer/src/lib.rs b/crates/rpc/rpc-layer/src/lib.rs index 8387bb160e8b..540daf5592b7 
100644 --- a/crates/rpc/rpc-layer/src/lib.rs +++ b/crates/rpc/rpc-layer/src/lib.rs @@ -13,9 +13,11 @@ use jsonrpsee_http_client::HttpResponse; mod auth_client_layer; mod auth_layer; +mod compression_layer; mod jwt_validator; pub use auth_layer::{AuthService, ResponseFuture}; +pub use compression_layer::CompressionLayer; // Export alloy JWT types pub use alloy_rpc_types_engine::{Claims, JwtError, JwtSecret}; diff --git a/crates/rpc/rpc-server-types/Cargo.toml b/crates/rpc/rpc-server-types/Cargo.toml index 08ecd3947742..275d8ea561b9 100644 --- a/crates/rpc/rpc-server-types/Cargo.toml +++ b/crates/rpc/rpc-server-types/Cargo.toml @@ -14,11 +14,11 @@ workspace = true [dependencies] reth-errors.workspace = true reth-network-api.workspace = true -reth-primitives.workspace = true # ethereum alloy-primitives.workspace = true alloy-rpc-types-engine.workspace = true +alloy-eips.workspace = true # rpc jsonrpsee-core.workspace = true @@ -27,4 +27,3 @@ jsonrpsee-types.workspace = true # misc strum = { workspace = true, features = ["derive"] } serde = { workspace = true, features = ["derive"] } - diff --git a/crates/rpc/rpc-server-types/src/module.rs b/crates/rpc/rpc-server-types/src/module.rs index 56417dda701c..9f96ff0cef35 100644 --- a/crates/rpc/rpc-server-types/src/module.rs +++ b/crates/rpc/rpc-server-types/src/module.rs @@ -258,6 +258,8 @@ pub enum RethRpcModule { Reth, /// `ots_` module Ots, + /// `flashbots_` module + Flashbots, } // === impl RethRpcModule === @@ -306,6 +308,7 @@ impl FromStr for RethRpcModule { "rpc" => Self::Rpc, "reth" => Self::Reth, "ots" => Self::Ots, + "flashbots" => Self::Flashbots, _ => return Err(ParseError::VariantNotFound), }) } diff --git a/crates/rpc/rpc-server-types/src/result.rs b/crates/rpc/rpc-server-types/src/result.rs index 78e6436643a7..5d1b702e9fca 100644 --- a/crates/rpc/rpc-server-types/src/result.rs +++ b/crates/rpc/rpc-server-types/src/result.rs @@ -2,9 +2,10 @@ use std::fmt; +use alloy_eips::BlockId; use alloy_rpc_types_engine::PayloadError; use jsonrpsee_core::RpcResult; -use reth_primitives::BlockId; +use reth_errors::ConsensusError; /// Helper trait to easily convert various `Result` types into [`RpcResult`] pub trait ToRpcResult: Sized { @@ -102,6 +103,7 @@ macro_rules! 
impl_to_rpc_result { } impl_to_rpc_result!(PayloadError); +impl_to_rpc_result!(ConsensusError); impl_to_rpc_result!(reth_errors::RethError); impl_to_rpc_result!(reth_errors::ProviderError); impl_to_rpc_result!(reth_network_api::NetworkError); diff --git a/crates/rpc/rpc-testing-util/Cargo.toml b/crates/rpc/rpc-testing-util/Cargo.toml index 4977c3a2c409..149073b1c680 100644 --- a/crates/rpc/rpc-testing-util/Cargo.toml +++ b/crates/rpc/rpc-testing-util/Cargo.toml @@ -19,8 +19,8 @@ reth-rpc-api = { workspace = true, features = ["client"] } # ethereum alloy-primitives.workspace = true alloy-rpc-types-eth.workspace = true -alloy-rpc-types.workspace = true alloy-rpc-types-trace.workspace = true +alloy-eips.workspace = true # async futures.workspace = true @@ -36,4 +36,4 @@ similar-asserts.workspace = true tokio = { workspace = true, features = ["rt-multi-thread", "macros", "rt"] } reth-rpc-eth-api.workspace = true jsonrpsee-http-client.workspace = true -alloy-rpc-types-trace.workspace = true \ No newline at end of file +alloy-rpc-types-trace.workspace = true diff --git a/crates/rpc/rpc-testing-util/src/debug.rs b/crates/rpc/rpc-testing-util/src/debug.rs index f50064e80ce9..a18771af3b0d 100644 --- a/crates/rpc/rpc-testing-util/src/debug.rs +++ b/crates/rpc/rpc-testing-util/src/debug.rs @@ -6,16 +6,16 @@ use std::{ task::{Context, Poll}, }; +use alloy_eips::BlockId; use alloy_primitives::{TxHash, B256}; -use alloy_rpc_types::{Block, Transaction}; -use alloy_rpc_types_eth::transaction::TransactionRequest; +use alloy_rpc_types_eth::{transaction::TransactionRequest, Block, Transaction}; use alloy_rpc_types_trace::{ common::TraceResult, geth::{GethDebugTracerType, GethDebugTracingOptions, GethTrace}, }; use futures::{Stream, StreamExt}; use jsonrpsee::core::client::Error as RpcError; -use reth_primitives::{BlockId, Receipt}; +use reth_primitives::Receipt; use reth_rpc_api::{clients::DebugApiClient, EthApiClient}; const NOOP_TRACER: &str = include_str!("../assets/noop-tracer.js"); @@ -292,7 +292,7 @@ pub struct DebugTraceTransactionsStream<'a> { stream: Pin + 'a>>, } -impl<'a> DebugTraceTransactionsStream<'a> { +impl DebugTraceTransactionsStream<'_> { /// Returns the next error result of the stream. pub async fn next_err(&mut self) -> Option<(RpcError, TxHash)> { loop { @@ -324,7 +324,7 @@ pub struct DebugTraceBlockStream<'a> { stream: Pin + 'a>>, } -impl<'a> DebugTraceBlockStream<'a> { +impl DebugTraceBlockStream<'_> { /// Returns the next error result of the stream. pub async fn next_err(&mut self) -> Option<(RpcError, BlockId)> { loop { diff --git a/crates/rpc/rpc-testing-util/src/trace.rs b/crates/rpc/rpc-testing-util/src/trace.rs index c6dc16cf1063..b963fa69d8b9 100644 --- a/crates/rpc/rpc-testing-util/src/trace.rs +++ b/crates/rpc/rpc-testing-util/src/trace.rs @@ -1,8 +1,8 @@ //! Helpers for testing trace calls. 
+use alloy_eips::BlockId; use alloy_primitives::{map::HashSet, Bytes, TxHash, B256}; -use alloy_rpc_types::Index; -use alloy_rpc_types_eth::transaction::TransactionRequest; +use alloy_rpc_types_eth::{transaction::TransactionRequest, Index}; use alloy_rpc_types_trace::{ filter::TraceFilter, parity::{LocalizedTransactionTrace, TraceResults, TraceType}, @@ -10,7 +10,6 @@ use alloy_rpc_types_trace::{ }; use futures::{Stream, StreamExt}; use jsonrpsee::core::client::Error as RpcError; -use reth_primitives::BlockId; use reth_rpc_api::clients::TraceApiClient; use std::{ pin::Pin, @@ -381,7 +380,7 @@ pub struct TraceBlockStream<'a> { stream: Pin + 'a>>, } -impl<'a> TraceBlockStream<'a> { +impl TraceBlockStream<'_> { /// Returns the next error result of the stream. pub async fn next_err(&mut self) -> Option<(RpcError, BlockId)> { loop { @@ -514,9 +513,9 @@ where #[cfg(test)] mod tests { use super::*; + use alloy_eips::BlockNumberOrTag; use alloy_rpc_types_trace::filter::TraceFilterMode; use jsonrpsee::http_client::HttpClientBuilder; - use reth_primitives::BlockNumberOrTag; const fn assert_is_stream(_: &St) {} diff --git a/crates/rpc/rpc-testing-util/tests/it/trace.rs b/crates/rpc/rpc-testing-util/tests/it/trace.rs index b0fccefbb464..4c5d2ccb2a67 100644 --- a/crates/rpc/rpc-testing-util/tests/it/trace.rs +++ b/crates/rpc/rpc-testing-util/tests/it/trace.rs @@ -1,7 +1,7 @@ //! Integration tests for the trace API. use alloy_primitives::map::HashSet; -use alloy_rpc_types::{Block, Transaction}; +use alloy_rpc_types_eth::{Block, Transaction}; use alloy_rpc_types_trace::{ filter::TraceFilter, parity::TraceType, tracerequest::TraceCallRequest, }; diff --git a/crates/rpc/rpc-types-compat/Cargo.toml b/crates/rpc/rpc-types-compat/Cargo.toml index 7d5eac9dbb91..887986ada122 100644 --- a/crates/rpc/rpc-types-compat/Cargo.toml +++ b/crates/rpc/rpc-types-compat/Cargo.toml @@ -17,17 +17,17 @@ reth-primitives.workspace = true reth-trie-common.workspace = true # ethereum +alloy-serde.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true -alloy-rpc-types.workspace = true alloy-rpc-types-eth = { workspace = true, default-features = false, features = ["serde"] } -alloy-serde.workspace = true alloy-rpc-types-engine.workspace = true alloy-consensus.workspace = true # io serde.workspace = true +jsonrpsee-types.workspace = true [dev-dependencies] -serde_json.workspace = true \ No newline at end of file +serde_json.workspace = true diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs index a954e05e4f65..43086b311bd7 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -1,13 +1,13 @@ //! Compatibility functions for rpc `Block` type. 
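Editor's note: the stream impls above move from `impl<'a> TraceBlockStream<'a>` to `impl TraceBlockStream<'_>`. Nothing changes semantically; the lifetime parameter is elided because it is never named in the impl body. In miniature:

struct TraceStream<'a> {
    ids: &'a [u64],
}

// Before: impl<'a> TraceStream<'a> { ... }
// After: the anonymous lifetime, since 'a never appears in the body.
impl TraceStream<'_> {
    fn remaining(&self) -> usize {
        self.ids.len()
    }
}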
+use alloy_consensus::Sealed; +use alloy_eips::eip4895::Withdrawals; use alloy_primitives::{B256, U256}; use alloy_rlp::Encodable; -use alloy_rpc_types::{ - Block, BlockError, BlockTransactions, BlockTransactionsKind, Header, TransactionInfo, -}; -use reth_primitives::{ - Block as PrimitiveBlock, BlockWithSenders, Header as PrimitiveHeader, SealedHeader, Withdrawals, +use alloy_rpc_types_eth::{ + Block, BlockTransactions, BlockTransactionsKind, Header, TransactionInfo, }; +use reth_primitives::{Block as PrimitiveBlock, BlockWithSenders}; use crate::{transaction::from_recovered_with_block_context, TransactionCompat}; @@ -21,7 +21,7 @@ pub fn from_block( kind: BlockTransactionsKind, block_hash: Option, tx_resp_builder: &T, -) -> Result, BlockError> { +) -> Result, T::Error> { match kind { BlockTransactionsKind::Hashes => { Ok(from_block_with_tx_hashes::(block, total_difficulty, block_hash)) @@ -64,7 +64,7 @@ pub fn from_block_full( total_difficulty: U256, block_hash: Option, tx_resp_builder: &T, -) -> Result, BlockError> { +) -> Result, T::Error> { let block_hash = block_hash.unwrap_or_else(|| block.block.header.hash_slow()); let block_number = block.block.number; let base_fee_per_gas = block.block.base_fee_per_gas; @@ -89,7 +89,7 @@ pub fn from_block_full( from_recovered_with_block_context::(signed_tx_ec_recovered, tx_info, tx_resp_builder) }) - .collect::>(); + .collect::, T::Error>>()?; Ok(from_block_with_transactions( block_length, @@ -100,64 +100,6 @@ pub fn from_block_full( )) } -/// Converts from a [`reth_primitives::SealedHeader`] to a [`alloy-rpc-types::Header`] -/// -/// # Note -/// -/// This does not set the `totalDifficulty` field. -pub fn from_primitive_with_hash(primitive_header: reth_primitives::SealedHeader) -> Header { - let (header, hash) = primitive_header.split(); - let PrimitiveHeader { - parent_hash, - ommers_hash, - beneficiary, - state_root, - transactions_root, - receipts_root, - logs_bloom, - difficulty, - number, - gas_limit, - gas_used, - timestamp, - mix_hash, - nonce, - base_fee_per_gas, - extra_data, - withdrawals_root, - blob_gas_used, - excess_blob_gas, - parent_beacon_block_root, - requests_hash, - } = header; - - Header { - hash, - parent_hash, - uncles_hash: ommers_hash, - miner: beneficiary, - state_root, - transactions_root, - receipts_root, - withdrawals_root, - number, - gas_used, - gas_limit, - extra_data, - logs_bloom, - timestamp, - difficulty, - mix_hash: Some(mix_hash), - nonce: Some(nonce), - base_fee_per_gas, - blob_gas_used, - excess_blob_gas, - parent_beacon_block_root, - total_difficulty: None, - requests_hash, - } -} - #[inline] fn from_block_with_transactions( block_length: usize, @@ -166,31 +108,19 @@ fn from_block_with_transactions( total_difficulty: U256, transactions: BlockTransactions, ) -> Block { - let uncles = block.body.ommers.into_iter().map(|h| h.hash_slow()).collect(); - let mut header = from_primitive_with_hash(SealedHeader::new(block.header, block_hash)); - header.total_difficulty = Some(total_difficulty); - - let withdrawals = header + let withdrawals = block + .header .withdrawals_root .is_some() - .then(|| block.body.withdrawals.map(Withdrawals::into_inner)) + .then(|| block.body.withdrawals.map(Withdrawals::into_inner).map(Into::into)) .flatten(); - Block { header, uncles, transactions, size: Some(U256::from(block_length)), withdrawals } -} + let uncles = block.body.ommers.into_iter().map(|h| h.hash_slow()).collect(); + let header = Header::from_consensus( + Sealed::new_unchecked(block.header, block_hash), + 
Some(total_difficulty), + Some(U256::from(block_length)), + ); -/// Build an RPC block response representing -/// an Uncle from its header. -pub fn uncle_block_from_header(header: PrimitiveHeader) -> Block { - let hash = header.hash_slow(); - let uncle_block = PrimitiveBlock { header, ..Default::default() }; - let size = Some(U256::from(uncle_block.length())); - let rpc_header = from_primitive_with_hash(SealedHeader::new(uncle_block.header, hash)); - Block { - uncles: vec![], - header: rpc_header, - transactions: BlockTransactions::Uncle, - withdrawals: None, - size, - } + Block { header, uncles, transactions, withdrawals } } diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index b4c45a617812..7f260a7693c4 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -1,9 +1,10 @@ //! Standalone Conversion Functions for Handling Different Versions of Execution Payloads in //! Ethereum's Engine -use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, EMPTY_OMMER_ROOT_HASH}; +use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, Header, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::{ eip2718::{Decodable2718, Encodable2718}, + eip4895::Withdrawals, eip7685::Requests, }; use alloy_primitives::{B256, U256}; @@ -14,7 +15,7 @@ use alloy_rpc_types_engine::{ }; use reth_primitives::{ proofs::{self}, - Block, BlockBody, Header, SealedBlock, TransactionSigned, Withdrawals, + Block, BlockBody, SealedBlock, TransactionSigned, }; /// Converts [`ExecutionPayloadV1`] to [`Block`] diff --git a/crates/rpc/rpc-types-compat/src/proof.rs b/crates/rpc/rpc-types-compat/src/proof.rs index 19bc76f3d7bc..b860bc3491d7 100644 --- a/crates/rpc/rpc-types-compat/src/proof.rs +++ b/crates/rpc/rpc-types-compat/src/proof.rs @@ -1,16 +1,22 @@ //! Compatibility functions for rpc proof related types. -use alloy_rpc_types::serde_helpers::JsonStorageKey; use alloy_rpc_types_eth::{EIP1186AccountProofResponse, EIP1186StorageProof}; +use alloy_serde::JsonStorageKey; use reth_trie_common::{AccountProof, StorageProof}; /// Creates a new rpc storage proof from a primitive storage proof type. -pub fn from_primitive_storage_proof(proof: StorageProof) -> EIP1186StorageProof { - EIP1186StorageProof { key: JsonStorageKey(proof.key), value: proof.value, proof: proof.proof } +pub fn from_primitive_storage_proof( + proof: StorageProof, + slot: JsonStorageKey, +) -> EIP1186StorageProof { + EIP1186StorageProof { key: slot, value: proof.value, proof: proof.proof } } /// Creates a new rpc account proof from a primitive account proof type. 
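Editor's note: the block-compat hunk above deletes the hand-maintained field-by-field header mapping in favor of alloy's constructor, so new consensus-header fields flow through without further compat code. The shape of the call as a standalone sketch; parameter names are illustrative:

use alloy_consensus::Sealed;
use alloy_primitives::{B256, U256};
use alloy_rpc_types_eth::Header;

/// RPC header from an already-hashed consensus header plus the two
/// RPC-only fields (`totalDifficulty`, `size`).
fn rpc_header(
    consensus: alloy_consensus::Header,
    hash: B256,
    total_difficulty: U256,
    rlp_size: usize,
) -> Header {
    Header::from_consensus(
        Sealed::new_unchecked(consensus, hash),
        Some(total_difficulty),
        Some(U256::from(rlp_size)),
    )
}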
-pub fn from_primitive_account_proof(proof: AccountProof) -> EIP1186AccountProofResponse { +pub fn from_primitive_account_proof( + proof: AccountProof, + slots: Vec, +) -> EIP1186AccountProofResponse { let info = proof.info.unwrap_or_default(); EIP1186AccountProofResponse { address: proof.address, @@ -19,6 +25,13 @@ pub fn from_primitive_account_proof(proof: AccountProof) -> EIP1186AccountProofR nonce: info.nonce, storage_hash: proof.storage_root, account_proof: proof.proof, - storage_proof: proof.storage_proofs.into_iter().map(from_primitive_storage_proof).collect(), + storage_proof: proof + .storage_proofs + .into_iter() + .filter_map(|proof| { + let input_slot = slots.iter().find(|s| s.as_b256() == proof.key)?; + Some(from_primitive_storage_proof(proof, *input_slot)) + }) + .collect(), } } diff --git a/crates/rpc/rpc-types-compat/src/transaction/mod.rs b/crates/rpc/rpc-types-compat/src/transaction.rs similarity index 57% rename from crates/rpc/rpc-types-compat/src/transaction/mod.rs rename to crates/rpc/rpc-types-compat/src/transaction.rs index f8b46454dc23..9e8fae670963 100644 --- a/crates/rpc/rpc-types-compat/src/transaction/mod.rs +++ b/crates/rpc/rpc-types-compat/src/transaction.rs @@ -1,17 +1,14 @@ //! Compatibility functions for rpc `Transaction` type. -mod signature; - -pub use signature::*; +use core::error; use std::fmt; use alloy_consensus::Transaction as _; -use alloy_rpc_types::{ +use alloy_rpc_types_eth::{ request::{TransactionInput, TransactionRequest}, - Transaction, TransactionInfo, + TransactionInfo, }; -use alloy_serde::WithOtherFields; -use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered, TxType}; +use reth_primitives::TransactionSignedEcRecovered; use serde::{Deserialize, Serialize}; /// Create a new rpc transaction result for a mined transaction, using the given block hash, @@ -23,7 +20,7 @@ pub fn from_recovered_with_block_context( tx: TransactionSignedEcRecovered, tx_info: TransactionInfo, resp_builder: &T, -) -> T::Transaction { +) -> Result { resp_builder.fill(tx, tx_info) } @@ -32,7 +29,7 @@ pub fn from_recovered_with_block_context( pub fn from_recovered( tx: TransactionSignedEcRecovered, resp_builder: &T, -) -> T::Transaction { +) -> Result { resp_builder.fill(tx, TransactionInfo::default()) } @@ -45,77 +42,23 @@ pub trait TransactionCompat: Send + Sync + Unpin + Clone + fmt::Debug { + Sync + Unpin + Clone - + Default + fmt::Debug; - /// - /// Formats gas price and max fee per gas for RPC transaction response w.r.t. network specific - /// transaction type. - fn gas_price(signed_tx: &TransactionSigned, base_fee: Option) -> GasPrice { - #[allow(unreachable_patterns)] - match signed_tx.tx_type() { - TxType::Legacy | TxType::Eip2930 => { - GasPrice { gas_price: Some(signed_tx.max_fee_per_gas()), max_fee_per_gas: None } - } - TxType::Eip1559 | TxType::Eip4844 | TxType::Eip7702 => { - // the gas price field for EIP1559 is set to `min(tip, gasFeeCap - baseFee) + - // baseFee` - let gas_price = base_fee - .and_then(|base_fee| { - signed_tx.effective_tip_per_gas(base_fee).map(|tip| tip + base_fee as u128) - }) - .unwrap_or_else(|| signed_tx.max_fee_per_gas()); - - GasPrice { - gas_price: Some(gas_price), - max_fee_per_gas: Some(signed_tx.max_fee_per_gas()), - } - } - _ => GasPrice::default(), - } - } + /// RPC transaction error type. + type Error: error::Error + Into>; /// Create a new rpc transaction result for a _pending_ signed transaction, setting block /// environment related fields to `None`. 
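Editor's note on why the proof-compat change threads the caller's requested slots through: `eth_getProof` must echo each storage key in the exact representation the caller used (`JsonStorageKey` preserves hash vs. quantity formatting), while the primitive proof only retains the canonical `B256`. The matching step in isolation, with method names as in the hunk above:

use alloy_primitives::B256;
use alloy_serde::JsonStorageKey;

/// Finds the caller-supplied key whose canonical form matches a proved slot,
/// preserving the caller's original formatting in the response.
fn echo_key(slots: &[JsonStorageKey], proved: B256) -> Option<JsonStorageKey> {
    slots.iter().find(|slot| slot.as_b256() == proved).copied()
}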
- fn fill(&self, tx: TransactionSignedEcRecovered, tx_inf: TransactionInfo) -> Self::Transaction; + fn fill( + &self, + tx: TransactionSignedEcRecovered, + tx_inf: TransactionInfo, + ) -> Result; /// Truncates the input of a transaction to only the first 4 bytes. // todo: remove in favour of using constructor on `TransactionResponse` or similar // . fn otterscan_api_truncate_input(tx: &mut Self::Transaction); - - /// Returns the transaction type. - // todo: remove when alloy TransactionResponse trait it updated. - fn tx_type(tx: &Self::Transaction) -> u8; -} - -impl TransactionCompat for () { - // this noop impl depends on integration in `reth_rpc_eth_api::EthApiTypes` noop impl, and - // `alloy_network::AnyNetwork` - type Transaction = WithOtherFields; - - fn fill( - &self, - _tx: TransactionSignedEcRecovered, - _tx_info: TransactionInfo, - ) -> Self::Transaction { - WithOtherFields::default() - } - - fn otterscan_api_truncate_input(_tx: &mut Self::Transaction) {} - - fn tx_type(_tx: &Self::Transaction) -> u8 { - 0 - } -} - -/// Gas price and max fee per gas for a transaction. Helper type to format transaction RPC response. -#[derive(Debug, Default)] -pub struct GasPrice { - /// Gas price for transaction. - pub gas_price: Option, - /// Max fee per gas for transaction. - pub max_fee_per_gas: Option, } /// Convert [`TransactionSignedEcRecovered`] to [`TransactionRequest`] diff --git a/crates/rpc/rpc-types-compat/src/transaction/signature.rs b/crates/rpc/rpc-types-compat/src/transaction/signature.rs deleted file mode 100644 index 536f6ac5e5cf..000000000000 --- a/crates/rpc/rpc-types-compat/src/transaction/signature.rs +++ /dev/null @@ -1,52 +0,0 @@ -use alloy_primitives::U256; -use alloy_rpc_types::{Parity, Signature}; -use reth_primitives::{transaction::legacy_parity, Signature as PrimitiveSignature, TxType}; - -/// Creates a new rpc signature from a legacy [primitive -/// signature](reth_primitives::Signature), using the give chain id to compute the signature's -/// recovery id. -/// -/// If the chain id is `Some`, the recovery id is computed according to [EIP-155](https://eips.ethereum.org/EIPS/eip-155). -pub fn from_legacy_primitive_signature( - signature: PrimitiveSignature, - chain_id: Option, -) -> Signature { - Signature { - r: signature.r(), - s: signature.s(), - v: U256::from(legacy_parity(&signature, chain_id).to_u64()), - y_parity: None, - } -} - -/// Creates a new rpc signature from a non-legacy [primitive -/// signature](reth_primitives::Signature). This sets the `v` value to `0` or `1` depending on -/// the signature's `odd_y_parity`. -pub fn from_typed_primitive_signature(signature: PrimitiveSignature) -> Signature { - Signature { - r: signature.r(), - s: signature.s(), - v: U256::from(signature.v().y_parity_byte()), - y_parity: Some(Parity(signature.v().y_parity())), - } -} - -/// Creates a new rpc signature from a legacy [primitive -/// signature](reth_primitives::Signature). -/// -/// The tx type is used to determine whether or not to use the `chain_id` to compute the -/// signature's recovery id. -/// -/// If the transaction is a legacy transaction, it will use the `chain_id` to compute the -/// signature's recovery id. If the transaction is a typed transaction, it will set the `v` -/// value to `0` or `1` depending on the signature's `odd_y_parity`. 
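Editor's note: with the trait reshaped above (an `Error` associated type, fallible `fill`, and the gas-price and tx-type helpers dropped along with the blanket `()` impl), an implementor now looks roughly like the following. The struct is hypothetical, the body is stubbed, and `EthApiError` stands in for any std error convertible to a jsonrpsee error object:

use alloy_rpc_types_eth::TransactionInfo;
use reth_primitives::TransactionSignedEcRecovered;
use reth_rpc_eth_types::EthApiError;
use reth_rpc_types_compat::TransactionCompat;

#[derive(Clone, Debug)]
struct MyTxBuilder;

impl TransactionCompat for MyTxBuilder {
    type Transaction = alloy_rpc_types_eth::Transaction;
    type Error = EthApiError;

    fn fill(
        &self,
        _tx: TransactionSignedEcRecovered,
        _tx_info: TransactionInfo,
    ) -> Result<Self::Transaction, Self::Error> {
        todo!("map the recovered transaction into the network's response type")
    }

    fn otterscan_api_truncate_input(_tx: &mut Self::Transaction) {}
}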
-pub fn from_primitive_signature( - signature: PrimitiveSignature, - tx_type: TxType, - chain_id: Option, -) -> Signature { - match tx_type { - TxType::Legacy => from_legacy_primitive_signature(signature, chain_id), - _ => from_typed_primitive_signature(signature), - } -} diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index fe150e36eed5..ac3a548f9b5a 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -18,6 +18,7 @@ reth-primitives = { workspace = true, features = ["secp256k1"] } reth-rpc-api.workspace = true reth-rpc-eth-api.workspace = true reth-errors.workspace = true +reth-ethereum-consensus.workspace = true reth-provider.workspace = true reth-transaction-pool.workspace = true reth-network-api.workspace = true @@ -31,27 +32,30 @@ reth-network-peers = { workspace = true, features = ["secp256k1"] } reth-evm.workspace = true reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true -reth-node-api.workspace = true reth-network-types.workspace = true reth-trie.workspace = true +reth-consensus.workspace = true +reth-payload-validator.workspace = true # ethereum alloy-consensus.workspace = true alloy-signer.workspace = true alloy-signer-local.workspace = true -alloy-eips.workspace = true +alloy-eips = { workspace = true, features = ["kzg"] } alloy-dyn-abi.workspace = true alloy-genesis.workspace = true alloy-network.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true +alloy-rpc-types-beacon.workspace = true alloy-rpc-types.workspace = true -alloy-rpc-types-eth = { workspace = true, features = ["jsonrpsee-types"] } +alloy-rpc-types-eth = { workspace = true, features = ["jsonrpsee-types", "serde"] } alloy-rpc-types-debug.workspace = true alloy-rpc-types-trace.workspace = true alloy-rpc-types-mev.workspace = true alloy-rpc-types-txpool.workspace = true alloy-rpc-types-admin.workspace = true +alloy-rpc-types-engine.workspace = true alloy-serde.workspace = true revm = { workspace = true, features = [ "optional_block_gas_limit", diff --git a/crates/rpc/rpc/src/admin.rs b/crates/rpc/rpc/src/admin.rs index 311719a04edd..0358aa3a8d43 100644 --- a/crates/rpc/rpc/src/admin.rs +++ b/crates/rpc/rpc/src/admin.rs @@ -115,6 +115,7 @@ where .get_final_paris_total_difficulty() .is_some(), terminal_total_difficulty: self.chain_spec.fork(EthereumHardfork::Paris).ttd(), + deposit_contract_address: self.chain_spec.deposit_contract().map(|dc| dc.address), ..self.chain_spec.genesis().config.clone() }; diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 2d9d6f7822e1..a74d1b5a1550 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -1,11 +1,11 @@ -use alloy_eips::eip2718::Encodable2718; +use alloy_eips::{eip2718::Encodable2718, BlockId, BlockNumberOrTag}; use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rlp::{Decodable, Encodable}; -use alloy_rpc_types::{ - state::EvmOverrides, Block as RpcBlock, BlockError, Bundle, StateContext, TransactionInfo, -}; use alloy_rpc_types_debug::ExecutionWitness; -use alloy_rpc_types_eth::transaction::TransactionRequest; +use alloy_rpc_types_eth::{ + state::EvmOverrides, transaction::TransactionRequest, Block as RpcBlock, BlockError, Bundle, + StateContext, TransactionInfo, +}; use alloy_rpc_types_trace::geth::{ call::FlatCallFrame, BlockTraceResult, FourByteFrame, GethDebugBuiltInTracerType, GethDebugTracerType, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, @@ -16,10 +16,9 @@ use jsonrpsee::core::RpcResult; use 
reth_chainspec::EthereumHardforks; use reth_evm::{ execute::{BlockExecutorProvider, Executor}, - system_calls::SystemCaller, ConfigureEvmEnv, }; -use reth_primitives::{Block, BlockId, BlockNumberOrTag, TransactionSignedEcRecovered}; +use reth_primitives::{Block, SealedBlockWithSenders}; use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, HeaderProvider, StateProofProvider, StateProviderFactory, TransactionVariant, @@ -27,7 +26,7 @@ use reth_provider::{ use reth_revm::database::StateProviderDatabase; use reth_rpc_api::DebugApiServer; use reth_rpc_eth_api::{ - helpers::{Call, EthApiSpec, EthTransactions, LoadState, TraceExt}, + helpers::{EthApiSpec, EthTransactions, TraceExt}, EthApiTypes, FromEthApiError, }; use reth_rpc_eth_types::{EthApiError, StateCacheDb}; @@ -93,47 +92,47 @@ where /// Trace the entire block asynchronously async fn trace_block( &self, - at: BlockId, - transactions: Vec, + block: Arc, cfg: CfgEnvWithHandlerCfg, block_env: BlockEnv, opts: GethDebugTracingOptions, ) -> Result, Eth::Error> { - if transactions.is_empty() { - // nothing to trace - return Ok(Vec::new()) - } - // replay all transactions of the block let this = self.clone(); self.eth_api() - .spawn_with_state_at_block(at, move |state| { - let block_hash = at.as_block_hash(); - let mut results = Vec::with_capacity(transactions.len()); + .spawn_with_state_at_block(block.parent_hash.into(), move |state| { + let mut results = Vec::with_capacity(block.body.transactions.len()); let mut db = CacheDB::new(StateProviderDatabase::new(state)); - let mut transactions = transactions.into_iter().enumerate().peekable(); - while let Some((index, tx)) = transactions.next() { + + this.eth_api().apply_pre_execution_changes(&block, &mut db, &cfg, &block_env)?; + + let mut transactions = block.transactions_with_sender().enumerate().peekable(); + let mut inspector = None; + while let Some((index, (signer, tx))) = transactions.next() { let tx_hash = tx.hash; let env = EnvWithHandlerCfg { env: Env::boxed( cfg.cfg_env.clone(), block_env.clone(), - Call::evm_config(this.eth_api()).tx_env(tx.as_signed(), tx.signer()), + this.eth_api().evm_config().tx_env(tx, *signer), ), handler_cfg: cfg.handler_cfg, }; let (result, state_changes) = this.trace_transaction( - opts.clone(), + &opts, env, &mut db, Some(TransactionContext { - block_hash, + block_hash: Some(block.hash()), tx_hash: Some(tx_hash), tx_index: Some(index), }), + &mut inspector, )?; + inspector = inspector.map(|insp| insp.fused()); + results.push(TraceResult::Success { result, tx_hash: Some(tx_hash) }); if transactions.peek().is_some() { // need to apply the state changes of this transaction before executing the @@ -162,36 +161,40 @@ where .map_err(Eth::Error::from_eth_err)?; let (cfg, block_env) = self.eth_api().evm_env_for_raw_block(&block.header).await?; - // we trace on top the block's parent block - let parent = block.parent_hash; // Depending on EIP-2 we need to recover the transactions differently - let transactions = - if self.inner.provider.chain_spec().is_homestead_active_at_block(block.number) { - block - .body - .transactions - .into_iter() - .map(|tx| { - tx.into_ecrecovered() - .ok_or(EthApiError::InvalidTransactionSignature) - .map_err(Eth::Error::from_eth_err) - }) - .collect::, Eth::Error>>()? - } else { - block - .body - .transactions - .into_iter() - .map(|tx| { - tx.into_ecrecovered_unchecked() - .ok_or(EthApiError::InvalidTransactionSignature) - .map_err(Eth::Error::from_eth_err) - }) - .collect::, Eth::Error>>()? 
- }; - - self.trace_block(parent.into(), transactions, cfg, block_env, opts).await + let senders = if self.inner.provider.chain_spec().is_homestead_active_at_block(block.number) + { + block + .body + .transactions + .iter() + .map(|tx| { + tx.recover_signer() + .ok_or(EthApiError::InvalidTransactionSignature) + .map_err(Eth::Error::from_eth_err) + }) + .collect::, Eth::Error>>()? + } else { + block + .body + .transactions + .iter() + .map(|tx| { + tx.recover_signer_unchecked() + .ok_or(EthApiError::InvalidTransactionSignature) + .map_err(Eth::Error::from_eth_err) + }) + .collect::, Eth::Error>>()? + }; + + self.trace_block( + Arc::new(block.with_senders_unchecked(senders).seal_slow()), + cfg, + block_env, + opts, + ) + .await } /// Replays a block and returns the trace of each transaction. @@ -213,18 +216,8 @@ where )?; let block = block.ok_or(EthApiError::HeaderNotFound(block_id))?; - // we need to get the state of the parent block because we're replaying this block on top of - // its parent block's state - let state_at = block.parent_hash; - self.trace_block( - state_at.into(), - (*block).clone().into_transactions_ecrecovered().collect(), - cfg, - block_env, - opts, - ) - .await + self.trace_block(block, cfg, block_env, opts).await } /// Trace the transaction according to the provided options. @@ -245,7 +238,6 @@ where // block the transaction is included in let state_at: BlockId = block.parent_hash.into(); let block_hash = block.hash(); - let parent_beacon_block_root = block.parent_beacon_block_root; let this = self.clone(); self.eth_api() @@ -257,24 +249,7 @@ where let mut db = CacheDB::new(StateProviderDatabase::new(state)); - // apply relevant system calls - let mut system_caller = SystemCaller::new( - Call::evm_config(this.eth_api()).clone(), - LoadState::provider(this.eth_api()).chain_spec(), - ); - - system_caller - .pre_block_beacon_root_contract_call( - &mut db, - &cfg, - &block_env, - parent_beacon_block_root, - ) - .map_err(|_| { - EthApiError::EvmCustom( - "failed to apply 4788 beacon root system call".to_string(), - ) - })?; + this.eth_api().apply_pre_execution_changes(&block, &mut db, &cfg, &block_env)?; // replay all transactions prior to the targeted transaction let index = this.eth_api().replay_transactions_until( @@ -289,13 +264,13 @@ where env: Env::boxed( cfg.cfg_env.clone(), block_env, - Call::evm_config(this.eth_api()).tx_env(tx.as_signed(), tx.signer()), + this.eth_api().evm_config().tx_env(tx.as_signed(), tx.signer()), ), handler_cfg: cfg.handler_cfg, }; this.trace_transaction( - opts, + &opts, env, &mut db, Some(TransactionContext { @@ -303,6 +278,7 @@ where tx_index: Some(index), tx_hash: Some(tx.hash), }), + &mut None, ) .map(|(trace, _)| trace) }) @@ -557,7 +533,7 @@ where env: Env::boxed( cfg.cfg_env.clone(), block_env.clone(), - Call::evm_config(this.eth_api()).tx_env(tx, *signer), + this.eth_api().evm_config().tx_env(tx, *signer), ), handler_cfg: cfg.handler_cfg, }; @@ -573,6 +549,7 @@ where let Bundle { transactions, block_override } = bundle; let block_overrides = block_override.map(Box::new); + let mut inspector = None; let mut transactions = transactions.into_iter().peekable(); while let Some(tx) = transactions.next() { @@ -588,8 +565,15 @@ where overrides, )?; - let (trace, state) = - this.trace_transaction(tracing_options.clone(), env, &mut db, None)?; + let (trace, state) = this.trace_transaction( + &tracing_options, + env, + &mut db, + None, + &mut inspector, + )?; + + inspector = inspector.map(|insp| insp.fused()); // If there is more transactions, 
commit the database // If there is no transactions, but more bundles, commit to the database too @@ -692,6 +676,13 @@ where /// Executes the configured transaction with the environment on the given database. /// + /// It optionally takes fused inspector ([`TracingInspector::fused`]) to avoid re-creating the + /// inspector for each transaction. This is useful when tracing multiple transactions in a + /// block. This is only useful for block tracing which uses the same tracer for all transactions + /// in the block. + /// + /// Caution: If the inspector is provided then `opts.tracer_config` is ignored. + /// /// Returns the trace frame and the state that got updated after executing the transaction. /// /// Note: this does not apply any state overrides if they're configured in the `opts`. @@ -699,10 +690,11 @@ where /// Caution: this is blocking and should be performed on a blocking task. fn trace_transaction( &self, - opts: GethDebugTracingOptions, + opts: &GethDebugTracingOptions, env: EnvWithHandlerCfg, db: &mut StateCacheDb<'_>, transaction_context: Option, + fused_inspector: &mut Option, ) -> Result<(GethTrace, revm_primitives::EvmState), Eth::Error> { let GethDebugTracingOptions { config, tracer, tracer_config, .. } = opts; @@ -716,35 +708,42 @@ where } GethDebugBuiltInTracerType::CallTracer => { let call_config = tracer_config + .clone() .into_call_config() .map_err(|_| EthApiError::InvalidTracerConfig)?; - let mut inspector = TracingInspector::new( - TracingInspectorConfig::from_geth_call_config(&call_config), - ); + let mut inspector = fused_inspector.get_or_insert_with(|| { + TracingInspector::new(TracingInspectorConfig::from_geth_call_config( + &call_config, + )) + }); let (res, env) = self.eth_api().inspect(db, env, &mut inspector)?; + inspector.set_transaction_gas_limit(env.tx.gas_limit); + let frame = inspector - .with_transaction_gas_limit(env.tx.gas_limit) - .into_geth_builder() + .geth_builder() .geth_call_traces(call_config, res.result.gas_used()); return Ok((frame.into(), res.state)) } GethDebugBuiltInTracerType::PreStateTracer => { let prestate_config = tracer_config + .clone() .into_pre_state_config() .map_err(|_| EthApiError::InvalidTracerConfig)?; - let mut inspector = TracingInspector::new( - TracingInspectorConfig::from_geth_prestate_config(&prestate_config), - ); + let mut inspector = fused_inspector.get_or_insert_with(|| { + TracingInspector::new( + TracingInspectorConfig::from_geth_prestate_config(&prestate_config), + ) + }); let (res, env) = self.eth_api().inspect(&mut *db, env, &mut inspector)?; + inspector.set_transaction_gas_limit(env.tx.gas_limit); let frame = inspector - .with_transaction_gas_limit(env.tx.gas_limit) - .into_geth_builder() + .geth_builder() .geth_prestate_traces(&res, &prestate_config, db) .map_err(Eth::Error::from_eth_err)?; @@ -755,6 +754,7 @@ where } GethDebugBuiltInTracerType::MuxTracer => { let mux_config = tracer_config + .clone() .into_mux_config() .map_err(|_| EthApiError::InvalidTracerConfig)?; @@ -769,6 +769,7 @@ where } GethDebugBuiltInTracerType::FlatCallTracer => { let flat_call_config = tracer_config + .clone() .into_flat_call_config() .map_err(|_| EthApiError::InvalidTracerConfig)?; @@ -799,10 +800,10 @@ where } #[cfg(feature = "js-tracer")] GethDebugTracerType::JsTracer(code) => { - let config = tracer_config.into_json(); + let config = tracer_config.clone().into_json(); let mut inspector = revm_inspectors::tracing::js::JsInspector::with_transaction_context( - code, + code.clone(), config, 
transaction_context.unwrap_or_default(), ) @@ -818,17 +819,15 @@ where } // default structlog tracer - let inspector_config = TracingInspectorConfig::from_geth_config(&config); - - let mut inspector = TracingInspector::new(inspector_config); - + let mut inspector = fused_inspector.get_or_insert_with(|| { + let inspector_config = TracingInspectorConfig::from_geth_config(config); + TracingInspector::new(inspector_config) + }); let (res, env) = self.eth_api().inspect(db, env, &mut inspector)?; let gas_used = res.result.gas_used(); let return_value = res.result.into_output().unwrap_or_default(); - let frame = inspector - .with_transaction_gas_limit(env.tx.gas_limit) - .into_geth_builder() - .geth_traces(gas_used, return_value, config); + inspector.set_transaction_gas_limit(env.tx.gas_limit); + let frame = inspector.geth_builder().geth_traces(gas_used, return_value, *config); Ok((frame.into(), res.state)) } @@ -911,7 +910,7 @@ where .to_rpc_result()? .unwrap_or_default() .into_iter() - .map(|receipt| receipt.with_bloom().envelope_encoded()) + .map(|receipt| receipt.with_bloom().encoded_2718().into()) .collect()) } diff --git a/crates/rpc/rpc/src/engine.rs b/crates/rpc/rpc/src/engine.rs index 928e2050a5c2..fca78d62d637 100644 --- a/crates/rpc/rpc/src/engine.rs +++ b/crates/rpc/rpc/src/engine.rs @@ -1,11 +1,11 @@ +use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::{Address, Bytes, B256, U256, U64}; -use alloy_rpc_types::{ - state::StateOverride, BlockOverrides, EIP1186AccountProofResponse, Filter, Log, SyncStatus, +use alloy_rpc_types_eth::{ + state::StateOverride, transaction::TransactionRequest, BlockOverrides, + EIP1186AccountProofResponse, Filter, Log, SyncStatus, }; -use alloy_rpc_types_eth::transaction::TransactionRequest; use alloy_serde::JsonStorageKey; use jsonrpsee::core::RpcResult as Result; -use reth_primitives::{BlockId, BlockNumberOrTag}; use reth_rpc_api::{EngineEthApiServer, EthApiServer, EthFilterApiServer}; /// Re-export for convenience pub use reth_rpc_engine_api::EngineApi; diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index e97497786ede..a2e0be304374 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -12,7 +12,7 @@ use reth_primitives::{ PooledTransactionsElement, }; use reth_revm::database::StateProviderDatabase; -use reth_rpc_eth_api::{FromEthApiError, FromEvmError}; +use reth_rpc_eth_api::{FromEthApiError, FromEvmError, RpcNodeCore}; use reth_tasks::pool::BlockingTaskGuard; use revm::{ db::CacheDB, @@ -106,7 +106,7 @@ where .into()) } - let block_id: alloy_rpc_types::BlockId = state_block_number.into(); + let block_id: alloy_rpc_types_eth::BlockId = state_block_number.into(); // Note: the block number is considered the `parent` block: let (cfg, mut block_env, at) = self.eth_api().evm_env_at(block_id).await?; @@ -130,12 +130,12 @@ where } else if cfg.handler_cfg.spec_id.is_enabled_in(SpecId::LONDON) { let parent_block = block_env.number.saturating_to::(); // here we need to fetch the _next_ block's basefee based on the parent block - let parent = LoadPendingBlock::provider(self.eth_api()) + let parent = RpcNodeCore::provider(self.eth_api()) .header_by_number(parent_block) .map_err(Eth::Error::from_eth_err)? 
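Editor's note on the thread running through the debug hunks above: `trace_transaction` now borrows the tracing options and accepts an optional pre-built inspector, so block tracing allocates one `TracingInspector` and resets it between transactions with `fused()` instead of rebuilding it per transaction. The reuse loop in isolation, with execution elided and revm-inspectors calls as used above:

use alloy_rpc_types_trace::geth::GethDefaultTracingOptions;
use revm_inspectors::tracing::{TracingInspector, TracingInspectorConfig};

fn trace_block_txs(tx_count: usize, config: &GethDefaultTracingOptions) {
    let mut inspector: Option<TracingInspector> = None;
    for _ in 0..tx_count {
        let insp = inspector.get_or_insert_with(|| {
            TracingInspector::new(TracingInspectorConfig::from_geth_config(config))
        });
        // ... execute the transaction with `insp` hooked into the EVM ...
        let _ = insp;
        // `fused()` clears per-transaction state so the buffers are reused.
        inspector = inspector.map(|i| i.fused());
    }
}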
.ok_or(EthApiError::HeaderNotFound(parent_block.into()))?; if let Some(base_fee) = parent.next_block_base_fee( - LoadPendingBlock::provider(self.eth_api()) + RpcNodeCore::provider(self.eth_api()) .chain_spec() .base_fee_params_at_block(parent_block), ) { @@ -156,7 +156,8 @@ where let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, TxEnv::default()); let db = CacheDB::new(StateProviderDatabase::new(state)); - let initial_coinbase = DatabaseRef::basic_ref(&db, coinbase) + let initial_coinbase = db + .basic_ref(coinbase) .map_err(Eth::Error::from_eth_err)? .map(|acc| acc.balance) .unwrap_or_default(); @@ -166,7 +167,7 @@ where let mut total_gas_fess = U256::ZERO; let mut hasher = Keccak256::new(); - let mut evm = Call::evm_config(ð_api).evm_with_env(db, env); + let mut evm = eth_api.evm_config().evm_with_env(db, env); let mut results = Vec::with_capacity(transactions.len()); let mut transactions = transactions.into_iter().peekable(); @@ -187,7 +188,7 @@ where .effective_tip_per_gas(basefee) .ok_or_else(|| RpcInvalidTransactionError::FeeCapTooLow) .map_err(Eth::Error::from_eth_err)?; - Call::evm_config(ð_api).fill_tx_env(evm.tx_mut(), &tx, signer); + eth_api.evm_config().fill_tx_env(evm.tx_mut(), &tx, signer); let ResultAndState { result, state } = evm.transact().map_err(Eth::Error::from_evm_err)?; diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index 21787873e966..d6c8f522cda0 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -3,14 +3,15 @@ use std::sync::Arc; -use alloy_network::AnyNetwork; +use alloy_eips::BlockNumberOrTag; +use alloy_network::Ethereum; use alloy_primitives::U256; use derive_more::Deref; -use reth_primitives::BlockNumberOrTag; use reth_provider::{BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider}; use reth_rpc_eth_api::{ helpers::{EthSigner, SpawnBlocking}, - EthApiTypes, + node::RpcNodeCoreExt, + EthApiTypes, RpcNodeCore, }; use reth_rpc_eth_types::{ EthApiBuilderCtx, EthApiError, EthStateCache, FeeHistoryCache, GasCap, GasPriceOracle, @@ -131,8 +132,7 @@ where Self: Send + Sync, { type Error = EthApiError; - // todo: replace with alloy_network::Ethereum - type NetworkTypes = AnyNetwork; + type NetworkTypes = Ethereum; type TransactionCompat = EthTxBuilder; fn tx_resp_builder(&self) -> &Self::TransactionCompat { @@ -140,6 +140,51 @@ where } } +impl RpcNodeCore for EthApi +where + Provider: Send + Sync + Clone + Unpin, + Pool: Send + Sync + Clone + Unpin, + Network: Send + Sync + Clone, + EvmConfig: Send + Sync + Clone + Unpin, +{ + type Provider = Provider; + type Pool = Pool; + type Evm = EvmConfig; + type Network = Network; + type PayloadBuilder = (); + + fn pool(&self) -> &Self::Pool { + self.inner.pool() + } + + fn evm_config(&self) -> &Self::Evm { + self.inner.evm_config() + } + + fn network(&self) -> &Self::Network { + self.inner.network() + } + + fn payload_builder(&self) -> &Self::PayloadBuilder { + &() + } + + fn provider(&self) -> &Self::Provider { + self.inner.provider() + } +} + +impl RpcNodeCoreExt + for EthApi +where + Self: RpcNodeCore, +{ + #[inline] + fn cache(&self) -> &EthStateCache { + self.inner.cache() + } +} + impl std::fmt::Debug for EthApi { @@ -360,13 +405,15 @@ impl EthApiInner EthFilter where - Provider: BlockReader + BlockIdReader + EvmEnvProvider + 'static, - Pool: TransactionPool + 'static, - ::Transaction: 'static, + Provider: BlockReader + BlockIdReader + 'static, + Pool: TransactionPool + 'static, Eth: FullEthApiTypes, { /// Returns all the filter changes 
for the given id, if any @@ -244,7 +244,7 @@ where impl EthFilterApiServer> for EthFilter where - Provider: BlockReader + BlockIdReader + EvmEnvProvider + 'static, + Provider: BlockReader + BlockIdReader + 'static, Pool: TransactionPool + 'static, Eth: FullEthApiTypes + 'static, { @@ -367,7 +367,7 @@ struct EthFilterInner { impl EthFilterInner where - Provider: BlockReader + BlockIdReader + EvmEnvProvider + 'static, + Provider: BlockReader + BlockIdReader + 'static, Pool: TransactionPool + 'static, { /// Returns logs matching given filter object. @@ -625,10 +625,15 @@ where let mut prepared_stream = self.txs_stream.lock().await; while let Ok(tx) = prepared_stream.try_recv() { - pending_txs.push(from_recovered( - tx.transaction.to_recovered_transaction(), - &self.tx_resp_builder, - )) + match from_recovered(tx.transaction.to_recovered_transaction(), &self.tx_resp_builder) { + Ok(tx) => pending_txs.push(tx), + Err(err) => { + error!(target: "rpc", + %err, + "Failed to fill txn with block context" + ); + } + } } FilterChanges::Transactions(pending_txs) } diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs b/crates/rpc/rpc/src/eth/helpers/block.rs index b29a24c38c4c..fd3b9db9da28 100644 --- a/crates/rpc/rpc/src/eth/helpers/block.rs +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -1,14 +1,13 @@ //! Contains RPC handler implementations specific to blocks. -use alloy_rpc_types::{AnyTransactionReceipt, BlockId}; -use alloy_serde::WithOtherFields; +use alloy_rpc_types_eth::{BlockId, TransactionReceipt}; use reth_primitives::TransactionMeta; use reth_provider::{BlockReaderIdExt, HeaderProvider}; use reth_rpc_eth_api::{ helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking}, RpcReceipt, }; -use reth_rpc_eth_types::{EthApiError, EthStateCache, ReceiptBuilder}; +use reth_rpc_eth_types::{EthApiError, EthReceiptBuilder}; use crate::EthApi; @@ -16,15 +15,10 @@ impl EthBlocks for EthApi, + NetworkTypes: alloy_network::Network, + Provider: HeaderProvider, >, - Provider: HeaderProvider, { - #[inline] - fn provider(&self) -> impl HeaderProvider { - self.inner.provider() - } - async fn block_receipts( &self, block_id: BlockId, @@ -56,9 +50,8 @@ where excess_blob_gas, timestamp, }; - ReceiptBuilder::new(&tx, meta, receipt, &receipts) + EthReceiptBuilder::new(&tx, meta, receipt, &receipts) .map(|builder| builder.build()) - .map(WithOtherFields::new) }) .collect::, Self::Error>>() .map(Some) @@ -73,13 +66,4 @@ where Self: LoadPendingBlock + SpawnBlocking, Provider: BlockReaderIdExt, { - #[inline] - fn provider(&self) -> impl BlockReaderIdExt { - self.inner.provider() - } - - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } } diff --git a/crates/rpc/rpc/src/eth/helpers/call.rs b/crates/rpc/rpc/src/eth/helpers/call.rs index 396bf9bd08e4..1c1e35a5df8d 100644 --- a/crates/rpc/rpc/src/eth/helpers/call.rs +++ b/crates/rpc/rpc/src/eth/helpers/call.rs @@ -1,7 +1,7 @@ //! Contains RPC handler implementations specific to endpoints that call/execute within evm. +use alloy_consensus::Header; use reth_evm::ConfigureEvm; -use reth_primitives::Header; use reth_rpc_eth_api::helpers::{Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}; use crate::EthApi; @@ -13,7 +13,7 @@ impl EthCall for EthApi Call for EthApi where - Self: LoadState + SpawnBlocking, + Self: LoadState> + SpawnBlocking, EvmConfig: ConfigureEvm
<Header = Header>, { #[inline] @@ -25,9 +25,4 @@ where fn max_simulate_blocks(&self) -> u64 { self.inner.max_simulate_blocks() } - - #[inline] - fn evm_config(&self) -> &impl ConfigureEvm<Header = Header>
{ - self.inner.evm_config() - } } diff --git a/crates/rpc/rpc/src/eth/helpers/fees.rs b/crates/rpc/rpc/src/eth/helpers/fees.rs index a792f7289514..e1a17ef647c5 100644 --- a/crates/rpc/rpc/src/eth/helpers/fees.rs +++ b/crates/rpc/rpc/src/eth/helpers/fees.rs @@ -1,10 +1,9 @@ //! Contains RPC handler implementations for fee history. -use reth_chainspec::EthereumHardforks; -use reth_provider::{BlockIdReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider}; - +use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_provider::{BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; use reth_rpc_eth_api::helpers::{EthFees, LoadBlock, LoadFee}; -use reth_rpc_eth_types::{EthStateCache, FeeHistoryCache, GasPriceOracle}; +use reth_rpc_eth_types::{FeeHistoryCache, GasPriceOracle}; use crate::EthApi; @@ -15,23 +14,14 @@ impl EthFees for EthApi LoadFee for EthApi where - Self: LoadBlock, - Provider: BlockReaderIdExt + HeaderProvider + ChainSpecProvider, + Self: LoadBlock, + Provider: BlockReaderIdExt + + EvmEnvProvider + + ChainSpecProvider + + StateProviderFactory, { #[inline] - fn provider( - &self, - ) -> impl BlockIdReader + HeaderProvider + ChainSpecProvider { - self.inner.provider() - } - - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } - - #[inline] - fn gas_oracle(&self) -> &GasPriceOracle { + fn gas_oracle(&self) -> &GasPriceOracle { self.inner.gas_oracle() } diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs b/crates/rpc/rpc/src/eth/helpers/pending_block.rs index 69d55f58bfa2..8540a4684bf8 100644 --- a/crates/rpc/rpc/src/eth/helpers/pending_block.rs +++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs @@ -1,10 +1,13 @@ //! Support for building a pending block with transactions from local view of mempool. -use reth_chainspec::EthereumHardforks; +use alloy_consensus::Header; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; -use reth_primitives::Header; use reth_provider::{BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; -use reth_rpc_eth_api::helpers::{LoadPendingBlock, SpawnBlocking}; +use reth_rpc_eth_api::{ + helpers::{LoadPendingBlock, SpawnBlocking}, + RpcNodeCore, +}; use reth_rpc_eth_types::PendingBlock; use reth_transaction_pool::TransactionPool; @@ -13,36 +16,18 @@ use crate::EthApi; impl LoadPendingBlock for EthApi where - Self: SpawnBlocking, - Provider: BlockReaderIdExt - + EvmEnvProvider - + ChainSpecProvider - + StateProviderFactory, - Pool: TransactionPool, - EvmConfig: ConfigureEvm
<Header = Header>, + Self: SpawnBlocking + + RpcNodeCore< + Provider: BlockReaderIdExt + + EvmEnvProvider + + ChainSpecProvider<ChainSpec: EthChainSpec + EthereumHardforks> + + StateProviderFactory, + Pool: TransactionPool, + Evm: ConfigureEvm<Header = Header>
, + >, { - #[inline] - fn provider( - &self, - ) -> impl BlockReaderIdExt - + EvmEnvProvider - + ChainSpecProvider<ChainSpec: EthereumHardforks> - + StateProviderFactory { - self.inner.provider() - } - - #[inline] - fn pool(&self) -> impl TransactionPool { - self.inner.pool() - } - #[inline] fn pending_block(&self) -> &tokio::sync::Mutex<Option<PendingBlock>> { self.inner.pending_block() } - - #[inline] - fn evm_config(&self) -> &impl ConfigureEvm<Header = Header>
{ - self.inner.evm_config() - } } diff --git a/crates/rpc/rpc/src/eth/helpers/receipt.rs b/crates/rpc/rpc/src/eth/helpers/receipt.rs index 570ec4fa3c0f..594cffd09f22 100644 --- a/crates/rpc/rpc/src/eth/helpers/receipt.rs +++ b/crates/rpc/rpc/src/eth/helpers/receipt.rs @@ -1,21 +1,15 @@ //! Builds an RPC receipt response w.r.t. data layout of network. -use alloy_serde::WithOtherFields; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; -use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcReceipt}; -use reth_rpc_eth_types::{EthApiError, EthStateCache, ReceiptBuilder}; +use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcNodeCoreExt, RpcReceipt}; +use reth_rpc_eth_types::{EthApiError, EthReceiptBuilder}; use crate::EthApi; impl LoadReceipt for EthApi where - Self: Send + Sync, + Self: RpcNodeCoreExt, { - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } - async fn build_transaction_receipt( &self, tx: TransactionSigned, @@ -31,6 +25,6 @@ where .map_err(Self::Error::from_eth_err)? .ok_or(EthApiError::HeaderNotFound(hash.into()))?; - Ok(WithOtherFields::new(ReceiptBuilder::new(&tx, meta, &receipt, &all_receipts)?.build())) + Ok(EthReceiptBuilder::new(&tx, meta, &receipt, &all_receipts)?.build()) } } diff --git a/crates/rpc/rpc/src/eth/helpers/signer.rs b/crates/rpc/rpc/src/eth/helpers/signer.rs index e59be0ac2838..e7e9c64447b1 100644 --- a/crates/rpc/rpc/src/eth/helpers/signer.rs +++ b/crates/rpc/rpc/src/eth/helpers/signer.rs @@ -6,11 +6,11 @@ use crate::EthApi; use alloy_dyn_abi::TypedData; use alloy_eips::eip2718::Decodable2718; use alloy_network::{eip2718::Encodable2718, EthereumWallet, TransactionBuilder}; -use alloy_primitives::{eip191_hash_message, Address, B256}; +use alloy_primitives::{eip191_hash_message, Address, PrimitiveSignature as Signature, B256}; use alloy_rpc_types_eth::TransactionRequest; use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; -use reth_primitives::{Signature, TransactionSigned}; +use reth_primitives::TransactionSigned; use reth_rpc_eth_api::helpers::{signer::Result, AddDevSigners, EthSigner}; use reth_rpc_eth_types::SignError; @@ -109,7 +109,7 @@ impl EthSigner for DevSigner { #[cfg(test)] mod tests { - use alloy_primitives::{Bytes, Parity, U256}; + use alloy_primitives::{Bytes, U256}; use alloy_rpc_types_eth::TransactionInput; use revm_primitives::TxKind; @@ -205,7 +205,7 @@ mod tests { 16, ) .unwrap(), - Parity::Parity(false), + false, ); assert_eq!(sig, expected) } @@ -227,7 +227,7 @@ mod tests { 16, ) .unwrap(), - Parity::Parity(true), + true, ); assert_eq!(sig, expected) } diff --git a/crates/rpc/rpc/src/eth/helpers/spec.rs b/crates/rpc/rpc/src/eth/helpers/spec.rs index 92445bf5ed1b..a44692e18a35 100644 --- a/crates/rpc/rpc/src/eth/helpers/spec.rs +++ b/crates/rpc/rpc/src/eth/helpers/spec.rs @@ -2,32 +2,19 @@ use alloy_primitives::U256; use reth_chainspec::EthereumHardforks; use reth_network_api::NetworkInfo; use reth_provider::{BlockNumReader, ChainSpecProvider, StageCheckpointReader}; -use reth_rpc_eth_api::helpers::EthApiSpec; -use reth_transaction_pool::TransactionPool; +use reth_rpc_eth_api::{helpers::EthApiSpec, RpcNodeCore}; use crate::EthApi; impl EthApiSpec for EthApi where - Pool: TransactionPool + 'static, - Provider: ChainSpecProvider - + BlockNumReader - + StageCheckpointReader - + 'static, - Network: NetworkInfo + 'static, - EvmConfig: Send + Sync, + Self: RpcNodeCore< + Provider: ChainSpecProvider + + BlockNumReader + + StageCheckpointReader, + Network: 
NetworkInfo, + >, { - fn provider( - &self, - ) -> impl ChainSpecProvider + BlockNumReader + StageCheckpointReader - { - self.inner.provider() - } - - fn network(&self) -> impl NetworkInfo { - self.inner.network() - } - fn starting_block(&self) -> U256 { self.inner.starting_block() } diff --git a/crates/rpc/rpc/src/eth/helpers/state.rs b/crates/rpc/rpc/src/eth/helpers/state.rs index 429a10333d1b..a3e909cf6f6f 100644 --- a/crates/rpc/rpc/src/eth/helpers/state.rs +++ b/crates/rpc/rpc/src/eth/helpers/state.rs @@ -4,8 +4,10 @@ use reth_chainspec::EthereumHardforks; use reth_provider::{ChainSpecProvider, StateProviderFactory}; use reth_transaction_pool::TransactionPool; -use reth_rpc_eth_api::helpers::{EthState, LoadState, SpawnBlocking}; -use reth_rpc_eth_types::EthStateCache; +use reth_rpc_eth_api::{ + helpers::{EthState, LoadState, SpawnBlocking}, + RpcNodeCore, +}; use crate::EthApi; @@ -18,28 +20,12 @@ where } } -impl LoadState for EthApi -where - Self: Send + Sync, - Provider: StateProviderFactory + ChainSpecProvider, - Pool: TransactionPool, +impl LoadState for EthApi where + Self: RpcNodeCore< + Provider: StateProviderFactory + ChainSpecProvider, + Pool: TransactionPool, + > { - #[inline] - fn provider( - &self, - ) -> impl StateProviderFactory + ChainSpecProvider { - self.inner.provider() - } - - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } - - #[inline] - fn pool(&self) -> impl TransactionPool { - self.inner.pool() - } } #[cfg(test)] diff --git a/crates/rpc/rpc/src/eth/helpers/trace.rs b/crates/rpc/rpc/src/eth/helpers/trace.rs index c40b7acf50d1..d9fe5e18a05b 100644 --- a/crates/rpc/rpc/src/eth/helpers/trace.rs +++ b/crates/rpc/rpc/src/eth/helpers/trace.rs @@ -1,18 +1,12 @@ //! Contains RPC handler implementations specific to tracing. +use alloy_consensus::Header; use reth_evm::ConfigureEvm; -use reth_primitives::Header; use reth_rpc_eth_api::helpers::{LoadState, Trace}; use crate::EthApi; -impl Trace for EthApi -where - Self: LoadState, - EvmConfig: ConfigureEvm
<Header = Header>, +impl Trace for EthApi where + Self: LoadState<Evm: ConfigureEvm<Header = Header>> { - #[inline] - fn evm_config(&self) -> &impl ConfigureEvm<Header = Header>
{ - self.inner.evm_config() - } } diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs index 24a13cb8062c..8ac0785b2620 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -3,9 +3,8 @@ use reth_provider::{BlockReaderIdExt, TransactionsProvider}; use reth_rpc_eth_api::{ helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, - FullEthApiTypes, + FullEthApiTypes, RpcNodeCore, }; -use reth_rpc_eth_types::EthStateCache; use reth_transaction_pool::TransactionPool; use crate::EthApi; @@ -13,15 +12,8 @@ use crate::EthApi; impl EthTransactions for EthApi where - Self: LoadTransaction, - Pool: TransactionPool + 'static, - Provider: BlockReaderIdExt, + Self: LoadTransaction, { - #[inline] - fn provider(&self) -> impl BlockReaderIdExt { - self.inner.provider() - } - #[inline] fn signers(&self) -> &parking_lot::RwLock>> { self.inner.signers() @@ -31,26 +23,10 @@ where impl LoadTransaction for EthApi where - Self: SpawnBlocking + FullEthApiTypes, - Provider: TransactionsProvider, - Pool: TransactionPool, + Self: SpawnBlocking + + FullEthApiTypes + + RpcNodeCore, { - type Pool = Pool; - - #[inline] - fn provider(&self) -> impl TransactionsProvider { - self.inner.provider() - } - - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } - - #[inline] - fn pool(&self) -> &Self::Pool { - self.inner.pool() - } } #[cfg(test)] diff --git a/crates/rpc/rpc/src/eth/helpers/types.rs b/crates/rpc/rpc/src/eth/helpers/types.rs index 848bcdc365a5..d1ce84bc0b77 100644 --- a/crates/rpc/rpc/src/eth/helpers/types.rs +++ b/crates/rpc/rpc/src/eth/helpers/types.rs @@ -1,15 +1,11 @@ //! L1 `eth` API types. -use alloy_consensus::Transaction as _; -use alloy_network::{AnyNetwork, Network}; -use alloy_primitives::{Address, TxKind}; -use alloy_rpc_types::{Transaction, TransactionInfo}; -use alloy_serde::WithOtherFields; -use reth_primitives::TransactionSignedEcRecovered; -use reth_rpc_types_compat::{ - transaction::{from_primitive_signature, GasPrice}, - TransactionCompat, -}; +use alloy_consensus::{Signed, Transaction as _, TxEip4844Variant, TxEnvelope}; +use alloy_network::{Ethereum, Network}; +use alloy_rpc_types_eth::{Transaction, TransactionInfo}; +use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered}; +use reth_rpc_eth_types::EthApiError; +use reth_rpc_types_compat::TransactionCompat; /// Builds RPC transaction response for l1. #[derive(Debug, Clone, Copy)] @@ -19,75 +15,70 @@ impl TransactionCompat for EthTxBuilder where Self: Send + Sync, { - type Transaction = ::TransactionResponse; + type Transaction = ::TransactionResponse; + + type Error = EthApiError; fn fill( &self, tx: TransactionSignedEcRecovered, tx_info: TransactionInfo, - ) -> Self::Transaction { - let signer = tx.signer(); - let signed_tx = tx.into_signed(); + ) -> Result { + let from = tx.signer(); + let TransactionSigned { transaction, signature, hash } = tx.into_signed(); - let to: Option
= match signed_tx.kind() { - TxKind::Create => None, - TxKind::Call(to) => Some(Address(*to)), + let inner: TxEnvelope = match transaction { + reth_primitives::Transaction::Legacy(tx) => { + Signed::new_unchecked(tx, signature, hash).into() + } + reth_primitives::Transaction::Eip2930(tx) => { + Signed::new_unchecked(tx, signature, hash).into() + } + reth_primitives::Transaction::Eip1559(tx) => { + Signed::new_unchecked(tx, signature, hash).into() + } + reth_primitives::Transaction::Eip4844(tx) => { + Signed::new_unchecked(tx, signature, hash).into() + } + reth_primitives::Transaction::Eip7702(tx) => { + Signed::new_unchecked(tx, signature, hash).into() + } + #[allow(unreachable_patterns)] + _ => unreachable!(), }; let TransactionInfo { - base_fee, block_hash, block_number, index: transaction_index, .. + block_hash, block_number, index: transaction_index, base_fee, .. } = tx_info; - let GasPrice { gas_price, max_fee_per_gas } = - Self::gas_price(&signed_tx, base_fee.map(|fee| fee as u64)); - - let input = signed_tx.input().to_vec().into(); - let chain_id = signed_tx.chain_id(); - let blob_versioned_hashes = signed_tx.blob_versioned_hashes().map(|hs| hs.to_vec()); - let access_list = signed_tx.access_list().cloned(); - let authorization_list = signed_tx.authorization_list().map(|l| l.to_vec()); + let effective_gas_price = base_fee + .map(|base_fee| { + inner.effective_tip_per_gas(base_fee as u64).unwrap_or_default() + base_fee + }) + .unwrap_or_else(|| inner.max_fee_per_gas()); - let signature = from_primitive_signature( - *signed_tx.signature(), - signed_tx.tx_type(), - signed_tx.chain_id(), - ); - - WithOtherFields { - inner: Transaction { - hash: signed_tx.hash(), - nonce: signed_tx.nonce(), - from: signer, - to, - value: signed_tx.value(), - gas_price, - max_fee_per_gas, - max_priority_fee_per_gas: signed_tx.max_priority_fee_per_gas(), - signature: Some(signature), - gas: signed_tx.gas_limit(), - input, - chain_id, - access_list, - transaction_type: Some(signed_tx.tx_type() as u8), - // These fields are set to None because they are not stored as part of the - // transaction - block_hash, - block_number, - transaction_index, - // EIP-4844 fields - max_fee_per_blob_gas: signed_tx.max_fee_per_blob_gas(), - blob_versioned_hashes, - authorization_list, - }, - ..Default::default() - } + Ok(Transaction { + inner, + block_hash, + block_number, + transaction_index, + from, + effective_gas_price: Some(effective_gas_price), + }) } fn otterscan_api_truncate_input(tx: &mut Self::Transaction) { - tx.inner.input = tx.inner.input.slice(..4); - } - - fn tx_type(tx: &Self::Transaction) -> u8 { - tx.inner.transaction_type.unwrap_or(0) + let input = match &mut tx.inner { + TxEnvelope::Eip1559(tx) => &mut tx.tx_mut().input, + TxEnvelope::Eip2930(tx) => &mut tx.tx_mut().input, + TxEnvelope::Legacy(tx) => &mut tx.tx_mut().input, + TxEnvelope::Eip4844(tx) => match tx.tx_mut() { + TxEip4844Variant::TxEip4844(tx) => &mut tx.input, + TxEip4844Variant::TxEip4844WithSidecar(tx) => &mut tx.tx.input, + }, + TxEnvelope::Eip7702(tx) => &mut tx.tx_mut().input, + _ => return, + }; + *input = input.slice(..4); } } diff --git a/crates/rpc/rpc/src/eth/mod.rs b/crates/rpc/rpc/src/eth/mod.rs index 99919110da7b..4d1833add3e7 100644 --- a/crates/rpc/rpc/src/eth/mod.rs +++ b/crates/rpc/rpc/src/eth/mod.rs @@ -15,4 +15,4 @@ pub use pubsub::EthPubSub; pub use helpers::{signer::DevSigner, types::EthTxBuilder}; -pub use reth_rpc_eth_api::EthApiServer; +pub use reth_rpc_eth_api::{EthApiServer, EthApiTypes, FullEthApiServer, 
RpcNodeCore}; diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index ac962610ef8a..8ea6d1f87c81 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -3,14 +3,13 @@ use std::sync::Arc; use alloy_primitives::TxHash; -use alloy_rpc_types::{ +use alloy_rpc_types_eth::{ pubsub::{ Params, PubSubSyncStatus, SubscriptionKind, SubscriptionResult as EthSubscriptionResult, SyncStatusMetadata, }, - FilteredParams, Header, Log, Transaction, + FilteredParams, Header, Log, }; -use alloy_serde::WithOtherFields; use futures::StreamExt; use jsonrpsee::{ server::SubscriptionMessage, types::ErrorObject, PendingSubscriptionSink, SubscriptionSink, @@ -28,6 +27,7 @@ use tokio_stream::{ wrappers::{BroadcastStream, ReceiverStream}, Stream, }; +use tracing::error; /// `Eth` pubsub RPC implementation. /// @@ -123,11 +123,9 @@ where { match kind { SubscriptionKind::NewHeads => { - let stream = pubsub.new_headers_stream().map(|header| { - EthSubscriptionResult::>::Header(Box::new( - header.into(), - )) - }); + let stream = pubsub + .new_headers_stream() + .map(|header| EthSubscriptionResult::<()>::Header(Box::new(header.into()))); pipe_from_stream(accepted_sink, stream).await } SubscriptionKind::Logs => { @@ -139,9 +137,9 @@ where } _ => FilteredParams::default(), }; - let stream = pubsub.log_stream(filter).map(|log| { - EthSubscriptionResult::>::Log(Box::new(log)) - }); + let stream = pubsub + .log_stream(filter) + .map(|log| EthSubscriptionResult::<()>::Log(Box::new(log))); pipe_from_stream(accepted_sink, stream).await } SubscriptionKind::NewPendingTransactions => { @@ -149,11 +147,23 @@ where match params { Params::Bool(true) => { // full transaction objects requested - let stream = pubsub.full_pending_transaction_stream().map(|tx| { - EthSubscriptionResult::FullTransaction(Box::new(from_recovered( + let stream = pubsub.full_pending_transaction_stream().filter_map(|tx| { + let tx_value = match from_recovered( tx.transaction.to_recovered_transaction(), &tx_resp_builder, - ))) + ) { + Ok(tx) => { + Some(EthSubscriptionResult::FullTransaction(Box::new(tx))) + } + Err(err) => { + error!(target = "rpc", + %err, + "Failed to fill transaction with block context" + ); + None + } + }; + std::future::ready(tx_value) }); return pipe_from_stream(accepted_sink, stream).await } @@ -170,7 +180,7 @@ where let stream = pubsub .pending_transaction_hashes_stream() - .map(EthSubscriptionResult::>::TransactionHash); + .map(EthSubscriptionResult::<()>::TransactionHash); pipe_from_stream(accepted_sink, stream).await } SubscriptionKind::Syncing => { @@ -332,7 +342,7 @@ where self.chain_events.canonical_state_stream().flat_map(|new_chain| { let headers = new_chain.committed().headers().collect::>(); futures::stream::iter( - headers.into_iter().map(reth_rpc_types_compat::block::from_primitive_with_hash), + headers.into_iter().map(|h| Header::from_consensus(h.into(), None, None)), ) }) } diff --git a/crates/rpc/rpc/src/eth/sim_bundle.rs b/crates/rpc/rpc/src/eth/sim_bundle.rs index 46dbb45d962e..40d951f755fa 100644 --- a/crates/rpc/rpc/src/eth/sim_bundle.rs +++ b/crates/rpc/rpc/src/eth/sim_bundle.rs @@ -1,15 +1,71 @@ //! `Eth` Sim bundle implementation and helpers. 
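The rewritten `EthTxBuilder::fill` above no longer carries separate `gas_price`/`max_fee_per_gas` response fields; it derives a single `effective_gas_price` from the block base fee and the envelope's fee caps. A minimal sketch of that EIP-1559 arithmetic with plain integers (illustrative only; the real code goes through alloy's `effective_tip_per_gas`):

```rust
/// Effective gas price for an EIP-1559 transaction: the base fee plus the
/// effective tip, where the tip is capped by both `max_priority_fee_per_gas`
/// and the headroom left under `max_fee_per_gas`.
fn effective_gas_price(max_fee: u128, max_priority_fee: u128, base_fee: Option<u128>) -> u128 {
    match base_fee {
        // No base fee known (e.g. pending context): report the fee cap itself.
        None => max_fee,
        Some(base_fee) => {
            // Tip actually paid: capped by the headroom under the fee cap;
            // saturating_sub covers caps that sit below the base fee.
            let tip = max_priority_fee.min(max_fee.saturating_sub(base_fee));
            base_fee + tip
        }
    }
}

fn main() {
    // Values in gwei for readability.
    // cap 100, tip cap 2, base fee 90 -> 92 effective.
    assert_eq!(effective_gas_price(100, 2, Some(90)), 92);
    // headroom (100 - 99 = 1) is smaller than the tip cap -> 100.
    assert_eq!(effective_gas_price(100, 2, Some(99)), 100);
    // no base fee available -> fee cap.
    assert_eq!(effective_gas_price(100, 2, None), 100);
}
```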
-use std::sync::Arc; - -use alloy_rpc_types_mev::{SendBundleRequest, SimBundleOverrides, SimBundleResponse}; +use alloy_eips::BlockNumberOrTag; +use alloy_primitives::U256; +use alloy_rpc_types_eth::BlockId; +use alloy_rpc_types_mev::{ + BundleItem, Inclusion, Privacy, RefundConfig, SendBundleRequest, SimBundleLogs, + SimBundleOverrides, SimBundleResponse, Validity, +}; use jsonrpsee::core::RpcResult; +use reth_chainspec::EthChainSpec; +use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; +use reth_primitives::{ + revm_primitives::db::{DatabaseCommit, DatabaseRef}, + TransactionSigned, +}; +use reth_provider::{ChainSpecProvider, HeaderProvider}; +use reth_revm::database::StateProviderDatabase; use reth_rpc_api::MevSimApiServer; -use reth_rpc_eth_api::helpers::{Call, EthTransactions, LoadPendingBlock}; -use reth_rpc_eth_types::EthApiError; +use reth_rpc_eth_api::{ + helpers::{Call, EthTransactions, LoadPendingBlock}, + FromEthApiError, RpcNodeCore, +}; +use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError}; use reth_tasks::pool::BlockingTaskGuard; +use revm::{ + db::CacheDB, + primitives::{Address, EnvWithHandlerCfg, ResultAndState, SpecId, TxEnv}, +}; +use std::{sync::Arc, time::Duration}; use tracing::info; +/// Maximum bundle depth +const MAX_NESTED_BUNDLE_DEPTH: usize = 5; + +/// Maximum body size +const MAX_BUNDLE_BODY_SIZE: usize = 50; + +/// Default simulation timeout +const DEFAULT_SIM_TIMEOUT: Duration = Duration::from_secs(5); + +/// Maximum simulation timeout +const MAX_SIM_TIMEOUT: Duration = Duration::from_secs(30); + +/// Maximum payout cost +const SBUNDLE_PAYOUT_MAX_COST: u64 = 30_000; + +/// A flattened representation of a bundle item containing transaction and associated metadata. +#[derive(Clone, Debug)] +pub struct FlattenedBundleItem { + /// The signed transaction + pub tx: TransactionSigned, + /// The address that signed the transaction + pub signer: Address, + /// Whether the transaction is allowed to revert + pub can_revert: bool, + /// Item-level inclusion constraints + pub inclusion: Inclusion, + /// Optional validity constraints for the bundle item + pub validity: Option, + /// Optional privacy settings for the bundle item + pub privacy: Option, + /// Optional refund percent for the bundle item + pub refund_percent: Option, + /// Optional refund configs for the bundle item + pub refund_configs: Option>, +} + /// `Eth` sim bundle implementation. pub struct EthSimBundle { /// All nested fields bundled together. @@ -21,20 +77,366 @@ impl EthSimBundle { pub fn new(eth_api: Eth, blocking_task_guard: BlockingTaskGuard) -> Self { Self { inner: Arc::new(EthSimBundleInner { eth_api, blocking_task_guard }) } } + + /// Access the underlying `Eth` API. + pub fn eth_api(&self) -> &Eth { + &self.inner.eth_api + } } impl EthSimBundle where Eth: EthTransactions + LoadPendingBlock + Call + 'static, { - /// Simulates a bundle of transactions. - pub async fn sim_bundle( + /// Flattens a potentially nested bundle into a list of individual transactions in a + /// `FlattenedBundleItem` with their associated metadata. This handles recursive bundle + /// processing up to `MAX_NESTED_BUNDLE_DEPTH` and `MAX_BUNDLE_BODY_SIZE`, preserving + /// inclusion, validity and privacy settings from parent bundles. 
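`parse_and_flatten_bundle` below avoids recursion: it drives an explicit stack of `(bundle, next_index, depth)` frames, pushing the parent frame back before descending into a nested bundle so traversal resumes where it left off, and bails out past `MAX_NESTED_BUNDLE_DEPTH`. A self-contained sketch of the same traversal over a toy item type (all names here are illustrative):

```rust
const MAX_DEPTH: usize = 5;

enum Item {
    Tx(u32),
    Bundle(Vec<Item>),
}

/// Flattens nested bundles depth-first, preserving transaction order,
/// using an explicit (items, next_index, depth) stack instead of recursion.
fn flatten(root: &[Item]) -> Result<Vec<u32>, &'static str> {
    let mut out = Vec::new();
    let mut stack: Vec<(&[Item], usize, usize)> = vec![(root, 0, 1)];

    while let Some((items, mut idx, depth)) = stack.pop() {
        if depth > MAX_DEPTH {
            return Err("max depth reached");
        }
        while idx < items.len() {
            match &items[idx] {
                Item::Tx(tx) => {
                    out.push(*tx);
                    idx += 1;
                }
                Item::Bundle(inner) => {
                    // Remember where to resume in the parent, then descend.
                    stack.push((items, idx + 1, depth));
                    stack.push((inner.as_slice(), 0, depth + 1));
                    break;
                }
            }
        }
    }
    Ok(out)
}

fn main() {
    let bundle = vec![
        Item::Tx(1),
        Item::Bundle(vec![Item::Tx(2), Item::Tx(3)]),
        Item::Tx(4),
    ];
    // Nested items come out in-order, between their surrounding siblings.
    assert_eq!(flatten(&bundle).unwrap(), vec![1, 2, 3, 4]);
}
```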
+ fn parse_and_flatten_bundle( + &self, + request: &SendBundleRequest, + ) -> Result, EthApiError> { + let mut items = Vec::new(); + + // Stack for processing bundles + let mut stack = Vec::new(); + + // Start with initial bundle, index 0, and depth 1 + stack.push((request, 0, 1)); + + while let Some((current_bundle, mut idx, depth)) = stack.pop() { + // Check max depth + if depth > MAX_NESTED_BUNDLE_DEPTH { + return Err(EthApiError::InvalidParams(EthSimBundleError::MaxDepth.to_string())); + } + + // Determine inclusion, validity, and privacy + let inclusion = ¤t_bundle.inclusion; + let validity = ¤t_bundle.validity; + let privacy = ¤t_bundle.privacy; + + // Validate inclusion parameters + let block_number = inclusion.block_number(); + let max_block_number = inclusion.max_block_number().unwrap_or(block_number); + + if max_block_number < block_number || block_number == 0 { + return Err(EthApiError::InvalidParams( + EthSimBundleError::InvalidInclusion.to_string(), + )); + } + + // Validate bundle body size + if current_bundle.bundle_body.len() > MAX_BUNDLE_BODY_SIZE { + return Err(EthApiError::InvalidParams( + EthSimBundleError::BundleTooLarge.to_string(), + )); + } + + // Validate validity and refund config + if let Some(validity) = ¤t_bundle.validity { + // Validate refund entries + if let Some(refunds) = &validity.refund { + let mut total_percent = 0; + for refund in refunds { + if refund.body_idx as usize >= current_bundle.bundle_body.len() { + return Err(EthApiError::InvalidParams( + EthSimBundleError::InvalidValidity.to_string(), + )); + } + if 100 - total_percent < refund.percent { + return Err(EthApiError::InvalidParams( + EthSimBundleError::InvalidValidity.to_string(), + )); + } + total_percent += refund.percent; + } + } + + // Validate refund configs + if let Some(refund_configs) = &validity.refund_config { + let mut total_percent = 0; + for refund_config in refund_configs { + if 100 - total_percent < refund_config.percent { + return Err(EthApiError::InvalidParams( + EthSimBundleError::InvalidValidity.to_string(), + )); + } + total_percent += refund_config.percent; + } + } + } + + let body = ¤t_bundle.bundle_body; + + // Process items in the current bundle + while idx < body.len() { + match &body[idx] { + BundleItem::Tx { tx, can_revert } => { + let recovered_tx = + recover_raw_transaction(tx.clone()).map_err(EthApiError::from)?; + let (tx, signer) = recovered_tx.into_components(); + let tx = tx.into_transaction(); + + let refund_percent = + validity.as_ref().and_then(|v| v.refund.as_ref()).and_then(|refunds| { + refunds.iter().find_map(|refund| { + (refund.body_idx as usize == idx).then_some(refund.percent) + }) + }); + let refund_configs = + validity.as_ref().and_then(|v| v.refund_config.clone()); + + // Create FlattenedBundleItem with current inclusion, validity, and privacy + let flattened_item = FlattenedBundleItem { + tx, + signer, + can_revert: *can_revert, + inclusion: inclusion.clone(), + validity: validity.clone(), + privacy: privacy.clone(), + refund_percent, + refund_configs, + }; + + // Add to items + items.push(flattened_item); + + idx += 1; + } + BundleItem::Bundle { bundle } => { + // Push the current bundle and next index onto the stack to resume later + stack.push((current_bundle, idx + 1, depth)); + + // process the nested bundle next + stack.push((bundle, 0, depth + 1)); + break; + } + BundleItem::Hash { hash: _ } => { + // Hash-only items are not allowed + return Err(EthApiError::InvalidParams( + EthSimBundleError::InvalidBundle.to_string(), + )); + } + } + } + 
} + + Ok(items) + } + + async fn sim_bundle( &self, request: SendBundleRequest, overrides: SimBundleOverrides, - ) -> RpcResult { - info!("mev_simBundle called, request: {:?}, overrides: {:?}", request, overrides); - Err(EthApiError::Unsupported("mev_simBundle is not supported").into()) + logs: bool, + ) -> Result { + let SimBundleOverrides { + parent_block, + block_number, + coinbase, + timestamp, + gas_limit, + base_fee, + .. + } = overrides; + + // Parse and validate bundle + // Also, flatten the bundle here so that its easier to process + let flattened_bundle = self.parse_and_flatten_bundle(&request)?; + + let block_id = parent_block.unwrap_or(BlockId::Number(BlockNumberOrTag::Pending)); + let (cfg, mut block_env, current_block) = self.eth_api().evm_env_at(block_id).await?; + + let parent_header = RpcNodeCore::provider(&self.inner.eth_api) + .header_by_number(block_env.number.saturating_to::()) + .map_err(EthApiError::from_eth_err)? // Explicitly map the error + .ok_or_else(|| { + EthApiError::HeaderNotFound((block_env.number.saturating_to::()).into()) + })?; + + // apply overrides + if let Some(block_number) = block_number { + block_env.number = U256::from(block_number); + } + + if let Some(coinbase) = coinbase { + block_env.coinbase = coinbase; + } + + if let Some(timestamp) = timestamp { + block_env.timestamp = U256::from(timestamp); + } + + if let Some(gas_limit) = gas_limit { + block_env.gas_limit = U256::from(gas_limit); + } + + if let Some(base_fee) = base_fee { + block_env.basefee = U256::from(base_fee); + } else if cfg.handler_cfg.spec_id.is_enabled_in(SpecId::LONDON) { + if let Some(base_fee) = parent_header.next_block_base_fee( + RpcNodeCore::provider(&self.inner.eth_api) + .chain_spec() + .base_fee_params_at_block(block_env.number.saturating_to::()), + ) { + block_env.basefee = U256::from(base_fee); + } + } + + let eth_api = self.inner.eth_api.clone(); + + let sim_response = self + .inner + .eth_api + .spawn_with_state_at_block(current_block, move |state| { + // Setup environment + let current_block_number = current_block.as_u64().unwrap(); + let coinbase = block_env.coinbase; + let basefee = block_env.basefee; + let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, TxEnv::default()); + let db = CacheDB::new(StateProviderDatabase::new(state)); + + let initial_coinbase_balance = DatabaseRef::basic_ref(&db, coinbase) + .map_err(EthApiError::from_eth_err)? 
+ .map(|acc| acc.balance) + .unwrap_or_default(); + + let mut coinbase_balance_before_tx = initial_coinbase_balance; + let mut total_gas_used = 0; + let mut total_profit = U256::ZERO; + let mut refundable_value = U256::ZERO; + let mut body_logs: Vec = Vec::new(); + + let mut evm = eth_api.evm_config().evm_with_env(db, env); + + for item in &flattened_bundle { + // Check inclusion constraints + let block_number = item.inclusion.block_number(); + let max_block_number = + item.inclusion.max_block_number().unwrap_or(block_number); + + if current_block_number < block_number || + current_block_number > max_block_number + { + return Err(EthApiError::InvalidParams( + EthSimBundleError::InvalidInclusion.to_string(), + ) + .into()); + } + eth_api.evm_config().fill_tx_env(evm.tx_mut(), &item.tx, item.signer); + + let ResultAndState { result, state } = + evm.transact().map_err(EthApiError::from_eth_err)?; + + if !result.is_success() && !item.can_revert { + return Err(EthApiError::InvalidParams( + EthSimBundleError::BundleTransactionFailed.to_string(), + ) + .into()); + } + + let gas_used = result.gas_used(); + total_gas_used += gas_used; + + // coinbase is always present in the result state + let coinbase_balance_after_tx = + state.get(&coinbase).map(|acc| acc.info.balance).unwrap_or_default(); + + let coinbase_diff = + coinbase_balance_after_tx.saturating_sub(coinbase_balance_before_tx); + total_profit += coinbase_diff; + + // Add to refundable value if this tx does not have a refund percent + if item.refund_percent.is_none() { + refundable_value += coinbase_diff; + } + + // Update coinbase balance before next tx + coinbase_balance_before_tx = coinbase_balance_after_tx; + + // Collect logs if requested + // TODO: since we are looping over iteratively, we are not collecting bundle + // logs. We should collect bundle logs when we are processing the bundle items. 
+ if logs { + let tx_logs = result.logs().to_vec(); + let sim_bundle_logs = + SimBundleLogs { tx_logs: Some(tx_logs), bundle_logs: None }; + body_logs.push(sim_bundle_logs); + } + + // Apply state changes + evm.context.evm.db.commit(state); + } + + // After processing all transactions, process refunds + for item in &flattened_bundle { + if let Some(refund_percent) = item.refund_percent { + // Get refund configurations + let refund_configs = item.refund_configs.clone().unwrap_or_else(|| { + vec![RefundConfig { address: item.signer, percent: 100 }] + }); + + // Calculate payout transaction fee + let payout_tx_fee = basefee * + U256::from(SBUNDLE_PAYOUT_MAX_COST) * + U256::from(refund_configs.len() as u64); + + // Add gas used for payout transactions + total_gas_used += SBUNDLE_PAYOUT_MAX_COST * refund_configs.len() as u64; + + // Calculate allocated refundable value (payout value) + let payout_value = + refundable_value * U256::from(refund_percent) / U256::from(100); + + if payout_tx_fee > payout_value { + return Err(EthApiError::InvalidParams( + EthSimBundleError::NegativeProfit.to_string(), + ) + .into()); + } + + // Subtract payout value from total profit + total_profit = total_profit.checked_sub(payout_value).ok_or( + EthApiError::InvalidParams( + EthSimBundleError::NegativeProfit.to_string(), + ), + )?; + + // Adjust refundable value + refundable_value = refundable_value.checked_sub(payout_value).ok_or( + EthApiError::InvalidParams( + EthSimBundleError::NegativeProfit.to_string(), + ), + )?; + } + } + + // Calculate mev gas price + let mev_gas_price = if total_gas_used != 0 { + total_profit / U256::from(total_gas_used) + } else { + U256::ZERO + }; + + Ok(SimBundleResponse { + success: true, + state_block: current_block_number, + error: None, + logs: Some(body_logs), + gas_used: total_gas_used, + mev_gas_price, + profit: total_profit, + refundable_value, + exec_error: None, + revert: None, + }) + }) + .await + .map_err(|_| { + EthApiError::InvalidParams(EthSimBundleError::BundleTimeout.to_string()) + })?; + + Ok(sim_response) } } @@ -48,7 +450,23 @@ where request: SendBundleRequest, overrides: SimBundleOverrides, ) -> RpcResult { - Self::sim_bundle(self, request, overrides).await + info!("mev_simBundle called, request: {:?}, overrides: {:?}", request, overrides); + + let override_timeout = overrides.timeout; + + let timeout = override_timeout + .map(Duration::from_secs) + .filter(|&custom_duration| custom_duration <= MAX_SIM_TIMEOUT) + .unwrap_or(DEFAULT_SIM_TIMEOUT); + + let bundle_res = + tokio::time::timeout(timeout, Self::sim_bundle(self, request, overrides, true)) + .await + .map_err(|_| { + EthApiError::InvalidParams(EthSimBundleError::BundleTimeout.to_string()) + })?; + + bundle_res.map_err(Into::into) } } @@ -74,3 +492,35 @@ impl Clone for EthSimBundle { Self { inner: Arc::clone(&self.inner) } } } + +/// [`EthSimBundle`] specific errors. 
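The `mev_simBundle` handler above treats the caller-supplied timeout as advisory: an override is used only if it stays within `MAX_SIM_TIMEOUT`; otherwise the 5-second default applies. A small sketch of just the clamping rule, since the edge case is easy to miss:

```rust
use std::time::Duration;

const DEFAULT_SIM_TIMEOUT: Duration = Duration::from_secs(5);
const MAX_SIM_TIMEOUT: Duration = Duration::from_secs(30);

/// Resolves the simulation timeout: keep a caller override only when it is
/// within the allowed maximum; anything larger (or absent) falls back to the
/// default rather than being rejected outright.
fn resolve_timeout(override_secs: Option<u64>) -> Duration {
    override_secs
        .map(Duration::from_secs)
        .filter(|&d| d <= MAX_SIM_TIMEOUT)
        .unwrap_or(DEFAULT_SIM_TIMEOUT)
}

fn main() {
    assert_eq!(resolve_timeout(None), DEFAULT_SIM_TIMEOUT);
    assert_eq!(resolve_timeout(Some(10)), Duration::from_secs(10));
    // An override above the cap is ignored, not clamped to the maximum.
    assert_eq!(resolve_timeout(Some(60)), DEFAULT_SIM_TIMEOUT);
}
```

Note that an oversized override falls back to the default rather than being clamped to the 30-second cap; that matches the `filter`/`unwrap_or` chain in the handler.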
+#[derive(Debug, thiserror::Error)] +pub enum EthSimBundleError { + /// Thrown when max depth is reached + #[error("max depth reached")] + MaxDepth, + /// Thrown when a bundle is unmatched + #[error("unmatched bundle")] + UnmatchedBundle, + /// Thrown when a bundle is too large + #[error("bundle too large")] + BundleTooLarge, + /// Thrown when validity is invalid + #[error("invalid validity")] + InvalidValidity, + /// Thrown when inclusion is invalid + #[error("invalid inclusion")] + InvalidInclusion, + /// Thrown when a bundle is invalid + #[error("invalid bundle")] + InvalidBundle, + /// Thrown when a bundle simulation times out + #[error("bundle simulation timed out")] + BundleTimeout, + /// Thrown when a transaction is reverted in a bundle + #[error("bundle transaction failed")] + BundleTransactionFailed, + /// Thrown when a bundle simulation returns negative profit + #[error("bundle simulation returned negative profit")] + NegativeProfit, +} diff --git a/crates/rpc/rpc/src/lib.rs b/crates/rpc/rpc/src/lib.rs index eec14981bf57..76fb96f91629 100644 --- a/crates/rpc/rpc/src/lib.rs +++ b/crates/rpc/rpc/src/lib.rs @@ -42,7 +42,9 @@ mod reth; mod rpc; mod trace; mod txpool; +mod validation; mod web3; + pub use admin::AdminApi; pub use debug::DebugApi; pub use engine::{EngineApi, EngineEthApi}; @@ -53,4 +55,5 @@ pub use reth::RethApi; pub use rpc::RPCApi; pub use trace::TraceApi; pub use txpool::TxPoolApi; +pub use validation::{ValidationApi, ValidationApiConfig}; pub use web3::Web3Api; diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index 024bdd172fb9..d19dcf4d609b 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -1,7 +1,8 @@ use alloy_consensus::Transaction; +use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_network::{ReceiptResponse, TransactionResponse}; use alloy_primitives::{Address, Bytes, TxHash, B256, U256}; -use alloy_rpc_types::{BlockTransactions, Header, TransactionReceipt}; +use alloy_rpc_types_eth::{BlockTransactions, Header, TransactionReceipt}; use alloy_rpc_types_trace::{ otterscan::{ BlockDetails, ContractCreator, InternalOperation, OperationType, OtsBlockTransactions, @@ -11,7 +12,6 @@ use alloy_rpc_types_trace::{ }; use async_trait::async_trait; use jsonrpsee::{core::RpcResult, types::ErrorObjectOwned}; -use reth_primitives::{BlockId, BlockNumberOrTag}; use reth_rpc_api::{EthApiServer, OtterscanServer}; use reth_rpc_eth_api::{ helpers::{EthTransactions, TraceExt}, @@ -165,7 +165,7 @@ where } /// Handler for `ots_getBlockDetails` - async fn get_block_details(&self, block_number: u64) -> RpcResult { + async fn get_block_details(&self, block_number: u64) -> RpcResult> { let block_id = block_number.into(); let block = self.eth.block_by_number(block_id, true); let block_id = block_id.into(); @@ -178,7 +178,7 @@ where } /// Handler for `getBlockDetailsByHash` - async fn get_block_details_by_hash(&self, block_hash: B256) -> RpcResult { + async fn get_block_details_by_hash(&self, block_hash: B256) -> RpcResult> { let block = self.eth.block_by_hash(block_hash, true); let block_id = block_hash.into(); let receipts = self.eth.block_receipts(block_id); @@ -195,7 +195,7 @@ where block_number: u64, page_number: usize, page_size: usize, - ) -> RpcResult>> { + ) -> RpcResult, Header>> { let block_id = block_number.into(); // retrieve full block and its receipts let block = self.eth.block_by_number(block_id, true); @@ -227,7 +227,8 @@ where *transactions = transactions.drain(page_start..page_end).collect::>(); // The 
input field returns only the 4 bytes method selector instead of the entire - // calldata byte blob. + // calldata byte blob + // See also: for tx in transactions.iter_mut() { if tx.input().len() > 4 { Eth::TransactionCompat::otterscan_api_truncate_input(tx); @@ -238,7 +239,7 @@ where let timestamp = Some(block.header.timestamp); let receipts = receipts .drain(page_start..page_end) - .zip(transactions.iter().map(Eth::TransactionCompat::tx_type)) + .zip(transactions.iter().map(Transaction::ty)) .map(|(receipt, tx_ty)| { let inner = OtsReceipt { status: receipt.status(), diff --git a/crates/rpc/rpc/src/reth.rs b/crates/rpc/rpc/src/reth.rs index 6d5897df1315..c33f97f5301d 100644 --- a/crates/rpc/rpc/src/reth.rs +++ b/crates/rpc/rpc/src/reth.rs @@ -1,10 +1,10 @@ use std::{collections::HashMap, future::Future, sync::Arc}; +use alloy_eips::BlockId; use alloy_primitives::{Address, U256}; use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_errors::RethResult; -use reth_primitives::BlockId; use reth_provider::{BlockReaderIdExt, ChangeSetReader, StateProviderFactory}; use reth_rpc_api::RethApiServer; use reth_rpc_eth_types::{EthApiError, EthResult}; diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index b9b15b5366df..81dc8ff8b8a8 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -1,9 +1,11 @@ +use alloy_consensus::Header; +use alloy_eips::BlockId; use alloy_primitives::{map::HashSet, Bytes, B256, U256}; -use alloy_rpc_types::{ +use alloy_rpc_types_eth::{ state::{EvmOverrides, StateOverride}, + transaction::TransactionRequest, BlockOverrides, Index, }; -use alloy_rpc_types_eth::transaction::TransactionRequest; use alloy_rpc_types_trace::{ filter::TraceFilter, opcode::{BlockOpcodeGas, TransactionOpcodeGas}, @@ -17,14 +19,10 @@ use reth_consensus_common::calc::{ base_block_reward, base_block_reward_pre_merge, block_reward, ommer_reward, }; use reth_evm::ConfigureEvmEnv; -use reth_primitives::{BlockId, Header}; use reth_provider::{BlockReader, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::TraceApiServer; -use reth_rpc_eth_api::{ - helpers::{Call, TraceExt}, - FromEthApiError, -}; +use reth_rpc_eth_api::{helpers::TraceExt, FromEthApiError}; use reth_rpc_eth_types::{error::EthApiError, utils::recover_raw_transaction}; use reth_tasks::pool::BlockingTaskGuard; use revm::{ @@ -124,7 +122,7 @@ where let env = EnvWithHandlerCfg::new_with_cfg_env( cfg, block, - Call::evm_config(self.eth_api()).tx_env(tx.as_signed(), tx.signer()), + self.eth_api().evm_config().tx_env(tx.as_signed(), tx.signer()), ); let config = TracingInspectorConfig::from_parity_config(&trace_types); diff --git a/crates/rpc/rpc/src/txpool.rs b/crates/rpc/rpc/src/txpool.rs index d03e10ca75a8..3e46183b4661 100644 --- a/crates/rpc/rpc/src/txpool.rs +++ b/crates/rpc/rpc/src/txpool.rs @@ -1,3 +1,4 @@ +use core::fmt; use std::collections::BTreeMap; use alloy_consensus::Transaction; @@ -6,7 +7,7 @@ use alloy_rpc_types_txpool::{ TxpoolContent, TxpoolContentFrom, TxpoolInspect, TxpoolInspectSummary, TxpoolStatus, }; use async_trait::async_trait; -use jsonrpsee::core::RpcResult as Result; +use jsonrpsee::core::RpcResult; use reth_primitives::TransactionSignedEcRecovered; use reth_rpc_api::TxPoolApiServer; use reth_rpc_types_compat::{transaction::from_recovered, TransactionCompat}; @@ -35,33 +36,36 @@ where Pool: TransactionPool + 'static, Eth: TransactionCompat, { - fn content(&self) -> TxpoolContent { + fn 
content(&self) -> Result, Eth::Error> { #[inline] fn insert( tx: &Tx, content: &mut BTreeMap>, resp_builder: &RpcTxB, - ) where + ) -> Result<(), RpcTxB::Error> + where Tx: PoolTransaction>, RpcTxB: TransactionCompat, { content.entry(tx.sender()).or_default().insert( tx.nonce().to_string(), - from_recovered(tx.clone().into_consensus().into(), resp_builder), + from_recovered(tx.clone().into_consensus().into(), resp_builder)?, ); + + Ok(()) } let AllPoolTransactions { pending, queued } = self.pool.all_transactions(); let mut content = TxpoolContent { pending: BTreeMap::new(), queued: BTreeMap::new() }; for pending in pending { - insert::<_, Eth>(&pending.transaction, &mut content.pending, &self.tx_resp_builder); + insert::<_, Eth>(&pending.transaction, &mut content.pending, &self.tx_resp_builder)?; } for queued in queued { - insert::<_, Eth>(&queued.transaction, &mut content.queued, &self.tx_resp_builder); + insert::<_, Eth>(&queued.transaction, &mut content.queued, &self.tx_resp_builder)?; } - content + Ok(content) } } @@ -76,7 +80,7 @@ where /// Ref: [Here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool_status) /// /// Handler for `txpool_status` - async fn txpool_status(&self) -> Result { + async fn txpool_status(&self) -> RpcResult { trace!(target: "rpc::eth", "Serving txpool_status"); let all = self.pool.all_transactions(); Ok(TxpoolStatus { pending: all.pending.len() as u64, queued: all.queued.len() as u64 }) @@ -88,7 +92,7 @@ where /// See [here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool_inspect) for more details /// /// Handler for `txpool_inspect` - async fn txpool_inspect(&self) -> Result { + async fn txpool_inspect(&self) -> RpcResult { trace!(target: "rpc::eth", "Serving txpool_inspect"); #[inline] @@ -131,9 +135,9 @@ where async fn txpool_content_from( &self, from: Address, - ) -> Result> { + ) -> RpcResult> { trace!(target: "rpc::eth", ?from, "Serving txpool_contentFrom"); - Ok(self.content().remove_from(&from)) + Ok(self.content().map_err(Into::into)?.remove_from(&from)) } /// Returns the details of all transactions currently pending for inclusion in the next @@ -141,14 +145,14 @@ where /// /// See [here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool_content) for more details /// Handler for `txpool_content` - async fn txpool_content(&self) -> Result> { + async fn txpool_content(&self) -> RpcResult> { trace!(target: "rpc::eth", "Serving txpool_content"); - Ok(self.content()) + Ok(self.content().map_err(Into::into)?) 
} } -impl std::fmt::Debug for TxPoolApi { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl fmt::Debug for TxPoolApi { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("TxpoolApi").finish_non_exhaustive() } } diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs new file mode 100644 index 000000000000..b997dec1e015 --- /dev/null +++ b/crates/rpc/rpc/src/validation.rs @@ -0,0 +1,489 @@ +use alloy_consensus::{BlobTransactionValidationError, EnvKzgSettings, Transaction}; +use alloy_eips::eip4844::kzg_to_versioned_hash; +use alloy_rpc_types_beacon::relay::{ + BidTrace, BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, + BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4, +}; +use alloy_rpc_types_engine::{ + BlobsBundleV1, CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, +}; +use async_trait::async_trait; +use jsonrpsee::core::RpcResult; +use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; +use reth_consensus::{Consensus, PostExecutionInput}; +use reth_errors::{BlockExecutionError, ConsensusError, ProviderError, RethError}; +use reth_ethereum_consensus::GAS_LIMIT_BOUND_DIVISOR; +use reth_evm::execute::{BlockExecutorProvider, Executor}; +use reth_payload_validator::ExecutionPayloadValidator; +use reth_primitives::{Block, GotExpected, Receipt, SealedBlockWithSenders, SealedHeader}; +use reth_provider::{ + AccountReader, BlockExecutionInput, BlockExecutionOutput, BlockReaderIdExt, HeaderProvider, + StateProviderFactory, WithdrawalsProvider, +}; +use reth_revm::{cached::CachedReads, database::StateProviderDatabase}; +use reth_rpc_api::BlockSubmissionValidationApiServer; +use reth_rpc_eth_types::EthApiError; +use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; +use reth_trie::HashedPostState; +use revm_primitives::{Address, B256, U256}; +use serde::{Deserialize, Serialize}; +use std::{collections::HashSet, sync::Arc}; +use tokio::sync::RwLock; + +/// The type that implements the `validation` rpc namespace trait +#[derive(Debug, derive_more::Deref)] +pub struct ValidationApi { + #[deref] + inner: Arc>, +} + +impl ValidationApi +where + Provider: ChainSpecProvider, +{ + /// Create a new instance of the [`ValidationApi`] + pub fn new( + provider: Provider, + consensus: Arc, + executor_provider: E, + config: ValidationApiConfig, + ) -> Self { + let ValidationApiConfig { disallow } = config; + + let payload_validator = ExecutionPayloadValidator::new(provider.chain_spec()); + let inner = Arc::new(ValidationApiInner { + provider, + consensus, + payload_validator, + executor_provider, + disallow, + cached_state: Default::default(), + }); + + Self { inner } + } + + /// Returns the cached reads for the given head hash. + async fn cached_reads(&self, head: B256) -> CachedReads { + let cache = self.inner.cached_state.read().await; + if cache.0 == head { + cache.1.clone() + } else { + Default::default() + } + } + + /// Updates the cached state for the given head hash. 
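`cached_reads`/`update_cached_reads` keep a single cache slot keyed by head hash: reads merge into the slot while the head is unchanged and replace it once validation moves to a new head. A generic sketch of that pattern, assuming the `tokio` crate with the `rt` and `macros` features (`HeadCache` and its `Vec<String>` payload stand in for reth's `CachedReads`):

```rust
use tokio::sync::RwLock;

/// One-entry cache: reads are only reusable while validating against the
/// same head, so anything recorded for an older head is simply dropped.
struct HeadCache {
    slot: RwLock<(u64, Vec<String>)>, // (head id, accumulated reads)
}

impl HeadCache {
    async fn get(&self, head: u64) -> Vec<String> {
        let slot = self.slot.read().await;
        if slot.0 == head { slot.1.clone() } else { Vec::new() }
    }

    async fn update(&self, head: u64, reads: Vec<String>) {
        let mut slot = self.slot.write().await;
        if slot.0 == head {
            slot.1.extend(reads); // same head: merge new reads in
        } else {
            *slot = (head, reads); // head moved: replace the slot
        }
    }
}

#[tokio::main]
async fn main() {
    let cache = HeadCache { slot: RwLock::new((0, Vec::new())) };
    cache.update(1, vec!["acct A".into()]).await;
    cache.update(1, vec!["acct B".into()]).await; // merged
    assert_eq!(cache.get(1).await.len(), 2);
    cache.update(2, vec!["acct C".into()]).await; // head moved: replaced
    assert_eq!(cache.get(1).await.len(), 0);
}
```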
+ async fn update_cached_reads(&self, head: B256, cached_state: CachedReads) { + let mut cache = self.inner.cached_state.write().await; + if cache.0 == head { + cache.1.extend(cached_state); + } else { + *cache = (head, cached_state) + } + } +} + +impl ValidationApi +where + Provider: BlockReaderIdExt + + ChainSpecProvider + + StateProviderFactory + + HeaderProvider + + AccountReader + + WithdrawalsProvider + + 'static, + E: BlockExecutorProvider, +{ + /// Validates the given block and a [`BidTrace`] against it. + pub async fn validate_message_against_block( + &self, + block: SealedBlockWithSenders, + message: BidTrace, + registered_gas_limit: u64, + ) -> Result<(), ValidationApiError> { + self.validate_message_against_header(&block.header, &message)?; + + self.consensus.validate_header_with_total_difficulty(&block.header, U256::MAX)?; + self.consensus.validate_header(&block.header)?; + self.consensus.validate_block_pre_execution(&block)?; + + if !self.disallow.is_empty() { + if self.disallow.contains(&block.beneficiary) { + return Err(ValidationApiError::Blacklist(block.beneficiary)) + } + if self.disallow.contains(&message.proposer_fee_recipient) { + return Err(ValidationApiError::Blacklist(message.proposer_fee_recipient)) + } + for (sender, tx) in block.senders.iter().zip(block.transactions()) { + if self.disallow.contains(sender) { + return Err(ValidationApiError::Blacklist(*sender)) + } + if let Some(to) = tx.to() { + if self.disallow.contains(&to) { + return Err(ValidationApiError::Blacklist(to)) + } + } + } + } + + let latest_header = + self.provider.latest_header()?.ok_or_else(|| ValidationApiError::MissingLatestBlock)?; + + if latest_header.hash() != block.header.parent_hash { + return Err(ConsensusError::ParentHashMismatch( + GotExpected { got: block.header.parent_hash, expected: latest_header.hash() } + .into(), + ) + .into()) + } + self.consensus.validate_header_against_parent(&block.header, &latest_header)?; + self.validate_gas_limit(registered_gas_limit, &latest_header, &block.header)?; + + let latest_header_hash = latest_header.hash(); + let state_provider = self.provider.state_by_block_hash(latest_header_hash)?; + + let mut request_cache = self.cached_reads(latest_header_hash).await; + + let cached_db = request_cache.as_db_mut(StateProviderDatabase::new(&state_provider)); + let executor = self.executor_provider.executor(cached_db); + + let block = block.unseal(); + let mut accessed_blacklisted = None; + let output = executor.execute_with_state_closure( + BlockExecutionInput::new(&block, U256::MAX), + |state| { + if !self.disallow.is_empty() { + for account in state.cache.accounts.keys() { + if self.disallow.contains(account) { + accessed_blacklisted = Some(*account); + } + } + } + }, + )?; + + // update the cached reads + self.update_cached_reads(latest_header_hash, request_cache).await; + + if let Some(account) = accessed_blacklisted { + return Err(ValidationApiError::Blacklist(account)) + } + + self.consensus.validate_block_post_execution( + &block, + PostExecutionInput::new(&output.receipts, &output.requests), + )?; + + self.ensure_payment(&block, &output, &message)?; + + let state_root = + state_provider.state_root(HashedPostState::from_bundle_state(&output.state.state))?; + + if state_root != block.state_root { + return Err(ConsensusError::BodyStateRootDiff( + GotExpected { got: state_root, expected: block.state_root }.into(), + ) + .into()) + } + + Ok(()) + } + + /// Ensures that fields of [`BidTrace`] match the fields of the [`SealedHeader`]. 
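Note that `validate_message_against_block` above enforces the blacklist twice: statically against senders and `to` targets before execution, then again through the `execute_with_state_closure` hook, which sees every account the EVM actually touched, including ones reached only via internal calls. A reduced sketch of that post-execution sweep (`touched_accounts` is a hypothetical stand-in for the executor's cached account keys):

```rust
use std::collections::HashSet;

/// After execution, flag any touched account that is disallowed. The static
/// pre-checks on senders/recipients cannot see addresses that are only
/// reached through internal calls, hence this second pass.
fn find_blacklisted(
    touched_accounts: &[[u8; 20]],
    disallow: &HashSet<[u8; 20]>,
) -> Option<[u8; 20]> {
    if disallow.is_empty() {
        return None; // skip the sweep entirely when there is no blacklist
    }
    touched_accounts.iter().copied().find(|acct| disallow.contains(acct))
}

fn main() {
    let banned: HashSet<[u8; 20]> = [[0xaa; 20]].into_iter().collect();
    // The banned address shows up only in the post-execution account set.
    let touched = [[0x11; 20], [0xaa; 20]];
    assert_eq!(find_blacklisted(&touched, &banned), Some([0xaa; 20]));
}
```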
+ fn validate_message_against_header( + &self, + header: &SealedHeader, + message: &BidTrace, + ) -> Result<(), ValidationApiError> { + if header.hash() != message.block_hash { + Err(ValidationApiError::BlockHashMismatch(GotExpected { + got: message.block_hash, + expected: header.hash(), + })) + } else if header.parent_hash != message.parent_hash { + Err(ValidationApiError::ParentHashMismatch(GotExpected { + got: message.parent_hash, + expected: header.parent_hash, + })) + } else if header.gas_limit != message.gas_limit { + Err(ValidationApiError::GasLimitMismatch(GotExpected { + got: message.gas_limit, + expected: header.gas_limit, + })) + } else if header.gas_used != message.gas_used { + return Err(ValidationApiError::GasUsedMismatch(GotExpected { + got: message.gas_used, + expected: header.gas_used, + })) + } else { + Ok(()) + } + } + + /// Ensures that the chosen gas limit is the closest possible value for the validator's + /// registered gas limit. + /// + /// Ref: + fn validate_gas_limit( + &self, + registered_gas_limit: u64, + parent_header: &SealedHeader, + header: &SealedHeader, + ) -> Result<(), ValidationApiError> { + let max_gas_limit = + parent_header.gas_limit + parent_header.gas_limit / GAS_LIMIT_BOUND_DIVISOR - 1; + let min_gas_limit = + parent_header.gas_limit - parent_header.gas_limit / GAS_LIMIT_BOUND_DIVISOR + 1; + + let best_gas_limit = + std::cmp::max(min_gas_limit, std::cmp::min(max_gas_limit, registered_gas_limit)); + + if best_gas_limit != header.gas_limit { + return Err(ValidationApiError::GasLimitMismatch(GotExpected { + got: header.gas_limit, + expected: best_gas_limit, + })) + } + + Ok(()) + } + + /// Ensures that the proposer has received [`BidTrace::value`] for this block. + /// + /// Firstly attempts to verify the payment by checking the state changes, otherwise falls back + /// to checking the latest block transaction. 
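The gas limit rule in `validate_gas_limit` above mirrors the protocol bound: a child block may move its gas limit by strictly less than `parent / GAS_LIMIT_BOUND_DIVISOR` (1024 on mainnet), so the closest reachable value to the validator's registered limit is a clamp into that open window. A worked sketch with the same arithmetic:

```rust
const GAS_LIMIT_BOUND_DIVISOR: u64 = 1024;

/// Closest gas limit to `registered` that a child of `parent` may legally use.
fn best_gas_limit(parent: u64, registered: u64) -> u64 {
    // The window is exclusive of parent +/- parent/1024, hence the -1/+1.
    let max = parent + parent / GAS_LIMIT_BOUND_DIVISOR - 1;
    let min = parent - parent / GAS_LIMIT_BOUND_DIVISOR + 1;
    registered.clamp(min, max)
}

fn main() {
    // parent 30_000_000 -> the window half-width is 29_296 (exclusive).
    assert_eq!(best_gas_limit(30_000_000, 30_000_000), 30_000_000);
    // registration far above the window clamps to the max.
    assert_eq!(best_gas_limit(30_000_000, 60_000_000), 30_029_295);
    // registration far below clamps to the min.
    assert_eq!(best_gas_limit(30_000_000, 1_000_000), 29_970_705);
}
```

Validation then requires the block's gas limit to equal this clamped value exactly, not merely to land inside the window.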
+ fn ensure_payment( + &self, + block: &Block, + output: &BlockExecutionOutput, + message: &BidTrace, + ) -> Result<(), ValidationApiError> { + let (mut balance_before, balance_after) = if let Some(acc) = + output.state.state.get(&message.proposer_fee_recipient) + { + let balance_before = acc.original_info.as_ref().map(|i| i.balance).unwrap_or_default(); + let balance_after = acc.info.as_ref().map(|i| i.balance).unwrap_or_default(); + + (balance_before, balance_after) + } else { + // account might have balance but considering it zero is fine as long as we know + // that balance have not changed + (U256::ZERO, U256::ZERO) + }; + + if let Some(withdrawals) = &block.body.withdrawals { + for withdrawal in withdrawals { + if withdrawal.address == message.proposer_fee_recipient { + balance_before += withdrawal.amount_wei(); + } + } + } + + if balance_after >= balance_before + message.value { + return Ok(()) + } + + let (receipt, tx) = output + .receipts + .last() + .zip(block.body.transactions.last()) + .ok_or(ValidationApiError::ProposerPayment)?; + + if !receipt.success { + return Err(ValidationApiError::ProposerPayment) + } + + if tx.to() != Some(message.proposer_fee_recipient) { + return Err(ValidationApiError::ProposerPayment) + } + + if tx.value() != message.value { + return Err(ValidationApiError::ProposerPayment) + } + + if !tx.input().is_empty() { + return Err(ValidationApiError::ProposerPayment) + } + + if let Some(block_base_fee) = block.base_fee_per_gas { + if tx.effective_tip_per_gas(block_base_fee).unwrap_or_default() != 0 { + return Err(ValidationApiError::ProposerPayment) + } + } + + Ok(()) + } + + /// Validates the given [`BlobsBundleV1`] and returns versioned hashes for blobs. + pub fn validate_blobs_bundle( + &self, + mut blobs_bundle: BlobsBundleV1, + ) -> Result, ValidationApiError> { + if blobs_bundle.commitments.len() != blobs_bundle.proofs.len() || + blobs_bundle.commitments.len() != blobs_bundle.blobs.len() + { + return Err(ValidationApiError::InvalidBlobsBundle) + } + + let versioned_hashes = blobs_bundle + .commitments + .iter() + .map(|c| kzg_to_versioned_hash(c.as_slice())) + .collect::>(); + + let sidecar = blobs_bundle.pop_sidecar(blobs_bundle.blobs.len()); + + sidecar.validate(&versioned_hashes, EnvKzgSettings::default().get())?; + + Ok(versioned_hashes) + } +} + +#[async_trait] +impl BlockSubmissionValidationApiServer for ValidationApi +where + Provider: BlockReaderIdExt + + ChainSpecProvider + + StateProviderFactory + + HeaderProvider + + AccountReader + + WithdrawalsProvider + + Clone + + 'static, + E: BlockExecutorProvider, +{ + async fn validate_builder_submission_v1( + &self, + _request: BuilderBlockValidationRequest, + ) -> RpcResult<()> { + Err(internal_rpc_err("unimplemented")) + } + + async fn validate_builder_submission_v2( + &self, + _request: BuilderBlockValidationRequestV2, + ) -> RpcResult<()> { + Err(internal_rpc_err("unimplemented")) + } + + /// Validates a block submitted to the relay + async fn validate_builder_submission_v3( + &self, + request: BuilderBlockValidationRequestV3, + ) -> RpcResult<()> { + let block = self + .payload_validator + .ensure_well_formed_payload( + ExecutionPayload::V3(request.request.execution_payload), + ExecutionPayloadSidecar::v3(CancunPayloadFields { + parent_beacon_block_root: request.parent_beacon_block_root, + versioned_hashes: self + .validate_blobs_bundle(request.request.blobs_bundle) + .map_err(|e| RethError::Other(e.into())) + .to_rpc_result()?, + }), + ) + .to_rpc_result()? 
+ .try_seal_with_senders() + .map_err(|_| EthApiError::InvalidTransactionSignature)?; + + self.validate_message_against_block( + block, + request.request.message, + request.registered_gas_limit, + ) + .await + .map_err(|e| RethError::Other(e.into())) + .to_rpc_result() + } + + /// Validates a block submitted to the relay + async fn validate_builder_submission_v4( + &self, + request: BuilderBlockValidationRequestV4, + ) -> RpcResult<()> { + let block = self + .payload_validator + .ensure_well_formed_payload( + ExecutionPayload::V3(request.request.execution_payload), + ExecutionPayloadSidecar::v4( + CancunPayloadFields { + parent_beacon_block_root: request.parent_beacon_block_root, + versioned_hashes: self + .validate_blobs_bundle(request.request.blobs_bundle) + .map_err(|e| RethError::Other(e.into())) + .to_rpc_result()?, + }, + request.request.execution_requests.into(), + ), + ) + .to_rpc_result()? + .try_seal_with_senders() + .map_err(|_| EthApiError::InvalidTransactionSignature)?; + + self.validate_message_against_block( + block, + request.request.message, + request.registered_gas_limit, + ) + .await + .map_err(|e| RethError::Other(e.into())) + .to_rpc_result() + } +} + +#[derive(Debug)] +pub struct ValidationApiInner<Provider: ChainSpecProvider, E> { + /// The provider that can interact with the chain. + provider: Provider, + /// Consensus implementation. + consensus: Arc<dyn Consensus>, + /// Execution payload validator. + payload_validator: ExecutionPayloadValidator<Provider::ChainSpec>, + /// Block executor factory. + executor_provider: E, + /// Set of disallowed addresses. + disallow: HashSet<Address>
, + /// Cached state reads to avoid redundant disk I/O across multiple validation attempts + /// targeting the same state. Stores a tuple of (`block_hash`, `cached_reads`) for the + /// latest head block state. Uses async `RwLock` to safely handle concurrent validation + /// requests. + cached_state: RwLock<(B256, CachedReads)>, +} + +/// Configuration for validation API. +#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize)] +pub struct ValidationApiConfig { + /// Disallowed addresses. + pub disallow: HashSet<Address>
, +} + +/// Errors thrown by the validation API. +#[derive(Debug, thiserror::Error)] +pub enum ValidationApiError { + #[error("block gas limit mismatch: {_0}")] + GasLimitMismatch(GotExpected<u64>), + #[error("block gas used mismatch: {_0}")] + GasUsedMismatch(GotExpected<u64>), + #[error("block parent hash mismatch: {_0}")] + ParentHashMismatch(GotExpected<B256>), + #[error("block hash mismatch: {_0}")] + BlockHashMismatch(GotExpected<B256>), + #[error("missing latest block in database")] + MissingLatestBlock, + #[error("could not verify proposer payment")] + ProposerPayment, + #[error("invalid blobs bundle")] + InvalidBlobsBundle, + #[error("block accesses blacklisted address: {_0}")] + Blacklist(Address), + #[error(transparent)] + Blob(#[from] BlobTransactionValidationError), + #[error(transparent)] + Consensus(#[from] ConsensusError), + #[error(transparent)] + Provider(#[from] ProviderError), + #[error(transparent)] + Execution(#[from] BlockExecutionError), +} diff --git a/crates/stages/stages/Cargo.toml b/crates/stages/stages/Cargo.toml index 65bb2637b620..eedd5f9ca41e 100644 --- a/crates/stages/stages/Cargo.toml +++ b/crates/stages/stages/Cargo.toml @@ -40,6 +40,7 @@ reth-trie-db = { workspace = true, features = ["metrics"] } reth-testing-utils = { workspace = true, optional = true } alloy-primitives.workspace = true +alloy-consensus.workspace = true # async tokio = { workspace = true, features = ["sync"] } @@ -93,25 +94,25 @@ pprof = { workspace = true, features = [ [features] test-utils = [ - "dep:reth-chainspec", - "reth-network-p2p/test-utils", - "reth-db/test-utils", - "reth-provider/test-utils", - "reth-stages-api/test-utils", - "dep:reth-testing-utils", - "dep:tempfile", - "reth-chainspec?/test-utils", - "reth-consensus/test-utils", - "reth-evm/test-utils", - "reth-downloaders/test-utils", - "reth-primitives/test-utils", - "reth-primitives-traits/test-utils", - "reth-revm/test-utils", - "reth-codecs/test-utils", - "reth-db-api/test-utils", - "reth-trie-db/test-utils", - "reth-trie/test-utils", - "reth-prune-types/test-utils" + "dep:reth-chainspec", + "reth-network-p2p/test-utils", + "reth-db/test-utils", + "reth-provider/test-utils", + "reth-stages-api/test-utils", + "dep:reth-testing-utils", + "dep:tempfile", + "reth-chainspec?/test-utils", + "reth-consensus/test-utils", + "reth-evm/test-utils", + "reth-downloaders/test-utils", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-revm/test-utils", + "reth-codecs/test-utils", + "reth-db-api/test-utils", + "reth-trie-db/test-utils", + "reth-trie/test-utils", + "reth-prune-types/test-utils", ] [[bench]] diff --git a/crates/stages/stages/benches/criterion.rs b/crates/stages/stages/benches/criterion.rs index 7519d81a3622..0f876dd7011a 100644 --- a/crates/stages/stages/benches/criterion.rs +++ b/crates/stages/stages/benches/criterion.rs @@ -2,12 +2,11 @@ use criterion::{criterion_main, measurement::WallTime, BenchmarkGroup, Criterion}; #[cfg(not(target_os = "windows"))] use pprof::criterion::{Output, PProfProfiler}; -use reth_chainspec::ChainSpec; use reth_config::config::{EtlConfig, TransactionLookupConfig}; use reth_db::{test_utils::TempDatabase, Database, DatabaseEnv}; use alloy_primitives::BlockNumber; -use reth_provider::{DatabaseProvider, DatabaseProviderFactory}; +use reth_provider::{test_utils::MockNodeTypesWithDB, DatabaseProvider, DatabaseProviderFactory}; use reth_stages::{ stages::{MerkleStage, SenderRecoveryStage, TransactionLookupStage}, test_utils::TestStageDB, @@ -148,7 +147,8 @@ fn measure_stage(
block_interval: RangeInclusive<BlockNumber>, label: String, ) where - S: Clone + Stage<DatabaseProvider<<TempDatabase<DatabaseEnv> as Database>::TXMut, ChainSpec>>, + S: Clone + + Stage<DatabaseProvider<<TempDatabase<DatabaseEnv> as Database>::TXMut, MockNodeTypesWithDB>>, F: Fn(S, &TestStageDB, StageRange), { let stage_range = ( diff --git a/crates/stages/stages/benches/setup/mod.rs b/crates/stages/stages/benches/setup/mod.rs index 4812fb13c39a..c1c3ff89d727 100644 --- a/crates/stages/stages/benches/setup/mod.rs +++ b/crates/stages/stages/benches/setup/mod.rs @@ -1,14 +1,15 @@ #![allow(unreachable_pub)] -use alloy_primitives::{Address, Sealable, B256, U256}; +use alloy_primitives::{Address, B256, U256}; use itertools::concat; -use reth_chainspec::ChainSpec; use reth_db::{tables, test_utils::TempDatabase, Database, DatabaseEnv}; use reth_db_api::{ cursor::DbCursorRO, transaction::{DbTx, DbTxMut}, }; use reth_primitives::{Account, SealedBlock, SealedHeader}; -use reth_provider::{DatabaseProvider, DatabaseProviderFactory, TrieWriter}; +use reth_provider::{ + test_utils::MockNodeTypesWithDB, DatabaseProvider, DatabaseProviderFactory, TrieWriter, +}; use reth_stages::{ stages::{AccountHashingStage, StorageHashingStage}, test_utils::{StorageKind, TestStageDB}, @@ -31,7 +32,8 @@ use reth_trie_db::DatabaseStateRoot; pub(crate) type StageRange = (ExecInput, UnwindInput); pub(crate) fn stage_unwind< - S: Clone + Stage<DatabaseProvider<<TempDatabase<DatabaseEnv> as Database>::TXMut, ChainSpec>>, + S: Clone + + Stage<DatabaseProvider<<TempDatabase<DatabaseEnv> as Database>::TXMut, MockNodeTypesWithDB>>, >( stage: S, db: &TestStageDB, @@ -63,7 +65,8 @@ pub(crate) fn stage_unwind< pub(crate) fn unwind_hashes<S>(stage: S, db: &TestStageDB, range: StageRange) where - S: Clone + Stage<DatabaseProvider<<TempDatabase<DatabaseEnv> as Database>::TXMut, ChainSpec>>, + S: Clone + + Stage<DatabaseProvider<<TempDatabase<DatabaseEnv> as Database>::TXMut, MockNodeTypesWithDB>>, { let (input, unwind) = range; @@ -144,9 +147,7 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> TestStageDB { let cloned_second = second_block.clone(); let mut updated_header = cloned_second.header.unseal(); updated_header.state_root = root; - let sealed = updated_header.seal_slow(); - let (header, seal) = sealed.into_parts(); - *second_block = SealedBlock { header: SealedHeader::new(header, seal), ..cloned_second }; + *second_block = SealedBlock { header: SealedHeader::seal(updated_header), ..cloned_second }; let offset = transitions.len() as u64; @@ -179,9 +180,7 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> TestStageDB { let cloned_last = last_block.clone(); let mut updated_header = cloned_last.header.unseal(); updated_header.state_root = root; - let sealed = updated_header.seal_slow(); - let (header, seal) = sealed.into_parts(); - *last_block = SealedBlock { header: SealedHeader::new(header, seal), ..cloned_last }; + *last_block = SealedBlock { header: SealedHeader::seal(updated_header), ..cloned_last }; db.insert_blocks(blocks.iter(), StorageKind::Static).unwrap(); diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index 06a5250913ed..eae61b088fde 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -60,7 +60,7 @@ pub struct BodyStage { /// The body downloader. downloader: D, /// Block response buffer.
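The bench and test setup above repeatedly collapses the three-step `seal_slow()` / `into_parts()` / `SealedHeader::new` sequence into a single `SealedHeader::seal` call. A std-only analog of that refactor; the `Sealed` wrapper and `DefaultHasher` here are illustrative stand-ins, not reth's keccak-based header sealing:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// A value paired with the hash ("seal") computed over it.
struct Sealed<T> {
    inner: T,
    seal: u64,
}

impl<T: Hash> Sealed<T> {
    /// One-step constructor: the shape that `SealedHeader::seal` provides,
    /// replacing seal_slow() followed by into_parts() + new().
    fn seal(inner: T) -> Self {
        let mut h = DefaultHasher::new();
        inner.hash(&mut h);
        let seal = h.finish();
        Self { inner, seal }
    }
}

fn main() {
    let sealed = Sealed::seal(42u64);
    println!("value {} sealed as {:#x}", sealed.inner, sealed.seal);
}
```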
- buffer: Option<Vec<BlockResponse>>, + buffer: Option<Vec<BlockResponse<D::Body>>>, } impl BodyStage { @@ -70,9 +70,10 @@ impl BodyStage { } } -impl<Provider, D: BodyDownloader> Stage<Provider> for BodyStage<D> +impl<Provider, D> Stage<Provider> for BodyStage<D> where Provider: DBProvider + StaticFileProviderFactory + StatsReader + BlockReader, + D: BodyDownloader, { /// Return the id of the stage fn id(&self) -> StageId { @@ -608,6 +609,7 @@ mod tests { UnwindStageTestRunner, }, }; + use alloy_consensus::Header; use alloy_primitives::{BlockHash, BlockNumber, TxNumber, B256}; use futures_util::Stream; use reth_db::{static_file::HeaderMask, tables}; @@ -623,7 +625,7 @@ mod tests { }, error::DownloadResult, }; - use reth_primitives::{BlockBody, Header, SealedBlock, SealedHeader, StaticFileSegment}; + use reth_primitives::{BlockBody, SealedBlock, SealedHeader, StaticFileSegment}; use reth_provider::{ providers::StaticFileWriter, test_utils::MockNodeTypesWithDB, HeaderProvider, ProviderFactory, StaticFileProviderFactory, TransactionsProvider, @@ -889,6 +891,8 @@ mod tests { } impl BodyDownloader for TestBodyDownloader { + type Body = BlockBody; + fn set_download_range( &mut self, range: RangeInclusive<BlockNumber>, @@ -909,7 +913,7 @@ } impl Stream for TestBodyDownloader { - type Item = BodyDownloaderResult; + type Item = BodyDownloaderResult<BlockBody>; fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { let this = self.get_mut(); diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 47cd9d0445a2..630cc6df03de 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -1,5 +1,6 @@ use crate::stages::MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD; -use alloy_primitives::{BlockNumber, Sealable}; +use alloy_consensus::Header; +use alloy_primitives::BlockNumber; use num_traits::Zero; use reth_config::config::ExecutionConfig; use reth_db::{static_file::HeaderMask, tables}; @@ -8,9 +9,9 @@ use reth_evm::{ execute::{BatchExecutor, BlockExecutorProvider}, metrics::ExecutorMetrics, }; -use reth_execution_types::{Chain, ExecutionOutcome}; +use reth_execution_types::Chain; use reth_exex::{ExExManagerHandle, ExExNotification, ExExNotificationSource}; -use reth_primitives::{Header, SealedHeader, StaticFileSegment}; +use reth_primitives::{SealedHeader, StaticFileSegment}; use reth_primitives_traits::format_gas_throughput; use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, @@ -276,11 +277,8 @@ where let execute_start = Instant::now(); self.metrics.metered_one((&block, td).into(), |input| { - let sealed = block.header.clone().seal_slow(); - let (header, seal) = sealed.into_parts(); - executor.execute_and_verify_one(input).map_err(|error| StageError::Block { - block: Box::new(SealedHeader::new(header, seal)), + block: Box::new(SealedHeader::seal(block.header.clone())), error: BlockErrorKind::Execution(error), }) })?; @@ -325,8 +323,7 @@ where // prepare execution output for writing let time = Instant::now(); - let ExecutionOutcome { bundle, receipts, requests, first_block } = executor.finalize(); - let state = ExecutionOutcome::new(bundle, receipts, first_block, requests); + let state = executor.finalize(); let write_preparation_duration = time.elapsed(); // log the gas per second for the range we just executed diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs index 14afb37d81db..1ca0e1aa1325 100644 ---
b/crates/stages/stages/src/stages/hashing_account.rs @@ -58,11 +58,8 @@ impl AccountHashingStage { /// /// Proceeds to go to the `BlockTransitionIndex` end, go back `transitions` and change the /// account state in the `AccountChangeSets` table. - pub fn seed< - Tx: DbTx + DbTxMut + 'static, - Spec: Send + Sync + 'static + reth_chainspec::EthereumHardforks, - >( - provider: &reth_provider::DatabaseProvider<Tx, Spec>, + pub fn seed<Tx: DbTx + DbTxMut + 'static, N: NodeTypes>( + provider: &reth_provider::DatabaseProvider<Tx, N>, opts: SeedOpts, ) -> Result<Vec<(Address, Account)>, StageError> { use alloy_primitives::U256; @@ -234,7 +231,7 @@ where input.unwind_block_range_with_threshold(self.commit_threshold); // Aggregate all transition changesets and make a list of accounts that have been changed. - provider.unwind_account_hashing(range)?; + provider.unwind_account_hashing_range(range)?; let mut stage_checkpoint = input.checkpoint.account_hashing_stage_checkpoint().unwrap_or_default(); diff --git a/crates/stages/stages/src/stages/hashing_storage.rs b/crates/stages/stages/src/stages/hashing_storage.rs index ef070d30c6d6..dcabbe83ee64 100644 --- a/crates/stages/stages/src/stages/hashing_storage.rs +++ b/crates/stages/stages/src/stages/hashing_storage.rs @@ -169,7 +169,7 @@ where let (range, unwind_progress, _) = input.unwind_block_range_with_threshold(self.commit_threshold); - provider.unwind_storage_hashing(BlockNumberAddress::range(range))?; + provider.unwind_storage_hashing_range(BlockNumberAddress::range(range))?; let mut stage_checkpoint = input.checkpoint.storage_hashing_stage_checkpoint().unwrap_or_default(); diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index 49e687a96a1d..133a0719d910 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -194,7 +194,7 @@ where impl<Provider, P, D> Stage<Provider> for HeaderStage<P, D> where P: HeaderSyncGapProvider, - D: HeaderDownloader, + D: HeaderDownloader<Header = alloy_consensus::Header>
, Provider: DBProvider + StaticFileProviderFactory, { /// Return the id of the stage @@ -392,7 +392,7 @@ mod tests { use crate::test_utils::{ stage_test_suite, ExecuteStageTestRunner, StageTestRunner, UnwindStageTestRunner, }; - use alloy_primitives::{Sealable, B256}; + use alloy_primitives::B256; use assert_matches::assert_matches; use reth_execution_types::ExecutionOutcome; use reth_primitives::{BlockBody, SealedBlock, SealedBlockWithSenders}; @@ -441,7 +441,9 @@ mod tests { } } - impl<D: HeaderDownloader + 'static> StageTestRunner for HeadersTestRunner<D> { + impl<D: HeaderDownloader<Header = alloy_consensus::Header> + 'static> StageTestRunner + for HeadersTestRunner<D> + { type S = HeaderStage<ProviderFactory<MockNodeTypesWithDB>, D>; fn db(&self) -> &TestStageDB { @@ -459,7 +461,9 @@ } } - impl<D: HeaderDownloader + 'static> ExecuteStageTestRunner for HeadersTestRunner<D> { + impl<D: HeaderDownloader<Header = alloy_consensus::Header> + 'static> ExecuteStageTestRunner + for HeadersTestRunner<D> + { type Seed = Vec<SealedHeader>; fn seed_execution(&mut self, input: ExecInput) -> Result<Self::Seed, TestRunnerError> { @@ -505,9 +509,7 @@ // validate the header let header = provider.header_by_number(block_num)?; assert!(header.is_some()); - let sealed = header.unwrap().seal_slow(); - let (header, seal) = sealed.into_parts(); - let header = SealedHeader::new(header, seal); + let header = SealedHeader::seal(header.unwrap()); assert_eq!(header.hash(), hash); // validate the header total difficulty @@ -537,7 +539,9 @@ } } - impl<D: HeaderDownloader + 'static> UnwindStageTestRunner for HeadersTestRunner<D> { + impl<D: HeaderDownloader<Header = alloy_consensus::Header> + 'static> UnwindStageTestRunner + for HeadersTestRunner<D> + { fn validate_unwind(&self, input: UnwindInput) -> Result<(), TestRunnerError> { self.check_no_header_entry_above(input.unwind_to) } diff --git a/crates/stages/stages/src/stages/index_account_history.rs b/crates/stages/stages/src/stages/index_account_history.rs index 8b10283fb4b7..38c238e5d988 100644 --- a/crates/stages/stages/src/stages/index_account_history.rs +++ b/crates/stages/stages/src/stages/index_account_history.rs @@ -134,7 +134,7 @@ where let (range, unwind_progress, _) = input.unwind_block_range_with_threshold(self.commit_threshold); - provider.unwind_account_history_indices(range)?; + provider.unwind_account_history_indices_range(range)?; // from HistoryIndex higher than that number.
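The headers-stage bounds above pin the downloader's associated type (`D: HeaderDownloader<Header = ...>`), so the stage can name exactly what the downloader produces. A std-only sketch of that bound shape, with hypothetical `Downloader`/`Fixed` types standing in for the reth traits:

```rust
// Pinning an associated type lets downstream code require a specific output type.
trait Downloader {
    type Header;
    fn next(&mut self) -> Option<Self::Header>;
}

struct Fixed(Vec<u64>);

impl Downloader for Fixed {
    type Header = u64;
    fn next(&mut self) -> Option<u64> {
        self.0.pop()
    }
}

// Only accepts downloaders whose associated Header is u64, mirroring
// `D: HeaderDownloader<Header = alloy_consensus::Header>` in the stage bounds.
fn drain<D: Downloader<Header = u64>>(mut d: D) -> Vec<u64> {
    std::iter::from_fn(move || d.next()).collect()
}

fn main() {
    assert_eq!(drain(Fixed(vec![1, 2, 3])), vec![3, 2, 1]);
}
```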
Ok(UnwindOutput { checkpoint: StageCheckpoint::new(unwind_progress) }) diff --git a/crates/stages/stages/src/stages/index_storage_history.rs b/crates/stages/stages/src/stages/index_storage_history.rs index ac645b8dd754..ba61e6312302 100644 --- a/crates/stages/stages/src/stages/index_storage_history.rs +++ b/crates/stages/stages/src/stages/index_storage_history.rs @@ -140,7 +140,7 @@ where let (range, unwind_progress, _) = input.unwind_block_range_with_threshold(self.commit_threshold); - provider.unwind_storage_history_indices(BlockNumberAddress::range(range))?; + provider.unwind_storage_history_indices_range(BlockNumberAddress::range(range))?; Ok(UnwindOutput { checkpoint: StageCheckpoint::new(unwind_progress) }) } diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs index d1d3496d917a..2d2503b53919 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ b/crates/stages/stages/src/stages/merkle.rs @@ -1,4 +1,4 @@ -use alloy_primitives::{BlockNumber, Sealable, B256}; +use alloy_primitives::{BlockNumber, B256}; use reth_codecs::Compact; use reth_consensus::ConsensusError; use reth_db::tables; @@ -276,10 +276,7 @@ where // Reset the checkpoint self.save_execution_checkpoint(provider, None)?; - let sealed = target_block.seal_slow(); - let (header, seal) = sealed.into_parts(); - - validate_state_root(trie_root, SealedHeader::new(header, seal), to_block)?; + validate_state_root(trie_root, SealedHeader::seal(target_block), to_block)?; Ok(ExecOutput { checkpoint: StageCheckpoint::new(to_block) @@ -332,10 +329,7 @@ where .header_by_number(input.unwind_to)? .ok_or_else(|| ProviderError::HeaderNotFound(input.unwind_to.into()))?; - let sealed = target.seal_slow(); - let (header, seal) = sealed.into_parts(); - - validate_state_root(block_root, SealedHeader::new(header, seal), input.unwind_to)?; + validate_state_root(block_root, SealedHeader::seal(target), input.unwind_to)?; // Validation passed, apply unwind changes to the database. provider.write_trie_updates(&updates)?; @@ -538,9 +532,7 @@ mod tests { .into_iter() .map(|(address, account)| (address, (account, std::iter::empty()))), ); - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - let sealed_head = SealedBlock { header: SealedHeader::new(header, seal), body }; + let sealed_head = SealedBlock { header: SealedHeader::seal(header), body }; let head_hash = sealed_head.hash(); let mut blocks = vec![sealed_head]; diff --git a/crates/stages/stages/src/test_utils/runner.rs b/crates/stages/stages/src/test_utils/runner.rs index 26f245c1304d..c3d25b99536a 100644 --- a/crates/stages/stages/src/test_utils/runner.rs +++ b/crates/stages/stages/src/test_utils/runner.rs @@ -1,7 +1,6 @@ use super::TestStageDB; -use reth_chainspec::ChainSpec; use reth_db::{test_utils::TempDatabase, Database, DatabaseEnv}; -use reth_provider::{DatabaseProvider, ProviderError}; +use reth_provider::{test_utils::MockNodeTypesWithDB, DatabaseProvider, ProviderError}; use reth_stages_api::{ ExecInput, ExecOutput, Stage, StageError, StageExt, UnwindInput, UnwindOutput, }; @@ -20,7 +19,7 @@ pub(crate) enum TestRunnerError { /// A generic test runner for stages. pub(crate) trait StageTestRunner { - type S: Stage<DatabaseProvider<<TempDatabase<DatabaseEnv> as Database>::TXMut, ChainSpec>> + type S: Stage<DatabaseProvider<<TempDatabase<DatabaseEnv> as Database>::TXMut, MockNodeTypesWithDB>> + 'static; /// Return a reference to the database.
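The `type S` bound above names an associated type of a concrete impl with fully-qualified syntax (`<TempDatabase<DatabaseEnv> as Database>::TXMut`) inside another generic. A std-only sketch of that projection pattern, with hypothetical `Database`/`Env`/`Provider` types standing in for the reth ones:

```rust
// Naming an associated type of a concrete impl inside another bound.
trait Database {
    type TXMut;
    fn tx_mut(&self) -> Self::TXMut;
}

struct Env;
struct RwTx;

impl Database for Env {
    type TXMut = RwTx;
    fn tx_mut(&self) -> RwTx {
        RwTx
    }
}

struct Provider<Tx>(Tx);

// The stage only needs "a provider over Env's mutable transaction type";
// the projection keeps that in sync with the Database impl automatically.
fn run_stage(_p: Provider<<Env as Database>::TXMut>) {}

fn main() {
    run_stage(Provider(Env.tx_mut()));
}
```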
diff --git a/crates/static-file/static-file/src/lib.rs b/crates/static-file/static-file/src/lib.rs index 1bfe4134e954..6c95baaae920 100644 --- a/crates/static-file/static-file/src/lib.rs +++ b/crates/static-file/static-file/src/lib.rs @@ -7,14 +7,12 @@ )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -mod event; pub mod segments; mod static_file_producer; -pub use event::StaticFileProducerEvent; pub use static_file_producer::{ StaticFileProducer, StaticFileProducerInner, StaticFileProducerResult, - StaticFileProducerWithResult, StaticFileTargets, + StaticFileProducerWithResult, }; // Re-export for convenience. diff --git a/crates/static-file/static-file/src/static_file_producer.rs b/crates/static-file/static-file/src/static_file_producer.rs index 2c442aedfa34..0f07ec328214 100644 --- a/crates/static-file/static-file/src/static_file_producer.rs +++ b/crates/static-file/static-file/src/static_file_producer.rs @@ -10,7 +10,7 @@ use reth_provider::{ }; use reth_prune_types::PruneModes; use reth_stages_types::StageId; -use reth_static_file_types::HighestStaticFiles; +use reth_static_file_types::{HighestStaticFiles, StaticFileTargets}; use reth_storage_errors::provider::ProviderResult; use reth_tokio_util::{EventSender, EventStream}; use std::{ @@ -66,40 +66,6 @@ pub struct StaticFileProducerInner { event_sender: EventSender<StaticFileProducerEvent>, } -/// Static File targets, per data segment, measured in [`BlockNumber`]. -#[derive(Debug, Clone, Eq, PartialEq)] -pub struct StaticFileTargets { - headers: Option<RangeInclusive<BlockNumber>>, - receipts: Option<RangeInclusive<BlockNumber>>, - transactions: Option<RangeInclusive<BlockNumber>>, -} - -impl StaticFileTargets { - /// Returns `true` if any of the targets are [Some]. - pub const fn any(&self) -> bool { - self.headers.is_some() || self.receipts.is_some() || self.transactions.is_some() - } - - // Returns `true` if all targets are either [`None`] or has beginning of the range equal to the - // highest static_file. - fn is_contiguous_to_highest_static_files(&self, static_files: HighestStaticFiles) -> bool { - [ - (self.headers.as_ref(), static_files.headers), - (self.receipts.as_ref(), static_files.receipts), - (self.transactions.as_ref(), static_files.transactions), - ] - .iter() - .all(|(target_block_range, highest_static_fileted_block)| { - target_block_range.map_or(true, |target_block_range| { - *target_block_range.start() == - highest_static_fileted_block.map_or(0, |highest_static_fileted_block| { - highest_static_fileted_block + 1 - }) - }) - }) - } -} - impl<Provider> StaticFileProducerInner<Provider> { fn new(provider: Provider, prune_modes: PruneModes) -> Self { Self { provider, prune_modes, event_sender: Default::default() } diff --git a/crates/static-file/static-file/src/event.rs b/crates/static-file/types/src/event.rs similarity index 87% rename from crates/static-file/static-file/src/event.rs rename to crates/static-file/types/src/event.rs index a11333ce53a2..1e5d2cb6032e 100644 --- a/crates/static-file/static-file/src/event.rs +++ b/crates/static-file/types/src/event.rs @@ -1,7 +1,7 @@ use crate::StaticFileTargets; use std::time::Duration; -/// An event emitted by a [`StaticFileProducer`][crate::StaticFileProducer]. +/// An event emitted by the static file producer. #[derive(Debug, PartialEq, Eq, Clone)] pub enum StaticFileProducerEvent { /// Emitted when static file producer started running.
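The `is_contiguous_to_highest_static_files` helper being moved here requires each targeted range to start exactly one block past the highest block already written for that segment, or at block 0 when no static file exists yet. A std-only sketch of the rule for a single segment:

```rust
use std::ops::RangeInclusive;

// A target range must begin exactly one block past the highest block already
// in static files (or at 0 if none exist); `None` targets are trivially fine.
fn is_contiguous(target: Option<&RangeInclusive<u64>>, highest: Option<u64>) -> bool {
    target.map_or(true, |range| *range.start() == highest.map_or(0, |h| h + 1))
}

fn main() {
    assert!(is_contiguous(None, Some(99))); // nothing targeted
    assert!(is_contiguous(Some(&(100..=200)), Some(99))); // starts at 99 + 1
    assert!(!is_contiguous(Some(&(150..=200)), Some(99))); // gap: would skip 100..150
    assert!(is_contiguous(Some(&(0..=10)), None)); // empty static files, start at 0
}
```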
diff --git a/crates/static-file/types/src/lib.rs b/crates/static-file/types/src/lib.rs index 6e954a781b71..4e9bf90f1c90 100644 --- a/crates/static-file/types/src/lib.rs +++ b/crates/static-file/types/src/lib.rs @@ -9,11 +9,14 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod compression; +mod event; mod segment; use alloy_primitives::BlockNumber; pub use compression::Compression; +pub use event::StaticFileProducerEvent; pub use segment::{SegmentConfig, SegmentHeader, SegmentRangeInclusive, StaticFileSegment}; +use std::ops::RangeInclusive; /// Default static file block count. pub const DEFAULT_BLOCKS_PER_STATIC_FILE: u64 = 500_000; @@ -62,6 +65,43 @@ impl HighestStaticFiles { } } +/// Static File targets, per data segment, measured in [`BlockNumber`]. +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct StaticFileTargets { + /// Targeted range of headers. + pub headers: Option<RangeInclusive<BlockNumber>>, + /// Targeted range of receipts. + pub receipts: Option<RangeInclusive<BlockNumber>>, + /// Targeted range of transactions. + pub transactions: Option<RangeInclusive<BlockNumber>>, +} + +impl StaticFileTargets { + /// Returns `true` if any of the targets are [Some]. + pub const fn any(&self) -> bool { + self.headers.is_some() || self.receipts.is_some() || self.transactions.is_some() + } + + /// Returns `true` if all targets are either [`None`] or have the beginning of the range equal + /// to the highest static file. + pub fn is_contiguous_to_highest_static_files(&self, static_files: HighestStaticFiles) -> bool { + [ + (self.headers.as_ref(), static_files.headers), + (self.receipts.as_ref(), static_files.receipts), + (self.transactions.as_ref(), static_files.transactions), + ] + .iter() + .all(|(target_block_range, highest_static_fileted_block)| { + target_block_range.map_or(true, |target_block_range| { + *target_block_range.start() == + highest_static_fileted_block.map_or(0, |highest_static_fileted_block| { + highest_static_fileted_block + 1 + }) + }) + }) + } +} + /// Each static file has a fixed number of blocks. This gives out the range where the requested /// block is positioned. Used for segment filename. pub const fn find_fixed_range( diff --git a/crates/storage/codecs/derive/src/arbitrary.rs b/crates/storage/codecs/derive/src/arbitrary.rs index 8aa44062e21f..753bb1e33a51 100644 --- a/crates/storage/codecs/derive/src/arbitrary.rs +++ b/crates/storage/codecs/derive/src/arbitrary.rs @@ -18,10 +18,26 @@ pub fn maybe_generate_tests( let mut traits = vec![]; let mut roundtrips = vec![]; let mut additional_tests = vec![]; + let mut is_crate = false; - for arg in args { + let mut iter = args.into_iter().peekable(); + + // we check if there's a crate argument which is used from inside the codecs crate directly + if let Some(arg) = iter.peek() { + if arg.to_string() == "crate" { + is_crate = true; + iter.next(); + } + } + + for arg in iter { if arg.to_string() == "compact" { - traits.push(quote! { use super::Compact; }); + let path = if is_crate { + quote! { use crate::Compact; } + } else { + quote! { use reth_codecs::Compact; } + }; + traits.push(path); roundtrips.push(quote! { { let mut buf = vec![]; diff --git a/crates/storage/codecs/derive/src/compact/generator.rs b/crates/storage/codecs/derive/src/compact/generator.rs index 1fb6d40fa2b2..cf9bcc0c6295 100644 --- a/crates/storage/codecs/derive/src/compact/generator.rs +++ b/crates/storage/codecs/derive/src/compact/generator.rs @@ -2,10 +2,12 @@ use super::*; use convert_case::{Case, Casing}; +use syn::{Attribute, LitStr}; /// Generates code to implement the `Compact` trait for a data type.
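`maybe_generate_tests` above peeks at the first macro argument so a leading `crate` flag can be consumed before the normal argument loop runs. A std-only sketch of that peek-then-consume scan over plain strings (the `parse` helper is a hypothetical stand-in for the proc-macro logic):

```rust
// A leading "crate" flag is consumed; everything else is processed in order.
fn parse(args: &[&str]) -> (bool, Vec<String>) {
    let mut iter = args.iter().peekable();
    let mut is_crate = false;
    // Peek first, so a non-matching argument is left for the main loop.
    if let Some(first) = iter.peek() {
        if **first == "crate" {
            is_crate = true;
            iter.next();
        }
    }
    (is_crate, iter.map(|s| s.to_string()).collect())
}

fn main() {
    assert_eq!(parse(&["crate", "compact"]), (true, vec!["compact".into()]));
    assert_eq!(parse(&["compact"]), (false, vec!["compact".into()]));
}
```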
pub fn generate_from_to( ident: &Ident, + attrs: &[Attribute], has_lifetime: bool, fields: &FieldList, is_zstd: bool, @@ -20,6 +22,8 @@ pub fn generate_from_to( let fuzz = format_ident!("fuzz_test_{snake_case_ident}"); let test = format_ident!("fuzz_{snake_case_ident}"); + let reth_codecs = parse_reth_codecs_path(attrs).unwrap(); + let lifetime = if has_lifetime { quote! { 'a } } else { @@ -28,11 +32,11 @@ pub fn generate_from_to( let impl_compact = if has_lifetime { quote! { - impl<#lifetime> Compact for #ident<#lifetime> + impl<#lifetime> #reth_codecs::Compact for #ident<#lifetime> } } else { quote! { - impl Compact for #ident + impl #reth_codecs::Compact for #ident } }; @@ -53,6 +57,7 @@ pub fn generate_from_to( #[allow(dead_code)] #[test_fuzz::test_fuzz] fn #fuzz(obj: #ident) { + use #reth_codecs::Compact; let mut buf = vec![]; let len = obj.clone().to_compact(&mut buf); let (same_obj, buf) = #ident::from_compact(buf.as_ref(), len); @@ -191,7 +196,7 @@ fn generate_to_compact(fields: &FieldList, ident: &Ident, is_zstd: bool) -> Vec< } // Just because a type supports compression, doesn't mean all its values are to be compressed. - // We skip the smaller ones, and thus require a flag `__zstd` to specify if this value is + // We skip the smaller ones, and thus require a flag `__zstd` to specify if this value is + // compressed or not. if is_zstd { lines.push(quote! { @@ -232,3 +237,25 @@ fn generate_to_compact(fields: &FieldList, ident: &Ident, is_zstd: bool) -> Vec< lines } + +/// Function to extract the crate path from the `reth_codecs(crate = "...")` attribute. +fn parse_reth_codecs_path(attrs: &[Attribute]) -> syn::Result<syn::Path> { + let mut reth_codecs_path: syn::Path = syn::parse_quote!(reth_codecs); + for attr in attrs { + if attr.path().is_ident("reth_codecs") { + attr.parse_nested_meta(|meta| { + if meta.path.is_ident("crate") { + let value = meta.value()?; + let lit: LitStr = value.parse()?; + reth_codecs_path = syn::parse_str(&lit.value())?; + Ok(()) + } else { + Err(meta.error("unsupported attribute")) + } + })?; + } + } + + Ok(reth_codecs_path) +} diff --git a/crates/storage/codecs/derive/src/compact/mod.rs b/crates/storage/codecs/derive/src/compact/mod.rs index e5a79b3fe530..b9d5cf18d6b7 100644 --- a/crates/storage/codecs/derive/src/compact/mod.rs +++ b/crates/storage/codecs/derive/src/compact/mod.rs @@ -43,13 +43,13 @@ pub enum FieldTypes { pub fn derive(input: TokenStream, is_zstd: bool) -> TokenStream { let mut output = quote! {}; - let DeriveInput { ident, data, generics, .. } = parse_macro_input!(input); + let DeriveInput { ident, data, generics, attrs, .. } = parse_macro_input!(input); let has_lifetime = has_lifetime(&generics); let fields = get_fields(&data); output.extend(generate_flag_struct(&ident, has_lifetime, &fields, is_zstd)); - output.extend(generate_from_to(&ident, has_lifetime, &fields, is_zstd)); + output.extend(generate_from_to(&ident, &attrs, has_lifetime, &fields, is_zstd)); output.into() } @@ -233,10 +233,10 @@ mod tests { // Generate code that will impl the `Compact` trait. let mut output = quote! {}; - let DeriveInput { ident, data, .. } = parse2(f_struct).unwrap(); + let DeriveInput { ident, data, attrs, ..
} = parse2(f_struct).unwrap(); let fields = get_fields(&data); output.extend(generate_flag_struct(&ident, false, &fields, false)); - output.extend(generate_from_to(&ident, false, &fields, false)); + output.extend(generate_from_to(&ident, &attrs, false, &fields, false)); // Expected output in a TokenStream format. Commas matter! let should_output = quote! { @@ -285,6 +285,7 @@ mod tests { #[allow(dead_code)] #[test_fuzz::test_fuzz] fn fuzz_test_test_struct(obj: TestStruct) { + use reth_codecs::Compact; let mut buf = vec![]; let len = obj.clone().to_compact(&mut buf); let (same_obj, buf) = TestStruct::from_compact(buf.as_ref(), len); @@ -295,7 +296,7 @@ mod tests { pub fn fuzz_test_struct() { fuzz_test_test_struct(TestStruct::default()) } - impl Compact for TestStruct { + impl reth_codecs::Compact for TestStruct { fn to_compact<B>(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]> { let mut flags = TestStructFlags::default(); let mut total_length = 0; diff --git a/crates/storage/codecs/derive/src/lib.rs b/crates/storage/codecs/derive/src/lib.rs index 4ffdbfd6ef64..0b4015830f5d 100644 --- a/crates/storage/codecs/derive/src/lib.rs +++ b/crates/storage/codecs/derive/src/lib.rs @@ -49,14 +49,14 @@ mod compact; /// own encoding and do not rely on the bitflag struct. /// - `Bytes` fields and any types containing a `Bytes` field should be placed last to ensure /// efficient decoding. -#[proc_macro_derive(Compact, attributes(maybe_zero))] +#[proc_macro_derive(Compact, attributes(maybe_zero, reth_codecs))] pub fn derive(input: TokenStream) -> TokenStream { let is_zstd = false; compact::derive(input, is_zstd) } /// Adds `zstd` compression to derived [`Compact`]. -#[proc_macro_derive(CompactZstd, attributes(maybe_zero))] +#[proc_macro_derive(CompactZstd, attributes(maybe_zero, reth_codecs))] pub fn derive_zstd(input: TokenStream) -> TokenStream { let is_zstd = true; compact::derive(input, is_zstd) diff --git a/crates/storage/codecs/src/alloy/authorization_list.rs b/crates/storage/codecs/src/alloy/authorization_list.rs index 3fc9518a6376..15285f360473 100644 --- a/crates/storage/codecs/src/alloy/authorization_list.rs +++ b/crates/storage/codecs/src/alloy/authorization_list.rs @@ -11,12 +11,13 @@ use reth_codecs_derive::add_arbitrary_tests; /// /// Notice: Make sure this struct is 1:1 with `alloy_eips::eip7702::Authorization` #[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] +#[reth_codecs(crate = "crate")] #[cfg_attr( any(test, feature = "test-utils"), derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) )] #[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] -#[add_arbitrary_tests(compact)] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct Authorization { chain_id: u64, address: Address, @@ -84,12 +85,11 @@ mod tests { nonce: 1, } .into_signed( - alloy_primitives::Signature::from_rs_and_parity( + alloy_primitives::PrimitiveSignature::new( b256!("1fd474b1f9404c0c5df43b7620119ffbc3a1c3f942c73b6e14e9f55255ed9b1d").into(), b256!("29aca24813279a901ec13b5f7bb53385fa1fc627b946592221417ff74a49600d").into(), false, ) - .unwrap(), ); let mut compacted_authorization = Vec::<u8>::new(); let len = authorization.to_compact(&mut compacted_authorization); diff --git a/crates/storage/codecs/src/alloy/genesis_account.rs b/crates/storage/codecs/src/alloy/genesis_account.rs index b29fe526df4f..a35d4947db79 100644 @@ -11,6 +11,7 @@ use
reth_codecs_derive::add_arbitrary_tests; /// /// Notice: Make sure this struct is 1:1 with `alloy_genesis::GenesisAccount` #[derive(Debug, Clone, PartialEq, Eq, Compact)] +#[reth_codecs(crate = "crate")] pub(crate) struct GenesisAccountRef<'a> { /// The nonce of the account at genesis. nonce: Option<u64>, @@ -27,12 +28,13 @@ pub(crate) struct GenesisAccountRef<'a> { /// Acts as bridge which simplifies Compact implementation for /// `AlloyGenesisAccount`. #[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] +#[reth_codecs(crate = "crate")] #[cfg_attr( any(test, feature = "test-utils"), derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) )] #[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] -#[add_arbitrary_tests(compact)] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct GenesisAccount { /// The nonce of the account at genesis. nonce: Option<u64>, @@ -47,21 +49,23 @@ pub(crate) struct GenesisAccount { } #[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] +#[reth_codecs(crate = "crate")] #[cfg_attr( any(test, feature = "test-utils"), derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) )] -#[add_arbitrary_tests(compact)] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct StorageEntries { entries: Vec<StorageEntry>, } #[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] +#[reth_codecs(crate = "crate")] #[cfg_attr( any(test, feature = "test-utils"), derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) )] -#[add_arbitrary_tests(compact)] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct StorageEntry { key: B256, value: B256, diff --git a/crates/storage/codecs/src/alloy/header.rs b/crates/storage/codecs/src/alloy/header.rs index 78f2029c32ea..04b7d6ab718b 100644 --- a/crates/storage/codecs/src/alloy/header.rs +++ b/crates/storage/codecs/src/alloy/header.rs @@ -18,6 +18,7 @@ use alloy_primitives::{Address, BlockNumber, Bloom, Bytes, B256, U256}; )] #[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] +#[reth_codecs(crate = "crate")] pub(crate) struct Header { parent_hash: B256, ommers_hash: B256, @@ -54,6 +55,7 @@ pub(crate) struct Header { )] #[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] +#[reth_codecs(crate = "crate")] pub(crate) struct HeaderExt { requests_hash: Option<B256>, } diff --git a/crates/storage/codecs/src/alloy/mod.rs b/crates/storage/codecs/src/alloy/mod.rs index f1bf6a00e694..697bac901e46 100644 --- a/crates/storage/codecs/src/alloy/mod.rs +++ b/crates/storage/codecs/src/alloy/mod.rs @@ -7,7 +7,7 @@ macro_rules! cond_mod { #[cfg(feature = "test-utils")] pub mod $mod_name; #[cfg(not(feature = "test-utils"))] - mod $mod_name; + pub(crate) mod $mod_name; )* }; } diff --git a/crates/storage/codecs/src/alloy/signature.rs b/crates/storage/codecs/src/alloy/signature.rs index 0cc4774d0f8b..b8fd19cf35a3 100644 --- a/crates/storage/codecs/src/alloy/signature.rs +++ b/crates/storage/codecs/src/alloy/signature.rs @@ -1,7 +1,7 @@ //!
Compact implementation for [`Signature`] use crate::Compact; -use alloy_primitives::{Parity, Signature, U256}; +use alloy_primitives::{PrimitiveSignature as Signature, U256}; impl Compact for Signature { fn to_compact<B>(&self, buf: &mut B) -> usize @@ -10,7 +10,7 @@ impl Compact for Signature { { buf.put_slice(&self.r().as_le_bytes()); buf.put_slice(&self.s().as_le_bytes()); - self.v().y_parity() as usize + self.v() as usize } fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) { @@ -19,6 +19,6 @@ let r = U256::from_le_slice(&buf[0..32]); let s = U256::from_le_slice(&buf[32..64]); buf.advance(64); - (Self::new(r, s, Parity::Parity(identifier != 0)), buf) + (Self::new(r, s, identifier != 0), buf) } } diff --git a/crates/storage/codecs/src/alloy/transaction/eip1559.rs b/crates/storage/codecs/src/alloy/transaction/eip1559.rs index 0e7f44cdec18..6d910a6900c6 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip1559.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip1559.rs @@ -13,11 +13,12 @@ use alloy_primitives::{Bytes, ChainId, TxKind, U256}; /// /// Notice: Make sure this struct is 1:1 with [`alloy_consensus::TxEip1559`] #[derive(Debug, Clone, PartialEq, Eq, Hash, Compact, Default)] +#[reth_codecs(crate = "crate")] #[cfg_attr( any(test, feature = "test-utils"), derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) )] -#[cfg_attr(any(test, feature = "test-utils"), crate::add_arbitrary_tests(compact))] +#[cfg_attr(any(test, feature = "test-utils"), crate::add_arbitrary_tests(crate, compact))] #[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] pub(crate) struct TxEip1559 { chain_id: ChainId, diff --git a/crates/storage/codecs/src/alloy/transaction/eip2930.rs b/crates/storage/codecs/src/alloy/transaction/eip2930.rs index 75cab9e8a09c..aeb08f361bea 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip2930.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip2930.rs @@ -15,12 +15,13 @@ use reth_codecs_derive::add_arbitrary_tests; /// /// Notice: Make sure this struct is 1:1 with [`alloy_consensus::TxEip2930`] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] +#[reth_codecs(crate = "crate")] #[cfg_attr( any(test, feature = "test-utils"), derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) )] #[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] -#[add_arbitrary_tests(compact)] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct TxEip2930 { chain_id: ChainId, nonce: u64, diff --git a/crates/storage/codecs/src/alloy/transaction/eip4844.rs b/crates/storage/codecs/src/alloy/transaction/eip4844.rs index 5ec36e06bf5c..fac9ab9a1b2a 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip4844.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip4844.rs @@ -16,9 +16,10 @@ use reth_codecs_derive::add_arbitrary_tests; /// /// Notice: Make sure this struct is 1:1 with [`alloy_consensus::TxEip4844`] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] +#[reth_codecs(crate = "crate")] #[cfg_attr(any(test, feature = "test-utils"), derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] -#[add_arbitrary_tests(compact)] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct TxEip4844 { chain_id: ChainId, nonce: u64, @@ -106,7 +107,7 @@ impl<'a> arbitrary::Arbitrary<'a> for TxEip4844 { } } -#[cfg(any(test, feature = "test-utils"))] +#[cfg(feature =
"test-utils")] fn serialize_placeholder(value: &Option<()>, serializer: S) -> Result where S: serde::Serializer, @@ -119,7 +120,7 @@ where } } -#[cfg(any(test, feature = "test-utils"))] +#[cfg(feature = "test-utils")] fn deserialize_placeholder<'de, D>(deserializer: D) -> Result, D::Error> where D: serde::Deserializer<'de>, diff --git a/crates/storage/codecs/src/alloy/transaction/eip7702.rs b/crates/storage/codecs/src/alloy/transaction/eip7702.rs index 8acf59425f2d..eab10af0b663 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip7702.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip7702.rs @@ -16,12 +16,13 @@ use reth_codecs_derive::add_arbitrary_tests; /// /// Notice: Make sure this struct is 1:1 with [`alloy_consensus::TxEip7702`] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] +#[reth_codecs(crate = "crate")] #[cfg_attr( any(test, feature = "test-utils"), derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) )] #[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] -#[add_arbitrary_tests(compact)] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct TxEip7702 { chain_id: ChainId, nonce: u64, diff --git a/crates/storage/codecs/src/alloy/transaction/legacy.rs b/crates/storage/codecs/src/alloy/transaction/legacy.rs index c83626aa4cf9..60250ba64af0 100644 --- a/crates/storage/codecs/src/alloy/transaction/legacy.rs +++ b/crates/storage/codecs/src/alloy/transaction/legacy.rs @@ -6,10 +6,11 @@ use alloy_primitives::{Bytes, ChainId, TxKind, U256}; /// Legacy transaction. #[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] +#[reth_codecs(crate = "crate")] #[cfg_attr( any(test, feature = "test-utils"), derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize), - crate::add_arbitrary_tests(compact) + crate::add_arbitrary_tests(crate, compact) )] #[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] pub(crate) struct TxLegacy { diff --git a/crates/storage/codecs/src/alloy/transaction/optimism.rs b/crates/storage/codecs/src/alloy/transaction/optimism.rs index 22f508fd4ceb..bb970b581775 100644 --- a/crates/storage/codecs/src/alloy/transaction/optimism.rs +++ b/crates/storage/codecs/src/alloy/transaction/optimism.rs @@ -19,7 +19,8 @@ use reth_codecs_derive::add_arbitrary_tests; derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) )] #[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] -#[add_arbitrary_tests(compact)] +#[reth_codecs(crate = "crate")] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct TxDeposit { source_hash: B256, from: Address, diff --git a/crates/storage/codecs/src/alloy/withdrawal.rs b/crates/storage/codecs/src/alloy/withdrawal.rs index 0f3347cec1a5..09e80d1faa7a 100644 --- a/crates/storage/codecs/src/alloy/withdrawal.rs +++ b/crates/storage/codecs/src/alloy/withdrawal.rs @@ -1,7 +1,8 @@ //! 
Compact implementation for [`AlloyWithdrawal`] use crate::Compact; -use alloy_eips::eip4895::Withdrawal as AlloyWithdrawal; +use alloc::vec::Vec; +use alloy_eips::eip4895::{Withdrawal as AlloyWithdrawal, Withdrawals}; use alloy_primitives::Address; use reth_codecs_derive::add_arbitrary_tests; @@ -13,8 +14,9 @@ use reth_codecs_derive::add_arbitrary_tests; any(test, feature = "test-utils"), derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) )] +#[reth_codecs(crate = "crate")] #[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] -#[add_arbitrary_tests(compact)] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct Withdrawal { /// Monotonically increasing identifier issued by consensus layer. index: u64, @@ -52,6 +54,22 @@ impl Compact for AlloyWithdrawal { } } +impl Compact for Withdrawals { + fn to_compact<B>(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + self.as_ref().to_compact(buf) + } + + fn from_compact(mut buf: &[u8], _: usize) -> (Self, &[u8]) { + let (withdrawals, new_buf) = Vec::from_compact(buf, buf.len()); + buf = new_buf; + let alloy_withdrawals = Self::new(withdrawals); + (alloy_withdrawals, buf) + } +} + #[cfg(test)] mod tests { use super::*; @@ -60,12 +78,20 @@ mod tests { proptest! { #[test] - fn roundtrip(withdrawal in arb::<AlloyWithdrawal>()) { + fn roundtrip_withdrawal(withdrawal in arb::<AlloyWithdrawal>()) { let mut compacted_withdrawal = Vec::<u8>::new(); let len = withdrawal.to_compact(&mut compacted_withdrawal); let (decoded, _) = AlloyWithdrawal::from_compact(&compacted_withdrawal, len); assert_eq!(withdrawal, decoded) } + + #[test] + fn roundtrip_withdrawals(withdrawals in arb::<Withdrawals>()) { + let mut compacted_withdrawals = Vec::<u8>::new(); + let len = withdrawals.to_compact(&mut compacted_withdrawals); + let (decoded, _) = Withdrawals::from_compact(&compacted_withdrawals, len); + assert_eq!(withdrawals, decoded); + } } // each value in the database has an extra field named flags that encodes metadata about other diff --git a/crates/storage/codecs/src/lib.rs b/crates/storage/codecs/src/lib.rs index 598f2131bde6..284c6454f838 100644 --- a/crates/storage/codecs/src/lib.rs +++ b/crates/storage/codecs/src/lib.rs @@ -662,7 +662,8 @@ mod tests { } #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Compact, arbitrary::Arbitrary)] - #[add_arbitrary_tests(compact)] + #[add_arbitrary_tests(crate, compact)] + #[reth_codecs(crate = "crate")] struct TestStruct { f_u64: u64, f_u256: U256, @@ -714,7 +715,8 @@ mod tests { #[derive( Debug, PartialEq, Clone, Default, Serialize, Deserialize, Compact, arbitrary::Arbitrary, )] - #[add_arbitrary_tests(compact)] + #[add_arbitrary_tests(crate, compact)] + #[reth_codecs(crate = "crate")] enum TestEnum { #[default] Var0, diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index f827e48c8c3c..9b8589cb6aa8 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -25,6 +25,7 @@ reth-trie-common.workspace = true # ethereum alloy-primitives.workspace = true alloy-genesis.workspace = true +alloy-consensus.workspace = true # codecs modular-bitfield.workspace = true @@ -57,29 +58,27 @@ proptest-arbitrary-interop.workspace = true [features] test-utils = [ - "arbitrary", - "reth-primitives/test-utils", - "reth-primitives-traits/test-utils", - "reth-codecs/test-utils", - "reth-db-models/test-utils", - "reth-trie-common/test-utils", - "reth-prune-types/test-utils", - "reth-stages-types/test-utils" + "arbitrary", + "reth-primitives/test-utils",
"reth-primitives-traits/test-utils", + "reth-codecs/test-utils", + "reth-db-models/test-utils", + "reth-trie-common/test-utils", + "reth-prune-types/test-utils", + "reth-stages-types/test-utils", ] arbitrary = [ - "reth-primitives/arbitrary", - "reth-db-models/arbitrary", - "dep:arbitrary", - "dep:proptest", - "reth-primitives-traits/arbitrary", - "reth-trie-common/arbitrary", - "alloy-primitives/arbitrary", - "parity-scale-codec/arbitrary", - "reth-codecs/arbitrary", - "reth-prune-types/arbitrary", - "reth-stages-types/arbitrary" -] -optimism = [ - "reth-primitives/optimism", - "reth-codecs/optimism" + "reth-primitives/arbitrary", + "reth-db-models/arbitrary", + "dep:arbitrary", + "dep:proptest", + "reth-primitives-traits/arbitrary", + "reth-trie-common/arbitrary", + "alloy-primitives/arbitrary", + "parity-scale-codec/arbitrary", + "reth-codecs/arbitrary", + "reth-prune-types/arbitrary", + "reth-stages-types/arbitrary", + "alloy-consensus/arbitrary", ] +optimism = ["reth-primitives/optimism", "reth-codecs/optimism"] diff --git a/crates/storage/db-api/src/cursor.rs b/crates/storage/db-api/src/cursor.rs index 585aa4947a28..9297f738ab5a 100644 --- a/crates/storage/db-api/src/cursor.rs +++ b/crates/storage/db-api/src/cursor.rs @@ -152,12 +152,7 @@ where impl> Iterator for Walker<'_, T, CURSOR> { type Item = Result, DatabaseError>; fn next(&mut self) -> Option { - let start = self.start.take(); - if start.is_some() { - return start - } - - self.cursor.next().transpose() + self.start.take().or_else(|| self.cursor.next().transpose()) } } diff --git a/crates/storage/db-api/src/models/blocks.rs b/crates/storage/db-api/src/models/blocks.rs index 7268d82dd3cc..0145ceb52b5b 100644 --- a/crates/storage/db-api/src/models/blocks.rs +++ b/crates/storage/db-api/src/models/blocks.rs @@ -1,8 +1,8 @@ //! Block related models and types. +use alloy_consensus::Header; use alloy_primitives::B256; use reth_codecs::{add_arbitrary_tests, Compact}; -use reth_primitives::Header; use serde::{Deserialize, Serialize}; /// The storage representation of a block's ommers. 
diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index b077027f2970..00787194c718 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -4,12 +4,11 @@ use crate::{ table::{Compress, Decode, Decompress, Encode}, DatabaseError, }; +use alloy_consensus::Header; use alloy_genesis::GenesisAccount; use alloy_primitives::{Address, Bytes, Log, B256, U256}; use reth_codecs::{add_arbitrary_tests, Compact}; -use reth_primitives::{ - Account, Bytecode, Header, Receipt, StorageEntry, TransactionSignedNoHash, TxType, -}; +use reth_primitives::{Account, Bytecode, Receipt, StorageEntry, TransactionSignedNoHash, TxType}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; use reth_trie_common::{StoredNibbles, StoredNibblesSubKey, *}; @@ -313,7 +312,7 @@ mod tests { fn test_ensure_backwards_compatibility() { use super::*; use reth_codecs::{test_utils::UnusedBits, validate_bitflag_backwards_compat}; - use reth_primitives::{Account, Receipt, ReceiptWithBloom, Withdrawals}; + use reth_primitives::{Account, Receipt, ReceiptWithBloom}; use reth_prune_types::{PruneCheckpoint, PruneMode, PruneSegment}; use reth_stages_types::{ AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, @@ -341,7 +340,6 @@ mod tests { assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(Withdrawals::bitflag_encoded_bytes(), 0); validate_bitflag_backwards_compat!(Account, UnusedBits::NotZero); validate_bitflag_backwards_compat!(AccountHashingCheckpoint, UnusedBits::NotZero); @@ -364,6 +362,5 @@ mod tests { validate_bitflag_backwards_compat!(StoredBlockOmmers, UnusedBits::Zero); validate_bitflag_backwards_compat!(StoredBlockWithdrawals, UnusedBits::Zero); validate_bitflag_backwards_compat!(StorageHashingCheckpoint, UnusedBits::NotZero); - validate_bitflag_backwards_compat!(Withdrawals, UnusedBits::Zero); } } diff --git a/crates/storage/db-api/src/models/storage_sharded_key.rs b/crates/storage/db-api/src/models/storage_sharded_key.rs index 5fd79ba655c1..a7a1ffb71be3 100644 --- a/crates/storage/db-api/src/models/storage_sharded_key.rs +++ b/crates/storage/db-api/src/models/storage_sharded_key.rs @@ -12,6 +12,10 @@ use super::ShardedKey; /// Number of indices in one shard. pub const NUM_OF_INDICES_IN_SHARD: usize = 2_000; +/// The encoded size of a [`StorageShardedKey`] in bytes. +/// The fields are: a 20-byte address, a 32-byte key, and an 8-byte block number. +const STORAGE_SHARD_KEY_BYTES_SIZE: usize = 20 + 32 + 8; + /// Sometimes data can be too big to be saved for a single key. This helps out by dividing the data /// into different shards.
Example: /// @@ -53,7 +57,8 @@ impl Encode for StorageShardedKey { type Encoded = Vec<u8>; fn encode(self) -> Self::Encoded { - let mut buf: Vec<u8> = Encode::encode(self.address).into(); + let mut buf: Vec<u8> = Vec::with_capacity(STORAGE_SHARD_KEY_BYTES_SIZE); + buf.extend_from_slice(&Encode::encode(self.address)); buf.extend_from_slice(&Encode::encode(self.sharded_key.key)); buf.extend_from_slice(&self.sharded_key.highest_block_number.to_be_bytes()); buf @@ -62,6 +67,9 @@ impl Encode for StorageShardedKey { impl Decode for StorageShardedKey { fn decode(value: &[u8]) -> Result<Self, DatabaseError> { + if value.len() != STORAGE_SHARD_KEY_BYTES_SIZE { + return Err(DatabaseError::Decode) + } let tx_num_index = value.len() - 8; let highest_tx_number = u64::from_be_bytes( diff --git a/crates/storage/db-api/src/table.rs b/crates/storage/db-api/src/table.rs index 963457af05c3..acdc8efc78fc 100644 --- a/crates/storage/db-api/src/table.rs +++ b/crates/storage/db-api/src/table.rs @@ -88,6 +88,9 @@ pub trait Table: Send + Sync + Debug + 'static { /// The table's name. const NAME: &'static str; + /// Whether the table is also a `DUPSORT` table. + const DUPSORT: bool; + /// Key element of `Table`. /// /// Sorting should be taken into account when encoding this. diff --git a/crates/storage/db-common/src/db_tool/mod.rs b/crates/storage/db-common/src/db_tool/mod.rs index 67a5dd627623..3420f2089fd9 100644 --- a/crates/storage/db-common/src/db_tool/mod.rs +++ b/crates/storage/db-common/src/db_tool/mod.rs @@ -12,7 +12,7 @@ use reth_db_api::{ }; use reth_fs_util as fs; use reth_node_types::NodeTypesWithDB; -use reth_provider::{providers::ProviderNodeTypes, ChainSpecProvider, ProviderFactory}; +use reth_provider::{providers::ProviderNodeTypes, ChainSpecProvider, DBProvider, ProviderFactory}; use std::{path::Path, rc::Rc, sync::Arc}; use tracing::info; diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 014751733e6b..8c930b22ef81 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -98,7 +98,10 @@ where database_hash: block_hash, }) } - Err(e) => return Err(dbg!(e).into()), + Err(e) => { + debug!(?e); + return Err(e.into()); + } } debug!("Writing genesis block."); diff --git a/crates/storage/db-models/Cargo.toml b/crates/storage/db-models/Cargo.toml index d5f773347b09..59d95c2263d0 100644 --- a/crates/storage/db-models/Cargo.toml +++ b/crates/storage/db-models/Cargo.toml @@ -14,10 +14,11 @@ workspace = true [dependencies] # reth reth-codecs.workspace = true -reth-primitives = { workspace = true, features = ["reth-codec"] } +reth-primitives-traits.workspace = true # ethereum alloy-primitives.workspace = true +alloy-eips.workspace = true # codecs modular-bitfield.workspace = true @@ -32,22 +33,25 @@ proptest = { workspace = true, optional = true } [dev-dependencies] # reth -reth-primitives = { workspace = true, features = ["arbitrary"] } +reth-primitives-traits = { workspace = true, features = ["arbitrary"] } reth-codecs.workspace = true +arbitrary = { workspace = true, features = ["derive"] } +proptest.workspace = true proptest-arbitrary-interop.workspace = true test-fuzz.workspace = true [features] test-utils = [ - "arbitrary", - "reth-primitives/test-utils", - "reth-codecs/test-utils" + "reth-primitives-traits/test-utils", + "arbitrary", + "reth-codecs/test-utils", ] arbitrary = [ - "reth-primitives/arbitrary", - "dep:arbitrary", - "dep:proptest", - "alloy-primitives/arbitrary", - "reth-codecs/arbitrary" + "reth-primitives-traits/arbitrary", +
"dep:arbitrary", + "dep:proptest", + "alloy-primitives/arbitrary", + "alloy-eips/arbitrary", + "reth-codecs/arbitrary", ] diff --git a/crates/storage/db-models/src/accounts.rs b/crates/storage/db-models/src/accounts.rs index acfd45fe34e8..29a5cf305920 100644 --- a/crates/storage/db-models/src/accounts.rs +++ b/crates/storage/db-models/src/accounts.rs @@ -2,7 +2,7 @@ use reth_codecs::{add_arbitrary_tests, Compact}; use serde::Serialize; use alloy_primitives::{bytes::Buf, Address}; -use reth_primitives::Account; +use reth_primitives_traits::Account; /// Account as it is saved in the database. /// diff --git a/crates/storage/db-models/src/blocks.rs b/crates/storage/db-models/src/blocks.rs index 3e740a2e1aa8..ed1d7fb67722 100644 --- a/crates/storage/db-models/src/blocks.rs +++ b/crates/storage/db-models/src/blocks.rs @@ -1,8 +1,8 @@ use std::ops::Range; +use alloy_eips::eip4895::Withdrawals; use alloy_primitives::TxNumber; use reth_codecs::{add_arbitrary_tests, Compact}; -use reth_primitives::Withdrawals; use serde::{Deserialize, Serialize}; /// Total number of transactions. diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index 324411613fcc..6042b5faa815 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -26,6 +26,7 @@ reth-trie-common.workspace = true # ethereum alloy-primitives.workspace = true +alloy-consensus.workspace = true # mdbx reth-libmdbx = { workspace = true, optional = true, features = [ @@ -90,31 +91,29 @@ mdbx = [ "dep:rustc-hash", ] test-utils = [ - "dep:tempfile", - "arbitrary", - "parking_lot", - "reth-primitives/test-utils", - "reth-primitives-traits/test-utils", - "reth-db-api/test-utils", - "reth-nippy-jar/test-utils", - "reth-trie-common/test-utils", - "reth-prune-types/test-utils", - "reth-stages-types/test-utils" + "dep:tempfile", + "arbitrary", + "parking_lot", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-db-api/test-utils", + "reth-nippy-jar/test-utils", + "reth-trie-common/test-utils", + "reth-prune-types/test-utils", + "reth-stages-types/test-utils", ] bench = [] arbitrary = [ - "reth-primitives/arbitrary", - "reth-db-api/arbitrary", - "reth-primitives-traits/arbitrary", - "reth-trie-common/arbitrary", - "alloy-primitives/arbitrary", - "reth-prune-types/arbitrary", - "reth-stages-types/arbitrary" -] -optimism = [ - "reth-primitives/optimism", - "reth-db-api/optimism" + "reth-primitives/arbitrary", + "reth-db-api/arbitrary", + "reth-primitives-traits/arbitrary", + "reth-trie-common/arbitrary", + "alloy-primitives/arbitrary", + "reth-prune-types/arbitrary", + "reth-stages-types/arbitrary", + "alloy-consensus/arbitrary", ] +optimism = ["reth-primitives/optimism", "reth-db-api/optimism"] disable-lock = [] [[bench]] diff --git a/crates/storage/db/benches/utils.rs b/crates/storage/db/benches/utils.rs index 9700ef94b241..62c4dfe6ecb9 100644 --- a/crates/storage/db/benches/utils.rs +++ b/crates/storage/db/benches/utils.rs @@ -26,13 +26,11 @@ where T::Key: Default + Clone + for<'de> serde::Deserialize<'de>, T::Value: Default + Clone + for<'de> serde::Deserialize<'de>, { + let path = + format!("{}/../../../testdata/micro/db/{}.json", env!("CARGO_MANIFEST_DIR"), T::NAME); let list: Vec> = serde_json::from_reader(std::io::BufReader::new( - std::fs::File::open(format!( - "{}/../../../testdata/micro/db/{}.json", - env!("CARGO_MANIFEST_DIR"), - T::NAME - )) - .expect("Test vectors not found. 
They can be generated from the workspace by calling `cargo run --bin reth -- test-vectors tables`."), + std::fs::File::open(&path) + .unwrap_or_else(|_| panic!("Test vectors not found. They can be generated from the workspace by calling `cargo run --bin reth --features dev -- test-vectors tables`: {:?}", path)) )) .unwrap(); diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 65b804e6a58d..10f3b2282301 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -23,7 +23,7 @@ use reth_libmdbx::{ use reth_storage_errors::db::LogLevel; use reth_tracing::tracing::error; use std::{ - ops::Deref, + ops::{Deref, Range}, path::Path, sync::Arc, time::{SystemTime, UNIX_EPOCH}, @@ -33,8 +33,14 @@ use tx::Tx; pub mod cursor; pub mod tx; -const GIGABYTE: usize = 1024 * 1024 * 1024; -const TERABYTE: usize = GIGABYTE * 1024; +/// 1 KB in bytes +pub const KILOBYTE: usize = 1024; +/// 1 MB in bytes +pub const MEGABYTE: usize = KILOBYTE * 1024; +/// 1 GB in bytes +pub const GIGABYTE: usize = MEGABYTE * 1024; +/// 1 TB in bytes +pub const TERABYTE: usize = GIGABYTE * 1024; /// MDBX allows up to 32767 readers (`MDBX_READERS_LIMIT`), but we limit it to slightly below that const DEFAULT_MAX_READERS: u64 = 32_000; @@ -64,6 +70,8 @@ impl DatabaseEnvKind { pub struct DatabaseArguments { /// Client version that accesses the database. client_version: ClientVersion, + /// Database geometry settings. + geometry: Geometry>, /// Database log level. If [None], the default value is used. log_level: Option, /// Maximum duration of a read transaction. If [None], the default value is used. @@ -93,15 +101,37 @@ pub struct DatabaseArguments { impl DatabaseArguments { /// Create new database arguments with given client version. - pub const fn new(client_version: ClientVersion) -> Self { + pub fn new(client_version: ClientVersion) -> Self { Self { client_version, + geometry: Geometry { + size: Some(0..(4 * TERABYTE)), + growth_step: Some(4 * GIGABYTE as isize), + shrink_threshold: Some(0), + page_size: Some(PageSize::Set(default_page_size())), + }, log_level: None, max_read_transaction_duration: None, exclusive: None, } } + /// Sets the upper size limit of the db environment, the maximum database size in bytes. + pub const fn with_geometry_max_size(mut self, max_size: Option) -> Self { + if let Some(max_size) = max_size { + self.geometry.size = Some(0..max_size); + } + self + } + + /// Configures the database growth step in bytes. + pub const fn with_growth_step(mut self, growth_step: Option) -> Self { + if let Some(growth_step) = growth_step { + self.geometry.growth_step = Some(growth_step as isize); + } + self + } + /// Set the log level. pub const fn with_log_level(mut self, log_level: Option) -> Self { self.log_level = log_level; @@ -278,15 +308,7 @@ impl DatabaseEnv { // environment creation. 
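The hunk above moves the previously hard-coded MDBX geometry into `DatabaseArguments`: the old defaults (a 4 TB map, grown in 4 GB steps, never shrinking) now live in `new`, and callers can override the cap and growth step per node. A minimal sketch of the intended call-site usage; the import paths and `ClientVersion::default()` are assumptions, only the builder methods come from the diff:

```rust
use reth_db::mdbx::{DatabaseArguments, GIGABYTE, TERABYTE};
use reth_db_api::models::ClientVersion;

fn main() {
    // Passing `None` to either builder keeps the 4 TB / 4 GB defaults
    // that `DatabaseArguments::new` sets up.
    let _args = DatabaseArguments::new(ClientVersion::default())
        .with_geometry_max_size(Some(8 * TERABYTE))
        .with_growth_step(Some(8 * GIGABYTE));
}
```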
debug_assert!(Tables::ALL.len() <= 256, "number of tables exceed max dbs"); inner_env.set_max_dbs(256); - inner_env.set_geometry(Geometry { - // Maximum database size of 4 terabytes - size: Some(0..(4 * TERABYTE)), - // We grow the database in increments of 4 gigabytes - growth_step: Some(4 * GIGABYTE as isize), - // The database never shrinks - shrink_threshold: Some(0), - page_size: Some(PageSize::Set(default_page_size())), - }); + inner_env.set_geometry(args.geometry); fn is_current_process(id: u32) -> bool { #[cfg(unix)] @@ -475,6 +497,7 @@ mod tests { test_utils::*, AccountChangeSets, }; + use alloy_consensus::Header; use alloy_primitives::{Address, B256, U256}; use reth_db_api::{ cursor::{DbDupCursorRO, DbDupCursorRW, ReverseWalker, Walker}, @@ -482,7 +505,7 @@ mod tests { table::{Encode, Table}, }; use reth_libmdbx::Error; - use reth_primitives::{Account, Header, StorageEntry}; + use reth_primitives::{Account, StorageEntry}; use reth_primitives_traits::IntegerList; use reth_storage_errors::db::{DatabaseWriteError, DatabaseWriteOperation}; use std::str::FromStr; diff --git a/crates/storage/db/src/static_file/masks.rs b/crates/storage/db/src/static_file/masks.rs index ac2811a44d77..405606389bad 100644 --- a/crates/storage/db/src/static_file/masks.rs +++ b/crates/storage/db/src/static_file/masks.rs @@ -4,9 +4,9 @@ use crate::{ static_file::mask::{ColumnSelectorOne, ColumnSelectorTwo, HeaderMask}, HeaderTerminalDifficulties, RawValue, Receipts, Transactions, }; +use alloy_consensus::Header; use alloy_primitives::BlockHash; use reth_db_api::table::Table; -use reth_primitives::Header; // HEADER MASKS add_static_file_mask!(HeaderMask, Header, 0b001); diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index 27f58f8a1f3d..aafdf606bb3d 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -19,6 +19,7 @@ pub use raw::{RawDupSort, RawKey, RawTable, RawValue, TableRawRow}; #[cfg(feature = "mdbx")] pub(crate) mod utils; +use alloy_consensus::Header; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256}; use reth_db_api::{ models::{ @@ -30,7 +31,7 @@ use reth_db_api::{ }, table::{Decode, DupSort, Encode, Table}, }; -use reth_primitives::{Account, Bytecode, Header, Receipt, StorageEntry, TransactionSignedNoHash}; +use reth_primitives::{Account, Bytecode, Receipt, StorageEntry, TransactionSignedNoHash}; use reth_primitives_traits::IntegerList; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; @@ -106,29 +107,41 @@ macro_rules! tables { (@view $name:ident $v:ident) => { $v.view::<$name>() }; (@view $name:ident $v:ident $_subkey:ty) => { $v.view_dupsort::<$name>() }; - ($( $(#[$attr:meta])* table $name:ident; )*) => { + (@value_doc $key:ty, $value:ty) => { + concat!("[`", stringify!($value), "`]") + }; + // Don't generate links if we have generics + (@value_doc $key:ty, $value:ty, $($generic:ident),*) => { + concat!("`", stringify!($value), "`") + }; + + ($($(#[$attr:meta])* table $name:ident$(<$($generic:ident $(= $default:ty)?),*>)? { type Key = $key:ty; type Value = $value:ty; $(type SubKey = $subkey:ty;)? } )*) => { // Table marker types. 
        $(
            $(#[$attr])*
            ///
-            #[doc = concat!("Marker type representing a database table mapping [`", stringify!($key), "`] to [`", stringify!($value), "`].")]
+            #[doc = concat!("Marker type representing a database table mapping [`", stringify!($key), "`] to ", tables!(@value_doc $key, $value, $($($generic),*)?), ".")]
            $(
                #[doc = concat!("\n\nThis table's `DUPSORT` subkey is [`", stringify!($subkey), "`].")]
            )?
-            pub struct $name {
-                _private: (),
+            pub struct $name$(<$($generic $( = $default)?),*>)? {
+                _private: std::marker::PhantomData<($($($generic,)*)?)>,
            }

            // Ideally this implementation wouldn't exist, but it is necessary to derive `Debug`
            // when a type is generic over `T: Table`. See: https://github.com/rust-lang/rust/issues/26925
-            impl fmt::Debug for $name {
+            impl$(<$($generic),*>)? fmt::Debug for $name$(<$($generic),*>)? {
                fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
                    unreachable!("this type cannot be instantiated")
                }
            }

-            impl reth_db_api::table::Table for $name {
+            impl$(<$($generic),*>)? reth_db_api::table::Table for $name$(<$($generic),*>)?
+            where
+                $value: reth_db_api::table::Value + 'static
+            {
                const NAME: &'static str = table_names::$name;
+                const DUPSORT: bool = tables!(@bool $($subkey)?);

                type Key = $key;
                type Value = $value;
@@ -248,7 +261,7 @@ macro_rules! tables {
    /// use reth_db_api::table::Table;
    ///
    /// let table = Tables::Headers;
-    /// let result = tables_to_generic!(table, |GenericTable| GenericTable::NAME);
+    /// let result = tables_to_generic!(table, |GenericTable| <GenericTable as Table>::NAME);
    /// assert_eq!(result, table.name());
    /// ```
    #[macro_export]
@@ -269,53 +282,96 @@ macro_rules! tables {

tables! {
    /// Stores the header hashes belonging to the canonical chain.
-    table CanonicalHeaders;
+    table CanonicalHeaders {
+        type Key = BlockNumber;
+        type Value = HeaderHash;
+    }

    /// Stores the total difficulty from a block header.
-    table HeaderTerminalDifficulties;
+    table HeaderTerminalDifficulties {
+        type Key = BlockNumber;
+        type Value = CompactU256;
+    }

    /// Stores the block number corresponding to a header.
-    table HeaderNumbers;
+    table HeaderNumbers {
+        type Key = BlockHash;
+        type Value = BlockNumber;
+    }

    /// Stores header bodies.
-    table Headers;
+    table Headers<H = Header> {
+        type Key = BlockNumber;
+        type Value = H;
+    }

    /// Stores block indices that contains indexes of transaction and the count of them.
    ///
    /// More information about stored indices can be found in the [`StoredBlockBodyIndices`] struct.
-    table BlockBodyIndices;
+    table BlockBodyIndices {
+        type Key = BlockNumber;
+        type Value = StoredBlockBodyIndices;
+    }

    /// Stores the uncles/ommers of the block.
-    table BlockOmmers;
+    table BlockOmmers {
+        type Key = BlockNumber;
+        type Value = StoredBlockOmmers;
+    }

    /// Stores the block withdrawals.
-    table BlockWithdrawals;
+    table BlockWithdrawals {
+        type Key = BlockNumber;
+        type Value = StoredBlockWithdrawals;
+    }

    /// Canonical only Stores the transaction body for canonical transactions.
-    table Transactions;
+    table Transactions<T = TransactionSignedNoHash> {
+        type Key = TxNumber;
+        type Value = T;
+    }

    /// Stores the mapping of the transaction hash to the transaction number.
-    table TransactionHashNumbers;
+    table TransactionHashNumbers {
+        type Key = TxHash;
+        type Value = TxNumber;
+    }

    /// Stores the mapping of transaction number to the blocks number.
    ///
    /// The key is the highest transaction ID in the block.
-    table TransactionBlocks;
+    table TransactionBlocks {
+        type Key = TxNumber;
+        type Value = BlockNumber;
+    }
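Because `Headers` now carries a generic header type, the `tables_to_generic!` doctest above switches to a fully qualified associated-const path. The same doctest as a standalone program; the crate-root macro export and the `Tables` path are assumed from the `#[macro_export]` in the hunk:

```rust
use reth_db::{tables_to_generic, Tables};
use reth_db_api::table::Table;

fn main() {
    let table = Tables::Headers;
    // `<GenericTable as Table>::NAME` is required now that a bare
    // `GenericTable::NAME` would not resolve for generic tables.
    let result = tables_to_generic!(table, |GenericTable| <GenericTable as Table>::NAME);
    assert_eq!(result, table.name());
}
```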
    /// Canonical only Stores transaction receipts.
-    table Receipts;
+    table Receipts {
+        type Key = TxNumber;
+        type Value = Receipt;
+    }

    /// Stores all smart contract bytecodes.
    /// There will be multiple accounts that have same bytecode
    /// So we would need to introduce reference counter.
    /// This will be small optimization on state.
-    table Bytecodes;
+    table Bytecodes {
+        type Key = B256;
+        type Value = Bytecode;
+    }

    /// Stores the current state of an [`Account`].
-    table PlainAccountState;
+    table PlainAccountState {
+        type Key = Address;
+        type Value = Account;
+    }

    /// Stores the current value of a storage key.
-    table PlainStorageState;
+    table PlainStorageState {
+        type Key = Address;
+        type Value = StorageEntry;
+        type SubKey = B256;
+    }

    /// Stores pointers to block changeset with changes for each account key.
    ///
@@ -335,7 +391,10 @@ tables! {
    /// * If there were no shard we would get `None` entry or entry of different storage key.
    ///
    /// Code example can be found in `reth_provider::HistoricalStateProviderRef`
-    table AccountsHistory<Key = ShardedKey<Address>, Value = BlockNumberList>;
+    table AccountsHistory {
+        type Key = ShardedKey<Address>;
+        type Value = BlockNumberList;
+    }

    /// Stores pointers to block number changeset with changes for each storage key.
    ///
@@ -355,55 +414,98 @@ tables! {
    /// * If there were no shard we would get `None` entry or entry of different storage key.
    ///
    /// Code example can be found in `reth_provider::HistoricalStateProviderRef`
-    table StoragesHistory;
+    table StoragesHistory {
+        type Key = StorageShardedKey;
+        type Value = BlockNumberList;
+    }

    /// Stores the state of an account before a certain transaction changed it.
    /// Change on state can be: account is created, selfdestructed, touched while empty
    /// or changed balance,nonce.
-    table AccountChangeSets;
+    table AccountChangeSets {
+        type Key = BlockNumber;
+        type Value = AccountBeforeTx;
+        type SubKey = Address;
+    }

    /// Stores the state of a storage key before a certain transaction changed it.
    /// If [`StorageEntry::value`] is zero, this means storage was not existing
    /// and needs to be removed.
-    table StorageChangeSets;
+    table StorageChangeSets {
+        type Key = BlockNumberAddress;
+        type Value = StorageEntry;
+        type SubKey = B256;
+    }

    /// Stores the current state of an [`Account`] indexed with `keccak256(Address)`
    /// This table is in preparation for merklization and calculation of state root.
    /// We are saving whole account data as it is needed for partial update when
    /// part of storage is changed. Benefit for merklization is that hashed addresses are sorted.
-    table HashedAccounts;
+    table HashedAccounts {
+        type Key = B256;
+        type Value = Account;
+    }

    /// Stores the current storage values indexed with `keccak256(Address)` and
    /// hash of storage key `keccak256(key)`.
    /// This table is in preparation for merklization and calculation of state root.
    /// Benefit for merklization is that hashed addresses/keys are sorted.
-    table HashedStorages;
+    table HashedStorages {
+        type Key = B256;
+        type Value = StorageEntry;
+        type SubKey = B256;
+    }

    /// Stores the current state's Merkle Patricia Tree.
-    table AccountsTrie;
+    table AccountsTrie {
+        type Key = StoredNibbles;
+        type Value = BranchNodeCompact;
+    }

    /// From HashedAddress => NibblesSubKey => Intermediate value
-    table StoragesTrie;
+    table StoragesTrie {
+        type Key = B256;
+        type Value = StorageTrieEntry;
+        type SubKey = StoredNibblesSubKey;
+    }

    /// Stores the transaction sender for each canonical transaction.
    /// It is needed to speed up execution stage and allows fetching signer without doing
    /// transaction signed recovery
-    table TransactionSenders;
+    table TransactionSenders {
+        type Key = TxNumber;
+        type Value = Address;
+    }

    /// Stores the highest synced block number and stage-specific checkpoint of each stage.
-    table StageCheckpoints;
+    table StageCheckpoints {
+        type Key = StageId;
+        type Value = StageCheckpoint;
+    }

    /// Stores arbitrary data to keep track of a stage first-sync progress.
-    table StageCheckpointProgresses<Key = StageId, Value = Vec<u8>>;
+    table StageCheckpointProgresses {
+        type Key = StageId;
+        type Value = Vec<u8>;
+    }

    /// Stores the highest pruned block number and prune mode of each prune segment.
-    table PruneCheckpoints;
+    table PruneCheckpoints {
+        type Key = PruneSegment;
+        type Value = PruneCheckpoint;
+    }

    /// Stores the history of client versions that have accessed the database with write privileges by unix timestamp in seconds.
-    table VersionHistory;
+    table VersionHistory {
+        type Key = u64;
+        type Value = ClientVersion;
+    }

    /// Stores generic chain state info, like the last finalized block.
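`StoragesHistory` above is keyed by `StorageShardedKey`, whose encoding, per the `Encode`/`Decode` hunk at the start of this section, is the 20-byte address, then the 32-byte storage key, then the big-endian highest block number: 60 bytes in total, and decoding now rejects any other length. A toy sketch of that fixed-size guard; every name in it is illustrative rather than reth's:

```rust
// 20-byte address ++ 32-byte storage key ++ 8-byte big-endian block number.
const SHARD_KEY_SIZE: usize = 20 + 32 + 8;

fn decode_shard_key(value: &[u8]) -> Result<([u8; 20], [u8; 32], u64), &'static str> {
    // Reject anything that is not exactly one encoded key, mirroring the
    // length check added to `StorageShardedKey::decode`.
    if value.len() != SHARD_KEY_SIZE {
        return Err("invalid shard key length");
    }
    let address: [u8; 20] = value[..20].try_into().unwrap();
    let storage_key: [u8; 32] = value[20..52].try_into().unwrap();
    let block_number = u64::from_be_bytes(value[52..60].try_into().unwrap());
    Ok((address, storage_key, block_number))
}

fn main() {
    assert!(decode_shard_key(&[0u8; 59]).is_err());
    assert!(decode_shard_key(&[0u8; 60]).is_ok());
}
```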
- table ChainState; + table ChainState { + type Key = ChainStateKey; + type Value = BlockNumber; + } } /// Keys for the `ChainState` table. diff --git a/crates/storage/db/src/tables/raw.rs b/crates/storage/db/src/tables/raw.rs index 6b6de41613eb..453116ee5e35 100644 --- a/crates/storage/db/src/tables/raw.rs +++ b/crates/storage/db/src/tables/raw.rs @@ -14,6 +14,7 @@ pub struct RawTable { impl Table for RawTable { const NAME: &'static str = T::NAME; + const DUPSORT: bool = false; type Key = RawKey; type Value = RawValue; @@ -28,6 +29,7 @@ pub struct RawDupSort { impl Table for RawDupSort { const NAME: &'static str = T::NAME; + const DUPSORT: bool = true; type Key = RawKey; type Value = RawValue; diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h++ index dbe94755087c..767f37912802 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h++ +++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h++ @@ -851,7 +851,7 @@ struct LIBMDBX_API_TYPE slice : public ::MDBX_val { /// \brief Checks whether the content of the slice is printable. /// \param [in] disable_utf8 By default if `disable_utf8` is `false` function /// checks that content bytes are printable ASCII-7 characters or a valid UTF8 - /// sequences. Otherwise, if if `disable_utf8` is `true` function checks that + /// sequences. Otherwise, if `disable_utf8` is `true` function checks that /// content bytes are printable extended 8-bit ASCII codes. MDBX_NOTHROW_PURE_FUNCTION bool is_printable(bool disable_utf8 = false) const noexcept; diff --git a/crates/storage/libmdbx-rs/src/cursor.rs b/crates/storage/libmdbx-rs/src/cursor.rs index 3deff0c249bc..26cfef54d8d5 100644 --- a/crates/storage/libmdbx-rs/src/cursor.rs +++ b/crates/storage/libmdbx-rs/src/cursor.rs @@ -59,19 +59,18 @@ where } /// Returns an iterator over the raw key value slices. - #[allow(clippy::needless_lifetimes)] - pub fn iter_slices<'a>(&'a self) -> IntoIter<'a, K, Cow<'a, [u8]>, Cow<'a, [u8]>> { + pub fn iter_slices<'a>(self) -> IntoIter, Cow<'a, [u8]>> { self.into_iter() } /// Returns an iterator over database items. #[allow(clippy::should_implement_trait)] - pub fn into_iter(&self) -> IntoIter<'_, K, Key, Value> + pub fn into_iter(self) -> IntoIter where Key: TableObject, Value: TableObject, { - IntoIter::new(self.clone(), MDBX_NEXT, MDBX_NEXT) + IntoIter::new(self, MDBX_NEXT, MDBX_NEXT) } /// Retrieves a key/data pair from the cursor. Depending on the cursor op, @@ -508,7 +507,7 @@ unsafe impl Sync for Cursor where K: TransactionKind {} /// An iterator over the key/value pairs in an MDBX database. #[derive(Debug)] -pub enum IntoIter<'cur, K, Key, Value> +pub enum IntoIter where K: TransactionKind, Key: TableObject, @@ -535,11 +534,11 @@ where /// The next and subsequent operations to perform. next_op: ffi::MDBX_cursor_op, - _marker: PhantomData<(&'cur (), Key, Value)>, + _marker: PhantomData<(Key, Value)>, }, } -impl IntoIter<'_, K, Key, Value> +impl IntoIter where K: TransactionKind, Key: TableObject, @@ -547,11 +546,11 @@ where { /// Creates a new iterator backed by the given cursor. 
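`RawTable` and `RawDupSort` above pin the new `DUPSORT` constant to `false` and `true`, so generic code can ask a table for its kind without a separate `DupSort` bound. An illustrative helper, not part of the diff:

```rust
use reth_db::tables::CanonicalHeaders;
use reth_db_api::table::Table;

/// Hypothetical helper: branch on the table kind via the new constant.
fn describe<T: Table>() -> String {
    format!("{} (dupsort: {})", T::NAME, T::DUPSORT)
}

fn main() {
    assert_eq!(describe::<CanonicalHeaders>(), "CanonicalHeaders (dupsort: false)");
}
```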
fn new(cursor: Cursor, op: ffi::MDBX_cursor_op, next_op: ffi::MDBX_cursor_op) -> Self { - IntoIter::Ok { cursor, op, next_op, _marker: Default::default() } + Self::Ok { cursor, op, next_op, _marker: Default::default() } } } -impl Iterator for IntoIter<'_, K, Key, Value> +impl Iterator for IntoIter where K: TransactionKind, Key: TableObject, @@ -747,13 +746,13 @@ where } } -impl<'cur, K, Key, Value> Iterator for IterDup<'cur, K, Key, Value> +impl Iterator for IterDup<'_, K, Key, Value> where K: TransactionKind, Key: TableObject, Value: TableObject, { - type Item = IntoIter<'cur, K, Key, Value>; + type Item = IntoIter; fn next(&mut self) -> Option { match self { diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index edf9321ace40..6a0b210401e4 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -4,7 +4,7 @@ use crate::{ flags::EnvironmentFlags, transaction::{RO, RW}, txn_manager::{TxnManager, TxnManagerMessage, TxnPtr}, - Transaction, TransactionKind, + Mode, SyncMode, Transaction, TransactionKind, }; use byteorder::{ByteOrder, NativeEndian}; use mem::size_of; @@ -72,14 +72,14 @@ impl Environment { /// Returns true if the environment was opened in [`crate::Mode::ReadWrite`] mode. #[inline] - pub fn is_read_write(&self) -> bool { - self.inner.env_kind.is_write_map() + pub fn is_read_write(&self) -> Result { + Ok(!self.is_read_only()?) } /// Returns true if the environment was opened in [`crate::Mode::ReadOnly`] mode. #[inline] - pub fn is_read_only(&self) -> bool { - !self.inner.env_kind.is_write_map() + pub fn is_read_only(&self) -> Result { + Ok(matches!(self.info()?.mode(), Mode::ReadOnly)) } /// Returns the transaction manager. @@ -425,6 +425,23 @@ impl Info { fsync: self.0.mi_pgop_stat.fsync, } } + + /// Return the mode of the database + #[inline] + pub const fn mode(&self) -> Mode { + let mode = self.0.mi_mode; + if (mode & ffi::MDBX_RDONLY) != 0 { + Mode::ReadOnly + } else if (mode & ffi::MDBX_UTTERLY_NOSYNC) != 0 { + Mode::ReadWrite { sync_mode: SyncMode::UtterlyNoSync } + } else if (mode & ffi::MDBX_NOMETASYNC) != 0 { + Mode::ReadWrite { sync_mode: SyncMode::NoMetaSync } + } else if (mode & ffi::MDBX_SAFE_NOSYNC) != 0 { + Mode::ReadWrite { sync_mode: SyncMode::SafeNoSync } + } else { + Mode::ReadWrite { sync_mode: SyncMode::Durable } + } + } } impl fmt::Debug for Environment { @@ -472,8 +489,10 @@ pub struct PageOps { pub mincore: u64, } +/// Represents the geometry settings for the database environment #[derive(Clone, Debug, PartialEq, Eq)] pub struct Geometry { + /// The size range in bytes. pub size: Option, pub growth_step: Option, pub shrink_threshold: Option, @@ -781,15 +800,14 @@ impl EnvironmentBuilder { } /// Sets the interprocess/shared threshold to force flush the data buffers to disk, if - /// [`SyncMode::SafeNoSync`](crate::flags::SyncMode::SafeNoSync) is used. + /// [`SyncMode::SafeNoSync`] is used. pub fn set_sync_bytes(&mut self, v: usize) -> &mut Self { self.sync_bytes = Some(v as u64); self } /// Sets the interprocess/shared relative period since the last unsteady commit to force flush - /// the data buffers to disk, if [`SyncMode::SafeNoSync`](crate::flags::SyncMode::SafeNoSync) is - /// used. + /// the data buffers to disk, if [`SyncMode::SafeNoSync`] is used. pub fn set_sync_period(&mut self, v: Duration) -> &mut Self { // For this option, mdbx uses units of 1/65536 of a second. 
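`Info::mode` above recovers the `Mode` from the raw `mi_mode` bits, checking the most permissive sync flags first, and `is_read_only`/`is_read_write` are now answered from that live environment info instead of the write-map environment kind, which is why they return a `Result`. A small sketch of the check; the `reth_libmdbx` re-export paths are assumed:

```rust
use reth_libmdbx::{Environment, Mode};

// Mirrors the new `is_read_only`: ask the environment for its `Info`
// and pattern-match the decoded mode.
fn is_read_only(env: &Environment) -> bool {
    matches!(env.info().map(|info| info.mode()), Ok(Mode::ReadOnly))
}
```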
let as_mdbx_units = (v.as_secs_f64() * 65536f64) as u64; diff --git a/crates/storage/libmdbx-rs/src/transaction.rs b/crates/storage/libmdbx-rs/src/transaction.rs index 88236ebe9914..84b2dabc90a8 100644 --- a/crates/storage/libmdbx-rs/src/transaction.rs +++ b/crates/storage/libmdbx-rs/src/transaction.rs @@ -6,7 +6,7 @@ use crate::{ txn_manager::{TxnManagerMessage, TxnPtr}, Cursor, Error, Stat, TableObject, }; -use ffi::{mdbx_txn_renew, MDBX_txn_flags_t, MDBX_TXN_RDONLY, MDBX_TXN_READWRITE}; +use ffi::{MDBX_txn_flags_t, MDBX_TXN_RDONLY, MDBX_TXN_READWRITE}; use indexmap::IndexSet; use parking_lot::{Mutex, MutexGuard}; use std::{ @@ -18,6 +18,9 @@ use std::{ time::Duration, }; +#[cfg(feature = "read-tx-timeouts")] +use ffi::mdbx_txn_renew; + mod private { use super::*; diff --git a/crates/storage/libmdbx-rs/src/txn_manager.rs b/crates/storage/libmdbx-rs/src/txn_manager.rs index 716e8ee62bde..ae4a93724c41 100644 --- a/crates/storage/libmdbx-rs/src/txn_manager.rs +++ b/crates/storage/libmdbx-rs/src/txn_manager.rs @@ -5,7 +5,10 @@ use crate::{ }; use std::{ ptr, - sync::mpsc::{sync_channel, Receiver, SyncSender}, + sync::{ + mpsc::{sync_channel, Receiver, SyncSender}, + Arc, + }, }; #[derive(Copy, Clone, Debug)] @@ -28,7 +31,7 @@ pub(crate) enum TxnManagerMessage { pub(crate) struct TxnManager { sender: SyncSender, #[cfg(feature = "read-tx-timeouts")] - read_transactions: Option>, + read_transactions: Option>, } impl TxnManager { @@ -289,11 +292,11 @@ mod read_transactions { // Sleep not more than `READ_TRANSACTIONS_CHECK_INTERVAL`, but at least until // the closest deadline of an active read transaction - let duration_until_closest_deadline = - self.max_duration - max_active_transaction_duration.unwrap_or_default(); - std::thread::sleep( - READ_TRANSACTIONS_CHECK_INTERVAL.min(duration_until_closest_deadline), + let sleep_duration = READ_TRANSACTIONS_CHECK_INTERVAL.min( + self.max_duration - max_active_transaction_duration.unwrap_or_default(), ); + trace!(target: "libmdbx", ?sleep_duration, elapsed = ?now.elapsed(), "Putting transaction monitor to sleep"); + std::thread::sleep(sleep_duration); } }; std::thread::Builder::new() diff --git a/crates/storage/libmdbx-rs/tests/environment.rs b/crates/storage/libmdbx-rs/tests/environment.rs index 99453ef113af..007418f76bb9 100644 --- a/crates/storage/libmdbx-rs/tests/environment.rs +++ b/crates/storage/libmdbx-rs/tests/environment.rs @@ -128,6 +128,18 @@ fn test_info() { // assert_eq!(info.last_pgno(), 1); // assert_eq!(info.last_txnid(), 0); assert_eq!(info.num_readers(), 0); + assert!(matches!(info.mode(), Mode::ReadWrite { sync_mode: SyncMode::Durable })); + assert!(env.is_read_write().unwrap()); + + drop(env); + let env = Environment::builder() + .set_geometry(Geometry { size: Some(map_size..), ..Default::default() }) + .set_flags(EnvironmentFlags { mode: Mode::ReadOnly, ..Default::default() }) + .open(dir.path()) + .unwrap(); + let info = env.info().unwrap(); + assert!(matches!(info.mode(), Mode::ReadOnly)); + assert!(env.is_read_only().unwrap()); } #[test] diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 04a0bf42908e..399e3e000b99 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -38,6 +38,7 @@ reth-node-types.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rpc-types-engine.workspace = true +alloy-consensus.workspace = true revm.workspace = true # optimism @@ -65,7 +66,6 @@ strum.workspace = true # test-utils 
reth-ethereum-engine-primitives = { workspace = true, optional = true } -alloy-consensus = { workspace = true, optional = true } # parallel utils rayon.workspace = true @@ -88,44 +88,43 @@ alloy-consensus.workspace = true [features] optimism = [ - "reth-primitives/optimism", - "reth-execution-types/optimism", - "reth-optimism-primitives", - "reth-codecs/optimism", - "reth-db/optimism", - "reth-db-api/optimism", - "revm/optimism" + "reth-primitives/optimism", + "reth-execution-types/optimism", + "reth-optimism-primitives", + "reth-codecs/optimism", + "reth-db/optimism", + "reth-db-api/optimism", + "revm/optimism", ] serde = [ - "reth-execution-types/serde", - "reth-trie-db/serde", - "reth-trie/serde", - "alloy-consensus?/serde", - "alloy-eips/serde", - "alloy-primitives/serde", - "alloy-rpc-types-engine/serde", - "dashmap/serde", - "notify/serde", - "parking_lot/serde", - "rand/serde", - "revm/serde", - "reth-codecs/serde" + "reth-execution-types/serde", + "reth-trie-db/serde", + "reth-trie/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "alloy-rpc-types-engine/serde", + "dashmap/serde", + "notify/serde", + "parking_lot/serde", + "rand/serde", + "revm/serde", + "reth-codecs/serde", ] test-utils = [ - "reth-db/test-utils", - "reth-nippy-jar/test-utils", - "reth-trie/test-utils", - "reth-chain-state/test-utils", - "reth-ethereum-engine-primitives", - "alloy-consensus", - "reth-chainspec/test-utils", - "reth-evm/test-utils", - "reth-network-p2p/test-utils", - "reth-primitives/test-utils", - "reth-codecs/test-utils", - "reth-db-api/test-utils", - "reth-trie-db/test-utils", - "revm/test-utils", - "reth-prune-types/test-utils", - "reth-stages-types/test-utils" + "reth-db/test-utils", + "reth-nippy-jar/test-utils", + "reth-trie/test-utils", + "reth-chain-state/test-utils", + "reth-ethereum-engine-primitives", + "reth-chainspec/test-utils", + "reth-evm/test-utils", + "reth-network-p2p/test-utils", + "reth-primitives/test-utils", + "reth-codecs/test-utils", + "reth-db-api/test-utils", + "reth-trie-db/test-utils", + "revm/test-utils", + "reth-prune-types/test-utils", + "reth-stages-types/test-utils", ] diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index 894a41620c52..deccdea2831f 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -46,6 +46,9 @@ pub use reth_chain_state::{ CanonStateNotifications, CanonStateSubscriptions, }; +// reexport traits to avoid breaking changes +pub use reth_storage_api::{HistoryWriter, StatsReader}; + pub(crate) fn to_range>(bounds: R) -> std::ops::Range { let start = match bounds.start_bound() { std::ops::Bound::Included(&v) => v, diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 64a8a204a329..0f0693471b0f 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -9,7 +9,11 @@ use crate::{ StageCheckpointReader, StateProviderBox, StateProviderFactory, StateReader, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; -use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag}; +use alloy_consensus::Header; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, +}; use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; use 
alloy_rpc_types_engine::ForkchoiceState; use reth_chain_state::{ @@ -23,9 +27,8 @@ use reth_evm::ConfigureEvmEnv; use reth_execution_types::ExecutionOutcome; use reth_node_types::NodeTypesWithDB; use reth_primitives::{ - Account, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, - SealedHeader, StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedNoHash, - Withdrawal, Withdrawals, + Account, Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, + StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedNoHash, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -768,7 +771,7 @@ mod tests { BlockWriter, CanonChainTracker, ProviderFactory, StaticFileProviderFactory, StaticFileWriter, }; - use alloy_eips::{BlockHashOrNumber, BlockNumHash, BlockNumberOrTag}; + use alloy_eips::{eip4895::Withdrawals, BlockHashOrNumber, BlockNumHash, BlockNumberOrTag}; use alloy_primitives::{BlockNumber, TxNumber, B256}; use itertools::Itertools; use rand::Rng; @@ -786,9 +789,7 @@ mod tests { use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_errors::ProviderError; use reth_execution_types::{Chain, ExecutionOutcome}; - use reth_primitives::{ - Receipt, SealedBlock, StaticFileSegment, TransactionSignedNoHash, Withdrawals, - }; + use reth_primitives::{Receipt, SealedBlock, StaticFileSegment, TransactionSignedNoHash}; use reth_storage_api::{ BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, ChangeSetReader, DatabaseProviderFactory, HeaderProvider, ReceiptProvider, @@ -1444,7 +1445,7 @@ mod tests { assert_eq!( provider .withdrawals_by_block( - reth_primitives::BlockHashOrNumber::Number(15), + alloy_eips::BlockHashOrNumber::Number(15), shainghai_timestamp ) .expect("could not call withdrawals by block"), @@ -1456,7 +1457,7 @@ mod tests { assert_eq!( provider .withdrawals_by_block( - reth_primitives::BlockHashOrNumber::Number(block.number), + alloy_eips::BlockHashOrNumber::Number(block.number), shainghai_timestamp )? 
.unwrap(), diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index d6847fa1b8f6..37c00be23bee 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -6,8 +6,12 @@ use crate::{ StageCheckpointReader, StateReader, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; -use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, HashOrNumber}; -use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; +use alloy_consensus::Header; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, HashOrNumber, +}; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use reth_chain_state::{BlockState, CanonicalInMemoryState, MemoryOverlayStateProviderRef}; use reth_chainspec::{ChainInfo, EthereumHardforks}; use reth_db::models::BlockNumberAddress; @@ -15,9 +19,8 @@ use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_execution_types::{BundleStateInit, ExecutionOutcome, RevertsInit}; use reth_primitives::{ - Account, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, - SealedHeader, StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedNoHash, - Withdrawal, Withdrawals, + Account, Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, + StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedNoHash, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -296,7 +299,7 @@ impl ConsistentProvider { ) -> ProviderResult> where F: FnOnce( - &DatabaseProviderRO, + &DatabaseProviderRO, RangeInclusive, &mut P, ) -> ProviderResult>, @@ -413,7 +416,7 @@ impl ConsistentProvider { ) -> ProviderResult> where S: FnOnce( - &DatabaseProviderRO, + &DatabaseProviderRO, RangeInclusive, ) -> ProviderResult>, M: Fn(RangeInclusive, &BlockState) -> ProviderResult>, @@ -511,7 +514,7 @@ impl ConsistentProvider { fetch_from_block_state: M, ) -> ProviderResult> where - S: FnOnce(&DatabaseProviderRO) -> ProviderResult>, + S: FnOnce(&DatabaseProviderRO) -> ProviderResult>, M: Fn(usize, TxNumber, &BlockState) -> ProviderResult>, { let in_mem_chain = self.head_block.iter().flat_map(|b| b.chain()).collect::>(); @@ -578,7 +581,7 @@ impl ConsistentProvider { fetch_from_block_state: M, ) -> ProviderResult where - S: FnOnce(&DatabaseProviderRO) -> ProviderResult, + S: FnOnce(&DatabaseProviderRO) -> ProviderResult, M: Fn(&BlockState) -> ProviderResult, { if let Some(Some(block_state)) = self.head_block.as_ref().map(|b| b.block_on_chain(id)) { @@ -1322,34 +1325,20 @@ impl BlockReaderIdExt for ConsistentProvider { Ok(self.canonical_in_memory_state.get_finalized_header()) } BlockNumberOrTag::Safe => Ok(self.canonical_in_memory_state.get_safe_header()), - BlockNumberOrTag::Earliest => self.header_by_number(0)?.map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ), + BlockNumberOrTag::Earliest => self + .header_by_number(0)? 
+ .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))), BlockNumberOrTag::Pending => Ok(self.canonical_in_memory_state.pending_sealed_header()), - BlockNumberOrTag::Number(num) => self.header_by_number(num)?.map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ), + BlockNumberOrTag::Number(num) => self + .header_by_number(num)? + .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))), } } fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { Ok(match id { BlockId::Number(num) => self.sealed_header_by_number_or_tag(num)?, - BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }), + BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(SealedHeader::seal), }) } @@ -1386,7 +1375,7 @@ impl StorageChangeSetReader for ConsistentProvider { .bundle .reverts .clone() - .into_plain_state_reverts() + .to_plain_state_reverts() .storage .into_iter() .flatten() @@ -1439,7 +1428,7 @@ impl ChangeSetReader for ConsistentProvider { .bundle .reverts .clone() - .into_plain_state_reverts() + .to_plain_state_reverts() .accounts .into_iter() .flatten() diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 54186dca6f69..b4d2e5e48b87 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -7,7 +7,11 @@ use crate::{ PruneCheckpointReader, StageCheckpointReader, StateProviderBox, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; -use alloy_eips::BlockHashOrNumber; +use alloy_consensus::Header; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, +}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use core::fmt; use reth_chainspec::{ChainInfo, EthereumHardforks}; @@ -17,9 +21,8 @@ use reth_errors::{RethError, RethResult}; use reth_evm::ConfigureEvmEnv; use reth_node_types::NodeTypesWithDB; use reth_primitives::{ - Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - StaticFileSegment, TransactionMeta, TransactionSigned, TransactionSignedNoHash, Withdrawal, - Withdrawals, + Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, + StaticFileSegment, TransactionMeta, TransactionSigned, TransactionSignedNoHash, }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -130,7 +133,7 @@ impl ProviderFactory { /// This sets the [`PruneModes`] to [`None`], because they should only be relevant for writing /// data. #[track_caller] - pub fn provider(&self) -> ProviderResult> { + pub fn provider(&self) -> ProviderResult> { Ok(DatabaseProvider::new( self.db.tx()?, self.chain_spec.clone(), @@ -144,7 +147,7 @@ impl ProviderFactory { /// [`BlockHashReader`]. This may fail if the inner read/write database transaction fails to /// open. 
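The hunks above collapse the `seal_slow()` / `into_parts()` / `SealedHeader::new` sequence into the single `SealedHeader::seal` constructor. A minimal before/after sketch using only types the diff already imports:

```rust
use alloy_consensus::Header;
use reth_primitives::SealedHeader;

fn seal(header: Header) -> SealedHeader {
    // Before this change:
    //     let sealed = header.seal_slow();
    //     let (header, seal) = sealed.into_parts();
    //     SealedHeader::new(header, seal)
    SealedHeader::seal(header)
}
```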
#[track_caller] - pub fn provider_rw(&self) -> ProviderResult> { + pub fn provider_rw(&self) -> ProviderResult> { Ok(DatabaseProviderRW(DatabaseProvider::new_rw( self.db.tx_mut()?, self.chain_spec.clone(), @@ -186,8 +189,8 @@ impl ProviderFactory { impl DatabaseProviderFactory for ProviderFactory { type DB = N::DB; - type Provider = DatabaseProvider<::TX, N::ChainSpec>; - type ProviderRW = DatabaseProvider<::TXMut, N::ChainSpec>; + type Provider = DatabaseProvider<::TX, N>; + type ProviderRW = DatabaseProvider<::TXMut, N>; fn database_provider_ro(&self) -> ProviderResult { self.provider() @@ -622,7 +625,8 @@ mod tests { use crate::{ providers::{StaticFileProvider, StaticFileWriter}, test_utils::{blocks::TEST_BLOCK, create_test_provider_factory, MockNodeTypesWithDB}, - BlockHashReader, BlockNumReader, BlockWriter, HeaderSyncGapProvider, TransactionsProvider, + BlockHashReader, BlockNumReader, BlockWriter, DBProvider, HeaderSyncGapProvider, + TransactionsProvider, }; use alloy_primitives::{TxNumber, B256, U256}; use assert_matches::assert_matches; diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 308fa364a3d3..eef0ff5b668c 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -6,17 +6,20 @@ use crate::{ AccountExtReader, BlockSource, ChangeSetReader, ReceiptProvider, StageCheckpointWriter, }, writer::UnifiedStorageWriter, - AccountReader, BlockExecutionReader, BlockExecutionWriter, BlockHashReader, BlockNumReader, - BlockReader, BlockWriter, BundleStateInit, ChainStateBlockReader, ChainStateBlockWriter, - DBProvider, EvmEnvProvider, HashingWriter, HeaderProvider, HeaderSyncGap, - HeaderSyncGapProvider, HistoricalStateProvider, HistoricalStateProviderRef, HistoryWriter, - LatestStateProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, - PruneCheckpointReader, PruneCheckpointWriter, RevertsInit, StageCheckpointReader, - StateChangeWriter, StateProviderBox, StateReader, StateWriter, StaticFileProviderFactory, - StatsReader, StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider, - TransactionsProviderExt, TrieWriter, WithdrawalsProvider, + AccountReader, BlockExecutionWriter, BlockHashReader, BlockNumReader, BlockReader, BlockWriter, + BundleStateInit, ChainStateBlockReader, ChainStateBlockWriter, DBProvider, EvmEnvProvider, + HashingWriter, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, HistoricalStateProvider, + HistoricalStateProviderRef, HistoryWriter, LatestStateProvider, LatestStateProviderRef, + OriginalValuesKnown, ProviderError, PruneCheckpointReader, PruneCheckpointWriter, RevertsInit, + StageCheckpointReader, StateChangeWriter, StateProviderBox, StateReader, StateWriter, + StaticFileProviderFactory, StatsReader, StorageReader, StorageTrieWriter, TransactionVariant, + TransactionsProvider, TransactionsProviderExt, TrieWriter, WithdrawalsProvider, +}; +use alloy_consensus::Header; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, }; -use alloy_eips::BlockHashOrNumber; use alloy_primitives::{keccak256, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use itertools::{izip, Itertools}; use rayon::slice::ParallelSliceMut; @@ -25,7 +28,6 @@ use reth_db::{ cursor::DbDupCursorRW, tables, BlockNumberList, PlainAccountState, PlainStorageState, }; use reth_db_api::{ - common::KeyValue, cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, 
database::Database, models::{ @@ -39,11 +41,11 @@ use reth_db_api::{ use reth_evm::ConfigureEvmEnv; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_network_p2p::headers::downloader::SyncTarget; +use reth_node_types::NodeTypes; use reth_primitives::{ - Account, Block, BlockBody, BlockWithSenders, Bytecode, GotExpected, Header, Receipt, - SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, - TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, - Withdrawal, Withdrawals, + Account, Block, BlockBody, BlockWithSenders, Bytecode, GotExpected, Receipt, SealedBlock, + SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, TransactionMeta, + TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -63,7 +65,7 @@ use std::{ cmp::Ordering, collections::{hash_map, BTreeMap, BTreeSet, HashMap, HashSet}, fmt::Debug, - ops::{Bound, Deref, DerefMut, Range, RangeBounds, RangeInclusive}, + ops::{Deref, DerefMut, Range, RangeBounds, RangeInclusive}, sync::{mpsc, Arc}, time::{Duration, Instant}, }; @@ -71,40 +73,40 @@ use tokio::sync::watch; use tracing::{debug, error, trace, warn}; /// A [`DatabaseProvider`] that holds a read-only database transaction. -pub type DatabaseProviderRO = DatabaseProvider<::TX, Spec>; +pub type DatabaseProviderRO = DatabaseProvider<::TX, N>; /// A [`DatabaseProvider`] that holds a read-write database transaction. /// /// Ideally this would be an alias type. However, there's some weird compiler error (), that forces us to wrap this in a struct instead. /// Once that issue is solved, we can probably revert back to being an alias type. #[derive(Debug)] -pub struct DatabaseProviderRW( - pub DatabaseProvider<::TXMut, Spec>, +pub struct DatabaseProviderRW( + pub DatabaseProvider<::TXMut, N>, ); -impl Deref for DatabaseProviderRW { - type Target = DatabaseProvider<::TXMut, Spec>; +impl Deref for DatabaseProviderRW { + type Target = DatabaseProvider<::TXMut, N>; fn deref(&self) -> &Self::Target { &self.0 } } -impl DerefMut for DatabaseProviderRW { +impl DerefMut for DatabaseProviderRW { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } -impl AsRef::TXMut, Spec>> - for DatabaseProviderRW +impl AsRef::TXMut, N>> + for DatabaseProviderRW { - fn as_ref(&self) -> &DatabaseProvider<::TXMut, Spec> { + fn as_ref(&self) -> &DatabaseProvider<::TXMut, N> { &self.0 } } -impl DatabaseProviderRW { +impl DatabaseProviderRW { /// Commit database transaction and static file if it exists. pub fn commit(self) -> ProviderResult { self.0.commit() @@ -116,10 +118,10 @@ impl DatabaseProviderRW { } } -impl From> - for DatabaseProvider<::TXMut, Spec> +impl From> + for DatabaseProvider<::TXMut, N> { - fn from(provider: DatabaseProviderRW) -> Self { + fn from(provider: DatabaseProviderRW) -> Self { provider.0 } } @@ -127,25 +129,25 @@ impl From> /// A provider struct that fetches data from the database. /// Wrapper around [`DbTx`] and [`DbTxMut`]. Example: [`HeaderProvider`] [`BlockHashReader`] #[derive(Debug)] -pub struct DatabaseProvider { +pub struct DatabaseProvider { /// Database transaction. tx: TX, /// Chain spec - chain_spec: Arc, + chain_spec: Arc, /// Static File provider static_file_provider: StaticFileProvider, /// Pruning configuration prune_modes: PruneModes, } -impl DatabaseProvider { +impl DatabaseProvider { /// Returns reference to prune modes. 
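With `DatabaseProvider` now generic over `NodeTypes` rather than a bare `ChainSpec`, downstream helpers bound on node types get a `DatabaseProviderRO<N::DB, N>` back from the factory. A sketch; the re-export paths are assumptions:

```rust
use reth_provider::{providers::ProviderNodeTypes, BlockNumReader, ProviderError, ProviderFactory};

/// Hypothetical helper: generic over the node types `N`, not over a chain spec.
fn best_block<N: ProviderNodeTypes>(factory: &ProviderFactory<N>) -> Result<u64, ProviderError> {
    let provider = factory.provider()?; // DatabaseProviderRO<N::DB, N>
    provider.best_block_number()
}
```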
pub const fn prune_modes_ref(&self) -> &PruneModes { &self.prune_modes } } -impl DatabaseProvider { +impl DatabaseProvider { /// State provider for latest block pub fn latest<'a>(&'a self) -> ProviderResult> { trace!(target: "providers::db", "Returning latest state provider"); @@ -203,28 +205,28 @@ impl DatabaseProvider { } } -impl StaticFileProviderFactory for DatabaseProvider { +impl StaticFileProviderFactory for DatabaseProvider { /// Returns a static file provider fn static_file_provider(&self) -> StaticFileProvider { self.static_file_provider.clone() } } -impl ChainSpecProvider - for DatabaseProvider +impl> ChainSpecProvider + for DatabaseProvider { - type ChainSpec = Spec; + type ChainSpec = N::ChainSpec; fn chain_spec(&self) -> Arc { self.chain_spec.clone() } } -impl DatabaseProvider { +impl DatabaseProvider { /// Creates a provider with an inner read-write transaction. pub const fn new_rw( tx: TX, - chain_spec: Arc, + chain_spec: Arc, static_file_provider: StaticFileProvider, prune_modes: PruneModes, ) -> Self { @@ -232,15 +234,13 @@ impl DatabaseProvider { } } -impl AsRef for DatabaseProvider { +impl AsRef for DatabaseProvider { fn as_ref(&self) -> &Self { self } } -impl TryIntoHistoricalStateProvider - for DatabaseProvider -{ +impl TryIntoHistoricalStateProvider for DatabaseProvider { fn try_into_history_at_block( self, mut block_number: BlockNumber, @@ -283,8 +283,8 @@ impl TryIntoHistoricalStateProvider } } -impl - DatabaseProvider +impl + 'static> + DatabaseProvider { // TODO: uncomment below, once `reth debug_cmd` has been feature gated with dev. // #[cfg(any(test, feature = "test-utils"))] @@ -366,11 +366,11 @@ where Ok(Vec::new()) } -impl DatabaseProvider { +impl DatabaseProvider { /// Creates a provider with an inner read-only transaction. pub const fn new( tx: TX, - chain_spec: Arc, + chain_spec: Arc, static_file_provider: StaticFileProvider, prune_modes: PruneModes, ) -> Self { @@ -393,79 +393,10 @@ impl DatabaseProvider { } /// Returns a reference to the chain specification. - pub fn chain_spec(&self) -> &Spec { + pub fn chain_spec(&self) -> &N::ChainSpec { &self.chain_spec } - /// Disables long-lived read transaction safety guarantees for leaks prevention and - /// observability improvements. - /// - /// CAUTION: In most of the cases, you want the safety guarantees for long read transactions - /// enabled. Use this only if you're sure that no write transaction is open in parallel, meaning - /// that Reth as a node is offline and not progressing. - pub fn disable_long_read_transaction_safety(mut self) -> Self { - self.tx.disable_long_read_transaction_safety(); - self - } - - /// Return full table as Vec - pub fn table(&self) -> Result>, DatabaseError> - where - T::Key: Default + Ord, - { - self.tx - .cursor_read::()? - .walk(Some(T::Key::default()))? - .collect::, DatabaseError>>() - } - - /// Return a list of entries from the table, based on the given range. - #[inline] - pub fn get( - &self, - range: impl RangeBounds, - ) -> Result>, DatabaseError> { - self.tx.cursor_read::()?.walk_range(range)?.collect::, _>>() - } - - /// Iterates over read only values in the given table and collects them into a vector. - /// - /// Early-returns if the range is empty, without opening a cursor transaction. 
-    fn cursor_read_collect<T: Table<Key = u64>>(
-        &self,
-        range: impl RangeBounds<T::Key>,
-    ) -> ProviderResult<Vec<T::Value>> {
-        let capacity = match range_size_hint(&range) {
-            Some(0) | None => return Ok(Vec::new()),
-            Some(capacity) => capacity,
-        };
-        let mut cursor = self.tx.cursor_read::<T>()?;
-        self.cursor_collect_with_capacity(&mut cursor, range, capacity)
-    }
-
-    /// Iterates over read only values in the given table and collects them into a vector.
-    fn cursor_collect<T: Table<Key = u64>>(
-        &self,
-        cursor: &mut impl DbCursorRO<T>,
-        range: impl RangeBounds<T::Key>,
-    ) -> ProviderResult<Vec<T::Value>> {
-        let capacity = range_size_hint(&range).unwrap_or(0);
-        self.cursor_collect_with_capacity(cursor, range, capacity)
-    }
-
-    fn cursor_collect_with_capacity<T: Table<Key = u64>>(
-        &self,
-        cursor: &mut impl DbCursorRO<T>,
-        range: impl RangeBounds<T::Key>,
-        capacity: usize,
-    ) -> ProviderResult<Vec<T::Value>> {
-        let mut items = Vec::with_capacity(capacity);
-        for entry in cursor.walk_range(range)? {
-            items.push(entry?.1);
-        }
-        Ok(items)
-    }
-
    fn transactions_by_tx_range_with_cursor<C>(
        &self,
        range: impl RangeBounds<TxNumber>,
@@ -491,7 +422,7 @@ impl DatabaseProvider {
        construct_block: BF,
    ) -> ProviderResult<Option<B>>
    where
-        Spec: EthereumHardforks,
+        N::ChainSpec: EthereumHardforks,
        H: AsRef<Header>,
        HF: FnOnce(BlockNumber) -> ProviderResult<Option<H>>,
        BF: FnOnce(
@@ -557,7 +488,7 @@ impl DatabaseProvider {
        mut assemble_block: F,
    ) -> ProviderResult<Vec<B>>
    where
-        Spec: EthereumHardforks,
+        N::ChainSpec: EthereumHardforks,
        H: AsRef<Header>,
        HF: FnOnce(RangeInclusive<BlockNumber>) -> ProviderResult<Vec<H>>,
        F: FnMut(H, Range<TxNumber>, Vec<Header>, Option<Withdrawals>) -> ProviderResult<B>,
@@ -634,7 +565,7 @@ impl DatabaseProvider {
        assemble_block: BF,
    ) -> ProviderResult<Vec<B>>
    where
-        Spec: EthereumHardforks,
+        N::ChainSpec: EthereumHardforks,
        H: AsRef<Header>,
        HF: Fn(RangeInclusive<BlockNumber>) -> ProviderResult<Vec<H>>,
        BF: Fn(
@@ -684,152 +615,6 @@ impl DatabaseProvider {
        })
    }

-    /// Get requested blocks transaction with senders
-    pub(crate) fn get_block_transaction_range(
-        &self,
-        range: impl RangeBounds<BlockNumber> + Clone,
-    ) -> ProviderResult<Vec<(BlockNumber, Vec<TransactionSignedEcRecovered>)>> {
-        // Raad range of block bodies to get all transactions id's of this range.
-        let block_bodies = self.get::<tables::BlockBodyIndices>(range)?;
-
-        if block_bodies.is_empty() {
-            return Ok(Vec::new())
-        }
-
-        // Compute the first and last tx ID in the range
-        let first_transaction = block_bodies.first().expect("If we have headers").1.first_tx_num();
-        let last_transaction = block_bodies.last().expect("Not empty").1.last_tx_num();
-
-        // If this is the case then all of the blocks in the range are empty
-        if last_transaction < first_transaction {
-            return Ok(block_bodies.into_iter().map(|(n, _)| (n, Vec::new())).collect())
-        }
-
-        // Get transactions and senders
-        let transactions = self
-            .get::<tables::Transactions>(first_transaction..=last_transaction)?
-            .into_iter()
-            .map(|(id, tx)| (id, tx.into()))
-            .collect::<Vec<(u64, TransactionSigned)>>();
-
-        let mut senders =
-            self.get::<tables::TransactionSenders>(first_transaction..=last_transaction)?;
-
-        recover_block_senders(&mut senders, &transactions, first_transaction, last_transaction)?;
-
-        // Merge transaction into blocks
-        let mut block_tx = Vec::with_capacity(block_bodies.len());
-        let mut senders = senders.into_iter();
-        let mut transactions = transactions.into_iter();
-        for (block_number, block_body) in block_bodies {
-            let mut one_block_tx = Vec::with_capacity(block_body.tx_count as usize);
-            for _ in block_body.tx_num_range() {
-                let tx = transactions.next();
-                let sender = senders.next();
-
-                let recovered = match (tx, sender) {
-                    (Some((tx_id, tx)), Some((sender_tx_id, sender))) => {
-                        if tx_id == sender_tx_id {
-                            Ok(TransactionSignedEcRecovered::from_signed_transaction(tx, sender))
-                        } else {
-                            Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id })
-                        }
-                    }
-                    (Some((tx_id, _)), _) | (_, Some((tx_id, _))) => {
-                        Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id })
-                    }
-                    (None, None) => Err(ProviderError::BlockBodyTransactionCount),
-                }?;
-                one_block_tx.push(recovered)
-            }
-            block_tx.push((block_number, one_block_tx));
-        }
-
-        Ok(block_tx)
-    }
-
-    /// Get the given range of blocks.
- pub fn get_block_range( - &self, - range: impl RangeBounds + Clone, - ) -> ProviderResult> - where - Spec: EthereumHardforks, - { - // For blocks we need: - // - // - Headers - // - Bodies (transactions) - // - Uncles/ommers - // - Withdrawals - // - Signers - - let block_headers = self.get::(range.clone())?; - if block_headers.is_empty() { - return Ok(Vec::new()) - } - - let block_header_hashes = self.get::(range.clone())?; - let block_ommers = self.get::(range.clone())?; - let block_withdrawals = self.get::(range.clone())?; - - let block_tx = self.get_block_transaction_range(range)?; - let mut blocks = Vec::with_capacity(block_headers.len()); - - // merge all into block - let block_header_iter = block_headers.into_iter(); - let block_header_hashes_iter = block_header_hashes.into_iter(); - let block_tx_iter = block_tx.into_iter(); - - // Ommers can be empty for some blocks - let mut block_ommers_iter = block_ommers.into_iter(); - let mut block_withdrawals_iter = block_withdrawals.into_iter(); - let mut block_ommers = block_ommers_iter.next(); - let mut block_withdrawals = block_withdrawals_iter.next(); - - for ((main_block_number, header), (_, header_hash), (_, tx)) in - izip!(block_header_iter, block_header_hashes_iter, block_tx_iter) - { - let header = SealedHeader::new(header, header_hash); - - let (transactions, senders) = tx.into_iter().map(|tx| tx.to_components()).unzip(); - - // Ommers can be missing - let mut ommers = Vec::new(); - if let Some((block_number, _)) = block_ommers.as_ref() { - if *block_number == main_block_number { - ommers = block_ommers.take().unwrap().1.ommers; - block_ommers = block_ommers_iter.next(); - } - }; - - // withdrawal can be missing - let shanghai_is_active = - self.chain_spec.is_shanghai_active_at_timestamp(header.timestamp); - let mut withdrawals = Some(Withdrawals::default()); - if shanghai_is_active { - if let Some((block_number, _)) = block_withdrawals.as_ref() { - if *block_number == main_block_number { - withdrawals = Some(block_withdrawals.take().unwrap().1.withdrawals); - block_withdrawals = block_withdrawals_iter.next(); - } - } - } else { - withdrawals = None - } - - blocks.push(SealedBlockWithSenders { - block: SealedBlock { - header, - body: BlockBody { transactions, ommers, withdrawals }, - }, - senders, - }) - } - - Ok(blocks) - } - /// Return the last N blocks of state, recreating the [`ExecutionOutcome`]. /// /// 1. Iterate over the [`BlockBodyIndices`][tables::BlockBodyIndices] table to get all the @@ -1000,44 +785,12 @@ impl DatabaseProvider { } } -impl DatabaseProvider { +impl DatabaseProvider { /// Commit database transaction. pub fn commit(self) -> ProviderResult { Ok(self.tx.commit()?) } - /// Remove list of entries from the table. Returns the number of entries removed. - #[inline] - pub fn remove( - &self, - range: impl RangeBounds, - ) -> Result { - let mut entries = 0; - let mut cursor_write = self.tx.cursor_write::()?; - let mut walker = cursor_write.walk_range(range)?; - while walker.next().transpose()?.is_some() { - walker.delete_current()?; - entries += 1; - } - Ok(entries) - } - - /// Return a list of entries from the table, and remove them, based on the given range. - #[inline] - pub fn take( - &self, - range: impl RangeBounds, - ) -> Result>, DatabaseError> { - let mut cursor_write = self.tx.cursor_write::()?; - let mut walker = cursor_write.walk_range(range)?; - let mut items = Vec::new(); - while let Some(i) = walker.next().transpose()? 
{ - walker.delete_current()?; - items.push(i) - } - Ok(items) - } - /// Remove requested block transactions, without returning them. /// /// This will remove block data for the given range from the following tables: @@ -1226,7 +979,7 @@ impl DatabaseProvider { range: impl RangeBounds + Clone, ) -> ProviderResult> where - Spec: EthereumHardforks, + N::ChainSpec: EthereumHardforks, { // For blocks we need: // @@ -1365,13 +1118,13 @@ impl DatabaseProvider { } } -impl AccountReader for DatabaseProvider { +impl AccountReader for DatabaseProvider { fn basic_account(&self, address: Address) -> ProviderResult> { Ok(self.tx.get::(address)?) } } -impl AccountExtReader for DatabaseProvider { +impl AccountExtReader for DatabaseProvider { fn changed_accounts_with_range( &self, range: impl RangeBounds, @@ -1415,7 +1168,7 @@ impl AccountExtReader for DatabaseProvider StorageChangeSetReader for DatabaseProvider { +impl StorageChangeSetReader for DatabaseProvider { fn storage_changeset( &self, block_number: BlockNumber, @@ -1430,7 +1183,7 @@ impl StorageChangeSetReader for DatabaseProvider ChangeSetReader for DatabaseProvider { +impl ChangeSetReader for DatabaseProvider { fn account_block_changeset( &self, block_number: BlockNumber, @@ -1447,7 +1200,7 @@ impl ChangeSetReader for DatabaseProvider } } -impl HeaderSyncGapProvider for DatabaseProvider { +impl HeaderSyncGapProvider for DatabaseProvider { fn sync_gap( &self, tip: watch::Receiver, @@ -1491,8 +1244,8 @@ impl HeaderSyncGapProvider for DatabaseProvider HeaderProvider - for DatabaseProvider +impl> HeaderProvider + for DatabaseProvider { fn header(&self, block_hash: &BlockHash) -> ProviderResult> { if let Some(num) = self.block_number(*block_hash)? { @@ -1591,7 +1344,7 @@ impl HeaderProvider } } -impl BlockHashReader for DatabaseProvider { +impl BlockHashReader for DatabaseProvider { fn block_hash(&self, number: u64) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Headers, @@ -1618,7 +1371,7 @@ impl BlockHashReader for DatabaseProvider } } -impl BlockNumReader for DatabaseProvider { +impl BlockNumReader for DatabaseProvider { fn chain_info(&self) -> ProviderResult { let best_number = self.best_block_number()?; let best_hash = self.block_hash(best_number)?.unwrap_or_default(); @@ -1649,7 +1402,9 @@ impl BlockNumReader for DatabaseProvider } } -impl BlockReader for DatabaseProvider { +impl> BlockReader + for DatabaseProvider +{ fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { if source.is_canonical() { self.block(hash.into()) @@ -1824,8 +1579,8 @@ impl BlockReader for DatabasePr } } -impl TransactionsProviderExt - for DatabaseProvider +impl> TransactionsProviderExt + for DatabaseProvider { /// Recovers transaction hashes by walking through `Transactions` table and /// calculating them in a parallel manner. Returned unsorted. @@ -1852,7 +1607,7 @@ impl TransactionsProviderExt rlp_buf: &mut Vec, ) -> Result<(B256, TxNumber), Box> { let (tx_id, tx) = entry.map_err(|e| Box::new(e.into()))?; - tx.transaction.encode_with_signature(&tx.signature, rlp_buf, false); + tx.transaction.eip2718_encode(&tx.signature, rlp_buf); Ok((keccak256(rlp_buf), tx_id)) } @@ -1894,8 +1649,8 @@ impl TransactionsProviderExt } // Calculates the hash of the given transaction -impl TransactionsProvider - for DatabaseProvider +impl> TransactionsProvider + for DatabaseProvider { fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { Ok(self.tx.get::(tx_hash)?) 
@@ -2054,8 +1809,8 @@ impl TransactionsProvider } } -impl ReceiptProvider - for DatabaseProvider +impl> ReceiptProvider + for DatabaseProvider { fn receipt(&self, id: TxNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( @@ -2102,8 +1857,8 @@ impl ReceiptProvider } } -impl WithdrawalsProvider - for DatabaseProvider +impl> WithdrawalsProvider + for DatabaseProvider { fn withdrawals_by_block( &self, @@ -2132,8 +1887,8 @@ impl WithdrawalsProvider } } -impl EvmEnvProvider - for DatabaseProvider +impl> EvmEnvProvider + for DatabaseProvider { fn fill_env_at( &self, @@ -2198,11 +1953,16 @@ impl EvmEnvProvider } } -impl StageCheckpointReader for DatabaseProvider { +impl StageCheckpointReader for DatabaseProvider { fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { Ok(self.tx.get::(id.to_string())?) } + /// Get stage checkpoint progress. + fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { + Ok(self.tx.get::(id.to_string())?) + } + fn get_all_checkpoints(&self) -> ProviderResult> { self.tx .cursor_read::()? @@ -2210,14 +1970,9 @@ impl StageCheckpointReader for DatabaseProvider, _>>() .map_err(ProviderError::Database) } - - /// Get stage checkpoint progress. - fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { - Ok(self.tx.get::(id.to_string())?) - } } -impl StageCheckpointWriter for DatabaseProvider { +impl StageCheckpointWriter for DatabaseProvider { /// Save stage checkpoint. fn save_stage_checkpoint( &self, @@ -2258,7 +2013,7 @@ impl StageCheckpointWriter for DatabaseProvider< } } -impl StorageReader for DatabaseProvider { +impl StorageReader for DatabaseProvider { fn plain_state_storages( &self, addresses_with_keys: impl IntoIterator)>, @@ -2321,7 +2076,7 @@ impl StorageReader for DatabaseProvider { } } -impl StateChangeWriter for DatabaseProvider { +impl StateChangeWriter for DatabaseProvider { fn write_state_reverts( &self, reverts: PlainStateReverts, @@ -2698,7 +2453,7 @@ impl StateChangeWriter for DatabaseProvid } } -impl TrieWriter for DatabaseProvider { +impl TrieWriter for DatabaseProvider { /// Writes trie updates. Returns the number of entries modified. fn write_trie_updates(&self, trie_updates: &TrieUpdates) -> ProviderResult { if trie_updates.is_empty() { @@ -2748,7 +2503,7 @@ impl TrieWriter for DatabaseProvider StorageTrieWriter for DatabaseProvider { +impl StorageTrieWriter for DatabaseProvider { /// Writes storage trie updates from the given storage trie map. First sorts the storage trie /// updates by the hashed address, writing in sorted order. fn write_storage_trie_updates( @@ -2785,20 +2540,18 @@ impl StorageTrieWriter for DatabaseProvid } } -impl HashingWriter for DatabaseProvider { - fn unwind_account_hashing( +impl HashingWriter for DatabaseProvider { + fn unwind_account_hashing<'a>( &self, - range: RangeInclusive, + changesets: impl Iterator, ) -> ProviderResult>> { // Aggregate all block changesets and make a list of accounts that have been changed. // Note that collecting and then reversing the order is necessary to ensure that the // changes are applied in the correct order. - let hashed_accounts = self - .tx - .cursor_read::()? - .walk_range(range)? - .map(|entry| entry.map(|(_, e)| (keccak256(e.address), e.info))) - .collect::, _>>()? 
+ let hashed_accounts = changesets + .into_iter() + .map(|(_, e)| (keccak256(e.address), e.info)) + .collect::>() .into_iter() .rev() .collect::>(); @@ -2816,13 +2569,25 @@ impl HashingWriter for DatabaseProvider, + ) -> ProviderResult>> { + let changesets = self + .tx + .cursor_read::()? + .walk_range(range)? + .collect::, _>>()?; + self.unwind_account_hashing(changesets.iter()) + } + fn insert_account_for_hashing( &self, - accounts: impl IntoIterator)>, + changesets: impl IntoIterator)>, ) -> ProviderResult>> { let mut hashed_accounts_cursor = self.tx.cursor_write::()?; let hashed_accounts = - accounts.into_iter().map(|(ad, ac)| (keccak256(ad), ac)).collect::>(); + changesets.into_iter().map(|(ad, ac)| (keccak256(ad), ac)).collect::>(); for (hashed_address, account) in &hashed_accounts { if let Some(account) = account { hashed_accounts_cursor.upsert(*hashed_address, *account)?; @@ -2835,18 +2600,15 @@ impl HashingWriter for DatabaseProvider, + changesets: impl Iterator, ) -> ProviderResult>> { // Aggregate all block changesets and make list of accounts that have been changed. - let mut changesets = self.tx.cursor_read::()?; let mut hashed_storages = changesets - .walk_range(range)? - .map(|entry| { - entry.map(|(BlockNumberAddress((_, address)), storage_entry)| { - (keccak256(address), keccak256(storage_entry.key), storage_entry.value) - }) + .into_iter() + .map(|(BlockNumberAddress((_, address)), storage_entry)| { + (keccak256(address), keccak256(storage_entry.key), storage_entry.value) }) - .collect::, _>>()?; + .collect::>(); hashed_storages.sort_by_key(|(ha, hk, _)| (*ha, *hk)); // Apply values to HashedState, and remove the account if it's None. @@ -2871,6 +2633,18 @@ impl HashingWriter for DatabaseProvider, + ) -> ProviderResult>> { + let changesets = self + .tx + .cursor_read::()? + .walk_range(range)? + .collect::, _>>()?; + self.unwind_storage_hashing(changesets.into_iter()) + } + fn insert_storage_for_hashing( &self, storages: impl IntoIterator)>, @@ -2991,17 +2765,15 @@ impl HashingWriter for DatabaseProvider HistoryWriter for DatabaseProvider { - fn unwind_account_history_indices( +impl HistoryWriter for DatabaseProvider { + fn unwind_account_history_indices<'a>( &self, - range: RangeInclusive, + changesets: impl Iterator, ) -> ProviderResult { - let mut last_indices = self - .tx - .cursor_read::()? - .walk_range(range)? - .map(|entry| entry.map(|(index, account)| (account.address, index))) - .collect::, _>>()?; + let mut last_indices = changesets + .into_iter() + .map(|(index, account)| (account.address, *index)) + .collect::>(); last_indices.sort_by_key(|(a, _)| *a); // Unwind the account history index. @@ -3028,6 +2800,18 @@ impl HistoryWriter for DatabaseProvider, + ) -> ProviderResult { + let changesets = self + .tx + .cursor_read::()? + .walk_range(range)? + .collect::, _>>()?; + self.unwind_account_history_indices(changesets.iter()) + } + fn insert_account_history_index( &self, account_transitions: impl IntoIterator)>, @@ -3040,16 +2824,12 @@ impl HistoryWriter for DatabaseProvider, + changesets: impl Iterator, ) -> ProviderResult { - let mut storage_changesets = self - .tx - .cursor_read::()? - .walk_range(range)? 
- .map(|entry| { - entry.map(|(BlockNumberAddress((bn, address)), storage)| (address, storage.key, bn)) - }) - .collect::, _>>()?; + let mut storage_changesets = changesets + .into_iter() + .map(|(BlockNumberAddress((bn, address)), storage)| (address, storage.key, bn)) + .collect::>(); storage_changesets.sort_by_key(|(address, key, _)| (*address, *key)); let mut cursor = self.tx.cursor_write::()?; @@ -3078,6 +2858,18 @@ impl HistoryWriter for DatabaseProvider, + ) -> ProviderResult { + let changesets = self + .tx + .cursor_read::()? + .walk_range(range)? + .collect::, _>>()?; + self.unwind_storage_history_indices(changesets.into_iter()) + } + fn insert_storage_history_index( &self, storage_transitions: impl IntoIterator)>, @@ -3107,40 +2899,27 @@ impl HistoryWriter for DatabaseProvider BlockExecutionReader - for DatabaseProvider -{ - fn get_block_and_execution_range( - &self, - range: RangeInclusive, - ) -> ProviderResult { - // get blocks - let blocks = self.get_block_range(range.clone())?; - - // get execution res - let execution_state = self.get_state(range)?.unwrap_or_default(); - - Ok(Chain::new(blocks, execution_state, None)) - } -} - -impl StateReader for DatabaseProvider { +impl StateReader for DatabaseProvider { fn get_state(&self, block: BlockNumber) -> ProviderResult> { self.get_state(block..=block) } } -impl - BlockExecutionWriter for DatabaseProvider +impl + 'static> + BlockExecutionWriter for DatabaseProvider { fn take_block_and_execution_range( &self, range: RangeInclusive, ) -> ProviderResult { - let storage_range = BlockNumberAddress::range(range.clone()); + let changed_accounts = self + .tx + .cursor_read::()? + .walk_range(range.clone())? + .collect::, _>>()?; // Unwind account hashes. Add changed accounts to account prefix set. - let hashed_addresses = self.unwind_account_hashing(range.clone())?; + let hashed_addresses = self.unwind_account_hashing(changed_accounts.iter())?; let mut account_prefix_set = PrefixSetMut::with_capacity(hashed_addresses.len()); let mut destroyed_accounts = HashSet::default(); for (hashed_address, account) in hashed_addresses { @@ -3151,12 +2930,19 @@ impl()? + .walk_range(storage_range)? + .collect::, _>>()?; // Unwind storage hashes. Add changed account and storage keys to corresponding prefix // sets. let mut storage_prefix_sets = HashMap::::default(); - let storage_entries = self.unwind_storage_hashing(storage_range.clone())?; + let storage_entries = self.unwind_storage_hashing(changed_storages.iter().copied())?; for (hashed_address, hashed_slots) in storage_entries { account_prefix_set.insert(Nibbles::unpack(hashed_address)); let mut storage_prefix_set = PrefixSetMut::with_capacity(hashed_slots.len()); @@ -3167,7 +2953,7 @@ impl, ) -> ProviderResult<()> { - let storage_range = BlockNumberAddress::range(range.clone()); + let changed_accounts = self + .tx + .cursor_read::()? + .walk_range(range.clone())? + .collect::, _>>()?; // Unwind account hashes. Add changed accounts to account prefix set. - let hashed_addresses = self.unwind_account_hashing(range.clone())?; + let hashed_addresses = self.unwind_account_hashing(changed_accounts.iter())?; let mut account_prefix_set = PrefixSetMut::with_capacity(hashed_addresses.len()); let mut destroyed_accounts = HashSet::default(); for (hashed_address, account) in hashed_addresses { @@ -3239,12 +3029,19 @@ impl()? + .walk_range(storage_range)? + .collect::, _>>()?; // Unwind storage hashes. Add changed account and storage keys to corresponding prefix // sets. 
let mut storage_prefix_sets = HashMap::::default(); - let storage_entries = self.unwind_storage_hashing(storage_range.clone())?; + let storage_entries = self.unwind_storage_hashing(changed_storages.iter().copied())?; for (hashed_address, hashed_slots) in storage_entries { account_prefix_set.insert(Nibbles::unpack(hashed_address)); let mut storage_prefix_set = PrefixSetMut::with_capacity(hashed_slots.len()); @@ -3255,7 +3052,7 @@ impl BlockWriter - for DatabaseProvider +impl + 'static> BlockWriter + for DatabaseProvider { /// Inserts the block into the database, always modifying the following tables: /// * [`CanonicalHeaders`](tables::CanonicalHeaders) @@ -3523,7 +3320,7 @@ impl PruneCheckpointReader for DatabaseProvider { +impl PruneCheckpointReader for DatabaseProvider { fn get_prune_checkpoint( &self, segment: PruneSegment, @@ -3540,7 +3337,7 @@ impl PruneCheckpointReader for DatabaseProvider PruneCheckpointWriter for DatabaseProvider { +impl PruneCheckpointWriter for DatabaseProvider { fn save_prune_checkpoint( &self, segment: PruneSegment, @@ -3550,7 +3347,7 @@ impl PruneCheckpointWriter for DatabaseProvider< } } -impl StatsReader for DatabaseProvider { +impl StatsReader for DatabaseProvider { fn count_entries(&self) -> ProviderResult { let db_entries = self.tx.entries::()?; let static_file_entries = match self.static_file_provider.count_entries::() { @@ -3563,7 +3360,7 @@ impl StatsReader for DatabaseProvider { } } -impl ChainStateBlockReader for DatabaseProvider { +impl ChainStateBlockReader for DatabaseProvider { fn last_finalized_block_number(&self) -> ProviderResult> { let mut finalized_blocks = self .tx @@ -3589,7 +3386,7 @@ impl ChainStateBlockReader for DatabaseProvider ChainStateBlockWriter for DatabaseProvider { +impl ChainStateBlockWriter for DatabaseProvider { fn save_finalized_block_number(&self, block_number: BlockNumber) -> ProviderResult<()> { Ok(self .tx @@ -3603,7 +3400,7 @@ impl ChainStateBlockWriter for DatabaseProvider< } } -impl DBProvider for DatabaseProvider { +impl DBProvider for DatabaseProvider { type Tx = TX; fn tx_ref(&self) -> &Self::Tx { @@ -3698,17 +3495,3 @@ fn recover_block_senders( Ok(()) } - -fn range_size_hint(range: &impl RangeBounds) -> Option { - let start = match range.start_bound().cloned() { - Bound::Included(start) => start, - Bound::Excluded(start) => start.checked_add(1)?, - Bound::Unbounded => 0, - }; - let end = match range.end_bound().cloned() { - Bound::Included(end) => end.saturating_add(1), - Bound::Excluded(end) => end, - Bound::Unbounded => return None, - }; - end.checked_sub(start).map(|x| x as _) -} diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index c81ef05d2eaa..d1e1822d8c9d 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -7,8 +7,12 @@ use crate::{ StageCheckpointReader, StateProviderBox, StateProviderFactory, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, TreeViewer, WithdrawalsProvider, }; -use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag}; -use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; +use alloy_consensus::Header; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, +}; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use reth_blockchain_tree_api::{ error::{CanonicalError, InsertBlockError}, 
BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, @@ -20,9 +24,8 @@ use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_node_types::NodeTypesWithDB; use reth_primitives::{ - Account, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, - SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, Withdrawal, - Withdrawals, + Account, Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, + TransactionMeta, TransactionSigned, TransactionSignedNoHash, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -601,35 +604,6 @@ impl StateProviderFactory for BlockchainProvider { self.database.latest() } - fn history_by_block_number( - &self, - block_number: BlockNumber, - ) -> ProviderResult { - trace!(target: "providers::blockchain", ?block_number, "Getting history by block number"); - self.ensure_canonical_block(block_number)?; - self.database.history_by_block_number(block_number) - } - - fn history_by_block_hash(&self, block_hash: BlockHash) -> ProviderResult { - trace!(target: "providers::blockchain", ?block_hash, "Getting history by block hash"); - self.database.history_by_block_hash(block_hash) - } - - fn state_by_block_hash(&self, block: BlockHash) -> ProviderResult { - trace!(target: "providers::blockchain", ?block, "Getting state by block hash"); - let mut state = self.history_by_block_hash(block); - - // we failed to get the state by hash, from disk, hash block be the pending block - if state.is_err() { - if let Ok(Some(pending)) = self.pending_state_by_hash(block) { - // we found pending block by hash - state = Ok(pending) - } - } - - state - } - /// Returns a [`StateProviderBox`] indexed by the given block number or tag. /// /// Note: if a number is provided this will only look at historical(canonical) state. @@ -662,6 +636,35 @@ impl StateProviderFactory for BlockchainProvider { } } + fn history_by_block_number( + &self, + block_number: BlockNumber, + ) -> ProviderResult { + trace!(target: "providers::blockchain", ?block_number, "Getting history by block number"); + self.ensure_canonical_block(block_number)?; + self.database.history_by_block_number(block_number) + } + + fn history_by_block_hash(&self, block_hash: BlockHash) -> ProviderResult { + trace!(target: "providers::blockchain", ?block_hash, "Getting history by block hash"); + self.database.history_by_block_hash(block_hash) + } + + fn state_by_block_hash(&self, block: BlockHash) -> ProviderResult { + trace!(target: "providers::blockchain", ?block, "Getting state by block hash"); + let mut state = self.history_by_block_hash(block); + + // we failed to get the state by hash, from disk, hash block be the pending block + if state.is_err() { + if let Ok(Some(pending)) = self.pending_state_by_hash(block) { + // we found pending block by hash + state = Ok(pending) + } + } + + state + } + /// Returns the state provider for pending state. 
/// /// If there's no pending block available then the latest state provider is returned: @@ -845,34 +848,20 @@ where BlockNumberOrTag::Latest => Ok(Some(self.chain_info.get_canonical_head())), BlockNumberOrTag::Finalized => Ok(self.chain_info.get_finalized_header()), BlockNumberOrTag::Safe => Ok(self.chain_info.get_safe_header()), - BlockNumberOrTag::Earliest => self.header_by_number(0)?.map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ), + BlockNumberOrTag::Earliest => self + .header_by_number(0)? + .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))), BlockNumberOrTag::Pending => Ok(self.tree.pending_header()), - BlockNumberOrTag::Number(num) => self.header_by_number(num)?.map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ), + BlockNumberOrTag::Number(num) => self + .header_by_number(num)? + .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))), } } fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { Ok(match id { BlockId::Number(num) => self.sealed_header_by_number_or_tag(num)?, - BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }), + BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(SealedHeader::seal), }) } diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index 8d1dbd117cfb..9c303394ed22 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -6,13 +6,14 @@ use crate::{ to_range, BlockHashReader, BlockNumReader, HeaderProvider, ReceiptProvider, TransactionsProvider, }; +use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use reth_chainspec::ChainInfo; use reth_db::static_file::{HeaderMask, ReceiptMask, StaticFileCursor, TransactionMask}; use reth_db_api::models::CompactU256; use reth_primitives::{ - Header, Receipt, SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, + Receipt, SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, }; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index e81dc01f722c..a5d4537245dd 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -7,7 +7,11 @@ use crate::{ ReceiptProvider, StageCheckpointReader, StatsReader, TransactionVariant, TransactionsProvider, TransactionsProviderExt, WithdrawalsProvider, }; -use alloy_eips::BlockHashOrNumber; +use alloy_consensus::Header; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, +}; use alloy_primitives::{keccak256, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use dashmap::DashMap; use notify::{RecommendedWatcher, RecursiveMode, Watcher}; @@ -30,9 +34,8 @@ use reth_primitives::{ find_fixed_range, HighestStaticFiles, SegmentHeader, SegmentRangeInclusive, DEFAULT_BLOCKS_PER_STATIC_FILE, }, - Block, BlockWithSenders, Header, Receipt, 
SealedBlock, SealedBlockWithSenders, SealedHeader, - StaticFileSegment, TransactionMeta, TransactionSigned, TransactionSignedNoHash, Withdrawal, - Withdrawals, + Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, + StaticFileSegment, TransactionMeta, TransactionSigned, TransactionSignedNoHash, }; use reth_stages_types::{PipelineTarget, StageId}; use reth_storage_api::DBProvider; @@ -1646,7 +1649,7 @@ impl StatsReader for StaticFileProvider { fn count_entries(&self) -> ProviderResult { match T::NAME { tables::CanonicalHeaders::NAME | - tables::Headers::NAME | + tables::Headers::
::NAME | tables::HeaderTerminalDifficulties::NAME => Ok(self .get_highest_static_file_block(StaticFileSegment::Headers) .map(|block| block + 1) @@ -1656,10 +1659,11 @@ impl StatsReader for StaticFileProvider { .get_highest_static_file_tx(StaticFileSegment::Receipts) .map(|receipts| receipts + 1) .unwrap_or_default() as usize), - tables::Transactions::NAME => Ok(self + tables::Transactions::::NAME => Ok(self .get_highest_static_file_tx(StaticFileSegment::Transactions) .map(|txs| txs + 1) - .unwrap_or_default() as usize), + .unwrap_or_default() + as usize), _ => Err(ProviderError::UnsupportedProvider), } } @@ -1672,6 +1676,6 @@ fn calculate_hash( rlp_buf: &mut Vec, ) -> Result<(B256, TxNumber), Box> { let (tx_id, tx) = entry; - tx.transaction.encode_with_signature(&tx.signature, rlp_buf, false); + tx.transaction.eip2718_encode(&tx.signature, rlp_buf); Ok((keccak256(rlp_buf), tx_id)) } diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index 52eb6ed666ea..dd52adf52f87 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -56,7 +56,7 @@ impl Deref for LoadedJar { mod tests { use super::*; use crate::{test_utils::create_test_provider_factory, HeaderProvider}; - use alloy_consensus::Transaction; + use alloy_consensus::{Header, Transaction}; use alloy_primitives::{BlockHash, TxNumber, B256, U256}; use rand::seq::SliceRandom; use reth_db::{ @@ -66,7 +66,7 @@ mod tests { use reth_db_api::transaction::DbTxMut; use reth_primitives::{ static_file::{find_fixed_range, SegmentRangeInclusive, DEFAULT_BLOCKS_PER_STATIC_FILE}, - Header, Receipt, TransactionSignedNoHash, + Receipt, TransactionSignedNoHash, }; use reth_storage_api::{ReceiptProvider, TransactionsProvider}; use reth_testing_utils::generators::{self, random_header_range}; diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index 8c31c021f218..ed1a51068c30 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -2,6 +2,7 @@ use super::{ manager::StaticFileProviderInner, metrics::StaticFileProviderMetrics, StaticFileProvider, }; use crate::providers::static_file::metrics::StaticFileProviderOperation; +use alloy_consensus::Header; use alloy_primitives::{BlockHash, BlockNumber, TxNumber, U256}; use parking_lot::{lock_api::RwLockWriteGuard, RawRwLock, RwLock}; use reth_codecs::Compact; @@ -9,7 +10,7 @@ use reth_db_api::models::CompactU256; use reth_nippy_jar::{NippyJar, NippyJarError, NippyJarWriter}; use reth_primitives::{ static_file::{SegmentHeader, SegmentRangeInclusive}, - Header, Receipt, StaticFileSegment, TransactionSignedNoHash, + Receipt, StaticFileSegment, TransactionSignedNoHash, }; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 07486f5557cb..3259eee2bfbf 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -1,24 +1,27 @@ //! 
Dummy blocks and data for tests -use crate::{DatabaseProviderRW, ExecutionOutcome}; +use crate::{DBProvider, DatabaseProviderRW, ExecutionOutcome}; use alloy_consensus::{TxLegacy, EMPTY_OMMER_ROOT_HASH}; use alloy_primitives::{ - b256, hex_literal::hex, map::HashMap, Address, BlockNumber, Bytes, Log, Parity, Sealable, - TxKind, B256, U256, + b256, hex_literal::hex, map::HashMap, Address, BlockNumber, Bytes, Log, TxKind, B256, U256, }; +use alloy_consensus::Header; +use alloy_eips::eip4895::{Withdrawal, Withdrawals}; +use alloy_primitives::PrimitiveSignature as Signature; use reth_db::tables; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; +use reth_node_types::NodeTypes; use reth_primitives::{ - Account, BlockBody, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - Signature, Transaction, TransactionSigned, TxType, Withdrawal, Withdrawals, + Account, BlockBody, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, Transaction, + TransactionSigned, TxType, }; use reth_trie::root::{state_root_unhashed, storage_root_unhashed}; use revm::{db::BundleState, primitives::AccountInfo}; use std::{str::FromStr, sync::LazyLock}; /// Assert genesis block -pub fn assert_genesis_block( - provider: &DatabaseProviderRW, +pub fn assert_genesis_block( + provider: &DatabaseProviderRW, g: SealedBlock, ) { let n = g.number; @@ -96,7 +99,7 @@ pub(crate) static TEST_BLOCK: LazyLock = LazyLock::new(|| SealedBlo "29056683545955299640297374067888344259176096769870751649153779895496107008675", ) .unwrap(), - Parity::NonEip155(false), + false, ), transaction: Transaction::Legacy(TxLegacy { gas_price: 10, @@ -230,9 +233,7 @@ fn block1(number: BlockNumber) -> (SealedBlockWithSenders, ExecutionOutcome) { header.number = number; header.state_root = state_root; header.parent_hash = B256::ZERO; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - block.header = SealedHeader::new(header, seal); + block.header = SealedHeader::seal(header); (SealedBlockWithSenders { block, senders: vec![Address::new([0x30; 20])] }, execution_outcome) } @@ -296,9 +297,7 @@ fn block2( header.state_root = state_root; // parent_hash points to block1 hash header.parent_hash = parent_hash; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - block.header = SealedHeader::new(header, seal); + block.header = SealedHeader::seal(header); (SealedBlockWithSenders { block, senders: vec![Address::new([0x31; 20])] }, execution_outcome) } @@ -362,9 +361,7 @@ fn block3( header.state_root = state_root; // parent_hash points to block1 hash header.parent_hash = parent_hash; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - block.header = SealedHeader::new(header, seal); + block.header = SealedHeader::seal(header); (SealedBlockWithSenders { block, senders: vec![Address::new([0x31; 20])] }, execution_outcome) } @@ -453,9 +450,7 @@ fn block4( header.state_root = state_root; // parent_hash points to block1 hash header.parent_hash = parent_hash; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - block.header = SealedHeader::new(header, seal); + block.header = SealedHeader::seal(header); (SealedBlockWithSenders { block, senders: vec![Address::new([0x31; 20])] }, execution_outcome) } @@ -541,9 +536,7 @@ fn block5( header.state_root = state_root; // parent_hash points to block1 hash header.parent_hash = parent_hash; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - block.header = 
SealedHeader::new(header, seal); + block.header = SealedHeader::seal(header); (SealedBlockWithSenders { block, senders: vec![Address::new([0x31; 20])] }, execution_outcome) } diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 08530acf0a7f..9661ab2057ca 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -1,29 +1,31 @@ use crate::{ traits::{BlockSource, ReceiptProvider}, - AccountReader, BlockExecutionReader, BlockHashReader, BlockIdReader, BlockNumReader, - BlockReader, BlockReaderIdExt, ChainSpecProvider, ChangeSetReader, DatabaseProvider, - EvmEnvProvider, HeaderProvider, ReceiptProviderIdExt, StateProvider, StateProviderBox, - StateProviderFactory, StateReader, StateRootProvider, TransactionVariant, TransactionsProvider, - WithdrawalsProvider, + AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, + ChainSpecProvider, ChangeSetReader, DatabaseProvider, EvmEnvProvider, HeaderProvider, + ReceiptProviderIdExt, StateProvider, StateProviderBox, StateProviderFactory, StateReader, + StateRootProvider, TransactionVariant, TransactionsProvider, WithdrawalsProvider, +}; +use alloy_consensus::{constants::EMPTY_ROOT_HASH, Header}; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, BlockId, BlockNumberOrTag, }; -use alloy_consensus::constants::EMPTY_ROOT_HASH; -use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; use alloy_primitives::{ keccak256, map::{HashMap, HashSet}, - Address, BlockHash, BlockNumber, Bytes, Sealable, StorageKey, StorageValue, TxHash, TxNumber, - B256, U256, + Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue, TxHash, TxNumber, B256, U256, }; use parking_lot::Mutex; use reth_chainspec::{ChainInfo, ChainSpec}; use reth_db::mock::{DatabaseMock, TxMock}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; -use reth_execution_types::{Chain, ExecutionOutcome}; +use reth_execution_types::ExecutionOutcome; +use reth_node_types::NodeTypes; use reth_primitives::{ - Account, Block, BlockWithSenders, Bytecode, GotExpected, Header, Receipt, SealedBlock, + Account, Block, BlockWithSenders, Bytecode, GotExpected, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, Withdrawal, Withdrawals, + TransactionSignedNoHash, }; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ @@ -34,6 +36,7 @@ use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, StorageProof, TrieInput, }; +use reth_trie_db::MerklePatriciaTrie; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ collections::BTreeMap, @@ -150,10 +153,20 @@ impl MockEthProvider { } } +/// Mock node. 
+#[derive(Debug)] +pub struct MockNode; + +impl NodeTypes for MockNode { + type Primitives = (); + type ChainSpec = ChainSpec; + type StateCommitment = MerklePatriciaTrie; +} + impl DatabaseProviderFactory for MockEthProvider { type DB = DatabaseMock; - type Provider = DatabaseProvider; - type ProviderRW = DatabaseProvider; + type Provider = DatabaseProvider; + type ProviderRW = DatabaseProvider; fn database_provider_ro(&self) -> ProviderResult { Err(ConsistentViewError::Syncing { best_block: GotExpected::new(0, 0) }.into()) @@ -204,11 +217,7 @@ impl HeaderProvider for MockEthProvider { } fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { - Ok(self.header_by_number(number)?.map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - })) + Ok(self.header_by_number(number)?.map(SealedHeader::seal)) } fn sealed_headers_while( @@ -219,11 +228,7 @@ impl HeaderProvider for MockEthProvider { Ok(self .headers_range(range)? .into_iter() - .map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }) + .map(SealedHeader::seal) .take_while(|h| predicate(h)) .collect()) } @@ -452,15 +457,15 @@ impl BlockNumReader for MockEthProvider { } impl BlockIdReader for MockEthProvider { - fn pending_block_num_hash(&self) -> ProviderResult> { + fn pending_block_num_hash(&self) -> ProviderResult> { Ok(None) } - fn safe_block_num_hash(&self) -> ProviderResult> { + fn safe_block_num_hash(&self) -> ProviderResult> { Ok(None) } - fn finalized_block_num_hash(&self) -> ProviderResult> { + fn finalized_block_num_hash(&self) -> ProviderResult> { Ok(None) } } @@ -552,14 +557,7 @@ impl BlockReaderIdExt for MockEthProvider { } fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { - self.header_by_id(id)?.map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ) + self.header_by_id(id)?.map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))) } fn header_by_id(&self, id: BlockId) -> ProviderResult> { @@ -749,18 +747,6 @@ impl StateProviderFactory for MockEthProvider { Ok(Box::new(self.clone())) } - fn history_by_block_number(&self, _block: BlockNumber) -> ProviderResult { - Ok(Box::new(self.clone())) - } - - fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult { - Ok(Box::new(self.clone())) - } - - fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult { - Ok(Box::new(self.clone())) - } - fn state_by_block_number_or_tag( &self, number_or_tag: BlockNumberOrTag, @@ -787,6 +773,18 @@ impl StateProviderFactory for MockEthProvider { } } + fn history_by_block_number(&self, _block: BlockNumber) -> ProviderResult { + Ok(Box::new(self.clone())) + } + + fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult { + Ok(Box::new(self.clone())) + } + + fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult { + Ok(Box::new(self.clone())) + } + fn pending(&self) -> ProviderResult { Ok(Box::new(self.clone())) } @@ -818,15 +816,6 @@ impl ChangeSetReader for MockEthProvider { } } -impl BlockExecutionReader for MockEthProvider { - fn get_block_and_execution_range( - &self, - _range: RangeInclusive, - ) -> ProviderResult { - Ok(Chain::default()) - } -} - impl StateReader for MockEthProvider { fn get_state(&self, _block: BlockNumber) -> ProviderResult> { Ok(None) diff --git a/crates/storage/provider/src/test_utils/mod.rs 
b/crates/storage/provider/src/test_utils/mod.rs index 2200781096d0..c0e80930b318 100644 --- a/crates/storage/provider/src/test_utils/mod.rs +++ b/crates/storage/provider/src/test_utils/mod.rs @@ -25,6 +25,7 @@ pub type MockNodeTypes = reth_node_types::AnyNodeTypesWithEngine< (), reth_ethereum_engine_primitives::EthEngineTypes, reth_chainspec::ChainSpec, + reth_trie_db::MerklePatriciaTrie, >; /// Mock [`reth_node_types::NodeTypesWithDB`] for testing. diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index f6f7e185de63..38fab0dc3112 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -4,7 +4,11 @@ use std::{ sync::Arc, }; -use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; +use alloy_consensus::Header; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, BlockId, BlockNumberOrTag, +}; use alloy_primitives::{ map::{HashMap, HashSet}, Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue, TxHash, TxNumber, B256, U256, @@ -18,9 +22,8 @@ use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_errors::ProviderError; use reth_evm::ConfigureEvmEnv; use reth_primitives::{ - Account, Block, BlockWithSenders, Bytecode, Header, Receipt, SealedBlock, - SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, Withdrawal, Withdrawals, + Account, Block, BlockWithSenders, Bytecode, Receipt, SealedBlock, SealedBlockWithSenders, + SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -175,15 +178,15 @@ impl BlockReaderIdExt for NoopProvider { } impl BlockIdReader for NoopProvider { - fn pending_block_num_hash(&self) -> ProviderResult> { + fn pending_block_num_hash(&self) -> ProviderResult> { Ok(None) } - fn safe_block_num_hash(&self) -> ProviderResult> { + fn safe_block_num_hash(&self) -> ProviderResult> { Ok(None) } - fn finalized_block_num_hash(&self) -> ProviderResult> { + fn finalized_block_num_hash(&self) -> ProviderResult> { Ok(None) } } @@ -465,22 +468,6 @@ impl StateProviderFactory for NoopProvider { Ok(Box::new(*self)) } - fn history_by_block_number(&self, _block: BlockNumber) -> ProviderResult { - Ok(Box::new(*self)) - } - - fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult { - Ok(Box::new(*self)) - } - - fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult { - Ok(Box::new(*self)) - } - - fn pending(&self) -> ProviderResult { - Ok(Box::new(*self)) - } - fn state_by_block_number_or_tag( &self, number_or_tag: BlockNumberOrTag, @@ -507,6 +494,22 @@ impl StateProviderFactory for NoopProvider { } } + fn history_by_block_number(&self, _block: BlockNumber) -> ProviderResult { + Ok(Box::new(*self)) + } + + fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult { + Ok(Box::new(*self)) + } + + fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult { + Ok(Box::new(*self)) + } + + fn pending(&self) -> ProviderResult { + Ok(Box::new(*self)) + } + fn pending_state_by_hash(&self, _block_hash: B256) -> ProviderResult> { Ok(Some(Box::new(*self))) } diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index 8e3a54d86b9c..7202c405f068 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -2,7 
+2,6 @@ use alloy_primitives::BlockNumber; use reth_db_api::models::StoredBlockBodyIndices; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::SealedBlockWithSenders; -use reth_storage_api::BlockReader; use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, HashedPostStateSorted}; use std::ops::RangeInclusive; @@ -23,16 +22,6 @@ pub trait BlockExecutionWriter: BlockWriter + Send + Sync { ) -> ProviderResult<()>; } -/// BlockExecution Reader -#[auto_impl::auto_impl(&, Arc, Box)] -pub trait BlockExecutionReader: BlockReader + Send + Sync { - /// Get range of blocks and its execution result - fn get_block_and_execution_range( - &self, - range: RangeInclusive, - ) -> ProviderResult; -} - /// This just receives state, or [`ExecutionOutcome`], from the provider #[auto_impl::auto_impl(&, Arc, Box)] pub trait StateReader: Send + Sync { diff --git a/crates/storage/provider/src/traits/hashing.rs b/crates/storage/provider/src/traits/hashing.rs index 2b759afa7295..c6958aa4d644 100644 --- a/crates/storage/provider/src/traits/hashing.rs +++ b/crates/storage/provider/src/traits/hashing.rs @@ -1,11 +1,11 @@ use alloy_primitives::{Address, BlockNumber, B256}; use auto_impl::auto_impl; -use reth_db_api::models::BlockNumberAddress; +use reth_db::models::{AccountBeforeTx, BlockNumberAddress}; use reth_primitives::{Account, StorageEntry}; use reth_storage_errors::provider::ProviderResult; use std::{ collections::{BTreeMap, BTreeSet, HashMap}, - ops::{Range, RangeInclusive}, + ops::{RangeBounds, RangeInclusive}, }; /// Hashing Writer @@ -16,9 +16,19 @@ pub trait HashingWriter: Send + Sync { /// # Returns /// /// Set of hashed keys of updated accounts. - fn unwind_account_hashing( + fn unwind_account_hashing<'a>( &self, - range: RangeInclusive, + changesets: impl Iterator, + ) -> ProviderResult>>; + + /// Unwind and clear account hashing in a given block range. + /// + /// # Returns + /// + /// Set of hashed keys of updated accounts. + fn unwind_account_hashing_range( + &self, + range: impl RangeBounds, ) -> ProviderResult>>; /// Inserts all accounts into [reth_db::tables::AccountsHistory] table. @@ -38,7 +48,17 @@ pub trait HashingWriter: Send + Sync { /// Mapping of hashed keys of updated accounts to their respective updated hashed slots. fn unwind_storage_hashing( &self, - range: Range, + changesets: impl Iterator, + ) -> ProviderResult>>; + + /// Unwind and clear storage hashing in a given block range. + /// + /// # Returns + /// + /// Mapping of hashed keys of updated accounts to their respective updated hashed slots. + fn unwind_storage_hashing_range( + &self, + range: impl RangeBounds, ) -> ProviderResult>>; /// Iterates over storages and inserts them to hashing table. 
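The new `*_range` variants in this trait keep the old block-range entry points, while the core unwind methods now accept changeset iterators, so a caller that has already read the changesets (as `take_block_and_execution_range` now does) can reuse one read for several consumers. A compilable sketch of the shape of that split, with simplified stand-in types rather than reth's tables:

```rust
use std::ops::RangeBounds;

#[derive(Clone)]
struct AccountBeforeTx {
    block: u64,
    address: [u8; 20],
}

trait HashingWriter {
    /// Core entry point: consumes changesets the caller already read.
    fn unwind_account_hashing<'a>(
        &self,
        changesets: impl Iterator<Item = &'a AccountBeforeTx>,
    ) -> usize;

    /// Convenience wrapper: reads the range itself, then delegates.
    fn unwind_account_hashing_range(&self, range: impl RangeBounds<u64>) -> usize;
}

struct Provider {
    // Stand-in for the AccountChangeSets table.
    changesets: Vec<AccountBeforeTx>,
}

impl HashingWriter for Provider {
    fn unwind_account_hashing<'a>(
        &self,
        changesets: impl Iterator<Item = &'a AccountBeforeTx>,
    ) -> usize {
        // The real implementation keccak-hashes each address and updates the
        // HashedAccounts table; counting the touched entries stands in here.
        changesets.count()
    }

    fn unwind_account_hashing_range(&self, range: impl RangeBounds<u64>) -> usize {
        // Read the range once, then reuse the iterator-based entry point.
        let in_range: Vec<AccountBeforeTx> = self
            .changesets
            .iter()
            .filter(|e| range.contains(&e.block))
            .cloned()
            .collect();
        self.unwind_account_hashing(in_range.iter())
    }
}

fn main() {
    let provider = Provider {
        changesets: vec![
            AccountBeforeTx { block: 1, address: [0; 20] },
            AccountBeforeTx { block: 2, address: [1; 20] },
            AccountBeforeTx { block: 5, address: [2; 20] },
        ],
    };
    assert_eq!(provider.unwind_account_hashing_range(1..=2), 2);
}
```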
diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index c31c7c1e2f21..5542ea168abc 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -9,9 +9,6 @@ pub use reth_evm::provider::EvmEnvProvider; mod block; pub use block::*; -mod chain_info; -pub use chain_info::CanonChainTracker; - mod header_sync_gap; pub use header_sync_gap::{HeaderSyncGap, HeaderSyncGapProvider}; @@ -26,15 +23,9 @@ pub use hashing::HashingWriter; mod trie; pub use trie::{StorageTrieWriter, TrieWriter}; -mod history; -pub use history::HistoryWriter; - mod static_file_provider; pub use static_file_provider::StaticFileProviderFactory; -mod stats; -pub use stats::StatsReader; - mod full; pub use full::{FullProvider, FullRpcProvider}; diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index 5b16b2da4e5a..0fbec6c1b881 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -3,6 +3,7 @@ use crate::{ writer::static_file::StaticFileWriter, BlockExecutionWriter, BlockWriter, HistoryWriter, StateChangeWriter, StateWriter, TrieWriter, }; +use alloy_consensus::Header; use alloy_primitives::{BlockNumber, B256, U256}; use reth_chain_state::ExecutedBlock; use reth_db::{ @@ -13,7 +14,7 @@ use reth_db::{ }; use reth_errors::{ProviderError, ProviderResult}; use reth_execution_types::ExecutionOutcome; -use reth_primitives::{Header, SealedBlock, StaticFileSegment, TransactionSignedNoHash}; +use reth_primitives::{SealedBlock, StaticFileSegment, TransactionSignedNoHash}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ DBProvider, HeaderProvider, ReceiptWriter, StageCheckpointWriter, TransactionsProviderExt, @@ -523,7 +524,7 @@ where is_value_known: OriginalValuesKnown, ) -> ProviderResult<()> { let (plain_state, reverts) = - execution_outcome.bundle.into_plain_state_and_reverts(is_value_known); + execution_outcome.bundle.to_plain_state_and_reverts(is_value_known); self.database().write_state_reverts(reverts, execution_outcome.first_block)?; @@ -664,8 +665,8 @@ mod tests { let mut revm_bundle_state = state.take_bundle(); // Write plain state and reverts separately. - let reverts = revm_bundle_state.take_all_reverts().into_plain_state_reverts(); - let plain_state = revm_bundle_state.into_plain_state(OriginalValuesKnown::Yes); + let reverts = revm_bundle_state.take_all_reverts().to_plain_state_reverts(); + let plain_state = revm_bundle_state.to_plain_state(OriginalValuesKnown::Yes); assert!(plain_state.storage.is_empty()); assert!(plain_state.contracts.is_empty()); provider.write_state_changes(plain_state).expect("Could not write plain state to DB"); @@ -722,8 +723,8 @@ mod tests { let mut revm_bundle_state = state.take_bundle(); // Write plain state and reverts separately. - let reverts = revm_bundle_state.take_all_reverts().into_plain_state_reverts(); - let plain_state = revm_bundle_state.into_plain_state(OriginalValuesKnown::Yes); + let reverts = revm_bundle_state.take_all_reverts().to_plain_state_reverts(); + let plain_state = revm_bundle_state.to_plain_state(OriginalValuesKnown::Yes); // Account B selfdestructed so flag for it should be present. 
assert_eq!( plain_state.storage, @@ -1129,7 +1130,8 @@ mod tests { let bundle = state.take_bundle(); - let outcome = ExecutionOutcome::new(bundle, Receipts::default(), 1, Vec::new()); + let outcome: ExecutionOutcome = + ExecutionOutcome::new(bundle, Receipts::default(), 1, Vec::new()); let mut writer = UnifiedStorageWriter::from_database(&provider); writer .write_to_storage(outcome, OriginalValuesKnown::Yes) @@ -1375,7 +1377,7 @@ mod tests { #[test] fn revert_to_indices() { - let base = ExecutionOutcome { + let base: ExecutionOutcome = ExecutionOutcome { bundle: BundleState::default(), receipts: vec![vec![Some(Receipt::default()); 2]; 7].into(), first_block: 10, @@ -1441,7 +1443,7 @@ mod tests { assert_eq!( StateRoot::overlay_root( tx, - ExecutionOutcome::new( + ExecutionOutcome::::new( state.bundle_state.clone(), Receipts::default(), 0, @@ -1592,7 +1594,7 @@ mod tests { .build(); assert_eq!(previous_state.reverts.len(), 1); - let mut test = ExecutionOutcome { + let mut test: ExecutionOutcome = ExecutionOutcome { bundle: present_state, receipts: vec![vec![Some(Receipt::default()); 2]; 1].into(), first_block: 2, diff --git a/crates/storage/storage-api/Cargo.toml b/crates/storage/storage-api/Cargo.toml index 0ae8b284588e..2b13f6332f87 100644 --- a/crates/storage/storage-api/Cargo.toml +++ b/crates/storage/storage-api/Cargo.toml @@ -22,10 +22,12 @@ reth-prune-types.workspace = true reth-stages-types.workspace = true reth-storage-errors.workspace = true reth-trie.workspace = true +reth-db.workspace = true # ethereum alloy-eips.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true +alloy-rpc-types-engine.workspace = true auto_impl.workspace = true diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index 01238be745e2..929f7ecca432 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -2,11 +2,12 @@ use crate::{ BlockNumReader, HeaderProvider, ReceiptProvider, ReceiptProviderIdExt, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; +use alloy_consensus::Header; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; -use alloy_primitives::{BlockNumber, Sealable, B256}; +use alloy_primitives::{BlockNumber, B256}; use reth_db_models::StoredBlockBodyIndices; use reth_primitives::{ - Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, + Block, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, }; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeInclusive; @@ -243,14 +244,7 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { ) -> ProviderResult> { self.convert_block_number(id)? .map_or_else(|| Ok(None), |num| self.header_by_hash_or_number(num.into()))? - .map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ) + .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))) } /// Returns the sealed header with the matching `BlockId` from the database. diff --git a/crates/storage/storage-api/src/block_id.rs b/crates/storage/storage-api/src/block_id.rs index 55cd6ab1c763..00856d348a5e 100644 --- a/crates/storage/storage-api/src/block_id.rs +++ b/crates/storage/storage-api/src/block_id.rs @@ -99,13 +99,13 @@ pub trait BlockIdReader: BlockNumReader + Send + Sync { } /// Get the current pending block number and hash. 
- fn pending_block_num_hash(&self) -> ProviderResult>; + fn pending_block_num_hash(&self) -> ProviderResult>; /// Get the current safe block number and hash. - fn safe_block_num_hash(&self) -> ProviderResult>; + fn safe_block_num_hash(&self) -> ProviderResult>; /// Get the current finalized block number and hash. - fn finalized_block_num_hash(&self) -> ProviderResult>; + fn finalized_block_num_hash(&self) -> ProviderResult>; /// Get the safe block number. fn safe_block_number(&self) -> ProviderResult> { diff --git a/crates/storage/provider/src/traits/chain_info.rs b/crates/storage/storage-api/src/chain_info.rs similarity index 100% rename from crates/storage/provider/src/traits/chain_info.rs rename to crates/storage/storage-api/src/chain_info.rs diff --git a/crates/storage/storage-api/src/database_provider.rs b/crates/storage/storage-api/src/database_provider.rs index 6a463ed01e92..20aebce88feb 100644 --- a/crates/storage/storage-api/src/database_provider.rs +++ b/crates/storage/storage-api/src/database_provider.rs @@ -1,6 +1,14 @@ -use reth_db_api::{database::Database, transaction::DbTx}; +use reth_db_api::{ + common::KeyValue, + cursor::DbCursorRO, + database::Database, + table::Table, + transaction::{DbTx, DbTxMut}, + DatabaseError, +}; use reth_prune_types::PruneModes; use reth_storage_errors::provider::ProviderResult; +use std::ops::{Bound, RangeBounds}; /// Database provider. pub trait DBProvider: Send + Sync + Sized + 'static { @@ -34,6 +42,101 @@ pub trait DBProvider: Send + Sync + Sized + 'static { /// Returns a reference to prune modes. fn prune_modes_ref(&self) -> &PruneModes; + + /// Return full table as Vec + fn table(&self) -> Result>, DatabaseError> + where + T::Key: Default + Ord, + { + self.tx_ref() + .cursor_read::()? + .walk(Some(T::Key::default()))? + .collect::, DatabaseError>>() + } + + /// Return a list of entries from the table, based on the given range. + #[inline] + fn get( + &self, + range: impl RangeBounds, + ) -> Result>, DatabaseError> { + self.tx_ref().cursor_read::()?.walk_range(range)?.collect::, _>>() + } + + /// Iterates over read only values in the given table and collects them into a vector. + /// + /// Early-returns if the range is empty, without opening a cursor transaction. + fn cursor_read_collect>( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + let capacity = match range_size_hint(&range) { + Some(0) | None => return Ok(Vec::new()), + Some(capacity) => capacity, + }; + let mut cursor = self.tx_ref().cursor_read::()?; + self.cursor_collect_with_capacity(&mut cursor, range, capacity) + } + + /// Iterates over read only values in the given table and collects them into a vector. + fn cursor_collect>( + &self, + cursor: &mut impl DbCursorRO, + range: impl RangeBounds, + ) -> ProviderResult> { + let capacity = range_size_hint(&range).unwrap_or(0); + self.cursor_collect_with_capacity(cursor, range, capacity) + } + + /// Iterates over read only values in the given table and collects them into a vector with + /// capacity. + fn cursor_collect_with_capacity>( + &self, + cursor: &mut impl DbCursorRO, + range: impl RangeBounds, + capacity: usize, + ) -> ProviderResult> { + let mut items = Vec::with_capacity(capacity); + for entry in cursor.walk_range(range)? { + items.push(entry?.1); + } + Ok(items) + } + + /// Remove list of entries from the table. Returns the number of entries removed. 
+ #[inline] + fn remove(&self, range: impl RangeBounds) -> Result + where + Self::Tx: DbTxMut, + { + let mut entries = 0; + let mut cursor_write = self.tx_ref().cursor_write::()?; + let mut walker = cursor_write.walk_range(range)?; + while walker.next().transpose()?.is_some() { + walker.delete_current()?; + entries += 1; + } + Ok(entries) + } + + /// Return a list of entries from the table, and remove them, based on the given range. + #[inline] + fn take( + &self, + range: impl RangeBounds, + ) -> Result>, DatabaseError> + where + Self::Tx: DbTxMut, + { + let mut cursor_write = self.tx_ref().cursor_write::()?; + let mut walker = cursor_write.walk_range(range)?; + let mut items = Vec::new(); + while let Some(i) = walker.next().transpose()? { + walker.delete_current()?; + items.push(i) + } + Ok(items) + } } /// Database provider factory. @@ -54,3 +157,17 @@ pub trait DatabaseProviderFactory: Send + Sync { /// Create new read-write database provider. fn database_provider_rw(&self) -> ProviderResult; } + +fn range_size_hint(range: &impl RangeBounds) -> Option { + let start = match range.start_bound().cloned() { + Bound::Included(start) => start, + Bound::Excluded(start) => start.checked_add(1)?, + Bound::Unbounded => 0, + }; + let end = match range.end_bound().cloned() { + Bound::Included(end) => end.saturating_add(1), + Bound::Excluded(end) => end, + Bound::Unbounded => return None, + }; + end.checked_sub(start).map(|x| x as _) +} diff --git a/crates/storage/storage-api/src/header.rs b/crates/storage/storage-api/src/header.rs index 7202f51ddf1f..c068f7c1d295 100644 --- a/crates/storage/storage-api/src/header.rs +++ b/crates/storage/storage-api/src/header.rs @@ -1,6 +1,7 @@ +use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockHash, BlockNumber, U256}; -use reth_primitives::{Header, SealedHeader}; +use reth_primitives::SealedHeader; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeBounds; diff --git a/crates/storage/provider/src/traits/history.rs b/crates/storage/storage-api/src/history.rs similarity index 59% rename from crates/storage/provider/src/traits/history.rs rename to crates/storage/storage-api/src/history.rs index cbf9bece4b94..4eadd6031c35 100644 --- a/crates/storage/provider/src/traits/history.rs +++ b/crates/storage/storage-api/src/history.rs @@ -1,8 +1,9 @@ use alloy_primitives::{Address, BlockNumber, B256}; use auto_impl::auto_impl; -use reth_db_api::models::BlockNumberAddress; +use reth_db::models::{AccountBeforeTx, BlockNumberAddress}; +use reth_primitives::StorageEntry; use reth_storage_errors::provider::ProviderResult; -use std::ops::{Range, RangeInclusive}; +use std::ops::{RangeBounds, RangeInclusive}; /// History Writer #[auto_impl(&, Arc, Box)] @@ -10,9 +11,17 @@ pub trait HistoryWriter: Send + Sync { /// Unwind and clear account history indices. /// /// Returns number of changesets walked. - fn unwind_account_history_indices( + fn unwind_account_history_indices<'a>( &self, - range: RangeInclusive, + changesets: impl Iterator, + ) -> ProviderResult; + + /// Unwind and clear account history indices in a given block range. + /// + /// Returns number of changesets walked. + fn unwind_account_history_indices_range( + &self, + range: impl RangeBounds, ) -> ProviderResult; /// Insert account change index to database. Used inside AccountHistoryIndex stage @@ -26,7 +35,15 @@ pub trait HistoryWriter: Send + Sync { /// Returns number of changesets walked. 
fn unwind_storage_history_indices( &self, - range: Range, + changesets: impl Iterator, + ) -> ProviderResult; + + /// Unwind and clear storage history indices in a given block range. + /// + /// Returns number of changesets walked. + fn unwind_storage_history_indices_range( + &self, + range: impl RangeBounds, ) -> ProviderResult; /// Insert storage change index to database. Used inside StorageHistoryIndex stage diff --git a/crates/storage/storage-api/src/lib.rs b/crates/storage/storage-api/src/lib.rs index 4e589242a91e..13a44b482a62 100644 --- a/crates/storage/storage-api/src/lib.rs +++ b/crates/storage/storage-api/src/lib.rs @@ -46,6 +46,9 @@ pub use transactions::*; mod trie; pub use trie::*; +mod chain_info; +pub use chain_info::*; + mod withdrawals; pub use withdrawals::*; @@ -53,3 +56,9 @@ mod database_provider; pub use database_provider::*; pub mod noop; + +mod history; +pub use history::*; + +mod stats; +pub use stats::*; diff --git a/crates/storage/provider/src/traits/stats.rs b/crates/storage/storage-api/src/stats.rs similarity index 100% rename from crates/storage/provider/src/traits/stats.rs rename to crates/storage/storage-api/src/stats.rs diff --git a/crates/storage/storage-api/src/withdrawals.rs b/crates/storage/storage-api/src/withdrawals.rs index 2de69b34eb66..47aa4944410d 100644 --- a/crates/storage/storage-api/src/withdrawals.rs +++ b/crates/storage/storage-api/src/withdrawals.rs @@ -1,5 +1,7 @@ -use alloy_eips::BlockHashOrNumber; -use reth_primitives::{Withdrawal, Withdrawals}; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, +}; use reth_storage_errors::provider::ProviderResult; /// Client trait for fetching [Withdrawal] related data. diff --git a/crates/tokio-util/src/ratelimit.rs b/crates/tokio-util/src/ratelimit.rs index 16e403f10aa2..33a9c5273d8f 100644 --- a/crates/tokio-util/src/ratelimit.rs +++ b/crates/tokio-util/src/ratelimit.rs @@ -8,7 +8,7 @@ use std::{ }; use tokio::time::Sleep; -/// Given a [Rate] this type enforces a rate limit. +/// Given a [`Rate`] this type enforces a rate limit. 
#[derive(Debug)] pub struct RateLimit { rate: Rate, @@ -122,6 +122,7 @@ impl Rate { #[cfg(test)] mod tests { use super::*; + use tokio::time; #[tokio::test] async fn test_rate_limit() { @@ -157,4 +158,118 @@ mod tests { }) .await; } + + #[tokio::test] + async fn test_rate_limit_initialization() { + let rate = Rate::new(5, Duration::from_secs(1)); + let limit = RateLimit::new(rate); + + // Verify the limit is correctly set + assert_eq!(limit.limit(), 5); + } + + #[tokio::test] + async fn test_rate_limit_allows_within_limit() { + let mut limit = RateLimit::new(Rate::new(3, Duration::from_millis(1))); + + // Check that the rate limiter is ready initially + for _ in 0..3 { + poll_fn(|cx| { + // Should be ready within the limit + assert!(limit.poll_ready(cx).is_ready()); + Poll::Ready(()) + }) + .await; + // Signal that a request has been made + limit.tick(); + } + + // After 3 requests, it should be pending (rate limit hit) + poll_fn(|cx| { + // Exceeded limit, should now be limited + assert!(limit.poll_ready(cx).is_pending()); + Poll::Ready(()) + }) + .await; + } + + #[tokio::test] + async fn test_rate_limit_enforces_wait_after_limit() { + let mut limit = RateLimit::new(Rate::new(2, Duration::from_millis(500))); + + // Consume the limit + for _ in 0..2 { + poll_fn(|cx| { + assert!(limit.poll_ready(cx).is_ready()); + Poll::Ready(()) + }) + .await; + limit.tick(); + } + + // Should now be limited (pending) + poll_fn(|cx| { + assert!(limit.poll_ready(cx).is_pending()); + Poll::Ready(()) + }) + .await; + + // Wait until the rate period elapses + time::sleep(limit.rate.duration()).await; + + // Now it should be ready again after the wait + poll_fn(|cx| { + assert!(limit.poll_ready(cx).is_ready()); + Poll::Ready(()) + }) + .await; + } + + #[tokio::test] + async fn test_wait_method_awaits_readiness() { + let mut limit = RateLimit::new(Rate::new(1, Duration::from_millis(500))); + + poll_fn(|cx| { + assert!(limit.poll_ready(cx).is_ready()); + Poll::Ready(()) + }) + .await; + + limit.tick(); + + // The limit should now be exceeded + poll_fn(|cx| { + assert!(limit.poll_ready(cx).is_pending()); + Poll::Ready(()) + }) + .await; + + // The `wait` method should block until the rate period elapses + limit.wait().await; + + // After `wait`, it should now be ready + poll_fn(|cx| { + assert!(limit.poll_ready(cx).is_ready()); + Poll::Ready(()) + }) + .await; + } + + #[tokio::test] + #[should_panic(expected = "RateLimit limited; poll_ready must be called first")] + async fn test_tick_panics_when_limited() { + let mut limit = RateLimit::new(Rate::new(1, Duration::from_secs(1))); + + poll_fn(|cx| { + assert!(limit.poll_ready(cx).is_ready()); + Poll::Ready(()) + }) + .await; + + // Consume the limit + limit.tick(); + + // Attempting to tick again without poll_ready being ready should panic + limit.tick(); + } } diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 1bfb10d86d77..7c760c81c540 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -17,6 +17,7 @@ reth-chain-state.workspace = true reth-chainspec.workspace = true reth-eth-wire-types.workspace = true reth-primitives = { workspace = true, features = ["c-kzg", "secp256k1"] } +reth-primitives-traits.workspace = true reth-execution-types.workspace = true reth-fs-util.workspace = true reth-storage-api.workspace = true @@ -50,6 +51,7 @@ bitflags.workspace = true auto_impl.workspace = true smallvec.workspace = true + # testing rand = { workspace = true, optional = true } paste = { workspace 
= true, optional = true } @@ -84,7 +86,7 @@ serde = [ "parking_lot/serde", "rand?/serde", "revm/serde", - "smallvec/serde" + "smallvec/serde", ] test-utils = [ "rand", @@ -94,7 +96,8 @@ test-utils = [ "reth-chainspec/test-utils", "reth-primitives/test-utils", "reth-provider/test-utils", - "revm/test-utils" + "revm/test-utils", + "reth-primitives-traits/test-utils", ] arbitrary = [ "proptest", @@ -107,7 +110,8 @@ arbitrary = [ "alloy-primitives/arbitrary", "bitflags/arbitrary", "revm/arbitrary", - "smallvec/arbitrary" + "reth-primitives-traits/arbitrary", + "smallvec/arbitrary", ] [[bench]] diff --git a/crates/transaction-pool/src/blobstore/disk.rs b/crates/transaction-pool/src/blobstore/disk.rs index 787d4985ff1b..9d02276db85a 100644 --- a/crates/transaction-pool/src/blobstore/disk.rs +++ b/crates/transaction-pool/src/blobstore/disk.rs @@ -1,10 +1,9 @@ //! A simple diskstore for blobs use crate::blobstore::{BlobStore, BlobStoreCleanupStat, BlobStoreError, BlobStoreSize}; -use alloy_eips::eip4844::BlobAndProofV1; +use alloy_eips::eip4844::{BlobAndProofV1, BlobTransactionSidecar}; use alloy_primitives::{TxHash, B256}; use parking_lot::{Mutex, RwLock}; -use reth_primitives::BlobTransactionSidecar; use schnellru::{ByLength, LruMap}; use std::{collections::HashSet, fmt, fs, io, path::PathBuf, sync::Arc}; use tracing::{debug, trace}; @@ -103,7 +102,7 @@ impl BlobStore for DiskFileBlobStore { stat } - fn get(&self, tx: B256) -> Result, BlobStoreError> { + fn get(&self, tx: B256) -> Result>, BlobStoreError> { self.inner.get_one(tx) } @@ -114,14 +113,17 @@ impl BlobStore for DiskFileBlobStore { fn get_all( &self, txs: Vec, - ) -> Result, BlobStoreError> { + ) -> Result)>, BlobStoreError> { if txs.is_empty() { return Ok(Vec::new()) } self.inner.get_all(txs) } - fn get_exact(&self, txs: Vec) -> Result, BlobStoreError> { + fn get_exact( + &self, + txs: Vec, + ) -> Result>, BlobStoreError> { if txs.is_empty() { return Ok(Vec::new()) } @@ -164,7 +166,7 @@ impl BlobStore for DiskFileBlobStore { struct DiskFileBlobStoreInner { blob_dir: PathBuf, - blob_cache: Mutex>, + blob_cache: Mutex, ByLength>>, size_tracker: BlobStoreSize, file_lock: RwLock<()>, txs_to_delete: RwLock>, @@ -205,7 +207,7 @@ impl DiskFileBlobStoreInner { fn insert_one(&self, tx: B256, data: BlobTransactionSidecar) -> Result<(), BlobStoreError> { let mut buf = Vec::with_capacity(data.rlp_encoded_fields_length()); data.rlp_encode_fields(&mut buf); - self.blob_cache.lock().insert(tx, data); + self.blob_cache.lock().insert(tx, Arc::new(data)); let size = self.write_one_encoded(tx, &buf)?; self.size_tracker.add_size(size); @@ -227,7 +229,7 @@ impl DiskFileBlobStoreInner { { let mut cache = self.blob_cache.lock(); for (tx, data) in txs { - cache.insert(tx, data); + cache.insert(tx, Arc::new(data)); } } let mut add = 0; @@ -278,15 +280,19 @@ impl DiskFileBlobStoreInner { } /// Retrieves the blob for the given transaction hash from the blob cache or disk. - fn get_one(&self, tx: B256) -> Result, BlobStoreError> { + fn get_one(&self, tx: B256) -> Result>, BlobStoreError> { if let Some(blob) = self.blob_cache.lock().get(&tx) { return Ok(Some(blob.clone())) } let blob = self.read_one(tx)?; + if let Some(blob) = &blob { - self.blob_cache.lock().insert(tx, blob.clone()); + let blob_arc = Arc::new(blob.clone()); + self.blob_cache.lock().insert(tx, blob_arc.clone()); + return Ok(Some(blob_arc)) } - Ok(blob) + + Ok(None) } /// Returns the path to the blob file for the given transaction hash. 
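The blob-store refactor above changes every read path to hand out Arc<BlobTransactionSidecar>, so a cache hit clones a reference count instead of copying a full sidecar (which can run to hundreds of kilobytes per transaction). A minimal sketch of the same read-through caching pattern, using a hypothetical Sidecar payload and load_from_disk stand-in rather than the actual reth types:

    use std::{
        collections::HashMap,
        sync::{Arc, Mutex},
    };

    /// Stand-in for a large blob payload.
    struct Sidecar {
        bytes: Vec<u8>,
    }

    struct Store {
        cache: Mutex<HashMap<u64, Arc<Sidecar>>>,
    }

    impl Store {
        /// Read-through get: serve from cache, otherwise load, cache the Arc, return it.
        fn get_one(&self, tx: u64) -> Option<Arc<Sidecar>> {
            if let Some(hit) = self.cache.lock().unwrap().get(&tx) {
                // Cache hit: clones the Arc (a refcount bump), not the payload.
                return Some(Arc::clone(hit));
            }
            let loaded = Arc::new(self.load_from_disk(tx)?);
            self.cache.lock().unwrap().insert(tx, Arc::clone(&loaded));
            Some(loaded)
        }

        /// Hypothetical disk read; the real store decodes one RLP file per tx hash.
        fn load_from_disk(&self, _tx: u64) -> Option<Sidecar> {
            None
        }
    }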
@@ -374,7 +380,7 @@ impl DiskFileBlobStoreInner { fn get_all( &self, txs: Vec, - ) -> Result, BlobStoreError> { + ) -> Result)>, BlobStoreError> { let mut res = Vec::with_capacity(txs.len()); let mut cache_miss = Vec::new(); { @@ -396,8 +402,9 @@ impl DiskFileBlobStoreInner { } let mut cache = self.blob_cache.lock(); for (tx, data) in from_disk { - cache.insert(tx, data.clone()); - res.push((tx, data)); + let arc = Arc::new(data.clone()); + cache.insert(tx, arc.clone()); + res.push((tx, arc.clone())); } Ok(res) @@ -407,7 +414,10 @@ impl DiskFileBlobStoreInner { /// /// Returns an error if there are any missing blobs. #[inline] - fn get_exact(&self, txs: Vec) -> Result, BlobStoreError> { + fn get_exact( + &self, + txs: Vec, + ) -> Result>, BlobStoreError> { txs.into_iter() .map(|tx| self.get_one(tx)?.ok_or(BlobStoreError::MissingSidecar(tx))) .collect() @@ -514,14 +524,17 @@ mod tests { let blobs = rng_blobs(10); let all_hashes = blobs.iter().map(|(tx, _)| *tx).collect::>(); store.insert_all(blobs.clone()).unwrap(); + // all cached for (tx, blob) in &blobs { assert!(store.is_cached(tx)); - assert_eq!(store.get(*tx).unwrap().unwrap(), *blob); + let b = store.get(*tx).unwrap().map(Arc::unwrap_or_clone).unwrap(); + assert_eq!(b, *blob); } + let all = store.get_all(all_hashes.clone()).unwrap(); for (tx, blob) in all { - assert!(blobs.contains(&(tx, blob)), "missing blob {tx:?}"); + assert!(blobs.contains(&(tx, Arc::unwrap_or_clone(blob))), "missing blob {tx:?}"); } assert!(store.contains(all_hashes[0]).unwrap()); diff --git a/crates/transaction-pool/src/blobstore/mem.rs b/crates/transaction-pool/src/blobstore/mem.rs index c98a01b88c18..0ab9c0d7af06 100644 --- a/crates/transaction-pool/src/blobstore/mem.rs +++ b/crates/transaction-pool/src/blobstore/mem.rs @@ -1,7 +1,5 @@ -use crate::blobstore::{ - BlobStore, BlobStoreCleanupStat, BlobStoreError, BlobStoreSize, BlobTransactionSidecar, -}; -use alloy_eips::eip4844::BlobAndProofV1; +use crate::blobstore::{BlobStore, BlobStoreCleanupStat, BlobStoreError, BlobStoreSize}; +use alloy_eips::eip4844::{BlobAndProofV1, BlobTransactionSidecar}; use alloy_primitives::B256; use parking_lot::RwLock; use std::{collections::HashMap, sync::Arc}; @@ -15,7 +13,7 @@ pub struct InMemoryBlobStore { #[derive(Debug, Default)] struct InMemoryBlobStoreInner { /// Storage for all blob data. - store: RwLock>, + store: RwLock>>, size_tracker: BlobStoreSize, } @@ -75,7 +73,7 @@ impl BlobStore for InMemoryBlobStore { } // Retrieves the decoded blob data for the given transaction hash. - fn get(&self, tx: B256) -> Result, BlobStoreError> { + fn get(&self, tx: B256) -> Result>, BlobStoreError> { Ok(self.inner.store.read().get(&tx).cloned()) } @@ -86,16 +84,17 @@ impl BlobStore for InMemoryBlobStore { fn get_all( &self, txs: Vec, - ) -> Result, BlobStoreError> { + ) -> Result)>, BlobStoreError> { let store = self.inner.store.read(); Ok(txs.into_iter().filter_map(|tx| store.get(&tx).map(|item| (tx, item.clone()))).collect()) } - fn get_exact(&self, txs: Vec) -> Result, BlobStoreError> { + fn get_exact( + &self, + txs: Vec, + ) -> Result>, BlobStoreError> { let store = self.inner.store.read(); - txs.into_iter() - .map(|tx| store.get(&tx).cloned().ok_or_else(|| BlobStoreError::MissingSidecar(tx))) - .collect() + Ok(txs.into_iter().filter_map(|tx| store.get(&tx).cloned()).collect()) } fn get_by_versioned_hashes( @@ -134,7 +133,7 @@ impl BlobStore for InMemoryBlobStore { /// Removes the given blob from the store and returns the size of the blob that was removed. 
#[inline] -fn remove_size(store: &mut HashMap, tx: &B256) -> usize { +fn remove_size(store: &mut HashMap>, tx: &B256) -> usize { store.remove(tx).map(|rem| rem.size()).unwrap_or_default() } @@ -143,11 +142,11 @@ fn remove_size(store: &mut HashMap, tx: &B256) -> /// We don't need to handle the size updates for replacements because transactions are unique. #[inline] fn insert_size( - store: &mut HashMap, + store: &mut HashMap>, tx: B256, blob: BlobTransactionSidecar, ) -> usize { let add = blob.size(); - store.insert(tx, blob); + store.insert(tx, Arc::new(blob)); add } diff --git a/crates/transaction-pool/src/blobstore/mod.rs b/crates/transaction-pool/src/blobstore/mod.rs index ee98e3eed85e..f1612bcd022e 100644 --- a/crates/transaction-pool/src/blobstore/mod.rs +++ b/crates/transaction-pool/src/blobstore/mod.rs @@ -1,14 +1,16 @@ //! Storage for blob data of EIP4844 transactions. -use alloy_eips::eip4844::BlobAndProofV1; +use alloy_eips::eip4844::{BlobAndProofV1, BlobTransactionSidecar}; use alloy_primitives::B256; pub use disk::{DiskFileBlobStore, DiskFileBlobStoreConfig, OpenDiskFileBlobStore}; pub use mem::InMemoryBlobStore; pub use noop::NoopBlobStore; -use reth_primitives::BlobTransactionSidecar; use std::{ fmt, - sync::atomic::{AtomicUsize, Ordering}, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, }; pub use tracker::{BlobStoreCanonTracker, BlobStoreUpdates}; @@ -44,7 +46,7 @@ pub trait BlobStore: fmt::Debug + Send + Sync + 'static { fn cleanup(&self) -> BlobStoreCleanupStat; /// Retrieves the decoded blob data for the given transaction hash. - fn get(&self, tx: B256) -> Result, BlobStoreError>; + fn get(&self, tx: B256) -> Result>, BlobStoreError>; /// Checks if the given transaction hash is in the blob store. fn contains(&self, tx: B256) -> Result; @@ -58,13 +60,14 @@ pub trait BlobStore: fmt::Debug + Send + Sync + 'static { fn get_all( &self, txs: Vec, - ) -> Result, BlobStoreError>; + ) -> Result)>, BlobStoreError>; /// Returns the exact [`BlobTransactionSidecar`] for the given transaction hashes in the exact /// order they were requested. /// /// Returns an error if any of the blobs are not found in the blob store. - fn get_exact(&self, txs: Vec) -> Result, BlobStoreError>; + fn get_exact(&self, txs: Vec) + -> Result>, BlobStoreError>; /// Return the [`BlobTransactionSidecar`]s for a list of blob versioned hashes. 
fn get_by_versioned_hashes( diff --git a/crates/transaction-pool/src/blobstore/noop.rs b/crates/transaction-pool/src/blobstore/noop.rs index 0e99858bd627..943a6eeda958 100644 --- a/crates/transaction-pool/src/blobstore/noop.rs +++ b/crates/transaction-pool/src/blobstore/noop.rs @@ -1,6 +1,7 @@ -use crate::blobstore::{BlobStore, BlobStoreCleanupStat, BlobStoreError, BlobTransactionSidecar}; -use alloy_eips::eip4844::BlobAndProofV1; +use crate::blobstore::{BlobStore, BlobStoreCleanupStat, BlobStoreError}; +use alloy_eips::eip4844::{BlobAndProofV1, BlobTransactionSidecar}; use alloy_primitives::B256; +use std::sync::Arc; /// A blobstore implementation that does nothing #[derive(Clone, Copy, Debug, PartialOrd, PartialEq, Eq, Default)] @@ -28,7 +29,7 @@ impl BlobStore for NoopBlobStore { BlobStoreCleanupStat::default() } - fn get(&self, _tx: B256) -> Result, BlobStoreError> { + fn get(&self, _tx: B256) -> Result>, BlobStoreError> { Ok(None) } @@ -39,11 +40,14 @@ impl BlobStore for NoopBlobStore { fn get_all( &self, _txs: Vec, - ) -> Result, BlobStoreError> { + ) -> Result)>, BlobStoreError> { Ok(vec![]) } - fn get_exact(&self, txs: Vec) -> Result, BlobStoreError> { + fn get_exact( + &self, + txs: Vec, + ) -> Result>, BlobStoreError> { if txs.is_empty() { return Ok(vec![]) } diff --git a/crates/transaction-pool/src/config.rs b/crates/transaction-pool/src/config.rs index d4518846258a..212df34bd371 100644 --- a/crates/transaction-pool/src/config.rs +++ b/crates/transaction-pool/src/config.rs @@ -27,6 +27,9 @@ pub const DEFAULT_PRICE_BUMP: u128 = 10; /// This enforces that a blob transaction requires a 100% price bump to be replaced pub const REPLACE_BLOB_PRICE_BUMP: u128 = 100; +/// Default maximum new transactions for broadcasting. +pub const MAX_NEW_PENDING_TXS_NOTIFICATIONS: usize = 200; + /// Configuration options for the Transaction pool. #[derive(Debug, Clone)] pub struct PoolConfig { @@ -53,6 +56,8 @@ pub struct PoolConfig { pub pending_tx_listener_buffer_size: usize, /// Bound on number of new transactions from `reth_network::TransactionsManager` to buffer. pub new_tx_listener_buffer_size: usize, + /// How many new pending transactions to buffer and send iterators in progress. + pub max_new_pending_txs_notifications: usize, } impl PoolConfig { @@ -80,6 +85,7 @@ impl Default for PoolConfig { local_transactions_config: Default::default(), pending_tx_listener_buffer_size: PENDING_TX_LISTENER_BUFFER_SIZE, new_tx_listener_buffer_size: NEW_TX_LISTENER_BUFFER_SIZE, + max_new_pending_txs_notifications: MAX_NEW_PENDING_TXS_NOTIFICATIONS, } } } diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs index a4766a89d5c1..f71bf018807e 100644 --- a/crates/transaction-pool/src/error.rs +++ b/crates/transaction-pool/src/error.rs @@ -1,7 +1,8 @@ //! Transaction pool errors +use alloy_eips::eip4844::BlobTransactionValidationError; use alloy_primitives::{Address, TxHash, U256}; -use reth_primitives::{BlobTransactionValidationError, InvalidTransactionError}; +use reth_primitives::InvalidTransactionError; /// Transaction pool result type. 
pub type PoolResult = Result; diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 2cffcd33fa8b..8d11d7595b14 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -151,12 +151,12 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] use crate::{identifier::TransactionId, pool::PoolInner}; -use alloy_eips::eip4844::BlobAndProofV1; +use alloy_eips::eip4844::{BlobAndProofV1, BlobTransactionSidecar}; use alloy_primitives::{Address, TxHash, B256, U256}; use aquamarine as _; use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; -use reth_primitives::{BlobTransactionSidecar, PooledTransactionsElement}; +use reth_primitives::PooledTransactionsElement; use reth_storage_api::StateProviderFactory; use std::{collections::HashSet, sync::Arc}; use tokio::sync::mpsc::Receiver; @@ -166,9 +166,9 @@ pub use crate::{ blobstore::{BlobStore, BlobStoreError}, config::{ LocalTransactionConfig, PoolConfig, PriceBumpConfig, SubPoolLimit, DEFAULT_PRICE_BUMP, - DEFAULT_TXPOOL_ADDITIONAL_VALIDATION_TASKS, REPLACE_BLOB_PRICE_BUMP, - TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, - TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, + DEFAULT_TXPOOL_ADDITIONAL_VALIDATION_TASKS, MAX_NEW_PENDING_TXS_NOTIFICATIONS, + REPLACE_BLOB_PRICE_BUMP, TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, + TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, }, error::PoolResult, ordering::{CoinbaseTipOrdering, Priority, TransactionOrdering}, @@ -430,13 +430,6 @@ where Box::new(self.pool.best_transactions()) } - fn best_transactions_with_base_fee( - &self, - base_fee: u64, - ) -> Box>>> { - self.pool.best_transactions_with_attributes(BestTransactionsAttributes::base_fee(base_fee)) - } - fn best_transactions_with_attributes( &self, best_transactions_attributes: BestTransactionsAttributes, @@ -503,6 +496,27 @@ where self.pool.get_transactions_by_sender(sender) } + fn get_pending_transactions_with_predicate( + &self, + predicate: impl FnMut(&ValidPoolTransaction) -> bool, + ) -> Vec>> { + self.pool.pending_transactions_with_predicate(predicate) + } + + fn get_pending_transactions_by_sender( + &self, + sender: Address, + ) -> Vec>> { + self.pool.get_pending_transactions_by_sender(sender) + } + + fn get_queued_transactions_by_sender( + &self, + sender: Address, + ) -> Vec>> { + self.pool.get_queued_transactions_by_sender(sender) + } + fn get_highest_transaction_by_sender( &self, sender: Address, @@ -547,21 +561,24 @@ where self.pool.unique_senders() } - fn get_blob(&self, tx_hash: TxHash) -> Result, BlobStoreError> { + fn get_blob( + &self, + tx_hash: TxHash, + ) -> Result>, BlobStoreError> { self.pool.blob_store().get(tx_hash) } fn get_all_blobs( &self, tx_hashes: Vec, - ) -> Result, BlobStoreError> { + ) -> Result)>, BlobStoreError> { self.pool.blob_store().get_all(tx_hashes) } fn get_all_blobs_exact( &self, tx_hashes: Vec, - ) -> Result, BlobStoreError> { + ) -> Result>, BlobStoreError> { self.pool.blob_store().get_exact(tx_hashes) } diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 23a8d0dc66a5..271c63a388a4 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -5,9 +5,10 @@ use crate::{ error::PoolError, metrics::MaintainPoolMetrics, traits::{CanonicalStateUpdate, TransactionPool, TransactionPoolExt}, - BlockInfo, PoolTransaction, + BlockInfo, PoolTransaction, PoolUpdateKind, }; -use alloy_primitives::{Address, 
BlockHash, BlockNumber, Sealable}; +use alloy_eips::BlockNumberOrTag; +use alloy_primitives::{Address, BlockHash, BlockNumber}; use futures_util::{ future::{BoxFuture, Fuse, FusedFuture}, FutureExt, Stream, StreamExt, @@ -17,7 +18,7 @@ use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_execution_types::ChangedAccount; use reth_fs_util::FsPathError; use reth_primitives::{ - BlockNumberOrTag, PooledTransactionsElementEcRecovered, SealedHeader, TransactionSigned, + PooledTransactionsElementEcRecovered, SealedHeader, TransactionSigned, TransactionSignedEcRecovered, }; use reth_storage_api::{errors::provider::ProviderError, BlockReaderIdExt, StateProviderFactory}; @@ -27,6 +28,7 @@ use std::{ collections::HashSet, hash::{Hash, Hasher}, path::{Path, PathBuf}, + sync::Arc, }; use tokio::sync::oneshot; use tracing::{debug, error, info, trace, warn}; @@ -104,9 +106,7 @@ pub async fn maintain_transaction_pool( let MaintainPoolConfig { max_update_depth, max_reload_accounts, .. } = config; // ensure the pool points to latest state if let Ok(Some(latest)) = client.header_by_number_or_tag(BlockNumberOrTag::Latest) { - let sealed = latest.seal_slow(); - let (header, seal) = sealed.into_parts(); - let latest = SealedHeader::new(header, seal); + let latest = SealedHeader::seal(latest); let chain_spec = client.chain_spec(); let info = BlockInfo { block_gas_limit: latest.gas_limit, @@ -328,6 +328,7 @@ pub async fn maintain_transaction_pool( pool.get_blob(tx.hash) .ok() .flatten() + .map(Arc::unwrap_or_clone) .and_then(|sidecar| { PooledTransactionsElementEcRecovered::try_from_blob_transaction( tx, sidecar, @@ -351,6 +352,7 @@ pub async fn maintain_transaction_pool( changed_accounts, // all transactions mined in the new chain need to be removed from the pool mined_transactions: new_blocks.transaction_hashes().collect(), + update_kind: PoolUpdateKind::Reorg, }; pool.on_canonical_state_change(update); @@ -433,6 +435,7 @@ pub async fn maintain_transaction_pool( pending_block_blob_fee, changed_accounts, mined_transactions, + update_kind: PoolUpdateKind::Commit, }; pool.on_canonical_state_change(update); @@ -575,7 +578,7 @@ where // Filter out errors ::try_from_consensus(tx.into()).ok() }) - .collect::>(); + .collect(); let outcome = pool.add_transactions(crate::TransactionOrigin::Local, pool_transactions).await; diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index 11c5e7eea29b..cf2270978abe 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -16,10 +16,12 @@ use crate::{ PooledTransactionsElement, PropagatedTransactions, TransactionEvents, TransactionOrigin, TransactionPool, TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, }; -use alloy_eips::{eip1559::ETHEREUM_BLOCK_GAS_LIMIT, eip4844::BlobAndProofV1}; +use alloy_eips::{ + eip1559::ETHEREUM_BLOCK_GAS_LIMIT, + eip4844::{BlobAndProofV1, BlobTransactionSidecar}, +}; use alloy_primitives::{Address, TxHash, B256, U256}; use reth_eth_wire_types::HandleMempoolData; -use reth_primitives::BlobTransactionSidecar; use std::{collections::HashSet, marker::PhantomData, sync::Arc}; use tokio::sync::{mpsc, mpsc::Receiver}; @@ -150,13 +152,6 @@ impl TransactionPool for NoopTransactionPool { Box::new(std::iter::empty()) } - fn best_transactions_with_base_fee( - &self, - _: u64, - ) -> Box>>> { - Box::new(std::iter::empty()) - } - fn best_transactions_with_attributes( &self, _: BestTransactionsAttributes, @@ -220,6 +215,27 @@ impl TransactionPool for 
NoopTransactionPool { vec![] } + fn get_pending_transactions_with_predicate( + &self, + _predicate: impl FnMut(&ValidPoolTransaction) -> bool, + ) -> Vec>> { + vec![] + } + + fn get_pending_transactions_by_sender( + &self, + _sender: Address, + ) -> Vec>> { + vec![] + } + + fn get_queued_transactions_by_sender( + &self, + _sender: Address, + ) -> Vec>> { + vec![] + } + fn get_highest_transaction_by_sender( &self, _sender: Address, @@ -261,21 +277,24 @@ impl TransactionPool for NoopTransactionPool { Default::default() } - fn get_blob(&self, _tx_hash: TxHash) -> Result, BlobStoreError> { + fn get_blob( + &self, + _tx_hash: TxHash, + ) -> Result>, BlobStoreError> { Ok(None) } fn get_all_blobs( &self, _tx_hashes: Vec, - ) -> Result, BlobStoreError> { + ) -> Result)>, BlobStoreError> { Ok(vec![]) } fn get_all_blobs_exact( &self, tx_hashes: Vec, - ) -> Result, BlobStoreError> { + ) -> Result>, BlobStoreError> { if tx_hashes.is_empty() { return Ok(vec![]) } diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 77cd35d8a6be..17165611794e 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -1,14 +1,16 @@ use crate::{ - identifier::TransactionId, pool::pending::PendingTransaction, PoolTransaction, - TransactionOrdering, ValidPoolTransaction, + identifier::{SenderId, TransactionId}, + pool::pending::PendingTransaction, + PayloadTransactions, PoolTransaction, TransactionOrdering, ValidPoolTransaction, }; -use alloy_primitives::B256 as TxHash; +use alloy_consensus::Transaction; +use alloy_primitives::Address; use core::fmt; +use reth_primitives::TransactionSignedEcRecovered; use std::{ - collections::{BTreeMap, BTreeSet, HashSet}, + collections::{BTreeMap, BTreeSet, HashSet, VecDeque}, sync::Arc, }; - use tokio::sync::broadcast::{error::TryRecvError, Receiver}; use tracing::debug; @@ -48,7 +50,7 @@ impl Iterator for BestTransactionsWithFees { fn next(&mut self) -> Option { // find the next transaction that satisfies the base fee loop { - let best = self.best.next()?; + let best = Iterator::next(&mut self.best)?; // If both the base fee and blob fee (if applicable for EIP-4844) are satisfied, return // the transaction if best.transaction.max_fee_per_gas() >= self.base_fee as u128 && @@ -80,7 +82,7 @@ pub(crate) struct BestTransactions { /// then can be moved from the `all` set to the `independent` set. pub(crate) independent: BTreeSet>, /// There might be the case where a yielded transactions is invalid, this will track it. - pub(crate) invalid: HashSet, + pub(crate) invalid: HashSet, /// Used to receive any new pending transactions that have been added to the pool after this /// iterator was static fileted /// @@ -94,7 +96,7 @@ pub(crate) struct BestTransactions { impl BestTransactions { /// Mark the transaction and it's descendants as invalid. pub(crate) fn mark_invalid(&mut self, tx: &Arc>) { - self.invalid.insert(*tx.hash()); + self.invalid.insert(tx.sender_id()); } /// Returns the ancestor the given transaction, the transaction with `nonce - 1`. @@ -128,6 +130,15 @@ impl BestTransactions { } } + /// Removes the currently best independent transaction from the independent set and the total + /// set. 
+ fn pop_best(&mut self) -> Option> { + self.independent.pop_last().inspect(|best| { + let removed = self.all.remove(best.transaction.id()); + debug_assert!(removed.is_some(), "must be present in both sets"); + }) + } + /// Checks for new transactions that have come into the `PendingPool` after this iterator was /// created and inserts them fn add_new_transactions(&mut self) { @@ -167,15 +178,15 @@ impl Iterator for BestTransactions { loop { self.add_new_transactions(); // Remove the next independent tx with the highest priority - let best = self.independent.pop_last()?; - let hash = best.transaction.hash(); + let best = self.pop_best()?; + let sender_id = best.transaction.sender_id(); - // skip transactions that were marked as invalid - if self.invalid.contains(hash) { + // skip transactions for which sender was marked as invalid + if self.invalid.contains(&sender_id) { debug!( target: "txpool", "[{:?}] skipping invalid transaction", - hash + best.transaction.hash() ); continue } @@ -186,7 +197,7 @@ impl Iterator for BestTransactions { } if self.skip_blobs && best.transaction.transaction.is_eip4844() { - // blobs should be skipped, marking the as invalid will ensure that no dependent + // blobs should be skipped, marking them as invalid will ensure that no dependent // transactions are returned self.mark_invalid(&best.transaction) } else { @@ -196,7 +207,50 @@ impl Iterator for BestTransactions { } } -/// A[`BestTransactions`](crate::traits::BestTransactions) implementation that filters the +/// Wrapper struct that allows to convert `BestTransactions` (used in tx pool) to +/// `PayloadTransactions` (used in block composition). +#[derive(Debug)] +pub struct BestPayloadTransactions +where + T: PoolTransaction>, + I: Iterator>>, +{ + invalid: HashSet
, + best: I, +} + +impl BestPayloadTransactions +where + T: PoolTransaction>, + I: Iterator>>, +{ + /// Create a new `BestPayloadTransactions` with the given iterator. + pub fn new(best: I) -> Self { + Self { invalid: Default::default(), best } + } +} + +impl PayloadTransactions for BestPayloadTransactions +where + T: PoolTransaction>, + I: Iterator>>, +{ + fn next(&mut self, _ctx: ()) -> Option { + loop { + let tx = self.best.next()?; + if self.invalid.contains(&tx.sender()) { + continue + } + return Some(tx.to_recovered_transaction()) + } + } + + fn mark_invalid(&mut self, sender: Address, _nonce: u64) { + self.invalid.insert(sender); + } +} + +/// A [`BestTransactions`](crate::traits::BestTransactions) implementation that filters the /// transactions of iter with predicate. /// /// Filter out transactions are marked as invalid: @@ -259,6 +313,212 @@ impl fmt::Debug for BestTransactionFilter { } } +/// Wrapper over [`crate::traits::BestTransactions`] that prioritizes transactions of certain +/// senders capping total gas used by such transactions. +#[derive(Debug)] +pub struct BestTransactionsWithPrioritizedSenders { + /// Inner iterator + inner: I, + /// A set of senders which transactions should be prioritized + prioritized_senders: HashSet
, + /// Maximum total gas limit of prioritized transactions + max_prioritized_gas: u64, + /// Buffer with transactions that are not being prioritized. Those will be the first to be + /// included after the prioritized transactions + buffer: VecDeque, + /// Tracker of total gas limit of prioritized transactions. Once it reaches + /// `max_prioritized_gas` no more transactions will be prioritized + prioritized_gas: u64, +} + +impl BestTransactionsWithPrioritizedSenders { + /// Constructs a new [`BestTransactionsWithPrioritizedSenders`]. + pub fn new(prioritized_senders: HashSet
<Address>, max_prioritized_gas: u64, inner: I) -> Self {
+        Self {
+            inner,
+            prioritized_senders,
+            max_prioritized_gas,
+            buffer: Default::default(),
+            prioritized_gas: Default::default(),
+        }
+    }
+}
+
+impl<I, T> Iterator for BestTransactionsWithPrioritizedSenders<I>
+where
+    I: crate::traits::BestTransactions<Item = Arc<ValidPoolTransaction<T>>>,
+    T: PoolTransaction,
+{
+    type Item = <I as Iterator>::Item;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        // If we have space, try prioritizing transactions
+        if self.prioritized_gas < self.max_prioritized_gas {
+            for item in &mut self.inner {
+                if self.prioritized_senders.contains(&item.transaction.sender()) &&
+                    self.prioritized_gas + item.transaction.gas_limit() <=
+                        self.max_prioritized_gas
+                {
+                    self.prioritized_gas += item.transaction.gas_limit();
+                    return Some(item)
+                }
+                self.buffer.push_back(item);
+            }
+        }
+
+        if let Some(item) = self.buffer.pop_front() {
+            Some(item)
+        } else {
+            self.inner.next()
+        }
+    }
+}
+
+impl<I, T> crate::traits::BestTransactions for BestTransactionsWithPrioritizedSenders<I>
+where
+    I: crate::traits::BestTransactions<Item = Arc<ValidPoolTransaction<T>>>,
+    T: PoolTransaction,
+{
+    fn mark_invalid(&mut self, tx: &Self::Item) {
+        self.inner.mark_invalid(tx)
+    }
+
+    fn no_updates(&mut self) {
+        self.inner.no_updates()
+    }
+
+    fn set_skip_blobs(&mut self, skip_blobs: bool) {
+        if skip_blobs {
+            self.buffer.retain(|tx| !tx.transaction.is_eip4844())
+        }
+        self.inner.set_skip_blobs(skip_blobs)
+    }
+}
+
+/// An implementation of [`crate::traits::PayloadTransactions`] that yields
+/// a pre-defined set of transactions.
+///
+/// This is useful to put a sequencer-specified set of transactions into the block
+/// and compose it with the rest of the transactions.
+#[derive(Debug)]
+pub struct PayloadTransactionsFixed<T> {
+    transactions: Vec<T>,
+    index: usize,
+}
+
+impl<T> PayloadTransactionsFixed<T> {
+    /// Constructs a new [`PayloadTransactionsFixed`].
+    pub fn new(transactions: Vec<T>) -> Self {
+        Self { transactions, index: Default::default() }
+    }
+
+    /// Constructs a new [`PayloadTransactionsFixed`] with a single transaction.
+    pub fn single(transaction: T) -> Self {
+        Self { transactions: vec![transaction], index: Default::default() }
+    }
+}
+
+impl PayloadTransactions for PayloadTransactionsFixed<TransactionSignedEcRecovered> {
+    fn next(&mut self, _ctx: ()) -> Option<TransactionSignedEcRecovered> {
+        (self.index < self.transactions.len()).then(|| {
+            let tx = self.transactions[self.index].clone();
+            self.index += 1;
+            tx
+        })
+    }
+
+    fn mark_invalid(&mut self, _sender: Address, _nonce: u64) {}
+}
+
+/// Wrapper over [`crate::traits::PayloadTransactions`] that combines transactions from multiple
+/// `PayloadTransactions` iterators and keeps track of the gas for both iterators.
+///
+/// We can't use [`Iterator::chain`], because:
+/// (a) we need to propagate the `mark_invalid` and `no_updates`
+/// (b) we need to keep track of the gas
+///
+/// Note that [`PayloadTransactionsChain`] fully drains the first iterator
+/// before moving to the second one.
+///
+/// If the `before` iterator has transactions that don't fit into the block,
+/// the `after` iterator will receive a `mark_invalid` call for each of them.
+#[derive(Debug)]
+pub struct PayloadTransactionsChain<B: PayloadTransactions, A: PayloadTransactions> {
+    /// Iterator that will be used first
+    before: B,
+    /// Allowed gas for the transactions from `before` iterator. If `None`, no gas limit is
+    /// enforced.
+    before_max_gas: Option<u64>,
+    /// Gas used by the transactions from `before` iterator
+    before_gas: u64,
+    /// Iterator that will be used after `before` iterator
+    after: A,
+    /// Allowed gas for the transactions from `after` iterator.
If `None`, no gas limit is + /// enforced. + after_max_gas: Option, + /// Gas used by the transactions from `after` iterator + after_gas: u64, +} + +impl PayloadTransactionsChain { + /// Constructs a new [`PayloadTransactionsChain`]. + pub fn new( + before: B, + before_max_gas: Option, + after: A, + after_max_gas: Option, + ) -> Self { + Self { + before, + before_max_gas, + before_gas: Default::default(), + after, + after_max_gas, + after_gas: Default::default(), + } + } +} + +impl PayloadTransactions for PayloadTransactionsChain +where + B: PayloadTransactions, + A: PayloadTransactions, +{ + fn next(&mut self, ctx: ()) -> Option { + while let Some(tx) = self.before.next(ctx) { + if let Some(before_max_gas) = self.before_max_gas { + if self.before_gas + tx.transaction.gas_limit() <= before_max_gas { + self.before_gas += tx.transaction.gas_limit(); + return Some(tx); + } + self.before.mark_invalid(tx.signer(), tx.transaction.nonce()); + self.after.mark_invalid(tx.signer(), tx.transaction.nonce()); + } else { + return Some(tx); + } + } + + while let Some(tx) = self.after.next(ctx) { + if let Some(after_max_gas) = self.after_max_gas { + if self.after_gas + tx.transaction.gas_limit() <= after_max_gas { + self.after_gas += tx.transaction.gas_limit(); + return Some(tx); + } + self.after.mark_invalid(tx.signer(), tx.transaction.nonce()); + } else { + return Some(tx); + } + } + + None + } + + fn mark_invalid(&mut self, sender: Address, nonce: u64) { + self.before.mark_invalid(sender, nonce); + self.after.mark_invalid(sender, nonce); + } +} + #[cfg(test)] mod tests { use super::*; @@ -319,6 +579,29 @@ mod tests { assert!(best.next().is_none()); } + #[test] + fn test_best_transactions_iter_invalid() { + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + let num_tx = 10; + // insert 10 gapless tx + let tx = MockTransaction::eip1559(); + for nonce in 0..num_tx { + let tx = tx.clone().rng_hash().with_nonce(nonce); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + let mut best: Box< + dyn crate::traits::BestTransactions>>, + > = Box::new(pool.best()); + + let tx = Iterator::next(&mut best).unwrap(); + crate::traits::BestTransactions::mark_invalid(&mut *best, &tx); + assert!(Iterator::next(&mut best).is_none()); + } + #[test] fn test_best_with_fees_iter_base_fee_satisfied() { let mut pool = PendingPool::new(MockOrdering::default()); @@ -623,4 +906,119 @@ mod tests { assert_eq!(tx.nonce() % 2, 0); } } + + #[test] + fn test_best_transactions_prioritized_senders() { + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + // Add 5 plain transactions from different senders with increasing gas price + for gas_price in 0..5 { + let tx = MockTransaction::eip1559().with_gas_price(gas_price); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + // Add another transaction with 0 gas price that's going to be prioritized by sender + let prioritized_tx = MockTransaction::eip1559().with_gas_price(0); + let valid_prioritized_tx = f.validated(prioritized_tx.clone()); + pool.add_transaction(Arc::new(valid_prioritized_tx), 0); + + let prioritized_senders = HashSet::from([prioritized_tx.sender()]); + let best = + BestTransactionsWithPrioritizedSenders::new(prioritized_senders, 200, pool.best()); + + // Verify that the prioritized transaction is returned first + // and the rest are returned in the reverse order of gas price + let mut 
iter = best.into_iter(); + let top_of_block_tx = iter.next().unwrap(); + assert_eq!(top_of_block_tx.max_fee_per_gas(), 0); + assert_eq!(top_of_block_tx.sender(), prioritized_tx.sender()); + for gas_price in (0..5).rev() { + assert_eq!(iter.next().unwrap().max_fee_per_gas(), gas_price); + } + + // TODO: Test that gas limits for prioritized transactions are respected + } + + #[test] + fn test_best_transactions_chained_iterators() { + let mut priority_pool = PendingPool::new(MockOrdering::default()); + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + // Block composition + // === + // (1) up to 100 gas: custom top-of-block transaction + // (2) up to 100 gas: transactions from the priority pool + // (3) up to 200 gas: only transactions from address A + // (4) up to 200 gas: only transactions from address B + // (5) until block gas limit: all transactions from the main pool + + // Notes: + // - If prioritized addresses overlap, a single transaction will be prioritized twice and + // therefore use the per-segment gas limit twice. + // - Priority pool and main pool must synchronize between each other to make sure there are + // no conflicts for the same nonce. For example, in this scenario, pools can't reject + // transactions with seemingly incorrect nonces, because previous transactions might be in + // the other pool. + + let address_top_of_block = Address::random(); + let address_in_priority_pool = Address::random(); + let address_a = Address::random(); + let address_b = Address::random(); + let address_regular = Address::random(); + + // Add transactions to the main pool + { + let prioritized_tx_a = + MockTransaction::eip1559().with_gas_price(5).with_sender(address_a); + // without our custom logic, B would be prioritized over A due to gas price: + let prioritized_tx_b = + MockTransaction::eip1559().with_gas_price(10).with_sender(address_b); + let regular_tx = + MockTransaction::eip1559().with_gas_price(15).with_sender(address_regular); + pool.add_transaction(Arc::new(f.validated(prioritized_tx_a)), 0); + pool.add_transaction(Arc::new(f.validated(prioritized_tx_b)), 0); + pool.add_transaction(Arc::new(f.validated(regular_tx)), 0); + } + + // Add transactions to the priority pool + { + let prioritized_tx = + MockTransaction::eip1559().with_gas_price(0).with_sender(address_in_priority_pool); + let valid_prioritized_tx = f.validated(prioritized_tx); + priority_pool.add_transaction(Arc::new(valid_prioritized_tx), 0); + } + + let mut block = PayloadTransactionsChain::new( + PayloadTransactionsFixed::single( + MockTransaction::eip1559().with_sender(address_top_of_block).into(), + ), + Some(100), + PayloadTransactionsChain::new( + BestPayloadTransactions::new(priority_pool.best()), + Some(100), + BestPayloadTransactions::new(BestTransactionsWithPrioritizedSenders::new( + HashSet::from([address_a]), + 200, + BestTransactionsWithPrioritizedSenders::new( + HashSet::from([address_b]), + 200, + pool.best(), + ), + )), + None, + ), + None, + ); + + assert_eq!(block.next(()).unwrap().signer(), address_top_of_block); + assert_eq!(block.next(()).unwrap().signer(), address_in_priority_pool); + assert_eq!(block.next(()).unwrap().signer(), address_a); + assert_eq!(block.next(()).unwrap().signer(), address_b); + assert_eq!(block.next(()).unwrap().signer(), address_regular); + } + + // TODO: Same nonce test } diff --git a/crates/transaction-pool/src/pool/blob.rs b/crates/transaction-pool/src/pool/blob.rs index cb09e8234095..ac39c6ab781a 100644 --- 
a/crates/transaction-pool/src/pool/blob.rs +++ b/crates/transaction-pool/src/pool/blob.rs @@ -11,7 +11,7 @@ use std::{ /// A set of validated blob transactions in the pool that are __not pending__. /// -/// The purpose of this pool is keep track of blob transactions that are queued and to evict the +/// The purpose of this pool is to keep track of blob transactions that are queued and to evict the /// worst blob transactions once the sub-pool is full. /// /// This expects that certain constraints are met: @@ -198,14 +198,13 @@ impl BlobTransactions { &mut self, pending_fees: &PendingFees, ) -> Vec>> { - let to_remove = self.satisfy_pending_fee_ids(pending_fees); - - let mut removed = Vec::with_capacity(to_remove.len()); - for id in to_remove { - removed.push(self.remove_transaction(&id).expect("transaction exists")); - } + let removed = self + .satisfy_pending_fee_ids(pending_fees) + .into_iter() + .map(|id| self.remove_transaction(&id).expect("transaction exists")) + .collect(); - // set pending fees and reprioritize / resort + // Update pending fees and reprioritize self.pending_fees = pending_fees.clone(); self.reprioritize(); diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 600a8da934ec..78cc790e9423 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -86,9 +86,9 @@ use parking_lot::{Mutex, RwLock, RwLockReadGuard}; use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; +use alloy_eips::eip4844::BlobTransactionSidecar; use reth_primitives::{ - BlobTransaction, BlobTransactionSidecar, PooledTransactionsElement, TransactionSigned, - TransactionSignedEcRecovered, + BlobTransaction, PooledTransactionsElement, TransactionSigned, TransactionSignedEcRecovered, }; use std::{ collections::{HashMap, HashSet}, @@ -106,7 +106,10 @@ use crate::{ traits::{GetPooledTransactionLimit, NewBlobSidecar, TransactionListenerKind}, validate::ValidTransaction, }; -pub use best::BestTransactionFilter; +pub use best::{ + BestPayloadTransactions, BestTransactionFilter, BestTransactionsWithPrioritizedSenders, + PayloadTransactionsChain, PayloadTransactionsFixed, +}; pub use blob::{blob_tx_priority, fee_delta}; pub use events::{FullTransactionEvent, TransactionEvent}; pub use listener::{AllTransactionsEvents, TransactionEvents}; @@ -307,7 +310,9 @@ where /// Caution: this assumes the given transaction is eip-4844 fn get_blob_transaction(&self, transaction: TransactionSigned) -> Option { if let Ok(Some(sidecar)) = self.blob_store.get(transaction.hash()) { - if let Ok(blob) = BlobTransaction::try_from_signed(transaction, sidecar) { + if let Ok(blob) = + BlobTransaction::try_from_signed(transaction, Arc::unwrap_or_clone(sidecar)) + { return Some(blob) } } @@ -389,7 +394,9 @@ where trace!(target: "txpool", ?update, "updating pool on canonical state change"); let block_info = update.block_info(); - let CanonicalStateUpdate { new_tip, changed_accounts, mined_transactions, .. } = update; + let CanonicalStateUpdate { + new_tip, changed_accounts, mined_transactions, update_kind, .. 
+        } = update;
         self.validator.on_new_head_block(new_tip);
 
         let changed_senders = self.changed_senders(changed_accounts.into_iter());
@@ -399,6 +406,7 @@ where
             block_info,
             mined_transactions,
             changed_senders,
+            update_kind,
         );
 
         // This will discard outdated transactions based on the account's nonce
@@ -776,6 +784,32 @@ where
         self.get_pool_data().get_transactions_by_sender(sender_id)
     }
 
+    /// Returns all queued transactions for the given sender
+    pub(crate) fn get_queued_transactions_by_sender(
+        &self,
+        sender: Address,
+    ) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
+        let sender_id = self.get_sender_id(sender);
+        self.get_pool_data().queued_txs_by_sender(sender_id)
+    }
+
+    /// Returns all pending transactions filtered by the given predicate
+    pub(crate) fn pending_transactions_with_predicate(
+        &self,
+        predicate: impl FnMut(&ValidPoolTransaction<T::Transaction>) -> bool,
+    ) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
+        self.get_pool_data().pending_transactions_with_predicate(predicate)
+    }
+
+    /// Returns all pending transactions for the given sender
+    pub(crate) fn get_pending_transactions_by_sender(
+        &self,
+        sender: Address,
+    ) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
+        let sender_id = self.get_sender_id(sender);
+        self.get_pool_data().pending_txs_by_sender(sender_id)
+    }
+
     /// Returns the highest transaction of the address
     pub(crate) fn get_highest_transaction_by_sender(
         &self,
@@ -1218,7 +1252,8 @@ mod tests {
         validate::ValidTransaction,
         BlockInfo, PoolConfig, SubPoolLimit, TransactionOrigin, TransactionValidationOutcome, U256,
     };
-    use reth_primitives::{kzg::Blob, transaction::generate_blob_sidecar};
+    use alloy_eips::eip4844::BlobTransactionSidecar;
+    use reth_primitives::kzg::Blob;
     use std::{fs, path::PathBuf};
 
     #[test]
@@ -1253,7 +1288,7 @@ mod tests {
         .unwrap()];
 
         // Generate a BlobTransactionSidecar from the blobs.
-        let sidecar = generate_blob_sidecar(blobs);
+        let sidecar = BlobTransactionSidecar::try_from_blobs(blobs).unwrap();
 
         // Create an in-memory blob store.
         let blob_store = InMemoryBlobStore::default();
diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs
index ff5269014c4c..f4bce8c85a63 100644
--- a/crates/transaction-pool/src/pool/pending.rs
+++ b/crates/transaction-pool/src/pool/pending.rs
@@ -8,7 +8,7 @@ use crate::{
 };
 use std::{
     cmp::Ordering,
-    collections::{BTreeMap, BTreeSet},
+    collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap},
     ops::Bound::Unbounded,
     sync::Arc,
 };
@@ -38,14 +38,10 @@ pub struct PendingPool<T: TransactionOrdering> {
     all: BTreeSet<PendingTransaction<T>>,
     /// The highest nonce transactions for each sender - like the `independent` set, but the
     /// highest instead of lowest nonce.
-    ///
-    /// Sorted by their scoring value.
-    highest_nonces: BTreeSet<PendingTransaction<T>>,
+    highest_nonces: HashMap<SenderId, PendingTransaction<T>>,
     /// Independent transactions that can be included directly and don't require other
     /// transactions.
-    ///
-    /// Sorted by their scoring value.
-    independent_transactions: BTreeSet<PendingTransaction<T>>,
+    independent_transactions: HashMap<SenderId, PendingTransaction<T>>,
     /// Keeps track of the size of this pool.
     ///
     /// See also [`PoolTransaction::size`](crate::traits::PoolTransaction::size).
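The three sender-scoped getters added to PoolInner surface through the TransactionPool trait (see the traits.rs hunk later in this diff). A hedged usage sketch at a hypothetical call site, assuming any pool handle implementing that trait:

    use alloy_primitives::Address;
    use reth_transaction_pool::TransactionPool;

    /// Returns (pending, queued) transaction counts for a single sender.
    /// Hypothetical helper, not part of this diff.
    fn sender_backlog<P: TransactionPool>(pool: &P, sender: Address) -> (usize, usize) {
        let pending = pool.get_pending_transactions_by_sender(sender);
        let queued = pool.get_queued_transactions_by_sender(sender);
        (pending.len(), queued.len())
    }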
@@ -108,7 +104,7 @@ impl PendingPool { pub(crate) fn best(&self) -> BestTransactions { BestTransactions { all: self.by_id.clone(), - independent: self.independent_transactions.clone(), + independent: self.independent_transactions.values().cloned().collect(), invalid: Default::default(), new_transaction_receiver: Some(self.new_transaction_notifier.subscribe()), skip_blobs: false, @@ -255,17 +251,26 @@ impl PendingPool { /// Updates the independent transaction and highest nonces set, assuming the given transaction /// is being _added_ to the pool. fn update_independents_and_highest_nonces(&mut self, tx: &PendingTransaction) { - let ancestor_id = tx.transaction.id().unchecked_ancestor(); - if let Some(ancestor) = ancestor_id.and_then(|id| self.by_id.get(&id)) { - // the transaction already has an ancestor, so we only need to ensure that the - // highest nonces set actually contains the highest nonce for that sender - self.highest_nonces.remove(ancestor); - } else { - // If there's __no__ ancestor in the pool, then this transaction is independent, this is - // guaranteed because this pool is gapless. - self.independent_transactions.insert(tx.clone()); + match self.highest_nonces.entry(tx.transaction.sender_id()) { + Entry::Occupied(mut entry) => { + if entry.get().transaction.nonce() < tx.transaction.nonce() { + *entry.get_mut() = tx.clone(); + } + } + Entry::Vacant(entry) => { + entry.insert(tx.clone()); + } + } + match self.independent_transactions.entry(tx.transaction.sender_id()) { + Entry::Occupied(mut entry) => { + if entry.get().transaction.nonce() > tx.transaction.nonce() { + *entry.get_mut() = tx.clone(); + } + } + Entry::Vacant(entry) => { + entry.insert(tx.clone()); + } } - self.highest_nonces.insert(tx.clone()); } /// Returns the ancestor the given transaction, the transaction with `nonce - 1`. 
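The HashMap-based rewrite of update_independents_and_highest_nonces keeps exactly one entry per sender in each map: the lowest pending nonce (independent, executable first) and the highest (the per-sender eviction candidate). The same entry-API pattern in isolation, with plain u64 stand-ins for SenderId and the transaction nonce:

    use std::collections::{hash_map::Entry, HashMap};

    /// Record one (sender, nonce) observation, keeping the per-sender min and max.
    fn observe(
        independent: &mut HashMap<u64, u64>, // sender -> lowest nonce seen
        highest: &mut HashMap<u64, u64>,     // sender -> highest nonce seen
        sender: u64,
        nonce: u64,
    ) {
        match independent.entry(sender) {
            // Keep the smaller nonce: that transaction is executable first.
            Entry::Occupied(mut entry) => {
                if *entry.get() > nonce {
                    entry.insert(nonce);
                }
            }
            Entry::Vacant(entry) => {
                entry.insert(nonce);
            }
        }
        match highest.entry(sender) {
            // Keep the larger nonce: the last to execute, first to evict.
            Entry::Occupied(mut entry) => {
                if *entry.get() < nonce {
                    entry.insert(nonce);
                }
            }
            Entry::Vacant(entry) => {
                entry.insert(nonce);
            }
        }
    }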
@@ -320,19 +325,26 @@ impl PendingPool { &mut self, id: &TransactionId, ) -> Option>> { - // mark the next as independent if it exists - if let Some(unlocked) = self.get(&id.descendant()) { - self.independent_transactions.insert(unlocked.clone()); + if let Some(lowest) = self.independent_transactions.get(&id.sender) { + if lowest.transaction.nonce() == id.nonce { + self.independent_transactions.remove(&id.sender); + // mark the next as independent if it exists + if let Some(unlocked) = self.get(&id.descendant()) { + self.independent_transactions.insert(id.sender, unlocked.clone()); + } + } } + let tx = self.by_id.remove(id)?; self.size_of -= tx.transaction.size(); self.all.remove(&tx); - self.independent_transactions.remove(&tx); - // switch out for the next ancestor if there is one - if self.highest_nonces.remove(&tx) { + if let Some(highest) = self.highest_nonces.get(&id.sender) { + if highest.transaction.nonce() == id.nonce { + self.highest_nonces.remove(&id.sender); + } if let Some(ancestor) = self.ancestor(id) { - self.highest_nonces.insert(ancestor.clone()); + self.highest_nonces.insert(id.sender, ancestor.clone()); } } Some(tx.transaction) @@ -398,8 +410,12 @@ impl PendingPool { // we can reuse the temp array removed.clear(); + // we prefer removing transactions with lower ordering + let mut worst_transactions = self.highest_nonces.values().collect::>(); + worst_transactions.sort(); + // loop through the highest nonces set, removing transactions until we reach the limit - for tx in &self.highest_nonces { + for tx in worst_transactions { // return early if the pool is under limits if !limit.is_exceeded(original_length - total_removed, original_size - total_size) || non_local_senders == 0 @@ -513,6 +529,12 @@ impl PendingPool { self.by_id.get(id) } + /// Returns a reference to the independent transactions in the pool + #[cfg(test)] + pub(crate) const fn independent(&self) -> &HashMap> { + &self.independent_transactions + } + /// Asserts that the bijection between `by_id` and `all` is valid. #[cfg(any(test, feature = "test-utils"))] pub(crate) fn assert_invariants(&self) { @@ -668,7 +690,7 @@ mod tests { // First transaction should be evicted. 
assert_eq!( - pool.highest_nonces.iter().next().map(|tx| *tx.transaction.hash()), + pool.highest_nonces.values().min().map(|tx| *tx.transaction.hash()), Some(*t.hash()) ); @@ -723,7 +745,7 @@ mod tests { .collect::>(); let actual_highest_nonces = pool .highest_nonces - .iter() + .values() .map(|tx| (tx.transaction.sender(), tx.transaction.nonce())) .collect::>(); assert_eq!(expected_highest_nonces, actual_highest_nonces); @@ -815,4 +837,47 @@ mod tests { pending.into_iter().map(|tx| (tx.sender(), tx.nonce())).collect::>(); assert_eq!(pending, expected_pending); } + + // + #[test] + fn test_eligible_updates_promoted() { + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + let num_senders = 10; + + let first_txs: Vec<_> = (0..num_senders) // + .map(|_| MockTransaction::eip1559()) + .collect(); + let second_txs: Vec<_> = + first_txs.iter().map(|tx| tx.clone().rng_hash().inc_nonce()).collect(); + + for tx in first_txs { + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + let mut best = pool.best(); + + for _ in 0..num_senders { + if let Some(tx) = best.next() { + assert_eq!(tx.nonce(), 0); + } else { + panic!("cannot read one of first_txs"); + } + } + + for tx in second_txs { + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + for _ in 0..num_senders { + if let Some(tx) = best.next() { + assert_eq!(tx.nonce(), 1); + } else { + panic!("cannot read one of second_txs"); + } + } + } } diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 42de860db794..3d72d6a9f15a 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -15,16 +15,18 @@ use crate::{ AddedPendingTransaction, AddedTransaction, OnNewCanonicalStateOutcome, }, traits::{BestTransactionsAttributes, BlockInfo, PoolSize}, - PoolConfig, PoolResult, PoolTransaction, PriceBumpConfig, TransactionOrdering, + PoolConfig, PoolResult, PoolTransaction, PoolUpdateKind, PriceBumpConfig, TransactionOrdering, ValidPoolTransaction, U256, }; use alloy_consensus::constants::{ EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; -use alloy_eips::eip1559::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}; +use alloy_eips::{ + eip1559::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}, + eip4844::BLOB_TX_MIN_BLOB_GASPRICE, +}; use alloy_primitives::{Address, TxHash, B256}; -use reth_primitives::constants::eip4844::BLOB_TX_MIN_BLOB_GASPRICE; use rustc_hash::FxHashMap; use smallvec::SmallVec; use std::{ @@ -74,6 +76,8 @@ pub struct TxPool { all_transactions: AllTransactions, /// Transaction pool metrics metrics: TxPoolMetrics, + /// The last update kind that was applied to the pool. 
+ latest_update_kind: Option, } // === impl TxPool === @@ -90,6 +94,7 @@ impl TxPool { all_transactions: AllTransactions::new(&config), config, metrics: Default::default(), + latest_update_kind: None, } } @@ -362,11 +367,34 @@ impl TxPool { self.pending_pool.all() } + /// Returns all pending transactions filtered by predicate + pub(crate) fn pending_transactions_with_predicate( + &self, + mut predicate: impl FnMut(&ValidPoolTransaction) -> bool, + ) -> Vec>> { + self.pending_transactions_iter().filter(|tx| predicate(tx)).collect() + } + + /// Returns all pending transactions for the specified sender + pub(crate) fn pending_txs_by_sender( + &self, + sender: SenderId, + ) -> Vec>> { + self.pending_transactions_iter().filter(|tx| tx.sender_id() == sender).collect() + } + /// Returns all transactions from parked pools pub(crate) fn queued_transactions(&self) -> Vec>> { self.basefee_pool.all().chain(self.queued_pool.all()).collect() } + /// Returns an iterator over all transactions from parked pools + pub(crate) fn queued_transactions_iter( + &self, + ) -> impl Iterator>> + '_ { + self.basefee_pool.all().chain(self.queued_pool.all()) + } + /// Returns queued and pending transactions for the specified sender pub fn queued_and_pending_txs_by_sender( &self, @@ -375,6 +403,14 @@ impl TxPool { (self.queued_pool.get_txs_by_sender(sender), self.pending_pool.get_txs_by_sender(sender)) } + /// Returns all queued transactions for the specified sender + pub(crate) fn queued_txs_by_sender( + &self, + sender: SenderId, + ) -> Vec>> { + self.queued_transactions_iter().filter(|tx| tx.sender_id() == sender).collect() + } + /// Returns `true` if the transaction with the given hash is already included in this pool. pub(crate) fn contains(&self, tx_hash: &TxHash) -> bool { self.all_transactions.contains(tx_hash) @@ -446,6 +482,7 @@ impl TxPool { block_info: BlockInfo, mined_transactions: Vec, changed_senders: HashMap, + update_kind: PoolUpdateKind, ) -> OnNewCanonicalStateOutcome { // update block info let block_hash = block_info.last_seen_block_hash; @@ -464,6 +501,9 @@ impl TxPool { self.update_transaction_type_metrics(); self.metrics.performed_state_updates.increment(1); + // Update the latest update kind + self.latest_update_kind = Some(update_kind); + OnNewCanonicalStateOutcome { block_hash, mined: mined_transactions, promoted, discarded } } @@ -603,8 +643,8 @@ impl TxPool { *transaction.hash(), PoolErrorKind::InvalidTransaction( InvalidPoolTransactionError::ExceedsGasLimit( - block_gas_limit, tx_gas_limit, + block_gas_limit, ), ), )), @@ -1643,29 +1683,17 @@ impl AllTransactions { // The next transaction of this sender let on_chain_id = TransactionId::new(transaction.sender_id(), on_chain_nonce); { - // get all transactions of the sender's account - let mut descendants = self.descendant_txs_mut(&on_chain_id).peekable(); - // Tracks the next nonce we expect if the transactions are gapless let mut next_nonce = on_chain_id.nonce; // We need to find out if the next transaction of the sender is considered pending - let mut has_parked_ancestor = if ancestor.is_none() { - // the new transaction is the next one - false - } else { - // The transaction was added above so the _inclusive_ descendants iterator - // returns at least 1 tx. 
-                let (id, tx) = descendants.peek().expect("includes >= 1");
-                if id.nonce < inserted_tx_id.nonce {
-                    !tx.state.is_pending()
-                } else {
-                    true
-                }
-            };
+            // The direct descendant has _no_ parked ancestors because the `on_chain_nonce` is
+            // pending, so we can set this to `false`
+            let mut has_parked_ancestor = false;
 
-            // Traverse all transactions of the sender and update existing transactions
-            for (id, tx) in descendants {
+            // Traverse all future transactions of the sender starting with the on chain nonce, and
+            // update existing transactions: `[on_chain_nonce,..]`
+            for (id, tx) in self.descendant_txs_mut(&on_chain_id) {
                 let current_pool = tx.subpool;
 
                 // If there's a nonce gap, we can shortcircuit
@@ -1903,15 +1931,14 @@ impl SenderInfo {
 
 #[cfg(test)]
 mod tests {
-    use alloy_primitives::address;
-    use reth_primitives::TxType;
-
     use super::*;
     use crate::{
         test_utils::{MockOrdering, MockTransaction, MockTransactionFactory, MockTransactionSet},
         traits::TransactionOrigin,
         SubPoolLimit,
    };
+    use alloy_primitives::address;
+    use reth_primitives::TxType;
 
     #[test]
     fn test_insert_blob() {
@@ -2870,7 +2897,7 @@ mod tests {
         pool.update_basefee(pool_base_fee);
 
         // 2 txs, that should put the pool over the size limit but not max txs
-        let a_txs = MockTransactionSet::dependent(a_sender, 0, 2, TxType::Eip1559)
+        let a_txs = MockTransactionSet::dependent(a_sender, 0, 3, TxType::Eip1559)
             .into_iter()
             .map(|mut tx| {
                 tx.set_size(default_limits.max_size / 2 + 1);
@@ -3225,4 +3252,75 @@ mod tests {
             vec![1, 2, 3]
         );
     }
+
+    #[test]
+    fn test_pending_ordering() {
+        let mut f = MockTransactionFactory::default();
+        let mut pool = TxPool::new(MockOrdering::default(), Default::default());
+
+        let tx_0 = MockTransaction::eip1559().with_nonce(1).set_gas_price(100).inc_limit();
+        let tx_1 = tx_0.next();
+
+        let v0 = f.validated(tx_0);
+        let v1 = f.validated(tx_1);
+
+        // nonce gap, tx should be queued
+        pool.add_transaction(v0.clone(), U256::MAX, 0).unwrap();
+        assert_eq!(1, pool.queued_transactions().len());
+
+        // nonce gap is closed on-chain, both transactions should be moved to pending
+        pool.add_transaction(v1, U256::MAX, 1).unwrap();
+
+        assert_eq!(2, pool.pending_transactions().len());
+        assert_eq!(0, pool.queued_transactions().len());
+
+        assert_eq!(
+            pool.pending_pool.independent().get(&v0.sender_id()).unwrap().transaction.nonce(),
+            v0.nonce()
+        );
+    }
+
+    //
+    #[test]
+    fn one_sender_one_independent_transaction() {
+        let mut on_chain_balance = U256::from(4_999); // only enough for 4 txs
+        let mut on_chain_nonce = 40;
+        let mut f = MockTransactionFactory::default();
+        let mut pool = TxPool::mock();
+        let mut submitted_txs = Vec::new();
+
+        // We use a "template" because we want all txs to have the same sender.
+        let template =
+            MockTransaction::eip1559().inc_price().inc_limit().with_value(U256::from(1_001));
+
+        // Add 8 txs. Because the balance is only sufficient for 4, the last 4 will be
+        // queued.
+        for tx_nonce in 40..48 {
+            let tx = f.validated(template.clone().with_nonce(tx_nonce).rng_hash());
+            submitted_txs.push(*tx.id());
+            pool.add_transaction(tx, on_chain_balance, on_chain_nonce).unwrap();
+        }
+
+        // A block is mined with two txs (so the on-chain nonce changes from 40 to 42).
+        // Now the balance is high enough to execute all txs.
+        on_chain_balance = U256::from(999_999);
+        on_chain_nonce = 42;
+        pool.remove_transaction(&submitted_txs[0]);
+        pool.remove_transaction(&submitted_txs[1]);
+
+        // Add 4 txs.
+ for tx_nonce in 48..52 { + pool.add_transaction( + f.validated(template.clone().with_nonce(tx_nonce).rng_hash()), + on_chain_balance, + on_chain_nonce, + ) + .unwrap(); + } + + let best_txs: Vec<_> = pool.pending().best().map(|tx| *tx.id()).collect(); + assert_eq!(best_txs.len(), 10); // 8 - 2 + 4 = 10 + + assert_eq!(pool.pending_pool.independent().len(), 1); + } } diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 99b0caaf48a3..fc43349f3f18 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -11,19 +11,23 @@ use alloy_consensus::{ constants::{EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID}, TxEip1559, TxEip2930, TxEip4844, TxLegacy, }; -use alloy_eips::{eip1559::MIN_PROTOCOL_BASE_FEE, eip2930::AccessList}; -use alloy_primitives::{Address, Bytes, ChainId, TxHash, TxKind, B256, U256}; +use alloy_eips::{ + eip1559::MIN_PROTOCOL_BASE_FEE, + eip2930::AccessList, + eip4844::{BlobTransactionSidecar, BlobTransactionValidationError, DATA_GAS_PER_BLOB}, +}; +use alloy_primitives::{ + Address, Bytes, ChainId, PrimitiveSignature as Signature, TxHash, TxKind, B256, U256, +}; use paste::paste; use rand::{ distributions::{Uniform, WeightedIndex}, prelude::Distribution, }; use reth_primitives::{ - constants::eip4844::DATA_GAS_PER_BLOB, transaction::TryFromRecoveredTransactionError, - BlobTransactionSidecar, BlobTransactionValidationError, PooledTransactionsElementEcRecovered, - Signature, Transaction, TransactionSigned, TransactionSignedEcRecovered, TxType, + transaction::TryFromRecoveredTransactionError, PooledTransactionsElementEcRecovered, + Transaction, TransactionSigned, TransactionSignedEcRecovered, TxType, }; - use std::{ops::Range, sync::Arc, time::Instant, vec::IntoIter}; /// A transaction pool implementation using [`MockOrdering`] for transaction ordering. @@ -761,7 +765,7 @@ impl EthPoolTransaction for MockTransaction { &self, _blob: &BlobTransactionSidecar, _settings: &reth_primitives::kzg::KzgSettings, - ) -> Result<(), reth_primitives::BlobTransactionValidationError> { + ) -> Result<(), alloy_eips::eip4844::BlobTransactionValidationError> { match &self { Self::Eip4844 { .. 
            } => Ok(()),
             _ => Err(BlobTransactionValidationError::NotBlobTransaction(self.tx_type())),
@@ -1002,6 +1006,7 @@ impl proptest::arbitrary::Arbitrary for MockTransaction {
     fn arbitrary_with(_: Self::Parameters) -> Self::Strategy {
         use proptest::prelude::Strategy;
         use proptest_arbitrary_interop::arb;
+        use reth_primitives_traits::size::InMemorySize;

         arb::<(Transaction, Address, B256)>()
             .prop_map(|(tx, sender, tx_hash)| match &tx {
diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs
index c21a7a4ea75a..9b4cccfb9d17 100644
--- a/crates/transaction-pool/src/traits.rs
+++ b/crates/transaction-pool/src/traits.rs
@@ -1,5 +1,3 @@
-#![allow(deprecated)]
-
 use crate::{
     blobstore::BlobStoreError,
     error::{InvalidPoolTransactionError, PoolResult},
@@ -11,14 +9,17 @@ use alloy_consensus::{
     constants::{EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID},
     Transaction as _,
 };
-use alloy_eips::{eip2718::Encodable2718, eip2930::AccessList, eip4844::BlobAndProofV1};
+use alloy_eips::{
+    eip2718::Encodable2718,
+    eip2930::AccessList,
+    eip4844::{BlobAndProofV1, BlobTransactionSidecar, BlobTransactionValidationError},
+};
 use alloy_primitives::{Address, TxHash, TxKind, B256, U256};
 use futures_util::{ready, Stream};
 use reth_eth_wire_types::HandleMempoolData;
 use reth_execution_types::ChangedAccount;
 use reth_primitives::{
-    kzg::KzgSettings, transaction::TryFromRecoveredTransactionError, BlobTransactionSidecar,
-    BlobTransactionValidationError, PooledTransactionsElement,
+    kzg::KzgSettings, transaction::TryFromRecoveredTransactionError, PooledTransactionsElement,
     PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionSignedEcRecovered,
 };
 #[cfg(feature = "serde")]
@@ -248,16 +249,6 @@ pub trait TransactionPool: Send + Sync + Clone {
         &self,
     ) -> Box<dyn BestTransactions<Item = Arc<ValidPoolTransaction<Self::Transaction>>>>;

-    /// Returns an iterator that yields transactions that are ready for block production with the
-    /// given base fee.
-    ///
-    /// Consumer: Block production
-    #[deprecated(note = "Use best_transactions_with_attributes instead.")]
-    fn best_transactions_with_base_fee(
-        &self,
-        base_fee: u64,
-    ) -> Box<dyn BestTransactions<Item = Arc<ValidPoolTransaction<Self::Transaction>>>>;
-
     /// Returns an iterator that yields transactions that are ready for block production with the
     /// given base fee and optional blob fee attributes.
     ///
@@ -352,6 +343,24 @@ pub trait TransactionPool: Send + Sync + Clone {
         sender: Address,
     ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>;

+    /// Returns all pending transactions filtered by predicate
+    fn get_pending_transactions_with_predicate(
+        &self,
+        predicate: impl FnMut(&ValidPoolTransaction<Self::Transaction>) -> bool,
+    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>;
+
+    /// Returns all pending transactions sent by a given user
+    fn get_pending_transactions_by_sender(
+        &self,
+        sender: Address,
+    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>;
+
+    /// Returns all queued transactions sent by a given user
+    fn get_queued_transactions_by_sender(
+        &self,
+        sender: Address,
+    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>;
+
     /// Returns the highest transaction sent by a given user
     fn get_highest_transaction_by_sender(
         &self,
@@ -431,7 +440,10 @@ pub trait TransactionPool: Send + Sync + Clone {

     /// Returns the [BlobTransactionSidecar] for the given transaction hash if it exists in the blob
     /// store.
-    fn get_blob(&self, tx_hash: TxHash) -> Result<Option<BlobTransactionSidecar>, BlobStoreError>;
+    fn get_blob(
+        &self,
+        tx_hash: TxHash,
+    ) -> Result<Option<Arc<BlobTransactionSidecar>>, BlobStoreError>;

     /// Returns all [BlobTransactionSidecar] for the given transaction hashes if they exists in the
     /// blob store.
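Taken together, the sender-scoped getters added above make it straightforward to audit one account's footprint across the pending and queued subpools. A minimal caller sketch (the helper name and generic bound are ours, not part of this patch):

```rust
use alloy_primitives::Address;

// Hypothetical helper: count a sender's pending vs. queued transactions
// using the two getters introduced in this diff.
fn sender_summary<P: TransactionPool>(pool: &P, sender: Address) -> (usize, usize) {
    let pending = pool.get_pending_transactions_by_sender(sender);
    let queued = pool.get_queued_transactions_by_sender(sender);
    (pending.len(), queued.len())
}
```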
@@ -441,7 +453,7 @@
     fn get_all_blobs(
         &self,
         tx_hashes: Vec<TxHash>,
-    ) -> Result<Vec<(TxHash, BlobTransactionSidecar)>, BlobStoreError>;
+    ) -> Result<Vec<(TxHash, Arc<BlobTransactionSidecar>)>, BlobStoreError>;

     /// Returns the exact [BlobTransactionSidecar] for the given transaction hashes in the order
     /// they were requested.
@@ -450,7 +462,7 @@
     fn get_all_blobs_exact(
         &self,
         tx_hashes: Vec<TxHash>,
-    ) -> Result<Vec<BlobTransactionSidecar>, BlobStoreError>;
+    ) -> Result<Vec<Arc<BlobTransactionSidecar>>, BlobStoreError>;

     /// Return the [`BlobTransactionSidecar`]s for a list of blob versioned hashes.
     fn get_blobs_for_versioned_hashes(
@@ -471,7 +483,7 @@ pub trait TransactionPoolExt: TransactionPool {
     ///
     /// ## Fee changes
     ///
-    /// The [CanonicalStateUpdate] includes the base and blob fee of the pending block, which
+    /// The [`CanonicalStateUpdate`] includes the base and blob fee of the pending block, which
     /// affects the dynamic fee requirement of pending transactions in the pool.
     ///
     /// ## EIP-4844 Blob transactions
@@ -657,6 +669,15 @@ impl TransactionOrigin {
     }
 }

+/// Represents the kind of update to the canonical state.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum PoolUpdateKind {
+    /// The update was due to a block commit.
+    Commit,
+    /// The update was due to a reorganization.
+    Reorg,
+}
+
 /// Represents changes after a new canonical block or range of canonical blocks was added to the
 /// chain.
 ///
@@ -681,6 +702,8 @@ pub struct CanonicalStateUpdate<'a> {
     pub changed_accounts: Vec<ChangedAccount>,
     /// All mined transactions in the block range.
     pub mined_transactions: Vec<B256>,
+    /// The kind of update to the canonical state.
+    pub update_kind: PoolUpdateKind,
 }

 impl CanonicalStateUpdate<'_> {
@@ -724,6 +747,11 @@ impl fmt::Display for CanonicalStateUpdate<'_> {
     }
 }

+/// Alias to restrict the [`BestTransactions`] items to the pool's transaction type.
+pub type BestTransactionsFor<Pool> = Box<
+    dyn BestTransactions<Item = Arc<ValidPoolTransaction<<Pool as TransactionPool>::Transaction>>>,
+>;
+
 /// An `Iterator` that only returns transactions that are ready to be executed.
 ///
 /// This makes no assumptions about the order of the transactions, but expects that _all_
@@ -764,7 +792,9 @@ pub trait BestTransactions: Iterator + Send {
     /// If called then the iterator will no longer yield blob transactions.
     ///
     /// Note: this will also exclude any transactions that depend on blob transactions.
-    fn skip_blobs(&mut self);
+    fn skip_blobs(&mut self) {
+        self.set_skip_blobs(true);
+    }

     /// Controls whether the iterator skips blob transactions or not.
     ///
@@ -1332,7 +1362,7 @@ impl TryFrom<TransactionSignedEcRecovered> for EthPooledTransaction {
             }
             EIP4844_TX_TYPE_ID => {
                 // doesn't have a blob sidecar
-                return Err(TryFromRecoveredTransactionError::BlobSidecarMissing)
+                return Err(TryFromRecoveredTransactionError::BlobSidecarMissing);
             }
             unsupported => {
                 // unsupported transaction type
@@ -1480,11 +1510,31 @@ impl<Tx: PoolTransaction> Stream for NewSubpoolTransactionStream<Tx> {
     }
 }

+/// Iterator that returns transactions for the block building process in the order they should be
+/// included in the block.
+///
+/// Can include transactions from the pool and other sources (alternative pools,
+/// sequencer-originated transactions, etc.).
+pub trait PayloadTransactions {
+    /// Returns the next transaction to include in the block.
+    fn next(
+        &mut self,
+        // In the future, `ctx` can include access to state for block building purposes.
+        ctx: (),
+    ) -> Option<TransactionSignedEcRecovered>;
+
+    /// Exclude descendants of the transaction with the given sender and nonce from the iterator,
+    /// because this transaction won't be included in the block.
+ fn mark_invalid(&mut self, sender: Address, nonce: u64); +} + #[cfg(test)] mod tests { use super::*; use alloy_consensus::{TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy}; - use reth_primitives::{constants::eip4844::DATA_GAS_PER_BLOB, Signature, TransactionSigned}; + use alloy_eips::eip4844::DATA_GAS_PER_BLOB; + use alloy_primitives::PrimitiveSignature as Signature; + use reth_primitives::TransactionSigned; #[test] fn test_pool_size_invariants() { diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index bf7749fb85c3..d5f7101eb550 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -15,10 +15,10 @@ use alloy_consensus::constants::{ EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; +use alloy_eips::eip4844::MAX_BLOBS_PER_BLOCK; use reth_chainspec::{ChainSpec, EthereumHardforks}; -use reth_primitives::{ - constants::eip4844::MAX_BLOBS_PER_BLOCK, GotExpected, InvalidTransactionError, SealedBlock, -}; +use reth_primitives::{InvalidTransactionError, SealedBlock}; +use reth_primitives_traits::GotExpected; use reth_storage_api::{AccountReader, StateProviderFactory}; use reth_tasks::TaskSpawner; use revm::{ diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 4a82a1a148ff..6a3b0b96e976 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -6,9 +6,10 @@ use crate::{ traits::{PoolTransaction, TransactionOrigin}, PriceBumpConfig, }; +use alloy_eips::eip4844::BlobTransactionSidecar; use alloy_primitives::{Address, TxHash, B256, U256}; use futures_util::future::Either; -use reth_primitives::{BlobTransactionSidecar, SealedBlock, TransactionSignedEcRecovered}; +use reth_primitives::{SealedBlock, TransactionSignedEcRecovered}; use std::{fmt, future::Future, time::Instant}; mod constants; diff --git a/crates/trie/common/src/key.rs b/crates/trie/common/src/key.rs new file mode 100644 index 000000000000..9e440d199fa1 --- /dev/null +++ b/crates/trie/common/src/key.rs @@ -0,0 +1,18 @@ +use alloy_primitives::B256; +use revm_primitives::keccak256; + +/// Trait for hashing keys in state. +pub trait KeyHasher: Default + Clone + Send + Sync + 'static { + /// Hashes the given bytes into a 256-bit hash. + fn hash_key>(bytes: T) -> B256; +} + +/// A key hasher that uses the Keccak-256 hash function. +#[derive(Clone, Debug, Default)] +pub struct KeccakKeyHasher; + +impl KeyHasher for KeccakKeyHasher { + fn hash_key>(bytes: T) -> B256 { + keccak256(bytes) + } +} diff --git a/crates/trie/common/src/lib.rs b/crates/trie/common/src/lib.rs index bdec36028b94..7645ebd3a1cb 100644 --- a/crates/trie/common/src/lib.rs +++ b/crates/trie/common/src/lib.rs @@ -14,6 +14,9 @@ pub mod hash_builder; mod account; pub use account::TrieAccount; +mod key; +pub use key::{KeccakKeyHasher, KeyHasher}; + mod nibbles; pub use nibbles::{Nibbles, StoredNibbles, StoredNibblesSubKey}; diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index a94b2b96fbdf..f6eaf3960ec0 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -17,7 +17,7 @@ use std::collections::HashMap; /// The state multiproof of target accounts and multiproofs of their storage tries. /// Multiproof is effectively a state subtrie that only contains the nodes /// in the paths of target accounts. 
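To make the `PayloadTransactions` contract above concrete, here is a hedged sketch of the simplest possible implementor: an adapter that drains a pre-collected list and merely records invalidated `(sender, nonce)` pairs (the struct and field names are illustrative, not from this patch):

```rust
use alloy_primitives::Address;
use reth_primitives::TransactionSignedEcRecovered;

/// Illustrative adapter that yields transactions from a fixed list.
struct FixedPayloadTransactions {
    txs: std::vec::IntoIter<TransactionSignedEcRecovered>,
    invalid: Vec<(Address, u64)>,
}

impl PayloadTransactions for FixedPayloadTransactions {
    fn next(&mut self, _ctx: ()) -> Option<TransactionSignedEcRecovered> {
        // A production implementation would also skip descendants of
        // invalidated (sender, nonce) pairs; this sketch does not.
        self.txs.next()
    }

    fn mark_invalid(&mut self, sender: Address, nonce: u64) {
        self.invalid.push((sender, nonce));
    }
}
```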
-#[derive(Clone, Default, Debug)] +#[derive(Clone, Default, Debug, PartialEq, Eq)] pub struct MultiProof { /// State trie multiproof for requested accounts. pub account_subtree: ProofNodes, @@ -79,7 +79,7 @@ impl MultiProof { } /// The merkle multiproof of storage trie. -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct StorageMultiProof { /// Storage trie root. pub root: B256, @@ -234,11 +234,12 @@ impl StorageProof { #[cfg(any(test, feature = "test-utils"))] pub mod triehash { use alloy_primitives::{keccak256, B256}; + use alloy_rlp::RlpEncodable; use hash_db::Hasher; use plain_hasher::PlainHasher; /// A [Hasher] that calculates a keccak256 hash of the given data. - #[derive(Default, Debug, Clone, PartialEq, Eq)] + #[derive(Default, Debug, Clone, PartialEq, Eq, RlpEncodable)] #[non_exhaustive] pub struct KeccakHasher; diff --git a/crates/trie/common/src/root.rs b/crates/trie/common/src/root.rs index 20f3ba1366d5..dbcbf4200d74 100644 --- a/crates/trie/common/src/root.rs +++ b/crates/trie/common/src/root.rs @@ -18,38 +18,6 @@ pub const fn adjust_index_for_rlp(i: usize, len: usize) -> usize { } } -/// Compute a trie root of the collection of rlp encodable items. -pub fn ordered_trie_root(items: &[T]) -> B256 { - ordered_trie_root_with_encoder(items, |item, buf| item.encode(buf)) -} - -/// Compute a trie root of the collection of items with a custom encoder. -pub fn ordered_trie_root_with_encoder(items: &[T], mut encode: F) -> B256 -where - F: FnMut(&T, &mut Vec), -{ - if items.is_empty() { - return alloy_trie::EMPTY_ROOT_HASH; - } - - let mut value_buffer = Vec::new(); - - let mut hb = HashBuilder::default(); - let items_len = items.len(); - for i in 0..items_len { - let index = adjust_index_for_rlp(i, items_len); - - let index_buffer = alloy_rlp::encode_fixed_size(&index); - - value_buffer.clear(); - encode(&items[index], &mut value_buffer); - - hb.add_leaf(Nibbles::unpack(&index_buffer), &value_buffer); - } - - hb.root() -} - /// Hashes and sorts account keys, then proceeds to calculating the root hash of the state /// represented as MPT. /// See [`state_root_unsorted`] for more info. diff --git a/crates/trie/db/src/commitment.rs b/crates/trie/db/src/commitment.rs new file mode 100644 index 000000000000..c608aefff8ae --- /dev/null +++ b/crates/trie/db/src/commitment.rs @@ -0,0 +1,39 @@ +use crate::{ + DatabaseHashedCursorFactory, DatabaseProof, DatabaseStateRoot, DatabaseStorageRoot, + DatabaseTrieCursorFactory, DatabaseTrieWitness, +}; +use reth_db::transaction::DbTx; +use reth_trie::{ + proof::Proof, witness::TrieWitness, KeccakKeyHasher, KeyHasher, StateRoot, StorageRoot, +}; + +/// The `StateCommitment` trait provides associated types for state commitment operations. +pub trait StateCommitment: std::fmt::Debug + Send + Sync + Unpin + 'static { + /// The state root type. + type StateRoot<'a, TX: DbTx + 'a>: DatabaseStateRoot<'a, TX>; + /// The storage root type. + type StorageRoot<'a, TX: DbTx + 'a>: DatabaseStorageRoot<'a, TX>; + /// The state proof type. + type StateProof<'a, TX: DbTx + 'a>: DatabaseProof<'a, TX>; + /// The state witness type. + type StateWitness<'a, TX: DbTx + 'a>: DatabaseTrieWitness<'a, TX>; + /// The key hasher type. + type KeyHasher: KeyHasher; +} + +/// The state commitment type for Ethereum's Merkle Patricia Trie. 
+#[derive(Debug)]
+#[non_exhaustive]
+pub struct MerklePatriciaTrie;
+
+impl StateCommitment for MerklePatriciaTrie {
+    type StateRoot<'a, TX: DbTx + 'a> =
+        StateRoot<DatabaseTrieCursorFactory<'a, TX>, DatabaseHashedCursorFactory<'a, TX>>;
+    type StorageRoot<'a, TX: DbTx + 'a> =
+        StorageRoot<DatabaseTrieCursorFactory<'a, TX>, DatabaseHashedCursorFactory<'a, TX>>;
+    type StateProof<'a, TX: DbTx + 'a> =
+        Proof<DatabaseTrieCursorFactory<'a, TX>, DatabaseHashedCursorFactory<'a, TX>>;
+    type StateWitness<'a, TX: DbTx + 'a> =
+        TrieWitness<DatabaseTrieCursorFactory<'a, TX>, DatabaseHashedCursorFactory<'a, TX>>;
+    type KeyHasher = KeccakKeyHasher;
+}
diff --git a/crates/trie/db/src/lib.rs b/crates/trie/db/src/lib.rs
index 3a9b1e328239..27c18af6cbfd 100644
--- a/crates/trie/db/src/lib.rs
+++ b/crates/trie/db/src/lib.rs
@@ -1,5 +1,6 @@
 //! An integration of [`reth-trie`] with [`reth-db`].

+mod commitment;
 mod hashed_cursor;
 mod prefix_set;
 mod proof;
@@ -8,6 +9,7 @@ mod storage;
 mod trie_cursor;
 mod witness;

+pub use commitment::{MerklePatriciaTrie, StateCommitment};
 pub use hashed_cursor::{
     DatabaseHashedAccountCursor, DatabaseHashedCursorFactory, DatabaseHashedStorageCursor,
 };
diff --git a/crates/trie/db/src/prefix_set.rs b/crates/trie/db/src/prefix_set.rs
index 079fe393764d..cd50503bc703 100644
--- a/crates/trie/db/src/prefix_set.rs
+++ b/crates/trie/db/src/prefix_set.rs
@@ -36,13 +36,13 @@ impl<TX: DbTx> PrefixSetLoader<'_, TX> {

         // Walk account changeset and insert account prefixes.
         let mut account_changeset_cursor = self.cursor_read::<tables::AccountChangeSets>()?;
-        let mut account_plain_state_cursor = self.cursor_read::<tables::PlainAccountState>()?;
+        let mut account_hashed_state_cursor = self.cursor_read::<tables::HashedAccounts>()?;
         for account_entry in account_changeset_cursor.walk_range(range.clone())? {
             let (_, AccountBeforeTx { address, .. }) = account_entry?;
             let hashed_address = keccak256(address);
             account_prefix_set.insert(Nibbles::unpack(hashed_address));
-            if account_plain_state_cursor.seek_exact(address)?.is_none() {
+            if account_hashed_state_cursor.seek_exact(hashed_address)?.is_none() {
                 destroyed_accounts.insert(hashed_address);
             }
         }
diff --git a/crates/trie/db/src/state.rs b/crates/trie/db/src/state.rs
index 4d46183dfda4..0d2171604d5f 100644
--- a/crates/trie/db/src/state.rs
+++ b/crates/trie/db/src/state.rs
@@ -7,16 +7,12 @@ use reth_db_api::{
     transaction::DbTx,
 };
 use reth_execution_errors::StateRootError;
-use reth_primitives::Account;
 use reth_storage_errors::db::DatabaseError;
 use reth_trie::{
     hashed_cursor::HashedPostStateCursorFactory, trie_cursor::InMemoryTrieCursorFactory,
     updates::TrieUpdates, HashedPostState, HashedStorage, StateRoot, StateRootProgress, TrieInput,
 };
-use std::{
-    collections::{hash_map, HashMap},
-    ops::RangeInclusive,
-};
+use std::{collections::HashMap, ops::RangeInclusive};
 use tracing::debug;

 /// Extends [`StateRoot`] with operations specific for working with a database transaction.
@@ -222,13 +218,11 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX>
 impl<TX: DbTx> DatabaseHashedPostState<TX> for HashedPostState {
     fn from_reverts(tx: &TX, from: BlockNumber) -> Result<Self, DatabaseError> {
         // Iterate over account changesets and record value before first occurring account change.
-        let mut accounts = HashMap::<Address, Option<Account>>::default();
+        let mut accounts = HashMap::new();
         let mut account_changesets_cursor = tx.cursor_read::<tables::AccountChangeSets>()?;
         for entry in account_changesets_cursor.walk_range(from..)? {
             let (_, AccountBeforeTx { address, info }) = entry?;
-            if let hash_map::Entry::Vacant(entry) = accounts.entry(address) {
-                entry.insert(info);
-            }
+            accounts.entry(address).or_insert(info);
         }

         // Iterate over storage changesets and record value before first occurring storage change.
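Stepping back to the `KeyHasher` trait introduced in `reth-trie-common` above: its purpose is to let code stay agnostic over the hashing scheme selected by a `StateCommitment`. A small usage sketch (the free function is ours, not from the patch):

```rust
use alloy_primitives::{Address, B256};

/// Illustrative: derive a hashed-state key without committing to keccak256.
fn hashed_address<H: KeyHasher>(address: Address) -> B256 {
    H::hash_key(address)
}

// With the Merkle Patricia Trie commitment this is plain keccak256:
// let key = hashed_address::<KeccakKeyHasher>(Address::ZERO);
```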
@@ -239,9 +233,7 @@ impl DatabaseHashedPostState for HashedPostState { { let (BlockNumberAddress((_, address)), storage) = entry?; let account_storage = storages.entry(address).or_default(); - if let hash_map::Entry::Vacant(entry) = account_storage.entry(storage.key) { - entry.insert(storage.value); - } + account_storage.entry(storage.key).or_insert(storage.value); } let hashed_accounts = diff --git a/crates/trie/db/tests/trie.rs b/crates/trie/db/tests/trie.rs index f5823404c899..aee264364798 100644 --- a/crates/trie/db/tests/trie.rs +++ b/crates/trie/db/tests/trie.rs @@ -11,7 +11,8 @@ use reth_db_api::{ }; use reth_primitives::{Account, StorageEntry}; use reth_provider::{ - test_utils::create_test_provider_factory, DatabaseProviderRW, StorageTrieWriter, TrieWriter, + providers::ProviderNodeTypes, test_utils::create_test_provider_factory, DatabaseProviderRW, + StorageTrieWriter, TrieWriter, }; use reth_trie::{ prefix_set::PrefixSetMut, @@ -693,8 +694,8 @@ fn storage_trie_around_extension_node() { assert_trie_updates(updates.storage_nodes_ref()); } -fn extension_node_storage_trie( - tx: &DatabaseProviderRW>, Spec>, +fn extension_node_storage_trie( + tx: &DatabaseProviderRW>, N>, hashed_address: B256, ) -> (B256, StorageTrieUpdates) { let value = U256::from(1); @@ -721,8 +722,8 @@ fn extension_node_storage_trie( (root, trie_updates) } -fn extension_node_trie( - tx: &DatabaseProviderRW>, Spec>, +fn extension_node_trie( + tx: &DatabaseProviderRW>, N>, ) -> B256 { let a = Account { nonce: 0, balance: U256::from(1u64), bytecode_hash: Some(B256::random()) }; let val = encode_account(a, None); diff --git a/crates/trie/parallel/Cargo.toml b/crates/trie/parallel/Cargo.toml index cc35fe9f914a..1b3e2d59be10 100644 --- a/crates/trie/parallel/Cargo.toml +++ b/crates/trie/parallel/Cargo.toml @@ -16,6 +16,7 @@ workspace = true reth-primitives.workspace = true reth-db.workspace = true reth-trie.workspace = true +reth-trie-common.workspace = true reth-trie-db.workspace = true reth-execution-errors.workspace = true reth-provider.workspace = true diff --git a/crates/trie/parallel/benches/root.rs b/crates/trie/parallel/benches/root.rs index d1ffe49dd0ad..eb5b6575b9f7 100644 --- a/crates/trie/parallel/benches/root.rs +++ b/crates/trie/parallel/benches/root.rs @@ -13,7 +13,7 @@ use reth_trie::{ TrieInput, }; use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseStateRoot}; -use reth_trie_parallel::parallel_root::ParallelStateRoot; +use reth_trie_parallel::root::ParallelStateRoot; use std::collections::HashMap; pub fn calculate_state_root(c: &mut Criterion) { diff --git a/crates/trie/parallel/src/lib.rs b/crates/trie/parallel/src/lib.rs index 40a6af347580..5be2a658387c 100644 --- a/crates/trie/parallel/src/lib.rs +++ b/crates/trie/parallel/src/lib.rs @@ -14,7 +14,10 @@ pub use storage_root_targets::StorageRootTargets; pub mod stats; /// Implementation of parallel state root computation. -pub mod parallel_root; +pub mod root; + +/// Implementation of parallel proof computation. +pub mod proof; /// Parallel state root metrics. 
#[cfg(feature = "metrics")] diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs new file mode 100644 index 000000000000..bafb9917c600 --- /dev/null +++ b/crates/trie/parallel/src/proof.rs @@ -0,0 +1,297 @@ +use crate::{root::ParallelStateRootError, stats::ParallelTrieTracker, StorageRootTargets}; +use alloy_primitives::{ + map::{HashMap, HashSet}, + B256, +}; +use alloy_rlp::{BufMut, Encodable}; +use itertools::Itertools; +use reth_db::DatabaseError; +use reth_execution_errors::StorageRootError; +use reth_provider::{ + providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, ProviderError, +}; +use reth_trie::{ + hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, + node_iter::{TrieElement, TrieNodeIter}, + prefix_set::{PrefixSetMut, TriePrefixSetsMut}, + proof::StorageProof, + trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, + walker::TrieWalker, + HashBuilder, MultiProof, Nibbles, TrieAccount, TrieInput, +}; +use reth_trie_common::proof::ProofRetainer; +use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; +use std::sync::Arc; +use tracing::debug; + +#[cfg(feature = "metrics")] +use crate::metrics::ParallelStateRootMetrics; + +/// TODO: +#[derive(Debug)] +pub struct ParallelProof { + /// Consistent view of the database. + view: ConsistentDbView, + /// Trie input. + input: TrieInput, + /// Parallel state root metrics. + #[cfg(feature = "metrics")] + metrics: ParallelStateRootMetrics, +} + +impl ParallelProof { + /// Create new state proof generator. + pub fn new(view: ConsistentDbView, input: TrieInput) -> Self { + Self { + view, + input, + #[cfg(feature = "metrics")] + metrics: ParallelStateRootMetrics::default(), + } + } +} + +impl ParallelProof +where + Factory: DatabaseProviderFactory + Clone + Send + Sync + 'static, +{ + /// Generate a state multiproof according to specified targets. + pub fn multiproof( + self, + targets: HashMap>, + ) -> Result { + let mut tracker = ParallelTrieTracker::default(); + + let trie_nodes_sorted = Arc::new(self.input.nodes.into_sorted()); + let hashed_state_sorted = Arc::new(self.input.state.into_sorted()); + + // Extend prefix sets with targets + let mut prefix_sets = self.input.prefix_sets.clone(); + prefix_sets.extend(TriePrefixSetsMut { + account_prefix_set: PrefixSetMut::from(targets.keys().copied().map(Nibbles::unpack)), + storage_prefix_sets: targets + .iter() + .filter(|&(_hashed_address, slots)| (!slots.is_empty())) + .map(|(hashed_address, slots)| { + (*hashed_address, PrefixSetMut::from(slots.iter().map(Nibbles::unpack))) + }) + .collect(), + destroyed_accounts: Default::default(), + }); + let prefix_sets = prefix_sets.freeze(); + + let storage_root_targets = StorageRootTargets::new( + prefix_sets.account_prefix_set.iter().map(|nibbles| B256::from_slice(&nibbles.pack())), + prefix_sets.storage_prefix_sets.clone(), + ); + + // Pre-calculate storage roots for accounts which were changed. 
+ tracker.set_precomputed_storage_roots(storage_root_targets.len() as u64); + debug!(target: "trie::parallel_state_root", len = storage_root_targets.len(), "pre-generating storage proofs"); + let mut storage_proofs = HashMap::with_capacity(storage_root_targets.len()); + for (hashed_address, prefix_set) in + storage_root_targets.into_iter().sorted_unstable_by_key(|(address, _)| *address) + { + let view = self.view.clone(); + let target_slots: HashSet = + targets.get(&hashed_address).cloned().unwrap_or_default(); + + let trie_nodes_sorted = trie_nodes_sorted.clone(); + let hashed_state_sorted = hashed_state_sorted.clone(); + + let (tx, rx) = std::sync::mpsc::sync_channel(1); + + rayon::spawn_fifo(move || { + let result = (|| -> Result<_, ParallelStateRootError> { + let provider_ro = view.provider_ro()?; + let trie_cursor_factory = InMemoryTrieCursorFactory::new( + DatabaseTrieCursorFactory::new(provider_ro.tx_ref()), + &trie_nodes_sorted, + ); + let hashed_cursor_factory = HashedPostStateCursorFactory::new( + DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), + &hashed_state_sorted, + ); + + StorageProof::new_hashed( + trie_cursor_factory, + hashed_cursor_factory, + hashed_address, + ) + .with_prefix_set_mut(PrefixSetMut::from(prefix_set.iter().cloned())) + .storage_multiproof(target_slots) + .map_err(|e| { + ParallelStateRootError::StorageRoot(StorageRootError::Database( + DatabaseError::Other(e.to_string()), + )) + }) + })(); + let _ = tx.send(result); + }); + storage_proofs.insert(hashed_address, rx); + } + + let provider_ro = self.view.provider_ro()?; + let trie_cursor_factory = InMemoryTrieCursorFactory::new( + DatabaseTrieCursorFactory::new(provider_ro.tx_ref()), + &trie_nodes_sorted, + ); + let hashed_cursor_factory = HashedPostStateCursorFactory::new( + DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), + &hashed_state_sorted, + ); + + // Create the walker. + let walker = TrieWalker::new( + trie_cursor_factory.account_trie_cursor().map_err(ProviderError::Database)?, + prefix_sets.account_prefix_set, + ) + .with_deletions_retained(true); + + // Create a hash builder to rebuild the root node since it is not available in the database. + let retainer: ProofRetainer = targets.keys().map(Nibbles::unpack).collect(); + let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); + + let mut storages = HashMap::default(); + let mut account_rlp = Vec::with_capacity(128); + let mut account_node_iter = TrieNodeIter::new( + walker, + hashed_cursor_factory.hashed_account_cursor().map_err(ProviderError::Database)?, + ); + while let Some(account_node) = + account_node_iter.try_next().map_err(ProviderError::Database)? + { + match account_node { + TrieElement::Branch(node) => { + hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); + } + TrieElement::Leaf(hashed_address, account) => { + let storage_multiproof = match storage_proofs.remove(&hashed_address) { + Some(rx) => rx.recv().map_err(|_| { + ParallelStateRootError::StorageRoot(StorageRootError::Database( + DatabaseError::Other(format!( + "channel closed for {hashed_address}" + )), + )) + })??, + // Since we do not store all intermediate nodes in the database, there might + // be a possibility of re-adding a non-modified leaf to the hash builder. 
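The fan-out above (one rayon task per changed account, each result parked behind a bounded channel and joined lazily while the account trie is walked) is the heart of `ParallelProof`. A self-contained miniature of the same pattern, assuming only the `rayon` crate (names are ours):

```rust
use std::{collections::HashMap, sync::mpsc};

/// Miniature of the spawn-now, join-lazily pattern used by `multiproof`.
fn parallel_squares(keys: Vec<u64>) -> HashMap<u64, u64> {
    // Fan out: one task per key, each result parked in a bounded channel.
    let mut pending = HashMap::new();
    for key in keys {
        let (tx, rx) = mpsc::sync_channel(1);
        rayon::spawn_fifo(move || {
            let _ = tx.send(key * key);
        });
        pending.insert(key, rx);
    }
    // Join lazily: block on a receiver only when its value is needed.
    pending.into_iter().map(|(key, rx)| (key, rx.recv().expect("worker dropped"))).collect()
}
```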
+ None => { + tracker.inc_missed_leaves(); + StorageProof::new_hashed( + trie_cursor_factory.clone(), + hashed_cursor_factory.clone(), + hashed_address, + ) + .with_prefix_set_mut(Default::default()) + .storage_multiproof( + targets.get(&hashed_address).cloned().unwrap_or_default(), + ) + .map_err(|e| { + ParallelStateRootError::StorageRoot(StorageRootError::Database( + DatabaseError::Other(e.to_string()), + )) + })? + } + }; + + // Encode account + account_rlp.clear(); + let account = TrieAccount::from((account, storage_multiproof.root)); + account.encode(&mut account_rlp as &mut dyn BufMut); + + hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); + storages.insert(hashed_address, storage_multiproof); + } + } + } + let _ = hash_builder.root(); + + #[cfg(feature = "metrics")] + self.metrics.record_state_trie(tracker.finish()); + + Ok(MultiProof { account_subtree: hash_builder.take_proof_nodes(), storages }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::{keccak256, map::DefaultHashBuilder, Address, U256}; + use rand::Rng; + use reth_primitives::{Account, StorageEntry}; + use reth_provider::{test_utils::create_test_provider_factory, HashingWriter}; + use reth_trie::proof::Proof; + + #[test] + fn random_parallel_proof() { + let factory = create_test_provider_factory(); + let consistent_view = ConsistentDbView::new(factory.clone(), None); + + let mut rng = rand::thread_rng(); + let state = (0..100) + .map(|_| { + let address = Address::random(); + let account = + Account { balance: U256::from(rng.gen::()), ..Default::default() }; + let mut storage = HashMap::::default(); + let has_storage = rng.gen_bool(0.7); + if has_storage { + for _ in 0..100 { + storage.insert( + B256::from(U256::from(rng.gen::())), + U256::from(rng.gen::()), + ); + } + } + (address, (account, storage)) + }) + .collect::>(); + + { + let provider_rw = factory.provider_rw().unwrap(); + provider_rw + .insert_account_for_hashing( + state.iter().map(|(address, (account, _))| (*address, Some(*account))), + ) + .unwrap(); + provider_rw + .insert_storage_for_hashing(state.iter().map(|(address, (_, storage))| { + ( + *address, + storage + .iter() + .map(|(slot, value)| StorageEntry { key: *slot, value: *value }), + ) + })) + .unwrap(); + provider_rw.commit().unwrap(); + } + + let mut targets = + HashMap::, DefaultHashBuilder>::default(); + for (address, (_, storage)) in state.iter().take(10) { + let hashed_address = keccak256(*address); + let mut target_slots = HashSet::::default(); + + for (slot, _) in storage.iter().take(5) { + target_slots.insert(*slot); + } + + if !target_slots.is_empty() { + targets.insert(hashed_address, target_slots); + } + } + + let provider_rw = factory.provider_rw().unwrap(); + let trie_cursor_factory = DatabaseTrieCursorFactory::new(provider_rw.tx_ref()); + let hashed_cursor_factory = DatabaseHashedCursorFactory::new(provider_rw.tx_ref()); + + assert_eq!( + ParallelProof::new(consistent_view, Default::default()) + .multiproof(targets.clone()) + .unwrap(), + Proof::new(trie_cursor_factory, hashed_cursor_factory).multiproof(targets).unwrap() + ); + } +} diff --git a/crates/trie/parallel/src/parallel_root.rs b/crates/trie/parallel/src/root.rs similarity index 100% rename from crates/trie/parallel/src/parallel_root.rs rename to crates/trie/parallel/src/root.rs diff --git a/crates/trie/sparse/Cargo.toml b/crates/trie/sparse/Cargo.toml index 26d036f57ff9..1c5bb7d8a33e 100644 --- a/crates/trie/sparse/Cargo.toml +++ b/crates/trie/sparse/Cargo.toml @@ -41,3 +41,7 @@ 
rand.workspace = true [[bench]] name = "root" harness = false + +[[bench]] +name = "rlp_node" +harness = false diff --git a/crates/trie/sparse/benches/rlp_node.rs b/crates/trie/sparse/benches/rlp_node.rs new file mode 100644 index 000000000000..57ab52978b64 --- /dev/null +++ b/crates/trie/sparse/benches/rlp_node.rs @@ -0,0 +1,78 @@ +#![allow(missing_docs, unreachable_pub)] + +use std::time::{Duration, Instant}; + +use alloy_primitives::{B256, U256}; +use criterion::{criterion_group, criterion_main, Criterion}; +use prop::strategy::ValueTree; +use proptest::{prelude::*, test_runner::TestRunner}; +use rand::seq::IteratorRandom; +use reth_testing_utils::generators; +use reth_trie::Nibbles; +use reth_trie_sparse::RevealedSparseTrie; + +pub fn update_rlp_node_level(c: &mut Criterion) { + let mut rng = generators::rng(); + + let mut group = c.benchmark_group("update rlp node level"); + group.sample_size(20); + + for size in [100_000] { + let mut runner = TestRunner::new(ProptestConfig::default()); + let state = proptest::collection::hash_map(any::(), any::(), size) + .new_tree(&mut runner) + .unwrap() + .current(); + + // Create a sparse trie with `size` leaves + let mut sparse = RevealedSparseTrie::default(); + for (key, value) in &state { + sparse + .update_leaf(Nibbles::unpack(key), alloy_rlp::encode_fixed_size(value).to_vec()) + .unwrap(); + } + sparse.root(); + + for updated_leaves in [0.1, 1.0] { + for key in state + .keys() + .choose_multiple(&mut rng, (size as f64 * (updated_leaves / 100.0)) as usize) + { + sparse + .update_leaf( + Nibbles::unpack(key), + alloy_rlp::encode_fixed_size(&rng.gen::()).to_vec(), + ) + .unwrap(); + } + + // Calculate the maximum depth of the trie for the given number of leaves + let max_depth = (size as f64).log(16.0).ceil() as usize; + + for depth in 0..=max_depth { + group.bench_function( + format!("size {size} | updated {updated_leaves}% | depth {depth}"), + |b| { + // Use `iter_custom` to avoid measuring clones and drops + b.iter_custom(|iters| { + let mut elapsed = Duration::ZERO; + + let mut cloned = sparse.clone(); + for _ in 0..iters { + let start = Instant::now(); + cloned.update_rlp_node_level(depth); + elapsed += start.elapsed(); + cloned = sparse.clone(); + } + + elapsed + }) + }, + ); + } + } + } +} + +criterion_group!(rlp_node, update_rlp_node_level); +criterion_main!(rlp_node); diff --git a/crates/trie/sparse/benches/root.rs b/crates/trie/sparse/benches/root.rs index bc221a8f8313..30ce566fb5f6 100644 --- a/crates/trie/sparse/benches/root.rs +++ b/crates/trie/sparse/benches/root.rs @@ -1,4 +1,5 @@ #![allow(missing_docs, unreachable_pub)] + use alloy_primitives::{map::HashMap, B256, U256}; use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; use itertools::Itertools; diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index cfb17ef36ff1..126e05e85824 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -1,3 +1,5 @@ +use std::iter::Peekable; + use crate::{SparseStateTrieError, SparseStateTrieResult, SparseTrie}; use alloy_primitives::{ map::{HashMap, HashSet}, @@ -12,10 +14,8 @@ pub struct SparseStateTrie { /// Sparse account trie. pub(crate) state: SparseTrie, /// Sparse storage tries. - #[allow(dead_code)] pub(crate) storages: HashMap, /// Collection of revealed account and storage keys. 
-    #[allow(dead_code)]
     pub(crate) revealed: HashMap<B256, HashSet<B256>>,
 }
@@ -35,7 +35,7 @@ impl SparseStateTrie {
         self.revealed.get(account).map_or(false, |slots| slots.contains(slot))
     }

-    /// Reveal unknown trie paths from provided leaf path and its proof.
+    /// Reveal unknown trie paths from the provided leaf path and its proof for the account.
     /// NOTE: This method does not extensively validate the proof.
     pub fn reveal_account(
         &mut self,
         account: B256,
         proof: impl IntoIterator<Item = (Nibbles, Bytes)>,
     ) -> SparseStateTrieResult<()> {
         let mut proof = proof.into_iter().peekable();

-        // reveal root and initialize the trie if not already
-        let Some((path, node)) = proof.next() else { return Ok(()) };
-        if !path.is_empty() {
-            return Err(SparseStateTrieError::InvalidRootNode { path, node })
-        }
-
-        // Decode root node and perform sanity check.
-        let root_node = TrieNode::decode(&mut &node[..])?;
-        if matches!(root_node, TrieNode::EmptyRoot) && proof.peek().is_some() {
-            return Err(SparseStateTrieError::InvalidRootNode { path, node })
-        }
+        let Some(root_node) = self.validate_proof(&mut proof)? else { return Ok(()) };

         // Reveal root node if it wasn't already.
         let trie = self.state.reveal_root(root_node)?;

-        // add the remaining proof nodes
+        // Reveal the remaining proof nodes.
         for (path, bytes) in proof {
             let node = TrieNode::decode(&mut &bytes[..])?;
             trie.reveal_node(path, node)?;
@@ -71,6 +61,55 @@ impl SparseStateTrie {
         Ok(())
     }

+    /// Reveal unknown trie paths from the provided leaf path and its proof for the storage slot.
+    /// NOTE: This method does not extensively validate the proof.
+    pub fn reveal_storage_slot(
+        &mut self,
+        account: B256,
+        slot: B256,
+        proof: impl IntoIterator<Item = (Nibbles, Bytes)>,
+    ) -> SparseStateTrieResult<()> {
+        let mut proof = proof.into_iter().peekable();
+
+        let Some(root_node) = self.validate_proof(&mut proof)? else { return Ok(()) };
+
+        // Reveal root node if it wasn't already.
+        let trie = self.storages.entry(account).or_default().reveal_root(root_node)?;
+
+        // Reveal the remaining proof nodes.
+        for (path, bytes) in proof {
+            let node = TrieNode::decode(&mut &bytes[..])?;
+            trie.reveal_node(path, node)?;
+        }
+
+        // Mark leaf path as revealed.
+        self.revealed.entry(account).or_default().insert(slot);
+
+        Ok(())
+    }
+
+    /// Validates the root node of the proof and returns it if it exists and is valid.
+    fn validate_proof<I: Iterator<Item = (Nibbles, Bytes)>>(
+        &self,
+        proof: &mut Peekable<I>,
+    ) -> SparseStateTrieResult<Option<TrieNode>> {
+        // Validate root node.
+        let Some((path, node)) = proof.next() else { return Ok(None) };
+        if !path.is_empty() {
+            return Err(SparseStateTrieError::InvalidRootNode { path, node })
+        }
+
+        // Decode root node and perform sanity check.
+        let root_node = TrieNode::decode(&mut &node[..])?;
+        if matches!(root_node, TrieNode::EmptyRoot) && proof.peek().is_some() {
+            return Err(SparseStateTrieError::InvalidRootNode { path, node })
+        }
+
+        Ok(Some(root_node))
+    }
+
     /// Update the leaf node.
     pub fn update_leaf(&mut self, path: Nibbles, value: Vec<u8>) -> SparseStateTrieResult<()> {
         self.state.update_leaf(path, value)?;
@@ -81,6 +120,11 @@
     pub fn root(&mut self) -> Option<B256> {
         self.state.root()
     }
+
+    /// Returns the storage sparse trie root if the trie has been revealed.
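Putting the new entry points together (`reveal_storage_slot` above plus the `storage_root` accessor that follows), consuming a storage proof looks roughly like this sketch (the free function is ours):

```rust
use alloy_primitives::{Bytes, B256};
use reth_trie::Nibbles;

/// Illustrative: reveal one storage proof, then ask for that account's root.
fn reveal_and_root(
    trie: &mut SparseStateTrie,
    account: B256,
    slot: B256,
    proof: Vec<(Nibbles, Bytes)>,
) -> SparseStateTrieResult<Option<B256>> {
    trie.reveal_storage_slot(account, slot, proof)?;
    // `None` only if the storage trie for `account` is still blind.
    Ok(trie.storage_root(account))
}
```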
+ pub fn storage_root(&mut self, account: B256) -> Option { + self.storages.get_mut(&account).and_then(|trie| trie.root()) + } } #[cfg(test)] @@ -93,7 +137,30 @@ mod tests { use reth_trie_common::proof::ProofRetainer; #[test] - fn sparse_trie_reveal_empty() { + fn validate_proof_first_node_not_root() { + let sparse = SparseStateTrie::default(); + let proof = [(Nibbles::from_nibbles([0x1]), Bytes::from([EMPTY_STRING_CODE]))]; + assert_matches!( + sparse.validate_proof(&mut proof.into_iter().peekable()), + Err(SparseStateTrieError::InvalidRootNode { .. }) + ); + } + + #[test] + fn validate_proof_invalid_proof_with_empty_root() { + let sparse = SparseStateTrie::default(); + let proof = [ + (Nibbles::default(), Bytes::from([EMPTY_STRING_CODE])), + (Nibbles::from_nibbles([0x1]), Bytes::new()), + ]; + assert_matches!( + sparse.validate_proof(&mut proof.into_iter().peekable()), + Err(SparseStateTrieError::InvalidRootNode { .. }) + ); + } + + #[test] + fn reveal_account_empty() { let retainer = ProofRetainer::from_iter([Nibbles::default()]); let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); hash_builder.root(); @@ -107,25 +174,21 @@ mod tests { } #[test] - fn reveal_first_node_not_root() { - let mut sparse = SparseStateTrie::default(); - let proof = [(Nibbles::from_nibbles([0x1]), Bytes::from([EMPTY_STRING_CODE]))]; - assert_matches!( - sparse.reveal_account(Default::default(), proof), - Err(SparseStateTrieError::InvalidRootNode { .. }) - ); - } + fn reveal_storage_slot_empty() { + let retainer = ProofRetainer::from_iter([Nibbles::default()]); + let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); + hash_builder.root(); + let proofs = hash_builder.take_proof_nodes(); + assert_eq!(proofs.len(), 1); - #[test] - fn reveal_invalid_proof_with_empty_root() { let mut sparse = SparseStateTrie::default(); - let proof = [ - (Nibbles::default(), Bytes::from([EMPTY_STRING_CODE])), - (Nibbles::from_nibbles([0x1]), Bytes::new()), - ]; - assert_matches!( - sparse.reveal_account(Default::default(), proof), - Err(SparseStateTrieError::InvalidRootNode { .. }) + assert!(sparse.storages.is_empty()); + sparse + .reveal_storage_slot(Default::default(), Default::default(), proofs.into_inner()) + .unwrap(); + assert_eq!( + sparse.storages, + HashMap::from_iter([(Default::default(), SparseTrie::revealed_empty())]) ); } } diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 8d65378f614e..9db1dff53131 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -11,7 +11,7 @@ use reth_trie_common::{ EMPTY_ROOT_HASH, }; use smallvec::SmallVec; -use std::{collections::HashSet, fmt}; +use std::fmt; /// Inner representation of the sparse trie. /// Sparse trie is blind by default until nodes are revealed. @@ -77,7 +77,7 @@ impl SparseTrie { /// - Each leaf entry in `nodes` collection must have a corresponding entry in `values` collection. /// The opposite is also true. /// - All keys in `values` collection are full leaf paths. -#[derive(PartialEq, Eq)] +#[derive(Clone, PartialEq, Eq)] pub struct RevealedSparseTrie { /// All trie nodes. nodes: HashMap, @@ -558,7 +558,7 @@ impl RevealedSparseTrie { pub fn root(&mut self) -> B256 { // take the current prefix set. 
let mut prefix_set = std::mem::take(&mut self.prefix_set).freeze(); - let root_rlp = self.rlp_node(Nibbles::default(), &mut prefix_set); + let root_rlp = self.rlp_node_allocate(Nibbles::default(), &mut prefix_set); if let Some(root_hash) = root_rlp.as_hash() { root_hash } else { @@ -569,39 +569,54 @@ impl RevealedSparseTrie { /// Update hashes of the nodes that are located at a level deeper than or equal to the provided /// depth. Root node has a level of 0. pub fn update_rlp_node_level(&mut self, depth: usize) { - let targets = self.get_nodes_at_depth(depth); let mut prefix_set = self.prefix_set.clone().freeze(); + let mut buffers = RlpNodeBuffers::default(); + + let targets = self.get_changed_nodes_at_depth(&mut prefix_set, depth); for target in targets { - self.rlp_node(target, &mut prefix_set); + buffers.path_stack.push((target, Some(true))); + self.rlp_node(&mut prefix_set, &mut buffers); } } - /// Returns a list of paths to the nodes that are located at the provided depth when counting - /// from the root node. If there's a leaf at a depth less than the provided depth, it will be - /// included in the result. - fn get_nodes_at_depth(&self, depth: usize) -> HashSet { + /// Returns a list of paths to the nodes that were changed according to the prefix set and are + /// located at the provided depth when counting from the root node. If there's a leaf at a + /// depth less than the provided depth, it will be included in the result. + fn get_changed_nodes_at_depth(&self, prefix_set: &mut PrefixSet, depth: usize) -> Vec { let mut paths = Vec::from([(Nibbles::default(), 0)]); - let mut targets = HashSet::::default(); + let mut targets = Vec::new(); while let Some((mut path, level)) = paths.pop() { match self.nodes.get(&path).unwrap() { SparseNode::Empty | SparseNode::Hash(_) => {} - SparseNode::Leaf { .. } => { - targets.insert(path); + SparseNode::Leaf { hash, .. } => { + if hash.is_some() && !prefix_set.contains(&path) { + continue + } + + targets.push(path); } - SparseNode::Extension { key, .. } => { + SparseNode::Extension { key, hash } => { + if hash.is_some() && !prefix_set.contains(&path) { + continue + } + if level >= depth { - targets.insert(path); + targets.push(path); } else { path.extend_from_slice_unchecked(key); paths.push((path, level + 1)); } } - SparseNode::Branch { state_mask, .. 
} => { + SparseNode::Branch { state_mask, hash } => { + if hash.is_some() && !prefix_set.contains(&path) { + continue + } + if level >= depth { - targets.insert(path); + targets.push(path); } else { - for bit in CHILD_INDEX_RANGE { + for bit in CHILD_INDEX_RANGE.rev() { if state_mask.is_bit_set(bit) { let mut child_path = path.clone(); child_path.push_unchecked(bit); @@ -616,17 +631,19 @@ impl RevealedSparseTrie { targets } - fn rlp_node(&mut self, path: Nibbles, prefix_set: &mut PrefixSet) -> RlpNode { - // stack of paths we need rlp nodes for - let mut path_stack = Vec::from([path]); - // stack of rlp nodes - let mut rlp_node_stack = Vec::<(Nibbles, RlpNode)>::new(); - // reusable branch child path - let mut branch_child_buf = SmallVec::<[Nibbles; 16]>::new_const(); - // reusable branch value stack - let mut branch_value_stack_buf = SmallVec::<[RlpNode; 16]>::new_const(); - - 'main: while let Some(path) = path_stack.pop() { + fn rlp_node_allocate(&mut self, path: Nibbles, prefix_set: &mut PrefixSet) -> RlpNode { + let mut buffers = RlpNodeBuffers::new_with_path(path); + self.rlp_node(prefix_set, &mut buffers) + } + + fn rlp_node(&mut self, prefix_set: &mut PrefixSet, buffers: &mut RlpNodeBuffers) -> RlpNode { + 'main: while let Some((path, mut is_in_prefix_set)) = buffers.path_stack.pop() { + // Check if the path is in the prefix set. + // First, check the cached value. If it's `None`, then check the prefix set, and update + // the cached value. + let mut prefix_set_contains = + |path: &Nibbles| *is_in_prefix_set.get_or_insert_with(|| prefix_set.contains(path)); + let rlp_node = match self.nodes.get_mut(&path).unwrap() { SparseNode::Empty => RlpNode::word_rlp(&EMPTY_ROOT_HASH), SparseNode::Hash(hash) => RlpNode::word_rlp(hash), @@ -634,7 +651,7 @@ impl RevealedSparseTrie { self.rlp_buf.clear(); let mut path = path.clone(); path.extend_from_slice_unchecked(key); - if let Some(hash) = hash.filter(|_| !prefix_set.contains(&path)) { + if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { RlpNode::word_rlp(&hash) } else { let value = self.values.get(&path).unwrap(); @@ -646,58 +663,70 @@ impl RevealedSparseTrie { SparseNode::Extension { key, hash } => { let mut child_path = path.clone(); child_path.extend_from_slice_unchecked(key); - if let Some(hash) = hash.filter(|_| !prefix_set.contains(&path)) { + if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { RlpNode::word_rlp(&hash) - } else if rlp_node_stack.last().map_or(false, |e| e.0 == child_path) { - let (_, child) = rlp_node_stack.pop().unwrap(); + } else if buffers.rlp_node_stack.last().map_or(false, |e| e.0 == child_path) { + let (_, child) = buffers.rlp_node_stack.pop().unwrap(); self.rlp_buf.clear(); let rlp_node = ExtensionNodeRef::new(key, &child).rlp(&mut self.rlp_buf); *hash = rlp_node.as_hash(); rlp_node } else { - path_stack.extend([path, child_path]); // need to get rlp node for child first + // need to get rlp node for child first + buffers.path_stack.extend([(path, is_in_prefix_set), (child_path, None)]); continue } } SparseNode::Branch { state_mask, hash } => { - if let Some(hash) = hash.filter(|_| !prefix_set.contains(&path)) { - rlp_node_stack.push((path, RlpNode::word_rlp(&hash))); + if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { + buffers.rlp_node_stack.push((path, RlpNode::word_rlp(&hash))); continue } - branch_child_buf.clear(); - for bit in CHILD_INDEX_RANGE { + buffers.branch_child_buf.clear(); + // Walk children in a reverse order from `f` to `0`, so we pop the `0` first 
+ // from the stack. + for bit in CHILD_INDEX_RANGE.rev() { if state_mask.is_bit_set(bit) { let mut child = path.clone(); child.push_unchecked(bit); - branch_child_buf.push(child); + buffers.branch_child_buf.push(child); } } - branch_value_stack_buf.clear(); - for child_path in &branch_child_buf { - if rlp_node_stack.last().map_or(false, |e| &e.0 == child_path) { - let (_, child) = rlp_node_stack.pop().unwrap(); - branch_value_stack_buf.push(child); + buffers + .branch_value_stack_buf + .resize(buffers.branch_child_buf.len(), Default::default()); + let mut added_children = false; + for (i, child_path) in buffers.branch_child_buf.iter().enumerate() { + if buffers.rlp_node_stack.last().map_or(false, |e| &e.0 == child_path) { + let (_, child) = buffers.rlp_node_stack.pop().unwrap(); + // Insert children in the resulting buffer in a normal order, because + // initially we iterated in reverse. + buffers.branch_value_stack_buf + [buffers.branch_child_buf.len() - i - 1] = child; + added_children = true; } else { - debug_assert!(branch_value_stack_buf.is_empty()); - path_stack.push(path); - path_stack.extend(branch_child_buf.drain(..)); + debug_assert!(!added_children); + buffers.path_stack.push((path, is_in_prefix_set)); + buffers + .path_stack + .extend(buffers.branch_child_buf.drain(..).map(|p| (p, None))); continue 'main } } self.rlp_buf.clear(); - let rlp_node = BranchNodeRef::new(&branch_value_stack_buf, *state_mask) + let rlp_node = BranchNodeRef::new(&buffers.branch_value_stack_buf, *state_mask) .rlp(&mut self.rlp_buf); *hash = rlp_node.as_hash(); rlp_node } }; - rlp_node_stack.push((path, rlp_node)); + buffers.rlp_node_stack.push((path, rlp_node)); } - rlp_node_stack.pop().unwrap().1 + buffers.rlp_node_stack.pop().unwrap().1 } } @@ -777,18 +806,42 @@ struct RemovedSparseNode { unset_branch_nibble: Option, } +/// Collection of reusable buffers for [`RevealedSparseTrie::rlp_node`]. +#[derive(Debug, Default)] +struct RlpNodeBuffers { + /// Stack of paths we need rlp nodes for and whether the path is in the prefix set. + path_stack: Vec<(Nibbles, Option)>, + /// Stack of rlp nodes + rlp_node_stack: Vec<(Nibbles, RlpNode)>, + /// Reusable branch child path + branch_child_buf: SmallVec<[Nibbles; 16]>, + /// Reusable branch value stack + branch_value_stack_buf: SmallVec<[RlpNode; 16]>, +} + +impl RlpNodeBuffers { + /// Creates a new instance of buffers with the given path on the stack. + fn new_with_path(path: Nibbles) -> Self { + Self { + path_stack: vec![(path, None)], + rlp_node_stack: Vec::new(), + branch_child_buf: SmallVec::<[Nibbles; 16]>::new_const(), + branch_value_stack_buf: SmallVec::<[RlpNode; 16]>::new_const(), + } + } +} + #[cfg(test)] mod tests { use std::collections::BTreeMap; use super::*; - use alloy_primitives::U256; + use alloy_primitives::{map::HashSet, U256}; use assert_matches::assert_matches; use itertools::Itertools; use prop::sample::SizeRange; use proptest::prelude::*; use rand::seq::IteratorRandom; - use reth_testing_utils::generators; use reth_trie::{BranchNode, ExtensionNode, LeafNode}; use reth_trie_common::{ proof::{ProofNodes, ProofRetainer}, @@ -1250,6 +1303,7 @@ mod tests { ); } + #[allow(clippy::type_complexity)] #[test] fn sparse_trie_fuzz() { // Having only the first 3 nibbles set, we narrow down the range of keys @@ -1257,63 +1311,51 @@ mod tests { // to test the sparse trie updates. 
const KEY_NIBBLES_LEN: usize = 3; - fn test(updates: I) - where - I: IntoIterator, - T: IntoIterator)> + Clone, - { - let mut rng = generators::rng(); + fn test(updates: Vec<(HashMap>, HashSet)>) { + { + let mut state = BTreeMap::default(); + let mut sparse = RevealedSparseTrie::default(); - let mut state = BTreeMap::default(); - let mut sparse = RevealedSparseTrie::default(); + for (update, keys_to_delete) in updates { + // Insert state updates into the sparse trie and calculate the root + for (key, value) in update.clone() { + sparse.update_leaf(key, value).unwrap(); + } + let sparse_root = sparse.root(); + + // Insert state updates into the hash builder and calculate the root + state.extend(update); + let (hash_builder_root, hash_builder_proof_nodes) = + hash_builder_root_with_proofs( + state.clone(), + state.keys().cloned().collect::>(), + ); - for update in updates { - let mut count = 0; - // Insert state updates into the sparse trie and calculate the root - for (key, value) in update.clone() { - sparse.update_leaf(key, value).unwrap(); - count += 1; - } - let keys_to_delete_len = count / 2; - let sparse_root = sparse.root(); - - // Insert state updates into the hash builder and calculate the root - state.extend(update); - let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs( - state.clone(), - state.keys().cloned().collect::>(), - ); - - // Assert that the sparse trie root matches the hash builder root - assert_eq!(sparse_root, hash_builder_root); - // Assert that the sparse trie nodes match the hash builder proof nodes - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); - - // Delete some keys from both the hash builder and the sparse trie and check - // that the sparse trie root still matches the hash builder root - - let keys_to_delete = state - .keys() - .choose_multiple(&mut rng, keys_to_delete_len) - .into_iter() - .cloned() - .collect::>(); - for key in keys_to_delete { - state.remove(&key).unwrap(); - sparse.remove_leaf(&key).unwrap(); - } + // Assert that the sparse trie root matches the hash builder root + assert_eq!(sparse_root, hash_builder_root); + // Assert that the sparse trie nodes match the hash builder proof nodes + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); - let sparse_root = sparse.root(); + // Delete some keys from both the hash builder and the sparse trie and check + // that the sparse trie root still matches the hash builder root + for key in keys_to_delete { + state.remove(&key).unwrap(); + sparse.remove_leaf(&key).unwrap(); + } + + let sparse_root = sparse.root(); - let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs( - state.clone(), - state.keys().cloned().collect::>(), - ); + let (hash_builder_root, hash_builder_proof_nodes) = + hash_builder_root_with_proofs( + state.clone(), + state.keys().cloned().collect::>(), + ); - // Assert that the sparse trie root matches the hash builder root - assert_eq!(sparse_root, hash_builder_root); - // Assert that the sparse trie nodes match the hash builder proof nodes - assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + // Assert that the sparse trie root matches the hash builder root + assert_eq!(sparse_root, hash_builder_root); + // Assert that the sparse trie nodes match the hash builder proof nodes + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + } } } @@ -1325,17 +1367,41 @@ mod tests { base } + fn transform_updates( + updates: Vec>>, + mut rng: impl Rng, + ) -> 
Vec<(HashMap>, HashSet)> { + let mut keys = HashSet::new(); + updates + .into_iter() + .map(|update| { + keys.extend(update.keys().cloned()); + + let keys_to_delete_len = update.len() / 2; + let keys_to_delete = (0..keys_to_delete_len) + .map(|_| { + let key = keys.iter().choose(&mut rng).unwrap().clone(); + keys.take(&key).unwrap() + }) + .collect(); + + (update, keys_to_delete) + }) + .collect::>() + } + proptest!(ProptestConfig::with_cases(10), |( updates in proptest::collection::vec( proptest::collection::hash_map( any_with::(SizeRange::new(KEY_NIBBLES_LEN..=KEY_NIBBLES_LEN)).prop_map(pad_nibbles), any::>(), 1..100, - ), + ).prop_map(HashMap::from_iter), 1..100, - ) + ).prop_perturb(transform_updates) )| { - test(updates) }); + test(updates) + }); } /// We have three leaves that share the same prefix: 0x00, 0x01 and 0x02. Hash builder trie has @@ -1534,7 +1600,7 @@ mod tests { } #[test] - fn sparse_trie_get_nodes_at_depth() { + fn sparse_trie_get_changed_nodes_at_depth() { let mut sparse = RevealedSparseTrie::default(); let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); @@ -1568,38 +1634,41 @@ mod tests { .unwrap(); sparse.update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value).unwrap(); - assert_eq!(sparse.get_nodes_at_depth(0), HashSet::from([Nibbles::default()])); assert_eq!( - sparse.get_nodes_at_depth(1), - HashSet::from([Nibbles::from_nibbles_unchecked([0x5])]) + sparse.get_changed_nodes_at_depth(&mut PrefixSet::default(), 0), + vec![Nibbles::default()] + ); + assert_eq!( + sparse.get_changed_nodes_at_depth(&mut PrefixSet::default(), 1), + vec![Nibbles::from_nibbles_unchecked([0x5])] ); assert_eq!( - sparse.get_nodes_at_depth(2), - HashSet::from([ + sparse.get_changed_nodes_at_depth(&mut PrefixSet::default(), 2), + vec![ Nibbles::from_nibbles_unchecked([0x5, 0x0]), Nibbles::from_nibbles_unchecked([0x5, 0x2]), Nibbles::from_nibbles_unchecked([0x5, 0x3]) - ]) + ] ); assert_eq!( - sparse.get_nodes_at_depth(3), - HashSet::from([ + sparse.get_changed_nodes_at_depth(&mut PrefixSet::default(), 3), + vec![ Nibbles::from_nibbles_unchecked([0x5, 0x0, 0x2, 0x3]), Nibbles::from_nibbles_unchecked([0x5, 0x2]), Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x1]), Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x3]) - ]) + ] ); assert_eq!( - sparse.get_nodes_at_depth(4), - HashSet::from([ + sparse.get_changed_nodes_at_depth(&mut PrefixSet::default(), 4), + vec![ Nibbles::from_nibbles_unchecked([0x5, 0x0, 0x2, 0x3, 0x1]), Nibbles::from_nibbles_unchecked([0x5, 0x0, 0x2, 0x3, 0x3]), Nibbles::from_nibbles_unchecked([0x5, 0x2]), Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x1]), Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x3, 0x0]), Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x3, 0x2]) - ]) + ] ); } } diff --git a/crates/trie/trie/Cargo.toml b/crates/trie/trie/Cargo.toml index 134a3055c2b7..6136fa8e56bb 100644 --- a/crates/trie/trie/Cargo.toml +++ b/crates/trie/trie/Cargo.toml @@ -25,6 +25,7 @@ revm.workspace = true alloy-rlp.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true +alloy-trie.workspace = true # tracing tracing.workspace = true @@ -68,7 +69,8 @@ serde = [ "dep:serde", "alloy-consensus/serde", "alloy-primitives/serde", - "revm/serde" + "revm/serde", + "alloy-trie/serde" ] serde-bincode-compat = [ "serde_with", diff --git a/crates/trie/trie/src/hashed_cursor/noop.rs b/crates/trie/trie/src/hashed_cursor/noop.rs index 4783d5afd9d4..a21e1026b380 100644 --- a/crates/trie/trie/src/hashed_cursor/noop.rs +++ 
b/crates/trie/trie/src/hashed_cursor/noop.rs @@ -32,11 +32,11 @@ pub struct NoopHashedAccountCursor; impl HashedCursor for NoopHashedAccountCursor { type Value = Account; - fn next(&mut self) -> Result, DatabaseError> { + fn seek(&mut self, _key: B256) -> Result, DatabaseError> { Ok(None) } - fn seek(&mut self, _key: B256) -> Result, DatabaseError> { + fn next(&mut self) -> Result, DatabaseError> { Ok(None) } } @@ -49,11 +49,11 @@ pub struct NoopHashedStorageCursor; impl HashedCursor for NoopHashedStorageCursor { type Value = U256; - fn next(&mut self) -> Result, DatabaseError> { + fn seek(&mut self, _key: B256) -> Result, DatabaseError> { Ok(None) } - fn seek(&mut self, _key: B256) -> Result, DatabaseError> { + fn next(&mut self) -> Result, DatabaseError> { Ok(None) } } diff --git a/crates/trie/trie/src/metrics.rs b/crates/trie/trie/src/metrics.rs index 7582f37418d9..006dc7e36555 100644 --- a/crates/trie/trie/src/metrics.rs +++ b/crates/trie/trie/src/metrics.rs @@ -1,5 +1,5 @@ use crate::stats::TrieStats; -use metrics::Histogram; +use metrics::{Counter, Histogram}; use reth_metrics::Metrics; /// Wrapper for state root metrics. @@ -63,3 +63,23 @@ impl TrieType { } } } + +/// Metrics for trie walker +#[derive(Clone, Metrics)] +#[metrics(scope = "trie.walker")] +pub struct WalkerMetrics { + /// The number of subnodes out of order due to wrong tree mask. + out_of_order_subnode: Counter, +} + +impl WalkerMetrics { + /// Create new metrics for the given trie type. + pub fn new(ty: TrieType) -> Self { + Self::new_with_labels(&[("type", ty.as_str())]) + } + + /// Increment `out_of_order_subnode`. + pub fn inc_out_of_order_subnode(&self, amount: u64) { + self.out_of_order_subnode.increment(amount); + } +} diff --git a/crates/trie/trie/src/prefix_set.rs b/crates/trie/trie/src/prefix_set.rs index 0cf16f939d7b..d904ef38fdd5 100644 --- a/crates/trie/trie/src/prefix_set.rs +++ b/crates/trie/trie/src/prefix_set.rs @@ -211,8 +211,8 @@ impl PrefixSet { } impl<'a> IntoIterator for &'a PrefixSet { - type IntoIter = std::slice::Iter<'a, reth_trie_common::Nibbles>; type Item = &'a reth_trie_common::Nibbles; + type IntoIter = std::slice::Iter<'a, reth_trie_common::Nibbles>; fn into_iter(self) -> Self::IntoIter { self.iter() } diff --git a/crates/trie/trie/src/state.rs b/crates/trie/trie/src/state.rs index 2af48dfff798..eca126744e96 100644 --- a/crates/trie/trie/src/state.rs +++ b/crates/trie/trie/src/state.rs @@ -347,6 +347,7 @@ impl HashedStorageSorted { #[cfg(test)] mod tests { + use super::*; use alloy_primitives::Bytes; use revm::{ db::{ @@ -356,8 +357,6 @@ mod tests { primitives::{AccountInfo, Bytecode}, }; - use super::*; - #[test] fn hashed_state_wiped_extension() { let hashed_address = B256::default(); diff --git a/crates/trie/trie/src/walker.rs b/crates/trie/trie/src/walker.rs index e75a96d0f1f4..aaff293b379d 100644 --- a/crates/trie/trie/src/walker.rs +++ b/crates/trie/trie/src/walker.rs @@ -7,6 +7,9 @@ use alloy_primitives::B256; use reth_storage_errors::db::DatabaseError; use std::collections::HashSet; +#[cfg(feature = "metrics")] +use crate::metrics::WalkerMetrics; + /// `TrieWalker` is a structure that enables traversal of a Merkle trie. /// It allows moving through the trie in a depth-first manner, skipping certain branches /// if they have not changed. @@ -24,13 +27,23 @@ pub struct TrieWalker { pub changes: PrefixSet, /// The retained trie node keys that need to be removed. removed_keys: Option>, + #[cfg(feature = "metrics")] + /// Walker metrics. 
+    metrics: WalkerMetrics,
 }
 
 impl<C> TrieWalker<C> {
     /// Constructs a new `TrieWalker` from existing stack and a cursor.
     pub fn from_stack(cursor: C, stack: Vec<CursorSubNode>, changes: PrefixSet) -> Self {
-        let mut this =
-            Self { cursor, changes, stack, can_skip_current_node: false, removed_keys: None };
+        let mut this = Self {
+            cursor,
+            changes,
+            stack,
+            can_skip_current_node: false,
+            removed_keys: None,
+            #[cfg(feature = "metrics")]
+            metrics: WalkerMetrics::default(),
+        };
         this.update_skip_node();
         this
     }
@@ -113,6 +126,8 @@ impl<C: TrieCursor> TrieWalker<C> {
             stack: vec![CursorSubNode::default()],
             can_skip_current_node: false,
             removed_keys: None,
+            #[cfg(feature = "metrics")]
+            metrics: WalkerMetrics::default(),
         };
 
         // Set up the root node of the trie in the stack, if it exists.
@@ -179,6 +194,19 @@ impl<C: TrieCursor> TrieWalker<C> {
             self.stack[0].set_nibble(key[0] as i8);
         }
 
+        // The current tree mask might have been set incorrectly.
+        // Sanity check that the newly retrieved trie node key is the child of the last item
+        // on the stack. If not, advance to the next sibling instead of adding the node to the
+        // stack.
+        if let Some(subnode) = self.stack.last() {
+            if !key.starts_with(subnode.full_key()) {
+                #[cfg(feature = "metrics")]
+                self.metrics.inc_out_of_order_subnode(1);
+                self.move_to_next_sibling(false)?;
+                return Ok(())
+            }
+        }
+
         // Create a new CursorSubNode and push it to the stack.
         let subnode = CursorSubNode::new(key, Some(node));
         let nibble = subnode.nibble();
diff --git a/deny.toml b/deny.toml
index e58234602504..8d0807f9de5c 100644
--- a/deny.toml
+++ b/deny.toml
@@ -4,8 +4,12 @@
 [advisories]
 yanked = "warn"
 ignore = [
-    # proc-macro-error 1.0.4 unmaintained https://rustsec.org/advisories/RUSTSEC-2024-0370
-    "RUSTSEC-2024-0370"
+    # https://rustsec.org/advisories/RUSTSEC-2024-0379 used by boa (js-tracer)
+    "RUSTSEC-2024-0379",
+    # https://rustsec.org/advisories/RUSTSEC-2024-0384 used by sse example
+    "RUSTSEC-2024-0384",
+    # https://rustsec.org/advisories/RUSTSEC-2024-0388 used by ssz, will be removed https://github.com/sigp/ethereum_ssz/pull/34
+    "RUSTSEC-2024-0388"
 ]
 
 # This section is considered when running `cargo deny check bans`.
@@ -58,7 +62,7 @@ allow = [
 # aren't accepted for every possible crate as with the normal allow list
 exceptions = [
     # TODO: decide on MPL-2.0 handling
-    # These dependencies are grandfathered in in https://github.com/paradigmxyz/reth/pull/6980
+    # These dependencies are grandfathered in https://github.com/paradigmxyz/reth/pull/6980
     { allow = ["MPL-2.0"], name = "option-ext" },
     { allow = ["MPL-2.0"], name = "webpki-roots" },
 ]
diff --git a/docs/crates/stages.md b/docs/crates/stages.md
index c7815b453b4e..14666c1f44f9 100644
--- a/docs/crates/stages.md
+++ b/docs/crates/stages.md
@@ -43,7 +43,7 @@ pub trait Stage: Send + Sync {
 }
 ```
 
-To get a better idea of what is happening at each part of the pipeline, lets walk through what is going on under the hood within the `execute()` function at each stage, starting with `HeaderStage`.
+To get a better idea of what is happening at each part of the pipeline, let's walk through what is going on under the hood within the `execute()` function at each stage, starting with `HeaderStage`.
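(Editor's note: the walker hunk above is easiest to see in isolation. A node fetched from the trie cursor may only be pushed onto the stack if the last stack entry's key is a prefix of the new key; otherwise the branch node's tree mask advertised a child that does not exist, and the walker skips to the next sibling. The following is a minimal, self-contained sketch of just that prefix check, using plain nibble slices in place of reth's `Nibbles` type; `is_child_of` is a hypothetical helper, not part of the patch.)

```rust
/// Hypothetical stand-in for the walker's new sanity check: a retrieved trie
/// node key belongs under `parent` only if `parent` is a prefix of it.
fn is_child_of(key: &[u8], parent: &[u8]) -> bool {
    key.starts_with(parent)
}

fn main() {
    let parent = [0x5, 0x3];
    // In order: 0x5 0x3 0x1 extends the parent path, so it is pushed.
    assert!(is_child_of(&[0x5, 0x3, 0x1], &parent));
    // Out of order: a wrong tree mask pointed at 0x6 0x0, which is not below
    // the parent; the walker counts it and moves to the next sibling instead.
    assert!(!is_child_of(&[0x6, 0x0], &parent));
    println!("prefix checks passed");
}
```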
diff --git a/docs/repo/labels.md b/docs/repo/labels.md
index 6b3dba97ee6d..6772b828ffcd 100644
--- a/docs/repo/labels.md
+++ b/docs/repo/labels.md
@@ -30,7 +30,7 @@ For easier at-a-glance communication of the status of issues and PRs the followi
 - https://github.com/paradigmxyz/reth/labels/S-duplicate
 - https://github.com/paradigmxyz/reth/labels/S-wontfix
 
-**Misc.**
+**Miscellaneous**
 
 - https://github.com/paradigmxyz/reth/labels/S-needs-triage
 - https://github.com/paradigmxyz/reth/labels/S-controversial
diff --git a/docs/repo/layout.md b/docs/repo/layout.md
index 6ed91e79656f..f78abe961227 100644
--- a/docs/repo/layout.md
+++ b/docs/repo/layout.md
@@ -82,7 +82,6 @@ The networking component mainly lives in [`net/network`](../../crates/net/networ
 Different consensus mechanisms.
 
 - [`consensus/common`](../../crates/consensus/common): Common consensus functions and traits (e.g. fee calculation)
-- [`consensus/auto-seal`](../../crates/consensus/auto-seal): A consensus mechanism that auto-seals blocks for local development (also commonly known as "auto-mine")
 - [`consensus/beacon`](../../crates/consensus/beacon): Consensus mechanism that handles messages from a beacon node ("eth2")
 
 ### Execution
diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json
index 8c77f5979fe9..39ccdffe34f4 100644
--- a/etc/grafana/dashboards/overview.json
+++ b/etc/grafana/dashboards/overview.json
@@ -1953,6 +1953,7 @@
           "mode": "off"
         }
       },
+      "decimals": 4,
       "mappings": [],
       "thresholds": {
         "mode": "absolute",
@@ -3016,6 +3017,102 @@
           "legendFormat": "Gas/s",
           "range": true,
           "refId": "A"
+        },
+        {
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
+          "disableTextWrap": false,
+          "editorMode": "builder",
+          "expr": "avg_over_time(reth_sync_execution_gas_per_second{instance=~\"$instance\"}[1m])",
+          "fullMetaSearch": false,
+          "hide": false,
+          "includeNullMetadata": true,
+          "legendFormat": "Avg Gas/s (1m)",
+          "range": true,
+          "refId": "B",
+          "useBackend": false
+        },
+        {
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
+          "disableTextWrap": false,
+          "editorMode": "builder",
+          "expr": "avg_over_time(reth_sync_execution_gas_per_second{instance=~\"$instance\"}[5m])",
+          "fullMetaSearch": false,
+          "hide": false,
+          "includeNullMetadata": true,
+          "legendFormat": "Avg Gas/s (5m)",
+          "range": true,
+          "refId": "C",
+          "useBackend": false
+        },
+        {
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
+          "disableTextWrap": false,
+          "editorMode": "builder",
+          "expr": "avg_over_time(reth_sync_execution_gas_per_second{instance=~\"$instance\"}[10m])",
+          "fullMetaSearch": false,
+          "hide": false,
+          "includeNullMetadata": true,
+          "legendFormat": "Avg Gas/s (10m)",
+          "range": true,
+          "refId": "D",
+          "useBackend": false
+        },
+        {
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
+          "disableTextWrap": false,
+          "editorMode": "builder",
+          "expr": "avg_over_time(reth_sync_execution_gas_per_second{instance=~\"$instance\"}[30m])",
+          "fullMetaSearch": false,
+          "hide": false,
+          "includeNullMetadata": true,
+          "legendFormat": "Avg Gas/s (30m)",
+          "range": true,
+          "refId": "E",
+          "useBackend": false
+        },
+        {
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
+          "disableTextWrap": false,
+          "editorMode": "builder",
+          "expr": "avg_over_time(reth_sync_execution_gas_per_second{instance=~\"$instance\"}[1h])",
+          "fullMetaSearch": false,
+          "hide": false,
+          "includeNullMetadata": true,
+          "legendFormat": "Avg Gas/s (1h)",
+          "range": true,
+          "refId": "F",
+          "useBackend": false
+        },
+        {
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
+          "disableTextWrap": false,
+          "editorMode": "builder",
+          "expr": "avg_over_time(reth_sync_execution_gas_per_second{instance=~\"$instance\"}[24h])",
+          "fullMetaSearch": false,
+          "hide": false,
+          "includeNullMetadata": true,
+          "legendFormat": "Avg Gas/s (24h)",
+          "range": true,
+          "refId": "G",
+          "useBackend": false
         }
       ],
       "title": "Execution throughput",
@@ -5065,6 +5162,80 @@
       "title": "Pipeline runs",
       "type": "timeseries"
     },
+    {
+      "datasource": {
+        "type": "prometheus",
+        "uid": "${DS_PROMETHEUS}"
+      },
+      "description": "Latency histogram for the engine_newPayload to Forkchoice Update",
+      "fieldConfig": {
+        "defaults": {
+          "color": {
+            "mode": "palette-classic"
+          },
+          "custom": {
+            "axisLabel": "",
+            "axisPlacement": "auto",
+            "barAlignment": 0,
+            "drawStyle": "line",
+            "fillOpacity": 0,
+            "gradientMode": "none",
+            "hideFrom": {
+              "legend": false,
+              "tooltip": false,
+              "viz": false
+            },
+            "lineInterpolation": "linear",
+            "lineWidth": 1,
+            "pointSize": 5,
+            "scaleDistribution": {
+              "type": "linear"
+            },
+            "showPoints": "never",
+            "spanNulls": false,
+            "stacking": {
+              "mode": "none"
+            }
+          },
+          "unit": "s"
+        },
+        "overrides": []
+      },
+      "gridPos": {
+        "h": 8,
+        "w": 12,
+        "x": 0,
+        "y": 188
+      },
+      "id": 213,
+      "options": {
+        "legend": {
+          "calcs": [],
+          "displayMode": "list",
+          "placement": "bottom",
+          "showLegend": true
+        },
+        "tooltip": {
+          "mode": "single",
+          "sort": "none"
+        }
+      },
+      "targets": [
+        {
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
+          "editorMode": "builder",
+          "expr": "reth_engine_rpc_new_payload_forkchoice_updated_time_diff{instance=~\"$instance\"}",
+          "legendFormat": "new_payload_forkchoice_updated",
+          "range": true,
+          "refId": "A"
+        }
+      ],
+      "title": "Engine API newPayload Forkchoice Update Latency",
+      "type": "timeseries"
+    },
     {
       "datasource": {
         "type": "prometheus",
diff --git a/etc/grafana/dashboards/reth-mempool.json b/etc/grafana/dashboards/reth-mempool.json
index ebb693184a5a..bba5dbd0e229 100644
--- a/etc/grafana/dashboards/reth-mempool.json
+++ b/etc/grafana/dashboards/reth-mempool.json
@@ -1493,6 +1493,108 @@
       "title": "Incoming Gossip and Requests",
       "type": "timeseries"
     },
+    {
+      "datasource": {
+        "type": "prometheus",
+        "uid": "${DS_PROMETHEUS}"
+      },
+      "description": "Measures the message send rate (MPS) for queued outgoing messages. Outgoing messages are added to the queue before being sent to other peers, and this metric helps track the rate of message dispatch.",
+      "fieldConfig": {
+        "defaults": {
+          "color": {
+            "mode": "palette-classic"
+          },
+          "custom": {
+            "axisBorderShow": false,
+            "axisCenteredZero": false,
+            "axisColorMode": "text",
+            "axisLabel": "",
+            "axisPlacement": "auto",
+            "barAlignment": 0,
+            "barWidthFactor": 0.6,
+            "drawStyle": "line",
+            "fillOpacity": 0,
+            "gradientMode": "none",
+            "hideFrom": {
+              "legend": false,
+              "tooltip": false,
+              "viz": false
+            },
+            "insertNulls": false,
+            "lineInterpolation": "linear",
+            "lineWidth": 1,
+            "pointSize": 5,
+            "scaleDistribution": {
+              "type": "linear"
+            },
+            "showPoints": "auto",
+            "spanNulls": false,
+            "stacking": {
+              "group": "A",
+              "mode": "none"
+            },
+            "thresholdsStyle": {
+              "mode": "off"
+            }
+          },
+          "mappings": [],
+          "thresholds": {
+            "mode": "absolute",
+            "steps": [
+              {
+                "color": "green",
+                "value": null
+              },
+              {
+                "color": "red",
+                "value": 80
+              }
+            ]
+          },
+          "unit": "mps"
+        },
+        "overrides": []
+      },
+      "gridPos": {
+        "h": 8,
+        "w": 12,
+        "x": 12,
+        "y": 29
+      },
+      "id": 219,
+      "options": {
+        "legend": {
+          "calcs": [],
+          "displayMode": "list",
+          "placement": "bottom",
+          "showLegend": true
+        },
+        "tooltip": {
+          "mode": "single",
+          "sort": "none"
+        }
+      },
+      "targets": [
+        {
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
+          "disableTextWrap": false,
+          "editorMode": "builder",
+          "expr": "rate(reth_network_queued_outgoing_messages{instance=\"$instance\"}[$__rate_interval])",
+          "fullMetaSearch": false,
+          "includeNullMetadata": true,
+          "instant": false,
+          "legendFormat": "Queued Messages per Second",
+          "range": true,
+          "refId": "A",
+          "useBackend": false
+        }
+      ],
+      "title": "Queued Outgoing Messages",
+      "type": "timeseries"
+    },
     {
       "datasource": {
         "type": "prometheus",
@@ -2931,7 +3033,7 @@
     "gridPos": {
       "h": 8,
       "w": 12,
-      "x": 12,
+      "x": 0,
       "y": 69
     },
     "id": 214,
diff --git a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs
index 1c53e4f41051..2436ee0210e4 100644
--- a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs
+++ b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs
@@ -13,6 +13,7 @@ use serde::{Deserialize, Serialize};
 use std::{
     collections::VecDeque,
     pin::Pin,
+    sync::Arc,
     task::{Context, Poll},
 };
 use thiserror::Error;
@@ -110,9 +111,10 @@ where
         match self.pool.get_all_blobs_exact(txs.iter().map(|(tx, _)| tx.hash()).collect()) {
             Ok(blobs) => {
                 actions_to_queue.reserve_exact(txs.len());
-                for ((tx, _), sidecar) in txs.iter().zip(blobs.iter()) {
-                    let transaction = BlobTransaction::try_from_signed(tx.clone(), sidecar.clone())
-                        .expect("should not fail to convert blob tx if it is already eip4844");
+                for ((tx, _), sidecar) in txs.iter().zip(blobs.into_iter()) {
+                    let transaction =
+                        BlobTransaction::try_from_signed(tx.clone(), Arc::unwrap_or_clone(sidecar))
+                            .expect("should not fail to convert blob tx if it is already eip4844");
 
                     let block_metadata = BlockMetadata {
                         block_hash: block.hash(),
diff --git a/examples/custom-beacon-withdrawals/src/main.rs b/examples/custom-beacon-withdrawals/src/main.rs
index 09dad2f7007d..43e5f7428f63 100644
--- a/examples/custom-beacon-withdrawals/src/main.rs
+++ b/examples/custom-beacon-withdrawals/src/main.rs
@@ -3,7 +3,7 @@
 
 #![cfg_attr(not(test), warn(unused_crate_dependencies))]
 
-use alloy_eips::eip7685::Requests;
+use alloy_eips::{eip4895::Withdrawal, eip7685::Requests};
 use alloy_sol_macro::sol;
 use alloy_sol_types::SolCall;
 #[cfg(feature = "optimism")]
@@ -30,7 +30,7 @@ use reth_primitives::{
     revm_primitives::{
         address, Address, BlockEnv, Bytes, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, U256,
     },
-    BlockWithSenders, Receipt, Withdrawal,
+    BlockWithSenders, Receipt,
 };
 use std::{fmt::Display, sync::Arc};
diff --git a/examples/custom-engine-types/Cargo.toml b/examples/custom-engine-types/Cargo.toml
index f826451d2038..9afd16bea160 100644
--- a/examples/custom-engine-types/Cargo.toml
+++ b/examples/custom-engine-types/Cargo.toml
@@ -16,9 +16,11 @@ reth-basic-payload-builder.workspace = true
 reth-ethereum-payload-builder.workspace = true
 reth-node-ethereum = { workspace = true, features = ["test-utils"] }
 reth-tracing.workspace = true
+reth-trie-db.workspace = true
 alloy-genesis.workspace = true
 alloy-rpc-types = { workspace = true, features = ["engine"] }
 alloy-primitives.workspace = true
+alloy-eips.workspace = true
 
 eyre.workspace = true
 tokio.workspace = true
diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs
index 46a7d7d9af93..704ecb7e3c4b 100644
--- a/examples/custom-engine-types/src/main.rs
+++ b/examples/custom-engine-types/src/main.rs
@@ -22,6 +22,7 @@ use std::{convert::Infallible, sync::Arc};
 use serde::{Deserialize, Serialize};
 use thiserror::Error;
 
+use alloy_eips::eip4895::Withdrawals;
 use alloy_genesis::Genesis;
 use alloy_primitives::{Address, B256};
 use alloy_rpc_types::{
@@ -68,8 +69,8 @@ use reth_payload_builder::{
     EthBuiltPayload, EthPayloadBuilderAttributes, PayloadBuilderError, PayloadBuilderHandle,
     PayloadBuilderService,
 };
-use reth_primitives::Withdrawals;
 use reth_tracing::{RethTracer, Tracer};
+use reth_trie_db::MerklePatriciaTrie;
 
 /// A custom payload attributes type.
 #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
@@ -110,7 +111,11 @@ impl PayloadBuilderAttributes for CustomPayloadBuilderAttributes {
     type RpcPayloadAttributes = CustomPayloadAttributes;
     type Error = Infallible;
 
-    fn try_new(parent: B256, attributes: CustomPayloadAttributes) -> Result<Self, Self::Error> {
+    fn try_new(
+        parent: B256,
+        attributes: CustomPayloadAttributes,
+        _version: u8,
+    ) -> Result<Self, Self::Error> {
         Ok(Self(EthPayloadBuilderAttributes::new(parent, attributes.inner)))
     }
 
@@ -156,7 +161,7 @@ impl PayloadTypes for CustomEngineTypes {
 }
 
 impl EngineTypes for CustomEngineTypes {
-    type ExecutionPayloadV1 = ExecutionPayloadV1;
+    type ExecutionPayloadEnvelopeV1 = ExecutionPayloadV1;
     type ExecutionPayloadEnvelopeV2 = ExecutionPayloadEnvelopeV2;
     type ExecutionPayloadEnvelopeV3 = ExecutionPayloadEnvelopeV3;
     type ExecutionPayloadEnvelopeV4 = ExecutionPayloadEnvelopeV4;
@@ -224,6 +229,7 @@ struct MyCustomNode;
 impl NodeTypes for MyCustomNode {
     type Primitives = ();
     type ChainSpec = ChainSpec;
+    type StateCommitment = MerklePatriciaTrie;
 }
 
 /// Configure the node types with the custom engine types
@@ -337,7 +343,7 @@ where
         args: BuildArguments<Pool, Client, Self::Attributes, Self::BuiltPayload>,
     ) -> Result<BuildOutcome<Self::BuiltPayload>, PayloadBuilderError> {
         let BuildArguments { client, pool, cached_reads, config, cancel, best_payload } = args;
-        let PayloadConfig { parent_block, extra_data, attributes } = config;
+        let PayloadConfig { parent_header, extra_data, attributes } = config;
 
         let chain_spec = client.chain_spec();
 
@@ -350,7 +356,7 @@ where
             client,
             pool,
             cached_reads,
-            config: PayloadConfig { parent_block, extra_data, attributes: attributes.0 },
+            config: PayloadConfig { parent_header, extra_data, attributes: attributes.0 },
             cancel,
             best_payload,
         })
@@ -361,10 +367,10 @@ where
         client: &Client,
         config: PayloadConfig<Self::Attributes>,
     ) -> Result<Self::BuiltPayload, PayloadBuilderError> {
-        let PayloadConfig { parent_block, extra_data, attributes } = config;
+        let PayloadConfig { parent_header, extra_data, attributes } = config;
         let chain_spec = client.chain_spec();
         <reth_ethereum_payload_builder::EthereumPayloadBuilder as PayloadBuilder<Pool, Client>>::build_empty_payload(&reth_ethereum_payload_builder::EthereumPayloadBuilder::new(EthEvmConfig::new(chain_spec.clone())),client,
-            PayloadConfig { parent_block, extra_data, attributes: attributes.0})
+            PayloadConfig { parent_header, extra_data, attributes: attributes.0})
     }
 }
diff --git a/examples/custom-evm/Cargo.toml b/examples/custom-evm/Cargo.toml
index 53563ab9575b..e763a932eabf 100644
--- a/examples/custom-evm/Cargo.toml
+++ b/examples/custom-evm/Cargo.toml
@@ -16,6 +16,7 @@ reth-node-ethereum = { workspace = true, features = ["test-utils"] }
 reth-tracing.workspace = true
 alloy-genesis.workspace = true
 alloy-primitives.workspace = true
+alloy-consensus.workspace = true
 
 eyre.workspace = true
 tokio.workspace = true
diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs
index 55063fc9bbcb..c564c5b28b6f 100644
--- a/examples/custom-evm/src/main.rs
+++ b/examples/custom-evm/src/main.rs
@@ -2,6 +2,7 @@
 
 #![cfg_attr(not(test), warn(unused_crate_dependencies))]
 
+use alloy_consensus::Header;
 use alloy_genesis::Genesis;
 use alloy_primitives::{address, Address, Bytes, U256};
 use reth::{
@@ -35,10 +36,10 @@ use reth_node_ethereum::{
 };
 use reth_primitives::{
     revm_primitives::{CfgEnvWithHandlerCfg, TxEnv},
-    Header, TransactionSigned,
+    TransactionSigned,
 };
 use reth_tracing::{RethTracer, Tracer};
-use std::sync::Arc;
+use std::{convert::Infallible, sync::Arc};
 
 /// Custom EVM configuration
 #[derive(Debug, Clone)]
@@ -87,6 +88,7 @@ impl MyEvmConfig {
 
 impl ConfigureEvmEnv for MyEvmConfig {
     type Header = Header;
+    type Error = Infallible;
 
     fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) {
         self.inner.fill_tx_env(tx_env, transaction, sender);
@@ -115,7 +117,7 @@ impl ConfigureEvmEnv for MyEvmConfig {
         &self,
         parent: &Self::Header,
         attributes: NextBlockEnvAttributes,
-    ) -> (CfgEnvWithHandlerCfg, BlockEnv) {
+    ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), Self::Error> {
         self.inner.next_cfg_and_block_env(parent, attributes)
     }
 }
diff --git a/examples/custom-inspector/Cargo.toml b/examples/custom-inspector/Cargo.toml
index 18629556c42f..ee6f887e64c0 100644
--- a/examples/custom-inspector/Cargo.toml
+++ b/examples/custom-inspector/Cargo.toml
@@ -8,7 +8,8 @@ license.workspace = true
 [dependencies]
 reth.workspace = true
 reth-node-ethereum.workspace = true
-alloy-rpc-types.workspace = true
+alloy-rpc-types-eth.workspace = true
 clap = { workspace = true, features = ["derive"] }
 futures-util.workspace = true
 alloy-primitives.workspace = true
+alloy-eips.workspace = true
diff --git a/examples/custom-inspector/src/main.rs b/examples/custom-inspector/src/main.rs
index 12b7620f4adc..67863d00e1e9 100644
--- a/examples/custom-inspector/src/main.rs
+++ b/examples/custom-inspector/src/main.rs
@@ -10,15 +10,15 @@
 
 #![cfg_attr(not(test), warn(unused_crate_dependencies))]
 
+use alloy_eips::BlockNumberOrTag;
 use alloy_primitives::Address;
-use alloy_rpc_types::state::EvmOverrides;
+use alloy_rpc_types_eth::state::EvmOverrides;
 use clap::Parser;
 use futures_util::StreamExt;
 use reth::{
     builder::NodeHandle,
     chainspec::EthereumChainSpecParser,
     cli::Cli,
-    primitives::BlockNumberOrTag,
     revm::{
         inspector_handle_register,
         interpreter::{Interpreter, OpCode},
diff --git a/examples/custom-payload-builder/Cargo.toml b/examples/custom-payload-builder/Cargo.toml
index 1c160fe5ec87..b77a3f2945ca 100644
--- a/examples/custom-payload-builder/Cargo.toml
+++ b/examples/custom-payload-builder/Cargo.toml
@@ -16,6 +16,7 @@ reth-node-ethereum.workspace = true
 reth-ethereum-payload-builder.workspace = true
 
 alloy-primitives.workspace = true
+alloy-eips.workspace = true
 
 tracing.workspace = true
 futures-util.workspace = true
diff --git a/examples/custom-payload-builder/src/generator.rs b/examples/custom-payload-builder/src/generator.rs
index f5d64e41cd09..2e264d017a3b 100644
--- a/examples/custom-payload-builder/src/generator.rs
+++ b/examples/custom-payload-builder/src/generator.rs
@@ -1,4 +1,5 @@
 use crate::job::EmptyBlockPayloadJob;
+use alloy_eips::BlockNumberOrTag;
 use alloy_primitives::Bytes;
 use reth::{
     providers::{BlockReaderIdExt, BlockSource, StateProviderFactory},
@@ -8,7 +9,7 @@ use reth::{
 use reth_basic_payload_builder::{BasicPayloadJobGeneratorConfig, PayloadBuilder, PayloadConfig};
 use reth_node_api::PayloadBuilderAttributes;
 use reth_payload_builder::{PayloadBuilderError, PayloadJobGenerator};
-use reth_primitives::BlockNumberOrTag;
+use reth_primitives::SealedHeader;
 use std::sync::Arc;
 
 /// The generator type that creates new jobs that builds empty blocks.
@@ -77,7 +78,10 @@ where
             // we already know the hash, so we can seal it
             block.seal(attributes.parent())
         };
-        let config = PayloadConfig::new(Arc::new(parent_block), Bytes::default(), attributes);
+        let hash = parent_block.hash();
+        let header = SealedHeader::new(parent_block.header().clone(), hash);
+
+        let config = PayloadConfig::new(Arc::new(header), Bytes::default(), attributes);
         Ok(EmptyBlockPayloadJob {
             client: self.client.clone(),
             _pool: self.pool.clone(),
diff --git a/examples/db-access/Cargo.toml b/examples/db-access/Cargo.toml
index 0a7ef9bb6b29..3310d1cbd676 100644
--- a/examples/db-access/Cargo.toml
+++ b/examples/db-access/Cargo.toml
@@ -14,7 +14,7 @@ reth-provider.workspace = true
 reth-node-ethereum.workspace = true
 reth-node-types.workspace = true
 
-alloy-rpc-types.workspace = true
+alloy-rpc-types-eth.workspace = true
 alloy-primitives.workspace = true
 
diff --git a/examples/db-access/src/main.rs b/examples/db-access/src/main.rs
index 5772461bd7a7..0f7d1a269f3c 100644
--- a/examples/db-access/src/main.rs
+++ b/examples/db-access/src/main.rs
@@ -1,5 +1,5 @@
-use alloy_primitives::{Address, Sealable, B256};
-use alloy_rpc_types::{Filter, FilteredParams};
+use alloy_primitives::{Address, B256};
+use alloy_rpc_types_eth::{Filter, FilteredParams};
 use reth_chainspec::ChainSpecBuilder;
 use reth_db::{open_db_read_only, DatabaseEnv};
 use reth_node_ethereum::EthereumNode;
@@ -63,9 +63,7 @@ fn header_provider_example<T: HeaderProvider>(provider: T, number: u64) -> eyre::Result<()> {
 
     // We can convert a header to a sealed header which contains the hash w/o needing to re-compute
     // it every time.
-    let sealed = header.seal_slow();
-    let (header, seal) = sealed.into_parts();
-    let sealed_header = SealedHeader::new(header, seal);
+    let sealed_header = SealedHeader::seal(header);
 
     // Can also query the header by hash!
     let header_by_hash =
diff --git a/examples/manual-p2p/src/main.rs b/examples/manual-p2p/src/main.rs
index 857a8a1c1269..79a2ff26a274 100644
--- a/examples/manual-p2p/src/main.rs
+++ b/examples/manual-p2p/src/main.rs
@@ -106,7 +106,8 @@ async fn handshake_eth(p2p_stream: AuthedP2PStream) -> eyre::Result<(AuthedEthStream, Status)> {
         .forkid(MAINNET.hardfork_fork_id(EthereumHardfork::Shanghai).unwrap())
         .build();
 
-    let status = Status { version: p2p_stream.shared_capabilities().eth()?.version(), ..status };
+    let status =
+        Status { version: p2p_stream.shared_capabilities().eth()?.version().try_into()?, ..status };
 
     let eth_unauthed = UnauthedEthStream::new(p2p_stream);
     Ok(eth_unauthed.handshake(status, fork_filter).await?)
 }
diff --git a/examples/rpc-db/src/main.rs b/examples/rpc-db/src/main.rs
index 1b2899a6485b..92ae86f00bb9 100644
--- a/examples/rpc-db/src/main.rs
+++ b/examples/rpc-db/src/main.rs
@@ -16,6 +16,7 @@ use std::{path::Path, sync::Arc};
 
 use reth::{
     api::NodeTypesWithDBAdapter,
+    beacon_consensus::EthBeaconConsensus,
     providers::{
         providers::{BlockchainProvider, StaticFileProvider},
         ProviderFactory,
@@ -66,9 +67,10 @@ async fn main() -> eyre::Result<()> {
         .with_noop_pool()
         .with_noop_network()
         .with_executor(TokioTaskExecutor::default())
-        .with_evm_config(EthEvmConfig::new(spec))
+        .with_evm_config(EthEvmConfig::new(spec.clone()))
         .with_events(TestCanonStateSubscriptions::default())
-        .with_block_executor(EthExecutorProvider::ethereum(provider.chain_spec()));
+        .with_block_executor(EthExecutorProvider::ethereum(provider.chain_spec()))
+        .with_consensus(EthBeaconConsensus::new(spec));
 
     // Pick which namespaces to expose.
     let config = TransportRpcModuleConfig::default().with_http([RethRpcModule::Eth]);
diff --git a/examples/stateful-precompile/Cargo.toml b/examples/stateful-precompile/Cargo.toml
index 47a784c36e14..478886d061f8 100644
--- a/examples/stateful-precompile/Cargo.toml
+++ b/examples/stateful-precompile/Cargo.toml
@@ -15,6 +15,7 @@ reth-node-ethereum = { workspace = true, features = ["test-utils"] }
 reth-tracing.workspace = true
 alloy-genesis.workspace = true
 alloy-primitives.workspace = true
+alloy-consensus.workspace = true
 
 eyre.workspace = true
 parking_lot.workspace = true
diff --git a/examples/stateful-precompile/src/main.rs b/examples/stateful-precompile/src/main.rs
index b0165e4de26c..5be45ad76741 100644
--- a/examples/stateful-precompile/src/main.rs
+++ b/examples/stateful-precompile/src/main.rs
@@ -2,6 +2,7 @@
 
 #![cfg_attr(not(test), warn(unused_crate_dependencies))]
 
+use alloy_consensus::Header;
 use alloy_genesis::Genesis;
 use alloy_primitives::{Address, Bytes, U256};
 use parking_lot::RwLock;
@@ -26,11 +27,11 @@ use reth_node_ethereum::{
 };
 use reth_primitives::{
     revm_primitives::{SpecId, StatefulPrecompileMut},
-    Header, TransactionSigned,
+    TransactionSigned,
 };
 use reth_tracing::{RethTracer, Tracer};
 use schnellru::{ByLength, LruMap};
-use std::{collections::HashMap, sync::Arc};
+use std::{collections::HashMap, convert::Infallible, sync::Arc};
 
 /// Type alias for the LRU cache used within the [`PrecompileCache`].
 type PrecompileLRUCache = LruMap<(Bytes, u64), PrecompileResult>;
@@ -147,6 +148,7 @@ impl StatefulPrecompileMut for WrappedPrecompile {
 
 impl ConfigureEvmEnv for MyEvmConfig {
     type Header = Header;
+    type Error = Infallible;
 
     fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) {
         self.inner.fill_tx_env(tx_env, transaction, sender)
@@ -175,7 +177,7 @@ impl ConfigureEvmEnv for MyEvmConfig {
         &self,
         parent: &Self::Header,
         attributes: NextBlockEnvAttributes,
-    ) -> (CfgEnvWithHandlerCfg, BlockEnv) {
+    ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), Self::Error> {
         self.inner.next_cfg_and_block_env(parent, attributes)
     }
 }
diff --git a/testing/ef-tests/Cargo.toml b/testing/ef-tests/Cargo.toml
index a56c44ec3db5..2fc0c7512441 100644
--- a/testing/ef-tests/Cargo.toml
+++ b/testing/ef-tests/Cargo.toml
@@ -14,15 +14,19 @@ workspace = true
 [features]
 ef-tests = []
 asm-keccak = [
-  "reth-primitives/asm-keccak",
-  "alloy-primitives/asm-keccak",
-  "revm/asm-keccak"
+    "reth-primitives/asm-keccak",
+    "alloy-primitives/asm-keccak",
+    "revm/asm-keccak",
 ]
 
 [dependencies]
 reth-chainspec.workspace = true
 reth-primitives.workspace = true
-reth-db = { workspace = true, features = ["mdbx", "test-utils", "disable-lock"] }
+reth-db = { workspace = true, features = [
+    "mdbx",
+    "test-utils",
+    "disable-lock",
+] }
 reth-db-api.workspace = true
 reth-provider = { workspace = true, features = ["test-utils"] }
 reth-stages.workspace = true
@@ -33,6 +37,8 @@ revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] }
 
 alloy-rlp.workspace = true
 alloy-primitives.workspace = true
+alloy-eips.workspace = true
+alloy-consensus.workspace = true
 
 walkdir = "2.3.3"
 serde.workspace = true
diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs
index b5dc073c1da3..292b32e8ce0f 100644
--- a/testing/ef-tests/src/models.rs
+++ b/testing/ef-tests/src/models.rs
@@ -1,6 +1,8 @@
 //! Shared models for <https://github.com/ethereum/tests>
 
 use crate::{assert::assert_equal, Error};
+use alloy_consensus::Header as RethHeader;
+use alloy_eips::eip4895::Withdrawals;
 use alloy_primitives::{keccak256, Address, Bloom, Bytes, B256, B64, U256};
 use reth_chainspec::{ChainSpec, ChainSpecBuilder};
 use reth_db::tables;
@@ -8,9 +10,7 @@ use reth_db_api::{
     cursor::DbDupCursorRO,
     transaction::{DbTx, DbTxMut},
 };
-use reth_primitives::{
-    Account as RethAccount, Bytecode, Header as RethHeader, SealedHeader, StorageEntry, Withdrawals,
-};
+use reth_primitives::{Account as RethAccount, Bytecode, SealedHeader, StorageEntry};
 use serde::Deserialize;
 use std::{collections::BTreeMap, ops::Deref};
diff --git a/testing/testing-utils/Cargo.toml b/testing/testing-utils/Cargo.toml
index 98bfeabdfb15..3e0f58a7bd08 100644
--- a/testing/testing-utils/Cargo.toml
+++ b/testing/testing-utils/Cargo.toml
@@ -17,6 +17,7 @@ reth-primitives = { workspace = true, features = ["secp256k1"] }
 alloy-genesis.workspace = true
 alloy-primitives.workspace = true
 alloy-consensus.workspace = true
+alloy-eips.workspace = true
 rand.workspace = true
 secp256k1 = { workspace = true, features = ["rand"] }
 
diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs
index 571727cb2fd6..582298feab9f 100644
--- a/testing/testing-utils/src/generators.rs
+++ b/testing/testing-utils/src/generators.rs
@@ -1,14 +1,15 @@
 //! Generators for different data structures like block headers, block bodies and ranges of those.
-use alloy_consensus::{Transaction as _, TxLegacy};
-use alloy_primitives::{Address, BlockNumber, Bytes, Parity, Sealable, TxKind, B256, U256};
+use alloy_consensus::{Header, Transaction as _, TxLegacy};
+use alloy_eips::eip4895::{Withdrawal, Withdrawals};
+use alloy_primitives::{Address, BlockNumber, Bytes, TxKind, B256, U256};
 pub use rand::Rng;
 use rand::{
     distributions::uniform::SampleRange, rngs::StdRng, seq::SliceRandom, thread_rng, SeedableRng,
 };
 use reth_primitives::{
-    proofs, sign_message, Account, BlockBody, Header, Log, Receipt, SealedBlock, SealedHeader,
-    StorageEntry, Transaction, TransactionSigned, Withdrawal, Withdrawals,
+    proofs, sign_message, Account, BlockBody, Log, Receipt, SealedBlock, SealedHeader,
+    StorageEntry, Transaction, TransactionSigned,
 };
 use secp256k1::{Keypair, Secp256k1};
 use std::{
@@ -98,16 +99,14 @@ pub fn random_header_range(
 
 /// The header is assumed to not be correct if validated.
 pub fn random_header<R: Rng>(rng: &mut R, number: u64, parent: Option<B256>) -> SealedHeader {
-    let header = reth_primitives::Header {
+    let header = alloy_consensus::Header {
         number,
         nonce: rng.gen(),
        difficulty: U256::from(rng.gen::<u64>()),
         parent_hash: parent.unwrap_or_default(),
         ..Default::default()
     };
-    let sealed = header.seal_slow();
-    let (header, seal) = sealed.into_parts();
-    SealedHeader::new(header, seal)
+    SealedHeader::seal(header)
 }
 
 /// Generates a random legacy [Transaction].
@@ -147,17 +146,9 @@ pub fn sign_tx_with_random_key_pair<R: Rng>(rng: &mut R, tx: Transaction) -> TransactionSigned {
 
 /// Signs the [Transaction] with the given key pair.
 pub fn sign_tx_with_key_pair(key_pair: Keypair, tx: Transaction) -> TransactionSigned {
-    let mut signature =
+    let signature =
         sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), tx.signature_hash()).unwrap();
 
-    if matches!(tx, Transaction::Legacy(_)) {
-        signature = if let Some(chain_id) = tx.chain_id() {
-            signature.with_chain_id(chain_id)
-        } else {
-            signature.with_parity(Parity::NonEip155(signature.v().y_parity()))
-        }
-    }
-
     TransactionSigned::from_transaction_and_signature(tx, signature)
 }
 
@@ -210,7 +201,7 @@ pub fn random_block<R: Rng>(rng: &mut R, number: u64, block_params: BlockParams) -> SealedBlock {
     });
 
     let withdrawals_root = withdrawals.as_ref().map(|w| proofs::calculate_withdrawals_root(w));
-    let sealed = Header {
+    let header = Header {
         parent_hash: block_params.parent.unwrap_or_default(),
         number,
         gas_used: total_gas,
@@ -222,13 +213,10 @@ pub fn random_block<R: Rng>(rng: &mut R, number: u64, block_params: BlockParams) -> SealedBlock {
         requests_hash: None,
         withdrawals_root,
         ..Default::default()
-    }
-    .seal_slow();
-
-    let (header, seal) = sealed.into_parts();
+    };
 
     SealedBlock {
-        header: SealedHeader::new(header, seal),
+        header: SealedHeader::seal(header),
         body: BlockBody { transactions, ommers, withdrawals: withdrawals.map(Withdrawals::new) },
     }
 }
@@ -463,8 +451,8 @@ mod tests {
     use super::*;
     use alloy_consensus::TxEip1559;
     use alloy_eips::eip2930::AccessList;
-    use alloy_primitives::{hex, Parity};
-    use reth_primitives::{public_key_to_address, Signature};
+    use alloy_primitives::{hex, PrimitiveSignature as Signature};
+    use reth_primitives::public_key_to_address;
     use std::str::FromStr;
 
     #[test]
@@ -537,7 +525,7 @@ mod tests {
                 "46948507304638947509940763649030358759909902576025900602547168820602576006531",
             )
             .unwrap(),
-            Parity::Parity(false),
+            false,
        );
         assert_eq!(expected, signature);
     }
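(Editor's note: several hunks above, in `examples/db-access/src/main.rs` and `testing/testing-utils/src/generators.rs`, replace the same three-step sealing dance with the `SealedHeader::seal` constructor. A minimal sketch of the before/after, mirroring the API calls exactly as they appear in those hunks; it assumes a workspace that depends on `reth-primitives` and `alloy`, so it is not compilable standalone.)

```rust
use alloy_consensus::Header;
use alloy_primitives::Sealable; // provides `seal_slow`
use reth_primitives::SealedHeader;

// Before: hash the header, split the sealed pair, then reassemble it.
fn seal_old(header: Header) -> SealedHeader {
    let sealed = header.seal_slow();
    let (header, seal) = sealed.into_parts();
    SealedHeader::new(header, seal)
}

// After: one constructor that hashes the header and pairs it with its hash.
fn seal_new(header: Header) -> SealedHeader {
    SealedHeader::seal(header)
}
```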